mirror of
https://github.com/huggingface/transformers.git
synced 2025-07-31 18:22:34 +06:00
Update README.txt (#8957)
This commit is contained in:
parent
37f4c24f10
commit
7ccd973ea1
@ -1,7 +1,6 @@
|
||||
---
|
||||
language: vi
|
||||
---
|
||||
|
||||
# BERT for Vietnamese, trained on a news dataset of more than 20 GB
|
||||
|
||||
Applied to the sentiment analysis task using [AIViVN's comments dataset](https://www.aivivn.com/contests/6)
|
||||
@ -19,7 +18,24 @@ You can download trained model:
|
||||
- [tensorflow](https://drive.google.com/file/d/1X-sRDYf7moS_h61J3L79NkMVGHP-P-k5/view?usp=sharing).
|
||||
- [pytorch](https://drive.google.com/file/d/11aFSTpYIurn-oI2XpAmcCTccB_AonMOu/view?usp=sharing).
|
||||
|
||||
Use with huggingface/transformers:
|
||||
```python
|
||||
import torch
|
||||
from transformers import AutoTokenizer,AutoModel
|
||||
tokenizer= AutoTokenizer.from_pretrained("NlpHUST/vibert4news-base-cased")
|
||||
bert_model = AutoModel.from_pretrained("NlpHUST/vibert4news-base-cased")
|
||||
|
||||
line = "Tôi là sinh viên trường Bách Khoa Hà Nội ."
|
||||
input_id = tokenizer.encode(line,add_special_tokens = True)
|
||||
att_mask = [int(token_id > 0) for token_id in input_id]
|
||||
input_ids = torch.tensor([input_id])
|
||||
att_masks = torch.tensor([att_mask])
|
||||
with torch.no_grad():
|
||||
features = bert_model(input_ids,att_masks)
|
||||
|
||||
print(features)
|
||||
|
||||
```
|
||||
|
||||
Run training with the base config:
|
||||
|
||||
@ -36,3 +52,4 @@ python train_pytorch.py \
|
||||
|
||||
### Contact information
|
||||
For personal communication related to this project, please contact Nha Nguyen Van (nha282@gmail.com).
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user