legacy-datasets/wikipedia
Updated • 121k • 629
How to use akahana/tiny-roberta-indonesia with Transformers:
# Use a pipeline as a high-level helper.
from transformers import pipeline

pipe = pipeline("feature-extraction", model="akahana/tiny-roberta-indonesia")

# Load model directly (tokenizer + bare encoder, no pipeline wrapper).
# NOTE: the original page had this import fused onto the previous line,
# which is a syntax error; the statements are split here.
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("akahana/tiny-roberta-indonesia")
model = AutoModel.from_pretrained("akahana/tiny-roberta-indonesia")

from transformers import pipeline
# Fill-mask inference via the high-level pipeline API.
model_id = "akahana/tiny-roberta-indonesia"
fill_mask = pipeline("fill-mask", model=model_id, tokenizer=model_id)

# Predict candidates for the <mask> token in an Indonesian sentence.
fill_mask("ikiryo adalah <mask> hantu dalam mitologi jepang.")
# Load the concrete RoBERTa classes explicitly and run a single forward pass.
from transformers import RobertaModel, RobertaTokenizerFast

pretrained_name = "akahana/tiny-roberta-indonesia"
tokenizer = RobertaTokenizerFast.from_pretrained(pretrained_name)
model = RobertaModel.from_pretrained(pretrained_name)

# Tokenize to PyTorch tensors and feed the encoded batch to the model.
text = "ikiryo adalah <mask> hantu dalam mitologi jepang."
encoded = tokenizer(text, return_tensors="pt")
output = model(**encoded)