Rask6723 committed on
Commit
18f4207
·
verified ·
1 Parent(s): 7532505

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -3
app.py CHANGED
@@ -1,19 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
2
  import torch
3
  from gtts import gTTS
4
  import gradio as gr
5
  import tempfile
6
 
 
 
7
  # Load model and tokenizer
8
- model_name = "SweUmaVarsh/m2m100-en-sa-translation"
9
  model__name = "Helsinki-NLP/opus-mt-en-hi"
10
- tokenizer = M2M100Tokenizer.from_pretrained(model_name)
11
- model = M2M100ForConditionalGeneration.from_pretrained(model_name)
12
 
13
  # Use GPU if available
14
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
15
  model = model.to(device)
16
 
 
 
 
 
 
 
 
17
  def translate_and_speak(text):
18
  input_text = "en " + text
19
  encoded = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True).to(device)
 
1
+ # def translate_and_speak(text):
2
+ # input_text = "en " + text
3
+ # encoded = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True).to(device)
4
+ # generated_tokens = model.generate(**encoded, max_length=128, num_beams=5, early_stopping=True)
5
+ # output = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
6
+
7
+ # for tag in ["__en__", "__sa__", "en", "sa"]:
8
+ # output = output.replace(tag, "")
9
+ # sanskrit_text = output.strip()
10
+
11
+ # # Convert to speech
12
+ # tts = gTTS(sanskrit_text, lang='hi')
13
+ # with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
14
+ # tts.save(fp.name)
15
+ # audio_path = fp.name
16
+
17
+ # return sanskrit_text, audio_path
18
  from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
19
  import torch
20
  from gtts import gTTS
21
  import gradio as gr
22
  import tempfile
23
 
24
+
25
+
26
  # Load model and tokenizer
 
27
  model__name = "Helsinki-NLP/opus-mt-en-hi"
28
+
 
29
 
30
  # Use GPU if available
31
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
32
  model = model.to(device)
33
 
34
+
35
+ model_name = "SweUmaVarsh/m2m100-en-sa-translation"
36
+ tokenizer = M2M100Tokenizer.from_pretrained(model_name)
37
+ model = M2M100ForConditionalGeneration.from_pretrained(model_name)
38
+
39
+
40
+
41
  def translate_and_speak(text):
42
  input_text = "en " + text
43
  encoded = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True).to(device)