from __future__ import annotations

import os

import gradio as gr
import spaces
import torch
import torchaudio
from transformers import (
    SeamlessM4TFeatureExtractor,
    SeamlessM4TTokenizer,
    SeamlessM4Tv2ForSpeechToText,
)

from lang_list import (
    ASR_TARGET_LANGUAGE_NAMES,
    LANGUAGE_NAME_TO_CODE,
    S2ST_TARGET_LANGUAGE_NAMES,
    S2TT_TARGET_LANGUAGE_NAMES,
    T2ST_TARGET_LANGUAGE_NAMES,
    TEXT_SOURCE_LANGUAGE_NAMES,
)
DESCRIPTION = """\
### **IndicSeamless: Speech-to-Text Translation Model for Indian Languages**

This Gradio demo showcases **IndicSeamless**, a fine-tuned **SeamlessM4T-v2-large** model for **speech-to-text translation** across **13 Indian languages and English**. Trained on **BhasaAnuvaad**, the largest open-source speech translation dataset for Indian languages, it delivers **accurate and robust translations** across diverse linguistic and acoustic conditions.

**Model Checkpoint:** [ai4bharat/indic-seamless](https://huggingface.co/ai4bharat/indic-seamless)

#### **How to Use:**

1. **Upload or record** an audio clip in any supported Indian language.
2. Click **"Translate"** to generate the corresponding text in the target language.
3. View or copy the output for further use.

Try it out and experience seamless speech translation for Indian languages!
"""
hf_token = os.getenv("HF_TOKEN")

# Prefer CUDA, then Apple MPS, then CPU; use bfloat16 on accelerators to halve memory.
device = "cuda:0" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
torch_dtype = torch.bfloat16 if device != "cpu" else torch.float32

# Load the fine-tuned checkpoint together with its feature extractor
# (raw audio -> model features) and tokenizer (token ids -> text).
model = SeamlessM4Tv2ForSpeechToText.from_pretrained(
    "ai4bharat/indic-seamless", torch_dtype=torch_dtype, token=hf_token
).to(device)
processor = SeamlessM4TFeatureExtractor.from_pretrained("ai4bharat/indic-seamless", token=hf_token)
tokenizer = SeamlessM4TTokenizer.from_pretrained("ai4bharat/indic-seamless", token=hf_token)

CACHE_EXAMPLES = os.getenv("CACHE_EXAMPLES") == "1" and torch.cuda.is_available()

AUDIO_SAMPLE_RATE = 16000
MAX_INPUT_AUDIO_LENGTH = 60  # in seconds
DEFAULT_TARGET_LANGUAGE = "Hindi"

def preprocess_audio(input_audio: str) -> None:
    """Resample a clip to 16 kHz, truncate it to MAX_INPUT_AUDIO_LENGTH seconds, and overwrite the file in place."""
    arr, org_sr = torchaudio.load(input_audio)
    new_arr = torchaudio.functional.resample(arr, orig_freq=org_sr, new_freq=AUDIO_SAMPLE_RATE)
    max_length = int(MAX_INPUT_AUDIO_LENGTH * AUDIO_SAMPLE_RATE)
    if new_arr.shape[1] > max_length:
        new_arr = new_arr[:, :max_length]
        gr.Warning(f"Input audio is too long. Only the first {MAX_INPUT_AUDIO_LENGTH} seconds is used.")
    torchaudio.save(input_audio, new_arr, sample_rate=int(AUDIO_SAMPLE_RATE))
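
# Note: preprocess_audio is currently unused -- the calls in run_s2tt and
# run_asr below are commented out, and both functions resample inline instead.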

def run_s2tt(input_audio: str, source_language: str, target_language: str) -> str:
    # preprocess_audio(input_audio)
    # source_language_code = LANGUAGE_NAME_TO_CODE[source_language]
    target_language_code = LANGUAGE_NAME_TO_CODE[target_language]
    # Load the clip and resample to the 16 kHz rate the model expects.
    waveform, orig_freq = torchaudio.load(input_audio)
    waveform = torchaudio.functional.resample(waveform, orig_freq=orig_freq, new_freq=AUDIO_SAMPLE_RATE)
    audio_inputs = processor(waveform, sampling_rate=AUDIO_SAMPLE_RATE, return_tensors="pt").to(device=device, dtype=torch_dtype)
    # generate() returns target-language token ids; decode them back into text.
    text_out = model.generate(**audio_inputs, tgt_lang=target_language_code)[0].cpu().numpy().squeeze()
    return tokenizer.decode(text_out, clean_up_tokenization_spaces=True, skip_special_tokens=True)
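
# For reference, a direct (non-Gradio) call would look like this, using one of
# the example clips bundled with the Space:
#   print(run_s2tt("assets/Bengali.wav", "Bengali", "English"))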

def run_asr(input_audio: str, target_language: str) -> str:
    # preprocess_audio(input_audio)
    target_language_code = LANGUAGE_NAME_TO_CODE[target_language]
    waveform, orig_freq = torchaudio.load(input_audio)
    waveform = torchaudio.functional.resample(waveform, orig_freq=orig_freq, new_freq=AUDIO_SAMPLE_RATE)
    audio_inputs = processor(waveform, sampling_rate=AUDIO_SAMPLE_RATE, return_tensors="pt").to(device=device, dtype=torch_dtype)
    text_out = model.generate(**audio_inputs, tgt_lang=target_language_code)[0].cpu().numpy().squeeze()
    return tokenizer.decode(text_out, clean_up_tokenization_spaces=True, skip_special_tokens=True)
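
# run_asr reuses the same generate/decode path as run_s2tt: transcription is
# effectively translation with tgt_lang set to the language spoken in the clip.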

with gr.Blocks() as demo_s2st:
    with gr.Row():
        with gr.Column():
            with gr.Group():
                input_audio = gr.Audio(label="Input speech", type="filepath")
                source_language = gr.Dropdown(
                    label="Source language",
                    choices=ASR_TARGET_LANGUAGE_NAMES,
                    value="English",
                )
                target_language = gr.Dropdown(
                    label="Target language",
                    choices=S2ST_TARGET_LANGUAGE_NAMES,
                    value=DEFAULT_TARGET_LANGUAGE,
                )
                btn = gr.Button("Translate")
        with gr.Column():
            with gr.Group():
                output_audio = gr.Audio(
                    label="Translated speech",
                    autoplay=False,
                    streaming=False,
                    type="numpy",
                )
                output_text = gr.Textbox(label="Translated text")

with gr.Blocks() as demo_s2tt:
    with gr.Row():
        with gr.Column():
            with gr.Group():
                input_audio = gr.Audio(label="Input speech", type="filepath")
                source_language = gr.Dropdown(
                    label="Source language",
                    choices=ASR_TARGET_LANGUAGE_NAMES,
                    value="English",
                )
                target_language = gr.Dropdown(
                    label="Target language",
                    choices=S2TT_TARGET_LANGUAGE_NAMES,
                    value=DEFAULT_TARGET_LANGUAGE,
                )
                btn = gr.Button("Translate")
        with gr.Column():
            output_text = gr.Textbox(label="Translated text")

    gr.Examples(
        examples=[
            ["assets/Bengali.wav", "Bengali", "English"],
            ["assets/Gujarati.wav", "Gujarati", "Hindi"],
            ["assets/Punjabi.wav", "Punjabi", "Hindi"],
        ],
        inputs=[input_audio, source_language, target_language],
        outputs=output_text,
        fn=run_s2tt,
        cache_examples=CACHE_EXAMPLES,
        api_name=False,
    )

    btn.click(
        fn=run_s2tt,
        inputs=[input_audio, source_language, target_language],
        outputs=output_text,
        api_name="s2tt",
    )

with gr.Blocks() as demo_t2st:
    with gr.Row():
        with gr.Column():
            with gr.Group():
                input_text = gr.Textbox(label="Input text")
                with gr.Row():
                    source_language = gr.Dropdown(
                        label="Source language",
                        choices=TEXT_SOURCE_LANGUAGE_NAMES,
                        value="English",
                    )
                    target_language = gr.Dropdown(
                        label="Target language",
                        choices=T2ST_TARGET_LANGUAGE_NAMES,
                        value=DEFAULT_TARGET_LANGUAGE,
                    )
                btn = gr.Button("Translate")
        with gr.Column():
            with gr.Group():
                output_audio = gr.Audio(
                    label="Translated speech",
                    autoplay=False,
                    streaming=False,
                    type="numpy",
                )
                output_text = gr.Textbox(label="Translated text")

with gr.Blocks() as demo_asr:
    with gr.Row():
        with gr.Column():
            with gr.Group():
                input_audio = gr.Audio(label="Input speech", type="filepath")
                target_language = gr.Dropdown(
                    label="Target language",
                    choices=ASR_TARGET_LANGUAGE_NAMES,
                    value=DEFAULT_TARGET_LANGUAGE,
                )
                btn = gr.Button("Transcribe")
        with gr.Column():
            output_text = gr.Textbox(label="Transcribed text")
    # ASR takes only two inputs (audio, language), so each example pairs a clip
    # with the language spoken in it.
    gr.Examples(
        examples=[
            ["assets/Bengali.wav", "Bengali"],
            ["assets/Gujarati.wav", "Gujarati"],
            ["assets/Punjabi.wav", "Punjabi"],
        ],
        inputs=[input_audio, target_language],
        outputs=output_text,
        fn=run_asr,
        cache_examples=CACHE_EXAMPLES,
        api_name=False,
    )

    btn.click(
        fn=run_asr,
        inputs=[input_audio, target_language],
        outputs=output_text,
        api_name="asr",
    )

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
    )
    with gr.Tabs():
        # with gr.Tab(label="S2ST"):
        #     demo_s2st.render()
        with gr.Tab(label="S2TT"):
            demo_s2tt.render()
        # with gr.Tab(label="T2ST"):
        #     demo_t2st.render()
        # with gr.Tab(label="T2TT"):
        #     demo_t2tt.render()
        with gr.Tab(label="ASR"):
            demo_asr.render()

# share=True creates a public gradio.live link when run locally; it has no
# effect on Hugging Face Spaces.
demo.launch(share=True)