    # NOTE(review): fragment — this is the tail of a larger Blocks demo. `m` is
    # presumably an alias for gr.Markdown, and `io1`..`io4`, `reconstruct`,
    # `get_time`, and `demo` are all defined earlier in the file (not visible in
    # this excerpt) — confirm against the top of the file. Indentation inside the
    # enclosing `with gr.Blocks() as demo:` has been reconstructed.
    m(""" Try the model live! """)
    # Four sub-demos presented as tabs, one per instrument.
    gr.TabbedInterface([io1, io2, io3, io4], ["Saxophone", "Flute", "Trombone", "Another Saxophone"])
    m(""" ### Using the model for cloning You can also use this model a different way, to simply clone the audio file and reconstruct it using machine learning. Here, we'll show a demo of that below: """)
    # Single Audio component used as both input and output: editing/recording
    # audio triggers `reconstruct` and writes the result back into the same widget.
    a2 = gr.Audio()
    a2.change(reconstruct, a2, a2)
    m(""" Thanks for reading this! As you may have realized, all of the "models" in this demo are fake. They are just designed to show you what is possible using Blocks 🤗. For details of the model, read the [original report here](https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6). *Details for nerds*: this report was "launched" on: """)
    # Populated once on page load with the current time (no inputs).
    t = gr.Textbox(label="timestamp")
    demo.load(get_time, [], t)

if __name__ == "__main__":
    demo.launch()
import gradio as gr

# Renamed from `str`, which shadowed the builtin of the same name — any later
# call like `str(x)` in this module would have broken.
initial_text = """Hello friends hello friends Hello friends """

with gr.Blocks() as demo:
    txt = gr.Textbox(label="Input", lines=5)
    txt_2 = gr.Textbox(label="Output")
    # Static textbox pre-filled with the greeting text; not wired to any event.
    txt_3 = gr.Textbox(initial_text, label="Output")
    btn = gr.Button("Submit")
    # Identity callback: clicking Submit copies the input textbox into the output.
    btn.click(lambda a: a, inputs=[txt], outputs=[txt_2])

if __name__ == "__main__":
    demo.launch()
import gradio as gr


def greet(name):
    """Build the greeting string shown for *name*."""
    return f"Hello {name}!"


# Two-line textbox so longer names remain visible while typing.
name_input = gr.Textbox(lines=2, placeholder="Name Here...")

demo = gr.Interface(
    fn=greet,
    inputs=name_input,
    outputs="text",
)

if __name__ == "__main__":
    # launch() returns (app, local_url, share_url); unpacked for parity with
    # the original script.
    app, local_url, share_url = demo.launch()
import gradio as gr

# Flashcard-practice demo: load cards into a table, then practice them one at
# a time in a second tab.
# NOTE(review): fragment — the excerpt is cut off mid-way through the
# `correct_btn = gr.Button(...)` call; the rest of the script (event wiring,
# launch) continues past this view. Indentation has been reconstructed and
# should be confirmed against the full file.
demo = gr.Blocks()
with demo:
    gr.Markdown(
        "Load the flashcards in the table below, then use the Practice tab to practice."
    )
    with gr.Tabs():
        with gr.TabItem("Word Bank"):
            # Editable front/back table; type="array" delivers rows as lists.
            flashcards_table = gr.Dataframe(headers=["front", "back"], type="array")
        with gr.TabItem("Practice"):
            with gr.Row():
                front = gr.Textbox()
                # Hidden until the card is flipped.
                answer_row = gr.Row(visible=False)
                with answer_row:
                    back = gr.Textbox()
            with gr.Row():
                new_btn = gr.Button("New Card")
                flip_btn = gr.Button("Flip Card")
            # Holds the currently shown card between events (gr.Variable is the
            # legacy name of gr.State in old Gradio versions).
            selected_card = gr.Variable()
            # Hidden until an answer has been revealed.
            feedback_row = gr.Row(visible=False)
            with feedback_row:
                correct_btn = gr.Button(
                    "Correct",
                    # NOTE(review): per-component `css=` kwarg is an old Gradio
                    # API — confirm it is supported by the pinned version.
                    css={
                        "background-color": "lightgreen",
                        "color": "green"
                    },
import gradio as gr


def reverse_text(text):
    """Return *text* reversed.

    Renamed from the misleading `image_mod` — the function operates on text,
    not images. The name is only referenced within this script.
    """
    return text[::-1]


demo = gr.Blocks()
with demo:
    # Single component doubles as input and output: clicking Run reverses the
    # textbox contents in place.
    text = gr.Textbox(label="Input-Output")
    btn = gr.Button("Run")
    btn.click(reverse_text, text, text)

# Debugging aid kept from the original: dump the serialized Blocks config.
print(demo.get_config_file())

if __name__ == "__main__":
    demo.launch()
import gradio as gr

# Minimal Blocks demo: two static, pre-filled components and no event wiring.
with gr.Blocks() as demo:
    gr.Textbox("Hello")
    gr.Number(5)

if __name__ == "__main__":
    demo.launch()
from difflib import Differ
import gradio as gr


def diff_texts(text1, text2):
    """Token-level diff of two strings.

    Returns (token, category) pairs for gr.HighlightedText, where category is
    difflib's one-character code ('+', '-', '?') or None for unchanged tokens.
    """
    differ = Differ()
    highlighted = []
    for entry in differ.compare(text1, text2):
        code = entry[0]
        highlighted.append((entry[2:], None if code == " " else code))
    return highlighted


demo = gr.Interface(
    diff_texts,
    [
        gr.Textbox(lines=3, default_value="The quick brown fox jumped over the lazy dogs."),
        gr.Textbox(lines=3, default_value="The fast brown fox jumps over lazy dogs."),
    ],
    gr.HighlightedText(),
)

if __name__ == "__main__":
    demo.launch()
# NOTE(review): `gr` and `fake_gan` are defined earlier in the file (outside
# this excerpt). The input list mixes Markdown separators between the real
# input components, which old Gradio versions rendered as section headers.
gan_inputs = [
    gr.Image(label="Initial Image (optional)"),
    gr.Markdown("**Parameters**"),
    gr.Slider(25, minimum=0, maximum=50, label="TV_scale (for smoothness)"),
    gr.Slider(25, minimum=0, maximum=50, label="Range_Scale (out of range RBG)"),
    gr.Number(label="Respacing"),
    gr.Markdown("**Parameters Two**"),
    gr.Slider(25, minimum=0, maximum=50, label="Range_Scale (out of range RBG)"),
    gr.Number(label="Respacing"),
    gr.Markdown("**Parameters Three**"),
    gr.Textbox(label="Respacing"),
]

demo = gr.Interface(
    fn=fake_gan,
    inputs=gan_inputs,
    outputs=gr.Image(label="Generated Image"),
    title="FD-GAN",
    description="This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.",
)

if __name__ == "__main__":
    demo.launch()
+ radio + "</button>", # HTML "files/titanic.csv", df1, # Dataframe np.random.randint(0, 10, (4, 4)), # Dataframe [ im for im in [im1, im2, im3, im4, "files/cheetah1.jpg"] if im is not None ], # Carousel df2, # Timeseries ) demo = gr.Interface( fn, inputs=[ gr.Textbox(default_value="Lorem ipsum", label="Textbox"), gr.Textbox(lines=3, placeholder="Type here..", label="Textbox 2"), gr.Number(label="Number", default=42), gr.Slider(minimum=10, maximum=20, default_value=15, label="Slider: 10 - 20"), gr.Slider(maximum=20, step=0.04, label="Slider: step @ 0.04"), gr.Checkbox(label="Checkbox"), gr.CheckboxGroup( label="CheckboxGroup", choices=CHOICES, default_selected=CHOICES[0:2] ), gr.Radio(label="Radio", choices=CHOICES, default_selected=CHOICES[2]), gr.Dropdown(label="Dropdown", choices=CHOICES), gr.Image(label="Image"), gr.Image(label="Image w/ Cropper", tool="select"), gr.Image(label="Sketchpad", source="canvas"), gr.Image(label="Webcam", source="webcam"), gr.Video(label="Video"),
import gradio as gr


def print_message(n):
    """Compose the welcome message for the page-load event."""
    message = "Welcome! This page has loaded for " + n
    return message


with gr.Blocks() as demo:
    name_box = gr.Textbox("Frank", label="Name")
    output_box = gr.Textbox(label="Output")
    # Fires once when the page loads, greeting whatever name is in the textbox.
    demo.load(print_message, name_box, output_box)

if __name__ == "__main__":
    demo.launch()
import gradio as gr


def greet(name: str, repeat: int):
    """Return a greeting with *name* repeated *repeat* times.

    `repeat` is cast to int because gr.Number delivers its value as a float at
    runtime, and `str * float` raises TypeError; the cast is a no-op for the
    annotated int case, so the interface is unchanged.
    """
    return "Hello " + name * int(repeat) + "!!"


demo = gr.Interface(
    fn=greet,
    inputs=[gr.Textbox(lines=2, max_lines=4), gr.Number()],
    outputs=gr.component("textarea"),
)

if __name__ == "__main__":
    demo.launch()
import gradio as gr

# Static gallery of (almost) every interactive component type; nothing is
# wired to events.
with gr.Blocks() as demo:
    txt_small = gr.Textbox(label="Small Textbox", lines=1)
    txt_large = gr.Textbox(label="Large Textbox", lines=5)
    num = gr.Number(label="Number")
    check = gr.Checkbox(label="Checkbox")
    check_group = gr.CheckboxGroup(
        label="Checkbox Group", choices=["One", "Two", "Three"]
    )
    radio = gr.Radio(label="Radio", choices=["One", "Two", "Three"])
    drop = gr.Dropdown(label="Dropdown", choices=["One", "Two", "Three"])
    slider = gr.Slider(label="Slider")
    audio = gr.Audio()
    video = gr.Video()
    image = gr.Image()
    ts = gr.Timeseries()
    df = gr.Dataframe()
    html = gr.HTML()
    json_view = gr.JSON()
    md = gr.Markdown()
    label = gr.Label()
    highlight = gr.HighlightedText()
    # Layout components are static only.
    # Carousel doesn't work like other components:
    # carousel = gr.Carousel()

if __name__ == "__main__":
    demo.launch()
import gradio as gr

# One worked example pair: (context paragraph, question about it).
examples = [[
    "The Amazon rainforest is a moist broadleaf forest that covers most of the Amazon basin of South America",
    "Which continent is the Amazon rainforest in?",
]]

context_box = gr.Textbox(
    lines=5, label="Context", placeholder="Type a sentence or paragraph here."
)
question_box = gr.Textbox(
    lines=2,
    label="Question",
    placeholder="Ask a question based on the context.",
)

# Wraps the hosted Hugging Face inference endpoint for extractive QA.
demo = gr.Interface.load(
    "huggingface/deepset/roberta-base-squad2",
    inputs=[context_box, question_box],
    outputs=[gr.Textbox(label="Answer"), gr.Label(label="Probability")],
    examples=examples,
)

if __name__ == "__main__":
    demo.launch()
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import gradio as gr

# Fetch the VADER lexicon once at import time; the analyzer is shared by all
# requests.
nltk.download("vader_lexicon")
sid = SentimentIntensityAnalyzer()


def sentiment_analysis(text):
    """Return VADER neg/neu/pos scores for *text*, with the compound score dropped."""
    scores = sid.polarity_scores(text)
    scores.pop("compound")
    return scores


demo = gr.Interface(
    sentiment_analysis,
    gr.Textbox(placeholder="Enter a positive or negative sentence here..."),
    "label",
    interpretation="default",
)

if __name__ == "__main__":
    demo.launch()
import gradio as gr

title = "GPT-J-6B"

# Three completion prompts, one per example row (single-input interface).
prompts = (
    "The tower is 324 metres (1,063 ft) tall,",
    "The Moon's orbit around Earth has",
    "The smooth Borealis basin in the Northern Hemisphere covers 40%",
)
examples = [[prompt] for prompt in prompts]

# Thin wrapper around the hosted Hugging Face inference endpoint for GPT-J.
demo = gr.Interface.load(
    "huggingface/EleutherAI/gpt-j-6B",
    inputs=gr.Textbox(lines=5, label="Input Text"),
    title=title,
    examples=examples,
)

if __name__ == "__main__":
    demo.launch()
import gradio as gr

male_words, female_words = ["he", "his", "him"], ["she", "hers", "her"]


def gender_of_sentence(sentence):
    """Estimate the share of male vs. female pronouns in *sentence*.

    Returns a {"male": p, "female": q} dict suitable for a gr.Label output.
    Fix over the original: words are stripped of surrounding punctuation, so
    tokens like "him," or "her." now count; previously they silently missed.
    """

    def _normalize(word):
        # Lowercase and drop adjacent punctuation ("him," -> "him").
        return word.lower().strip(".,;:!?\"'()")

    tokens = [_normalize(word) for word in sentence.split()]
    male_count = sum(1 for token in tokens if token in male_words)
    female_count = sum(1 for token in tokens if token in female_words)
    # max(..., 1) avoids ZeroDivisionError when no gendered words appear.
    total = max(male_count + female_count, 1)
    return {"male": male_count / total, "female": female_count / total}


demo = gr.Interface(
    fn=gender_of_sentence,
    inputs=gr.Textbox(default_value="She went to his house to get her keys."),
    outputs="label",
    interpretation="default",
)

if __name__ == "__main__":
    demo.launch()
import gradio as gr

# NOTE(review): `pipeline` comes from `transformers`; its import is earlier in
# the file, outside this excerpt — confirm against the full file.
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
classifier = pipeline("text-classification")


def speech_to_text(speech):
    """Transcribe the audio file at path *speech* with wav2vec2."""
    return asr(speech)["text"]


def text_to_sentiment(text):
    """Return the top sentiment label for *text*."""
    return classifier(text)[0]["label"]


demo = gr.Blocks()
with demo:
    audio_in = gr.Audio(type="filepath")
    transcript = gr.Textbox()
    sentiment = gr.Label()
    recognize_btn = gr.Button("Recognize Speech")
    classify_btn = gr.Button("Classify Sentiment")
    # Two-step flow: transcribe first, then classify the transcript.
    recognize_btn.click(speech_to_text, inputs=audio_in, outputs=transcript)
    classify_btn.click(text_to_sentiment, inputs=transcript, outputs=sentiment)

if __name__ == "__main__":
    demo.launch()