Example #1
    def test_xray(self):
        def fake_func():
            return "Hello There"

        xray_model = lambda diseases, img: {
            disease: random.random()
            for disease in diseases
        }
        ct_model = lambda diseases, img: {disease: 0.1 for disease in diseases}

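        # Build the Blocks UI with the stub models above; its generated config is compared to XRAY_CONFIG below.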
        with gr.Blocks() as demo:
            gr.components.Markdown("""
            # Detect Disease From Scan
            With this model you can lorem ipsum
            - ipsum 1
            - ipsum 2
            """)
            disease = gr.components.CheckboxGroup(
                choices=["Covid", "Malaria", "Lung Cancer"],
                label="Disease to Scan For")

            with gr.Tabs():
                with gr.TabItem("X-ray"):
                    with gr.Row():
                        xray_scan = gr.components.Image()
                        xray_results = gr.components.JSON()
                    xray_run = gr.Button(
                        "Run",
                        css={
                            "background-color": "red",
                            "--hover-color": "orange"
                        },
                    )
                    xray_run.click(xray_model,
                                   inputs=[disease, xray_scan],
                                   outputs=xray_results)

                with gr.TabItem("CT Scan"):
                    with gr.Row():
                        ct_scan = gr.components.Image()
                        ct_results = gr.components.JSON()
                    ct_run = gr.Button("Run")
                    ct_run.click(ct_model,
                                 inputs=[disease, ct_scan],
                                 outputs=ct_results)
            textbox = gr.components.Textbox()
            demo.load(fake_func, [], [textbox])

        print(demo.get_config_file())
        self.assertEqual(XRAY_CONFIG, demo.get_config_file())
Example #2
    
    ### What does this machine learning model do?
    It combines techniques from neural voice cloning with musical instrument synthesis. This makes it possible to produce neural instrument synthesisers from just seconds of target instrument audio.
    
    ### Audio Examples
    Here are some **real** 16 second saxophone recordings:
    """)
    gr.Audio(os.path.join(os.path.dirname(__file__), "sax.wav"),
             label="Here is a real 16 second saxophone recording:")
    gr.Audio(os.path.join(os.path.dirname(__file__), "sax.wav"))

    m("""\n
        Here is a **generated** saxophone recording:""")
    a = gr.Audio(os.path.join(os.path.dirname(__file__), "new-sax.wav"))

    gr.Button("Generate a new saxophone recording")

    m("""
    ### Inputs to the model
    The inputs to the model are:
    * pitch
    * loudness
    * base audio file
    """)

    m("""
    Try the model live!
    """)

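    # io1-io4 are Interface objects defined elsewhere in the full file; each is shown in its own tab.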
    gr.TabbedInterface([io1, io2, io3, io4],
                       ["Saxophone", "Flute", "Trombone", "Another Saxophone"])
Example #3
import gradio as gr

text = """Hello friends
hello friends

Hello friends

"""


with gr.Blocks() as demo:
    txt = gr.Textbox(label="Input", lines=5)
    txt_2 = gr.Textbox(label="Output")
    txt_3 = gr.Textbox(text, label="Output")
    btn = gr.Button("Submit")
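    # Copy the input textbox's value into the output textbox on click.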
    btn.click(lambda a: a, inputs=[txt], outputs=[txt_2])

if __name__ == "__main__":
    demo.launch()
Example #4
    gr.Markdown(
        "Load the flashcards in the table below, then use the Practice tab to practice."
    )

    with gr.Tabs():
        with gr.TabItem("Word Bank"):
            flashcards_table = gr.Dataframe(headers=["front", "back"],
                                            type="array")
        with gr.TabItem("Practice"):
            with gr.Row():
                front = gr.Textbox()
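                # Hidden by default; the full demo presumably reveals this row when the card is flipped.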
                answer_row = gr.Row(visible=False)
                with answer_row:
                    back = gr.Textbox()
            with gr.Row():
                new_btn = gr.Button("New Card")
                flip_btn = gr.Button("Flip Card")
                selected_card = gr.Variable()
                feedback_row = gr.Row(visible=False)
                with feedback_row:
                    correct_btn = gr.Button(
                        "Correct",
                        css={
                            "background-color": "lightgreen",
                            "color": "green"
                        },
                    )
                    incorrect_btn = gr.Button("Incorrect",
                                              css={
                                                  "background-color": "pink",
                                                  "color": "red"
Example #5
File: run.py Project: gradio-app/gradio
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np


def plot(v, a):
    # The snippet omits g's definition; standard gravity (9.8 m/s^2) is assumed here.
    g = 9.8
    theta = a / 180 * 3.14
    tmax = ((2 * v) * np.sin(theta)) / g
    timemat = tmax * np.linspace(0, 1, 40)[:, None]

    x = (v * timemat) * np.cos(theta)
    y = ((v * timemat) * np.sin(theta)) - ((0.5 * g) * (timemat**2))

    fig = plt.figure()
    plt.scatter(x=x, y=y, marker=".")
    plt.xlim(0, 100)
    plt.ylim(0, 60)
    return fig


demo = gr.Blocks()

with demo:
    gr.Markdown(
        "Let's do some kinematics! Choose the speed and angle to see the trajectory."
    )

    with gr.Row():
        speed = gr.Slider(25, min=1, max=30, label="Speed")
        angle = gr.Slider(45, min=0, max=90, label="Angle")
    output = gr.Image(type="plot")
    btn = gr.Button("Run")
    btn.click(plot, [speed, angle], output)

if __name__ == "__main__":
    demo.launch()
Example #6
File: run.py Project: gradio-app/gradio
# Detect Disease From Scan
With this model you can lorem ipsum
- ipsum 1
- ipsum 2
"""
    )
    disease = gr.CheckboxGroup(
        choices=["Covid", "Malaria", "Lung Cancer"], label="Disease to Scan For"
    )

    with gr.Tabs():
        with gr.TabItem("X-ray"):
            with gr.Row():
                xray_scan = gr.Image()
                xray_results = gr.JSON()
            xray_run = gr.Button("Run")
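            # Overlay a loading indicator on the results while the model runs.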
            xray_progress = gr.StatusTracker(cover_container=True)
            xray_run.click(
                xray_model,
                inputs=[disease, xray_scan],
                outputs=xray_results,
                status_tracker=xray_progress,
            )

        with gr.TabItem("CT Scan"):
            with gr.Row():
                ct_scan = gr.Image()
                ct_results = gr.JSON()
            ct_run = gr.Button("Run")
            ct_progress = gr.StatusTracker(cover_container=True)
            ct_run.click(
                ct_model,
                inputs=[disease, ct_scan],
                outputs=ct_results,
                status_tracker=ct_progress,
            )
Example #7
File: run.py Project: gradio-app/gradio
import gradio as gr

test = gr.Blocks()

with test:
    num = gr.Variable(default_value=0)
    squared = gr.Number(default_value=0)
    btn = gr.Button("Next Square")

    def increase(var):
        var += 1
        return var, var**2

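    # Each click feeds the hidden counter back into itself and displays its square.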
    btn.click(increase, [num], [num, squared])

test.launch()
Example #8
File: run.py Project: gradio-app/gradio
import gradio as gr
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
classifier = pipeline("text-classification")


def speech_to_text(speech):
    text = asr(speech)["text"]
    return text


def text_to_sentiment(text):
    return classifier(text)[0]["label"]


demo = gr.Blocks()

with demo:
    m = gr.Audio(type="filepath")
    t = gr.Textbox()
    l = gr.Label()

    b1 = gr.Button("Recognize Speech")
    b2 = gr.Button("Classify Sentiment")

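    # Chain the models: the first button transcribes the audio, the second classifies the transcript.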
    b1.click(speech_to_text, inputs=m, outputs=t)
    b2.click(text_to_sentiment, inputs=t, outputs=l)

if __name__ == "__main__":
    demo.launch()