Example #1
    def test_xray(self):
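        # Build a Blocks demo with a shared disease checklist and two tabs
        # (X-ray and CT scan), then check its config against XRAY_CONFIG.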
        def fake_func():
            return "Hello There"

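        # Stand-in "models" that map each selected disease to a score.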
        xray_model = lambda diseases, img: {
            disease: random.random()
            for disease in diseases
        }
        ct_model = lambda diseases, img: {disease: 0.1 for disease in diseases}

        with gr.Blocks() as demo:
            gr.components.Markdown("""
            # Detect Disease From Scan
            With this model you can lorem ipsum
            - ipsum 1
            - ipsum 2
            """)
            disease = gr.components.CheckboxGroup(
                choices=["Covid", "Malaria", "Lung Cancer"],
                label="Disease to Scan For")

            with gr.Tabs():
                with gr.TabItem("X-ray"):
                    with gr.Row():
                        xray_scan = gr.components.Image()
                        xray_results = gr.components.JSON()
                    xray_run = gr.Button(
                        "Run",
                        css={
                            "background-color": "red",
                            "--hover-color": "orange"
                        },
                    )
                    xray_run.click(xray_model,
                                   inputs=[disease, xray_scan],
                                   outputs=xray_results)

                with gr.TabItem("CT Scan"):
                    with gr.Row():
                        ct_scan = gr.components.Image()
                        ct_results = gr.components.JSON()
                    ct_run = gr.Button("Run")
                    ct_run.click(ct_model,
                                 inputs=[disease, ct_scan],
                                 outputs=ct_results)
            textbox = gr.components.Textbox()
            demo.load(fake_func, [], [textbox])

        print(demo.get_config_file())
        self.assertEqual(XRAY_CONFIG, demo.get_config_file())
Example #2
        gr.Audio(label="base audio file (optional)"),
    ],
    gr.Audio(),
)

io4 = gr.Interface(
    lambda x, y, z: os.path.join(os.path.dirname(__file__), "sax2.wav"),
    [
        gr.Slider(label="pitch"),
        gr.Slider(label="loudness"),
        gr.Audio(label="base audio file (optional)"),
    ],
    gr.Audio(),
)

demo = gr.Blocks()

with demo.clear():
    m("""
    ## Neural Instrument Cloning from Very Few Samples
    <center><img src="https://media.istockphoto.com/photos/brass-trombone-picture-id490455809?k=20&m=490455809&s=612x612&w=0&h=l9KJvH_25z0QTLggHrcH_MsR4gPLH7uXwDPUAZ_C5zk=" width="400px"></center>""")
    m("""
    This Blocks implementation is an adaptation of [a report written](https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6) by Nicolas Jonason and Bob L.T. Sturm.
    
    I've implemented it in Blocks to show off some cool features, such as embedding live ML demos. More on that ahead...
    
    ### What does this machine learning model do?
    It combines techniques from neural voice cloning with musical instrument synthesis. This makes it possible to produce neural instrument synthesisers from just seconds of target instrument audio.
    
    ### Audio Examples
Example #3
import gradio as gr

str = """Hello friends
hello friends

Hello friends

"""


with gr.Blocks() as demo:
    txt = gr.Textbox(label="Input", lines=5)
    txt_2 = gr.Textbox(label="Output")
    txt_3 = gr.Textbox(default_text, label="Output")
    btn = gr.Button("Submit")
    btn.click(lambda a: a, inputs=[txt], outputs=[txt_2])

if __name__ == "__main__":
    demo.launch()
Example #4
import gradio as gr

test = gr.Blocks()

with test:
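    # num keeps the running count between clicks; squared shows its square.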
    num = gr.Variable(default_value=0)
    squared = gr.Number(default_value=0)
    btn = gr.Button("Next Square")

    def increase(var):
        var += 1
        return var, var**2

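    # Each click passes the stored count through increase() and updates both components.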
    btn.click(increase, [num], [num, squared])

test.launch()