def melody_walk_view():
    """Build the "Melody Walk" panel: interpolation-step and temperature
    inputs, a start button, a piano-roll preview image and an audio player."""
    control_row = html.Div(
        className="row",
        children=[
            html.Div(
                className="columns two",
                children=html.Label(children="Interpolation steps:"),
            ),
            dcc.Input(id="num-steps-input", type="number", min=3, max=12,
                      value=7, step=1, className="columns two"),
            dcc.Input(id="melody-walk-temperature-input", type="number",
                      value=0.01, className="columns two"),
            html.Div(
                className="columns five",
                children=html.Button(id="interpolate-btn",
                                     children="Start walking"),
            ),
        ],
    )
    return html.Div(
        className="melody-walk melody-container",
        children=[
            html.H3("Melody Walk"),
            control_row,
            html.Hr(),
            html.Img(id="melody-walk-graph", className="pianoroll-graph",
                     src="./assets/images/pianoroll_placeholder.png"),
            html.Audio(id="melody-walk-player", className="audio-player",
                       controls=True),
        ],
    )
def generate_clips():
    """Return one Bootstrap column per clip in CLIP_FOLDER, each with an
    audio player and a Correct/Wrong radio group; a placeholder row when
    the folder is empty."""
    def _clip_card(index, name):
        # One card: player on top, file name + verdict radios underneath.
        radio = dbc.RadioItems(
            id={'type': 'filter-dropdown', 'index': index},
            options=[
                {"label": "Correct", "value": True},
                {"label": "Wrong", "value": False},
            ],
            labelClassName="date-group-labels",
            labelCheckedClassName="date-group-labels-checked",
            className="date-group-items",
            inline=True,
        )
        card = dbc.Card([
            dbc.Row([
                dbc.Col(html.Audio(src=f'/assets/unlabeled_data/{name}',
                                   controls=True))
            ]),
            dbc.Row([
                dbc.Col(html.P(name)),
                dbc.Col(radio, align='center'),
            ]),
        ], color='light')
        return dbc.Col([card], width=12)

    clips = [_clip_card(i, clip)
             for i, clip in enumerate(sorted(os.listdir(CLIP_FOLDER)))]
    if not clips:
        clips = [dbc.Row(dbc.Col(html.P('Nothing to review!')))]
    return clips
def play_track(tracks, durations):
    """Render the "now playing" view for the most recently queued track.

    Args:
        tracks: list of audio file paths; the last entry is played.
        durations: durations of the previously played tracks; the current
            track's duration is appended before serialisation.
    Returns:
        html.Div with the title, a hidden autoplaying player, the artwork
        and the serialised play history (in a hidden 'shared-info' div).

    Fix: the audio file is now read inside a ``with`` block — the original
    ``open(tracks[-1], 'rb').read()`` never closed the file handle.
    """
    artwork, artist, track, album, duration = get_track_info(tracks[-1])
    print(f'{len(tracks)}. {artist} - {track} ({album})')
    df = pd.DataFrame({'tracks': tracks, 'durations': durations + [duration]})
    jsonifed_data = df.to_json()
    with open(tracks[-1], 'rb') as audio_file:
        audio_b64 = base64.b64encode(audio_file.read()).decode()
    return html.Div([
        html.H1(f'{len(tracks)}. {artist} - {track} ({album})'),
        html.Div(dcc.Upload(id='upload-image', style={'display': 'none'})),
        html.Audio(id='audio',
                   src='data:audio/mp3;base64,{}'.format(audio_b64),
                   controls=False, autoPlay=True, style={'display': 'none'}),
        html.Div([
            html.Div(html.Img(src='data:image/jpeg;base64,{}'.format(artwork),
                              style={
                                  'width': '85vh',
                                  'margin': 'auto',
                                  'display': 'inline-block'
                              }),
                     style={
                         'textAlign': 'center',
                     })
        ]),
        # Hidden div used to share the play history with other callbacks.
        html.Div(id='shared-info', style={'display': 'none'},
                 children=jsonifed_data)
    ])
def update_output_div(turn_index):
    """Dash callback body: load turn ``turn_index``, write its waveform to a
    temporary wav file and return the text/figure/audio children for the UI.

    Relies on module-level names (pre_pad, post_pad, turns, waveform, sr)
    and rebinds the globals ``turn``, ``features`` and ``wavpath`` as a side
    effect so other callbacks can reuse them.
    """
    global turn
    global features
    global wavpath
    # get_turn slices the (padded) waveform for this turn and computes stats.
    turn, features = get_turn(turn_index, pre_pad, post_pad, turns, waveform, sr)
    prev_utterance = turns[turn_index - 1]["utterance"]
    tokens, trps = get_turn_trp(turns, turn_index)
    wavpath = f"/tmp/audio_{turn_index}.wav"
    torchaudio.save(filepath=wavpath, src=features["Waveform"].unsqueeze(0), sample_rate=sr)
    return [
        f"Turn: {turn_index}",
        turn["utterance"],
        prev_utterance,
        f"Words: {features['statistics']['n_words']}",
        f"Duration: {features['statistics']['duration']}",
        # NOTE(review): `feature` (singular) is not defined anywhere in this
        # function — unless it is a module-level global this raises NameError;
        # was `features` intended? TODO confirm.
        feature,
        get_trp_figure(tokens, trps),
        # The saved wav is served back through a local audio endpoint.
        html.Audio(
            src=f"http://localhost:5002/api/audio{wavpath}",
            controls=True,
        ),
    ]
def melody_selector(id, midi_dropdown_options):
    """Panel for choosing a MIDI file, a track within it and a start bar,
    with a view-type toggle, a piano-roll preview and an audio player."""
    midi_options = [{"label": key, "value": value}
                    for key, value in midi_dropdown_options.items()]
    default_midi = list(midi_dropdown_options.values())[0]
    selector_row = html.Div(
        className="row",
        children=[
            html.Div(
                className="columns five",
                children=dcc.Dropdown(
                    id={"type": "midi-dropdown", "id": id},
                    options=midi_options,
                    value=default_midi)),
            html.Div(
                className="columns five",
                children=dcc.Dropdown(id={"type": "track-dropdown",
                                          "id": id})),
            dcc.Input(id={"type": "start-bar-input", "id": id},
                      type="number", min=0, value=0, step=1,
                      className="columns two"),
        ])
    view_toggle = dcc.RadioItems(
        id={"type": "pianoroll-view-type", "id": id},
        options=[{'label': i, 'value': i}
                 for i in ['Whole melody', 'Selected bars']],
        value='Selected bars',
        labelStyle={'display': 'inline-block'})
    return html.Div(
        className="melody-selector melody-container",
        children=[
            html.H3("Melody Selector"),
            selector_row,
            html.Hr(),
            view_toggle,
            html.Img(id={"type": "melody-pianoroll-graph", "id": id},
                     className="pianoroll-graph",
                     src="./assets/images/pianoroll_placeholder.png"),
            html.Audio(id={"type": "melody-audio-player", "id": id},
                       className="audio-player",
                       controls=True),
        ])
def get_html_tracks(tracks):
    """Return one <li> per track: name, comma-joined artists, a hidden
    track id and a preview audio player."""
    items = []
    for track in tracks:
        artist_names = ', '.join(artist['name'] for artist in track['artists'])
        items.append(html.Li(children=[
            html.P(className='track-name', children=track['name']),
            html.P(className='track-artists', children=artist_names),
            html.P(className='track-id display-none', children=track['id']),
            html.Audio(className='track-audio', src=track['preview_url'],
                       controls='controls'),
        ]))
    return items
def play(n_clicks):
    """Toggle playback: every odd click returns a hidden autoplaying audio
    element, every even click (or no click yet) returns None."""
    clicks = 0 if n_clicks is None else n_clicks
    if clicks % 2 == 0:
        return None
    return html.Audio(
        src='data:audio/mpeg;base64,{}'.format(encoded_kuk_sound.decode()),
        controls=False,
        autoPlay=True,
    )
def raise_alert_action(n_clicks):
    """Dash callback: after the alert button is clicked, show the alert GIF
    and a looping beep sound.

    Fix: guard against ``n_clicks`` being None — Dash fires callbacks with
    ``None`` before the first click, and ``None > 0`` raises TypeError on
    Python 3. Returns None (no content) until the first click.
    """
    if n_clicks is not None and n_clicks > 0:
        return html.Div([
            html.Img(src="/assets/send alert.gif", width=200, height=200),
            html.Audio(src="/assets/Alert_beep.mp3",
                       autoPlay=True, controls=False, loop=True)
        ])
def raise_alarm_action(n_clicks):
    """Dash callback: after the alarm button is clicked, show the alarm GIF
    and a looping buzzer sound.

    Fix: guard against ``n_clicks`` being None — Dash fires callbacks with
    ``None`` before the first click, and ``None > 0`` raises TypeError on
    Python 3. Returns None (no content) until the first click.
    """
    if n_clicks is not None and n_clicks > 0:
        return html.Div([
            html.Img(src="assets/raise_alarm.gif", width=200, height=200),
            html.Audio(
                src="/assets/Annoying_Alien_Buzzer-Kevan-1659966176.mp3",
                autoPlay=True, controls=False, loop=True)
        ])
def melody_result_view(id, show_original=False, h3="Melody Result"):
    """Panel showing the reconstructed melody (piano roll + player) and,
    when ``show_original`` is set, the original melody as well."""
    if show_original:
        container_class = "melody-result melody-container show-original"
        placeholder = "./assets/images/pianoroll_placeholder_flat.png"
        original_children = [
            html.Label("Original:", className="melody-result-label"),
            html.Img(id={"type": "melody-original-graph", "id": id},
                     className="pianoroll-graph", src=placeholder),
            html.Audio(id={"type": "melody-original-player", "id": id},
                       className="audio-player", controls=True),
        ]
    else:
        container_class = "melody-result melody-container"
        placeholder = "./assets/images/pianoroll_placeholder.png"
        original_children = []
    return html.Div(
        className=container_class,
        children=[
            html.H3(h3),
            html.Hr(),
            html.Label("Reconstruction:", className="melody-result-label"),
            html.Img(id={"type": "melody-result-graph", "id": id},
                     className="pianoroll-graph", src=placeholder),
            html.Audio(id={"type": "melody-result-player", "id": id},
                       className="audio-player", controls=True),
            *original_children,
        ])
def update_timerange_and_audio(selection, clicked):
    """Update the range plot and audio player from a lasso selection or a
    single click. Selection wins over click; with neither, return an empty
    plot and no player."""
    if selection is not None:
        # Play the frame whose MFCC vector is closest to the selection mean.
        indices = [pt['customdata'][0] for pt in selection['points']]
        mfccs = df.loc[indices, 'mfcc01':'mfcc20']
        nearest = np.sum((mfccs - mfccs.mean(axis=0)) ** 2, axis=1).idxmin()
        player = html.Audio(
            src='static/frames/frame-{:04d}.wav'.format(nearest),
            controls=True, style={'paddingLeft': '100px'})
        return make_range_plot(indices), player
    if clicked is not None:
        idx = clicked['points'][0]['customdata'][0]
        player = html.Audio(
            src='static/frames/frame-{:04d}.wav'.format(idx),
            controls=True, style={'paddingLeft': '100px'})
        return make_range_plot([idx]), player
    return make_range_plot([]), []
def parse_contents(contents, filename, date):
    """Render an uploaded audio file: name, timestamp, a player and the
    transcript. Returns None when nothing was uploaded."""
    if contents is None:
        return None
    content_type, content_string = contents.split(',')
    script = get_transcript(filename, content_type)
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date)),
        html.Audio(id="player", src=contents, controls=True,
                   style={"width": "50%"}),
        html.Hr(),
        html.Div('Transcript'),
        html.Plaintext(script),
    ])
def update_output(n_clicks):
    """Dash callback: render the saved recommendations (rec.json) as a
    scrollable list of artist/track cards with artwork and preview players.

    Fixes vs. original:
    - the original loaded rec.json only when the file existed but then used
      ``data`` unconditionally, raising NameError when it was missing; we
      now return None early in that case.
    - rec.json is opened via a context manager (the handle leaked before).
    """
    if n_clicks == 0:
        return None
    rec_path = os.path.join(f'{csv_folder}/{get_my_id()}', 'rec.json')
    if not os.path.exists(rec_path):
        return None  # nothing recommended yet
    with open(rec_path) as fh:
        data = json.load(fh)

    def b64(im_pil):
        """Conversion to Base64

        :param im_pil: Pillow Image to be converted
        :type im_pil: Pillow Image
        :return: base64 encoded image
        :rtype: base64 Image
        """
        buff = BytesIO()
        im_pil.save(buff, format="png")
        return base64.b64encode(buff.getvalue()).decode("utf-8")

    temp = []
    for k, v in data.items():
        # Fetch and inline the cover art as a base64 PNG.
        im = Image.open(requests.get(v['img_href'], stream=True).raw)
        im_b64 = b64(im)
        temp.append(html.Div([
            html.H2(v['artist'], style={'text-align': 'center', 'margin': '0'}),
            html.H3(v['track'], style={'text-align': 'center', 'margin': '0'}),
            html.Img(
                src="data:image/png;base64, " + im_b64,
                style={"height": "25vh", "display": "block",
                       "margin": "auto", 'border-radius': '15em'},
            ),
            html.Div([
                html.Audio(src=v['preview_href'], controls=True,
                           style={'margin-top': '1em'}),
            ], style={'text-align': 'center'})
        ], style={'margin-top': '2em'}))
    return html.Div(temp, style={'height': '1000px', 'overflow-y': 'scroll'})
def main():
    """Assemble the Dash layout for the Yamnet sound-recognition demo and
    return the underlying WSGI server."""
    file_options = [{'value': s['file'], 'label': s['title']}
                    for s in samples]
    selector = dbc.Col(dcc.Dropdown(id='select-file',
                                    options=file_options,
                                    value='',
                                    persistence=True,
                                    persistence_type='session'),
                       width='md-6')
    player = dbc.Col(html.Audio(id='listen', controls=True), width='md-6')
    graphs = dcc.Loading(
        html.Div(id='graph_wrapper',
                 children=[dcc.Graph(id='waveform'), dcc.Graph(id='scores')]))
    app.layout = dbc.Container(
        dbc.Jumbotron([
            html.H1('Sound recognition with Yamnet', className='mb-3'),
            dbc.Row([selector, player], className='align-items-center mb-3'),
            html.H3(id='subtitle'),
            graphs,
        ]))
    return app.server
def generate_audio_table(df, max_rows=10):
    """Render up to ``max_rows`` rows of ``df`` as an HTML table, with one
    cell per column (except file_nm) plus a player streaming /music/<file_nm>."""
    cols = df.columns.tolist()
    cols.remove('file_nm')  # shown via the player column instead
    header_row = html.Tr([html.Th(c) for c in cols] + [html.Th('player')])
    body_rows = []
    for idx in range(min(max_rows, len(df))):
        record = df.iloc[idx]
        cells = [html.Td(record[c]) for c in cols]
        player = html.Audio(autoPlay=False,
                            loop=False,
                            preload='none',
                            controls=True,
                            src='/music/' + quote(record['file_nm']))
        cells.append(html.Td(player))
        body_rows.append(html.Tr(cells))
    return html.Table([header_row] + body_rows)
def display_controls(contents, filename):
    """Dash callback: once audio is uploaded, show the two feature graphs
    side by side, an autoplaying player and a polling interval."""
    print("hi there")
    if contents is None:
        return None
    get_audio_contents(contents, filename)
    return html.Div([
        html.Div([DYNAMIC_GRAPH['Valance-arousal']],
                 style={'width': '50%', 'display': 'inline-block'}),
        html.Div([DYNAMIC_GRAPH['Features']],
                 style={'width': '50%', 'display': 'inline-block'}),
        html.Audio(src=contents, id='music-audio', autoPlay='audio'),
        dcc.Interval(id=generate_interval_id('interval'),
                     interval=1 * 500,
                     n_intervals=0),
    ])
def display_output(rows, columns, indices):
    """Dash callback: when a row of the AudioSet table is selected, locate
    the matching wav under /media/wildly/1TB-HDD/ and show a player plus a
    button to feed the clip to the model.

    Fixes vs. original:
    - Python 2 ``print`` statements converted to ``print()`` calls (the
      originals are syntax errors on Python 3).
    - ``find`` is invoked with an argument list and ``shell=False`` so the
      table values cannot be interpreted by the shell (injection risk).
    - subprocess output is decoded to text, and the base64 payload is
      decoded to str before embedding (formatting raw bytes on Python 3
      would produce a broken "b'...'" data URI).
    - the wav file is read via a context manager (handle leaked before).
    """
    df = pd.DataFrame(rows, columns=[c['name'] for c in columns])
    print("indices :", indices)
    if indices is None:
        return html.Div(style={"padding": "20px"},
                        children=[html.P("Select Any audio ",
                                         style={"color": "green"})])
    global input_name
    # File names look like "<YTID>-<start>-<end>.wav"; use the first
    # selected row.
    wav_name = (df.iloc[indices]["YTID"].astype(str) + "-" +
                df.iloc[indices]["start_seconds"].astype(str) + "-" +
                df.iloc[indices]["end_seconds"].astype(str) + ".wav").iloc[0]
    result = subprocess.run(
        ["find", "/media/wildly/1TB-HDD/", "-name", wav_name],
        stdout=subprocess.PIPE, text=True)
    path = result.stdout.split("\n")[0]
    print("path ", path.split("\n"))
    with open(path, 'rb') as fh:
        encoded_audio = base64.b64encode(fh.read()).decode()
    print("len of indices ", len(indices))
    input_name = path
    return html.Div(style={"padding-bottom": "10%"},
                    children=[
                        html.Br(),
                        html.Br(),
                        html.Audio(id='myaudio',
                                   src='data:audio/WAV;base64,{}'.format(
                                       encoded_audio),
                                   controls=True,
                                   title=True),
                        html.Br(),
                        html.Button('Input audio to model', id='button')])
def parse_contents(contents, filename, date):
    """Handle an uploaded audio file and show the COVID-19 model prediction.

    Non-.wav uploads get an error message instead of a probability; .wav
    content is decoded to temp.wav, featurised and scored by ``model``.

    Fixes vs. original:
    - temp.wav is written via a context manager (the handle was never
      closed, so the data could still be buffered when process_data read it).
    - removed the unused ``file_name`` local (computed but never used).
    """
    if '.wav' not in filename:
        m1 = 'file should be in .wav format'
        print('please pass an audio file name ending with .wav')
    else:
        # Upload contents are "data:<type>;base64,<payload>" — keep payload.
        encode_string = bytes(contents.split(',')[1], 'utf-8')
        decode_string = base64.b64decode(encode_string)
        with open("temp.wav", "wb") as wav_file:
            wav_file.write(decode_string)
        x_img = process_data('./temp.wav')
        prob = model.predict(x_img)
        m1 = f'probability of covid-19: {prob[0][0]}'
    return html.Div([
        html.H5(filename),
        html.H6(datetime.datetime.fromtimestamp(date)),
        html.Audio(src=contents, controls=True),
        html.Hr(),
        html.H5("Model predictions"),
        html.H6(m1),
    ])
] + [ dbc.Row( [ dbc.Col( html.Div(children=k.replace('_', ' ')), width=3, className='mt-2 bg-light text-monospace text-break rounded border', ), dbc.Col(html.Div(id='_' + k), className='mt-2 bg-light text-monospace text-break rounded border'), ] ) for k in data[0] ] + [ dbc.Row(dbc.Col(html.Audio(id='player', controls=True),), className='mt-3'), dbc.Row(dbc.Col(dcc.Graph(id='signal-graph')), className='mt-3'), ] ) app.layout = html.Div( [ dcc.Location(id='url', refresh=False), dbc.NavbarSimple( children=[ dbc.NavItem(dbc.NavLink('Statistics', id='stats_link', href='/', active=True)), dbc.NavItem(dbc.NavLink('Samples', id='samples_link', href='/samples')), ], brand='Speech Data Explorer', sticky='top', color='green',
#app = dash.Dash(__name__, suppress_callback_exceptions=True) layout = html.Div([ html.H3("Rick And Morty Generator comming soon", style={ 'text-align': 'center', }), html.H5(children=[ "Hello this page will one day be where you can generate your own rick and morty transcript." "I will also like to add my rick voice model heres a small sample", ], style={ 'text-align': 'center', }), html.Audio(src='assets/rick_voice.wav', controls=True), html.Div([ dcc.Textarea(id="loading-input-2", draggable='false', rows="5", value='Input triggers nested spinner', style={ 'resize': 'none', 'width': '80%', 'display': 'block', 'margin-left': 'auto', 'margin-right': 'auto' }), dcc.Loading( id="loading-2", children=[html.Div([html.Div(id="loading-output-2")])],
'textAlign': 'center', 'margin-bottom': '20px' } app = dash.Dash(__name__, external_stylesheets=external_stylesheets) server = app.server app.layout = html.Div([ html.H2('Hello World'), dcc.Upload(id="upload-audio", children=html.Div(["Drag and Drop or ", html.A("Select Files")]), style=upload_data_style), dcc.Upload(id="upload-audio-2", children=html.Div(["Drag and Drop or ", html.A("Select Files")]), style=upload_data_style), html.Audio(src=audio_file, controls=True), html.Audio(src=audio_file2, controls=True), # html.Audio(src=audio_file2, controls=True), dcc.Graph(id="audio-1", figure=fig1), dcc.Graph(id="audio-2", figure=fig2), html.Img(src='data:image/png;base64,{}'.format(encoded_image1), className="img1"), html.Img(src='data:image/png;base64,{}'.format(encoded_image2), className="img2") ]) if __name__ == '__main__': app.run_server(debug=True)
dcc.Link("Event History", href="/"), ], id="tabs", className="row tabs", ), ], ), html.Div( [ html.Div( [ html.H3("Audio Data Analysis", className="audio_label"), html.H6("Detected Audio Sound", className="audio_label"), html.Br(), html.Audio(id="player", src=FILE, controls=True), html.H6("Audio Graph", ), html.Img( id='spectrogram', src="data:image/png;base64,{}".format(spec_data), style={"padding": "30px"}) # dcc.Graph( # figure = fig, # # id="waveform", # # style = {"padding":"30px"} # ) ], className="eight columns", id="audio_analysis"), html.Div(id="vertical_line", className="one columns"), html.Div(
dcc.Textarea( id="transcription_input", maxLength=300, rows=2, style={'width': '100%' }, value= 'I believe in living in the present and making each day count. I don’t pay much attention to the past or the future.' )), html.Div(html.Button('Submit', id='submit', n_clicks=0)), html.Br(), dcc.Loading(id="loading-1", children=[ html.Audio(id="player", src="./assets/generated/new_test.wav", controls=True, style={ "width": "100%", }) ], type='default'), html.H4('How would you rate the quality of the audio ?'), dcc.Slider( id='rating', max=5, min=1, step=1, marks={i: f'{i}' for i in range(1, 6)}, ), # dcc.Graph(id="waveform", figure=fig), html.Div(html.Button('Rate', id='rate-button', n_clicks=0)),
# Minimal Dash demo: a single page containing one (sourceless) HTML5 audio
# player. NOTE(review): dcc, Input/Output, pandas and plotly are imported
# but unused here — presumably leftovers; confirm before removing.
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.graph_objs as go

# Application
app = dash.Dash(__name__)  # This will pull css from 'assets' folder

app.layout = html.Div(
    [html.Audio(controls=True, style=dict(hidden=False, display='block'))])

if __name__ == '__main__':
    app.run_server(debug=True)
def display_images(*clickDatas):
    """Dash callback: given a scatter-plot click (or a cluster name from a
    second input), sample up to ten files from that cluster and return
    their image/audio annotation cards, a Submit button and the chosen
    file names as JSON; hidden placeholder cards when nothing is selected.

    NOTE(review): the indentation below was reconstructed from a collapsed
    source line — the final ``else`` is assumed to pair with the second
    ``if clickData is not None`` — TODO confirm against VCS history.
    """
    clickData = clickDatas[0]
    ctx = dash.callback_context
    if ctx.triggered:
        # Which input fired: 'clickData' (graph click) vs. the other control.
        trig = ctx.triggered[0]['prop_id'].split('.')[1]
        if trig == 'clickData':
            clickData = clickDatas[0]
            if clickData is not None:
                # hovertext carries the cluster name after a 9-char prefix.
                point_text = clickData['points'][0]['hovertext'][9:]
        else:
            clickData = clickDatas[1]
            point_text = clickData
        if clickData is not None:
            #point_text = clickData['points'][0]['hovertext'][9:]
            print(trig)
            paths = df.loc[df['clusters'] == point_text, ['files']].values
            path_list = [paths[i][0] for i in range(len(paths))]
            imgs_paths = [
                os.path.splitext(path_list[i])[0] + '.jpg'
                for i in range(len(path_list))
            ]
            # Sample at most 10 files from the cluster, without replacement.
            if len(imgs_paths) < 11:
                num_files = len(imgs_paths)
            else:
                num_files = 10
            files = np.random.choice(imgs_paths, num_files, replace=False)
            filenames = [
                os.path.splitext(files[i])[0] for i in range(len(files))
            ]
            card = []
            for i in range(len(filenames)):
                audio = filenames[i] + '.wav'
                image = filenames[i] + '.jpg'
                card.append(
                    Card([
                        html.Img(src='/assets/SONYC/images/train/' + image,
                                 style={
                                     'display': 'inline-block',
                                     'height': '100%',
                                     'width': '100%',
                                     'margin-top': 10
                                 }),
                        html.Audio(src='/assets/SONYC/train/' + audio,
                                   controls=True,
                                   style={
                                       'display': 'inline-block',
                                       'vertical-align': 'top',
                                       'margin-left': 10,
                                       'margin-bottom': 5
                                   }),
                        dcc.Dropdown(id='options_' + str(i),
                                     options=option_dict,
                                     style={'margin-bottom': 5},
                                     multi=True,
                                     placeholder='Select all that apply'),
                        dcc.Input(id='input_' + str(i),
                                  style={'display': 'none'})
                    ]))
            # Pad with hidden cards so the callback always fills 10 slots
            # (the component ids options_0..9 / input_0..9 must all exist).
            if len(filenames) < 10:
                for i in range(len(filenames), 10):
                    card.append(
                        Card([
                            html.Img(style={'display': 'none'}),
                            html.Audio(style={'display': 'none'}),
                            dcc.Dropdown(id='options_' + str(i),
                                         options=option_dict,
                                         style={'display': 'none'}),
                            dcc.Input(id='input_' + str(i),
                                      style={'display': 'none'})
                        ]))
            return card, html.Button('Submit',
                                     id='submit_btn',
                                     type='submit',
                                     style={
                                         'display': 'inline-block',
                                         'vertical-align': 'bottom',
                                         'float': 'left',
                                         'margin-left': -40,
                                         'margin-top': 10
                                     }), json.dumps(filenames)
        else:
            # No selection: return 10 hidden placeholder cards and no button.
            card = []
            for i in range(10):
                card.append(
                    Card([
                        html.Img(id='img' + str(i), style={'display': 'none'}),
                        html.Audio(id='audio' + str(i),
                                   style={'display': 'none'}),
                        dcc.Dropdown(id='options_' + str(i),
                                     options=option_dict,
                                     style={'display': 'none'}),
                        dcc.Input(id='input_' + str(i),
                                  style={'display': 'none'})
                    ]))
            return card, None, None
dbc.Row( [ dbc.Col(html.H2( id="audio-title", children = "Audio File", ),), dbc.Col(html.H2( id="image-title", children = "Image File" ),) ], style = {"margin":"auto"}), dbc.Row( [ dbc.Col(html.Audio( id='a1', controls = True, autoPlay = False, style = {'display':'inline-block'} )), dbc.Col(html.Img( id='img1', style={'height':'550px'} )) ], style = {"margin":"auto"} ) ]) def save_file(name, content): """ save_file: Decode and store a file uploaded with Plotly Dash. """
"""), dcc.Markdown("**Choose your celebrity**"), dcc.Dropdown(id="celebrity-dropdown", options=[{ 'label': celebrity, 'value': celebrity } for celebrity in celebrities]), html.Div(id="slider-output-container"), html.P(children="Carnegie Mellon Sphinx transcription:"), dcc.Textarea(id="transcription_input", cols=80), html.Button('Submit', id='submit', n_clicks=0), html.Br(), html.Audio( id="player", src= "http://docs.google.com/uc?export=open&id=1jY1Gz9naGhvesxpm5mG1hr6Y486Wry60", controls=True, style={ "width": "100%", }), # dcc.Graph(id="waveform", figure=fig), ]) # Transcribe audio @app.callback( dash.dependencies.Output("player", "src"), [ dash.dependencies.Input("submit", "n_clicks"), ], [ dash.dependencies.State("celebrity-dropdown", "value"),
className="app__text_output_box", ), html.Br(), html.Div( [ html.Span("Click to listen to the text: "), ], className="app__subheader", ), #html.Button('Click to conver to sound', id='convert-sound-button', n_clicks=0), html.Br(), html.Div( [ html.Audio(id="player", src=AUDIO_FILE, controls=True, style={"width": "50%"}), ], className="app__audio_output", ), # html.Div( # [ # dcc.Dropdown( # id="chem_dropdown", # multi=True, # value=[STARTING_DRUG], # options=[{"label": i, "value": i} # for i in df["NAME"]], # ) # ],
html.Div( className="two-thirds column omega", children=[dcc.Graph(id='heatmap', figure = figindic, style={ "width": "100%"})] ) ] ), html.Div( className="row", children=[ html.H6( id="wav-info", children=[f"Wave file informations : file name {data['name'][0]}, date : {datetime.strptime(data['datetime'][0], '%Y%m%d_%H%M%S')}, Geophony : {data['geophony'][0]}, Biophony {data['biophony'][0]}, Anthropophony {data['anthropophony'][0]}"], style={"color": "blue", "border": "solid 1px white"}, ), html.Audio(id="player", src='data:audio/mpeg;base64,{}'.format(encoded_sound.decode()), controls=True, style={ "width": "100%"}), dcc.Graph(id='spactrogram', figure = wavefig, style={ "width": "100%"}) ] ) ] ) ] ) ##### call back @app.callback([Output('heatmap', 'figure'), Output('map-info', 'children')],
ex.WebcamDashUiComponents(id="web-cam", audio=False, screenshotFormat="image/jpeg", width=500, height=300, screenshotInterval=500), html.Div(id='webcam-output'), html.H4("Callback value is base64 image"), html.Img(id="receive-img", src="", width=500, height=300), html.H4("Microphone Preview"), ex.MicrophoneDashUiComponents(id='microphone', className='sound-wave', strokeColor='#000', backgroundColor='#FF4081'), html.Div(id='audio-output'), html.Audio(id='audio', controls=True) ]) @app.callback(Output('receive-img', 'src'), [Input('web-cam', 'screenshot')]) def WebcamOutput(value): if value == None: return "" return value @app.callback(Output('audio-output', 'children'), [Input('microphone', 'realData')]) def AudioOutput(value): return 'chunk of real-time data is: {}'.format(value)