def chat(passage):
    """Interactive Q&A chat over *passage* using the AllenNLP predictor.

    Repeatedly reads a question from a text input, answers with the model's
    best span, renders the answer both as text and as gTTS speech, and stops
    when the user types "bye".
    """
    st.write("Excited to know more about the place?!")
    st.write("Here's HAL9000 to help you with your queries!")
    st.write("**type _bye_ to end the chat")
    while True:
        inp = st.text_input("You: ", key='1')
        if inp.lower() == "bye":
            st.text_area("Bot:", value="Thank You, Have a nice day!", height=200,
                         max_chars=None, key=None)
            break
        result = predictor.predict(passage=passage, question=inp)
        response = result['best_span_str']
        st.text_area("Bot:", value=response, height=200, max_chars=None, key=None)
        # Synthesize the answer to a uniquely-named ogg file (timestamped so
        # consecutive answers never collide), then clean it up after embedding.
        myobj = gTTS(text=response, lang=language, slow=False)
        date_string = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
        filename = "voice" + date_string + ".ogg"
        myobj.save(filename)
        # BUG FIX: read the file from where gTTS actually saved it (the current
        # working directory) instead of a hard-coded absolute Windows path that
        # only exists on the original author's machine. Also close the handle.
        with open(filename, 'rb') as audio_file:
            audio_bytes = audio_file.read()
        st.audio(audio_bytes, format='audio/ogg')
        os.remove(filename)
def play_file_uploaded(file_upload, file_type):
    """Render an uploaded file with the widget matching its MIME type.

    Audio MIME types go to st.audio, video MIME types to st.video; anything
    else is silently ignored (the upload is not even read).
    """
    if 'audio' in file_type:
        st.audio(file_upload.read(), format="audio/wav")
    elif 'video' in file_type:
        st.video(file_upload.read())
def pdf_to_audio(pdf, slow):
    """Convert *pdf* to speech, embed it as a wav player, then delete the file."""
    out_path = 'audiobook.wav'
    with st.spinner("Converting PDF to audio... "):
        speech = convert_pdf_to_audio(pdf, slow)
        speech.save(out_path)
        st.audio(out_path, format='audio/wav')
        os.remove(out_path)
def show_media_widgets() -> None:
    """Demo page showcasing Streamlit media widgets: st.image (remote URL and
    inline base64 data URI), st.audio, and st.video — each wrapped in
    st.echo() so the source code is displayed alongside the rendered widget."""
    DATA = Path('data/external')  # directory holding the local demo media files
    st.write(""" ### Display images """)
    st.write("#### Reference images")
    # Image fetched from a remote URL, scaled to the column width.
    with st.echo():
        st.image(
            "https://www.dogalize.com/wp-content/uploads/2018/03/ceiling-cat.jpg",
            caption="Ceiling cat",
            use_column_width=True)
    st.write("#### Embed image")
    # Image embedded inline as a base64 data URI — no network access needed.
    with st.echo():
        st.image(
            """data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAkGBxAQEBAPEBAPEBAPFRAVDw8PDxAVFw8PFRUWFhURFxUYHSggGB0lHhUVITEhJSkrLi4uFx8zODMsNygtLisBCgoKDg0OFxAQGy0fHyUrLS0tLS0tLS0tLS0tLS0tLSsrLS0tKy0tLS0tLS0rLS0tLS0tLS0tLS0tLS0tLS0tLf/AABEIAJQBVAMBIgACEQEDEQH/xAAbAAACAgMBAAAAAAAAAAAAAAAAAQIFAwQGB//EADoQAAEEAAUCBAQEBQQBBQAAAAEAAgMRBAUSITFBURMiYXEGgZGxMkJSoRRigsHwFSPR4fEHQ1OSov/EABgBAAMBAQAAAAAAAAAAAAAAAAABAgME/8QAIxEBAQACAgMAAgIDAAAAAAAAAAECEQMhEjFBE1FhcSIyQv/aAAwDAQACEQMRAD8A9TcoFZCoFURJoCaRhNJNANNJNIGE0gmkYSTQkEUlJJI0UJpIATCEwgEmhCZBMIUZDQJ7IDahW0xcxl+fefRNG6Nt+WSwW/Ot2rpozarHKX0VlntlCChBTJiesD1mesLkyQTpAUqQEaUHBZaUXBI2u4LEQs7gsZCQRaFlY1RCTMUwnSHAu7ApwNhoTIU2hBCZMRUaWRyggAKEhUiVgmcqhVrSP3QtSaXdCvTPa2coFTcoFZNgE0k0jNNJNACaSYSBhNJNIwhCEgRSUkkjRQmkgBMJJhACaE0yC1syfpieeNitlYMe24n+xRfQntz+Goi7B+RVlk2YmN3huNxngk/gPYen2WhgpXVub9tR+4pZZaNHr3ohRZruNb31XZNcCm4rlsLm/hN33YO2+j/pbDPiFjhtvfC1xyljGyy6XMrwFpyYto6hcX8S/EmIjvSzY9VxmL+IsR+o7ouWhMbXsBzJn6gsc2cxN5cPqvGG5/N1cVrzZvI/bUfqlMrTuL3PD5rG/hwPzWY4kHqvC8LmEzN2uIv1V9D8TTsbvZ07/wBPB/z3T8i8XqhlCwYjFtY3UfkO5XM5Lnxn2oihbieGjuVtl3jHklo6b7+6nLOT17Vjhv2hjZ5Jh5rDb2jaSBXqeXfZaWBfHBM1zWtY4nfSK1DqD3Vjig6qA/z5rnca0hwc/wAu/Vwv6LKyzv62kl6ensdYTJVZgMTcbN+g+yzmddM7ct6bDljJWLxUeImSTitTEP2Wd7lX4t+ycKqzEzeYoVfipfMULRm7VyiplQWLcIQhIzTQhIGgITCAYQgISM0k0kgEimkUjJCEIATCSYQDQhCZGEpW20juEF1LBLL0sD3KqQmjFgmjYOIPoaKyuw5qrBH8w/uFhkcxpNuJ9AAFqy5kxvGofJ1KbZI01bUp4S3jcdQANx29VymbOdhpAW2YpLLP5Hcub/ndX2JzVpHr6KjlxP8AEF0J06XNcRq5Eo3B+Yv6Lnuer008Ou0psUJmVubC47MJNy0jcEgq0y7EuYS2z5bsdiOiwZhDHLK0mmkg6hf7rXj5N9VGWGlQ3BaheoBSdgHNF8qGLmokMFaTXoVvYJwcwukdXhkW3uOq3lxk9M7LWzluXvNa26RzZ4pdLhWwH/aYwPJoOeeGjqSfa1SzYsvZbN21tXa6UcJjGRxxt1VJK4kgdADTQVzZc1n+sazjl9u1wELQAyPS2M7k1Zee57BWrI2gbfULm8hx7i23fm4vsrl8moWJAPQmlWMvui6+FiS/fSIne5Nqpnwbncs5O4Dh/n7LYnind+F7P6HC/wyscOCxWptvlIsX5jVfVPI8V9AzSAONhtufssokr/CtGaCcElrjR6OOywtMt+bR9QtJdMKtvESEq1Yn9yPksyrZaZTMtTEOtKZ1LAJE9lpoTYeyhb+lCryT4uhcoKblArNYQEk0GkhJO0gE0kJGlaEkICVotRQkZ2laErSBoSQgBMJJhANBdSRK1Z5wFUJDFYk+y53M8WQ4Bp9za2cfjQN74XPuxgc7cDkLHl5PGajbjw33W7LMQ7VK4b1pHp6rbgwwlFtc2/Tg11VTnszJK0HegBXZHw7mLGBzXHfkO/K4cFZ4Z723znU0sJcJeppFGjt2K4vOoiyRsjHEEW6h1r/AClaZrnbjJqZ6gmzwDzzSocThZH6nlx08nVXlG/1HH14Uy9ovpNk5jh8T8UrhuXHgbj67X81V5JG+aR0jjpB2snnqaHoAVvywUHMk9apwHFc3717rUyKcQv87bshkQdemgQdlpjdS6RZ2zZhBTXCxq12R1Dex9TsskETXl8QIDnj8J9BQdf0PzUM8gfFLKGlzhIWu1NrdwI2Hbm69QrDAugivEvHiBwa3Y7CuAD0/CVpcvSNKfKMa+CbwT5xdc7Obe5/ZXIgjZK1xaHtFlh9btt+tuH0WLOsJG6WPFMowkURY2oAtIB2s+bb0KJ8KXHU1w0usCuXuPB+ZpTdU+3YZWywG23fkjo35qyly5rgO29XQXEYSWWJxokEFoqqBAd35/8AK7DC5tcfnrVWwsK8b8KtGWKBp0nU49S0jlbeUysDjVgj8IcTZ+S4/GzPMjjFeoneuK7j0XQ4GItex7tnSMN+jglle2kn+LqzMS35LncdjXNdQG3ci1cYOahR3WpmswAsNHutvcc3piw2OPVWUOItccMb5tyrnA4rhKQ1+4WtObZTiltEjLT0lhEySxuYhTpTrnLGVkesTlVSEJItSaSajaaDNNJCAaEk0gEWklaRnaErRaAaEkIBpqKaAwzy0FTYjFjdWWYmmk2ubk6lOHIUzPE4Gx5PYLUlMMXlDQ93W1mMxDSBt62qmeYagCQT77rHkykbYxgzKBpY5zSY3Hc0en6VXYXEU0tvURZsbe6sZsJJINRsgnoq/MG+FpDSB+rz/dc+99NNaPFYgMa5ziD1AFUFr5RP407QfK14FXxbdtj/AGP/AArbKYYpWl0paaHG1VW1k7deqlowpe0CTDxEEaKmDTf8tEXfZPGzuFlK0Pj7DjDxRua0tLgK5rgAi+nH7jsVgnwfiZc2YgNc2nNPWz5j8+nzVr/6j4V8uDYW050bm3o81g0LA+m3G6hG5owrIpBsWtFAFupw247XW/snllJJr9lMbu7VGeM0wQSX5tBa4D9RHlB+YWKJgdlzS0tIL26rNaX6tyN/UD6/LNjcC90LGVqZFq1gnczkUaHYDj2U4MA8MMLms0uJkY5p22I5PQijt6rSXqJs7qHxdh/4fC4ctBpzt6/KS3+5Ct8LgR/p4l2aSNQfY8rAANttifstT43Dp8NFHEL/ANxtbGyytnexJ/ZdNHl9YSPD6qpo1bAnjez0/wCk/kT9riMvzjU4l+zSSGneh7A9fmr2PFtJJAGlwqyRsB1W5hMpw7wYdhxTWujbt1NMN/MlUePGiTw26SGmm6TYr+YGqT9XomZmJqXyebormXMwdAIcNA2Nd1Stj06ZGkA7amj83qN1ZxYzV0BHGgp3I9LnKMXqJ8wIW5jWAtNqrweEjHmj2/lC33y+Uj7rfD0wy9uQxzNLiRxa2Muxu9LemwGsGlzssLoX78KrCld3gZwQrIOBXI5Zi7A3V5DiUQVuPZuhYxMhPRbdO9YnLI8rE4qaZWi1FNSpK07UbTSCVpqCYKAlaLSQgztK0ikkDtFpJJGdp2opoB2mSooQGjmZ8ptcni8RRrlddmMVt2XE5oBGSaKWVsi8Jtq4nFHSbAb/AJytLCuaSXP3a2/MeoWji81jB837LD47pfJFejrXK5bu1vNRvZn8SOJ0xgUPQqkOCfO7XJI5jTd6SSr7CZBw42eL26LYzXBNDAN2kdq3Kn8mMvR+Fs7amdfDTIsvfNFJJIWgEW87bizt81izPCQRYK2MBBjbX9QHm297+Sx5JnT4DLBKPGgf+OJ1BzQ7YlpO3yWzHhDXhYaWHEwH8EUsgimiB/J5hTgncb1/e0bVnwyJY4WSxOkIt/iRSvDo3NHGkct67/8AhdljoY8Xg24qD9FtBPHdp9bH7KjxuXYwQOY3DCBgabkfIw6W/wAoasPwXnTIoJsG4k1egd9fIHp1+ankyuVuc/jpfHNSY1v4SSoRpNkD8TiTp5t3vZG/Wlu+AJ3RRULmLSQ0kBnUvA77N/dV8bNMbxe1bkN6dSNvdXGWzNZNFIQdMd/O21v24tbcNlZ8kqx+IXjDsjw0Aa2V7XOD6B8ONmkF1Hk24AX6+y8zZFeP8OaTESNNVbtQe4gHcOIAF3uLql0OaZpJiM114fTIYmGLwy6g9lanNvoSTYP8qlNgpRJ4oy/ENe2wDI7Dhrb5IcX8crbfjlv3GNlymmr8UZZE2bCHDtMcj5Wt0se69JG+/Pb6rNn3wzO14e3Ek6RYa4C6HrzSMNI2GYYzEyRyzRhwiw8DrETndXymgT6BDs7lleXyFtu4YDqodgDQUzaiyicmNwcLc3knt3pbmkVquuLAUIQLLQKLh2q1r4bFPFs0Vz12SyhyrTD4ktoi6WycXq5r0VNHiKPAv33W3G+jqsHY2tcajKLjBzNA3PPdV+eQteCW1acjNbdjW2xVU7DzMsE2CtpWdiuwuLcx+krpsDitVLlMXA4Osilb5TNwiB1cZ2QowO8oQr0l17isRKm4rGVlVQWhJCRpJ2ohNI0gU7UU0A01FNACEJJAIQkgzTSQgGEwoqQQTDiRYK5TOYLBsLr5G2FSZphrB32RnjvFWF1XmONw0fihum7O9HZdvkWAhZGKazjmh91zec4YWdPIP1Wvgs6xEDaLNQ6crgzls06sdSu7MLWkmgFo5lPhiNLjGT01VyuSx3xdIWlug2eBpKpsswWImlEkjTp53Dj9Bys8eG3u9LvJPU7dJisHA7VuG30Z5wT7EKgOEJmDGamv3vS2iB7AnZdtDDEGamt3A3Gk/PoqdromEkuEbieA7fbgbi/snjyXEssJQzEyshfG4nQ4b24kG+ov2P0XL5d/tyeLVlxPya3+66XDH+IL9LmuDTTiOvqT1/7VVn+XCHQ8u0htgdje/PTf7InJLbj9p+F1tbYCYSNtw56X0Cy4rE6PLsfEsC657H6KmwUlNoA2Olg7+qWMkFWS0d7cBpB2uuy04sbMk594tbK2eFii8FzdXmadra7t6cLoM1w+IxJc50pc1oDq24roHev1UcBkLnMZIH+tuO5B3PKsGENpuqPaxTnXV8/PddH3bH5pR4fDiQU6MmhX+y0AOG3Q9duiusvwcbdmReFxYc3cet2rCPBhjTp0tP5TQIB6uPorPI8YzENIkawSMNHcfXdVvaPTi8c/Ti3sNg20s7WKJA7bWo5tiJIZS+Noe0gawOPdb3xDlzn4gFml1bWCPor3AZUxjCX+ax133RkI4OfPI3ODtIY7qCDurfBhsw1RuAPuVVZ9hYvGOkgm9gAPougyDBtLQQK4TJs4BjwKdVb72sZxLQ6unqVbzYfS3j5rhszeWSk3QP3W2MZ5Vd5vE1zbCpsvk0upZH4s6OVpQPBda0Q7jCy+UIWpgT5BukqG3oLioEocVAlY1aVotQtFpGyWnax2naQTtO1C00j0ladqFp2gJWi1G0WootIJWi1G0WgJWmHKFotGwy2tLGQagVs6ljldstMcv2mxx+b4aNgqrceB6+3Vc5LhZmu1HTEHHy+JqJd7Rstx+i7DPcI6Sgx/hney38RHv0XEz48x6omDSReqV9lzwOTfb0WXLx/ZOmvHn8q7wuCgI1StJcO4azfvVm1/Ue/oP2WE4ttLyaetRZpFdVG0Hmo8T+/kAWPMsLG5rjULw7e3Mk27U7al5blmEaLc4gady0UT7HoP3Kt8gz2ZuohobCCKMmp2rsGg/iJ/lACy5OC/8rw5f26L+EbGXvZHiYWgxkvgcJWvA/KB09V02Xx4edhild4r99QdE5u3IFHY0CAqHLs3jkc4EFkg32eGkkH8J5APoO6tXY94aTqcA6uQ0afQULKjHC77i7lNdOWz3II8ulfMzaKYABxOzX3ek/I7exV98MfCUErjjJm7yG2NNbsAoE37WB7Kr+IJHzM0NYZBqaXcjcHlWuU49w0gnSaGpr+3v7rux3O3PbueLbz+aNjSyDECMU0UzDue4SF1DfgddvmtDCZLEXOeRiMQ7V/7rjHG7ULNEb0PZdEMSXtq3tN8sYwGuQ2+3qtiEDd2nfu7cg9RfI+S0mLK5K5+XRsAdp0fpaHAi/nTlV5cI2Ykv8WFpOxja2fjpvqr9lYZ9inMF6nN224c0n2NrmsPj5S+3xMI3p7CWWPXSaPtSV1B3XVTRPc5+hsT+x8SVp5/nFKtzhk9fmj2/+SGvfzBv3Sgikdx5bvjSAQezgNR+ZCt8LhHx0XPfXZ24I+eo/uiY7LenI4L4efI7U7zHrqb/AHbbf3XXYPBNjG4LSO17+u63A2M7Brb7sq/pz90GRzRW5HZyqYfsrm1sRu3bcen/AAuL+I8I19luxHIXaOkad217cELmc+midYcC1x4cP7/8rbFlXKYefYtPRYWv0nlRlZTjysIge40AU6cdpleKBjCFp5ZlkgjG6EtjT1ghQIUi5IlZ1ZaUqTtFqTFIpBTCDOkUhNICkUmhBlSKTtFpAqRSLRaQLSikWi0AUikWnaAiQtbE6q6raLlq4iVMKDMWyaTpNOPB9FxmZ5bO9xJDTYO42O4orv8AETBarZGgk6QbBG44vqleWzpcwnt5a3IsW38or9Qdx6gd0sZl+IczS6PSf1AgucPl9l6aGN7KL4mnosblV+MeTYfCTxtczwXEHhxAJ+Q4H7o8WWBhAZK6V34nBjqivo0/qra+m9bklestwrP0hH8Cw8tH0T879Lwjx4ySAgNZIBRohruQfL9vq4910GVfFkkLQJWF+53IJK7/AP0yP9I+gU25TH+hv/1CLnv3BMdfVdh89wbmi3saXbUdjurjAY3CPbra+NwF3uNupUf9Lh6xsP8ASFsYfAQt4jYL7AK8M5Pic8bUJPiDDt/BR9loP+JoSSLcHcimu+YJCtzgI/0hQ/0+P9IW15WX43HY6fxXFzC8gcsI6d6/MPstrLXR/hILSfQ0VfSYBoNgJxx10H0WP5Zv008P5abcS5vlALT7WCs7c3f+Esd78hbrB3CzAN7D6K5yp/GrWvLjYFH1/wCVmfjJG7USOxW8A3ssgjBT/KX41FKNe4Ba702/ZVOPy4v/ABWd744XaDDN7BD8ID0+yrzpeEcIMkutlmgyctN0uyOAHr+yYwHqUbo1FPBG4NA0pK8GCPdCCb5UUIQAmkhJRpoQkDQhCRmmhCAEIQkBSVIQkCpFIQgCkIQmGN60p0ITpxXzBYAE0LCtoYalpTQpNINUgkhKmk1Z2BCEQieskQQhaRNZkghCpDHIFja0WhCX00iFPShCYAaszAhCuIrIE7SQtEJtKytQhMkwhCEB/9k=""",
            caption="embedded image",
            width=500)
    # audio
    st.write("------")
    st.subheader("Embed audio")
    with st.echo():
        # NOTE(review): this handle is never closed — consider a with-block.
        audio_file = open(DATA / 'applause7.mp3', 'rb')
        st.audio(audio_file, format='audio/mp3')
    st.write("_Note: seems to have trouble in Firefox_")
    # video
    st.write("------")
    st.subheader("Embed video")
    with st.echo():
        # YouTube URLs are embedded directly; start_time seeks into the clip.
        st.video("https://www.youtube.com/watch?v=B2iAodr0fOo", start_time=2)
def about():
    """Render the "About" page: project description, a sample rig sound,
    and the table of the 10 sound classes the POC predicts."""
    set_png_as_page_bg('oil2.png')
    st.title("About present work")
    st.subheader(
        "Billions of dollars are spent in oil rig operations including the safety on deck, quick analysis, efficiency etc. While multiple systems and heavy machinery are"
        " used for various tasks at hand, there always are avenues that can be explored to bring the efficiency and safety at optimum level."
    )
    st.subheader(
        "Multiple sounds are generated at the rigs during the extraction process and classifying the sounds correctly can help the engineers in reinforcing their"
        " initial estimates and quick decisioning.")  # typo fix: was "decisioing"
    # Close the handle deterministically instead of leaking it.
    # NOTE(review): absolute path only exists on the author's machine — make relative.
    with open(
            "/Users/prashantmudgal/Documents/Quantplex Labs/Sound_app/machine_6.wav",
            'rb') as audio_file:
        audio_bytes = audio_file.read()
    st.audio(audio_bytes, format='audio/wav')
    st.subheader(
        "In the present POC, we are classifying the sounds obtained from oil rigs into 10 categories:"
    )
    # typo fix: was "Operatre Pump"
    Final_Sound = [
        'Blowout', 'Gas Emission', 'Rock Bed', 'Heavy Gas', 'Heavy Metal',
        'Oil Drill Rig Exterior', 'Operate Pump', 'Dieseling', 'Fracturing',
        'Hydraulic'
    ]
    df = pd.DataFrame(Final_Sound, columns=['Sound Class'])
    st.table(df)
def present_audio(audio_samples, label, fig, plotting_slot, row):
    """Show an audio player for *audio_samples* and draw its waveform into
    subplot *row* of *fig*; builds the figure on the first call (fig is None)
    and returns it so the caller can pass it back in."""
    # audio player — st.audio wants a file, so round-trip through a temp wav
    st.header(label)
    tmp_name = "temp.wav"
    sf.write(tmp_name, audio_samples.T, FS)
    st.audio(tmp_name)
    remove(tmp_name)
    # lazily create the shared figure: 3 rows when the original signal leads,
    # 2 rows otherwise
    if fig is None:
        if label == "original":
            n_rows, titles, height = 3, ["original", "input", "enhanced"], 600
        else:
            n_rows, titles, height = 2, ["input", "enhanced"], 400
        fig = make_subplots(
            rows=n_rows,
            cols=1,
            subplot_titles=titles,
            shared_xaxes=True,
        )
        fig.update_layout(showlegend=False, height=height)
        fig.update_yaxes(fixedrange=True, range=[-1.0, 1.0], title="level")
        fig.update_xaxes(title="time [s]")
    t = np.arange(len(audio_samples)) / FS
    fig.append_trace(
        go.Scatter(x=t, y=audio_samples, line=dict(width=1)),
        row=row,
        col=1,
    )
    plotting_slot.plotly_chart(fig, use_container_width=True)
    return fig
def play(raw_text, idioma_key):
    """Synthesize *raw_text* with gTTS in language *idioma_key* and embed the
    resulting mp3 in the page."""
    tts = gTTS(text=raw_text, lang=idioma_key)
    tts.save("audio.mp3")
    # Fix: close the handle deterministically instead of leaking it.
    with open("audio.mp3", "rb") as audio_file:
        audio_bytes = audio_file.read()
    #st.write("To download -> see options on the right side")
    st.audio(audio_bytes, format="audio/mp3")
def run(alarmH, alarmM):
    """Block until the wall clock reaches alarmH:alarmM, then announce the
    alarm and play song.mp3."""
    import time  # local import: no new top-level dependency needed
    while True:
        # Read the clock once per iteration so hour and minute are consistent.
        now = datetime.datetime.now()
        if alarmH == now.hour and alarmM == now.minute:
            st.write("Time to wake up")
            with open("song.mp3", "rb") as audio_file:  # fix: close the handle
                st.audio(audio_file, format='audio/mp3')
            break
        # Fix: the original busy-waited and pinned a CPU core; a 1 s sleep
        # still checks well within the one-minute match window.
        time.sleep(1)
def main():
    """Spotify artist explorer: look up an artist by name and show profile
    info plus the top-3 tracks with album art and audio previews."""
    st.sidebar.subheader(
        'Saiba tudo sobre o seu artista favorito no Spotify! \nConfira popularidade, número de seguidores e músicas mais tocadas.\n'
    )
    name_input = st.sidebar.text_input('Digite o nome do artista:')
    st.sidebar.markdown('[GitHub](https://github.com/mariromildo)')
    if name_input:
        result = sp.search(q='artist:' + name_input, type='artist')
        result_id = result['artists']['items'][0]['id']
        query = 'spotify:artist:' + result_id
        artist = sp.artist(query)
        # Artist infos
        name = artist['name']
        genres = artist['genres'][0]
        popularity = artist['popularity']
        followers = artist['followers']['total']
        image = artist['images'][0]['url']
        st.header(name)
        st.write('**Gênero musical:**', genres)
        st.write('**Popularidade:**', popularity)
        st.write('**Seguidores no Spotify:**', followers)
        st.image(image, width=200)
        st.write('__________________________________________________________')
        # Top 3 track infos
        top_tracks = sp.artist_top_tracks(query)['tracks']
        st.header('Top tracks')
        for track in range(3):
            music_name = top_tracks[track]['name']
            album = top_tracks[track]['album']['name']
            # BUG FIX: always take the first (largest) album image. The
            # original indexed images by the loop counter, which picked a
            # different size per track and could raise IndexError when an
            # album exposes fewer images than the track number.
            album_image = top_tracks[track]['album']['images'][0]['url']
            preview = top_tracks[track]['preview_url']
            feat = top_tracks[track]['artists']
            feat_name = [a['name'] for a in feat]
            # Drop the searched artist so only featured artists remain;
            # guard on membership in case the API returned a different act.
            if name in feat_name:
                feat_name.remove(name)
            st.write('**Música:**', music_name)
            if len(
                    feat_name
            ) > 0:  # Returning featured artists only when the value isn't the searched artist name
                st.write('**Participação:**',
                         ', '.join(str(p) for p in feat_name))
            st.write('**Album:**', album)
            st.image(album_image, width=200)
            # track audio preview
            st.audio(preview)
            st.write(
                '__________________________________________________________')
def alarm(alarmH, alarmM, ap):
    """Wait until alarmH:alarmM (12-hour clock, ap is 'am'/'pm'), then
    announce the alarm and play song.mp3.

    Bug fixes vs. the original:
    - the 'pm' conversion added 12 to the *minutes* (``alarmH = alarmM + 12``)
      instead of the hour;
    - the minute comparison tested the function object ``alarm`` against
      ``now.minute`` instead of ``alarmM``, so it could never match;
    - the loop busy-waited at 100% CPU; it now sleeps between checks.
    """
    import time  # local import: no new top-level dependency needed
    if ap == 'pm':
        alarmH = alarmH + 12
    while True:
        now = datetime.datetime.now()
        if alarmH == now.hour and alarmM == now.minute:
            st.write("Time to wake up")
            with open("song.mp3", "rb") as audio_file:
                st.audio(audio_file, format='audio/mp3')
            break
        time.sleep(1)
def text2speech(input_text):
    """Synthesize *input_text* with the Coqui TTS model (via torch.hub) and
    embed the result as a wav player."""
    synthesizer = torch.hub.load('coqui-ai/TTS:dev', 'tts', source='github')
    samples = synthesizer.tts(input_text)
    audio_obj = IPython.display.Audio(samples, rate=synthesizer.ap.sample_rate)
    # Persist the rendered wav bytes, then stream them back into the page.
    with open(WAVE_OUTPUT_FILE, 'wb') as out_f:
        out_f.write(audio_obj.data)
    with open(WAVE_OUTPUT_FILE, 'rb') as in_f:
        st.audio(in_f, format='audio/wav')
def main():
    """Minimal Streamlit demo: a title, placeholder headings, one image,
    one audio clip and one video."""
    st.title('Hello World')
    # Empty placeholders keep vertical spacing between the widgets.
    for blank in (st.header, st.subheader, st.text):
        blank('')
    st.image('logo.png')
    st.subheader('')
    st.audio('name.wav')
    st.video('name.mov')
def myplaysound(filename):
    """Embed the audio file *filename* as an ogg player in the page.

    Streamlit only renders a player widget; the user still has to press play.
    """
    # Fix: close the handle. The original's cleanup line was a bare
    # ``audio_file.close`` (missing parentheses) and commented out anyway,
    # so the descriptor leaked on every call.
    with open(filename, "rb") as audio_file:
        audio_bytes = audio_file.read()
    st.audio(audio_bytes, format="audio/ogg")
    st.write('Click play button to play')
def main():
    """Small demo page: heading text followed by an image, audio and video."""
    st.header('This is a header')
    st.subheader('This is a subheader')
    st.text("It's a text")
    # Render each media widget with its asset in turn.
    for render, asset in ((st.image, 'image.png'),
                          (st.audio, 'record.wav'),
                          (st.video, 'video.mov')):
        render(asset)
def present_audio( audio_samples, label, fig, plotting_slot, row, signal_presentation, pesq_score ): "Show audio player and plot" # show audio player st.header(label) st.write("pesq: " + str(round(pesq_score, 2))) temp_file_name = "temp.wav" sf.write(temp_file_name, audio_samples.T, FS) st.audio(temp_file_name) remove(temp_file_name) # plot audio in subplot if fig is None: if label == "original": n_rows = 3 titles = ["original", "input", "enhanced"] height = 600 else: n_rows = 2 titles = ["input", "enhanced"] height = 400 fig = make_subplots( rows=n_rows, cols=1, subplot_titles=titles, shared_xaxes=True, ) fig.update_layout(showlegend=False, height=height) if signal_presentation == "Time signal": fig.update_yaxes(fixedrange=True, range=[-1.0, 1.0], title="level") else: fig.update_yaxes(fixedrange=True, title="Frequency Hz") fig.update_xaxes(title="time [s]") time = np.arange(len(audio_samples)) / FS if signal_presentation == "Time signal": fig.append_trace( go.Scatter(x=time, y=audio_samples, line=dict(width=1)), row=row, col=1, ) else: freqs, bins, Pxx = signal.spectrogram( audio_samples, fs=FS, window="hann", nfft=512, ) fig.append_trace( go.Heatmap( x=bins, y=freqs, z=10 * np.log10(Pxx), colorscale="Jet", showscale=False ), row=row, col=1, ) plotting_slot.plotly_chart(fig, use_container_width=True) return fig
def main():
    """Hello-world page demonstrating text, image, audio and video widgets."""
    st.title('Hello World')
    st.header('This is a header')
    st.subheader('This is a subheader')
    st.text('This is a text')
    st.image('logo.png')
    # Captioned media widgets rendered in sequence.
    for caption, render, asset in (
        ('This is a audio', st.audio, 'record.wav'),
        ('This is a video', st.video, 'sentiment_motion.mov'),
    ):
        st.subheader(caption)
        render(asset)
def np_audio(np_array, stem_name, samplerate=44100):
    """Write *np_array* to a temporary wav, embed it as an audio player, and
    register its static-media URL under *stem_name* in the module-level
    stem_urls list."""
    tmp_path = 'temp.wav'
    soundfile.write(tmp_path, np_array, samplerate, 'PCM_24')
    st.audio(tmp_path, format='audio/wav')
    # Derive the id Streamlit's media file manager will serve the bytes under.
    with open("temp.wav", "rb") as f:
        file_id = _calculate_file_id(f.read(), "audio/wav")
    stem_urls.append({
        'src': f'{STATIC_MEDIA_ENDPOINT}/{file_id}.wav',
        'name': stem_name,
        'customClass': stem_name,
    })
def test_st_audio_options(self):
    """Test st.audio with options."""
    fake_audio_data = "\x11\x22\x33\x44\x55\x66".encode("utf-8")
    st.audio(fake_audio_data, format="audio/mp3", start_time=10)
    element = self.get_delta_from_queue().new_element
    # Expected payload is base64.b64encode of the bytes above ("ESIzRFVm").
    self.assertEqual("ESIzRFVm", element.audio.data)
    self.assertEqual("audio/mp3", element.audio.format)
    self.assertEqual(10, element.audio.start_time)
def render_trial_sound(label, trials_per_class):
    """Pick *trials_per_class* random wav files from ./data/<label> and embed
    an audio player for each."""
    # read the files from the label directory
    label_dir = './data/' + label
    wav_files = [f for f in listdir(label_dir) if isfile(join(label_dir, f))]
    # render sample sounds
    for wav_file in random.sample(wav_files, trials_per_class):
        # Fix: close each handle instead of leaking one per rendered file.
        with open(label_dir + '/' + wav_file, 'rb') as audio_file:
            audio_bytes = audio_file.read()
        st.audio(audio_bytes)
def test_st_audio_options(self):
    """Test st.audio with options."""
    from streamlit.media_file_manager import _calculate_file_id
    fake_audio_data = "\x11\x22\x33\x44\x55\x66".encode("utf-8")
    st.audio(fake_audio_data, format="audio/mp3", start_time=10)
    el = self.get_delta_from_queue().new_element
    self.assertEqual(el.audio.start_time, 10)
    # The payload is served by the media file manager, so the proto carries
    # a URL under the static media endpoint instead of the raw bytes.
    self.assertTrue(el.audio.url.startswith(STATIC_MEDIA_ENDPOINT))
    # NOTE(review): assertTrue(x, msg) only checks the first argument is
    # truthy — the file id is never actually compared with el.audio.url.
    # Presumably assertIn/assertEqual was intended; confirm and fix.
    self.assertTrue(_calculate_file_id(fake_audio_data, "audio/mp3"), el.audio.url)
def audio_section(idx, file_name):
    """Embed the audio at *file_name* with a label-selection radio widget and
    record the chosen label in the module-level *answers* dict under *idx*."""
    global answers  # moved to the top; the original declared it mid-function
    # print('file name', file_name)
    # Fix: close the handle instead of leaking one per rendered section.
    with open(file_name, 'rb') as audio_file:
        audio_bytes = audio_file.read()
    st.audio(audio_bytes)
    # Deep-copy so the shared label list is not mutated by insert/append.
    options = copy.deepcopy(random_labels)
    options.insert(0, DEFAULT_OPTION)
    options.append(UNKNOWN_OPTION)
    radio_answer = st.radio("Audio label", options, key=file_name)
    st.write('You selected label: **{}**'.format(radio_answer))
    answers[idx] = [file_name, radio_answer]
def main():
    """Demo page with headings, two images, an audio clip and a video."""
    st.title('Hello World')
    st.header('This is header')
    st.subheader('This is subheader')
    st.text('This is text')
    st.subheader('Imagens')
    for picture in ('0.jpeg', 'logo.png'):
        st.image(picture)
    st.subheader('Audio')
    # NOTE(review): '.mov' is a video container — confirm this file really
    # contains audio st.audio can play.
    st.audio('record.mov')
    st.subheader('Video')
    st.video('formaturaNicolas.mp4')
def main():
    """Music-genre classifier UI: record 6 s from the microphone, optionally
    replay it, and classify the recording with the pre-loaded model."""
    title = "Music Genre Classifier"
    st.title(title)
    image = Image.open('./presentation/turntable-2154823_1920.jpg')
    st.image(image, use_column_width=True)
    if st.button('Record'):
        with st.spinner('Recording...'):
            # Blocking capture: 6 seconds of mono float64 at the device rate.
            signal = sd.rec(int(6 * RECORD_SR),
                            samplerate=RECORD_SR,
                            channels=1,
                            blocking=True,
                            dtype='float64')
            signal = signal.reshape(signal.shape[0])
            # Resample from the capture rate to the rate the model expects.
            new_num_samples = round(
                len(signal) * float(DESIRED_SR) / RECORD_SR)
            signal = sps.resample(signal, new_num_samples)
            # Persist the recording across Streamlit reruns via query params.
            st.experimental_set_query_params(signal=signal)
            st.success("Recording completed")
    app_state = st.experimental_get_query_params()
    if st.button('Play'):
        try:
            signal = np.array(app_state["signal"], dtype='float')
            temp_file = io.BytesIO()
            write(temp_file, 22050, signal)
            st.audio(temp_file, format='audio/wav')
        # Fix: a bare except hid every error; only a missing "signal" key
        # means nothing has been recorded yet.
        except KeyError:
            st.write("Please record sound first")
    if st.button('Classify'):
        with st.spinner("Thinking..."):
            signal = np.array(app_state["signal"], dtype='float')
            pred = model.predict(get_harm_perc(signal))
            st.success("Classification completed")
            # Fix: the original computed the winning label but discarded it
            # (a bare expression statement); display it to the user.
            st.write(labels[np.argmax(pred)])
def main():
    """Demo page: show an image stretched to the page width, then play a
    video and an audio file loaded from disk."""
    img = Image.open('data/image_03.jpg')
    # Stretch the image to fill the page width.
    st.image(img, use_column_width=True)
    # Remote images also work: pass a URL string straight to st.image().
    # Fix: use context managers so the handles are closed deterministically
    # (the original `open(...).read()` calls leaked both descriptors).
    with open('data/secret_of_success.mp4', 'rb') as f:
        video_file = f.read()
    st.video(video_file)  # play the loaded video bytes
    with open('data/song.mp3', 'rb') as f:
        audio_file = f.read()
    st.audio(audio_file)  # play the loaded audio bytes
def test_st_audio(self):
    """Test st.audio."""
    # TODO(armando): generate real audio data
    # For now it doesnt matter cause browser is the one that uses it.
    payload = "\x11\x22\x33\x44\x55\x66".encode("utf-8")
    st.audio(payload)
    el = self.get_delta_from_queue().new_element
    # base64.b64encode of the payload above yields "ESIzRFVm".
    self.assertEqual("ESIzRFVm", el.audio.data)
    # Format defaults to wav when not specified.
    self.assertEqual("audio/wav", el.audio.format)
def cs_display_media():
    """Cheat-sheet section for Streamlit's display-media API: renders an
    image, an audio clip and a video, each preceded by the code snippet that
    produces it, and closes with a link to the official documentation."""
    st.markdown(
        "<h5 style='text-align: center ; color: black;'>Rendered -- Display Media Section</h5>",
        unsafe_allow_html=True)
    st.markdown('***')
    # --- Image ---
    st.markdown('**Image**')
    st.markdown("- **st.image**('_`path`_')")
    st.image('./brain.png', width=300)
    st.markdown('***')
    # --- Audio: show the snippet with st.code, then execute the same steps ---
    st.markdown('**Audio**')
    st.markdown("- **st.audio**(_`data`_)")
    audio_code = ''' audio_file = open('./Media/Cornfield_Chase.wav', 'rb') audio_bytes = audio_file.read() st.audio(audio_bytes, format='audio/wav') '''
    st.code(audio_code)
    # NOTE(review): this handle is never closed — consider a with-block
    # (and mirror the change in audio_code above to keep the demo honest).
    audio_file = open('./Media/Cornfield_Chase.wav', 'rb')
    audio_bytes = audio_file.read()
    st.audio(audio_bytes, format='audio/wav')
    st.markdown(
        "<h6 style='text-align: center ;'>Source ~ Interstellar ✨( Cornfield Chase ) </h6>",
        unsafe_allow_html=True)
    st.markdown('***')
    # --- Video: same snippet-then-render pattern as audio ---
    st.markdown('**Video**')
    st.markdown("- **st.video**(_`data`_)")
    video_code = ''' video_file = open('./Media/Star-6962.mp4', 'rb') video_bytes = video_file.read() st.video(video_bytes) '''
    st.code(video_code)
    # NOTE(review): handle leaked here as well.
    video_file = open('./Media/Star-6962.mp4', 'rb')
    video_bytes = video_file.read()
    st.video(video_bytes)
    st.markdown(
        "<h6 style='text-align: center ;'>Creator - fxxu, Source - Pixbay </h6>",
        unsafe_allow_html=True)
    st.markdown('***')
    # Expander linking to the official docs, with an inline base64 logo.
    info = st.beta_expander('Direct References')
    info.markdown(
        '''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32 style='float:left; vertical-align:middle'>](https://docs.streamlit.io/en/stable/api.html#display-media) <small style='color:black; font-size:16px;'>Link to the Official Documentation of this Section </small>'''
        .format(img_to_bytes("logo.png")),
        unsafe_allow_html=True)
    st.markdown('***')
def main():
    """Talk-to-Transformer demo page: record speech, transcribe it, generate
    a continuation with DistilGPT2, and read the result back aloud."""
    # Two header images side by side with a thin spacer column between them.
    col1, mid, col2 = st.beta_columns([10, 1, 10])
    with col1:
        image = Image.open(os.path.join(IMAGE_DIR, 'speak.jpg'))
        st.image(image, width=200)
    with col2:
        image = Image.open(os.path.join(IMAGE_DIR, 'unicorn.jpg'))
        st.image(image, width=200)
    title = "Talk to Transformer"
    st.title(title)
    # button_sent persists across Streamlit reruns so the sections below
    # keep rendering after the first Record click.
    session_state = SessionState.get(name="", button_sent=False)
    record_button_clicked = st.button('Record')
    if record_button_clicked or session_state.button_sent:  # <-- first time is button interaction, next time use state:
        st.empty()
        session_state.button_sent = True
        if record_button_clicked:
            # you can get some voice samples from here: https://huggingface.co/facebook/wav2vec2-base-960h
            with st.spinner(f'Recording for {DURATION} seconds ....'):
                sound.record()
            st.success("Recording completed")
        if st.button('Play'):
            try:
                audio_file = open(WAVE_OUTPUT_FILE, 'rb')
                audio_bytes = audio_file.read()
                st.audio(audio_bytes, format='audio/wav')
            except:
                # Most likely the wav file does not exist yet.
                st.write("Please record sound first")
        st.subheader("Here is what you spoke:\n")
        if record_button_clicked:
            # Transcribe the new recording; only on the Record rerun so the
            # cached transcript is reused on later reruns.
            session_state.input_text = speech2text(WAVE_OUTPUT_FILE)
            session_state.input_text = session_state.input_text.capitalize()
            # Heuristic: append a question mark to question-like sentences.
            for question_str in [
                    'Which ', 'Why ', 'Who ', 'Whose ', 'Whom ', 'What ', 'How '
            ]:
                if session_state.input_text.startswith(question_str):
                    session_state.input_text += '?'
                    break
        st.write(session_state.input_text)
        st.subheader("Generating text using Transformer(DistilGPT2) model: \n")
        if record_button_clicked:
            session_state.gen_txt = generate_text(session_state.input_text)
        st.write(session_state.gen_txt)
        with st.spinner('Converting text to speech...'):
            text2speech(session_state.gen_txt)
def render_file(wavfile, transcript_path, PathToFixed):
    """Transcript-correction UI for a single wav file: plays the audio,
    offers its transcript in an Ace editor, and lets the reviewer either
    reject the file or save the fixed transcript into *PathToFixed*."""
    # The transcript shares the wav's basename with a .txt extension.
    TranscriptFile = os.path.join(
        transcript_path, wavfile.split(DELIM)[-1].replace(".wav", ".txt"))
    with open(TranscriptFile, 'r', encoding='utf-8') as f:
        data = f.read()
    key = wavfile.split(DELIM)[-1]  # basename doubles as the widget key
    st.subheader("Name of File = " + key)
    st.audio(open(wavfile, 'rb').read(), format='audio/wav')
    # Ace editor pre-filled with the current transcript; the sidebar controls
    # are keyed per file so their state does not bleed between files.
    content = st_ace(value=data,
                     theme=st.sidebar.selectbox("Theme.", options=THEMES, key=key),
                     font_size=st.sidebar.slider("Font size.", 5, 24, 24, key=key),
                     tab_size=st.sidebar.slider("Tab size.", 1, 8, 4, key=key),
                     show_gutter=st.sidebar.checkbox("Show gutter.", value=True, key=key),
                     show_print_margin=st.sidebar.checkbox(
                         "Show print margin.", value=True, key=key),
                     wrap=st.sidebar.checkbox("Wrap enabled.", value=True, key=key),
                     key=key)
    st.title(content)
    if st.sidebar.button("Reject"):
        print("Reject", key)
        # Advance to the next file and persist progress.
        SessionState.get().current_index += 1
        SessionState.sync()
        # NOTE(review): called with two args here but one arg in the branch
        # below — confirm SavePickleState's signature supports both.
        SavePickleState(SessionState.get().current_index,
                        key.replace(".wav", ".txt"))
        return
    if st.sidebar.button("Go to next file"):
        # Save the (possibly edited) transcript under the fixed directory.
        FixedTranscript = os.path.join(PathToFixed, key.replace(".wav", ".txt"))
        print(FixedTranscript)
        open(FixedTranscript, "w", encoding="utf-8").write(content)
        SessionState.get().current_index += 1
        SessionState.sync()
        SavePickleState(SessionState.get().current_index)
def display_media_audio(path: Path, start_second: int = 0):
    """Embed the audio file at *path*, seeking to *start_second*; a warning
    is shown (and nothing is played) for unsupported extensions."""
    suffix = path.name.split(".")[-1]
    mime_by_ext = {
        "mp3": "audio/mp3",
        "wav": "audio/wav",
        "flac": "audio/x-flac",
    }
    format_ = mime_by_ext.get(suffix)
    if format_ is None:
        st.warning("Selected type is not readable format")
        format_ = suffix  # keep the raw extension, exactly as the original did
    if format_ in {"audio/wav", "audio/mp3", "audio/x-flac"}:
        st.audio(read_audio_bytes(path), start_time=start_second, format=format_)
def display_media_audio_from_ndarray(y: np.ndarray, sr: int):
    """Convert a float waveform *y* (sample rate *sr*) to an in-memory
    16-bit mono PCM wav and embed it with st.audio."""
    # Fix: normalize by the absolute peak. The original divided by y.max(),
    # which mis-scales (or overflows int16) signals whose negative excursion
    # dominates, and raised ZeroDivisionError on an all-zero buffer.
    peak = float(np.abs(y).max())
    scale = 32767.0 / peak if peak > 0 else 0.0
    y_hex = (y * scale).astype(np.int16)
    binary_wave = struct.pack("h" * len(y_hex), *(y_hex.tolist()))
    with tempfile.TemporaryFile() as fp:
        w = wave.Wave_write(fp)  # type: ignore
        # (nchannels, sampwidth, framerate, nframes, comptype, compname);
        # writeframes patches the frame count on close.
        params = (1, 2, sr, len(binary_wave), "NONE", "not compressed")
        w.setparams(params)  # type: ignore
        w.writeframes(binary_wave)
        w.close()
        fp.seek(0)
        bytesio = io.BytesIO(fp.read())
        st.audio(bytesio)