Example #1
    with tf.GradientTape() as tape:
      output_prep = self.preprocess_image(output_image.value())
      output_features = self.feature_extractor(output_prep)
      
      output_content_map = build_content_layer_map(output_features, content_layer_weights.keys())
      output_style_map = build_style_layer_map(output_features, style_layer_weights.keys())
      total_loss = style_content_loss(
          output_content_map, output_style_map,
          self.content_layer_weights, self.style_layer_weights,
          self.content_targets, self.style_targets,
          self.content_reconstruction_weight, self.style_reconstruction_weight,
          self.total_variation_weight, output_image)

    grads = tape.gradient(total_loss, output_image)  
    optimizer.apply_gradients([(grads, output_image)])
    output_image.assign(clipping(output_image))

try:
  st.title("Neural Style Transfer With Tensorflow")
  progress_text = st.empty()
  output_image_placeholder = st.empty()
  model_load_unable_error = st.empty()
  progress_text.text("Preparing given data for applying NST ...")
  progress_bar = st.progress(0)

  st.sidebar.text("Neural Style Transfer")
  style_image = cv2.imread("./images/logo_image/nst_image.jpg")
  style_image = cv2.cvtColor(style_image, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; convert before displaying with channels='RGB'
  st.sidebar.image(image=np.asarray(style_image), use_column_width=True, 
    caption='', clamp=True, channels='RGB')


  video_required = st.sidebar.number_input(label='Is a video of the output required? (1 for Yes, 0 for No)', min_value=0, max_value=1, value=0, step=1)
  images_required = st.sidebar.number_input(label='Are images of the output required? (1 for Yes, 0 for No)', min_value=0, max_value=1, value=0, step=1)

  st.sidebar.header("Set images and various parameters ")
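
The train step at the top of this example follows the standard image-optimization pattern. Distilled into a self-contained sketch (the mean-squared "pull toward gray" loss below is a stand-in for the real style/content loss):

import tensorflow as tf

output_image = tf.Variable(tf.random.uniform((1, 224, 224, 3)))
optimizer = tf.keras.optimizers.Adam(learning_rate=0.02)

def toy_train_step():
    with tf.GradientTape() as tape:
        # stand-in loss: pull the image toward mid-gray
        loss = tf.reduce_mean(tf.square(output_image - 0.5))
    grads = tape.gradient(loss, output_image)
    optimizer.apply_gradients([(grads, output_image)])
    output_image.assign(tf.clip_by_value(output_image, 0.0, 1.0))  # keep pixels in [0, 1]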
Example #2
    that this file lives outside the Streamlit distribution. Otherwise, changes
    to this file may be ignored!
""")
"""
1. If run-on-save is on, make sure the page changes every few seconds. Then
   turn run-on-save off in the settings menu and check (2).
2. If run-on-save is off, make sure "Rerun"/"Always rerun" buttons appear in
   the status area. Click "Always rerun" and check (1).
"""

st.write("This should change every ", secs_to_wait, " seconds: ", random())

# Sleep for 5s (rather than, say, 1s) because on the first run we need to make
# sure Streamlit is fully initialized before the timer below expires. This can
# take several seconds.
status = st.empty()
for i in range(secs_to_wait, 0, -1):
    time.sleep(1)
    status.text("Sleeping %ss..." % i)

status.text("Touching %s" % __file__)

platform_system = platform.system()

if platform_system == "Linux":
    cmd = (
        "sed -i "
        "'s/^# MODIFIED AT:.*/# MODIFIED AT: %(time)s/' %(file)s"
        " && touch %(file)s"
        % {  # sed on Linux modifies a different file.
            "time": time.time(),
Example #3

import random
import time

import numpy as np
import streamlit as st

from ga_tsp import *

max_iterations = 200
city_count = 25
population_size = 100
elite_size = 20
mutation_rate = 0.05

st.markdown('## Genetic Algorithm (Travelling Salesman Problem)')

##################
# Progress Update
##################
latest_iteration = st.empty()
bar = st.progress(0)

###########################
# Initialize cities
# Initialize GA algorithm
###########################
cityList = [
    City(x=int(random.gauss(0, 1) * 100), y=int(random.gauss(0, 1) * 100))
    for i in range(city_count)
]
GA_gen = geneticAlgorithm(population=cityList,
                          popSize=population_size,
                          eliteSize=elite_size,
                          mutationRate=mutation_rate)
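
The snippet ends before the loop that drives the progress bar. Assuming geneticAlgorithm is a generator yielding once per generation (an assumption about ga_tsp, which is not shown), the update loop might look like:

for i, generation in enumerate(GA_gen):  # assumed: one yield per generation
    latest_iteration.text(f'Generation {i + 1}/{max_iterations}')
    bar.progress((i + 1) / max_iterations)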
Example #4
def write():
    st.markdown("## Modelo Epidemiológico (SEIR-Bayes)")
    st.sidebar.markdown(texts.PARAMETER_SELECTION)
    w_granularity = st.sidebar.selectbox('Unidade',
                                         options=['state', 'city'],
                                         index=1,
                                         format_func=global_format_func)

    source = 'ms' if w_granularity == 'state' else 'wcota'
    cases_df = data.load_cases(w_granularity, source)
    population_df = data.load_population(w_granularity)

    DEFAULT_PLACE = (DEFAULT_CITY
                     if w_granularity == 'city' else DEFAULT_STATE)

    options_place = make_place_options(cases_df, population_df)
    w_place = st.sidebar.selectbox('Município',
                                   options=options_place,
                                   index=options_place.get_loc(DEFAULT_PLACE),
                                   format_func=global_format_func)

    options_date = make_date_options(cases_df, w_place)
    w_date = st.sidebar.selectbox('Data inicial',
                                  options=options_date,
                                  index=len(options_date) - 1)
    NEIR0 = make_NEIR0(cases_df, population_df, w_place, w_date)

    # Estimativa R0
    st.markdown(texts.r0_ESTIMATION_TITLE)
    should_estimate_r0 = st.checkbox('Estimar R0 a partir de dados históricos',
                                     value=True)
    if should_estimate_r0:
        r0_samples, used_brazil = estimate_r0(cases_df, w_place, SAMPLE_SIZE,
                                              MIN_DAYS_r0_ESTIMATE, w_date)
        if used_brazil:
            st.write(texts.r0_NOT_ENOUGH_DATA(w_place, w_date))

        _place = 'Brasil' if used_brazil else w_place
        st.markdown(texts.r0_ESTIMATION(_place, w_date))

        st.altair_chart(
            plot_r0(r0_samples, w_date, _place, MIN_DAYS_r0_ESTIMATE))
        r0_dist = r0_samples[:, -1]
        st.markdown(
            f'*O $R_{{0}}$ estimado está entre '
            f'${np.quantile(r0_dist, 0.01):.03}$ e ${np.quantile(r0_dist, 0.99):.03}$*'
        )
        st.markdown(texts.r0_CITATION)
    else:
        r0_dist = make_r0_widgets()
        st.markdown(texts.r0_ESTIMATION_DONT)

    # Previsão de infectados
    w_params = make_param_widgets(NEIR0)
    model = SEIRBayes(**w_params, r0_dist=r0_dist)
    model_output = model.sample(SAMPLE_SIZE)
    ei_df = make_EI_df(model, model_output, SAMPLE_SIZE)
    st.markdown(texts.MODEL_INTRO)
    w_scale = st.selectbox('Escala do eixo Y', ['log', 'linear'], index=1)
    fig = plot_EI(model_output, w_scale, w_date)
    st.altair_chart(fig)
    download_placeholder = st.empty()
    if download_placeholder.button('Preparar dados para download em CSV'):
        href = make_download_href(ei_df, w_params, r0_dist, should_estimate_r0)
        st.markdown(href, unsafe_allow_html=True)
        download_placeholder.empty()

    # Parâmetros de simulação
    dists = [w_params['alpha_inv_dist'], w_params['gamma_inv_dist'], r0_dist]
    SEIR0 = model._params['init_conditions']
    params_intro_txt, seir0_dict, other_params_txt = texts.make_SIMULATION_PARAMS(
        SEIR0, dists, should_estimate_r0)
    st.markdown(params_intro_txt)
    st.write(pd.DataFrame(seir0_dict).set_index("Compartimento"))
    st.markdown(other_params_txt)

    # Configurações da simulação
    st.markdown(texts.SIMULATION_CONFIG)
    # Fontes dos dados
    st.markdown(texts.DATA_SOURCES)
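
make_download_href is not shown in this example; a common minimal way to build such a data-URI download link in Streamlit (a sketch, not the author's helper) is:

import base64

import pandas as pd

def make_download_href_sketch(df: pd.DataFrame) -> str:
    csv = df.to_csv(index=False)
    b64 = base64.b64encode(csv.encode()).decode()
    # rendered later via st.markdown(..., unsafe_allow_html=True)
    return f'<a href="data:file/csv;base64,{b64}" download="data.csv">Download CSV</a>'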
Example #5
# net = NeuralMem(image_size=TRAINING_IMAGE_SIZE, index_pretrain=INDEX_PRETRAIN, kernel_size=KERNEL_SIZE, stride=STRIDE, padding=PADDING)

with st.beta_expander("FAST AND ROBUST IMAGE STYLE TRANSFER AND COLORIZATION", expanded=True):
    # header1 = st.write('## FAST AND ROBUST IMAGE STYLE TRANSFER AND COLORIZATION')
    header2 = st.markdown('#### by providing input and output example image pairs and by using similarity search')
    header3 = st.markdown('##### Transfer the style of images by providing input and output example images.')
    header4 = st.markdown('##### Colorize images by providing black-and-white or grayscale input and colored output example images (e.g. a grayscale photo as the input example and a colored photo as the output example for training)')

# video_file = open('tutorial.webm', 'rb')
# video_bytes = video_file.read()
# st.video(video_bytes)


col1_1, col1_2 = st.beta_columns(2)
input_ph = st.empty()
train_int_col, train_out_col = st.beta_columns(2)
input_col, output_col = st.beta_columns(2)
rand_input_col, rand_output_col = st.beta_columns(2)


uploaded_inp_example = col1_1.file_uploader("Choose INPUT EXAMPLE for training", type=['png', 'jpg'])
uploaded_out_example = col1_2.file_uploader("Choose OUTPUT EXAMPLE for training", type=['png', 'jpg'])
uploaded_file = input_ph.file_uploader("Choose input image", type=['png', 'jpg'])

if uploaded_inp_example is not None and uploaded_out_example is not None and uploaded_file is not None:
    net = NeuralMem(image_size=TRAINING_IMAGE_SIZE, index_pretrain=INDEX_PRETRAIN, kernel_size=KERNEL_SIZE,
                    stride=STRIDE, padding=PADDING)
    train_inp_example = preprocess(uploaded_inp_example, image_size=TRAINING_IMAGE_SIZE[0:2], gray_scale=False)
    train_int_col.image(train_inp_example, caption="INPUT EXAMPLE", width=250)
    train_inp_example = torch.tensor(train_inp_example)
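
preprocess is defined elsewhere in this project; a plausible minimal version matching how it is called above (the exact resizing and scaling are assumptions) could be:

import numpy as np
from PIL import Image

def preprocess_sketch(uploaded_file, image_size=(256, 256), gray_scale=False):
    img = Image.open(uploaded_file)  # Streamlit's UploadedFile is file-like
    img = img.convert('L' if gray_scale else 'RGB').resize(image_size)
    return np.asarray(img) / 255.0   # float array scaled to [0, 1]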
Example #6
projData

from sklearn import manifold
from sklearn import decomposition

if method_name == 'PCA':
    method = decomposition.PCA(n_components=2)
elif method_name == 'MDS':
    method = manifold.MDS(n_components=2)
elif method_name == 'TSNE':
    perplexity = st.slider('Perplexity', 10, 100, 30)
    method = manifold.TSNE(n_components=2, perplexity=perplexity)

placeholder = st.empty()
placeholder.text('calculating...')

pos = pd.DataFrame(method.fit_transform(projData), columns=['x', 'y'])
projcars = pd.concat([projcars, pos.reset_index(drop=True)], axis='columns')

placeholder.empty()
color = st.selectbox(
    'Color by:', ('Origin', 'Weight_in_lbs', 'Horsepower', 'Miles_per_Gallon',
                  'Displacement', 'Cylinders', 'Acceleration'))
# We use a point as mark
chart = alt.Chart(projcars).mark_point().encode(
    x='x', y='y', color=color).properties(width=600, height=600)
chart

projcars
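
Recomputing MDS or t-SNE on every widget interaction is slow; one mitigation (not in the original) is to cache the projection with the legacy st.cache API this Streamlit version ships:

import streamlit as st
from sklearn import decomposition, manifold

@st.cache  # memoized on (data, method_name, perplexity)
def project(data, method_name, perplexity=30):
    if method_name == 'PCA':
        method = decomposition.PCA(n_components=2)
    elif method_name == 'MDS':
        method = manifold.MDS(n_components=2)
    else:
        method = manifold.TSNE(n_components=2, perplexity=perplexity)
    return method.fit_transform(data)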
Example #7
import streamlit as st
import time
# import numpy as np
# import pandas as pd
# from PIL import Image

# heading
st.title('Streamlit 入門')

st.write('プログレスバー')
'Start!!'
latest_iteration = st.empty() # empty placeholder element
bar = st.progress(0)

for i in range(100):
  latest_iteration.text(f'Iteration {i+1}')
  bar.progress(i + 1)
  time.sleep(0.1)
'Done!!'

# text
# st.write('DataFrame')

# df = pd.DataFrame(
#   np.random.rand(20,3), # uniform random numbers in [0, 1)
#   columns=['a','b','c'],
# )

# render the df
# st.write(df)
Example #8

def dashboard():
    ''' Main code to display the streamlit dashboard '''

    # get score data
    df_scores_full, scorenames = load_real_data()

    # descriptive names for each score
    scorenames_desc_manual = {
        "gmap_score": "Menschen an Haltestellen des ÖPNV",
        "gmap_supermarket_score": "Besucher in Supermärkten",
        "hystreet_score": "Fußgänger in Innenstädten",
        "zug_score": "DB Züge",
        "bike_score": "Fahrradfahrer",
        "bus_score": "ÖPV Busse",
        "national_score": "ÖPV IC-Züge",
        "suburban_score": "ÖPV Nahverkehr",
        "regional_score": "ÖPV Regionalzüge",
        "nationalExpress_score": "ÖPV ICE-Züge",
        "webcam_score": "Fußgänger auf öffentlichen Webcams",
        "tomtom_score": "Autoverkehr",
        "airquality_score": "Luftqualität"
    }
    # very short axis labels for each score
    scorenames_axis_manual = {
        "gmap_score": "Menschen",
        "gmap_supermarket_score": "Besucher",
        "hystreet_score": "Fußgänger",
        "zug_score": "Züge",
        "bike_score": "Fahrradfahrer",
        "bus_score": "Busse",
        "national_score": "IC-Züge",
        "suburban_score": "Nahverkehr",
        "regional_score": "Regionalzüge",
        "nationalExpress_score": "ICE-Züge",
        "webcam_score": "Fußgänger",
        "tomtom_score": "Traffic Index",
        "airquality_score": "Luftqualität"
    }

    # for scores not in the hardcoded list above
    # default to their scorename as a fallback
    scorenames_desc = {}
    scorenames_axis = {}
    for scorename in scorenames:
        if scorename in scorenames_desc_manual:
            scorenames_desc[scorename] = scorenames_desc_manual[scorename]
        else:
            scorenames_desc[scorename] = scorename
        if scorename in scorenames_axis_manual:
            scorenames_axis[scorename] = scorenames_axis_manual[scorename]
        else:
            scorenames_axis[scorename] = scorename

    st.markdown('''
        Die Maßnahmen gegen COVID19 wie Kontaktverbote und geschlossene Geschäfte haben große Änderungen in unserem Alltag mit sich gebracht. Wir sehen dies jeden Tag, wenn wir vor die Haustür gehen. Aber wie ist die Lage im Rest des Landes? Wird Social Distancing überall gleich strikt befolgt? Sinkt die Zurückhaltung am Wochenende oder bei gutem Wetter? Sind tatsächlich mehr/weniger Menschen im Park unterwegs? Diese Fragen sind sehr schwer direkt zu beantworten, aber wir können versuchen, **indirekt** Erkenntnisse darüber zu gewinnen, indem wir verschiedene Indikatoren betrachten.

        Dazu setzen wir auf unterschiedliche Datenquellen, um ein möglichst umfassendes Bild zu zeichnen. 
        Wo es möglich ist, berechnen wir einen einfach verständlichen relativen Wert. Dabei entspricht **100%  dem Normal-Wert vor der COVID19-Pandemie**, also bevor die Bürger zu Social Distancing aufgerufen wurden. Ein kleiner Wert weist darauf hin, dass in unserer Datenquelle eine Verringerung der Aktivität gemessen wurde. Wenn eine relative Angabe nicht möglich ist (z.B. weil unsere Daten nicht weit genug in die Vergangenheit reichen um zu bestimmen was ein "normales" Aktivitätsniveau ist) werden absolute Werte angegeben.

        Im Folgenden kannst Du unseren Datensatz auf unterschiedliche Weise interaktiv erkunden:
        - <a href="#map">Aktuelle Deutschlandkarte</a>
        - <a href="#timeline">Zeitlicher Verlauf</a>
        - <a href="#histogram">Verteilung über alle Landkreise</a>
    ''',
                unsafe_allow_html=True)

    # MAP DESCRIPTION
    st_map_desc = st.empty()

    # Selection box for the map
    df_scores, selected_score, selected_score_desc, selected_score_axis, use_states, use_states_select, countys, latest_date = detail_score_selector(
        df_scores_full,
        scorenames_desc,
        scorenames_axis,
        allow_county_select=False,
        allow_detail_select=True,
        key='map',
        default_detail_index=0,
        default_score="hystreet_score")

    st_map_desc.markdown('''
        ---
        ## Aktuelle Karte vom {datum}<span id="map"></span>    
        In der Karte siehst Du wie sich die COVID-19-Maßnahmen auf die verschiedenen **{regionen}** in Deutschland auswirkt. Angezeigt werden Daten über **{datasource}**. Du kannst die Datenquelle über die Schaltflächen ändern.
    '''.format(regionen=use_states_select,
               datasource=selected_score_desc,
               datum=datetime.datetime.strptime(
                   latest_date, "%Y-%m-%d").strftime("%d.%m.%Y")),
                         unsafe_allow_html=True)

    # DRAW MAP
    map_chart = get_map(df_scores, selected_score, selected_score_axis,
                        selected_score_desc, use_states, latest_date)
    # pass a copy, otherwise Streamlit gives a Cached Object Mutated warning
    st.altair_chart(map_chart.copy())

    # MAP LEGEND
    if selected_score == "airquality_score":
        st.image("images/legende_airquality.png")
    elif selected_score in ["webcam_score", "tomtom_score"]:
        pass  # no legend
    else:
        st.image("images/legende.png")

    # TIMELINE DESCRIPTION
    st_timeline_desc = st.empty()

    # Selection box for the timeline
    df_scores2, selected_score2, selected_score_desc2, selected_score_axis2, use_states2, use_states_select2, countys2, latest_date2 = detail_score_selector(
        df_scores_full,
        scorenames_desc,
        scorenames_axis,
        allow_county_select=True,
        allow_detail_select=True,
        key='timeline',
        default_detail_index=1,
        default_score="hystreet_score")

    st_timeline_desc.markdown('''
        ---
        ## Zeitlicher Verlauf <span id="timeline"></span>   
        Hier kannst Du den zeitlichen Verlauf der gewählten Datenquelle für verschiedene **{regionen}** in Deutschland vergleichen. Angezeigt werden Daten über **{datasource}**. Du kannst die Datenquelle über die Schaltflächen ändern.
        
        **Sieh doch mal nach wie die Lage in Deiner Region ist!**
    '''.format(regionen=use_states_select2, datasource=selected_score_desc2),
                              unsafe_allow_html=True)

    timeline = get_timeline_plots(df_scores2, selected_score2,
                                  selected_score_axis2, selected_score_desc2,
                                  use_states2, countys2)
    if timeline is not None:
        # pass a copy, otherwise Streamlit gives a Cached Object Mutated warning
        st.altair_chart(timeline.copy())

    # DRAW HISTOGRAMS
    # ===============
    st_histo_desc = st.empty()

    # Selection box for the timeline
    df_scores3, selected_score3, selected_score_desc3, selected_score_axis3, use_states3, use_states_select3, countys3, latest_date3 = detail_score_selector(
        df_scores_full,
        scorenames_desc,
        scorenames_axis,
        allow_county_select=False,
        allow_detail_select=False,
        key='histo',
        default_score="hystreet_score")

    st_histo_desc.markdown('''
        ---
        ## Verteilung über alle Landkreise <span id="histogram"></span>
        Hier kannst Du einen Überblick bekommen, wie die Verteilung der Daten über **{datasource}** für alle verfügbaren Landkreise ist. Du kannst die Datenauswahl weiter unten im Menü ändern. 
        
        Die pinke Linie ist der **Median**, das heißt jeweils die Hälfte aller Landkreise hat einen höheren beziehungsweise niedrigeren Score als dieser Wert. Im unteren Graph ist der zeitliche Verlauf des Medians dargestellt. **In diesem Graph kannst Du das Datum auswählen, für welches Dir die Verteilung über alle Landkreise angezeigt wird.**
        '''.format(datasource=selected_score_desc3),
                           unsafe_allow_html=True)
    c = get_histograms(df_scores3, selected_score3, selected_score_desc3,
                       selected_score_axis3)
    st.altair_chart(c)
    st.markdown('''
        Zur zeitlichen Einordung: Die [Vereinbarung zwischen der Bundesregierung und den Regierungschefinnen und Regierungschefs der Bundesländer angesichts der Corona-Epidemie in Deutschland](https://www.bundeskanzlerin.de/bkin-de/aktuelles/vereinbarung-zwischen-der-bundesregierung-und-den-regierungschefinnen-und-regierungschefs-der-bundeslaender-angesichts-der-corona-epidemie-in-deutschland-1730934) wurde am 16. März veröffentlicht.
    ''')

    # tracking javascript
    st.markdown("""   
    <!-- Matomo Image Tracker-->
    <img src="https://matomo.everyonecounts.de/matomo.php?idsite=1&amp;rec=1&amp;action_name=Dashboard" style="border:0" alt="" />
    <!-- End Matomo -->
    """,
                unsafe_allow_html=True)
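
A pattern worth noting in this example: st.empty() placeholders reserve a slot in layout order and are filled only after the selector widgets below them have run, so the descriptive text can reference the user's selection. Distilled into a sketch:

import streamlit as st

desc = st.empty()                                  # reserve the slot above the widget
choice = st.selectbox('Datenquelle:', ['a', 'b'])  # widget rendered below the slot
desc.markdown(f'You are viewing **{choice}**')     # fill the slot afterwards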
Example #9
def app_sst(model_path: str, lm_path: str, lm_alpha: float, lm_beta: float,
            beam: int):
    webrtc_ctx = webrtc_streamer(
        key="speech-to-text",
        mode=WebRtcMode.SENDONLY,
        audio_receiver_size=1024,
        client_settings=ClientSettings(
            rtc_configuration={
                "iceServers": [{
                    "urls": ["stun:stun.l.google.com:19302"]
                }]
            },
            media_stream_constraints={
                "video": False,
                "audio": True
            },
        ),
    )

    status_indicator = st.empty()

    if not webrtc_ctx.state.playing:
        return

    status_indicator.write("Loading...")
    text_output = st.empty()
    stream = None

    while True:
        if webrtc_ctx.audio_receiver:
            if stream is None:
                from deepspeech import Model

                model = Model(model_path)
                model.enableExternalScorer(lm_path)
                model.setScorerAlphaBeta(lm_alpha, lm_beta)
                model.setBeamWidth(beam)

                stream = model.createStream()

                status_indicator.write("Model loaded.")

            sound_chunk = pydub.AudioSegment.empty()
            try:
                audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
            except queue.Empty:
                time.sleep(0.1)
                status_indicator.write("No frame arrived.")
                continue

            status_indicator.write("Running. Say something!")

            for audio_frame in audio_frames:
                sound = pydub.AudioSegment(
                    data=audio_frame.to_ndarray().tobytes(),
                    sample_width=audio_frame.format.bytes,
                    frame_rate=audio_frame.sample_rate,
                    channels=len(audio_frame.layout.channels),
                )
                sound_chunk += sound

            if len(sound_chunk) > 0:
                sound_chunk = sound_chunk.set_channels(1).set_frame_rate(
                    model.sampleRate())
                buffer = np.array(sound_chunk.get_array_of_samples())
                stream.feedAudioContent(buffer)
                text = stream.intermediateDecode()
                text_output.markdown(f"**Text:** {text}")
        else:
            status_indicator.write("AudioReciver is not set. Abort.")
            break
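
The example lazily builds the DeepSpeech model inside the loop, guarded by `stream is None`. An alternative sketch (an assumption, not the author's code) caches the loaded model across reruns with the legacy st.cache API, using only the calls shown above:

import streamlit as st

@st.cache(allow_output_mutation=True)
def load_model(model_path, lm_path, lm_alpha, lm_beta, beam):
    from deepspeech import Model

    model = Model(model_path)
    model.enableExternalScorer(lm_path)
    model.setScorerAlphaBeta(lm_alpha, lm_beta)
    model.setBeamWidth(beam)
    return model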
Example #10
def ProcessFrames(vf, stop, hyp, tfile, phrase_id, signer_ids, state):
    """

        main loop for processing video file:

        Params

        vf = VideoCapture Object

        tracker = Tracker Object that was instantiated 

        obj_detector = Object detector (model and some properties) 

    """

    try:
        num_frames = int(vf.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(vf.get(cv2.CAP_PROP_FPS))
        print('Total number of frames to be processed:', num_frames,
        '\nFrame rate (frames per second):', fps)
    except Exception:
        print('Could not determine the number of frames and FPS!')

    frame_counter = 0
    _stop = stop.button("stop")
    new_car_count_txt = st.empty()
    fps_meas_txt = st.empty()
    bar = st.progress(frame_counter)
    stframe = st.empty()
    start = time.time()
    pred_txt = st.empty()
    upload = st.empty()

    while vf.isOpened():
        # if frame is read correctly ret is True

        ret, frame = vf.read()
        if _stop:
            break

        if not ret:
            st.markdown("""

                <style>

                .big-font {

                font-size:25px !important;

              }

              </style>

              """, unsafe_allow_html=True)

            st.markdown(f'**Prediction**: <p class="big-font">{hyp[0]} </p>', unsafe_allow_html=True)

            with open("/app/app/upload.txt") as f:
                can_upload = int(f.readline())

            if phrase_id is None or not can_upload:
                st.info("Please log in and specify the phrase to upload the video to our database!")
            #pred_txt.markdown(f'**Prediction:** {hyp[0]}')
            print("Can't receive frame (stream end?). Exiting ...")
            break

        end = time.time()

        frame_counter += 1
        fps_measurement = frame_counter/(end - start)
        bar.progress(frame_counter/num_frames)
        frm = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        stframe.image(cv2.resize(frm, (224, 224)))

        time.sleep(0.02)
Example #11

def detail_score_selector(df_scores_in,
                          scorenames_desc,
                          scorenames_axis,
                          allow_county_select,
                          allow_detail_select,
                          key,
                          default_detail_index=0,
                          default_score="hystreet_score"):
    df_scores = df_scores_in.copy()

    # get counties
    county_names, county_ids, state_names, state_ids = load_topojson()
    id_to_name = {cid: county_names[idx] for idx, cid in enumerate(county_ids)}
    state_id_to_name = {
        cid: state_names[idx]
        for idx, cid in enumerate(state_ids)
    }
    state_name_to_id = {
        state_names[idx]: cid
        for idx, cid in enumerate(state_ids)
    }

    st_desc = st.empty()

    # LEVEL OF DETAIL SELECT
    if allow_detail_select:
        use_states_select = st.radio('Detailgrad:',
                                     ('Bundesländer', 'Landkreise'),
                                     index=default_detail_index,
                                     key=key)
    else:
        use_states_select = 'Landkreise'
    use_states = use_states_select == 'Bundesländer'

    # SCORE SELECT
    sorted_desc = sorted(list(scorenames_desc.values()))
    selected_score_desc = st.selectbox(
        'Datenquelle:',
        sorted_desc,
        index=sorted_desc.index(
            scorenames_desc[default_score]),  # default value in sorted list
        key=key)
    inverse_scorenames_desc = {
        desc: name for name, desc in scorenames_desc.items()
    }
    selected_score = inverse_scorenames_desc[selected_score_desc]
    if selected_score == "webcam_score":
        selected_score_axis = scorenames_axis[
            selected_score] + ' (Tagesdurchschnitt)'  # absolute values
    elif selected_score == "airquality_score":
        selected_score_axis = scorenames_axis[
            selected_score] + ' (AQI)'  # absolute values
    else:
        selected_score_axis = scorenames_axis[selected_score] + ' (%)'

    latest_date = pd.Series(
        df_scores[df_scores[selected_score] > 0]["date"]).values[-1]

    # COUNTY SELECT
    if (not use_states) and allow_county_select:
        available_countys = [
            value for value in county_names if value in df_scores[
                df_scores[selected_score] >= 0]["name"].values
        ]
        if len(available_countys) > 1:
            default = available_countys[:2]
        else:
            default = []
        countys = st.multiselect('Wähle Landkreise aus:',
                                 options=available_countys,
                                 default=default,
                                 key=key)
    else:
        countys = []

    # Show additional information text for certain scores
    desc = ""
    if selected_score in ["webcam_score", "airquality_score", "tomtom_score"]:
        desc += '''
                
        Für diesen Datensatz besitzen wir leider keine Referenz-Daten vor der COVID-Pandemie, daher werden **Absolutwerte** angezeigt und Werte zwischen {regionen}n lassen sich nicht vergleichen.
        '''.format(regionen=use_states_select)
    if selected_score == "airquality_score":
        desc += '''
                
        Die Daten kommen vom [World Air Quality Project](https://aqicn.org/here/de/) und die Skala richtet sich nach dem ["Air Quality Index" (AQI)](https://aqicn.org/scale/de/).
        '''
    elif selected_score == "tomtom_score":
        desc += '''
                
        Die Daten stammen vom [TomTom Traffic Index](https://www.tomtom.com/). Die Prozentangaben beschreiben die Zeit, die man aufgrund der Verkehrslage länger unterwegs ist als auf einer komplett freien Straße. Ein Wert von 50% bedeutet also, dass man für einen Trip, der normalerweise 30 Minuten dauern würde, aufgrund der Verkehrslage 15 Minuten mehr benötigt, nämlich 45 Minuten. ([Quelle](https://www.tomtom.com/en_gb/traffic-index/about/))
        '''
    if selected_score in ["gmap_score", "tomtom_score"]:
        desc += '''
                
        <div style="background:#FEE59F; border-radius:5px; padding:10px;"><b>Hinweis:</b> Für diesen Datensatz können wir leider keine aktuellen Daten mehr anbieten.</div>
        '''.format(regionen=use_states_select)
    st_desc.markdown(desc, unsafe_allow_html=True)

    # Prepare df_scores according to Landkreis/Bundesland selection
    if use_states:
        # aggregate state data
        df_scores['state_id'] = df_scores.apply(
            lambda x: str(x['id'])[:2],
            axis=1)  # get state id (first two letters of county id)
        df_scores['name'] = df_scores.apply(
            lambda x: state_id_to_name[x['state_id']],
            axis=1)  # get state name
        df_scores = df_scores.groupby(
            ['name',
             'date']).mean()  # group by state and date, calculate mean scores
        df_scores = df_scores.round(1)  #round
        df_scores['id'] = df_scores.apply(
            lambda x: state_name_to_id[x.name[0]],
            axis=1)  # re-add state indices
        df_scores = df_scores.replace([np.inf, -np.inf], np.nan)  # remove infs
        df_scores = df_scores.reset_index(
        )  # make index columns into regular columns
        df_scores["airquality_desc"] = df_scores.apply(
            lambda x: get_airquality_desc(x["airquality_score"]), axis=1)

    df_scores["date"] = pd.to_datetime(df_scores["date"])
    df_scores = df_scores.round(1)

    return (df_scores, selected_score, selected_score_desc,
            selected_score_axis, use_states, use_states_select, countys,
            latest_date)
Example #12
def main():
    #st.set_page_config(page_title = "Continuous Sign Language Recognition")
    st.markdown("### Model Architecture")

    st.image(
        f'/app/architecture.png',
        caption='Architecture overview',
        use_column_width=True
    )

    base_size = [256, 256]
    crop_size = [224, 224]
    random_crop = False
    p_drop = 0.5
    random_drop = False

    transform_phoenix = transforms.Compose(
    [
        transforms.Resize(base_size),
        transforms.RandomCrop(crop_size)
        if random_crop
        else transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        transforms.Normalize([0.53724027, 0.5272855, 0.51954997], [1, 1, 1])
    ]
    )

    transform_krsl = transforms.Compose(
    [
        transforms.Resize(base_size),
        transforms.RandomCrop(crop_size)
        if random_crop
        else transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        #transforms.Normalize([0.53724027, 0.5272855, 0.51954997], [1, 1, 1])
    ]
    )


    state = SessionState.get(upload_key = None, enabled = True,
                             start = False, conf = 70, nms = 50, run = False,
                             upload_db = False)
    hide_streamlit_widgets()

    """

    # Continuous Sign Language Recogntion

    """

    with open("/app/phrases.txt", "r") as f:
        lines = f.readlines()

    my_phrases = [""] + [line.strip().split("\t")[1] for line in lines]

    with open("/app/app/test_ids.txt", "r") as f:
        ids = f.readlines()

    signer_ids = [""] + [id.strip() for id in ids]
    phrase_dict = {line.strip().split("\t")[1]:line.strip().split("\t")[0] for line in lines}

    with st.sidebar:
        """
        ## :floppy_disk: Stochastic CSLR model
        SOTA among single cue models
        """

        #state.conf, state.nms = parameter_sliders(

        #    keys, state.enabled, value = [state.conf, state.nms])

        st.text("")

        st.text("")

        st.text("")

        lang = st.radio("Select language: ", ('Russian', 'German'))

        backbone = st.sidebar.selectbox(
            label = 'Please choose the backbone for Stochastic CSLR',
            options = ['ResNet18'],
            index = 0,
            key = 'backbone'
        )

        phrase = st.sidebar.selectbox(
            label = "Please select the phrase for the K-RSL dataset here",
            options = my_phrases,
            index = 0,
            key = 'phrase'
        )

        signer_id = st.sidebar.selectbox(
            label = "Please select the signer id for the K-RSL dataset here",
            options = signer_ids,
            index = 0,
            key = 'signer_id'
        )

    upload = st.empty()
    start_button = st.empty()
    stop_button = st.empty()

    with upload:
        f = st.file_uploader('Upload Video file (mpeg/mp4 format)', key = state.upload_key)

    if lang == "Russian" and len(phrase) != 0 and len(signer_id) != 0:
        video_path = "/app/test_videos/" + str(phrase_dict[phrase]) + "/" + "P" + str(signer_id) + "_" + "S" + str(phrase_dict[phrase]) + "_" + "00.mp4"

        if not os.path.exists(video_path):
            st.info("The video is not in the database!")
            return

        vf = cv2.VideoCapture(video_path)
        frames = get_frames(video_path=video_path)
        indices = sample_indices(n=len(frames), p_drop=p_drop, random_drop=random_drop)
        frames = [Image.fromarray(frames[i].asnumpy(), 'RGB') for i in indices]

        if lang == "Russian":
            frames = map(transform_krsl, frames)
        else:
            frames = map(transform_phoenix, frames)

        frames = np.stack(list(frames))

        if lang == "Russian":
            epoch = 18
            vocab = create_vocab(split="train_rus", sep=",")
        else:
            vocab = create_vocab(split="train_ger", sep="|")

            if backbone == "ResNet18":
                epoch = 100
            else:
                epoch = 200

        hyp = inference(epoch, vocab, frames, lang)

        if not state.run:
            start_button.empty()
            start = start_button.button("PREDICT")
            state.start = start

        if state.start:
            start_button.empty()
            state.enabled = False

            if state.run:
                phrase_id = phrase_dict.get(phrase)  # None if the phrase is unknown

                state.upload_key = str(randint(1000, int(1e6)))
                state.enabled = True
                state.run = False
                ProcessFrames(vf, stop_button, hyp, video_path, phrase_id, signer_ids, state)
            else:
                state.run = True
                trigger_rerun()


    if f is not None:
        tfile  = tempfile.NamedTemporaryFile(delete = False)
        tfile.write(f.read())

        upload.empty()
        vf = cv2.VideoCapture(tfile.name)
        frames = get_frames(video_path=tfile.name)
        indices = sample_indices(n=len(frames), p_drop=p_drop, random_drop=random_drop)
        frames = [Image.fromarray(frames[i].asnumpy(), 'RGB') for i in indices]

        if lang == "Russian":
            frames = map(transform_krsl, frames)
        else:
            frames = map(transform_phoenix, frames)

        frames = np.stack(list(frames))

        if lang == "Russian":
            epoch = 18
            vocab = create_vocab(split="train_rus", sep=",")
        else:
            vocab = create_vocab(split="train_ger", sep="|")

            if backbone == "ResNet18":
                epoch = 100
            else:
                epoch = 200

        hyp = inference(epoch, vocab, frames, lang)

        if not state.run:
            start_button.empty()
            start = start_button.button("PREDICT ")
            state.start = start

            with open("/app/app/upload.txt") as f:
                can_upload = int(f.readline())
            phrase_id = None
            if phrase in phrase_dict:
                phrase_id = phrase_dict[phrase]

            if can_upload and phrase_id is not None:
                up = upload.button("UPLOAD TO DATABASE")
                state.upload_db = up

        if state.upload_db:
            with open("/app/app/test_ids.txt", "a") as f:
                if "51" not in signer_ids:
                    f.write("51\n")

                shutil.move(tfile.name, f"/app/test_videos/{phrase_id}/P51_S{phrase_id}_00.mp4")
                st.info("The data was successfully uploaded to the database!")
            state.run = False



        if state.start:
            start_button.empty()
            state.enabled = False

            if state.run:
                f.close()
                state.upload_key = str(randint(1000, int(1e6)))
                state.enabled = True
                state.run = False
                phrase_id = None

                if phrase in phrase_dict:
                    phrase_id = phrase_dict[phrase]

                ProcessFrames(vf, stop_button, hyp, tfile, phrase_id, signer_ids, state)
            else:
                state.run = True
                trigger_rerun()
Example #13
def run_app():
    # GUI
    epoch_loc = st.empty()
    prog_bar = st.empty()
    loss_loc = st.empty()
    global_loss_loc = st.empty()
    col1, col2 = st.beta_columns(2)
    img_loc = col1.empty()
    stats_loc = col2.empty()
    image_meta_loc = st.empty()
    right_chart = st.empty()
    loss_chart = st.empty()
    glob_loss_chart = st.empty()
    # right_chart = st.empty()
    test_progress_bar = st.empty()
    testing_chart = st.empty()
    test_stats = st.empty()

    cuda = torch.device('cuda')
    cpu = torch.device('cpu')
    net = Net()
    net.to(cuda)

    criterion = nn.CrossEntropyLoss()
    # criterion = nn.MSELoss()
    #optimizer = optim.AdamW(net.parameters(), lr=0.01)
    optimizer = optim.AdamW(net.parameters(), lr=0.001)

    dataset = EegDataset()

    EPOCHS = 200
    losses = deque(maxlen=100)
    global_losses = deque(maxlen=100)
    right_list = deque(maxlen=100)
    wrong_list = deque(maxlen=100)
    # st.write(list(dataset))
    for epoch in range(EPOCHS):
        i = 1
        epoch_loc.write(f"EPOCH:\t{epoch}/{EPOCHS-1}")
        global_loss = torch.tensor([0.0], device=cuda)
        optimizer.zero_grad()
        right = 0
        wrong = 0
        for image in dataset:
            # sleep(1)
            prog_bar.progress(i / len(dataset))
            i += 1
            optimizer.zero_grad()
            x = image.data
            img_loc.image(image.data.numpy(), width=200)
            image_meta_loc.write(f"ID:\t{image.id}  \nCategory:\t{image.category}")

            # out = net(x.cuda().float())#.unsqueeze(0)
            out = net(x.cuda().float()).unsqueeze(0)

            out_id = torch.argmax(out.detach().cpu(), 1)
            target_id = dataset.categories.index(image.category)
            target = torch.zeros(len(dataset.categories)).float()
            target[target_id] = 1.0

            target = target.cuda()
            # target = torch.tensor([dataset.categories.index(image.category)]).cuda()
            # stats_loc.write(f"OUTPUT:\t{torch.argmax(out.detach().cpu(), 1)}  \nTARGET:\t{target.detach().cpu()}")
            stats_loc.write(f"OUTPUT:\t{out_id}  \nTARGET:\t{target_id}")

            # out = torch.round(out)
            # target = torch.round(target)
            # loss = criterion(out, target)
            loss = criterion(out, torch.tensor([target_id]).cuda())

            if out_id == target_id:
                right += 1
                loss = loss * (1 / len(dataset))  # down-weight samples the net already gets right
            else:
                wrong += 1

            losses.append(loss.detach().cpu().numpy())
            loss_chart.line_chart(
                pd.DataFrame(losses, columns=['loss',])
            )
            global_loss += loss
            loss.backward()
            optimizer.step()
            loss_loc.write(f"LOSS:\t{loss.detach().cpu()}  \nRIGHT:\t{right}/{len(dataset)}  \nWRONG:\t{wrong}/{len(dataset)}")
            # print(loss)
        right_list.append(right)
        wrong_list.append(wrong)
        rc_data = pd.DataFrame(np.array([[r,w] for r,w in zip(right_list, wrong_list)]), columns=['right', 'wrong'])
        right_chart.line_chart(rc_data)
        # wc_data = pd.DataFrame(np.array(wrong_list), columns=['wrong',])
        global_loss_loc.write(f"GLOBAL LOSS:\t{global_loss.detach().cpu()}  \nGLOB AVERAGE LOSS:\t{global_loss.detach().cpu()/len(dataset)}")
        global_losses.append(global_loss.detach().cpu().numpy())
        glob_loss_chart.line_chart(
            pd.DataFrame(global_losses, columns=['global_loss', ])
        )
        # global_loss.backward()
        # optimizer.step()

    # TESTING PHASE:
    dataset = EegDataset(testing=True)
    right = 0
    wrong = 0
    st.write('Testing / evaluating the trained network')
    i = 1
    with torch.no_grad():
        for image in dataset:
            test_progress_bar.progress(i / len(dataset))
            i += 1
            x = image.data
            out = net(x.cuda().float())
            out_id = torch.argmax(out.detach().cpu(), 0)
            target_id = dataset.categories.index(image.category)
            if out_id == target_id:
                right += 1
            else:
                wrong += 1
            # chart_data = pd.DataFrame(np.array([[right, wrong]]), columns=['right', wrong])
            # testing_chart.bar_chart(chart_data)
            test_stats.write(f'RIGHT: {right}/{len(dataset)}  \nWRONG: {wrong}/{len(dataset)}')
Example #14
st.sidebar.markdown("""
                    ### Location of the property:
                    """)

property_address = st.sidebar.text_input('Property Address',
                                         value='R. Itapeva, 636 - Bela Vista')
property_municipality = st.sidebar.selectbox('Property municipality',
                                             ['São Paulo'])
property_metropolitan_region = st.sidebar.selectbox('Metropolitan region',
                                                    ['São Paulo'])

run_button = st.sidebar.button('Get result')
result = st.sidebar.empty()

# main panels
text_description = st.empty()

if run_button:
    # t_rex = st.empty()
    # t_rex.markdown('<iframe width="560" height="315" src="http://wayou.github.io/t-rex-runner/"></iframe>', unsafe_allow_html=True)
    # spinner_ = st.markdown('<div class="container"><h2>Spinners</h2><p>To create a spinner/loader, use the <code>.spinner-border</code> class:</p><div class="spinner-border"></div></div>', unsafe_allow_html=True)
    text_description.empty()

    with st.spinner('Please wait...'):
        progress_bar = st.empty()
        info_progress = st.empty()
        progress_bar.progress(0)

        info_progress.text('Geocoding address...')
        property_location = utils.geo_code(
            property_address,
Example #15
                "count",
                "message count",
                "msg count",
                "msg",
            ]:
                print(
                    f"Received message count: {receiver.received_messages}\n"
                    + f"Produced message count: {producer.produced_messages}"
                )

        except KeyboardInterrupt:
            thread_end_event.set()
            receiver_thread.join()
            producer_thread.join()
            break


participant_list = [Participant() for i in range(20)]
game = Thread(target=f1_pipeline, args=(participant_list,))
game.daemon = True


if __name__ == "__main__":
    st.title("F1 Dashboard")
    game.start()

    empty_element = st.empty()
    while True:
        empty_element.text(dict(participant_list[0]))
        time.sleep(1)
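
dict(participant_list[0]) implies Participant iterates over (key, value) pairs; a minimal class satisfying that contract (an assumption, the real class is not shown) looks like:

class Participant:
    def __init__(self):
        self.position = 0
        self.lap_time = 0.0

    def __iter__(self):
        # dict() accepts an iterable of (key, value) pairs
        yield from vars(self).items()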
Example #16
def app_sst_with_video(model_path: str, lm_path: str, lm_alpha: float,
                       lm_beta: float, beam: int):
    class AudioProcessor(AudioProcessorBase):
        frames_lock: threading.Lock
        frames: deque

        def __init__(self) -> None:
            self.frames_lock = threading.Lock()
            self.frames = deque([])

        async def recv_queued(self,
                              frames: List[av.AudioFrame]) -> List[av.AudioFrame]:
            with self.frames_lock:
                self.frames.extend(frames)

            # Return empty frames to be silent.
            new_frames = []
            for frame in frames:
                input_array = frame.to_ndarray()
                new_frame = av.AudioFrame.from_ndarray(
                    np.zeros(input_array.shape, dtype=input_array.dtype),
                    layout=frame.layout.name,
                )
                new_frame.sample_rate = frame.sample_rate
                new_frames.append(new_frame)

            return new_frames

    webrtc_ctx = webrtc_streamer(
        key="speech-to-text-w-video",
        mode=WebRtcMode.SENDRECV,
        audio_processor_factory=AudioProcessor,
        client_settings=ClientSettings(
            rtc_configuration={
                "iceServers": [{
                    "urls": ["stun:stun.l.google.com:19302"]
                }]
            },
            media_stream_constraints={
                "video": True,
                "audio": True
            },
        ),
    )

    status_indicator = st.empty()

    if not webrtc_ctx.state.playing:
        return

    status_indicator.write("Loading...")
    text_output = st.empty()
    stream = None

    while True:
        if webrtc_ctx.audio_processor:
            if stream is None:
                from deepspeech import Model

                model = Model(model_path)
                model.enableExternalScorer(lm_path)
                model.setScorerAlphaBeta(lm_alpha, lm_beta)
                model.setBeamWidth(beam)

                stream = model.createStream()

                status_indicator.write("Model loaded.")

            sound_chunk = pydub.AudioSegment.empty()

            audio_frames = []
            with webrtc_ctx.audio_processor.frames_lock:
                while len(webrtc_ctx.audio_processor.frames) > 0:
                    frame = webrtc_ctx.audio_processor.frames.popleft()
                    audio_frames.append(frame)

            if len(audio_frames) == 0:
                time.sleep(0.1)
                status_indicator.write("No frame arrived.")
                continue

            status_indicator.write("Running. Say something!")

            for audio_frame in audio_frames:
                sound = pydub.AudioSegment(
                    data=audio_frame.to_ndarray().tobytes(),
                    sample_width=audio_frame.format.bytes,
                    frame_rate=audio_frame.sample_rate,
                    channels=len(audio_frame.layout.channels),
                )
                sound_chunk += sound

            if len(sound_chunk) > 0:
                sound_chunk = sound_chunk.set_channels(1).set_frame_rate(
                    model.sampleRate())
                buffer = np.array(sound_chunk.get_array_of_samples())
                stream.feedAudioContent(buffer)
                text = stream.intermediateDecode()
                text_output.markdown(f"**Text:** {text}")
        else:
            status_indicator.write("AudioReciver is not set. Abort.")
            break
Example #17
if file:
    img = read_from_file(file)

    grid = img_to_grid(img,
                       detector_model,
                       recognizer_model,
                       plot_path=None,
                       print_result=False)

    cells = [cell for row in grid for cell in row]

    initial_board = Board(cells)
    set_initially_available(initial_board.cells)

    solving_time = st.empty()

    html_board = st.markdown("<center>" + initial_board.html() + "</center>",
                             unsafe_allow_html=True)

    time.sleep(0.5)

    to_solve_board = initial_board.copy()
    start = time.time()
    n_iter, _ = backtracking_solve(to_solve_board)
    solve_duration = time.time() - start

    solving_time.markdown(
        "<center>" + "<h3>Solved in %.5f seconds and %s iterations</h3>" %
        (solve_duration, n_iter) + "</center>",
        unsafe_allow_html=True,
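
Board and backtracking_solve come from the project itself (the real solver also counts iterations); for reference, a textbook backtracking solver over a plain 9x9 grid of ints (0 = empty) is:

def backtracking_solve_sketch(board):
    def ok(r, c, v):
        if v in board[r] or any(board[i][c] == v for i in range(9)):
            return False
        br, bc = 3 * (r // 3), 3 * (c // 3)
        return all(board[i][j] != v for i in range(br, br + 3) for j in range(bc, bc + 3))

    for r in range(9):
        for c in range(9):
            if board[r][c] == 0:
                for v in range(1, 10):
                    if ok(r, c, v):
                        board[r][c] = v
                        if backtracking_solve_sketch(board):
                            return True
                        board[r][c] = 0   # undo and try the next candidate
                return False              # no candidate fits this cell
    return True                           # no empty cell left: solved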
Example #18
    def update_doctor(self):
        id = st.text_input('Enter Doctor ID of the doctor to be updated')
        if id == '':
            st.empty()
        elif not verify_doctor_id(id):
            st.error('Invalid Doctor ID')
        else:
            st.success('Verified')
            conn, c = db.connection()

            # shows the current details of the doctor before updating
            with conn:
                c.execute(
                    """
                    SELECT *
                    FROM doctor_record
                    WHERE id = :id;
                    """,
                    { 'id': id }
                )
                st.write('Here are the current details of the doctor:')
                show_doctor_details(c.fetchall())

            st.write('Enter new details of the doctor:')
            department_id = st.text_input('Department ID')
            if department_id == '':
                st.empty()
            elif not department.verify_department_id(department_id):
                st.error('Invalid Department ID')
            else:
                st.success('Verified')
                self.department_id = department_id
                self.department_name = get_department_name(department_id)
            self.contact_number_1 = st.text_input('Contact number')
            contact_number_2 = st.text_input('Alternate contact number (optional)')
            self.contact_number_2 = contact_number_2 if contact_number_2 else None
            self.email_id = st.text_input('Email ID')
            self.qualification = st.text_input('Qualification')
            self.specialisation = st.text_input('Specialisation')
            self.years_of_experience = st.number_input('Years of experience', value = 0, min_value = 0, max_value = 100)
            self.address = st.text_area('Address')
            self.city = st.text_input('City')
            self.state = st.text_input('State')
            self.pin_code = st.text_input('PIN code')
            update = st.button('Update')

            # executing SQLite statements to update this doctor's record in the database
            if update:
                with conn:
                    c.execute(
                        """
                        SELECT date_of_birth
                        FROM doctor_record
                        WHERE id = :id;
                        """,
                        { 'id': id }
                    )

                    # converts date of birth to the required format for age calculation
                    dob = [int(d) for d in c.fetchone()[0].split('-')[::-1]]
                    dob = date(dob[0], dob[1], dob[2])
                    self.age = calculate_age(dob)

                with conn:
                    c.execute(
                        """
                        UPDATE doctor_record
                        SET age = :age, department_id = :dept_id,
                        department_name = :dept_name, contact_number_1 = :phone_1,
                        contact_number_2 = :phone_2, email_id = :email_id,
                        qualification = :qualification, specialisation = :specialisation,
                        years_of_experience = :experience, address = :address,
                        city = :city, state = :state, pin_code = :pin
                        WHERE id = :id;
                        """,
                        {
                            'id': id, 'age': self.age, 'dept_id': self.department_id,
                            'dept_name': self.department_name,
                            'phone_1': self.contact_number_1,
                            'phone_2': self.contact_number_2, 'email_id': self.email_id,
                            'qualification': self.qualification,
                            'specialisation': self.specialisation,
                            'experience': self.years_of_experience,
                            'address': self.address, 'city': self.city,
                            'state': self.state, 'pin': self.pin_code
                        }
                    )
                st.success('Doctor details updated successfully.')
                conn.close()
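
calculate_age is an app helper used by both add_doctor and update_doctor; a minimal implementation consistent with those call sites (a sketch, not necessarily the project's code) is:

from datetime import date

def calculate_age(dob: date) -> int:
    today = date.today()
    # subtract one if this year's birthday hasn't happened yet
    return today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))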
Example #19

def main():
    st.set_page_config(page_title = "Traffic Flow Counter",
                       page_icon=":vertical_traffic_light:")

    obj_detector = load_obj_detector(config, wt_file)
    tracker = tc.CarsInFrameTracker(num_previous_frames = 10, frame_shape = (720, 1080))

    state = SessionState.get(upload_key = None, enabled = True, 
    start = False, conf = 70, nms = 50, run = False)
    hide_streamlit_widgets()
    """
    #  Traffic Flow Counter :blue_car:  :red_car:
    Upload a video file to track and count vehicles. Don't forget to change parameters to tune the model!

    #### Features to be added in the future:
    + speed measurement
    + traffic density
    + vehicle type distribution
    """

    with st.sidebar:
        """
        ## :floppy_disk: Parameters  

        """
        state.conf, state.nms = parameter_sliders(
            keys, state.enabled, value = [state.conf, state.nms])
        
        st.text("")
        st.text("")
        st.text("")

        """
        #### :desktop_computer: [Source code in Github](https://github.com/aldencabajar/traffic_flow_counter)

        """

    # set model confidence and nms threshold
    if state.conf is not None:
        obj_detector.confidence = state.conf / 100
    if state.nms is not None:
        obj_detector.nms_threshold = state.nms / 100



    upload = st.empty()
    start_button = st.empty()
    stop_button = st.empty()

    with upload:
        f = st.file_uploader('Upload Video file (mpeg/mp4 format)', key = state.upload_key)
    if f is not None:
        tfile  = tempfile.NamedTemporaryFile(delete = True)
        tfile.write(f.read())

        upload.empty()
        vf = cv2.VideoCapture(tfile.name)

        if not state.run:
            start = start_button.button("start")
            state.start = start
        
        if state.start:
            start_button.empty()
            #state.upload_key = str(randint(1000, int(1e6)))
            state.enabled = False
            if state.run:
                tfile.close()
                f.close()
                state.upload_key = str(randint(1000, int(1e6)))
                state.enabled = True
                state.run = False
                ProcessFrames(vf, tracker, obj_detector, stop_button)
            else:
                state.run = True
                trigger_rerun()
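
trigger_rerun is another project helper; in Streamlit versions of this era it can be a thin wrapper (an assumption, not the author's code):

import streamlit as st

def trigger_rerun():
    # older apps raised RerunException by hand; the experimental API does the same
    st.experimental_rerun()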
Example #20
    def add_doctor(self):
        st.write('Enter doctor details:')
        self.name = st.text_input('Full name')
        gender = st.radio('Gender', ['Female', 'Male', 'Other'])
        if gender == 'Other':
            gender = st.text_input('Please mention')
        self.gender = gender
        dob = st.date_input('Date of birth (YYYY/MM/DD)')
        st.info('If the required date is not in the calendar, please type it in the box above.')
        self.date_of_birth = dob.strftime('%d-%m-%Y')       # converts date of birth to the desired string format
        self.age = calculate_age(dob)
        self.blood_group = st.text_input('Blood group')
        department_id = st.text_input('Department ID')
        if department_id == '':
            st.empty()
        elif not department.verify_department_id(department_id):
            st.error('Invalid Department ID')
        else:
            st.success('Verified')
            self.department_id = department_id
            self.department_name = get_department_name(department_id)
        self.contact_number_1 = st.text_input('Contact number')
        contact_number_2 = st.text_input('Alternate contact number (optional)')
        self.contact_number_2 = contact_number_2 if contact_number_2 else None
        self.aadhar_or_voter_id = st.text_input('Aadhar ID / Voter ID')
        self.email_id = st.text_input('Email ID')
        self.qualification = st.text_input('Qualification')
        self.specialisation = st.text_input('Specialisation')
        self.years_of_experience = st.number_input('Years of experience', value = 0, min_value = 0, max_value = 100)
        self.address = st.text_area('Address')
        self.city = st.text_input('City')
        self.state = st.text_input('State')
        self.pin_code = st.text_input('PIN code')
        self.id = generate_doctor_id()
        save = st.button('Save')

        # executing SQLite statements to save the new doctor record to the database
        if save:
            conn, c = db.connection()
            with conn:
                c.execute(
                    """
                    INSERT INTO doctor_record
                    (
                        id, name, age, gender, date_of_birth, blood_group,
                        department_id, department_name, contact_number_1,
                        contact_number_2, aadhar_or_voter_id, email_id,
                        qualification, specialisation, years_of_experience,
                        address, city, state, pin_code
                    )
                    VALUES (
                        :id, :name, :age, :gender, :dob, :blood_group, :dept_id,
                        :dept_name, :phone_1, :phone_2, :uid, :email_id, :qualification,
                        :specialisation, :experience, :address, :city, :state, :pin
                    );
                    """,
                    {
                        'id': self.id, 'name': self.name, 'age': self.age,
                        'gender': self.gender, 'dob': self.date_of_birth,
                        'blood_group': self.blood_group,
                        'dept_id': self.department_id,
                        'dept_name': self.department_name,
                        'phone_1': self.contact_number_1,
                        'phone_2': self.contact_number_2,
                        'uid': self.aadhar_or_voter_id, 'email_id': self.email_id,
                        'qualification': self.qualification,
                        'specialisation': self.specialisation,
                        'experience': self.years_of_experience,
                        'address': self.address, 'city': self.city,
                        'state': self.state, 'pin': self.pin_code
                    }
                )
            st.success('Doctor details saved successfully.')
            st.write('Your Doctor ID is: ', self.id)
            conn.close()
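
The db.connection() helper and the doctor_record table are defined elsewhere in that project. A minimal sketch of such a module, assuming SQLite from the standard library (the module and database names are guesses):

# db.py -- hypothetical module backing the example above
import sqlite3

DATABASE_NAME = 'hospital.db'  # assumed filename

def connection():
    # Returns both the connection (used as a commit scope via `with conn:`)
    # and a cursor, matching the `conn, c = db.connection()` call above.
    conn = sqlite3.connect(DATABASE_NAME)
    c = conn.cursor()
    return conn, c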
Example #21
0
def main():
    local_css(
        "/home/pasonatech/workspace/albumentations_forked/albumentations-demo/src/custom_css.css"
    )
    # logo_img = "/home/pasonatech/workspace/albumentations_forked/albumentations-demo/images/p.png"
    # html_sticky = f"""
    #     <div class="sticky pt-2">
    #         <img class="img-fluid" src="data:image/png;base64,{base64.b64encode(open(logo_img, "rb").read()).decode()}">
    #     </div>
    # """
    # st.markdown(html_sticky ,unsafe_allow_html = True)

    # get CLI params: the path to images and image width
    path_to_images, width_original = get_arguments()

    if not os.path.isdir(path_to_images):
        st.title("There is no directory: " + path_to_images)
    else:
        # select interface type
        interface_type = st.sidebar.radio(
            "Select the interface mode",
            ["Simple", "Professional", "Custom", "LoadMyFile"])

        #pick css
        if interface_type == "LoadMyFile":
            local_css(
                "/home/pasonatech/workspace/albumentations_forked/albumentations-demo/src/custom_loadmy_css.css"
            )

        if interface_type == "Custom":
            json_file_name = st.sidebar.text_input(
                "Insert Json File Name", "aug_file")  #text_area same format
            json_file_name = os.path.join("./my_json_files",
                                          f"{json_file_name}" + '.json')

        # select image
        status, image = select_image(path_to_images, interface_type)
        if status == 1:
            st.title("Can't load image")
        elif status == 2:
            st.title("Please, upload the image")
        else:
            # image was loaded successfully
            placeholder_params = get_placeholder_params(image)

            # load the config
            augmentations = load_augmentations_config(
                placeholder_params, "configs/augmentations.json")

            if interface_type != "LoadMyFile":
                # get the list of transformations names
                transform_names = select_transformations(
                    augmentations, interface_type)

            if interface_type is "Custom":
                transforms = get_transormations_params_custom(
                    transform_names, augmentations, json_file_name)

            elif interface_type == "LoadMyFile":

                f_name = st.sidebar.file_uploader("Select your json file",
                                                  type="json")

                view_times = 0
                if f_name:
                    j_text = f_name.read()  # read the uploaded file's contents directly
                    j_data = json.loads(j_text)

                    image_replace = st.empty()
                    st.image(image,
                             caption="Original image",
                             width=width_original)
                    if st.sidebar.button("Play Preview"):
                        view_times = 1
                    stop_btn = st.sidebar.button("STOP Preview")
                    if stop_btn:
                        view_times = 0
                    # for seconds in range(view_times):
                    # data =j_data
                    try:
                        transform = A.from_dict(j_data)
                        display_value = True
                    except KeyError:
                        st.error(
                            "Please, confirm your augmentations structure.")
                        st.error(
                            "Only albumentations augmentations generated by 'A.to_dict()' are supported."
                        )
                        # view_times = 0
                        display_value = False

                    while view_times == 1:

                        try:
                            # data = json.load(open(file_name, 'r'))
                            # transform = A.from_dict(data)
                            aug_img_obj = transform(image=image)
                            # print(aug_img_obj.keys())
                            aug_img = aug_img_obj['image']

                            image_replace.image(
                                aug_img,
                                caption="Transformed image",
                                width=width_original,
                            )
                        except IOError:
                            st.error("Confirm your json file path.")
                            view_times = 0
                        except UnboundLocalError:
                            st.error(
                                "Your json file seems incompatible with this task."
                            )
                            view_times = 0
                        except ValueError as e:
                            image_replace.error(e)  # replaces the error log in the same slot

                        time.sleep(1)
                    if stop_btn:
                        st.info(
                            "Preview stopped. Press the Play Preview button to resume."
                        )
                    if display_value:
                        if st.sidebar.checkbox(
                                "Display Augmentation Parameters"):
                            onetine_data_loader(j_data)

                    transforms = []
                else:
                    st.header("WELCOME")
                    st.header("Please upload a JSON File")

            else:
                # get parameters for each transform
                transforms = get_transormations_params(transform_names,
                                                       augmentations)

            if interface_type != "LoadMyFile":
                try:
                    # apply the transformation to the image
                    data = A.ReplayCompose(transforms)(image=image)
                    error = 0
                except ValueError:
                    error = 1
                    st.title(
                        "An error has occurred. Most probably you have passed a "
                        "wrong set of parameters. Check transforms that change "
                        "the shape of the image.")

                # proceed only if everything is ok
                if error == 0:
                    augmented_image = data["image"]
                    # show title
                    st.title("Demo of Albumentations")

                    # show the images
                    width_transformed = int(width_original / image.shape[1] *
                                            augmented_image.shape[1])

                    st.image(image,
                             caption="Original image",
                             width=width_original)
                    st.image(
                        augmented_image,
                        caption="Transformed image",
                        width=width_transformed,
                    )

                    # comment about refreshing
                    st.write("*Press 'R' to refresh*")

                    #custom preview of aug list
                    # random values used to get transformations
                    show_random_params(data, interface_type)

                    for transform in transforms:
                        show_docstring(transform)
                        st.code(str(transform))
                    show_credentials()

                # adding google analytics pixel
                # only when deployed online. don't collect statistics of local usage
                if "GA" in os.environ:
                    st.image(os.environ["GA"])
                    st.markdown(
                        ("[Privacy policy]" +
                         ("(https://htmlpreview.github.io/?" +
                          "https://github.com/IliaLarchenko/" +
                          "albumentations-demo/blob/deploy/docs/privacy.html)")
                         ))
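
local_css() is not defined in this snippet; injecting a local stylesheet is a common Streamlit pattern, so a plausible sketch (the exact original may differ) is:

import streamlit as st

def local_css(file_name):
    # st.markdown escapes HTML by default, so unsafe_allow_html is
    # needed to get the <style> tag through to the page.
    with open(file_name) as f:
        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)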
Example #22
0
    def test_empty(self):
        """Test Empty."""
        st.empty()

        element = self.get_delta_from_queue().new_element
        self.assertEqual(element.empty, EmptyProto())
Example #23
0
            df = load_data()

            length = len(df)

            if length == 1:
                st.write("This webpage contains 1 table")
            else:
                st.write("This webpage contains " + str(length) + " tables")

            #st.write("This webpage contains " + str(length) + " tables" )

            if st.button("Show scraped tables"):
                st.table(df)
            else:
                st.empty()

            def createList(r1, r2):
                return [item for item in range(r1, r2 + 1)]

            r1, r2 = 1, length
            funct = createList(r1, r2)

            ###### Selectbox - Selectbox - Selectbox - Selectbox - Selectbox - Selectbox - Selectbox -

            st.markdown('### **2️⃣ Select a table to export**')

            ValueSelected = st.selectbox('', funct)
            st.write('You selected table #', ValueSelected)

            df1 = df[ValueSelected - 1]
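
load_data() is assumed to return the list of tables scraped from the target page; the code above treats it as a list of DataFrames (len(df), df[ValueSelected - 1]). A sketch with pandas' built-in HTML table reader, taking the URL as a parameter for self-containment (the original presumably reads it from user input elsewhere):

import pandas as pd
import streamlit as st

@st.cache
def load_data(url):
    # pd.read_html returns one DataFrame per <table> element found on the page
    return pd.read_html(url)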
Example #24
0
with st.echo():
    st.write("Code!")

# Progress and Status

import time

myBar = st.progress(0)

for percent in range(100):
    time.sleep(0.05)
    myBar.progress(percent + 1)

# Temporary text display
with st.spinner('Wait for it...'):
    time.sleep(1)
st.success("Done!")

# Balloons
st.balloons()

# Timer
import time

with st.empty():
    for seconds in range(60):
        st.write(f"⏳ {seconds} seconds have passed")
        time.sleep(1)
st.write("✔️ 1 minute over!")
Example #25
0
def app_object_detection():
    """Object detection demo with MobileNet SSD.
    This model and code are based on
    https://github.com/robmarkcole/object-detection-app
    """
    MODEL_URL = "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.caffemodel"  # noqa: E501
    MODEL_LOCAL_PATH = HERE / "./models/MobileNetSSD_deploy.caffemodel"
    PROTOTXT_URL = "https://github.com/robmarkcole/object-detection-app/raw/master/model/MobileNetSSD_deploy.prototxt.txt"  # noqa: E501
    PROTOTXT_LOCAL_PATH = HERE / "./models/MobileNetSSD_deploy.prototxt.txt"

    CLASSES = [
        "background",
        "aeroplane",
        "bicycle",
        "bird",
        "boat",
        "bottle",
        "bus",
        "car",
        "cat",
        "chair",
        "cow",
        "diningtable",
        "dog",
        "horse",
        "motorbike",
        "person",
        "pottedplant",
        "sheep",
        "sofa",
        "train",
        "tvmonitor",
    ]
    COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

    download_file(MODEL_URL, MODEL_LOCAL_PATH, expected_size=23147564)
    download_file(PROTOTXT_URL, PROTOTXT_LOCAL_PATH, expected_size=29353)

    DEFAULT_CONFIDENCE_THRESHOLD = 0.5

    class Detection(NamedTuple):
        name: str
        prob: float

    class MobileNetSSDVideoTransformer(VideoTransformerBase):
        confidence_threshold: float
        result_queue: "queue.Queue[List[Detection]]"

        def __init__(self) -> None:
            self._net = cv2.dnn.readNetFromCaffe(
                str(PROTOTXT_LOCAL_PATH), str(MODEL_LOCAL_PATH)
            )
            self.confidence_threshold = DEFAULT_CONFIDENCE_THRESHOLD
            self.result_queue = queue.Queue()

        def _annotate_image(self, image, detections):
            # loop over the detections
            (h, w) = image.shape[:2]
            result: List[Detection] = []
            for i in np.arange(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]

                if confidence > self.confidence_threshold:
                    # extract the index of the class label from the `detections`,
                    # then compute the (x, y)-coordinates of the bounding box for
                    # the object
                    idx = int(detections[0, 0, i, 1])
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")

                    name = CLASSES[idx]
                    result.append(Detection(name=name, prob=float(confidence)))

                    # display the prediction
                    label = f"{name}: {round(confidence * 100, 2)}%"
                    cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[idx], 2)
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    cv2.putText(
                        image,
                        label,
                        (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        COLORS[idx],
                        2,
                    )
            return image, result

        def transform(self, frame: av.VideoFrame) -> np.ndarray:
            image = frame.to_ndarray(format="bgr24")
            blob = cv2.dnn.blobFromImage(
                cv2.resize(image, (300, 300)), 0.007843, (300, 300), 127.5
            )
            self._net.setInput(blob)
            detections = self._net.forward()
            annotated_image, result = self._annotate_image(image, detections)

            # NOTE: This `transform` method is called in another thread,
            # so it must be thread-safe.
            self.result_queue.put(result)

            return annotated_image

    webrtc_ctx = webrtc_streamer(
        key="object-detection",
        mode=WebRtcMode.SENDRECV,
        client_settings=WEBRTC_CLIENT_SETTINGS,
        video_transformer_factory=MobileNetSSDVideoTransformer,
        async_transform=True,
    )

    confidence_threshold = st.slider(
        "Confidence threshold", 0.0, 1.0, DEFAULT_CONFIDENCE_THRESHOLD, 0.05
    )
    if webrtc_ctx.video_transformer:
        webrtc_ctx.video_transformer.confidence_threshold = confidence_threshold

    if st.checkbox("Show the detected labels", value=True):
        if webrtc_ctx.state.playing:
            labels_placeholder = st.empty()
            # NOTE: The video transformation with object detection and
            # this loop displaying the result labels are running
            # in different threads asynchronously.
            # Then the rendered video frames and the labels displayed here
            # are not strictly synchronized.
            while True:
                if webrtc_ctx.video_transformer:
                    try:
                        result = webrtc_ctx.video_transformer.result_queue.get(
                            timeout=1.0
                        )
                    except queue.Empty:
                        result = None
                    labels_placeholder.table(result)
                else:
                    break

    st.markdown(
        "This demo uses a model and code from "
        "https://github.com/robmarkcole/object-detection-app. "
        "Many thanks to the project."
    )
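
download_file() comes from the surrounding app and is not shown. A rough sketch of the behavior its call sites imply (fetch once, skip when a file of the expected size already exists), using only the standard library and Streamlit:

import urllib.request
from pathlib import Path

import streamlit as st

def download_file(url, local_path: Path, expected_size=None):
    # Skip the download if an artifact of the right size is already on disk
    if local_path.exists() and (
        expected_size is None or local_path.stat().st_size == expected_size
    ):
        return
    local_path.parent.mkdir(parents=True, exist_ok=True)
    with st.spinner(f"Downloading {url} ..."):
        urllib.request.urlretrieve(url, str(local_path))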
Example #26
0
    def bar_plot(self, data_row):
        empty_plot1 = st.empty()
        empty_plot2 = st.empty()
        num_mat = len(data_row['material'].unique().tolist())
        i = 0
        xx = pd.DataFrame({'radius_nm': data_row['radius_nm'].unique()})
        xx = xx['radius_nm'].to_numpy()
        yy = np.zeros(len(xx))

        for material in data_row['material'].unique().tolist():
            data = data_row[data_row['material'] == material]
            data_medie = pd.DataFrame({'radius_nm': data['radius_nm'].unique()})
            raggi = data['radius_nm'].unique().tolist()

            media_quality = []
            media_speed = []
            media_error_q = []
            media_error_s = []
            media_material = []

            for raggio in raggi:
                media_quality.append(data['normalized_signal_quality'][data['radius_nm'] == raggio].mean())
                media_speed.append(data['signal_speed'][data['radius_nm'] == raggio].mean())
                media_error_q.append(data['normalized_signal_quality'][data['radius_nm'] == raggio].std())
                media_error_s.append(data['signal_speed'][data['radius_nm'] == raggio].std())
                media_material.append(material)

            data_medie['normalized_signal_quality'] = media_quality
            data_medie['signal_speed'] = media_speed
            data_medie['normalized_signal_quality_err'] = media_error_q
            data_medie['signal_speed_err'] = media_error_s
            data_medie['material'] = media_material

            data_medie = data_medie.fillna(0)

            st.write(material)
            st.table(data)

            delta_delay = 12 / num_mat
            delay = -3 + delta_delay*i

            ds().nuova_fig(30)
            ds().titoli(titolo='Normalized Signal Intensity', xtag='radius[nm]', ytag='counts')
            ds().dati(x = data_medie['radius_nm'].to_numpy(), y = data_medie['normalized_signal_quality'].to_numpy(), scat_plot = 'bar', delay = delay, width = 3, descrizione=material)
            ds().dati(x = data_medie['radius_nm'].to_numpy()+delay/2, y = data_medie['normalized_signal_quality'].to_numpy(), y_error=data_medie['normalized_signal_quality_err'].to_numpy()/2, scat_plot = 'err', colore='black')
            ds().dati(x = data['radius_nm']+delay/2, y = data['normalized_signal_quality'], scat_plot ='scat', colore="blue", larghezza_riga =12, layer = 2)
            ds().dati(x = xx, y = yy, scat_plot ='bar', width = 3, delay = 0)
            ds().legenda()

            ds().nuova_fig(31)
            ds().titoli(titolo='Slope (C)', xtag='radius[nm]', ytag='T/I [k/uW]')
            ds().dati(x = data_medie['radius_nm'].to_numpy(), y = data_medie['signal_speed'].to_numpy(), scat_plot = 'bar', delay = delay, width = 3, descrizione=material)
            ds().dati(x = data_medie['radius_nm'].to_numpy()+delay/2, y = data_medie['signal_speed'].to_numpy(), y_error=data_medie['signal_speed_err'].to_numpy()/2, scat_plot = 'err', colore='black')
            ds().dati(x = data['radius_nm']+delay/2, y = data['signal_speed'], scat_plot ='scat', colore="blue", larghezza_riga =12, layer = 2)
            ds().dati(x = xx, y = yy, scat_plot ='bar', width = 3, delay = 0)
            ds().legenda()
            i = i+1

        ds().nuova_fig(30)
        empty_plot1.pyplot()

        ds().nuova_fig(31)
        empty_plot2.pyplot()
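
The ds() wrapper is project-specific, but the pattern it serves is general: reserve output slots with st.empty() before a long loop, then render the finished figures into those slots so the charts appear above the per-material tables. A minimal version with plain matplotlib:

import matplotlib.pyplot as plt
import numpy as np
import streamlit as st

plot_slot = st.empty()  # reserved near the top of the page

# ... long computation that writes tables below the slot ...
fig, ax = plt.subplots()
ax.bar([20, 40, 60], np.random.rand(3), width=3)
ax.set_xlabel('radius [nm]')
ax.set_ylabel('counts')

plot_slot.pyplot(fig)  # the figure lands in the reserved slot, not at the bottom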
Example #27
0
url = url + '.json'

post_ids = requests.get(url).json()['post_stream']['stream']

'## Analysis'
msg = 'There are %s posts in the selected thread.' % len(post_ids)
if len(post_ids) > 20:
  msg = msg + ' Will load and analyze the first 20.'
  post_ids = post_ids[0:20]

msg
try:
  classifier = get_classifier('sentiment-analysis')

  post_results = []
  progress_wrapper = st.empty()
  post_progress = progress_wrapper.progress(0)

  for i, post_id in enumerate(post_ids):
    # https://us.forums.blizzard.com/en/wow/posts/<post_id>.json
    post = get_post(post_id)
    post_text = post['raw'].replace('\n', ' ')
    # '* ' + post_text
    results = classifier(post_text)
    post_progress.progress((i+1)/len(post_ids))
    if results[0]['score'] > 0.75:
      post_results.append({
        'text': post['raw'],
        'url': 'https://us.forums.blizzard.com/en/wow/p/%s' % post['id'],
        'sentiment': results[0]['label'],
        'score': results[0]['score']
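
The fragment is truncated and leans on get_classifier() and get_post() helpers. Plausible sketches, assuming Hugging Face transformers for the classifier and the Discourse endpoint named in the comment above (both inferred from context):

import requests
import streamlit as st
from transformers import pipeline

@st.cache(allow_output_mutation=True)
def get_classifier(task):
    # model loading is slow, so cache it across reruns
    return pipeline(task)

@st.cache
def get_post(post_id):
    resp = requests.get('https://us.forums.blizzard.com/en/wow/posts/%s.json' % post_id)
    return resp.json()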
Example #28
0
language_index = st.selectbox("Select spoken language in file",
                              ('Polish', 'English'))

uploaded_file = st.file_uploader("Choose a file...", type=["mp4", 'mp3'])

if uploaded_file is not None:

    if uploaded_file.name.lower().strip().endswith("mp4"):
        name = uploaded_file.name.replace(" ", "").replace(".", "_")
        output_path = 'uploads/' + name + '.mp4'
        if not os.path.exists(output_path):
            preprocessing.upload_temporary(io.BytesIO(uploaded_file.read()),
                                           output_path)

        placeholder = st.empty()
        placeholder.text('Sampling video...')
        bar_placeholder = st.empty()
        bar_placeholder.progress(0)

        fps = preprocessing.get_fps(output_path)
        placeholder.text('Sampling video...')
        frame_path = 'frames/frames_' + name
        if not os.path.exists(frame_path):
            os.makedirs(frame_path, exist_ok=True)
            preprocessing.get_frames(output_path, frame_path, fps, 30,
                                     bar_placeholder)
        placeholder.text('Magic happens...')

        bar_placeholder.progress(0)
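
preprocessing is an app-local module; get_fps() presumably wraps OpenCV's capture properties. A sketch under that assumption:

import cv2

def get_fps(video_path):
    # CAP_PROP_FPS reads the frame rate from the container metadata
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    return fps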
Example #29
0
#     st.image(image_x)

# st.stop()
# image_tensor = torch.tensor(dataset[0][0])
# image_tensor = image_tensor.unsqueeze(0)
# image_tensor = image_tensor.unsqueeze(0)
# image = image_tensor.squeeze(0).squeeze(0).numpy()
# st.image(image, 'original image')
# unfolded = torch.nn.functional.unfold(image_tensor, (5, 5), stride=5, padding=10)
# print(unfolded.shape)
# recovered_tensor = torch.nn.functional.fold(unfolded, output_size=(128, 128), kernel_size=(5, 5), stride=5, padding=10)
# image = recovered_tensor.squeeze(0).squeeze(0).clamp(0, 1).numpy()
# st.image(image, 'recovered image')
# exit()

st_orig_image = st.empty()
st_memorized_image = st.empty()
st_loss = st.empty()

for image_x, image_y in dataset:
    image_tensor = torch.tensor(image_y)
    net.add(torch.tensor(image_x))

print(net(torch.tensor(dataset[2][1])))
# exit()
average = torch.tensor([0.0])  # float tensor so the += and /= accumulation below works
# images = []
for image_x, image_y in dataset:
    average += image_y
    # images.append(image_y.flatten())
average /= len(dataset)
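
The three placeholders created at the top of this fragment are never filled here. Presumably a later loop writes into them, along these lines (the tensor shapes, value ranges, and the recall call are all assumptions about the unseen net):

for image_x, image_y in dataset:
    recalled = net(torch.tensor(image_x)).detach()
    st_orig_image.image(image_y, caption='original', clamp=True)
    st_memorized_image.image(recalled.numpy(), caption='memorized', clamp=True)
    st_loss.text('MSE: %.5f' % float(((recalled - torch.tensor(image_y)) ** 2).mean()))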
Example #30
0
import streamlit as st
import pandas as pd
import numpy as np
import time

st.title('Metaverse Launcher')

st.sidebar.title("Test Agents")

if st.sidebar.button('Launch'):
    "this is where we launch stuff"

progress_bar = st.progress(0)
status_text = st.empty()
chart = st.line_chart(np.random.randn(10, 2))

for i in range(100):
    # Update progress bar.
    progress_bar.progress(i + 1)

    new_rows = np.random.randn(10, 2)

    # Update status text.
    status_text.text('The latest random number is: %s' % new_rows[-1, 1])

    # Append data to the chart.
    chart.add_rows(new_rows)

    # Pretend we're doing some computation that takes time.
    time.sleep(0.1)
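
A small follow-up once the loop completes: placeholders created with st.empty() (and progress bars) can be cleared or overwritten so only the final chart remains. A sketch:

# tidy up the transient widgets after the run
progress_bar.empty()
status_text.text('Run complete.')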