import ptvsd
import streamlit as st

ptvsd.enable_attach(address=('localhost', 6790))
# ptvsd.wait_for_attach()  # Only include this line if you always want to manually attach the debugger

from LayoutAndStyleUtils import (Grid, Cell, BlockContainerStyler)
BlockContainerStyler().set_default_block_container_style()

# --------------------------------------------------------------------------------

messageboard = st.empty()

from utils import SessionState

# Session State variables:
session_state = SessionState.get(
    message='To use this application, please login...',
    token={'value': None, 'expiry': None},
    user=None,
    email=None,
    report=[],
)

# --------------------------------------------------------------------------------

# import must come after messageboard as these apps use app.messageboard
import dumb_app, dumber_app, login_app, logout_app


def main():
    pages = {
        'DuMMMy aPp [1]': [dumb_app.main],    # DUMMY APP 1
        'DUmmmY ApP [2]': [dumber_app.main],  # DUMMY APP 2
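# --- Hypothetical dispatch sketch (the pages dict above is truncated). Not the
# author's code: a minimal illustration of how a {label: [main_fn]} mapping is
# typically routed in Streamlit; `route` and its body are assumptions.
def route(pages):
    choice = st.sidebar.radio('Go to', list(pages.keys()))
    pages[choice][0]()  # call the selected sub-app's main()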
# This is a sample Python script.
import streamlit as st
import time
import cv2 as cv
import numpy as np
from PIL import Image
from utils import SessionState  # Assuming SessionState.py lives in this folder
from utils.cv_filters import strel_line, imadjust, gaussian_kernel, wiener_filter, laplacianOfGaussian

session = SessionState.get(run_id=0)


def main():
    image_s = None
    image_h = None
    st.title("Dermoscopy Images Preprocessing")
    process = st.sidebar.radio('Type of process', ('Registration', 'Shaver'))
    with st.beta_container():
        if process == 'Registration':
            st.title("Registration")
            st.sidebar.write('You selected registration')
            # Add sliders to the sidebar:
            austerity = st.sidebar.slider('Austerity', 1.0, 100.0, 70.0)
            minimum = st.sidebar.slider('Minimum matches', 0.0, 1000.0, 10.0)
            MIN_MATCH_COUNT = minimum
            sample = st.file_uploader("Choose a sample image...")
            if sample is not None:
                image_s = Image.open(sample)
                st.image(image_s, caption='Sample Image', width=300)
            history = st.file_uploader("Choose a history image...")
            if history is not None:
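# --- Hypothetical registration step (the original file is truncated above,
# mid-branch). MIN_MATCH_COUNT suggests the classic OpenCV SIFT + ratio-test +
# homography pattern; this sketch is an assumption, not the app's actual code,
# and `register`, `ratio`, and `min_matches` are invented names.
def register(img_sample, img_history, ratio, min_matches):
    sift = cv.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img_sample, None)
    kp2, des2 = sift.detectAndCompute(img_history, None)
    matches = cv.BFMatcher().knnMatch(des1, des2, k=2)
    good = [m for m, n in matches if m.distance < ratio * n.distance]  # Lowe's ratio test
    if len(good) < min_matches:
        return None  # not enough matches to estimate a homography
    src = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    H, _ = cv.findHomography(src, dst, cv.RANSAC, 5.0)
    return H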
from dataclasses import dataclass  # needed for the @dataclass decorator below
from math import sqrt

from utils import SessionState
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
import altair as alt
import streamlit as st

np.random.seed(0)
X_MIN = 0
X_MAX = 1

state = SessionState.get(min_rmse=999)


@dataclass
class Weight:
    w0: float
    w1: float
    w2: float


@st.cache
def build_dataset(xres):
    X_source = np.linspace(X_MIN, X_MAX, xres)
    y_source = (
        np.polynomial.polynomial.polyval(X_source, [0, 2, 5])
        + np.sin(8 * X_source)
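# --- Hypothetical scoring helper (the original file is truncated above). A
# minimal sketch of how a candidate Weight could be scored against the dataset
# and compared with state.min_rmse; `rmse_for` is an assumed name, and the
# quadratic form mirrors the polyval coefficients [0, 2, 5] used above.
def rmse_for(w: Weight, X: np.ndarray, y: np.ndarray) -> float:
    y_pred = w.w0 + w.w1 * X + w.w2 * X ** 2  # w0 + w1*x + w2*x^2
    return sqrt(mean_squared_error(y, y_pred))

# e.g.: state.min_rmse = min(state.min_rmse, rmse_for(candidate, X_source, y_source))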
        padding-bottom: {padding_bottom}rem;
    }}
    .reportview-container .main {{
        color: {COLOR};
        background-color: {BACKGROUND_COLOR};
    }}
</style>
""",
    unsafe_allow_html=True,
)

sessions = session.get(
    key=0,
    id=0,
    trainer_params={},
    trainer_dict={
        "id": [],
        "dataloader": [],
        "model": [],
        "loss": [],
        "optimizer": [],
        "scheduler": [],
        "metrics": [],
    },
)

st.sidebar.title("Catalogue")
cur = st.sidebar.radio("catalogue", ("Trainer", "Eval"))
if cur == "Trainer":
    data_selections = []
    for name, obj in inspect.getmembers(data_module, inspect.isclass):
        if "data_loader.data_loaders" in str(obj):
            # slice the bare class name out of the class repr string
            data_selections.append(str(obj)[33:-12])
    model_selections = []
    for name, obj in inspect.getmembers(model_module, inspect.isclass):
def main():
    st.set_page_config(page_title="Traffic Flow Counter",
                       page_icon=":vertical_traffic_light:")

    obj_detector = load_obj_detector(config, wt_file)
    tracker = tc.CarsInFrameTracker(num_previous_frames=10, frame_shape=(720, 1080))
    state = SessionState.get(upload_key=None, enabled=True,
                             start=False, conf=70, nms=50, run=False)
    hide_streamlit_widgets()

    """
    # Traffic Flow Counter :blue_car: :red_car:
    Upload a video file to track and count vehicles. Don't forget to change parameters to tune the model!

    #### Features to be added in the future:
    + speed measurement
    + traffic density
    + vehicle type distribution
    """

    with st.sidebar:
        """
        ## :floppy_disk: Parameters
        """
        state.conf, state.nms = parameter_sliders(
            keys, state.enabled, value=[state.conf, state.nms])

        st.text("")
        st.text("")
        st.text("")

        """
        #### :desktop_computer: [Source code in Github](https://github.com/aldencabajar/traffic_flow_counter)
        """

    # set model confidence and nms threshold
    if state.conf is not None:
        obj_detector.confidence = state.conf / 100
    if state.nms is not None:
        obj_detector.nms_threshold = state.nms / 100

    upload = st.empty()
    start_button = st.empty()
    stop_button = st.empty()

    with upload:
        f = st.file_uploader('Upload Video file (mpeg/mp4 format)', key=state.upload_key)

    if f is not None:
        tfile = tempfile.NamedTemporaryFile(delete=True)
        tfile.write(f.read())

        upload.empty()
        vf = cv2.VideoCapture(tfile.name)

        if not state.run:
            start = start_button.button("start")
            state.start = start

        if state.start:
            start_button.empty()
            # state.upload_key = str(randint(1000, int(1e6)))
            state.enabled = False
            if state.run:
                tfile.close()
                f.close()
                state.upload_key = str(randint(1000, int(1e6)))
                state.enabled = True
                state.run = False
                ProcessFrames(vf, tracker, obj_detector, stop_button)
            else:
                state.run = True
                trigger_rerun()
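# --- Hypothetical helper (its definition is not part of this fragment):
# hide_streamlit_widgets() is commonly implemented by injecting CSS that hides
# Streamlit's default hamburger menu and footer. A minimal sketch under that
# assumption, not necessarily the author's version.
def hide_streamlit_widgets():
    st.markdown(
        """
        <style>
        #MainMenu {visibility: hidden;}
        footer {visibility: hidden;}
        </style>
        """,
        unsafe_allow_html=True,
    )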
def main():
    # st.set_page_config(page_title = "Continuous Sign Language Recognition")
    st.markdown("### Model Architecture")
    st.image(
        '/app/architecture.png',
        caption='Architecture overview',
        use_column_width=True
    )

    base_size = [256, 256]
    crop_size = [224, 224]
    random_crop = False
    p_drop = 0.5
    random_drop = False

    transform_phoenix = transforms.Compose(
        [
            transforms.Resize(base_size),
            transforms.RandomCrop(crop_size) if random_crop else transforms.CenterCrop(crop_size),
            transforms.ToTensor(),
            # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            transforms.Normalize([0.53724027, 0.5272855, 0.51954997], [1, 1, 1]),
        ]
    )
    transform_krsl = transforms.Compose(
        [
            transforms.Resize(base_size),
            transforms.RandomCrop(crop_size) if random_crop else transforms.CenterCrop(crop_size),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            # transforms.Normalize([0.53724027, 0.5272855, 0.51954997], [1, 1, 1]),
        ]
    )

    state = SessionState.get(upload_key=None, enabled=True, start=False,
                             conf=70, nms=50, run=False, upload_db=False)
    hide_streamlit_widgets()

    """
    # Continuous Sign Language Recognition
    """

    with open("/app/phrases.txt", "r") as fh:
        lines = fh.readlines()
    my_phrases = [""] + [line.strip().split("\t")[1] for line in lines]

    with open("/app/app/test_ids.txt", "r") as fh:
        ids = fh.readlines()
    signer_ids = [""] + [sid.strip() for sid in ids]

    phrase_dict = {line.strip().split("\t")[1]: line.strip().split("\t")[0] for line in lines}

    with st.sidebar:
        """
        ## :floppy_disk: Stochastic CSLR model
        SOTA among single cue models
        """
        # state.conf, state.nms = parameter_sliders(
        #     keys, state.enabled, value=[state.conf, state.nms])
        st.text("")
        st.text("")
        st.text("")
        lang = st.radio("Select language: ", ('Russian', 'German'))

    backbone = st.sidebar.selectbox(
        label='Please choose the backbone for Stochastic CSLR',
        options=['ResNet18'],
        index=0,
        key='backbone'
    )
    phrase = st.sidebar.selectbox(
        label="Please select the phrase for K-RSL dataset here",
        options=my_phrases,
        index=0,
        key='phrase'
    )
    signer_id = st.sidebar.selectbox(
        label="Please select the signer id for K-RSL dataset here",
        options=signer_ids,
        index=0,
        key='signer_id'
    )

    upload = st.empty()
    start_button = st.empty()
    stop_button = st.empty()

    with upload:
        f = st.file_uploader('Upload Video file (mpeg/mp4 format)', key=state.upload_key)

    if lang == "Russian" and len(phrase) != 0 and len(signer_id) != 0:
        video_path = ("/app/test_videos/" + str(phrase_dict[phrase]) + "/"
                      + "P" + str(signer_id) + "_" + "S" + str(phrase_dict[phrase]) + "_" + "00.mp4")
        if not os.path.exists(video_path):
            st.info("The video is not in the database!")
            return
        vf = cv2.VideoCapture(video_path)

        frames = get_frames(video_path=video_path)
        indices = sample_indices(n=len(frames), p_drop=p_drop, random_drop=random_drop)
        frames = [Image.fromarray(frames[i].asnumpy(), 'RGB') for i in indices]
        if lang == "Russian":
            frames = map(transform_krsl, frames)
        else:
            frames = map(transform_phoenix, frames)
        frames = np.stack(list(frames))

        if lang == "Russian":
            epoch = 18
            vocab = create_vocab(split="train_rus", sep=",")
        else:
            vocab = create_vocab(split="train_ger", sep="|")
            if backbone == "ResNet18":
                epoch = 100
            else:
                epoch = 200

        hyp = inference(epoch, vocab, frames, lang)

        if not state.run:
            start_button.empty()
            start = start_button.button("PREDICT")
            state.start = start

        if state.start:
            start_button.empty()
            state.enabled = False
            if state.run:
                phrase_id = None
                if phrase in phrase_dict:
                    phrase_id = phrase_dict[phrase]
                state.upload_key = str(randint(1000, int(1e6)))
                state.enabled = True
                state.run = False
                ProcessFrames(vf, stop_button, hyp, video_path, phrase_id, signer_ids, state)
            else:
                state.run = True
                trigger_rerun()

    if f is not None:
        tfile = tempfile.NamedTemporaryFile(delete=False)
        tfile.write(f.read())

        upload.empty()
        vf = cv2.VideoCapture(tfile.name)

        frames = get_frames(video_path=tfile.name)
        indices = sample_indices(n=len(frames), p_drop=p_drop, random_drop=random_drop)
        frames = [Image.fromarray(frames[i].asnumpy(), 'RGB') for i in indices]
        if lang == "Russian":
            frames = map(transform_krsl, frames)
        else:
            frames = map(transform_phoenix, frames)
        frames = np.stack(list(frames))

        if lang == "Russian":
            epoch = 18
            vocab = create_vocab(split="train_rus", sep=",")
        else:
            vocab = create_vocab(split="train_ger", sep="|")
            if backbone == "ResNet18":
                epoch = 100
            else:
                epoch = 200

        hyp = inference(epoch, vocab, frames, lang)

        if not state.run:
            start_button.empty()
            start = start_button.button("PREDICT ")
            state.start = start

            with open("/app/app/upload.txt") as fh:  # separate handle: `f` is the uploaded video
                upload_enabled = int(fh.readline())  # was named `bool`, shadowing the builtin
            phrase_id = None
            if phrase in phrase_dict:
                phrase_id = phrase_dict[phrase]
            if upload_enabled and phrase_id is not None:
                up = upload.button("UPLOAD TO DATABASE")
                state.upload_db = up
                if state.upload_db:
                    with open("/app/app/test_ids.txt", "a") as fh:
                        if "51" not in signer_ids:
                            fh.write("51\n")
                    shutil.move(tfile.name, f"/app/test_videos/{phrase_id}/P51_S{phrase_id}_00.mp4")
                    st.info("The data was successfully uploaded to the database!")
                    state.run = False

        if state.start:
            start_button.empty()
            state.enabled = False
            if state.run:
                f.close()
                state.upload_key = str(randint(1000, int(1e6)))
                state.enabled = True
                state.run = False
                phrase_id = None
                if phrase in phrase_dict:
                    phrase_id = phrase_dict[phrase]
                ProcessFrames(vf, stop_button, hyp, tfile, phrase_id, signer_ids, state)
            else:
                state.run = True
                trigger_rerun()
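# --- Hypothetical helper (not defined in this fragment): trigger_rerun() must
# force Streamlit to re-execute the script so the reset state takes effect.
# A sketch assuming a Streamlit version that ships st.experimental_rerun();
# SessionState-era apps sometimes raised RerunException instead.
def trigger_rerun():
    st.experimental_rerun()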
    return case_md


@fancy_cache(unique_to_session=True, allow_output_mutation=True)
def get_static_store() -> Dict:
    """This dictionary is initialized once and can be used to store the files uploaded"""
    return {}


select_block_container_style()
nltk.download("punkt")

st.title("Case Roulette")
state = SessionState.get(cases=[], played_inds=[], orig_case_num=0, file_value=None)
menu_state = st.radio("Show menu", ["Show", "Hide"], 0)

static_store = get_static_store()
file_picker = st.empty()
file_buffer = file_picker.file_uploader("Upload a file with cases", type="txt")
if file_buffer:
    value = file_buffer.getvalue()
    if value not in static_store.values():
        static_store[value] = value  # key by content; avoids a redundant second getvalue() call
else:
    static_store.clear()
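# --- Hypothetical continuation (the original file is truncated here): a sketch
# of how a not-yet-played case could be drawn, assuming state.cases holds the
# case texts and state.played_inds the indices already shown. `draw_case` is an
# invented name, not the app's actual code.
import random

def draw_case(state):
    remaining = [i for i in range(len(state.cases)) if i not in state.played_inds]
    if not remaining:
        return None  # every case has been played
    idx = random.choice(remaining)
    state.played_inds.append(idx)
    return state.cases[idx]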