Beispiel #1
0
def run_labeler():
    """Streamlit page that serves a random video and records the chosen emotion label."""
    # Per-session counters: vnum = videos shown so far, vlab = videos labeled.
    session = SessionState.get(vnum=0, vlab=0)
    videos = get_video_list()

    st.title("Emotion Labeler")
    st.write(
        "Please label as many videos as you can. When done, simply close browser tab."
    )
    st.write("")
    st.write(f"Total videos labeled in current session:{session.vlab}")
    st.write("Note: refreshing browser tab will reset counts.")

    video_id, choices = get_random_video(videos, session.vnum)
    with open(f'{bucket_path}vid_{video_id}.mp4', 'rb') as clip:
        clip_bytes = clip.read()
    st.video(clip_bytes)

    # The placeholder choice starts with "Select"; any other pick is a real label.
    emo = st.selectbox('Emotion:', choices)
    labeled = emo[:6] != "Select"

    if st.button('Get next video'):
        if labeled:
            session.vlab += 1
            # Append one CSV-style record: timestamp, session id, video id, emotion.
            with open(labelfile, 'a') as fd:
                fd.write(
                    f"{time.time()}, {SessionState.get_session_id()}, {video_id}, {emo}\n"
                )
        session.vnum += 1
        # Force a script rerun so the next random video is served.
        raise RerunException(RerunData(widget_state=None))
Beispiel #2
0
def main():
    """Streamlit app: run face-mask detection on stored videos, uploaded videos,
    and uploaded images.

    Side effects: downloads model files, sets the module-level ``predictor``,
    writes uploads to disk, and spawns background ``detect.py`` processes.
    """
    static_store = get_static_store()

    # Download files necessary for model execution
    download_files()

    global predictor
    cfg, predictor, _ = detect.load_models()
    st.title("Facemask and Social Distancing detector")
    st.set_option('deprecation.showfileUploaderEncoding', False)
    inputfilename = input_file_selector() # By default current folder is set is no folder path is provided
    # Here call model and provide input file and output should be filename
    # which should need to be played for face mask detection
    outputfilename = detect_face_mask(inputfilename)
    if st.button("Play video"):
        # Context manager closes the handle (the original leaked the open file).
        with open(outputfilename, 'rb') as video_file:
            video_bytes = video_file.read()
        st.video(video_bytes)

    st.title("Upload New video for processing")
    uploaded_file = st.file_uploader("Choose a video...", type=["mp4"])
    if uploaded_file is not None:
        data = uploaded_file.read()
        # MD5 of the payload de-duplicates repeated uploads across reruns.
        checksum = hashlib.md5(data).hexdigest()

        if static_store.get(checksum, False):
            st.text("Already in processing queue or processed")
        else:
            static_store[checksum] = True

            # Persist the upload under a timestamped name. Writing `data`
            # directly replaces the original's pointless BytesIO round-trip,
            # and the stray `out.close()` after the `with` block is removed.
            temporary_location = "video_%s.mp4" % (datetime.now().strftime("%Y%m%d_%H%M%S"))
            with open(temporary_location, 'wb') as out:
                out.write(data)
            st.text("uploaded %s to disk" % temporary_location)
            # NOTE(review): shell command built by string interpolation; the
            # filename is generated locally so it is safe here, but
            # subprocess.Popen with an argument list would be more robust.
            cmd = "python detect.py %s &" % temporary_location
            print(cmd)
            os.system(cmd)
            st.text("video scheduled to be processed")

    st.title("Upload Image for instant detection")
    uploaded_file = st.file_uploader("Choose an image...", type=["jpg"])
    if uploaded_file is not None:
        # Round-trip through disk so OpenCV can decode the image.
        temporary_location = "img_%s.jpg" % (datetime.now().strftime("%Y%m%d_%H%M%S"))
        with open(temporary_location, 'wb') as out:
            out.write(uploaded_file.read())
        st.text("uploaded %s to disk" % temporary_location)

        img = cv2.imread(temporary_location)
        outputs = detect.process_single_frame(0, img, predictor)
        imgs = list(detect.compute_final_frame([img], [outputs], 0, None))
        # OpenCV decodes as BGR; convert to RGB for Streamlit display.
        pimg = cv2.cvtColor(imgs[0], cv2.COLOR_BGR2RGB)
        st.image(pimg)
Beispiel #3
0
def show_movie(movie_path, args):
    """Display a movie: read it from disk when running locally, otherwise
    fetch the raw bytes over HTTP before handing them to Streamlit."""
    if args.env != "local":
        # Remote deployment: movie_path is a URL.
        payload = requests.get(movie_path).content
    else:
        with open(movie_path, "rb") as handle:
            payload = handle.read()

    st.video(payload)
Beispiel #4
0
def main():
    """Render the Home page: sidebar description plus the YouTube tutorial video."""
    st.sidebar.title("Home")
    # Typo fixed in the user-visible string: "conatins" -> "contains".
    st.sidebar.info("This page contains all the basic info about the app")

    # YouTube video link
    st.subheader("Watch the tutorial 👀")
    st.video('https://www.youtube.com/watch?v=Yw6u6YkTgQ4')
Beispiel #5
0
def flash_talk():
    """Render the jitsi chat invitation and the flash-talk video in an expander."""
    invite_md = """
**Chat with Maxime on [jitsi1.inviteo.com/ISBA9-2-63370](https://jitsi1.inviteo.com/ISBA9-2-63370) - 02/06/2021 - 12:30-15:30 CET **  
    """
    st.markdown(invite_md)
    # The expander keeps the page compact until the user opts in to watch.
    talk_url = "https://youtu.be/_ccyeKn3wQ4"
    with st.beta_expander("Watch the Flash Talk"):
        st.video(talk_url)
Beispiel #6
0
def genVideoTutorial():
    """Show the 'before you start' header followed by the tutorial video."""
    header_html = """<div class="base-wrapper">
                        <span class="section-header primary-span">Antes de começar: entenda como usar!</span>
                </div>"""
    st.write(header_html, unsafe_allow_html=True)
    # Tutorial URL is centralized in the Link enum.
    st.video(Link.YOUTUBE_TUTORIAL.value)
Beispiel #7
0
def main():
    """Render the Tutorial page: sidebar description plus the YouTube walkthrough."""
    st.sidebar.title("Tutorial")
    # Typo fixed in the user-visible string: "conatins" -> "contains".
    st.sidebar.info("This page contains all the basic info about the app")

    # YouTube video link (stray trailing semicolon removed).
    st.text("Watch the tutorial 👀")
    st.video('https://youtu.be/Yw6u6YkTgQ4')
Beispiel #8
0
def main():
    """Demo page exercising Streamlit text, image, audio and video widgets."""
    # (widget, argument) pairs rendered in order; empty strings act as spacers.
    steps = [
        (st.title, 'Hello World'),
        (st.header, ''),
        (st.subheader, ''),
        (st.text, ''),
        (st.image, 'logo.png'),
        (st.subheader, ''),
        (st.audio, 'name.wav'),
        (st.video, 'name.mov'),
    ]
    for widget, payload in steps:
        widget(payload)
Beispiel #9
0
def write():
    """Landing page: choose one of the pre-rendered sample 3D videos and play it."""
    udisp.title_awesome("3D Photo Creator")

    sample_names = list(globalDefine.SAMPLE_VIDEO_LIST.keys())
    selection = st.selectbox("Select a sample 3D video output ", sample_names)
    # Map the selected name back to its video source.
    st.video(globalDefine.SAMPLE_VIDEO_LIST.get(selection),
             format='video/mp4', start_time=0)
    udisp.render_md("resources/home_info.md")
Beispiel #10
0
 def release(self, st_video=False):
     """Close the video writer (and the gif writer, if enabled); optionally
     preview the finished file in Streamlit."""
     self.video.close()
     if self.save_gif:
         self.gif_out.close()
     if st_video:
         # Imported lazily so streamlit is only required when previewing.
         import streamlit as st
         st.video(self.video_file)
         st.write(self.video_file)
Beispiel #11
0
def add_footer():
    """Render the page footer: separator line, company logo and 'About us' video."""
    st.write('_________________')
    st.image(
        'https://user-images.githubusercontent.com/52009346/69100304-2eb3e800-0a5d-11ea-9a3a-8e502af2120b.png',
        use_column_width=True,
    )
    st.subheader('About us')
    st.video('https://prowellplan.com/assets/images/Pro-Well-Plan-Produktvideo.mp4')
Beispiel #12
0
def main():
    """Minimal Streamlit media demo: headings plus an image, audio clip and video."""
    # Text hierarchy.
    st.header('This is a header')
    st.subheader('This is a subheader')
    st.text("It's a text")

    # Media widgets all accept a local file path.
    for render, source in ((st.image, 'image.png'),
                           (st.audio, 'record.wav'),
                           (st.video, 'video.mov')):
        render(source)
Beispiel #13
0
def main():
    """
        Face Matching
    """
    # Two tools: celebrity look-alike matching and YouTube video frame browsing.
    activity = ["CELEB MATCH", "VIDEO SEARCH"]
    choice = st.sidebar.selectbox("Choose Activity", activity)

    # CELEB MATCH
    if choice == "CELEB MATCH":
        face_recogniser = load_model('model/face_recogniser.pkl')
        preprocess = preprocessing.ExifOrientationNormalize()
        uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
        if uploaded_file is not None:
            image = Image.open(uploaded_file)
            image = preprocess(image)
            image = image.convert("RGB")
            bbs, _ = aligner.detect(image)
            if bbs is not None:
                # One embedding per detected face, classified in a single batch.
                faces = torch.stack([extract_face(image, bb) for bb in bbs])
                embeddings = facenet(facenet_preprocess(faces)).detach().numpy()
                predictions = face_recogniser.classifier.predict_proba(embeddings)
                for bb, probs in zip(bbs, predictions):
                    try:
                        cropped_faces = [image.crop(bb)]
                        prediction = top_prediction(face_recogniser.idx_to_class, probs)
                        files = glob.glob("images/" + prediction.label + "/*.*")
                        actor_image = Image.open(files[0])
                        actor_image_bbs, _ = aligner.detect(actor_image)
                        if len(actor_image_bbs) > 0:
                            actor_image = actor_image.crop(actor_image_bbs[0])
                        cropped_faces.append(actor_image)
                        st.image(cropped_faces, width=100)
                        st.write(prediction.label)
                    except Exception:
                        # Best-effort per face: a missing reference image or a
                        # failed crop must not abort the remaining faces.
                        # (Was a bare `except:`, which also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        pass
            else:
                st.write("Can't detect face")
            st.image(image, caption='Uploaded Image.', use_column_width=True)
    elif choice == "VIDEO SEARCH":
        st.write("Video Search")
        url = st.text_input("YOUTUBE URL")
        if url:
            video = get_video(url)
            if video:
                st.video(url)
                vpr = get_video_processor(video)
                vpr.read_frames()
                st.write("Number of frames " + str(vpr.frame_count))
                st.write("Duration " + str(int(vpr.duration)) + " s")

                frame_idx = st.number_input("Frame index", value=0, min_value=0,
                                            max_value=vpr.frame_count - 1)
                # Bug fix: the original `if frame_idx:` was falsy at 0, so the
                # first frame could never be displayed. number_input always
                # returns a number, so render the selected frame unconditionally.
                if frame_idx is not None:
                    frame_image = Image.fromarray(vpr.frames[frame_idx])
                    st.image(frame_image, caption='Image at selected frame')
Beispiel #14
0
def main():
    """Showcase Streamlit's basic text and media elements."""
    for heading_fn, text in (
        (st.title, 'Hello World'),
        (st.header, 'This is a header'),
        (st.subheader, 'This is a subheader'),
        (st.text, 'This is a text'),
    ):
        heading_fn(text)

    st.image('logo.png')
    # Each media widget is introduced by a short caption.
    st.subheader('This is a audio')
    st.audio('record.wav')
    st.subheader('This is a video')
    st.video('sentiment_motion.mov')
Beispiel #15
0
def video():
    """Play an uploaded video (if any) followed by the bundled typing.mp4 clip."""
    uploaded_file = st.file_uploader("Choose a video file to play")
    if uploaded_file is not None:
        bytes_data = uploaded_file.read()

        st.video(bytes_data)

    # Context manager closes the handle (the original left typing.mp4 open).
    with open('typing.mp4', 'rb') as video_file:
        video_bytes = video_file.read()
    st.video(video_bytes)
Beispiel #16
0
    def test_st_video_options(self):
        """Test st.video with options."""
        payload = "\x11\x22\x33\x44\x55\x66".encode("utf-8")
        st.video(payload, format="video/mp4", start_time=10)

        element = self.get_delta_from_queue().new_element
        # "ESIzRFVm" is base64.b64encode of the payload bytes above.
        self.assertEqual(element.video.data, "ESIzRFVm")
        self.assertEqual(element.video.format, "video/mp4")
        self.assertEqual(element.video.start_time, 10)
Beispiel #17
0
def main():
    """Interactive video-QA demo: authenticate, pick a video, ask a question,
    then show the model's answer next to the ground truth."""
    auth = st.text_input("Input key credential.", "")

    if auth not in _AUTH:
        st.markdown(_AUTH_DESC)
    else:
        print("[{}] Someone is using the service~".format(time.asctime()))
        data = load_data()

        # Videos are browsed in pages of 20 samples.
        sidx = st.selectbox("Select video set",
                            list(range(len(data) // 20)))
        data = data[20 * sidx:min(20 * (sidx + 1), len(data))]

        vidx = st.slider("Select sample video index", 0, len(data))
        video_id, question_id, question, answer, bbox, duration, orig_len = data[
            vidx]

        # Load raw video; load_video presumably writes temp.mp4 which is
        # displayed below — TODO confirm. The progress bar is cosmetic.
        pbar = st.progress(0.1)
        load_video(os.path.join(_IMG_DIR, video_id))
        pbar.progress(1.0)
        time.sleep(1)
        pbar.empty()

        # Display raw video
        st.subheader(video_id)
        with open('temp.mp4', 'rb') as f:
            st.video(f.read())

        # Ask question
        proposal = st.text_input("Ask question!", question)
        time.sleep(1)
        if proposal == "" or proposal == str(vidx):
            st.markdown("Suggestion: {}".format(question))
        else:
            ann_dir = os.path.join(_ANN_DIR, video_id, 'feat.pkl')
            # Close the pickle file deterministically (the original
            # `pkl.load(open(...))` leaked the handle).
            with open(ann_dir, 'rb') as ann_file:
                annos = pkl.load(ann_file)
            # Run inference
            pbar2 = st.progress(0.1)
            hypo = run_inference(os.path.join(_IMG_DIR, video_id), question_id)
            pbar2.progress(1.0)
            time.sleep(1)
            pbar2.empty()
            # Print result
            st.subheader("RESULT")
            with open('temp_annos.mp4', 'rb') as ff:
                st.video(ff.read())
            st.subheader("Question: {}".format(proposal))
            st.subheader("Answer: {}".format(hypo))
            # Print ground truth
            img = gt_result(video_id, bbox, duration, orig_len)
            # confusion_matrix(list(annos.keys()), 0)
            st.subheader("Correct Inference: {}".format(answer))
            st.image(img, channels="BGR", use_column_width=True)
def show_video():
    """Play the taxi dynamic-trajectory video for the selected time window."""
    st.title("Dynamic Trajectory")
    choice_list = ["Morning (7:00-10:00)", "Evening (18:00-21:00)"]
    choice = st.selectbox("Choose Time", choice_list)
    # Map the selection onto the corresponding pre-rendered clip.
    path = ('../resource/Taxi_Dynamic_Trajectory_Morning.mp4'
            if choice == choice_list[0]
            else '../resource/Taxi_Dynamic_Trajectory_Evening.mp4')
    st.video(get_video(path))
def display_stats(df):
    """Render the stats page: recent-requests table, inference-time summary,
    request-type charts, and a how-it-works video.

    Args:
        df: request-log DataFrame; this function uses columns 'shape',
            'message', 'timestampStr' (datetime-like), 'inp_type' and
            'inference_time' — schema inferred from usage here, confirm
            against the caller.
    """

    st.title('Stats for NERd')
    st.write("")
    # Normalize whitespace in the free-text columns before display.
    df['shape'] = df['shape'].str.strip()
    df['message'] = df['message'].str.strip()

    st.markdown('<h4>Recent requests</h4>', unsafe_allow_html=True)
    # Show the 10 most recent rows with human-readable timestamps.
    fmt = "%d-%m-%Y %H:%M:%S"
    styler = df.tail(10).style.format(
        {"timestampStr": lambda t: t.strftime(fmt)})
    st.table(styler)
    # Min/max are restricted to "Image" requests; str(...)[:5] truncates the
    # float for display instead of rounding.
    st.markdown(
        '<h3>Minimum Inference time observed so far (in seconds) : ' +
        str(df[df['inp_type'] == "Image"]['inference_time'].min())[:5] +
        '</h3>',
        unsafe_allow_html=True)
    st.markdown(
        '<h3>Maximum Inference time observed so far (in seconds) : ' +
        str(df[df['inp_type'] == "Image"]['inference_time'].max())[:5] +
        '</h3>',
        unsafe_allow_html=True)
    # NOTE(review): the average is computed over ALL request types, unlike the
    # min/max above which filter on "Image" — confirm this is intentional.
    st.markdown('<h3>Average Inference time observed so far (in seconds) : ' +
                str(sum(df['inference_time']) / len(df))[:5] + '</h3>',
                unsafe_allow_html=True)

    # Pie chart of request counts per input type (drawn on the implicit
    # matplotlib figure picked up by st.pyplot() below).
    df.inp_type.str.get_dummies().sum().plot.pie(label='Requests',
                                                 autopct='%1.0f%%')

    st.pyplot()
    st.write("")
    st.write("")

    # Stacked bar chart: requests per day broken down by input type,
    # keeping the last 9 distinct dates in first-seen order.
    df2 = df.copy()
    df2['dates'] = df['timestampStr'].apply(lambda x: x.strftime('%D')[:-3])
    d = df2.groupby(['dates', 'inp_type'])['inp_type'].size().unstack()
    unique_dates = df2['dates'].unique().tolist()
    d = d.reindex(unique_dates)
    d = d[-9:]
    d.plot(kind='bar', stacked=True, title='Request type by Day')
    st.pyplot()

    st.write("")
    st.write("")
    st.title('How it works?')
    st.write("")
    st.video('https://youtu.be/JKR4kX4P8Cw')

    st.write("")
    st.write("")
    st.markdown(
        "Link to Source code : [github](https://github.com/Kaushal-Chapaneri/cartoonify-streamlit)"
    )
def about():
    """Describe the Diagnoser demo: video walkthrough plus an HTML overview."""
    st.header('Working Demonstration:')
    st.video('images/diagnoser.mp4')
    st.header('How It Works:')
    description = '''
    Diagnoser is a fun Machine Learning project to diagnose the disease based on the symptoms.\n
    The tested diseases along with the symptoms are on the right text file. The dataset consists of 41 diseases with 17
    symptoms max. The total dataset comprises 5000 samples. The accuracy is around 90% (Validation/Test data). Along
    with the diagnosis of the disease, some basic precautions are also provided. The model is made of LSTM and Dense 
    layers.\n
    <strong>#LSTM #neuralnetworks #machinelearning #DiagnoserDiagnoser </strong>
    '''
    st.markdown(description, unsafe_allow_html=True)
def main():
    """Media showcase: text hierarchy plus images, an audio clip and a video."""
    st.title('Hello World')
    st.header('This is header')
    st.subheader('This is subheader')
    st.text('This is text')

    st.subheader('Imagens')
    for picture in ('0.jpeg', 'logo.png'):
        st.image(picture)

    st.subheader('Audio')
    st.audio('record.mov')

    st.subheader('Video')
    st.video('formaturaNicolas.mp4')
Beispiel #22
0
def main():
    """Text Analyzer entry point: routes between the welcome page, the word
    cloud generator, and the n-gram word counter via a sidebar selectbox."""
    st.title("**Text Analyzer v0.1**")
    app_mode = st.sidebar.selectbox('Navegation',['Bem Vindos','Word Cloud','Word Counter'])
    # Welcome page: project description, motivation and author info (Portuguese).
    if app_mode == 'Bem Vindos':
        st.markdown("***")
        st.markdown(
            "Projeto de desenvolvido durante a aceleração de Data Science da codenation.\n\n"
            "Para mais infocacoes sobre o projeto, segue o link do [GitHub](https://github.com/danvr/streamlit-text-analyzer).")
        st.video("https://www.youtube.com/watch?v=WQ2isQoHMR0")
        st.markdown("***")
        st.markdown("## **Motivação**")
        st.markdown(
        "O objetivo e resolver uma dor real que envolva análise de dados.\n\n"
        "Durante a fase de validação de um produto, e muito comum que sejam feitas pesquisas "\
        "de usuários para entender as verdadeiras dores. "\
        "O resultado desse processo normalmente gera uma massa de dados textuais, "\
        "onde o profissional de UX normalmente faz um processo manual de contagem de palavras e expressões.\n\n"
        "Essa fermenta se propõem a agilizar esse processo automatizando a contagem de palavras"\
        "de nuvem de palavras e contadores de palavras.\n\n"
        "Espero que seja útil mesmo estando em uma versão muito simplista, mas o foco é resolver o problema, feedbacks são bem vindos para continuar a melhorar"\
        "a ferramenta\n\n"
        "**Boa análise para todos!!**")
        st.markdown("***")
        st.markdown("## *Sobre o Autor*")
        st.markdown("## **Daniel Vieira, Cientista de Dados**\n\n"
        "*Fazendo o mundo melhor resolvendo problemas com dados.*\n\n"
        "* [Linkedin](https://www.linkedin.com/in/danielvieiraroberto/)\n\n"
        "* [Git Hub](https://github.com/danvr)")

    # Word Cloud page: usage instructions then the interactive generator.
    elif app_mode == 'Word Cloud':
        st.markdown("***")
        st.markdown("# Word Cloud")
        st.markdown(
            "Gerador interativo de Word Cloud(Nuvem de Plavras) onde o tamanho da palavra "\
            "é correspondente a frequência e relevância.\n\n")
        st.markdown("## **Como Usar**")
        st.markdown(
            "* Faça upload de arquivo .csv\n\n"
            "* Escolha a coluna\n\n"
            "* Obtenha insights!")        
        word_cloud_generetor()
    # Word Counter page: usage instructions then the n-gram counter.
    elif app_mode == 'Word Counter':
        st.markdown("***")
        st.markdown("# Word Counter")
        st.markdown("Contador de palavras parametrizado por número de n-gramas"\
        "(sequência continua de deternimado número itens ou palavras)")
        st.markdown("## **Como Usar**")
        st.markdown(
            "* Faça upload de arquivo .csv\n\n"
            "* Escolha a coluna\n\n"
            "* Obtenha insights!")    
        word_counter()
Beispiel #23
0
    def test_st_video_options(self):
        """Test st.video with options."""

        from streamlit.media_file_manager import _calculate_file_id

        payload = "\x11\x22\x33\x44\x55\x66".encode("utf-8")
        st.video(payload, format="video/mp4", start_time=10)

        element = self.get_delta_from_queue().new_element
        self.assertEqual(element.video.start_time, 10)
        # The data is served from the media endpoint; the URL embeds a
        # content-derived file id rather than inlining the bytes.
        self.assertTrue(element.video.url.startswith(STATIC_MEDIA_ENDPOINT))
        self.assertTrue(
            _calculate_file_id(payload, "video/mp4") in element.video.url)
Beispiel #24
0
    def test_st_video(self):
        """Test st.video."""
        # TODO(armando): generate real video data
        # For now it doesnt matter cause browser is the one that uses it.
        payload = "\x11\x22\x33\x44\x55\x66".encode("utf-8")
        st.video(payload)

        element = self.get_delta_from_queue().new_element
        # "ESIzRFVm" is base64.b64encode of the payload bytes above.
        self.assertEqual(element.video.data, "ESIzRFVm")
        self.assertEqual(element.video.format, "video/mp4")
Beispiel #25
0
def main():
    """Display a sample image, video and audio clip from the data/ directory."""
    img = Image.open('data/image_03.jpg')
    # Stretch the image to the full page width.
    st.image(img, use_column_width=True)

    # Remote images also work: pass a URL string to st.image().

    # Read media with context managers so the handles are closed
    # (the original used bare open(...).read() and leaked both handles).
    with open('data/secret_of_success.mp4', 'rb') as fh:
        st.video(fh.read())

    with open('data/song.mp3', 'rb') as fh:
        st.audio(fh.read())
Beispiel #26
0
def cs_display_media():
    """Render the 'Display Media' cheat-sheet section: an image, an audio demo
    and a video demo, each with its code snippet, plus a documentation link."""
    st.markdown(
        "<h5 style='text-align: center ; color: black;'>Rendered -- Display Media Section</h5>",
        unsafe_allow_html=True)
    st.markdown('***')

    st.markdown('**Image**')
    st.markdown("- **st.image**('_`path`_')")
    st.image('./brain.png', width=300)
    st.markdown('***')

    st.markdown('**Audio**')
    st.markdown("- **st.audio**(_`data`_)")
    audio_code = '''
     audio_file = open('./Media/Cornfield_Chase.wav', 'rb')
     audio_bytes = audio_file.read()
     st.audio(audio_bytes, format='audio/wav')
    '''
    st.code(audio_code)
    # Context manager closes the handle (the original leaked it; the snippet
    # shown above deliberately mirrors the simpler original form).
    with open('./Media/Cornfield_Chase.wav', 'rb') as audio_file:
        audio_bytes = audio_file.read()
    st.audio(audio_bytes, format='audio/wav')
    st.markdown(
        "<h6 style='text-align: center ;'>Source ~ Interstellar ✨( Cornfield Chase ) </h6>",
        unsafe_allow_html=True)

    st.markdown('***')
    st.markdown('**Video**')
    st.markdown("- **st.video**(_`data`_)")
    video_code = '''
    video_file = open('./Media/Star-6962.mp4', 'rb')
    video_bytes = video_file.read()
    st.video(video_bytes)

    '''
    st.code(video_code)
    # Same leak fix as the audio demo above.
    with open('./Media/Star-6962.mp4', 'rb') as video_file:
        video_bytes = video_file.read()
    st.video(video_bytes)
    st.markdown(
        "<h6 style='text-align: center ;'>Creator - fxxu, Source - Pixbay </h6>",
        unsafe_allow_html=True)
    st.markdown('***')

    info = st.beta_expander('Direct References')
    info.markdown(
        '''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32 style='float:left; vertical-align:middle'>](https://docs.streamlit.io/en/stable/api.html#display-media) <small style='color:black; font-size:16px;'>Link to the Official Documentation of this Section </small>'''
        .format(img_to_bytes("logo.png")),
        unsafe_allow_html=True)

    st.markdown('***')
Beispiel #27
0
def default():
    """Landing view for the Quora Bot demo: sample video plus an HTML blurb."""
    st.header('Working Demonstration:')
    st.video('Images/output.mp4')
    st.header('How is Works:')
    blurb = '''
            <p>Quora Bot is just a fun Scraping project to grab a relevant, more suitable answer for the asked
            question.</p>
            <p>Scraping is a process of extracting information from the web. Big Data plays an important role in Machine 
            learning. And it happens that you are in search of particular data, which is not easily available, then scraping 
            comes into play.</p>
            <h3><strong>#scraping</strong>  <strong>#selenium</strong>  <strong>#python</strong>  
            <strong>#machinelearning</strong></h3>
            '''
    st.markdown(blurb, unsafe_allow_html=True)
Beispiel #28
0
    def render(self):
        """Render this talk entry: title, speaker/affiliation, summary,
        optional video, and optional resources."""
        st.markdown(f"""\
            ### {self.title}

            **{self.speaker}** ({self.speaker_affiliation})
            """)

        st.info(f"**Summary:** {self.summary}")
        if self.video_url:
            # NOTE(review): when video_url IS set this shows the "not yet
            # uploaded" error AND plays the video — the condition or the error
            # branch looks inverted; confirm intended behavior with the author.
            st.error("Video not yet uploaded")
            st.video(self.video_url, start_time=30)

        if self.resources:
            st.warning(self.resources)
def step1_introduction():
    """Step 1: introduce CO2 emissions with a BBC clip and describe the dataset."""
    st.header("Step1: What is CO2 Emissions?")
    st.write(
        "What is CO2 Emissions? Why is it important? Let's watch a short introduction video from BBC!"
    )
    st.video("https://www.youtube.com/watch?v=rFw8MopzXdI&ab_channel=BBCNews")
    next_block()
    st.header("Dataset Description")
    dataset_md = '''
        This dataset comes from [WorldBank]
        (https://github.com/ZeningQu/World-Bank-Data-by-Indicators) It covers over 174 countries and 50 indicators.
    '''
    st.write(dataset_md)
    # The raw table is opt-in to keep the page light.
    if st.checkbox("Show Raw Data"):
        st.write(df)
Beispiel #30
0
def page():
    """Render the project home page: team header, hero image, demo video and
    the written project introduction."""
    st.header("Home")
    st.subheader(
        "Team: Marleah Puckett, Michael Wu,  Jonathan Wong, Christian Reyes, James Cheney"
    )
    st.write("""
	University of California, Berkeley
	""")
    # Static assets (images) shipped alongside the webpage modules.
    assets_path = 'webpages/webpage_assets'
    home_image = Image.open(f'{assets_path}/homeimg.png')
    st.image(
        home_image,
        caption=
        '"Float like a Cadillac, sting like a Beemer." - Lightning McQueen',
        use_column_width=True)

    # Project demo video.
    st.video("https://www.youtube.com/watch?v=JUTDc0dXWjA")

    # Long-form introduction rendered as one markdown block.
    st.write("""
	Hello and welcome to our 106a final project website. In this website, you
	will find a detailed report about our project. The menu on the side will
	introduce subsections of our report. They include an introduction of our
	whole project, design and implementation details, our results, and finally,
	some discussion of future work. We also have provided profiles and a rundown
	of team contributions as well as additional materials that might be of
	interest to you.

	### Mini Introduction
	Our project builds upon the existing ROAR platform, and it is designed to
	work in both simulation and in real vehicle (with a couple hyper parameter
	changes).

	**Our Goal:** Achieve the fastest lap in the Carla virtual environment.

	Our project is divided into 5 distinct areas:
	 - Real life PID controller
	 - Reinforcement Learning based PID Controller
	 - Ground Plane Detection + Roll Controller
	 - Stanley Controller
	 - Wheel Encoder Calibration and Tuning

	The first four areas focus on agents that we tried to implement and test
	in Carla's simulated environment. We use some of our agents to compete in the
	ROAR race. The last area of our project focuses on the simulation-to-reality
	pipeline, which will be important when we want to eventually port our controllers
	to the physical ROAR RC car. Our contribution of calibrating the car allows
	us to witness expected commands, i.e. the command to go straight will lead
	to the car actually going straight.
	""")