def write():
    """Used to write about page"""
    with st.spinner("Loading Home ..."):
        local_css("style.css")
        st.markdown(
            "<h1 style='text-align: left; color: black;font-size: 40px;'> Who Am I ? </h1>",
            unsafe_allow_html=True)

        img = Image.open('./Me.jpg')
        st.image(img, width=200)
        st.set_option('deprecation.showfileUploaderEncoding', False)
        st.write("""

                I am Anil Bhatt from India, a deep learning enthusiast who loves to try out practical applications of AI.
                I love reading, trekking and watching football. You can reach me through the links
                listed below. Thanks for checking in. Have a great day!
                - [Github](https://github.com/anilbhatt1)
                - [Linkedin](https://www.linkedin.com/in/anilkumar-n-bhatt/)
                """)
        for _ in range(2):
            st.text(' ')
        st.markdown(
            "<h1 style='text-align: center; color: white;font-size: 16px;'> ' Fool didn't know it was impossible, so he did it ! ' - Unknown </h1>",
            unsafe_allow_html=True)
def write():
    """ Deep Learning Model to create better resolution images from low resolution input images supplied """

    local_css("style.css")
    st.markdown(
        "<h1 style='text-align: center; color: black;font-size: 30px;'>Better Resolution images using SRGAN</h1>",
        unsafe_allow_html=True)
    st.write("""
             Image Super-Resolution (SR) refers to the process of recovering high-resolution (HR) images from low-resolution (LR) images.
             One promising approach to generating HR images is using DNNs with
             appropriate loss functions. In this app, we focus on one such method, termed SRGAN (Super-Resolution Generative Adversarial Network).
             Like other GANs, SRGAN uses generator and discriminator networks; however, the loss functions differ. SRGAN is based on the paper
             'Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network'. As the customized SRGAN model used here is trained
             mainly on 4 object classes - **birds, small quadcopters, large quadcopters and winged drones** - supplying images of these will
             yield better results. Real-life use cases include medical imaging, surveillance, etc. Github links and references are listed below.
             - [Arxiv link for 'Photo-Realistic Single Image Super-Resolution Using a GAN' paper](https://arxiv.org/abs/1609.04802)
             - [Github code reference for SRGAN](https://github.com/leftthomas/SRGAN)
             - [Github link for customized SRGAN model (used in this app)](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S8_SRGAN_Neural%20Transfer)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """)

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #SRGAN
    if st.checkbox("Upload image"):
        srgan_image()
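
# A minimal sketch (not the app's training code) of the perceptual loss the
# SRGAN paper describes: a VGG feature-space content loss plus a weighted
# adversarial loss. `vgg_features`, `discriminator`, `sr` (generated image)
# and `hr` (ground-truth image) are illustrative names; the discriminator is
# assumed to output sigmoid probabilities.
import torch
import torch.nn.functional as F

def srgan_generator_loss(vgg_features, discriminator, sr, hr, adv_weight=1e-3):
    # Content loss: MSE between VGG feature maps of generated and real images.
    content_loss = F.mse_loss(vgg_features(sr), vgg_features(hr))
    # Adversarial loss: push the discriminator to score SR images as real (1).
    d_out = discriminator(sr)
    adv_loss = F.binary_cross_entropy(d_out, torch.ones_like(d_out))
    return content_loss + adv_weight * adv_loss
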
def write():
    """Used to save the article links"""
    with st.spinner("Loading Articles ..."):
        local_css("style.css")
        st.markdown("<h1 style='text-align: center; color: black;font-size: 40px;'>Articles</h1>", unsafe_allow_html=True)
        st.text('')
        st.text('')
        st.set_option('deprecation.showfileUploaderEncoding', False)
        st.header('My Articles')
        st.write("""
                - [Understanding Object detection with YOLO](https://anilbhatt1.tech.blog/2020/07/03/understanding-object-detection-with-yolo/)

                - [CNN - Activation Functions, Global Average Pooling, Softmax, Negative Likelihood Loss](https://www.linkedin.com/pulse/cnn-activation-functions-global-average-pooling-softmax-n-bhatt/)

                - [Max-Pooling, Combining Channels using 1×1 convolutions, Receptive Field calculation](https://www.linkedin.com/pulse/max-pooling-combining-channels-using-11-convolutions-field-n-bhatt/)

                - [Convolutions - Work horse behind CNN](https://www.linkedin.com/pulse/convolutions-work-horse-behind-cnn-anilkumar-n-bhatt/)

                - [Understanding Receptive field in Computer Vision](https://www.linkedin.com/pulse/deep-learning-understanding-receptive-field-computer-n-bhatt/)

                - [How Computers classify objects in an image using Deep Learning](https://anilbhatt1.tech.blog/2020/01/30/how-computers-detect-objects-in-an-image-using-deep-learning/)

                """
                )
        st.header('Useful Articles (written by other authors)')
        st.write("""
                - [Differences between OpenCV, TF and PIL while reading and resizing images](https://towardsdatascience.com/image-read-and-resize-with-opencv-tensorflow-and-pil-3e0f29b992be)

                - [Understanding dimensions in PyTorch](https://towardsdatascience.com/understanding-dimensions-in-pytorch-6edf9972d3be)

                - [A Comprehensive Introduction to Different Types of Convolutions in Deep Learning](https://towardsdatascience.com/a-comprehensive-introduction-to-different-types-of-convolutions-in-deep-learning-669281e58215)

                 """
                )
Exemple #4
0
def write():
    """ Deep Learning NLP Model to translate the german text to english """

    local_css("style.css")
    st.markdown("<h1 style='text-align: center; color: black;font-size: 30px;'>German English Translator</h1>", unsafe_allow_html=True)
    st.write(
             """
             This app translates given input German text to English. It is based on 'Neural Machine Translation by Jointly Learning to Align and Translate' by Bahdanau et al. (2015).
             The app uses an encoder-decoder architecture with attention. Tokenization is done using spacy and the vocab is built using the IWSLT dataset.
             - [Code reference: the annotated encoder-decoder (bastings)](https://bastings.github.io/annotated_encoder_decoder/)
             """
             )

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #Translation
    if st.checkbox("Start Translation"):
        translate_text()
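
# A minimal sketch of the Bahdanau (additive) attention the translator
# description refers to: score each encoder state against the current decoder
# state, softmax into weights, and build a context vector. Dimensions are
# illustrative, not the deployed model's.
import torch
import torch.nn as nn

class BahdanauAttention(nn.Module):
    def __init__(self, enc_dim, dec_dim, attn_dim):
        super().__init__()
        self.attn = nn.Linear(enc_dim + dec_dim, attn_dim)
        self.v = nn.Linear(attn_dim, 1, bias=False)

    def forward(self, dec_hidden, enc_outputs):
        # dec_hidden: [batch, dec_dim]; enc_outputs: [batch, src_len, enc_dim]
        src_len = enc_outputs.shape[1]
        dec_rep = dec_hidden.unsqueeze(1).repeat(1, src_len, 1)
        energy = torch.tanh(self.attn(torch.cat((dec_rep, enc_outputs), dim=2)))
        scores = self.v(energy).squeeze(2)            # [batch, src_len]
        weights = torch.softmax(scores, dim=1)
        # Context vector: attention-weighted sum of encoder states.
        context = torch.bmm(weights.unsqueeze(1), enc_outputs).squeeze(1)
        return context, weights
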
def write():
    """ Deep Learning Model to swap the faces """

    local_css("style.css")
    st.markdown(
        "<h1 style='text-align: center; color: black;font-size: 40px;'>Face Swap</h1>",
        unsafe_allow_html=True)
    st.write("""
             The Face Swap app will merge the 2 faces given to it. The app uses opencv and the dlib-68-face-landmark detector, and works based on Delaunay triangulation.
             Practical applications include generating new faces that can be used for AI model training & testing and
             realistic image synthesis that can be shown to patients planning to undergo cosmetic surgery.
             - [Github link for model](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S3_Facial%20Landmark%20Detection_Alignment_Swap)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """)

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #Faceswap
    if st.checkbox("Upload faces to swap"):
        st.subheader("Upload the images")
        face_swap()
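
# A minimal sketch of the Delaunay step the face-swap description mentions:
# triangulate dlib's 68 landmarks with OpenCV's Subdiv2D. The helper name is
# illustrative, not the app's actual code.
import cv2
import numpy as np

def delaunay_triangles(image_shape, landmarks):
    """landmarks: iterable of (x, y) points; returns an (N, 3, 2) triangle array."""
    h, w = image_shape[:2]
    subdiv = cv2.Subdiv2D((0, 0, w, h))
    for x, y in landmarks:
        subdiv.insert((float(x), float(y)))
    # Each row of getTriangleList() is (x1, y1, x2, y2, x3, y3).
    return subdiv.getTriangleList().reshape(-1, 3, 2)
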
def write():
    """ Deep Learning NLP Model to analyse the sentiment from a given piece of text """

    local_css("style.css")
    st.markdown(
        "<h1 style='text-align: center; color: black;font-size: 30px;'>Sentiment Analysis</h1>",
        unsafe_allow_html=True)
    st.write("""
             This app returns the sentiment of an input review message. The 3 possible outcomes are 'Positive', 'Neutral' or 'Negative'.
             The app uses an NLP model built on CNNs. The model was trained on IMDB review data and hence works best for
             text given in the form of movie reviews. Tokenization is done using spacy, with a vocab of 25,000 words built using glove.6B.100d.
             - [Github code reference(bentrevett)](https://github.com/bentrevett/pytorch-sentiment-analysis/blob/master/4%20-%20Convolutional%20Sentiment%20Analysis.ipynb)
             - [Github link for customized NLP model (used in this app)](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/blob/master/S9_Neural_Embeddings/E4P2S9_Convolutional_Sentiment_Analysis_cpu.ipynb)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """)

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #Senti Analysis
    if st.checkbox("Start Sentiment Analysis"):
        senti_analysis()
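
# A minimal sketch of the convolutional text classifier family described
# above (after bentrevett's tutorial): parallel 1-D convolutions over word
# embeddings, max-pooled over time. Dimensions are illustrative; n_classes=3
# matches the Positive/Neutral/Negative outcomes.
import torch
import torch.nn as nn
import torch.nn.functional as F

class TextCNN(nn.Module):
    def __init__(self, vocab_size, emb_dim=100, n_filters=100,
                 filter_sizes=(3, 4, 5), n_classes=3):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.convs = nn.ModuleList(
            nn.Conv1d(emb_dim, n_filters, fs) for fs in filter_sizes)
        self.fc = nn.Linear(n_filters * len(filter_sizes), n_classes)

    def forward(self, tokens):                         # tokens: [batch, seq]
        emb = self.embedding(tokens).permute(0, 2, 1)  # [batch, emb, seq]
        # Max-pool each feature map over time, then concatenate.
        pooled = [F.relu(conv(emb)).max(dim=2).values for conv in self.convs]
        return self.fc(torch.cat(pooled, dim=1))
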
def write():
    """ Deep Learning Model to Align the input face image """

    local_css("style.css")
    st.markdown(
        "<h1 style='text-align: center; color: black;font-size: 40px;'>Face Align</h1>",
        unsafe_allow_html=True)
    st.write("""
             The Face Align app will align the face given to it as input. The app uses opencv and the dlib-68-face-landmark detector.
             Practical applications include aligning faces to make them front-facing for crime investigations and realistic
             image synthesis that can be used to train AI models for computer vision applications.
             - [Github link for model](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S3_Facial%20Landmark%20Detection_Alignment_Swap)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """)

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #Face-Align
    if st.checkbox("Upload face to Align"):
        st.subheader("Upload the image")
        face_align()
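
# A minimal sketch of one common alignment recipe consistent with the
# description above: rotate the image so the eye centers sit on a horizontal
# line. Not the app's actual code; `align_by_eyes` and the (68, 2) landmark
# array are illustrative, with indices following dlib's 68-point layout.
import cv2
import numpy as np

def align_by_eyes(image, landmarks):
    """image: BGR array; landmarks: (68, 2) array of dlib landmark points."""
    eye_a = landmarks[36:42].mean(axis=0)   # one eye region (points 36-41)
    eye_b = landmarks[42:48].mean(axis=0)   # other eye region (points 42-47)
    dy, dx = eye_b[1] - eye_a[1], eye_b[0] - eye_a[0]
    angle = np.degrees(np.arctan2(dy, dx))  # tilt of the eye line
    center = (float((eye_a[0] + eye_b[0]) / 2), float((eye_a[1] + eye_b[1]) / 2))
    # Rotate about the midpoint between the eyes so the eye line becomes level.
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    return cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
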
def write():
    """ Deep Learning NLP Model to predict the type of question given as input text """

    local_css("style.css")
    st.markdown("<h1 style='text-align: center; color: black;font-size: 30px;'>Multi-Question Classification</h1>", unsafe_allow_html=True)
    st.write(
             """
             This app will classify which category a particular input question belongs to. There are 6 question types: Human, Entity,
             Description, Location, Numerical & Abbreviation.
             The app uses an NLP model built on CNNs. The model was trained on the TREC dataset with the 6 labels mentioned above.
             Tokenization is done using spacy, with a vocab of 25,000 words built using glove.6B.100d.
             - [Github code reference(bentrevett)](https://github.com/bentrevett/pytorch-sentiment-analysis/blob/master/4%20-%20Convolutional%20Sentiment%20Analysis.ipynb)
             - [Github link for customized NLP model (used in this app)](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/blob/master/S9_Neural_Embeddings/E4P2S9_Convolutional_Sentiment_Analysis_cpu.ipynb)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """
             )

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #Multi-Qn Analysis
    if st.checkbox("Classify the question"):
        multiqn_analysis()
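
# A minimal inference sketch for the six-way TREC classifier described above.
# `model`, `vocab` (token -> index mapping) and the label order are
# illustrative assumptions, not the deployed app's artifacts.
import torch
import spacy

TREC_LABELS = ['Human', 'Entity', 'Description', 'Location',
               'Numerical', 'Abbreviation']   # assumed label order
nlp = spacy.load('en_core_web_sm')

def classify_question(model, vocab, question):
    tokens = [tok.text for tok in nlp.tokenizer(question)]
    ids = torch.LongTensor([vocab[t] for t in tokens]).unsqueeze(0)  # [1, seq]
    with torch.no_grad():
        logits = model(ids)                   # [1, 6] class scores
    return TREC_LABELS[logits.argmax(dim=1).item()]
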
def write():
    """ Deep Learning Model to generate images of Indian car from random vector supplied """

    local_css("style.css")
    st.markdown(
        "<h1 style='text-align: center; color: black;font-size: 40px;'>Car Image Generation using GAN</h1>",
        unsafe_allow_html=True)
    st.write("""
             Generative Adversarial Networks (GANs) are neural networks that are trained in an adversarial manner to generate data by mimicking some
             distribution. GANs comprise two neural networks, pitted one against the other (hence the name adversarial). GANs have a vast range of
             applications, including generating examples for image datasets, photographs of human faces, realistic photographs, cartoon characters,
             drug research, etc. Here, a GAN is employed to generate images of Indian cars from input vector values. DCGAN is the type of GAN used.
             - [Reference link for approach followed](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S6_GAN/Model%20Weights/Yangyangii%20Cars%20Example%20Github)
             - [Github link for model](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S6_GAN)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """)

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #GAN
    if st.checkbox("Car Image generation for given range of values"):
        gp.explore()
    if st.checkbox("Random Car Image generation"):
        gp.generate()
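
# A minimal sketch of a DCGAN-style generator like the one described above:
# transposed convolutions upsample a latent vector into an image. Layer sizes
# are illustrative, not the trained car model's.
import torch
import torch.nn as nn

class DCGANGenerator(nn.Module):
    def __init__(self, z_dim=100, channels=3, base=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.ConvTranspose2d(z_dim, base * 4, 4, 1, 0),    # 1x1 -> 4x4
            nn.BatchNorm2d(base * 4), nn.ReLU(True),
            nn.ConvTranspose2d(base * 4, base * 2, 4, 2, 1), # 4x4 -> 8x8
            nn.BatchNorm2d(base * 2), nn.ReLU(True),
            nn.ConvTranspose2d(base * 2, base, 4, 2, 1),     # 8x8 -> 16x16
            nn.BatchNorm2d(base), nn.ReLU(True),
            nn.ConvTranspose2d(base, channels, 4, 2, 1),     # 16x16 -> 32x32
            nn.Tanh())                                       # pixels in [-1, 1]

    def forward(self, z):                    # z: [batch, z_dim]
        return self.net(z.view(z.size(0), -1, 1, 1))
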
def write():
    """ Deep Learning Model to estimate the pose of input human image """

    local_css("style.css")
    st.markdown(
        "<h1 style='text-align: center; color: black;font-size: 40px;'>Human Pose Estimation (Images)</h1>",
        unsafe_allow_html=True)
    st.write("""
             Human pose estimation (HPE) is the process of estimating the configuration of the body (pose) from a single, typically monocular, image.
             It has many applications, such as action/activity recognition, action detection, human tracking, movies and animation,
             virtual reality, human-computer interaction, video surveillance, medical assistance, self-driving, sports motion analysis, etc.
             The HPE Image app will give back the pose of the input image with the human joints connected. The app uses a quantized ONNX ResNet model trained based on the
             'Simple Baselines for Human Pose Estimation and Tracking' paper.
             - [Arxiv link for the 'Simple Baselines for Human Pose Estimation and Tracking' paper](https://arxiv.org/pdf/1804.06208.pdf)
             - [Github link for model](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S5_Human_Pose_Estimation)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """)

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #Human Pose Estimation
    if st.checkbox("Upload image to estimate pose"):
        st.subheader("Upload the image")
        hpe_image()
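
# A minimal sketch of the inference step implied above: run the quantized
# ONNX pose model with onnxruntime and read joint locations off the predicted
# heatmaps. The model path, input layout and heatmap output are assumptions.
import numpy as np
import onnxruntime as ort

def estimate_pose(model_path, image_chw):
    """image_chw: preprocessed float32 array of shape (3, H, W)."""
    session = ort.InferenceSession(model_path)
    input_name = session.get_inputs()[0].name
    # Output: one heatmap per joint, shape (num_joints, h, w).
    heatmaps = session.run(None, {input_name: image_chw[np.newaxis]})[0][0]
    # Each joint's prediction is the peak of its heatmap; note the coordinates
    # are in heatmap space, so scale them up before drawing on the image.
    peaks = [np.unravel_index(hm.argmax(), hm.shape) for hm in heatmaps]
    return [(int(x), int(y)) for y, x in peaks]
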
def write():
    """ Deep Learning Model to reconstruct images of Indian car from input image supplied """

    local_css("style.css")
    st.markdown(
        "<h1 style='text-align: center; color: black;font-size: 40px;'>Car Image Reconstruction using VAE</h1>",
        unsafe_allow_html=True)
    st.write("""
             Variational Auto-encoders (VAEs) are a special species of Auto-encoders (AEs). An AE typically has an encoder and a decoder network.
             The encoder network creates a latent vector/bottleneck from the given input image. The decoder network takes the bottleneck and reconstructs the image.
             Use cases of auto-encoders include denoising images, reconstructing poor-resolution images, etc.
             However, auto-encoders can't seamlessly interpolate between classes. This is where VAEs come into the picture.
             A VAE's latent space is, by design, continuous, allowing easy random sampling and interpolation.
             Instead of predicting a point as vanilla auto-encoders do, a VAE predicts a cloud of points.
             Here, a VAE is employed to reconstruct the image of an input Indian car.
             - [Github link for model](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S7_VAE)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """)

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #VAE
    if st.checkbox("Upload image to reconstruct"):
        vae_image()
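
# A minimal sketch of the "cloud of points" idea described above: a VAE
# encoder predicts a mean and log-variance per image, and the
# reparameterization trick samples a latent vector while keeping gradients
# flowing. Illustrative, not the trained model's code.
import torch

def reparameterize(mu, logvar):
    """Sample z ~ N(mu, sigma^2) in a differentiable way."""
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std

def kl_divergence(mu, logvar):
    """KL(N(mu, sigma^2) || N(0, 1)): keeps the latent space continuous."""
    return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
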
def write():
    """Used to write the page in the main_app.py file"""
    with st.spinner("Loading Home ..."):
        local_css("style.css")
        st.markdown(
            "<h1 style='text-align: center; color: black;font-size: 40px;'>Neural Eyes</h1>",
            unsafe_allow_html=True)
        st.text('')
        st.text('')
        #        if "DYNO" in os.environ:
        #            st.text('Running in Heroku')

        st.set_option('deprecation.showfileUploaderEncoding', False)
        st.write("""

                **Neural Eyes** is a deep-learning webapp focused on computer-vision applications. The deep-learning models used are trained in
                pytorch. The web-app is written using streamlit and hosted on heroku. You can find the **github** links in the respective sections. Thank you
                for checking in!

                """)
        st.markdown(
            "<h1 style='text-align: center; color: black;font-size: 20px;'> Thx for visiting...Please select an option for Neural Eyes to predict from side navigation bar</h1>",
            unsafe_allow_html=True)
def write():
    """ Deep Learning Model to predict flying object """

    local_css("style.css")
    st.markdown("<h1 style='text-align: center; color: black;font-size: 40px;'>Flying Object Prediction</h1>", unsafe_allow_html=True)
    st.write(
             """
             A deep-learning app to help predict the flying objects in our skies. The **Mobilenet** model used here is trained on 4 objects:
             **flying birds, large quadcopters, small quadcopters and winged drones**. The app can be enhanced by training the model on
             more flying objects. Practical applications include distinguishing between innocuous flying objects and inimical ones based
             on context. For example, a flying bird over a football stadium may be innocuous, whereas the same flying bird would be inimical
             over an airport runway.
             - [Github link for model](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S2_Mobilenet_QuadCopters_Lambda)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """
             )
    st.text('')

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #Mobilenet
    if st.checkbox("Predict flying objects"):
        st.subheader("Predicting flying objects using mobilenet")
        flying_object_classify()
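
# A minimal classification sketch consistent with the description above:
# a MobileNet fine-tuned for the four flying-object classes. The class order
# and preprocessing values are assumptions; the caller supplies the
# fine-tuned model.
import torch
from PIL import Image
from torchvision import transforms

CLASSES = ['flying bird', 'large quadcopter', 'small quadcopter', 'winged drone']

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

def classify_flying_object(model, image_path):
    x = preprocess(Image.open(image_path).convert('RGB')).unsqueeze(0)
    model.eval()
    with torch.no_grad():
        logits = model(x)                    # [1, 4] class scores
    return CLASSES[logits.argmax(dim=1).item()]
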
def write():
    """ Deep Learning Model to caption input images supplied """

    local_css("style.css")
    st.markdown(
        "<h1 style='text-align: center; color: black;font-size: 30px;'>Image Captioning</h1>",
        unsafe_allow_html=True)
    st.write("""
             Image Captioning refers to the generation of a descriptive caption for an input image.
             The model implemented here is based on the 'Show, Attend and Tell' paper. The idea is to give an image to the model, and the model predicts a caption based on the
             image. An encoder-decoder architecture with attention is used. The encoder used to encode the input image is Resnet-18. These encodings
             are fed to the decoder, whose job is to generate the caption from the encodings. The decoder is an LSTM-based network that uses an
             attention mechanism. The attention mechanism helps the LSTM focus on
             specific parts of the input image (encodings) based on the weight given to each pixel. The model is trained on the flickr8k dataset.
             - [Arxiv link for 'Show, Attend and Tell' Paper](https://arxiv.org/abs/1502.03044)
             - [Original Github code reference](https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Image-Captioning)
             - [Github link for customized model (used in this app)](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2/tree/master/S12_Image_Captioning_and_text_to_images)
             - [Github link for webapp](https://github.com/anilbhatt1/Deep_Learning_EVA4_Phase2_webapp)
             """)

    st.set_option('deprecation.showfileUploaderEncoding', False)
    #Image Captioning
    if st.checkbox("Upload image"):
        imgcaption_image()
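
# A minimal sketch of the soft attention step 'Show, Attend and Tell'
# describes: the decoder weights each spatial encoding of the image before
# predicting the next caption word. Dimensions are illustrative.
import torch
import torch.nn as nn

class SpatialAttention(nn.Module):
    def __init__(self, enc_dim, dec_dim, attn_dim):
        super().__init__()
        self.enc_proj = nn.Linear(enc_dim, attn_dim)
        self.dec_proj = nn.Linear(dec_dim, attn_dim)
        self.score = nn.Linear(attn_dim, 1)

    def forward(self, enc_out, dec_hidden):
        # enc_out: [batch, num_pixels, enc_dim]; dec_hidden: [batch, dec_dim]
        energy = torch.tanh(self.enc_proj(enc_out)
                            + self.dec_proj(dec_hidden).unsqueeze(1))
        alpha = torch.softmax(self.score(energy).squeeze(2), dim=1)
        # Context: attention-weighted sum over image locations; alpha shows
        # which parts of the image the LSTM is focusing on.
        context = (enc_out * alpha.unsqueeze(2)).sum(dim=1)
        return context, alpha
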