def prediction(net):
    # Draw or clear?
    option = st.radio('Select option: ', ('Draw', 'Clear'))

    if (option == 'Draw'):
        st.write("Draw a digit below and click on Predict button.")
    else:
        st.write(
            "Double click on the digit or refresh the page to clear the digit."
        )

    # Create a canvas component (positional args in this older API:
    # brush width, brush color, background color)
    image_data = st_canvas(15,
                           '#FFF',
                           '#000',
                           height=280,
                           width=280,
                           drawing_mode=(option == 'Draw'),
                           key="canvas")

    # Predicting the image
    if image_data is not None:
        if st.button('Predict'):
            # Model inference
            digit, confidence = predictDigit(image_data, net)
            st.write('Recognized Digit: {}'.format(digit))
            st.write('Confidence: {:.2f}'.format(confidence))
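Several of these examples call a predictDigit(image_data, net) helper that is never shown. A minimal sketch of what such a helper might look like, assuming net is a PyTorch module trained on 28x28 MNIST-style input (the preprocessing and the softmax confidence below are assumptions, not the original project's code):

import cv2
import numpy as np
import torch
import torch.nn.functional as F

def predictDigit(image_data, net):
    # Hypothetical helper: convert the RGBA canvas array to a 28x28
    # grayscale tensor, run the network, return (digit, confidence).
    gray = cv2.cvtColor(image_data.astype(np.uint8), cv2.COLOR_RGBA2GRAY)
    gray = cv2.resize(gray, (28, 28)).astype(np.float32) / 255.0
    x = torch.from_numpy(gray).reshape(1, 1, 28, 28)
    with torch.no_grad():
        probs = F.softmax(net(x), dim=1).squeeze(0)
    digit = int(probs.argmax())
    return digit, float(probs[digit])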
Example #2
def prediction(net):
    # Draw or clear?
    #drawing_mode = st.checkbox("Draw or clear?",True)
    drawing_mode = "freedraw"

    # Create a canvas component
    #image_data = st_canvas(
    #    15, '#FFF', '#000', height=280,width=280, drawing_mode=drawing_mode, key="canvas"
    #)
    canvas_result = st_canvas(stroke_width=15,
                              stroke_color='#FFF',
                              background_color='#000',
                              height=280,
                              width=280,
                              drawing_mode=drawing_mode,
                              key="canvas")

    # Predicting the image
    if canvas_result.image_data is not None:
        if st.button('Predict'):
            # Model inference
            image = canvas_result.image_data
            #print("[INFO] image shape = ",image.shape)
            digit, confidence = predictDigit(image, net)
            print('[INFO]', ' Recognized Digit: {}'.format(digit),
                  ' Confidence: {:.2f}'.format(confidence))
            st.write('Recognized Digit: {}'.format(digit))
            st.write('Confidence: {:.2f}'.format(confidence))
Example #3
def run():
    # TITLE & INITIAL TEXT
    st.title("Operations Planning")
    st.set_option("deprecation.showfileUploaderEncoding", False)
    st.markdown(
        """
    Draw surface operations on the canvas, and view diagrams below!
    * Double-click to remove the selected object when not in drawing mode
    """
    )

    # DRAWING TOOLS
    st.sidebar.header("Draw")
    drawing_mode = st.sidebar.selectbox(
        "Drawing tool:", ("freedraw", "line", "rect", "circle", "transform")
    )
    stroke_width = st.sidebar.slider("Stroke width: ", 1, 25, 3)
    stroke_color = st.sidebar.beta_color_picker("Stroke color hex: ")

    # BACKGROUND IMAGES
    st.sidebar.header("Background Maps")
    op = st.sidebar.slider('Opacity', 0, 100, 100, key='Layer 1')
    bgImageName = st.sidebar.selectbox("Select from local lunar maps", imageNames)
    bgImage = copy.copy(images[bgImageName])
    bgImage.putalpha(int(255 * (op / 100)))
    
    #bgImage = st.sidebar.file_uploader("Add background image:", type=["png", "jpg"])
    
    #realtimeUpdate = st.sidebar.checkbox("Update in realtime", True)

    # Create a canvas component
    canvas_result = st_canvas(
        fill_color="rgba(255, 165, 0, 0.3)",  # Fixed fill color with some opacity
        stroke_width=stroke_width,
        stroke_color=stroke_color,
        background_image= bgImage,
        #update_streamlit=realtimeUpdate,
        height=840,
        width=1120,
        drawing_mode=drawing_mode,
        key="canvas",
    )

    col1, col2 = st.beta_columns(2)
    col1 = canvas_result

    # Display standalone diagrams
    st.header('Diagram View')
    st.write('Your standalone diagram will appear in the space below.')

    # Do something interesting with the image data and paths
    if canvas_result.image_data is not None:
        st.image(canvas_result.image_data)
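Example #3 only displays the raw bitmap. The component also returns the drawn shapes as vector objects in json_data, which later examples parse with pandas; a small sketch of inspecting them (added here for illustration, not part of the original app):

import pandas as pd

if canvas_result.json_data is not None:
    objects = pd.json_normalize(canvas_result.json_data["objects"])
    if not objects.empty:
        # Keep only the columns that are present for the drawn object types
        cols = [c for c in ("type", "left", "top", "width", "height") if c in objects.columns]
        st.dataframe(objects[cols])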
Example #4
def main():
    page_bg_img = '''
    <style>
    body {
    background-image: url("https://i.pinimg.com/originals/85/6f/31/856f31d9f475501c7552c97dbe727319.jpg");
    background-size: cover;
    }
    </style>
    '''
    st.markdown(page_bg_img, unsafe_allow_html=True)
    download_model()
    ################ load logo from web #########################
    # st.markdown('<style>body{color: White; background-color: DarkSlateGrey}</style>', unsafe_allow_html=True)

    st.title('Riconoscitore di numero')
    st.markdown('''
    Disegna un numero!
    ''')

    # data = np.random.rand(28,28)
    # img = cv2.resize(data, (256, 256), interpolation=cv2.INTER_NEAREST)

    SIZE = 192
    mode = st.checkbox("Draw (or Delete)?", True)
    canvas_result = st_canvas(fill_color='#000000',
                              stroke_width=20,
                              stroke_color='#FFFFFF',
                              background_color='#000000',
                              width=SIZE,
                              height=SIZE,
                              drawing_mode="freedraw" if mode else "transform",
                              key='canvas')

    if canvas_result.image_data is not None:
        img = cv2.resize(canvas_result.image_data.astype('uint8'), (28, 28))
        rescaled = cv2.resize(img, (SIZE, SIZE),
                              interpolation=cv2.INTER_NEAREST)
        st.write('Model Input')
        st.image(rescaled)

    if st.button('Predict') and canvas_result.image_data is not None:
        session = onnxruntime.InferenceSession("mnist.onnx")
        test_x = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        test_x.resize((1, 28, 28, 1))
        data = json.dumps({'data': test_x.tolist()})
        data = np.array(json.loads(data)['data']).astype('float32')
        input_name = session.get_inputs()[0].name
        output_name = session.get_outputs()[0].name
        result = session.run([output_name], {input_name: data})
        prediction = int(np.argmax(np.array(result).squeeze(), axis=0))
        st.write(f'result: {prediction}')
        st.bar_chart(np.array(result).squeeze())
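Example #4 expects a local mnist.onnx file whose input is laid out NHWC, i.e. shape (1, 28, 28, 1). When wiring a different export to the same canvas code, it can help to confirm the input name, shape and element type first (a small check, not part of the original snippet):

import onnxruntime

session = onnxruntime.InferenceSession("mnist.onnx")
inp = session.get_inputs()[0]
out = session.get_outputs()[0]
# Prints the expected input/output names, shapes and element types
print(inp.name, inp.shape, inp.type)
print(out.name, out.shape, out.type)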
Example #5
def prediction(net):   
    # Draw or clear?
    drawing_mode = st.checkbox("Draw or clear?",True)

    # Create a canvas component
    image_data = st_canvas(
        15, '#FFF', '#000', height=280,width=280, drawing_mode=drawing_mode, key="canvas"
    )

    # Predicting the image
    if image_data is not None:
        if st.button('Predict'):
            # Model inference
            digit, confidence = predictDigit(image_data,net)
            st.write('Recognized Digit: {}'.format(digit))
            st.write('Confidence: {:.2f}'.format(confidence))
Example #6
def main():
    page_bg_img = '''
    <style>
    body {
    background-image: url("https://i.pinimg.com/originals/85/6f/31/856f31d9f475501c7552c97dbe727319.jpg");
    background-size: cover;
    }
    </style>
    '''
    st.markdown(page_bg_img, unsafe_allow_html=True)  
    download_model()
    ################ load logo from web #########################
    model = load_model('mnist.h5')
    # st.markdown('<style>body{color: White; background-color: DarkSlateGrey}</style>', unsafe_allow_html=True)

    st.title('My Digit Recognizer')
    st.markdown('''
    Try to write a digit!
    ''')

    # data = np.random.rand(28,28)
    # img = cv2.resize(data, (256, 256), interpolation=cv2.INTER_NEAREST)

    SIZE = 192
    mode = st.checkbox("Draw (or Delete)?", True)
    canvas_result = st_canvas(
        fill_color='#000000',
        stroke_width=20,
        stroke_color='#FFFFFF',
        background_color='#000000',
        width=SIZE,
        height=SIZE,
        drawing_mode="freedraw" if mode else "transform",
        key='canvas')

    if canvas_result.image_data is not None:
        img = cv2.resize(canvas_result.image_data.astype('uint8'), (28, 28))
        rescaled = cv2.resize(img, (SIZE, SIZE), interpolation=cv2.INTER_NEAREST)
        st.write('Model Input')
        st.image(rescaled)

    if st.button('Predict') and canvas_result.image_data is not None:
        test_x = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        val = model.predict(test_x.reshape(1, 28, 28))
        st.write(f'result: {np.argmax(val[0])}')
        st.bar_chart(val[0])
def run():
    st.title("Write a number")

    canvas_result = st_canvas(
        stroke_width=25,
        stroke_color="#fff",
        background_color="#000",
        height=280,
        width=280,
        drawing_mode="freedraw",
        key="canvas",
    )

    if st.button("Predict"):
        if canvas_result.image_data is not None and canvas_result.image_data[:, :, :3].sum() > 0:
            output = number_prediction(canvas_result.image_data[:, :, 0])
            st.success(f'The output is {output}')
        else:
            st.error("Please draw a number")
def input_description(label):
    """Create Menu for user input includes
    'Write' - Custom text message
    'Draw' - Sketch field by Streamlit Component
    'Upload Image' - Insert a png, jpg, or jpeg image that is displayed
    """
    input_methods = ["Write","Draw","Upload image"]
    input_options = []
    #Load Images for input_description function
    
    @st.cache
    def load_image(image_file):
        """Display images using Pillow that 
        have been added via the streamlit file_uploader
        using the input_description function"""
        img = Image.open(image_file)
        return img
        
    with st.beta_expander(label):
        input_methods_cols = st.beta_columns(3)
        for ind,inputs in enumerate(input_methods):
            input_options.append(input_methods_cols[ind].checkbox(inputs))
        if input_options[0]:
            st.text_area("Write a description:",key="write_area")
        if input_options[1]:
            #Provide drawing canvas
            draw_cols = st.beta_columns(2)
            stroke_width = draw_cols[0].number_input("Stroke width: ", 1, 6, 3)
            stroke_color = draw_cols[1].color_picker("Stroke color: ")
            canvas_result = st_canvas(
                fill_color="rgba(255, 165, 0, 0.3)",  # Fixed fill color with some opacity
                stroke_width=stroke_width,
                stroke_color=stroke_color,
                update_streamlit=False,
                height=300,
                drawing_mode="freedraw",
                key="canvas")
        if input_options[2]:
            st.subheader("Custom image:")
            image_file = st.file_uploader("Upload Images",
                type=["png","jpg","jpeg"])
            if image_file is not None:
                st.image(load_image(image_file),use_column_width=True)
Example #9
def main():
    layout.footer()
    st.title("MNIST Number Prediction")
    left_column, right_column = st.beta_columns(2)
    PATH = "./mnist_cnn.pt"
    model = Net()
    # model = torch.load(PATH)
    model.load_state_dict(torch.load(PATH))
    # model.eval()
    # st.write(model.eval())

    # Create a canvas component
    with left_column:
        st.header("Draw a number")
        st.subheader("[0-9]")
        canvas_result = st_canvas(
                fill_color="rgb(0, 0, 0)",  # Fixed fill color with some opacity
                # stroke_width="1, 25, 3",
                stroke_width = 10,
                stroke_color="#FFFFFF",
                background_color="#000000",
                update_streamlit=True,
                width=224,
                height=224,
                drawing_mode="freedraw",
                key="canvas",
        )

    # Do something interesting with the image data and paths
    if canvas_result.image_data is not None:
        img = canvas_result.image_data
        grey = rgb2gray(img)
        grey = zoom(grey, 0.125)
        x_np = torch.from_numpy(grey).unsqueeze(0) #
        x = x_np.unsqueeze(0)
        x = x.float()
        output = model(x)
        pred = torch.max(output, 1)
        pred = pred[1].numpy()
        with right_column:
            st.header("Predicted Result")
            st.title(str(pred[0]))
Example #10
File: app.py Project: ctgk/bayesian
def get_xy_from_canvas(stroke_color: str, action: str, for_regression: bool):
    col1, col2 = st.beta_columns([2, 1])
    with col1:
        canvas = st_canvas(
            stroke_width=10, stroke_color=stroke_color, update_streamlit=True,
            background_image=get_background(),
            drawing_mode='circle' if 'add' in action.lower() else 'transform',
            width=WIDTH, height=HEIGHT, key='canvas')
    points = [p for p in canvas.json_data['objects'] if p['type'] == 'circle']
    x_train = [2 * p['left'] / WIDTH - 1 for p in points]
    y_train = [1 - 2 * p['top'] / HEIGHT for p in points]
    if not for_regression:
        x_train = [[x, y] for x, y in zip(x_train, y_train)]
        y_train = [int(p['stroke'] == 'yellow') for p in points]
    with col2:
        st.dataframe(dict(
            **({'x1': [x[0] for x in x_train], 'x2': [x[1] for x in x_train]}
                if not for_regression else {'x': x_train}),
            **{'y': y_train}))
    return x_train, y_train
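A hypothetical wiring for get_xy_from_canvas above (widget labels and option strings are assumptions; the function itself only checks for 'add' in the action text and for a 'yellow' stroke):

action = st.sidebar.selectbox("Action", ("Add points", "Move points"))
stroke_color = st.sidebar.selectbox("Point color", ("yellow", "blue"))
for_regression = st.sidebar.checkbox("Regression task", False)
x_train, y_train = get_xy_from_canvas(stroke_color, action, for_regression)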
def prediction(net):
    # Draw or clear?
    drawing_mode = 'freedraw' if st.checkbox("Draw or clear?",
                                             True) else 'transform'

    # Create a canvas component
    result = st_canvas(stroke_width=15,
                       stroke_color='#FFF',
                       background_color='#000',
                       height=280,
                       width=280,
                       drawing_mode=drawing_mode,
                       key="canvas")

    # Predicting the image
    if result.image_data is not None:
        if st.button('Predict'):
            # Model inference
            digit, confidence = predictDigit(result.image_data, net)
            st.write('Recognized Digit: {}'.format(digit))
            st.write('Confidence: {:.2f}'.format(confidence))
Example #12
def draw():
    b_width = st.sidebar.slider('Brush Width: ', 1, 50, 10)
    realtime = st.sidebar.checkbox('Update in realtime', True)

    image_data = st_canvas(
        brush_width=b_width, 
        brush_color='#FFF',
        background_color='#000',
        height=280,
        width=280,
        drawing_mode=True,
        key="canvas"
    )
    try:
        arr = np.array(image_data, dtype=np.uint8)
        img = Image.fromarray(arr)
        img = img.convert('L')
        img = img.resize((28, 28))
        img.save('test.jpg')
        arr, ans = pred.prediction()
        return ans
    except Exception:
        pass
Example #13
	firebase = pyrebase.initialize_app(config)
	storage = firebase.storage()
	st.markdown("## **Enter Medicine Names & Dosage Per Day** :page_with_curl:")
	st.text('(Optional Section)')
	st.text('Please wait a few seconds for page to load.')
	patient_name = st.selectbox('Select Patient:',['user1','user2','user3'])
	medicines = st.text_area("","Medicine1 - A Days")
	
	appointment = st.date_input('Next Appointment Date')
	

	
	with st.spinner('Loading Components...'):
		st.subheader('Your signature goes here... :black_nib:')
		sign = st_canvas(fill_color="rgba(255, 165, 0, 0.3)",  
	                           stroke_width=3,stroke_color='#008FFF',background_color="#FFFF99",
	                           height=150,drawing_mode='freedraw',key="canvas")
		sig = sign.image_data
		sig = sig[:,:,:3]
		sig = np.uint8(sig)
		
		sig = Image.fromarray(sig)
		#sig = np.float32(sig)
		final_sig = sig.resize((150,40), Image.ANTIALIAS)
		final_sig2 = np.array(final_sig)
		#st.text(final_sig.shape)
		#st.text(np.squeeze(sign.image_data).shape)
		img = Image.new('RGB', (500, 300), color = (73, 109, 137))
		d = ImageDraw.Draw(img)
		text = f"""{medicines}"""
		font= ImageFont.truetype("patient_images/arial.ttf",15)
Example #14
        image = PitchImage(pitch, image_bytes=image_to_open)


    st.title('Pitch lines')

    lines_expander = st.beta_expander('Draw pitch lines on selected image (2 horizontal lines, then 2 vertical lines)',
                                      expanded=True)
    with lines_expander:
        col1, col2, col_, col3 = st.beta_columns([2,1,0.5,1])

        with col1:
            canvas_image = st_canvas(
                fill_color = "rgba(255, 165, 0, 0.3)", 
                stroke_width = 2,
                stroke_color = '#e00',
                background_image = image.im,
                width = image.im.width,
                height = image.im.height,
                drawing_mode = "line",
                key = "canvas",
            )

        with col2:
            line_seq = ['UP','DP','RPA', 'RG']
            h_line_options = list(pitch.horiz_lines.keys())
            v_line_options = list(pitch.vert_lines.keys())

            hlines = [st.selectbox(f'Horizontal Line #{x+1}', h_line_options,
                      key=f'hline {x}', index=h_line_options.index(line_seq[x]))
                     for x in range(2)]
            vlines = [st.selectbox(f'Vertical Line #{x+1}', v_line_options,
                      key=f'vline {x}', index=v_line_options.index(line_seq[x+2]))
Example #15
col21, col22 = st.beta_columns(2)

with col22:
    st.text(".. then click the button ")
    update_button = st.button('Classify Digit')

with col21:
    st.text("draw in the canvas ..")
    # Create a canvas component
    canvas_result = st_canvas(
        fill_color="rgba(255, 165, 0, 0.3)",  # Fixed fill color with some opacity
        stroke_width=stroke_width,
        stroke_color=stroke_color,
        background_color= bg_color,
        background_image=None,
        update_streamlit=update_button,
        height=280,
        width=280,
        drawing_mode=drawing_mode,
        key="canvas",
    )

# Do something interesting with the image data
if (canvas_result.image_data is not None):
    scaled_img = canvas_result.image_data[::10,::10,:]

    dnn_img = scaled_img[:,:,0]
    dnn_img = dnn_img/255.0
    # Invert/binarize: pixels that were 0 map to 1.0, any drawn (non-zero) pixel maps to 0.0
    dnn_img += 1.0
    dnn_img[dnn_img>1.0] = 0.0
    result = model.predict(dnn_img.reshape([1, 28,28,1]))[0]
Example #16
    orig_image = np.array(Image.open(bg_image))
    image = np.uint8(np.stack((orig_image, ) * 3, axis=-1))
    mask_red = np.zeros(image.shape)
    mask_red[:, :] = (255, 0, 0)
    mask_red = np.uint8(mask_red * gt_mask[:, :, np.newaxis])
    image[mask_red >
          0] = image[mask_red > 0] * 0.5 + mask_red[mask_red > 0] * 0.5
    image = Image.fromarray(image)

# Create a canvas component
canvas_result = st_canvas(
    fill_color="rgba(255, 165, 0, 0.3)",  # Fixed fill color with some opacity
    stroke_width=stroke_width,
    stroke_color=stroke_color,
    background_color="",
    background_image=image,
    update_streamlit=realtime_update,
    height=512,
    width=512,
    drawing_mode=drawing_mode,
    key="canvas",
)

if canvas_result.json_data is not None and bg_image is not None and gt_mask_file is not None:
    results = canvas_result.json_data["objects"]
    mask = np.zeros((1024, 1024), dtype=bool)
    for res in results:

        mask[int(res["top"]) * 2:(int(res["top"]) + int(res["height"])) * 2,
             int(res["left"]) * 2:(int(res["left"]) + int(res["width"])) *
             2] = 1
Example #17
    # target choice
    target = st.sidebar.selectbox(text["what"],
                                  (text["digits"], text["letters"]))
    if target == text["digits"]:
        target = "digits"
    else:
        target = "letters"

    # Create canva
    CanvasResult = st_canvas(
        fill_color="rgba(255, 165, 0, 0.3)",
        stroke_width=20,
        stroke_color="#D01111",
        background_color="#fff",
        background_image=None,
        update_streamlit=True,
        height=CANVA_SIZE,
        width=CANVA_SIZE,
        drawing_mode="freedraw",
        key="canvas",
    )

    # Empty slot for expected input
    res_text = st.empty()

    # Next input button
    change = st.button(text["next"][target])

    # Empty slot for score
    score = st.empty()
Example #18
if option_upload == 'Draw your own image':
    # Specify canvas parameters in application
    stroke_width = st.sidebar.slider("Stroke width: ", 1, 25, 3)
    stroke_color = st.sidebar.color_picker("Stroke color hex: ")
    bg_color = st.sidebar.color_picker("Background color hex: ", "#eee")
    bg_image = st.sidebar.file_uploader("Background image:", type=["png", "jpg"])
    drawing_mode = st.sidebar.selectbox(
        "Drawing tool:", ("freedraw", "line", "rect", "circle", "transform")
    )
    realtime_update = st.sidebar.checkbox("Update in realtime", True)

    # Create a canvas component
    canvas_result = st_canvas(
        fill_color="rgba(255, 165, 0, 0.3)",  # Fixed fill color with some opacity
        stroke_width=stroke_width,
        stroke_color=stroke_color,
        background_color=bg_color,
        background_image=Image.open(bg_image) if bg_image else None,
        update_streamlit=realtime_update,
        height=300,
        width=700,
        drawing_mode=drawing_mode,
        key="canvas",
    )

    # Do something interesting with the image data and paths
    if canvas_result.image_data is not None:
        display_img = canvas_result.image_data
        img = PILImageBW.create(display_img[:, :, 0].astype(np.uint8))
        if st.button('Predict'): predict(img)
Example #19
def main():
    # INITIALIZATION
    # For local testing
    #set_env_variables()
    url_base = os.environ['URL_BASE']
    secret_key = os.environ['SECRET_KEY']
    headers = {'Authorization': 'Token ' + secret_key}
    proxies = {
        "http": os.environ['QUOTAGUARDSHIELD_URL'],
        "https": os.environ['QUOTAGUARDSHIELD_URL']
    }
    ## SIDEBAR
    st.set_option('deprecation.showfileUploaderEncoding', False)
    st.sidebar.header('User Input')

    # Collects user input features into dataframe
    uploaded_file = st.sidebar.file_uploader("Upload your input image file",
                                             type=["png", 'jpg', 'jpeg'])
    pet_type = st.sidebar.selectbox("Pet Type:", ("cat", "dog"))

    ## S3 Connection
    aws_key = os.environ['AWS_ACCESS_KEY_ID']
    aws_secret = os.environ['AWS_SECRET_ACCESS_KEY']
    s3 = boto3.client('s3',
                      aws_access_key_id=aws_key,
                      aws_secret_access_key=aws_secret)
    default_img_path = 'https://petfacebucket.s3.amazonaws.com'
    folder_path = 'cat_test'

    test_distance = 0.6

    ## LOAD DATABASE
    json_path = '00_db/db_' + pet_type + '.json'
    #    json_path = 'db_cat.json'
    with open(json_path, "r") as jsonfile:
        db = json.load(jsonfile)

    if uploaded_file is not None:
        st.write("""
                # Test PetFace App
                """)

        img_pil = Image.open(uploaded_file).convert("RGB")
        img_np = np.array(img_pil)
        img_np = img_np[:, :, :3]

        url = url_base + 'pet_face'
        buffer = io.BytesIO()
        img_pil.save(buffer, format="JPEG")
        img_b64 = base64.b64encode(buffer.getvalue())
        data = {
            'pet_type': [pet_type],
            'image': img_b64.decode(),
            'is_base64': True
        }
        response = requests.post(url,
                                 data=json.dumps(data),
                                 headers=headers,
                                 proxies=proxies)
        if response.ok:
            boxes = response.json()['boxes']
            kpts = response.json()['kpts']
            labels = response.json()['pet_types']
            embs = response.json()['embs']

            if not st.sidebar.checkbox("Wrong picture processed"):

                if len(boxes) > 0:

                    col1, col2 = st.beta_columns(2)

                    # processing only first pet
                    kpt = np.array(kpts[0])
                    y, x, y2, x2 = boxes[0]

                    fig, ax = plt.subplots(1)
                    ax.imshow(img_np)
                    rect = patches.Rectangle((x, y),
                                             x2 - x,
                                             y2 - y,
                                             linewidth=1,
                                             edgecolor='b',
                                             facecolor='none')
                    ax.add_patch(rect)
                    ax.plot(kpt[:, 0::2], kpt[:, 1::2], 'o')
                    plt.axis('off')

                    col1.pyplot(fig)
                    col2.subheader(
                        "This is your original picture with eyes and nose detected. Below you'll find your pet face aligned"
                    )

                    leye, reye, nose = fix_eyes(kpt[0],
                                                kpt[1],
                                                kpt[2],
                                                num_eyes=2)
                    face_aligned = get_face_aligned(img_np, leye, reye, nose,
                                                    pet_type)
                    col2.image(face_aligned, use_column_width='auto')
                    col2.subheader(
                        "Is the picture correctly detected? Please answer in the sidebar"
                    )

                    if pet_type in labels:
                        emb = np.array(embs[0])
                        t = st.empty()
                        t.info(
                            "getting similar {}s in our db".format(pet_type))
                        get_similar_pictures(db,
                                             emb,
                                             default_img_path,
                                             pet_type,
                                             test_distance=test_distance)
                        t.success("Success!")
                    else:
                        st.write(
                            "No {}s found, please check the pet type".format(
                                pet_type))
                else:
                    st.write(
                        "No {}s found, please check 'Wrong picture processed' to manually annotate"
                        .format(pet_type))

            else:
                st.write(
                    "Please help us identify the eyes and the nose of your pet."
                )
                st.write(
                    "Using the drawing tool at the sidebar select circle and annotate the eyes and nose of your pet (no particular order)"
                )
                st.write(
                    "You can also select rectangle from the drawing tool and create a rectangle around your pet head"
                )
                st.write(
                    "If necessary use the transform selection at the sidebar to modify your annotations; \n"
                    "aditionaly use the undo, redo and trash buttons near the picture"
                )

                canvas_h = 300
                canvas_w = 600

                drawing_mode = st.sidebar.selectbox(
                    "Drawing tool:", ("circle", "rect", "transform"))

                canvas_result = st_canvas(
                    fill_color=
                    "rgba(255, 165, 0, 0.3)",  # Fixed fill color with some opacity
                    stroke_width=2,
                    stroke_color='blue',
                    background_color="",
                    background_image=img_pil,
                    update_streamlit=True,
                    drawing_mode=drawing_mode,
                    key="canvas",
                )

                if canvas_result.json_data is not None:
                    df = pd.json_normalize(canvas_result.json_data["objects"])
                    if len(df) > 1:
                        leye, reye, nose = [], [], []
                        kpts = df.loc[df.type == 'circle',
                                      ['left', 'top']].values

                        h, w, _ = img_np.shape
                        ratio_factor = (w / h) * (canvas_h / canvas_w)
                        kpts = kpts * [(h / canvas_h) * ratio_factor,
                                       (w / canvas_w) * (1 / ratio_factor)]
                        if len(kpts) <= 2:
                            leye, reye, nose = fix_eyes(kpts[0],
                                                        kpts[0],
                                                        kpts[1],
                                                        num_eyes=1)
                        if len(kpts) > 2:
                            leye, reye, nose = fix_eyes(kpts[0],
                                                        kpts[1],
                                                        kpts[2],
                                                        num_eyes=2)

                        face_aligned = get_face_aligned(
                            img_np, leye, reye, nose, pet_type)

                        col1, col2 = st.beta_columns(2)
                        col2.subheader(
                            "This is the picture of your {} aligned!".format(
                                pet_type))
                        col1.image(face_aligned)

                        url_emb = url_base + 'emb'
                        data = {
                            'image': face_aligned.tolist(),
                            'pet_type': pet_type
                        }

                        response_emb = requests.post(url_emb,
                                                     data=json.dumps(data),
                                                     headers=headers,
                                                     proxies=proxies)
                        if response_emb.ok:
                            emb = np.array(response_emb.json()['emb'])
                            t = st.empty()
                            t.info("getting similar {}s in our db".format(
                                pet_type))
                            get_similar_pictures(db,
                                                 emb,
                                                 default_img_path,
                                                 pet_type,
                                                 test_distance=test_distance)
                            t.success("Success!")
        else:
            st.write(
                "There was a problem connecting to the server, please try again later"
            )
    else:
        st.write("""
            # Test PetFace App
            This app is a *test*, please upload an image and specify the pet type 
            at the menu on the left
            """)
Example #20
    model = VGG_FeatureExtractor(len(character))
    model.load_weights(saved_model)
    
    return model, list(character)
model, idx2char = load_model()

st.write('# 한글 음절 인식기')

col1, col2 = st.beta_columns(2)

with col1:
    canvas = st_canvas(
        fill_color='#FFFFFF',
        stroke_width=7,
        stroke_color='#000000',
        background_color='#FFFFFF',
        width=192,
        height=192,
        drawing_mode='freedraw',
        key='canvas'
    )

if canvas.image_data is not None:
    # image load
    img = canvas.image_data.astype('uint8')
    img = cv2.resize(img, (32, 32))
    preview_img = cv2.resize(img, (192, 192))
    
    # show image for feeding to model
    col2.image(preview_img)
    
    # preprocess image
Example #21
[Deep Detect Handwriting repository](https://github.com/leersmathieu/deep-detect-handwriting)
""")

col1, col2 = st.beta_columns(2)

with col1:
    # Display a h3 title
    st.subheader("Drawing area")
    st.markdown("Draw a digit and then press 'Get prediction'")

    # Create a canvas component
    canvas_result = st_canvas(
        stroke_width=20,
        stroke_color="#fff",
        background_color="#000",
        update_streamlit=True,
        height=280,
        width=280,
        drawing_mode="freedraw",
        key="canvas",
    )

if st.button('Get prediction'):
    model = Model()

    # Instantiate an Image object from the handwritten canvas
    image = Image(canvas_result.image_data)

    with col2:
        # Display a h2 title
        st.subheader("What the computer see")
        st.markdown("Your drawing is resized and gray-scaled")
Example #22
	37: 'sha',38: 'oo', 39: 'au', 40: 'pha', 41: 'ee', 42: '4', 43: 'dha', 44: '9', 45: 'u', 46: 'daa', 47: 'i', 48: '1', 49: 'ha', 
	50: 'rda', 51: 'ba', 52: 'ksha', 53: 'jha', 54: 'katasha', 55: '0', 56: 'ja', 57:'ya'
	}
	
	return mapping[pred]

tf.config.experimental.set_memory_growth(tf.config.experimental.list_physical_devices('GPU')[0], True)

st.title("Hindi character recognition")

st.write("Draw a hindi character below and click on Predict button")
st.write("\n")
st.write("To clear the digit, uncheck checkbox, double click on the digit or refresh the page")
st.write("To draw the digit, check the checkbox")
drawing_mode = st.checkbox("Draw?",True)
image_data = st_canvas(15, '#000', '#FFF', height=400,width=400, drawing_mode=drawing_mode, key="canvas")
check=st.button("Predict")
st.set_option('deprecation.showfileUploaderEncoding', False)

if image_data is not None:
	if check:
		cv2.imwrite("test.jpg",image_data)
		im = np.array(Image.open("test.jpg").convert('RGB').resize((224,224),Image.BICUBIC))
		st.title("Input Image")
		st.image(im)
		im=im/255.0
		im_preprocess=np.expand_dims(im, axis=0)
	
		infer_model=inference_model()
		
		infer_model.load_weights("../weights/prototype.h5")
Example #23
st.title(text[value]["title"])
st.markdown(f"**{alfabeto_espacio}**")
st.markdown(f"{text[value]['instruction']}")
# Specify brush parameters and drawing mode
stroke_width = st.sidebar.slider("{}: ".format(text[value]["stroke width"]), 1,
                                 100, 25)

stroke_color = st.sidebar.color_picker("{}:".format(text[value]["color"]))

# Create a canvas component
canvas_result = st_canvas(
    stroke_width=stroke_width,
    stroke_color=stroke_color,
    background_color="#FFF",
    height=280,
    width=280,
    drawing_mode="freedraw",
    key="canvas",
)

predict = st.button(text[value]["predict"])


def get_prediction(image):
    pass


if canvas_result.image_data is not None and predict:
    # 1: Obtengo la imagen del canvas
    imagen = canvas_result.image_data
Example #24
st.title('¡RECONOCE DÍGITOS!')

st.markdown('''
La siguiente aplicación intenta predecir el dígito escrito.
* Usamos redes neuronales convolucionales con Tensorflow.
* Para identificar reglas de decisión usamos LIME.
* Para reducir dimensiones usamos UMAP.
* Para buscar imágenes similares usamos NearestNeighbors.
''')

st.markdown('''¡ESCRIBA UN DÍGITO, INTENTARÉ PREDECIRLO!''')

canvas_result = st_canvas(fill_color='#000000',
                          stroke_width=20,
                          stroke_color='#FFFFFF',
                          background_color='#000000',
                          width=config.SIZE_DRAW,
                          height=config.SIZE_DRAW,
                          drawing_mode='freedraw',
                          key='canvas')

if canvas_result.image_data is not None:
    if st.button('PREDECIR'):
        image_array = canvas_result.image_data.astype(np.uint8)
        img = prepara_img(image_array=image_array)

        st.subheader('PREDICCIÓN')
        predict_class(img)

        st.subheader('LIME PARA REGLAS DE DECISIÓN')
        plot_rules(img)
    expander_bar = st.beta_expander("About")
    expander_bar.markdown("""
    * **Python libraries: streamlit, streamlit_drawable_canvas, pandas, numpy, tensorflow, skimage**
    * **Source of data: MNIST dataset from tensorflow.keras.datasets, which contains 70,000 images (28x28 pixels) of the numbers from 0 to 9 handwritten by US highschool students.**
    """)
    st.write("")
    st.write("Try to draw a number here:")

    canvas_result = st_canvas(
        fill_color=
        "rgba(255, 165, 0, 0.3)",  # Fixed fill color with some opacity
        stroke_width=50,
        stroke_color="000000",
        background_color="#FFFFFF",
        background_image=None,
        update_streamlit=True,
        height=500,
        width=500,
        drawing_mode="freedraw",
        initial_drawing=None,
        key="full_app",
    )

    if st.button("Click here to let the CNN model make a prediction"):

        # convert the drawn canvas to image format and get a prediction from our pretrained model
        if canvas_result.image_data is not None:

            # load model
            tf_model = load_model()
Example #26
    return load_model('model.h5')


model = load()

st.write('# MNIST Recognizer')

CANVAS_SIZE = 192

col1, col2 = st.beta_columns(2)

with col1:
    canvas = st_canvas(fill_color='#000000',
                       stroke_width=20,
                       stroke_color='#FFFFFF',
                       background_color='#000000',
                       width=CANVAS_SIZE,
                       height=CANVAS_SIZE,
                       drawing_mode='freedraw',
                       key='canvas')

if canvas.image_data is not None:
    img = canvas.image_data.astype(np.uint8)
    img = cv2.resize(img, dsize=(28, 28))
    preview_img = cv2.resize(img,
                             dsize=(CANVAS_SIZE, CANVAS_SIZE),
                             interpolation=cv2.INTER_NEAREST)

    col2.image(preview_img)

    x = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    x = x.reshape((-1, 28, 28, 1))
Example #27
# st.markdown('<style>body{color: White; background-color: DarkSlateGrey}</style>', unsafe_allow_html=True)

st.title('My Digit Recognizer')
st.markdown('''
Try to write a digit!
''')

# data = np.random.rand(28,28)
# img = cv2.resize(data, (256, 256), interpolation=cv2.INTER_NEAREST)

SIZE = 192
mode = st.checkbox("Draw (or Delete)?", True)
canvas_result = st_canvas(fill_color='#000000',
                          stroke_width=20,
                          stroke_color='#FFFFFF',
                          background_color='#000000',
                          width=SIZE,
                          height=SIZE,
                          drawing_mode="freedraw" if mode else "transform",
                          key='canvas')

if canvas_result.image_data is not None:
    img = cv2.resize(canvas_result.image_data.astype('uint8'), (28, 28))
    rescaled = cv2.resize(img, (SIZE, SIZE), interpolation=cv2.INTER_NEAREST)
    st.write('Model Input')
    st.image(rescaled)

if st.button('Predict') and canvas_result.image_data is not None:
    test_x = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    val = model.predict(test_x.reshape(1, 28, 28))
    st.write(f'result: {np.argmax(val[0])}')
    st.bar_chart(val[0])
Example #28
from code import config
from PIL import Image
from torchvision import transforms
import numpy as np
import cv2
from torchvision.utils import save_image

st.header('Sketch to Shoe')
st.subheader('Image to image translation: a reimplementation of pix2pix GAN')
st.write('Project Sketch-to-Shoe is a thorough yet comprehensible reimplementation of the pix2pix GAN paper (Isola et al.)')
st.write('It adheres to the PyTorch framework and is trained on various sketches/images of shoes.')
st.image('assets/120_AB.jpg')
stwid = st.sidebar.slider('Width of brush: ',1,10,2)
color = st.sidebar.color_picker('Choose the color you want: ')
img_dat = st_canvas(stroke_width=stwid,stroke_color=color,
                background_color='#000',height=300,width=400,
                drawing_mode='freedraw',key='canvas')
cv2.imwrite(r'samples/inp.jpg',img_dat.image_data)
img = Image.open(r'samples/inp.jpg').resize((256, 256), Image.ANTIALIAS).convert('RGB')

img = np.asarray(img, dtype='uint8')

transf = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.5,),(0.5,))])
img = transf(img)
img = img.unsqueeze(0)
img = img.to(config.device)
x = st.button('pix2pix')
gen = Generator(in_channels=3).to(config.device)
saved = torch.load(r'D:\pix2pix_models\gmodel.pth',map_location=config.device)
gen.load_state_dict(saved['model'])
gen.eval()
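The Example #28 snippet stops right after loading the generator. A hedged continuation, assuming the generator maps the normalized 3x256x256 input to an output in [-1, 1] (variable names follow the snippet; the display step is an assumption, not the original code):

# Hedged continuation: run the generator when the 'pix2pix' button is pressed
if x:
    with torch.no_grad():
        fake = gen(img)
    # undo Normalize((0.5,), (0.5,)) before saving/displaying
    save_image(fake * 0.5 + 0.5, r'samples/out.jpg')
    st.image(r'samples/out.jpg')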
Example #29
def main():
    def convertImage(image):
        if image is not None:
            image = Image.fromarray(np.uint8(image)).convert('L')
            image = np.array(image)
            while np.sum(image[0]) == 0:
                image = image[1:]
            while np.sum(image[:, 0]) == 0:
                image = np.delete(image, 0, 1)
            while np.sum(image[-1]) == 0:
                image = image[:-1]
            while np.sum(image[:, -1]) == 0:
                image = np.delete(image, -1, 1)
            rows, cols = image.shape
            colsPadding = (int(math.ceil(
                (308 - cols) / 2.0)), int(math.floor((308 - cols) / 2.0)))
            rowsPadding = (int(math.ceil(
                (308 - rows) / 2.0)), int(math.floor((308 - rows) / 2.0)))
            image = np.lib.pad(image, (rowsPadding, colsPadding), 'constant')
            image = Image.fromarray(np.uint8(image))
            image = image.resize(size=(28, 28))
            st.image(image)
            image = np.array(image)
            image = image.reshape(1, 28, 28, 1)
            image = convert_to_tensor(image)
        return image

    def loadModel():
        model = load_model('./saved_model.h5', compile=True)
        return model

    def prediction(m, d):
        pred = m(d, training=False)
        pred = pred.numpy()
        return pred

    # Load the model
    model = loadModel()

    st.write("""
    #My first app
    Hello World
    """)
    # Specify brush parameters and drawing mode
    drawing_mode = st.sidebar.selectbox("Drawing mode",
                                        ("freedraw", "line", "transform"))
    key = "canvas"
    # Create a canvas component
    image_data = st_canvas(
        20,
        "#FFF",
        "#000",
        height=308,
        width=308,
        drawing_mode=drawing_mode,
        key=key,
    )

    if image_data is not None:
        # 63.75 is the mean of a blank canvas: RGB channels all 0, alpha 255
        if np.mean(image_data) != 63.75:
            data = convertImage(image_data)
            pred = prediction(model, data)
            xt = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')
            y_pos = np.arange(len(xt))
            plt.bar(y_pos,
                    height=pred[0],
                    align='center',
                    color=[
                        'blue', 'red', 'orange', 'green', 'black', 'cyan',
                        'yellow', 'purple', 'magenta', 'pink'
                    ])
            plt.xticks(y_pos, xt)
            st.pyplot(plt)
Example #30
image_to_open = st.sidebar.file_uploader("Upload Image:", type=["png", "jpg"])

if image_to_open:
    st.title('Pitch lines')
    st.sidebar.write('Draw Penalty Box lines in the order shown below:')
    st.sidebar.image('pitch.png', width=300)
    
    image = Image.open(image_to_open)
    image = image.resize((500, int(500*image.height/image.width)))

    # Create a canvas component to draw pitch lines
    canvas_image = st_canvas(
        fill_color = "rgba(255, 165, 0, 0.3)", 
        stroke_width = 2,
        stroke_color = "#e00",
        background_image=image,
        width = image.width,
        height = image.height,
        drawing_mode= "line",
        key="canvas",
    )

    if canvas_image.json_data["objects"]:
        if len(canvas_image.json_data["objects"])>=4:
            df = pd.json_normalize(canvas_image.json_data["objects"])
            df['y1_line'] = df['top']+df['y1']
            df['y2_line'] = df['top']+df['y2']
            df['x1_line'] = df['left']+df['x1']
            df['x2_line'] = df['left']+df['x2']
            df['slope'], df['intercept'] = get_si_from_coords(df[['x1_line', 'y1_line', 'x2_line', 'y2_line']].values)

            UP_PA = line_intersect(df.loc[0, ['slope', 'intercept']], df.loc[2, ['slope', 'intercept']])