Example #1
    def post(self):
        try:
            # Retrieve the uploaded image from the HTTP submit; it arrives as a Blobstore blob.
            image = self.get_uploads()[0]
            # Get info about the uploaded blob file.
            file_info = self.get_file_infos()[0]
            # Create the serving URL for the blob uploaded into GCS (Google Cloud Storage).
            serving_url = images.get_serving_url(blobstore.create_gs_key(file_info.gs_object_name))

            # Fetch enough of the image to read its dimensions. The last argument is the
            # end index of the byte range to fetch; 50000 bytes is plenty for the header.
            data = blobstore.fetch_data(image.key(), 0, 50000)
            img = images.Image(image_data=data)
            width_to_height_ratio = float(img.width) / img.height

            # Get the name and description from the form.
            name = self.request.get('name')
            caption = self.request.get('caption')

            # Build the datastore entity and store it.
            image_model = ImageModel(
                name=name,
                caption=caption,
                blob_key=image.key(),
                gs_object_name=file_info.gs_object_name,
                serving_url=serving_url,
                width_to_height_ratio=width_to_height_ratio)
            image_model.put()

            # Redirect to the upload page and signal success.
            self.redirect('/admin/upload_image#success')

        except Exception:
            # Redirect to the upload page and signal that something went wrong.
            self.redirect('/admin/upload_image#fail')
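For context, the handler above populates an ndb entity whose fields can be read off the ImageModel(...) constructor call. A minimal sketch of what that model definition might look like (the property types are assumptions inferred from how each field is used, not the project's actual code):

from google.appengine.ext import ndb

# Hypothetical reconstruction of the datastore model used by the upload handler.
class ImageModel(ndb.Model):
    name = ndb.StringProperty()
    caption = ndb.StringProperty()
    blob_key = ndb.BlobKeyProperty()              # key of the uploaded Blobstore blob
    gs_object_name = ndb.StringProperty()         # object name in Google Cloud Storage
    serving_url = ndb.StringProperty()            # URL from images.get_serving_url()
    width_to_height_ratio = ndb.FloatProperty()   # aspect ratio computed at upload time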
Example #2
def main():
    imageDataset = ImageDataSet(
        "/Volumes/My Passport/abhishek/Datasets/Image Dataset/rvl-cdip/dataset"
    )

    model = ImageModel(8, 224, 224)
    model.train_model(imageDataset, batch_size=50)
Example #3
def receive_message():
    if request.method == 'GET':
        image_url = request.args["image"]
        # Record the incoming image URL, then classify the downloaded image.
        ImageModel.create(image_url)
        prob, classes = model.predict(urllib.request.urlopen(image_url),
                                      models)
        # Pick the most probable class and map each class back to its readable name.
        max_index = np.argmax(prob)
        max_probability = prob[max_index]
        val_list = list(cat_to_name.values())
        label = classes[max_index]
        labels = []
        for cl in classes:
            labels.append(cat_to_name[str(val_list.index(cl))])
        # Reply in the messaging platform's expected format.
        message = {"messages": [{"text": f'This is a picture of {labels[0]}'}]}
        return make_response(jsonify(message))
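The route decorator for this view is not shown. A minimal sketch of how it could be registered, assuming a Flask app and that the snippet's globals (model, models, cat_to_name) are defined elsewhere; the /webhook path is an assumption:

from flask import Flask

app = Flask(__name__)

# Hypothetical registration for the view function above.
app.add_url_rule("/webhook", view_func=receive_message, methods=["GET"])

# Example call: GET /webhook?image=https://example.com/flower.jpg
# Expected response body: {"messages": [{"text": "This is a picture of <label>"}]}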
Example #4
    def get(self):
        redirect_if_not_admin(self)

        images = ImageModel.query()
        
        self.render("gallery_creation.html", 
            active="create_gallery",
            images=images)
Example #5
    def get(self):
        redirect_if_not_admin(self)

        images = ImageModel.query()
       
        self.render("image_admin.html", 
            active = "images", 
            images = images)
Example #6
    def get(self):
        redirect_if_not_admin(self)

        templates = Config.TEMPLATES
        images = ImageModel.query()
        main_pages = PageModel.query(PageModel.parent == None)

        self.render("page_creation.html", active="create_page", 
            templates=templates, 
            main_pages=main_pages,
            images=images)
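Examples #4 to #6 rely on a self.render() helper, which is not part of webapp2 itself. A minimal sketch of a base handler that could provide it, assuming Jinja2 templates in a templates/ directory (the class name and paths are assumptions):

import os
import jinja2
import webapp2

JINJA_ENV = jinja2.Environment(
    loader=jinja2.FileSystemLoader(
        os.path.join(os.path.dirname(__file__), 'templates')),
    autoescape=True)

class BaseHandler(webapp2.RequestHandler):
    """Hypothetical base class supplying the render() helper used above."""

    def render(self, template_name, **context):
        template = JINJA_ENV.get_template(template_name)
        self.response.write(template.render(**context))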
Example #7
def delete_image():
    if request.method == "DELETE":
        if "id" not in request.args:
            return jsonify(msg="'id' cannot be left blank."), 400
        
        try:
            img_id = request.args.get("id")
            image = ImageModel.find_by_id(img_id)
            if image:
                image.delete_from_database()
                return jsonify(msg="Image with id '{}' has been successfully deleted.".format(img_id)), 200
            return jsonify(msg="id '{}' does not exist.".format(img_id)), 400
        except Exception as error:
            return jsonify(msg="An error occured during image DELETE: {}".format(error)), 500
Example #8
    def get(self, curr_page_id="main"):
        redirect_if_not_admin(self)

        galleries = GalleryModel.query()
        images = ImageModel.query()

        curr_page = ""
        if curr_page_id:
            curr_page_query = GalleryModel.query(GalleryModel.gallery_id == curr_page_id)
            curr_page = curr_page_query.get()

        self.render("gallery_admin.html",
            active="galleries",
            galleries=galleries,
            images=images,
            curr_page=curr_page,
            curr_page_id=curr_page_id)
Example #9
    def get(self, curr_page_id="main"):
        redirect_if_not_admin(self)

        pages = PageModel.query()
        images = ImageModel.query()

        curr_page = ""
        if curr_page_id:
            curr_page_query = PageModel.query(PageModel.page_id == curr_page_id)
            curr_page = curr_page_query.get()

        self.render('page_admin.html',
            active="pages",
            pages=pages,
            templates=Config.TEMPLATES,
            images=images,
            main_pages=Config.MAIN_PAGES,
            curr_page=curr_page,
            curr_page_id=curr_page_id)
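Examples #8 and #9 take an optional curr_page_id path argument that defaults to "main". In webapp2 this is typically handled with two routes pointing at the same handler; a minimal sketch (the paths and the stub handler are assumptions):

import webapp2

class PageAdminHandler(webapp2.RequestHandler):
    # Stub standing in for the handler above; curr_page_id comes from the route.
    def get(self, curr_page_id="main"):
        self.response.write(curr_page_id)

# Without a trailing id the handler falls back to curr_page_id="main".
app = webapp2.WSGIApplication([
    webapp2.Route(r'/admin/pages', PageAdminHandler),
    webapp2.Route(r'/admin/pages/<curr_page_id>', PageAdminHandler),
])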
Example #10
    def Read(self, path, imgID):
        imgName = path.split('/')[-1]
        self.imagesModels[imgID] = ImageModel(path)
        self.heights[imgID], self.weights[imgID] = self.imagesModels[
            imgID].imgShape

        # If the other slot still holds the Ellipsis placeholder, only one image
        # has been loaded so far, so just display this one.
        if self.imagesModels[~imgID] is ...:
            self.displayImage(self.imagesModels[imgID].imgByte,
                              self.inputImages[imgID])
            logger.info(f"Uploaded Image{imgID + 1}: {imgName}")
        else:
            # Both images are loaded; they must have the same dimensions.
            if self.heights[1] != self.heights[0] or self.weights[
                    1] != self.weights[0]:
                self.msg.setWindowTitle("Error in Image Size")
                self.msg.setText("The 2 images don't have the same size.")
                self.msg.setIcon(QMessageBox.Warning)
                x = self.msg.exec_()
                logger.error("Error: The two images don't have the same size.")
            else:
                self.displayImage(self.imagesModels[imgID].imgByte,
                                  self.inputImages[imgID])
                logger.info(f"Uploaded Image{imgID + 1}: {imgName}")
Example #11
    #     if isinstance(layer, Iterable):
    #         for i in layer:
    #             # _sublayers = list(i)
    #             print("%i", i)
    #             if isinstance(i, Iterable):
    #                 for sublayer in _sublayers:
    #                     print("sub_sublayer", sublayer)

    # else:
    #     print(layer)


# --MAIN ------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    # instantiate class to handle model
    image_model = ImageModel(model_name='resnet50', dataset_size=2692)
    # Initialize Image Module
    # image_module = MyImageModule(dataset_size=100, batch_size=32)
    image_module = MyImageModule(dataset_size=2692, batch_size=1)
    image_module.setup()

    # --- PREDICT RESULTS ---
    # Get name and model used for testing
    # name_model, inference_model = image_model.inference_model()
    name_model = 'model-epoch=05-val_loss=0.36-weights7y3_unfreeze2.ckpt'
    inference_model = image_model.load_model(name_model)
    # print("Inference model:", inference_model)
    print("Name:", name_model)

    # Prediction with no tensors
    # y_true, y_pred = predict(inference_model, image_module.test_dataloader())
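The commented-out call suggests a predict() helper that runs the loaded checkpoint over the test dataloader and returns true and predicted labels. A minimal PyTorch sketch of such a function (its name and return format are taken from the commented line; the implementation is an assumption):

import torch

def predict(model, dataloader):
    """Collect ground-truth and predicted class indices over a dataloader."""
    model.eval()
    y_true, y_pred = [], []
    with torch.no_grad():
        for images, labels in dataloader:
            logits = model(images)
            y_true.extend(labels.tolist())
            y_pred.extend(logits.argmax(dim=1).tolist())
    return y_true, y_pred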
Example #12
def post_images():
    if request.method == "POST":
        # Path to the static directory where uploaded images are stored.
        LOCAL_STATIC_PATH = 'static/'
        UPLOADS_PATH = join(dirname(realpath(__file__)), LOCAL_STATIC_PATH)
        DATABASE_SIZE = 100

        # Check that the request body is filled.
        if "image" not in request.files:
            return jsonify(msg="'image' cannot be left blank."), 400
        elif "time_taken" not in request.form:
            return jsonify(msg="'time_taken' cannot be left blank."), 400
        elif "image_orientation" not in request.form:
            return jsonify(msg="'image_orientation' cannot be left blank."), 400

        # Get the image and its metadata.
        image = request.files.get("image")
        time = request.form.get("time_taken")
        orientation = request.form.get("image_orientation")

        try:
            seefoodWrapper.pollForReady()

            # Save the image to the server.
            image_name = secure_filename(image.filename)
            path = UPLOADS_PATH + image_name
            image.save(path)

            # Seefood AI can take .bmp, .jpg, .png, etc., so no conversion is needed here.
            # Images can be filtered in the app before they are sent to the server.
            confidences = seefoodWrapper.sendImage(path)
            logger.write_info("routes.py:Got confidence ratings " + confidences[0] + " " + confidences[1])

            # Convert the image for the server gallery.
            new_img = Image.open(path)
            os.remove(path)

            truncName = image_name.split(".")
            newName = truncName[0] + '.png'
            newNamePath = UPLOADS_PATH + newName
            new_img.save(newNamePath, 'png')

            logger.write_info("routes.py:Successfully re-saved image")

            img_return_path = LOCAL_STATIC_PATH + newName

            # Enforce the database size limit.
            all_images = ImageModel.query.all()
            if len(all_images) == DATABASE_SIZE:
                first_image = all_images[0]
                first_image.delete_from_database()

            imageModel = ImageModel(truncName[0], time, confidences[0], confidences[1], img_return_path, orientation)
            logger.write_info("routes.py:Created imageModel")
            imageModel.save_to_database()
            logger.write_info("routes.py:Saved to database")

            return jsonify(image=imageModel.json()), 201

        except Exception as error:
            logger.write_error(error)
            return jsonify(msg="An error occurred during image POST: {}".format(error)), 500
Example #13
def stitchTwoImage(image_model1, image_model2):
    stitch_img, raw_img = image_model1.image, image_model2.image

    img1_gray = cv2.cvtColor(stitch_img, cv2.COLOR_BGR2GRAY)
    img2_gray = cv2.cvtColor(raw_img, cv2.COLOR_BGR2GRAY)

    # Feature matching
    kp1, des1 = getFeature(img1_gray)
    kp2, des2 = getFeature(img2_gray)

    matches = featureMatching(des1, des2)

    img_match = cv2.drawMatches(stitch_img, kp1, raw_img, kp2, matches, None)
    drawMatchImage('match ' + image_model1.name + ' + ' + image_model2.name,
                   img_match)

    # Calculate homography matrix
    h, isHomographyGood = getHomographyMatrix(kp1, kp2, matches)

    if not isHomographyGood:
        print('[WARNING] Bad homography matrix, discarding this data:',
              image_model2.name)
        return image_model1, False

    # Warping
    print('Warping...')
    start = time.time()

    new_size = (raw_img.shape[1] + int(stitch_img.shape[1] * 1.5),
                raw_img.shape[0] + int(stitch_img.shape[0] * 1.5))

    # put the image to the central position of the result image
    t = np.identity(3, np.float64)
    t[0, 2] = new_size[0] / 4
    t[1, 2] = new_size[1] / 4

    pad_raw = cv2.warpPerspective(raw_img,
                                  t,
                                  new_size,
                                  borderMode=cv2.BORDER_REFLECT)
    raw_mask = cv2.warpPerspective(raw_img, t, new_size)

    pad_warp = cv2.warpPerspective(stitch_img,
                                   t.dot(h),
                                   new_size,
                                   borderMode=cv2.BORDER_REFLECT)
    warp_mask = cv2.warpPerspective(stitch_img, t.dot(h), new_size)

    stitch_mask = np.logical_or(getMask(raw_mask), getMask(warp_mask))
    stitch_mask = np.asarray(stitch_mask, dtype=np.uint8)

    saveImage(
        'homography image {0} -> {1}'.format(image_model1.name, image_model2.name),
        warp_mask, ImageModel.SAVE_HOMO)

    print('--', time.time() - start, 's')

    # Cut Padding
    print('Cut padding...')
    cut_pad_images = [pad_raw, pad_warp, raw_mask, warp_mask]
    [cut_pad_raw, cut_pad_warp, cut_pad_raw_mask,
     cut_pad_warp_mask] = cutPadding(cut_pad_images, stitch_mask)

    if isAlreadyStitch(cut_pad_raw_mask, cut_pad_warp_mask):
        print('Data {0} is already stitched, so no need to keep stitching.'.
              format(image_model2.name))
        return image_model1, True

    final_mask = cv2.bitwise_or(getMask(cut_pad_raw_mask),
                                getMask(cut_pad_warp_mask))
    final_mask = np.asarray(final_mask, dtype=np.uint8)

    # Blending
    print('blending images...')
    blend_mask = (np.sum(cut_pad_raw_mask, axis=2) != 0).astype(np.float64)

    blend_img = multibandBlending(cut_pad_raw, cut_pad_warp, blend_mask)

    blend_img = cv2.bitwise_and(blend_img, blend_img, mask=final_mask)

    image_model_stitch = ImageModel(
        image_model1.name + ' ' + image_model2.name, blend_img)

    return image_model_stitch, True
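Because stitchTwoImage() folds one new frame into the running panorama and reports whether the homography was accepted, a driver can accumulate the result over a whole list of ImageModel instances. A minimal sketch of such a loop (the driver itself is an assumption, not part of the original code):

def stitchAll(image_models):
    """Fold every frame into the running stitch, skipping rejected homographies."""
    panorama = image_models[0]
    for image_model in image_models[1:]:
        panorama, ok = stitchTwoImage(panorama, image_model)
        if not ok:
            print('[WARNING] Skipped frame', image_model.name)
    return panorama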