def update_output(image_str):
    """Convert an uploaded base64 image string into an interactive image.

    Parameters
    ----------
    image_str : str or None
        Data-URL style string ("data:image/...;base64,<payload>") coming
        from the upload component; falsy when nothing has been uploaded.

    Returns
    -------
    The component produced by ``parse_interactive_image``, or ``None`` when
    there is no image to parse.
    """
    if not image_str:
        # BUG in original: it printed "no image" and then fell through to
        # image_str[0:100] / .split(...), crashing on None input.
        # Return early instead.
        print("no image")
        return None

    print(image_str[0:100])
    # Strip the "data:image/...;base64," prefix; keep only the b64 payload.
    string = image_str.split(';base64,')[-1]
    im_pil = drc.b64_to_pil(string.encode('ascii'))
    return parse_interactive_image(im_pil)
def update_output(list_of_contents, list_of_names, list_of_dates):
    """OCR the first uploaded receipt and extract structured fields via NER.

    Parameters
    ----------
    list_of_contents : list[str] or None
        Base64 data-URLs of the uploaded files (None before any upload).
    list_of_names : list[str]
        Original filenames of the uploads.
    list_of_dates : list
        Upload timestamps.

    Returns
    -------
    tuple or None
        ``(children, text, Date, adress, ttc, Marchand)`` where ``children``
        is the list of parsed upload components. Returns ``None`` when
        nothing has been uploaded yet — callers expecting the 6-tuple must
        handle that no-op case.
    """
    if list_of_contents is None:
        # Nothing uploaded: preserve the original no-op behaviour, but make
        # the early exit explicit instead of falling off the function end.
        return None

    # Parse the first upload's base64 payload and convert it to a PIL image.
    string = list_of_contents[0].split(';base64,')[-1]
    im_pil = drc.b64_to_pil(string)

    text = image_to_text(im_pil)
    txt = clean_text(text)
    print(txt)

    # NOTE(review): NER runs on the *cleaned* text, but the *raw* OCR text
    # is returned below — confirm this asymmetry is intentional.
    Date, adress, ttc, Marchand = text_to_ner(txt)
    print(Date, adress, ttc)

    children = [
        parse_contents(c, n, d)
        for c, n, d in zip(list_of_contents, list_of_names, list_of_dates)
    ]
    return children, text, Date, adress, ttc, Marchand
def update_similar_items(image_str, undo_clicks, redo_clicks):
    """Render the top / frugal / premium product matches for an image.

    Parameters
    ----------
    image_str : str or None
        Base64 data-URL of the query image.
    undo_clicks, redo_clicks
        Button click counters (only used to trigger the callback here).

    Returns
    -------
    list or None
        One child component per match, or ``None`` when no image is set.
    """
    if not image_str:
        print("no images")
        return None

    print(image_str[0:100])
    string = image_str.split(';base64,')[-1]
    im_pil = drc.b64_to_pil(string.encode('ascii'))

    # Persist the decoded image so the matcher can read it from disk.
    # base64.decodestring was deprecated and removed in Python 3.9;
    # base64.b64decode is the supported equivalent.
    data = base64.b64decode(string.encode('ascii'))
    with open(cwd + "/image_test.jpg", "wb") as f:
        f.write(data)

    input_image_path = cwd + "/image_test.jpg"
    print('input_image_path = {}'.format(input_image_path))

    top_match, frugal_match, premium_match = cnn.find_top_matches_by_path(
        input_image_path)

    matches = [top_match, frugal_match, premium_match]
    similar_image_paths = [m['image_path'] for m in matches]
    names = [m['name'] for m in matches]
    brands = [m['brand'] for m in matches]
    urls = [m['url'] for m in matches]
    prices = [m['msrp'] for m in matches]

    # Normalise missing sale prices (None or NaN) to the string 'None'.
    sales = []
    for salep in [m['sale'] for m in matches]:
        if (salep is None) or math.isnan(salep):
            sales.append('None')
        else:
            sales.append(salep)

    heads = ['Match!!!', 'Frugal', 'Premium']
    children = []
    for image_filename, head, brand, name, price, sale, url in zip(
            similar_image_paths, heads, brands, names, prices, sales, urls):
        # Context manager closes the handle promptly instead of leaking it.
        with open(image_filename, 'rb') as img_f:
            encoded_image = base64.b64encode(img_f.read())
        children.append(display_encoded_image_and_metadata(
            encoded_image, head, brand, name, price, sale, url))
    return children
def update_graph_interactive_image(
    content,
    undo_clicks,
    n_clicks,
    # new_win_width,
    selectedData,
    filters,
    enhance,
    enhancement_factor,
    new_filename,
    dragmode,
    enc_format,
    storage,
    session_id,
):
    """Update the interactive image after an upload, undo, or edit action.

    Parameters
    ----------
    content : str
        Base64 data-URL of the uploaded image (only read when a new file
        was uploaded).
    undo_clicks, n_clicks
        Button click counters.
    selectedData : dict or None
        Plotly selection (lasso or box) describing the zone to edit.
    filters, enhance, enhancement_factor
        Requested operations and their parameters.
    new_filename : str or None
        Name of a freshly uploaded file, if any.
    dragmode, enc_format : str
        Display options forwarded to the interactive image component.
    storage : str
        JSON-encoded dict holding filename, image signature and the
        action stack.
    session_id : str
        Key under which the image string is stored in the S3 bucket.

    Returns
    -------
    list
        ``[InteractiveImagePIL component, hidden storage Div]``.
    """
    t_start = time.time()

    # Retrieve information saved in storage, which is a dict containing
    # information about the image and its action stack.
    storage = json.loads(storage)
    filename = storage["filename"]  # Name of the image file.
    image_signature = storage["image_signature"]

    # Runs the undo function if the undo button was clicked. Storage stays
    # the same otherwise.
    storage = undo_last_action(undo_clicks, storage)

    # If a new file was uploaded (new file name changed)
    if new_filename and new_filename != filename:
        if DEBUG:
            print(filename, "replaced by", new_filename)

        # Update the storage dict
        storage["filename"] = new_filename

        # Parse the string and convert to PIL
        string = content.split(";base64,")[-1]
        im_pil = drc.b64_to_pil(string)

        # Update the image signature, which is the first 200 b64 characters
        # of the string encoding.
        storage["image_signature"] = string[:200]

        # Posts the image string into the Bucketeer Storage (hosted on S3).
        store_image_string(string, session_id)
        if DEBUG:
            print(new_filename, "added to Bucketeer S3.")

        # Resets the action stack
        storage["action_stack"] = []

    # If an operation was applied (when the filename wasn't changed)
    else:
        # Add actions to the action stack (we have more than one if filters
        # and enhance are BOTH selected).
        if filters:
            # Renamed from `type`: avoid shadowing the builtin.
            action_type = "filter"
            operation = filters
            add_action_to_stack(storage["action_stack"], operation,
                                action_type, selectedData)

        if enhance:
            action_type = "enhance"
            operation = {
                "enhancement": enhance,
                "enhancement_factor": enhancement_factor,
            }
            add_action_to_stack(storage["action_stack"], operation,
                                action_type, selectedData)

        # Apply the required actions to the picture, using memoized function
        im_pil = apply_actions_on_image(session_id, storage["action_stack"],
                                        filename, image_signature)

    t_end = time.time()
    if DEBUG:
        print(f"Updated Image Storage in {t_end - t_start:.3f} sec")

    return [
        drc.InteractiveImagePIL(
            image_id="interactive-image",
            image=im_pil,
            enc_format=enc_format,
            dragmode=dragmode,
            verbose=DEBUG,
        ),
        html.Div(id="div-storage", children=json.dumps(storage),
                 style={"display": "none"}),
    ]
def apply_actions_on_image(session_id, action_stack, filename, image_signature):
    """Recursively replay ``action_stack`` on the original image.

    Pops the most recent action, recurses on the remainder, then applies the
    popped action to the reconstructed PIL image. Base case (empty stack):
    load the untouched original, either from a local CSV when ``LOCAL`` is
    set, or from an S3/GCS pre-signed URL keyed by ``session_id``.

    Parameters
    ----------
    session_id : str
        Storage key of the original image string.
    action_stack : list[dict]
        Actions holding "operation", "selectedData" and "type" entries.
    filename, image_signature
        Passed through the recursion but not read directly here —
        presumably cache keys for a memoizing wrapper; TODO confirm.

    Returns
    -------
    PIL.Image.Image
        The image with all stacked actions applied.
    """
    # Copy so pop() below does not consume the caller's stack.
    action_stack = deepcopy(action_stack)

    # If we have arrived to the original image
    if len(action_stack) == 0 and LOCAL:
        with open("image_string.csv", mode="r") as image_file:
            image_reader = csv.DictReader(image_file)
            # Returns on the first row — the CSV is expected to hold a
            # single encoded image.
            for row in image_reader:
                im_pil = drc.b64_to_pil(row["image"])
                return im_pil

    if len(action_stack) == 0 and not LOCAL:
        # Retrieve the url in which the image string is stored inside s3,
        # using the session ID
        url = s3.generate_presigned_url(ClientMethod="get_object", Params={
            "Bucket": bucket_name, "Key": session_id
        })
        # A key replacement is required for URL pre-sign in gcp
        url = url.replace("AWSAccessKeyId", "GoogleAccessId")
        response = requests.get(url)
        if DEBUG:
            print("IMAGE STRING LENGTH: " + str(len(response.text)))
        im_pil = drc.b64_to_pil(response.text)
        return im_pil

    # Pop out the last action
    last_action = action_stack.pop()
    # Apply all the previous action_stack recursively, and gets the image PIL
    im_pil = apply_actions_on_image(session_id, action_stack, filename, image_signature)
    im_size = im_pil.size

    # Apply the rest of the action_stack
    operation = last_action["operation"]
    selected_data = last_action["selectedData"]
    action_type = last_action["type"]

    # Select using Lasso
    if selected_data and "lassoPoints" in selected_data:
        selection_mode = "lasso"
        selection_zone = utils.generate_lasso_mask(im_pil, selected_data)

    # Select using rectangular box
    elif selected_data and "range" in selected_data:
        selection_mode = "select"
        lower, upper = map(int, selected_data["range"]["y"])
        left, right = map(int, selected_data["range"]["x"])

        # Adjust height difference: Plotly's y axis grows upward while
        # PIL's grows downward.
        height = im_size[1]
        upper = height - upper
        lower = height - lower
        selection_zone = (left, upper, right, lower)

    # Select the whole image
    else:
        selection_mode = "select"
        selection_zone = (0, 0) + im_size

    # Apply the filters (operations mutate im_pil in place)
    if action_type == "filter":
        utils.apply_filters(image=im_pil, zone=selection_zone, filter=operation, mode=selection_mode)
    elif action_type == "enhance":
        enhancement = operation["enhancement"]
        factor = operation["enhancement_factor"]
        utils.apply_enhancements(
            image=im_pil,
            zone=selection_zone,
            enhancement=enhancement,
            enhancement_factor=factor,
            mode=selection_mode,
        )

    return im_pil
def apply_actions_on_image(session_id, action_stack, filename, image_signature):
    """Recursively replay ``action_stack`` on the original image.

    Base case (empty stack): download the original image string from the
    S3/GCS bucket through a pre-signed URL keyed by ``session_id`` and
    decode it into a PIL image. Otherwise, pop the most recent action,
    recurse on the remainder, then apply the popped filter/enhancement.

    Parameters
    ----------
    session_id : str
        Bucket key of the stored image string.
    action_stack : list[dict]
        Actions holding 'operation', 'selectedData' and 'type' entries.
    filename, image_signature
        Carried through the recursion; not read directly here — presumably
        cache keys for a memoizing wrapper (TODO confirm).

    Returns
    -------
    PIL.Image.Image
    """
    # Copy so pop() below does not mutate the caller's stack.
    action_stack = deepcopy(action_stack)

    # If we have arrived to the original image
    if len(action_stack) == 0:
        # Retrieve the url in which the image string is stored inside s3,
        # using the session ID
        url = s3.generate_presigned_url(
            ClientMethod='get_object',
            Params={
                'Bucket': bucket_name,
                'Key': session_id
            }
        )
        # A key replacement is required for URL pre-sign in gcp
        url = url.replace('AWSAccessKeyId', 'GoogleAccessId')
        response = requests.get(url)
        print(len(response.text))
        im_pil = drc.b64_to_pil(response.text)
        return im_pil

    # Pop out the last action
    last_action = action_stack.pop()
    # Apply all the previous action_stack, and gets the image PIL
    im_pil = apply_actions_on_image(
        session_id, action_stack, filename, image_signature
    )
    im_size = im_pil.size

    # Apply the rest of the action_stack
    operation = last_action['operation']
    selectedData = last_action['selectedData']
    # Renamed from `type` to avoid shadowing the builtin.
    action_type = last_action['type']

    # Select using Lasso
    if selectedData and 'lassoPoints' in selectedData:
        selection_mode = 'lasso'
        selection_zone = generate_lasso_mask(im_pil, selectedData)

    # Select using rectangular box
    elif selectedData and 'range' in selectedData:
        selection_mode = 'select'
        lower, upper = map(int, selectedData['range']['y'])
        left, right = map(int, selectedData['range']['x'])

        # Adjust height difference (Plotly y grows up, PIL y grows down)
        height = im_size[1]
        upper = height - upper
        lower = height - lower
        selection_zone = (left, upper, right, lower)

    # Select the whole image
    else:
        selection_mode = 'select'
        selection_zone = (0, 0) + im_size

    # Apply the filters (operations mutate im_pil in place)
    if action_type == 'filter':
        apply_filters(
            image=im_pil,
            zone=selection_zone,
            filter=operation,
            mode=selection_mode
        )
    elif action_type == 'enhance':
        enhancement = operation['enhancement']
        factor = operation['enhancement_factor']
        apply_enhancements(
            image=im_pil,
            zone=selection_zone,
            enhancement=enhancement,
            enhancement_factor=factor,
            mode=selection_mode
        )

    return im_pil
def apply_actions_on_image(session_id, action_stack, filename, image_signature):
    """Recursively replay ``action_stack`` on the original (local) image.

    Base case (empty stack): load the untouched original image from the
    local CSV when ``LOCAL`` is set. The non-local retrieval path is not
    implemented in this variant and now fails fast.

    Parameters
    ----------
    session_id : str
        Storage key (unused on the local path).
    action_stack : list[dict]
        Actions holding "operation", "selectedData" and "type" entries.
    filename, image_signature
        Carried through the recursion; not read directly here.

    Returns
    -------
    PIL.Image.Image
        The image with all stacked actions applied.

    Raises
    ------
    NotImplementedError
        When the stack is empty and ``LOCAL`` is not set.
    """
    # Copy so pop() below does not consume the caller's stack.
    action_stack = deepcopy(action_stack)

    # If we have arrived to the original image
    if len(action_stack) == 0 and LOCAL:
        with open("image_string.csv", mode="r") as image_file:
            image_reader = csv.DictReader(image_file)
            for row in image_reader:
                im_pil = drc.b64_to_pil(row["image"])
                return im_pil

    if len(action_stack) == 0 and not LOCAL:
        # BUG in original: this branch was `pass`, so control fell through
        # to action_stack.pop() on an EMPTY list and crashed with an opaque
        # IndexError. Fail fast with a clear message instead.
        raise NotImplementedError(
            "Retrieving the original image is only supported when LOCAL is set"
        )

    # Pop out the last action
    last_action = action_stack.pop()
    # Apply all the previous action_stack recursively, and gets the image PIL
    im_pil = apply_actions_on_image(session_id, action_stack, filename, image_signature)
    im_size = im_pil.size

    # Apply the rest of the action_stack
    operation = last_action["operation"]
    selected_data = last_action["selectedData"]
    action_type = last_action["type"]

    # Select using Lasso
    if selected_data and "lassoPoints" in selected_data:
        selection_mode = "lasso"
        selection_zone = utils.generate_lasso_mask(im_pil, selected_data)

    # Select using rectangular box
    elif selected_data and "range" in selected_data:
        selection_mode = "select"
        lower, upper = map(int, selected_data["range"]["y"])
        left, right = map(int, selected_data["range"]["x"])

        # Adjust height difference (Plotly y grows up, PIL y grows down)
        height = im_size[1]
        upper = height - upper
        lower = height - lower
        selection_zone = (left, upper, right, lower)

    # Select the whole image
    else:
        selection_mode = "select"
        selection_zone = (0, 0) + im_size

    # Apply the filters (operations mutate im_pil in place)
    if action_type == "filter":
        utils.apply_filters(image=im_pil, zone=selection_zone, filter=operation,
                            mode=selection_mode)
    elif action_type == "enhance":
        enhancement = operation["enhancement"]
        factor = operation["enhancement_factor"]
        utils.apply_enhancements(
            image=im_pil,
            zone=selection_zone,
            enhancement=enhancement,
            enhancement_factor=factor,
            mode=selection_mode,
        )

    return im_pil
def update_similar_items(undo_clicks, redo_clicks, selectedData, figure, category):
    """Find similar products for the (optionally cropped) displayed image.

    Reads the image embedded in the Plotly ``figure``, optionally restricts
    it to the user's rectangular or lasso selection, then asks ``cnn`` for
    the best match plus a cheaper ("frugal") and pricier ("premium")
    alternative in ``category``, rendering one metadata card per match.

    Parameters
    ----------
    undo_clicks, redo_clicks : int or None
        Click counters of the two buttons; their combination decides whether
        the whole image or only the selection is matched.
    selectedData : dict or None
        Plotly selection payload ('range' box or 'lassoPoints').
    figure : dict
        Plotly figure whose layout image carries the base64 source.
    category : str
        Product category forwarded to the matcher.

    Returns
    -------
    list or None
        Child components for the three matches, or None when neither button
        has been clicked yet.
    """
    #if not image_str:
    #    print("no images")
    #    return
    if (undo_clicks is None) and (redo_clicks is None):
        # Neither button has been clicked yet: nothing to do.
        print("undo_clicks: {}".format(undo_clicks))
        print("redo_clicks: {}".format(redo_clicks))
        # NOTE(review): label says "redo_clicks" but prints category.
        print("redo_clicks: {}".format(category))
        return
    elif redo_clicks is None:
        # this means that whole image was selected
        selectedData = None
        print("\nundo_clicks: {}".format(undo_clicks))
        print("redo_clicks: {}".format(redo_clicks))
        print("selectedData: {}\n".format(selectedData))
        print("redo_clicks: {}".format(category))
    elif undo_clicks is None:
        # this means that cropped image was selected
        print("\nundo_clicks: {}".format(undo_clicks))
        print("redo_clicks: {}".format(redo_clicks))
        print("selectedData: {}\n".format(selectedData))
        print("redo_clicks: {}".format(category))
    elif undo_clicks > redo_clicks:
        # More undos than redos: fall back to the whole image.
        selectedData = None
        print("\nundo_clicks: {}".format(undo_clicks))
        print("redo_clicks: {}".format(redo_clicks))
        print("selectedData: {}\n".format(selectedData))
        print("redo_clicks: {}".format(category))

    #selectedData: {'range': {'y': [70.17453798767961, 513.4045174537987], 'x': [87.70071868583162, 963.6575975359342]}, 'points': []}
    # Pull the current image (base64 data-URL) straight out of the figure.
    image_str = figure['layout']['images'][0]['source']
    print(image_str[0:100])
    string = image_str.split(';base64,')[-1]
    im_pil = drc.b64_to_pil(string.encode('ascii'))
    # Hard-coded switch: whiten everything outside the selection instead of
    # cropping. Currently disabled.
    white_bg = False
    im_size = im_pil.size
    if im_pil.mode != 'RGB':
        im_pil = im_pil.convert('RGB')

    # Select using rectangular box
    if (selectedData is not None) and ('range' in selectedData):
        selection_mode = 'select'
        lower, upper = map(int, selectedData['range']['y'])
        left, right = map(int, selectedData['range']['x'])

        # Adjust height difference (Plotly y grows up, PIL y grows down)
        height = im_size[1]
        upper = height - upper
        lower = height - lower
        selection_zone = (left, upper, right, lower)
        if white_bg:
            # IF YOU WANT WHITE BACKGROUND *****************
            # TO GET WHITE BACKGROUND
            # MAYBE LASSO WOULD BE GOOD
            # Blank the whole image to white, then paste the crop back.
            crop = im_pil.crop(selection_zone)
            #gray = im_pil.convert('L')
            imgarray = np.array(im_pil)
            for i in range(imgarray.shape[0]):
                for j in range(imgarray.shape[1]):
                    imgarray[i][j] = (255, 255, 255)
            im_pil = Image.fromarray(imgarray)
            im_pil.paste(crop, selection_zone)
            # IF YOU WANT WHITE BACKGROUND *****************

    # Select using Lasso
    elif selectedData and 'lassoPoints' in selectedData:
        selection_mode = 'lasso'
        im_filtered = im_pil.copy()
        # IF YOU WANT WHITE BACKGROUND *****************
        # NOTE(review): here selection_zone becomes a mask *image*, yet it
        # is later passed to im_pil.crop() (which expects a box) when
        # white_bg is False — looks like a latent bug; confirm.
        selection_zone = generate_lasso_mask(im_pil, selectedData)
        imgarray = np.array(im_pil)
        for i in range(imgarray.shape[0]):
            for j in range(imgarray.shape[1]):
                imgarray[i][j] = (255, 255, 255)
        im_pil = Image.fromarray(imgarray)
        #im_filtered = image.filter(filter_selected)
        im_pil.paste(im_filtered, mask=selection_zone)
        # IF YOU WANT WHITE BACKGROUND *****************

    # Select the whole image
    else:
        selection_mode = 'select'
        selection_zone = (0, 0) + im_size
        # IF YOU WANT WHITE BACKGROUND *****************
        # TO GET WHITE BACKGROUND
        # MAYBE LASSO WOULD BE GOOD
        if white_bg:
            crop = im_pil.crop(selection_zone)
            #gray = im_pil.convert('L')
            imgarray = np.array(im_pil)
            for i in range(imgarray.shape[0]):
                for j in range(imgarray.shape[1]):
                    imgarray[i][j] = (255, 255, 255)
            im_pil = Image.fromarray(imgarray)
            # IF YOU WANT WHITE BACKGROUND *****************
            im_pil.paste(crop, selection_zone)

    print("selection_zone: {}".format(selection_zone))
    #if im_pil.mode != 'RGB':
    #    im_pil = im_pil.convert('RGB')
    ## TO GET WHITE BACKGROUND
    ## MAYBE LASSO WOULD BE GOOD
    #crop = im_pil.crop(selection_zone)
    ##gray = im_pil.convert('L')
    #imgarray = np.array(im_pil)
    #for i in range(imgarray.shape[0]):
    #    for j in range(imgarray.shape[1]):
    #        imgarray[i][j] = (255,255,255)
    #im_pil = Image.fromarray(imgarray)
    #im_pil.paste(crop,selection_zone)

    # Good
    # Default path: crop down to the selection for matching.
    if not white_bg:
        im_pil = im_pil.crop(selection_zone)
    """ DEBUG
    # good
    data = decodestring(string.encode('ascii'))
    with open(cwd +"/image_test.jpg", "wb") as f:
        f.write(data)
    # fine but don't want to dl images off internet
    input_image_path = cwd +"/image_test.jpg"
    print('input_image_path = {}'.format(input_image_path))
    """
    # debugging
    im_pil.save(cwd + "/image_crop.jpg")
    #global cnn
    #top_match, frugal_match, premium_match = cnn.find_top_matches_by_path(input_image_path)
    #top_match, frugal_match, premium_match = cnn.find_top_matches_by_PIL_image(im_pil)
    top_match, frugal_match, premium_match = cnn.find_top_matches_by_PIL_image(
        im_pil, category)

    similar_image_paths = [
        top_match['image_path'], frugal_match['image_path'],
        premium_match['image_path']
    ]
    names = [top_match['name'], frugal_match['name'], premium_match['name']]
    brands = [
        top_match['brand'], frugal_match['brand'], premium_match['brand']
    ]
    urls = [top_match['url'], frugal_match['url'], premium_match['url']]
    prices = [top_match['msrp'], frugal_match['msrp'], premium_match['msrp']]
    # Normalise missing sale prices (None or NaN) to the string 'None'.
    sales = []
    for salep in [
            top_match['sale'], frugal_match['sale'], premium_match['sale']
    ]:
        if (salep is None) or math.isnan(salep):
            sales.append('None')
        else:
            sales.append(salep)
    heads = ['Match!!!', 'Frugal', 'Premium']
    children = []
    for i, (image_filename, head, brand, name, price, sale, url) in enumerate(
            zip(similar_image_paths, heads, brands, names, prices, sales, urls)):
        encoded_image = base64.b64encode(open(image_filename, 'rb').read())
        children.append(
            display_encoded_image_and_metadata(encoded_image, head, brand,
                                               name, price, sale, url))
    return children
def update_graph_interactive_image(content, n_clicks, figure, selectedData,
                                   filters, enhance, enhancement_factor,
                                   new_filename, dragmode, enc_format, storage):
    """Rebuild the interactive image after an upload or an edit operation.

    When a new file was uploaded (``new_filename`` differs from the stored
    filename) the uploaded base64 payload is decoded; otherwise the current
    image is recovered from the figure itself and the selected filter /
    enhancement is applied to the chosen zone (lasso, box, or whole image).

    Returns
    -------
    list
        ``[InteractiveImagePIL component, hidden filename Div]``.
    """
    start = time.time()

    # The storage div simply holds the current filename.
    filename = storage

    if new_filename and new_filename != filename:
        # Fresh upload: decode its base64 payload into a PIL image.
        if DEBUG:
            print(filename, "replaced by", new_filename)
        payload = content.split(';base64,')[-1]
        im_pil = drc.b64_to_pil(payload)
    else:
        # Same file: recover the current image from the figure's layout.
        payload = figure['layout']['images'][0]['source'].split(';base64,')[-1]
        im_pil = drc.b64_to_pil(string=payload)

    width, height = im_pil.size

    # Work out which zone of the image the operation applies to.
    if selectedData and 'lassoPoints' in selectedData:
        selection_mode = 'lasso'
        selection_zone = generate_lasso_mask(im_pil, selectedData)
    elif selectedData and 'range' in selectedData:
        selection_mode = 'select'
        y_lo, y_hi = map(int, selectedData['range']['y'])
        x_lo, x_hi = map(int, selectedData['range']['x'])
        # Plotly's y axis grows upward; PIL's grows downward.
        selection_zone = (x_lo, height - y_hi, x_hi, height - y_lo)
    else:
        selection_mode = 'select'
        selection_zone = (0, 0, width, height)

    # Apply whichever operations the user picked (both may be active).
    if filters:
        apply_filters(image=im_pil, zone=selection_zone,
                      filter=filters, mode=selection_mode)
    if enhance:
        apply_enhancements(image=im_pil, zone=selection_zone,
                           enhancement=enhance,
                           enhancement_factor=enhancement_factor,
                           mode=selection_mode)

    if DEBUG:
        print(f"Updated Image Storage in {time.time() - start:.3f} sec")

    return [
        drc.InteractiveImagePIL(
            image_id='interactive-image',
            image=im_pil,
            enc_format=enc_format,
            display_mode='fixed',
            dragmode=dragmode,
            verbose=DEBUG,
        ),
        html.Div(
            id='div-filename-image',
            children=new_filename,
            style={'display': 'none'},
        ),
    ]