def process_oneimage(self, image_file):
    """Classify one image file with the two-stage trash/taxon pipeline.

    Stage 1 (optional): the trash model decides trash vs. object; it is
    skipped when no trash model is configured (``model_trash == 'None'``),
    in which case every image is treated as an object.
    Stage 2: objects are classified by the taxon model on a cropped image.
    The result is logged into the tk text widget.

    Returns:
        (predicted_type, prob_trash, prob_taxon) — the resolved type name
        and the two stage probabilities (prob_trash is 100 when the trash
        stage was skipped; prob_taxon is 0 for trash — NaN would be cleaner,
        left as-is for caller compatibility).
    """
    if self.params.files['model_trash'] != 'None':
        img, _ = classifications.create_image(image_file, cropped=False)
        label, prob_trash = self.cnn_trash.classify(img, char_sizes=None)
    else:
        # No trash model configured: assume "object" with full confidence.
        label, prob_trash = 1, 100

    if label == 0:
        # TRASH goes to Recycle bin
        predicted_type = self.params.type_dict_trash[str(label)]
        prob_taxon = 0  # use np.NaN instead
        message = f'TRASH model result : type : {predicted_type} ; prob : {prob_trash}\n'
    else:
        # NOT TRASH — run the taxon classifier on the cropped image.
        img, char_sizes = classifications.create_image(image_file, cropped=True)
        label, prob_taxon = self.cnn_taxon.classify(img, char_sizes=char_sizes)
        predicted_type = self.params.type_dict_taxon[str(label)]
        message = f'TAXON model result : type : {predicted_type} ; prob : {prob_taxon}\n'

    self.text.insert(tk.END, message)
    self.text.see(tk.END)
    return predicted_type, prob_trash, prob_taxon
def process(self, images2process_list_indir):
    """Classify every image listed in *images2process_list_indir* and sort
    the files into per-type result folders.

    Args:
        images2process_list_indir: iterable of 3-tuples
            ``(group_dir, sub_dir, [image_names])`` relative to the
            measurement directory (inferred from usage — TODO confirm).

    Side effects:
        - copies each image into
          ``<root>/<classification>/<results>/<group>/<sub>/<type>/``,
          creating folders as needed;
        - logs progress into the tk text widget;
        - touches an empty ``MeasureSum.xml`` marker in each visited
          measurement sub-directory.
    """
    measure_dir = os.path.join(self.params.dirs['root'],
                               self.params.dirs['measurement'])
    for fo in images2process_list_indir:
        cur_dir = os.path.join(measure_dir, fo[0], fo[1])
        self.text.insert(tk.END, 'visiting : ' + cur_dir + '\n')
        res_folder1 = os.path.join(self.params.dirs['root'],
                                   self.params.dirs['classification'],
                                   self.params.dirs['results'], fo[0])
        check_folder(folder=res_folder1, create=True)
        res_folder = os.path.join(res_folder1, fo[1])
        check_folder(folder=res_folder, create=True)
        for image in fo[2]:
            self.text.insert(tk.END, 'processing : ' + image + '\n')
            # cur_dir already is measure_dir/fo[0]/fo[1]; reuse it instead of
            # re-joining the same components for every image.
            image_file = os.path.join(cur_dir, image)
            im = classifications.create_image(image_file, cropped=False)
            predicted_label, prob = self.cnn.classify(im)
            # Reverse lookup: type name whose dict value equals the label.
            predicted_type = keysWithValue(self.params.type_dict,
                                           str(predicted_label))[0]
            self.text.insert(
                tk.END, 'result : ' + 'type : ' + predicted_type +
                ' ; prob : ' + str(prob) + '\n')
            self.text.see(tk.END)
            class_folder = os.path.join(res_folder, predicted_type)
            check_folder(folder=class_folder, create=True)
            shutil.copy(image_file, os.path.join(class_folder, image))
        # Touch an empty marker file; the context manager guarantees the
        # handle is closed even if the open itself is the last thing done.
        with open(os.path.join(cur_dir, 'MeasureSum.xml'), 'w'):
            pass
def process_oneimage(self, image_file):
    """Classify one image: trash stage first, then taxon stage with a
    post-hoc trash threshold.

    Stage 1 (optional): the trash model labels the image trash(0)/object(1);
    skipped when ``model_trash == 'None'``, in which case the image is
    assumed to be an object (prob_trash fixed at 100). Note: prob_trash is
    the probability of the *object* class, not of trash.
    Stage 2: objects are classified by the taxon model on a cropped image;
    if the taxon confidence falls below ``thresholds['trash_thresh']`` the
    image is demoted to trash after all.

    Returns:
        (predicted_type, prob_trash, prob_taxon) — prob_taxon is 0 for
        trash (NaN would be cleaner; kept for caller compatibility).
    """
    if not self.params.files['model_trash'] == 'None':
        img, dummy = classifications.create_image(
            image_file, cropped=False, correct_RGBShift=self.correct_RGBShift)
        predicted_label_trash, prob_trash = self.cnn_trash.classify(
            img, char_sizes=None)  # prob_trash is actually probability of object
    else:
        prob_trash = 100
        predicted_label_trash = 1  # object!
    if predicted_label_trash == 0:  # classified as trash
        # TRASH goes to Recycle bin
        predicted_type = self.params.type_dict_trash[str(
            predicted_label_trash)]
        prob_taxon = 0  # use np.NaN instead
    else:
        # NOT TRASH — taxon model on the cropped image.
        img, char_sizes = classifications.create_image(
            image_file, cropped=True, correct_RGBShift=self.correct_RGBShift)
        predicted_label, prob_taxon = self.cnn_taxon.classify(
            img, char_sizes=char_sizes)
        if prob_taxon < self.params.thresholds['trash_thresh']:
            # Low taxon confidence: override and treat as trash anyway.
            predicted_label_trash = 0  # TRASH
            predicted_type = self.params.type_dict_trash[str(
                predicted_label_trash)]
        else:
            predicted_type = self.params.type_dict_taxon[str(
                predicted_label)]
    return predicted_type, prob_trash, prob_taxon
def process_oneimage(self, image_file, man_type):
    """Classify one image (neural model or manual label) and validate the
    result against per-taxon size/shape/confidence thresholds.

    Args:
        image_file: path of the image to classify.
        man_type: manually assigned type name, used verbatim when the
            ``use_neural`` processing flag is not ``'True'``.

    Returns:
        (predicted_type, final_type, predicted_strength, char_sizes) —
        ``predicted_type`` is the raw model/manual label; ``final_type`` is
        that label possibly replaced by an ``Others.Others.*`` rejection
        category; ``final_type`` stays ``None`` when image creation failed.
    """
    predicted_label = None
    predicted_type = 'Others.Others.Unsure'
    final_type = None
    predicted_strength = None
    char_sizes = None
    if self.save_cropped:
        # Keep a copy of the cropped image under ./tmp_crop for inspection.
        tmp_folder = os.path.join(os.path.curdir, 'tmp_crop')
        check_folder(tmp_folder, create=True)
        save_file = os.path.join(tmp_folder, os.path.basename(image_file))
        category = 'cropped'
    else:
        save_file = ''
        category = ''
    img, char_sizes = classifications.create_image(
        image_file,
        cropped=True,
        correct_RGBShift=self.correct_RGBShift,
        save_file=save_file,
        category=category)
    # create_image may fail; only classify when both image and the
    # characteristic sizes came back.
    if img is not None and char_sizes is not None:
        if self.params.processing['use_neural'] == 'True':
            predicted_label, predicted_strength = self.cnn_taxon.classify(
                img, char_sizes=char_sizes)
            predicted_type = self.params.type_dict_taxon[str(
                predicted_label)]
        else:
            # Manual mode: take the operator's label with maximal strength
            # so the strength threshold below never rejects it.
            predicted_strength = 65535
            predicted_type = man_type
        final_type = predicted_type
        # Do checks on threshold, shape and size
        # threshold_df_taxon: per-taxon limits table (pandas DataFrame;
        # columns used below — presumably one row per taxon name).
        row = self.params.threshold_df_taxon.loc[
            self.params.threshold_df_taxon['Taxon'] == predicted_type]
        if not row.empty:
            min_l = row['min_l'].values[0]
            max_l = row['max_l'].values[0]
            min_rate = row['min_rate'].values[0]
            max_rate = row['max_rate'].values[0]
            strength_tsh = row['strength_tsh'].values[0]
            # Aspect-like ratio of the two characteristic sizes; -1 flags a
            # degenerate (zero-length) major axis and will fail the rate check.
            if char_sizes[0] > 0:
                rate = char_sizes[1] / char_sizes[0]
            else:
                rate = -1
            # NOTE: these checks intentionally cascade — a later failing
            # check overwrites final_type, so shape outranks size, which
            # outranks strength.
            if predicted_strength < strength_tsh:
                final_type = 'Others.Others.Unsure'
            if char_sizes[0] < min_l:  # char_sizes[0] larger axis length
                final_type = 'Others.Others.InappropriateSize'
            if char_sizes[0] > max_l:
                final_type = 'Others.Others.InappropriateSize'
            if rate < min_rate or rate > max_rate:
                final_type = 'Others.Others.InappropriateShape'
    return predicted_type, final_type, predicted_strength, char_sizes
# Script: run the trash/object CNN over every image in db_image_dir and copy
# the ones classified as objects (label 1) into db_binsel_dir.
# NOTE(review): db_image_dir and db_binsel_dir are not defined in this chunk —
# presumably set earlier in the file; verify before running standalone.
user = '******'
output_base_dir = os.path.join(r'C:\Users', user, 'OneDrive\WaterScope')
model_file = os.path.join(r'.\model', 'cnn_model_trash.dnn')

# LOAD MODEL
cnn = classifications.cnn_classification(model_file)
type_dict = {'Trash': '0', 'Object': '1'}

# Collect candidate images (fixed typo: extenstions -> extensions).
included_extensions = ['*.jpg', '*.bmp', '*.png', '*.gif']
image_list_indir = []
for ext in included_extensions:
    image_list_indir.extend(glob.glob(os.path.join(db_image_dir, ext)))

for image_file in image_list_indir:  # index was unused; dropped enumerate
    im = classifications.create_image(image_file, cropped=False)
    predicted_label, prob = cnn.classify(im)
    if predicted_label == 1:
        # Object (not trash): keep a copy in the selection folder.
        shutil.copy(image_file,
                    os.path.join(db_binsel_dir, os.path.basename(image_file)))