def retrieval():
    global feature_extraction_method
    global distance
    global number_of_images
    global parameters_frame

    number_of_images = np.int16(input_number_of_images.get())
    feature_extraction_method = feature_extraction_method_name(var1.get())
    distance = distance_name(var2.get())

    # TODO: fix this to get this parameter from the interface
    #path_cnn_trained = '/Users/flavio/Dropbox/Compartilhadas/Romuere/CBIR/inception-2015-12-05/classify_image_graph_def.pb'
    #print(path_database,path_folder_retrieval,path_image,extension_classes,feature_extraction_method,distance,number_of_images,list_of_parameters)

    # fall back to the training variant of the CNN methods when no
    # pre-trained model is selected in the interface
    if (feature_extraction_method == 'cnn' or feature_extraction_method == 'cnn_probability') and var3.get() == 0:
        feature_extraction_method = feature_extraction_method + '_training'

    # get the list of names and labels
    name_images_database, labels_database, name_images_query, labels_query = \
        convert_database_to_files.get_name_labels(path_database, path_folder_retrieval)

    path_output = path_database + 'features/'
    path_cnn_trained = ''
    if feature_extraction_method == 'cnn':
        path_cnn_trained = path_output + 'inception_resnet_v2_2016_08_30.ckpt'
    elif feature_extraction_method == 'cnn_training':
        path_cnn_trained = path_output + 'model.ckpt'

    run.run_command_line(name_images_database, labels_database, name_images_query,
                         labels_query, path_cnn_trained, path_output,
                         feature_extraction_method, distance, number_of_images,
                         list_of_parameters, 'simple', searching_method='bf',
                         isEvaluation=False)
def start_Saxsgen():
    # flavio machine
    path_database = '/Users/flavio/Desktop/Saxsgen/'
    path_cnn_trained = '/Users/flavio/Desktop/Saxsgen_features/model.ckpt'
    path_output = '/Users/flavio/Desktop/Saxsgen_features/'

    # flavio dresden
    #path_database = '/home/users/flavio/databases/Saxsgen/'
    #path_cnn_trained = '/home/users/flavio/databases/Saxsgen_features/model.ckpt'
    #path_output = '/home/users/flavio/databases/Saxsgen_features/'

    feature_extraction_method = 'cnn_training'
    searching_method = 'kd'
    preprocessing_method = 'log'
    distance = 'ed'
    number_of_images = 10
    list_of_parameters = ['0.01', '50000']

    folders = glob.glob(path_database + '*/')

    # get the names and labels for the database: every image in each class
    # folder, except the last one, which is held out as that class's query
    cont = 0
    name_images_database = []
    labels_database = []
    name_images_query = []
    labels_query = []
    for folder in folders:
        name_images = glob.glob(folder + "/*.jpg")
        labels = np.full(len(name_images), cont, dtype=int)  # np.int is deprecated
        name_images_database.extend(name_images)
        labels_database.extend(labels)

        name_images_query.append(name_images_database.pop())
        labels_query.append(labels_database.pop())
        cont += 1

    labels_database = np.asarray(labels_database)
    labels_query = np.asarray(labels_query)

    run.run_command_line(name_images_database, labels_database, name_images_query,
                         labels_query, path_cnn_trained, path_output,
                         feature_extraction_method, distance, number_of_images,
                         list_of_parameters, preprocessing_method, searching_method,
                         False)
def main(argv):
    path_database = ''
    path_cnn_trained = ''
    path_folder_retrieval = ''
    feature_extraction_method = ''
    distance = ''
    searching_method = ''
    number_of_images = 0
    list_of_parameters = []

    usage = ('cbir_cl.py -d <path_database> -c <path_cnn_trained> '
             '-r <path_folder_retrieval> -f <feature_extraction_method> '
             '-s <distance-similarity metric> -p <searching_method> '
             '-n <number_of_images> -m <list_of_parameters>')
    try:
        opts, args = getopt.getopt(argv, "hd:c:r:f:s:p:n:m:")
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt == '-d':
            path_database = arg
        elif opt == '-c':
            path_cnn_trained = arg
        elif opt == '-r':
            path_folder_retrieval = arg
        elif opt == '-f':
            feature_extraction_method = arg
        elif opt == '-s':
            distance = arg
        elif opt == '-p':
            searching_method = arg
        elif opt == '-n':
            number_of_images = int(float(arg))
        elif opt == '-m':
            list_of_parameters = arg.split(',')

    run.run_command_line(path_database, path_folder_retrieval, path_cnn_trained,
                         feature_extraction_method, distance, number_of_images,
                         list_of_parameters)
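# A hedged usage sketch: assuming this module is the cbir_cl.py script named in
# the usage string (sys and getopt are imported at module level), it can be
# invoked like this; all paths and parameter values below are placeholders:
#
#   python cbir_cl.py -d /data/cells/ -r /data/cells/query/ \
#       -f glcm -s ed -p bf -n 10 -m 2,8,8
if __name__ == '__main__':
    main(sys.argv[1:])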
def run_retrieval_process_using_folders_structure():
    ############################# Paths ############################################
    # dani machine
    path_database = '/Users/dani/Desktop/site_cric/test_pequeno/'
    path_cnn_pre_trained = '/Users/dani/Desktop/celulas_6_classes/celulas_6_classes_train_png/tensor/files/inception_resnet_v2_2016_08_30.ckpt'
    #path_cnn_pre_trained = '/Users/dani/Desktop/site_cric/test_pequeno/output/model.ckpt'  # only to initialize the cnn
    #path_save_cnn = '/Users/dani/Desktop/celulas_6_classes/celulas_6_classes_train_png/tensor/files/inception_resnet_v2_2016_08_30.ckpt'  # used with the pre-trained inception resnet
    path_save_cnn = '/Users/dani/Desktop/site_cric/test_pequeno/output/model.ckpt'  # saved here and reused in the retrieval process
    path_retrieval = '/Users/dani/Desktop/site_cric/test_pequeno/query/'

    #path_database = '/Users/dani/Desktop/site/celulas299_train/'
    #path_cnn_trained = '/Users/dani/Desktop/cells_small/features/inception_resnet_v2_2016_08_30.ckpt'
    #path_cnn_trained = '/Users/dani/Desktop/site/celulas299_2_classes/output/model.ckpt-65043'
    #path_retrieval = '/Users/dani/Desktop/site/celulas299_train/query/'
    #path_output = path_database + 'output/'

    # cnn machine
    #path_database = '/home/users/dani/databases/cells/cells_train_augmentation/database/'
    #path_cnn_trained = '/home/users/dani/databases/cells/cells_train_augmentation/features/model.ckpt'
    #path_retrieval = '/home/users/dani/databases/cells/cells_train_augmentation/query'

    path_output = path_database + 'output/'

    ############################# Parameters #######################################
    feature_extraction_method = 'cnn_training_inception'
    preprocessing_method = 'None'
    searching_method = 'bf'
    distance = 'ed'
    number_of_images = 10
    #list_of_parameters = ['0.1','100']
    #list_of_parameters = ['2','8','8']  # distance, number of bins
    #list_of_parameters = ['4', '3', '2', '8']
    #list_of_parameters = ['0.01','1']
    list_of_parameters = ['0.05', '3']

    # get the list of names and labels
    name_images_database, labels_database, name_images_query, labels_query = \
        convert_database_to_files.get_name_labels(path_database, path_retrieval)
    #inception_feature_extraction.features_extraction_new(name_images_database,labels_database,path_cnn_trained,feature_extraction_method)

    ############################ Calling function ##################################
    start = timeit.default_timer()
    _, train_time, _ = run.run_command_line(
        name_images_database, labels_database, name_images_query, labels_query,
        path_cnn_pre_trained, path_save_cnn, path_output,
        feature_extraction_method, distance, number_of_images,
        list_of_parameters, preprocessing_method, searching_method, False)
    stop = timeit.default_timer()
    print("Total time = ", stop - start)
    print("Train time = ", train_time)
def run_retrieval_process_using_folders_structure():
    ############################# Paths ############################################
    # flavio machine
    #path_database = '/Users/flavio/Desktop/cells/'
    #path_cnn_trained = '/Users/flavio/Desktop/cells/features/model_wavelet.ckpt'
    #path_cnn_trained = '/Users/flavio/Desktop/fibers_small/features/inception_resnet_v2_2016_08_30.ckpt'
    #path_retrieval = '/Users/flavio/Desktop/cells/query/'
    #path_output = path_database + 'features/'

    # cnn machine
    path_database = '/home/users/flavio/databases/cells/cells_train/'
    path_cnn_trained = '/home/users/flavio/databases/cells/cells_train/features/model_wavelet.ckpt'
    path_retrieval = '/home/users/flavio/databases/cells/cells_train/query/'
    path_output = path_database + 'features/'

    ############################# Parameters #######################################
    feature_extraction_method = 'daisy'
    preprocessing_method = 'simple'
    searching_method = 'bf'
    distance = 'ed'
    number_of_images = 10
    #list_of_parameters = ['0.1','100']
    #list_of_parameters = ['2','8','8']  # distance, number of bins
    list_of_parameters = ['4', '3', '2', '8']
    #list_of_parameters = ['0.1','0']
    #list_of_parameters = ['2']

    # get the list of names and labels
    name_images_database, labels_database, name_images_query, labels_query = \
        convert_database_to_files.get_name_labels(path_database, path_retrieval)
    #inception_feature_extraction.features_extraction_new(name_images_database,labels_database,path_cnn_trained,feature_extraction_method)

    ############################ Calling function ##################################
    start = timeit.default_timer()
    _, train_time, _ = run.run_command_line(
        name_images_database, labels_database, name_images_query, labels_query,
        path_cnn_trained, path_output, feature_extraction_method, distance,
        number_of_images, list_of_parameters, preprocessing_method,
        searching_method, False)
    stop = timeit.default_timer()
    print("Total time = ", stop - start)
    print("Train time = ", train_time)
def run_create_graph():
    # cnn
    path_database_train = '/home/users/romuere/Saxsgen/new_database_split/new_database_split_train/'
    path_database_test = '/home/users/romuere/Saxsgen/new_database_split/new_database_split_test/'
    path_retrieval = '/home/users/flavio/databases/fiberFlaRom/fiberFlaRom_train/query/'
    path_cnn_trained = '/home/users/romuere/Saxsgen/new_database_split/new_database_split_train/features/model.ckpt'
    path_output_train = path_database_train + 'features/'
    path_output_test = path_database_test + 'features/'

    preprocessing_method = 'log'
    distance = 'ed'
    searching_method = 'kd'
    percent_database = 1
    percent_query = 1
    number_of_images = 10
    feature_extraction_method = 'cnn_training'

    #jump_num_epoch = [1,4,5,10,20,30,30]  # cells
    #learning_rate = [0.1,0.1,0.08,0.04,0.02,0.01,0.009]  # cells
    #jump_num_epoch = [1,9,10,20,30,30,50,100,100]  # fmd
    #learning_rate = [0.1,0.1,0.03,0.02,0.01,0.008,0.004,0.002,0.001]  # fmd
    jump_num_epoch = [1, 4, 5, 10, 20, 30, 50, 100]  # fibers
    learning_rate = [0.1, 0.1, 0.1, 0.1, 0.08, 0.06, 0.04, 0.04]  # fibers

    NUM_LEVEL = [0]
    for num_level in NUM_LEVEL:
        remove_files_cnn(path_output_train)
        list_train_time = []
        list_map = []

        # removing files
        remove_files_pickle(path_output_train)

        cont_index = 0
        for num_epoch in jump_num_epoch:
            list_of_parameters = [str(learning_rate[cont_index]), str(num_epoch),
                                  str(num_level)]

            # train
            # get the list of names and labels
            name_images_database, labels_database, name_images_query, labels_query = \
                convert_database_to_files.get_name_labels(path_database_train, path_retrieval)
            _, train_time, _ = run.run_command_line(
                name_images_database, labels_database, name_images_query,
                labels_query, path_cnn_trained, path_output_train,
                feature_extraction_method, distance, number_of_images,
                list_of_parameters, preprocessing_method, searching_method,
                isEvaluation=True, do_searching_processing=False, save_csv=False)

            if not list_train_time:
                list_train_time.append([num_epoch, train_time[0]])
            else:
                list_train_time.append([
                    np.sum(jump_num_epoch[0:cont_index + 1]),
                    train_time[0] + list_train_time[-1][1]
                ])

            # evaluation
            list_of_parameters = ['0.1', '0', str(num_level)]
            name_images_database, labels_database = \
                convert_database_to_files.get_name_labels(path_database_test)
            MAP, fig = evaluation.evaluation(
                name_images_database, labels_database, name_images_database,
                labels_database, path_output_test, feature_extraction_method,
                distance, list_of_parameters, preprocessing_method,
                searching_method, path_cnn_trained=path_cnn_trained,
                percent_query=percent_query, percent_database=percent_database)

            list_map.append([np.sum(jump_num_epoch[0:cont_index + 1]), np.mean(MAP)])
            print('Num_epoch =', np.sum(jump_num_epoch[0:cont_index + 1]),
                  'train_time =', list_train_time[-1][1], 'MAP =', np.mean(MAP))

            # removing files
            remove_files_pickle(path_output_test)
            cont_index += 1

        np.savetxt(path_output_test + feature_extraction_method + '_train_time_' +
                   preprocessing_method + '_' + str(num_level) + '_level' + '.csv',
                   np.asarray(list_train_time), delimiter=',')
        np.savetxt(path_output_test + feature_extraction_method + '_Map_' +
                   preprocessing_method + '_' + str(num_level) + '_level' + '.csv',
                   np.asarray(list_map), delimiter=',')
def run_retrieval_process_using_txt_file():
    # dani machine
    root = '/Users/dani/Desktop/kyager_data_raw'
    path_database_class0 = '/Users/dani/Desktop/kyager_data_raw/WAXS.txt'
    path_database_class1 = '/Users/dani/Desktop/kyager_data_raw/SAXS.txt'
    path_query_class0 = '/Users/dani/Desktop/kyager_data_raw/WASXS_query.txt'
    path_query_class1 = '/Users/dani/Desktop/kyager_data_raw/SAXS_query.txt'
    path_cnn_trained = '/Users/dani/Desktop/kyager_data_raw/model.ckpt'
    path_output = '/Users/dani/Desktop/kyager_data_raw/features/'

    # dani cnn
    #root = '/home/users/dani/databases/kyager_data'
    #path_database_class0 = '/home/users/dani/databases/kyager_data/ringisotropic2_database.txt'
    #path_database_class1 = '/home/users/dani/databases/kyager_data/ringtextured2_database.txt'
    #path_query_class0 = '/home/users/dani/databases/kyager_data/ringisotropic2_query.txt'
    #path_query_class1 = '/home/users/dani/databases/kyager_data/ringtextured2_query.txt'
    #path_cnn_trained = '/home/users/dani/databases/kyager_data/model.ckpt'
    #path_output = '/home/users/dani/databases/kyager_data/features/'

    feature_extraction_method = 'cnn_training'
    searching_method = 'kd'
    preprocessing_method = 'simple'
    distance = 'ed'
    number_of_images = 10
    list_of_parameters = ['0.01', '50000']

    # read the image names for each class, one name per line
    with open(path_database_class0) as f:
        name_database_class0 = f.read().splitlines()
    with open(path_database_class1) as f:
        name_database_class1 = f.read().splitlines()
    with open(path_query_class0) as f:
        name_query_class0 = f.read().splitlines()
    with open(path_query_class1) as f:
        name_query_class1 = f.read().splitlines()

    # prefix every name with the root folder
    name_database_class0 = [root + name for name in name_database_class0]
    name_database_class1 = [root + name for name in name_database_class1]
    name_query_class0 = [root + name for name in name_query_class0]
    name_query_class1 = [root + name for name in name_query_class1]

    labels_database_class0 = np.zeros(len(name_database_class0), dtype=int)
    labels_database_class1 = np.ones(len(name_database_class1), dtype=int)
    labels_query_class0 = np.zeros(len(name_query_class0), dtype=int)
    labels_query_class1 = np.ones(len(name_query_class1), dtype=int)

    name_images_database = []
    name_images_query = []
    name_images_database.extend(name_database_class0)
    name_images_database.extend(name_database_class1)
    labels_database = np.concatenate((labels_database_class0, labels_database_class1))
    name_images_query.extend(name_query_class0)
    name_images_query.extend(name_query_class1)
    labels_query = np.concatenate((labels_query_class0, labels_query_class1))

    # preprocessing to remove nonexistent files
    name_images_database, labels_database = convert_database_to_files.preprocessing(
        name_images_database, labels_database)
    name_images_query, labels_query = convert_database_to_files.preprocessing(
        name_images_query, labels_query)

    run.run_command_line(name_images_database, labels_database, name_images_query,
                         labels_query, path_cnn_trained, path_output,
                         feature_extraction_method, distance, number_of_images,
                         list_of_parameters, preprocessing_method, searching_method,
                         False)
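# A minimal refactoring sketch (hypothetical helper, not part of the original
# API): the read-split-prefix pattern above repeats four times and could be
# condensed into a single function.
def load_image_names(list_path, root):
    """Read one image name per line from list_path and prefix each with root."""
    with open(list_path) as f:
        return [root + name for name in f.read().splitlines()]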
def returnInformation(self, pyCBIR):
    #path_output = self.returnPathOutput(self)
    feature_extraction_method = ''
    if self.checkBox.isChecked():
        feature_extraction_method = 'glcm'
    if self.checkBox_2.isChecked():
        feature_extraction_method = 'hog'
    if self.checkBox_3.isChecked():
        feature_extraction_method = 'fotf'
    if self.checkBox_4.isChecked():
        feature_extraction_method = 'lbp'
    if self.checkBox_5.isChecked():
        feature_extraction_method = 'cnn'
    if self.checkBox_6.isChecked():
        feature_extraction_method = 'daisy'

    similarity_metric = ''
    if self.radioButton_7.isChecked():
        similarity_metric = 'ed'
    elif self.radioButton_8.isChecked():
        similarity_metric = 'id'
    elif self.radioButton_9.isChecked():
        similarity_metric = 'cs'
    elif self.radioButton_10.isChecked():
        similarity_metric = 'pcc'
    elif self.radioButton_16.isChecked():
        similarity_metric = 'csd'
    elif self.radioButton_11.isChecked():
        similarity_metric = 'kld'
    elif self.radioButton_13.isChecked():
        similarity_metric = 'ld'
    elif self.radioButton_12.isChecked():
        similarity_metric = 'ksd'
    elif self.radioButton_15.isChecked():
        similarity_metric = 'cmd'
    elif self.radioButton_14.isChecked():
        similarity_metric = 'cd'

    searching_method = ''
    if self.radioButton_17.isChecked():
        searching_method = 'bf'
    elif self.radioButton_18.isChecked():
        searching_method = 'r'
    elif self.radioButton_19.isChecked():
        searching_method = 'kd'
    elif self.radioButton_20.isChecked():
        searching_method = 'lsh'

    retrieval_number = int(self.lineEdit.text())
    preprocessing_method = 'simple'
    path_cnn_trained = self.path_output + 'model.ckpt'
    list_of_parameters = ['0', '0', '0']

    if self.lineEdit_6.text() != "":
        fname_database, labels_database = informationPath(self.lineEdit_6.text())
        fname_retrieval, labels_retrieval = informationPath(self.lineEdit_4.text())
    elif self.lineEdit_9.text() != "":
        fname_database, labels_database = informationFile(self.lineEdit_9.text())
        fname_retrieval, labels_retrieval = informationFile(self.lineEdit_8.text())

    _, _, file = run.run_command_line(fname_database, labels_database,
                                      fname_retrieval, labels_retrieval,
                                      path_cnn_trained, self.path_output,
                                      feature_extraction_method, similarity_metric,
                                      retrieval_number, list_of_parameters,
                                      preprocessing_method, searching_method,
                                      isEvaluation=False)
    print(file)
    #file = '/Users/romuere/Dropbox/new_Database/todas.jpg'

    # Create widget
    self.w = QtWidgets.QWidget(self.centralwidget)
    self.w.setGeometry(QtCore.QRect(270, 30, 951, 791))
    self.w.setEnabled(True)
    self.w.setAcceptDrops(True)
    self.w.setObjectName("image")
    label = QLabel(self.w)
    pixmap = QPixmap(file)
    if pixmap.height() < 700:
        pixmap = pixmap.scaled(900, pixmap.height())
    else:
        pixmap = pixmap.scaled(900, 800)
    label.setPixmap(pixmap)
    self.w.show()
def run_create_graph_map():
    # cnn machine
    path_database_train = '/home/users/flavio/databases/fmd/fmd_train_resize_augmentation/'
    path_database_test = '/home/users/flavio/databases/cells/cells_test/'
    path_retrieval = '/home/users/flavio/databases/fiberFlaRom/fiberFlaRom_train/query/'
    path_cnn_trained = '/home/users/flavio/databases/fmd/fmd_train_resize_augmentation/features/model_test.ckpt'
    path_output_train = path_database_train + 'features/'
    path_output_test = path_database_test + 'features/'

    preprocessing_method = 'None'
    distance = 'ed'
    searching_method = 'kd'
    percent_database = 0.1
    percent_query = 0.001
    number_of_images = 10
    feature_extraction_method = 'cnn_training'

    #jump_num_epoch = [1,4,5,10,20,30,30]  # cells
    #learning_rate = [0.1,0.1,0.08,0.04,0.02,0.01,0.009]  # cells
    #jump_num_epoch = [1,9,10,20,30,30,50,100,100]  # fmd
    #learning_rate = [0.1,0.1,0.03,0.02,0.01,0.008,0.004,0.002,0.001]  # fmd
    #jump_num_epoch = [1,4,5,5,5]  # fibers
    #learning_rate = [0.1,0.1,0.08,0.06,0.004]  # fibers
    #learning_rate = [0.1,0.1,0.1,0.08,0.06,0.02,0.008,0.006,0.004,0.001]  # cells
    #learning_rate = [0.001,0.001,0.1,0.08,0.06,0.04,0.02,0.02,0.01,0.01]  # cells

    NUM_LEVEL = [0]
    learning_rate_0 = 0.1
    factor_dec = 0.01
    learning_rate_f = 0.05

    for num_level in NUM_LEVEL:
        #remove_files_cnn(path_output_train)
        list_train_time = []
        list_map = []
        list_accuracy = []
        list_number_epoch = []
        list_error_total = []

        # removing files
        remove_files_pickle(path_output_train)

        cont_index = 0
        #for num_epoch in jump_num_epoch:
        for num_epoch in range(1, 61, 1):
            # update the learning rate from the last two epoch errors,
            # clamped below at learning_rate_f
            if num_epoch == 1 or num_epoch == 2:
                new_learning_rate = learning_rate_0
                list_of_parameters = [str(new_learning_rate), str(1), str(num_level)]
            else:
                new_learning_rate = new_learning_new(
                    new_learning_rate, factor_dec, num_epoch,
                    list_error_total[-2][2], list_error_total[-1][2], num_epoch)
                if new_learning_rate < learning_rate_f:
                    new_learning_rate = learning_rate_f
                list_of_parameters = [str(new_learning_rate), str(1), str(num_level)]

            # train
            # get the list of names and labels
            name_images_database, labels_database, name_images_query, labels_query = \
                convert_database_to_files.get_name_labels(path_database_train, path_retrieval)
            _, train_time, _, error = run.run_command_line(
                name_images_database, labels_database, name_images_query,
                labels_query, path_cnn_trained, path_output_train,
                feature_extraction_method, distance, number_of_images,
                list_of_parameters, preprocessing_method, searching_method,
                isEvaluation=True, do_searching_processing=False, save_csv=False)

            if not list_train_time:
                list_train_time.append(train_time[0])
            else:
                list_train_time.append(train_time[0] + list_train_time[-1])
            print('train time epoch', num_epoch, '=', list_train_time[-1])

            list_error_total.append([num_epoch, new_learning_rate,
                                     (error[0][1] + error[1][1]) / 2])
            print('Num_epoch =', list_error_total[-1][0],
                  'Learning rate =', list_error_total[-1][1],
                  'Error =', list_error_total[-1][2])

            '''
            if(not list_train_time):
                list_train_time.append(train_time[0])
            else:
                list_train_time.append(train_time[0] + list_train_time[-1])

            #evaluation
            list_of_parameters = ['0.1','0',str(num_level)]
            name_images_database, labels_database = convert_database_to_files.get_name_labels(path_database_test)
            MAP, ACCURACY, fig = evaluation.evaluation(name_images_database, labels_database, name_images_database, labels_database, path_output_test, feature_extraction_method, distance, list_of_parameters, preprocessing_method, searching_method, path_cnn_trained=path_cnn_trained, percent_query=percent_query, percent_database=percent_database)
            list_number_epoch.append(np.sum(jump_num_epoch[0:cont_index+1]))
            list_map.append(MAP)
            list_accuracy.append(ACCURACY)
            print('Num_epoch =', list_number_epoch[-1], 'train_time =', list_train_time[-1], 'MAP =', np.mean(MAP), 'Accuracy =', np.mean(ACCURACY))
            for i in range(len(list_map[-1])):
                print('Map for class ', i, list_map[-1][i])
            for i in range(len(list_map[-1])):
                print('Accuracy for class ', i, list_accuracy[-1][i])

            #removing files
            remove_files_pickle(path_output_test)
            cont_index += 1

            np.savetxt(path_output_test + feature_extraction_method + '_train_time_' + preprocessing_method + '_' + str(num_level) + '_level' + '.csv', np.asarray(list_train_time), delimiter=',')
            np.savetxt(path_output_test + feature_extraction_method + '_Map_' + preprocessing_method + '_' + str(num_level) + '_level' + '.csv', np.asarray(list_map), delimiter=',')
            np.savetxt(path_output_test + feature_extraction_method + '_number_epoch_' + preprocessing_method + '_' + str(num_level) + '_level' + '.csv', np.asarray(list_number_epoch), delimiter=',')
            np.savetxt(path_output_test + feature_extraction_method + '_accuracy_' + preprocessing_method + '_' + str(num_level) + '_level' + '.csv', np.asarray(list_accuracy), delimiter=',')
            '''

        np.savetxt(path_output_train + feature_extraction_method +
                   '_learning_rate_error_' + '.csv',
                   np.asarray(list_error_total), delimiter=',')
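# The helper new_learning_new() used above is defined elsewhere in the project
# and is not shown in this file. The sketch below is a hypothetical stand-in
# (deliberately given a different name) illustrating one common error-driven
# decay rule: shrink the rate by factor_dec whenever the error stops improving.
def decay_learning_rate_sketch(rate, factor_dec, prev_error, curr_error):
    """Reduce the learning rate when the training error plateaus or rises."""
    if curr_error >= prev_error:
        return rate * (1 - factor_dec)
    return rate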
def run_sorting():
    """
    This is just a test function, to avoid running the GUI every time.
    """
    import csv
    import itertools

    """
    ## To run fibers/cells/fmd/dtd/...
    folders = ['/Users/romuere/Dropbox/CBIR/fibers/database/no_fibers/*','/Users/romuere/Dropbox/CBIR/fibers/database/yes_fibers/*']
    fname_database = []
    labels_database = np.empty(0)
    for id,f in enumerate(folders):
        files = glob.glob(f)
        labels_database = np.append(labels_database, np.zeros(len(files))+id)
        fname_database = fname_database+files
        print(files)
    print(len(fname_database))

    preprocessing_method = 'log'
    feature_extraction_method = 'glcm'
    searching_method = 'lsh'
    retrieval_number = 10
    similarity_metric = 'ed'
    path_output = '/Users/romuere/Dropbox/CBIR/fibers/results/'
    list_of_parameters = ['1','2']
    path_cnn_trained = ''
    fname_retrieval = fname_database[0:3] + fname_database[2001:2003]
    labels_retrieval = np.concatenate((labels_database[0:3],labels_database[2001:2003]))
    """

    ## To run scattering images
    path = '/Users/romuere/Desktop/als/kyager_data_raw'
    files_database_class0 = '/Users/romuere/Desktop/als/kyager_data_raw/SAXS.txt'
    files_database_class1 = '/Users/romuere/Desktop/als/kyager_data_raw/WAXS.txt'
    files_retrieval_class0 = '/Users/romuere/Desktop/als/kyager_data_raw/SAXS_query.txt'
    files_retrieval_class1 = '/Users/romuere/Desktop/als/kyager_data_raw/WAXS_query.txt'

    #------#
    reader = csv.reader(open(files_database_class0))
    fname_database_class0 = list(itertools.chain(*list(reader)))
    labels_class_0 = np.zeros(len(fname_database_class0))

    reader = csv.reader(open(files_database_class1))
    fname_database_class1 = list(itertools.chain(*list(reader)))
    labels_class_1 = np.zeros(len(fname_database_class1)) + 1

    fname_database = fname_database_class0 + fname_database_class1
    fname_database = [path + x for x in fname_database]
    labels_database = np.concatenate((labels_class_0, labels_class_1))

    #------#
    reader = csv.reader(open(files_retrieval_class0))
    fname_retrieval_class0 = list(itertools.chain(*list(reader)))
    labels_retrieval_class0 = np.zeros(len(fname_retrieval_class0))

    reader = csv.reader(open(files_retrieval_class1))
    fname_retrieval_class1 = list(itertools.chain(*list(reader)))
    labels_retrieval_class1 = np.zeros(len(fname_retrieval_class1)) + 1  # class-1 queries are labeled 1

    fname_retrieval = fname_retrieval_class0 + fname_retrieval_class1
    fname_retrieval = [path + x for x in fname_retrieval]
    labels_retrieval = np.concatenate((labels_retrieval_class0, labels_retrieval_class1))

    #------#
    preprocessing_method = 'log'
    feature_extraction_method = 'lbp'
    searching_method = 'lsh'
    retrieval_number = 10
    similarity_metric = 'ed'
    path_output = '/Users/romuere/Desktop/als/output/'
    list_of_parameters = ['2']  # ['2','8','8']
    path_cnn_trained = ''

    run.run_command_line(fname_database, labels_database, fname_retrieval,
                         labels_retrieval, path_cnn_trained, path_output,
                         feature_extraction_method, similarity_metric,
                         retrieval_number, list_of_parameters,
                         preprocessing_method, searching_method,
                         isEvaluation=False)
    searching_method = 'lsh'
    retrieval_number = 2000
    similarity_metric = 'ed'
    path_output = '/Users/romuere/Dropbox/CBIR/fibers/results/'
    list_of_parameters = []
    path_cnn_trained = ''
    fname_retrieval = fname_database
    labels_retrieval = labels_database

    result = run.run_command_line(fname_database, labels_database, fname_retrieval,
                                  labels_retrieval, path_cnn_trained, path_output,
                                  feature_extraction_method, similarity_metric,
                                  retrieval_number, list_of_parameters,
                                  searching_method, isEvaluation=True)
    result = np.array(result[1])
    #result = result[1]

    fscore = []
    #fscore = np.zeros(())
    print(len(labels_retrieval))
    print(len(fname_database))
    for id_label, i in enumerate(labels_retrieval):
        for j in range(retrieval_number):
            nm = j + 1
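# The F-score loop above is truncated in this source, so the original metric
# cannot be reconstructed. The following is a minimal, self-contained sketch of
# one plausible per-query measure, labeled as an assumption rather than the
# original method: each row of retrieved_labels holds the labels returned for
# one query, and we score precision, recall, and F-score at a cutoff k.
def fscore_at_k(query_labels, retrieved_labels, relevant_counts, k):
    """Per-query F-score over the top-k retrieved labels (hypothetical sketch)."""
    scores = []
    for q, row in zip(query_labels, retrieved_labels):
        hits = sum(1 for lab in row[:k] if lab == q)  # relevant items in top k
        precision = hits / k
        recall = hits / relevant_counts[q]  # total relevant items for this class
        f = 0.0 if precision + recall == 0 else \
            2 * precision * recall / (precision + recall)
        scores.append(f)
    return scores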
def returnInformation(self, pyCBIR):
    #import sys
    #sys.path.insert(0, '../src')
    import run

    if self.radioButton.isChecked():
        self.feature_extraction_method = 'glcm'
    if self.radioButton_2.isChecked():
        self.feature_extraction_method = 'hog'
    if self.radioButton_3.isChecked():
        self.feature_extraction_method = 'fotf'
    if self.radioButton_4.isChecked():
        self.feature_extraction_method = 'lbp'
    if self.radioButton_6.isChecked():
        self.feature_extraction_method = 'daisy'

    searching_method = ''
    if self.radioButton_7.isChecked():
        searching_method = 'bf'
    elif self.radioButton_8.isChecked():
        searching_method = 'kd'
    elif self.radioButton_9.isChecked():
        searching_method = 'bt'

    retrieval_number = int(self.lineEdit.text())
    preprocessing_method = 'simple'

    if self.lineEdit_3.text() != "":
        fname_database, labels_database = informationPath(self.lineEdit_3.text())
        fname_retrieval, labels_retrieval = informationPath(self.lineEdit_4.text())
    elif self.lineEdit_5.text() != "":
        fname_database, labels_database = informationFile(self.lineEdit_5.text())
        fname_retrieval, labels_retrieval = informationFile(self.lineEdit_6.text())

    print(self.path_cnn_trained)
    _, _, file = run.run_command_line(fname_database, labels_database,
                                      fname_retrieval, labels_retrieval,
                                      self.path_cnn_pre_trained,
                                      self.path_save_cnn,
                                      self.lineEdit_2.text(),
                                      self.feature_extraction_method,
                                      self.similarity_metric,
                                      retrieval_number,
                                      self.list_of_parameters,
                                      preprocessing_method, searching_method,
                                      isEvaluation=False)

    self.w = QtWidgets.QWidget(self.centralwidget)
    self.w.setGeometry(QtCore.QRect(140, 0, 951, 791))
    self.w.setEnabled(True)
    self.w.setAcceptDrops(True)
    self.w.setObjectName("image")
    label = QLabel(self.w)
    pixmap = QPixmap(file)
    if pixmap.height() < 680:
        pixmap = pixmap.scaled(800, pixmap.height())
    else:
        pixmap = pixmap.scaled(800, 680)
    label.setPixmap(pixmap)
    self.w.show()