def evaluate_feats(db, N, feat_pools=feat_pools, keep_rate=keep_rate,
                   project_type=project_type, d_type='d1',
                   depths=[None, 300, 200, 100, 50, 30, 10, 5, 3, 1]):
    # grid-search every N-feature combination and retrieval depth,
    # writing one MMAP row per (combination, depth) to a CSV
    result = open(os.path.join(
        result_dir,
        'feature_reduction-{}-keep{}-{}-{}feats.csv'.format(
            project_type, keep_rate, d_type, N)), 'w')
    for i in range(N):
        result.write("feat{},".format(i))
    result.write("depth,distance,MMAP")

    combinations = itertools.combinations(feat_pools, N)
    for combination in combinations:
        fusion = RandomProjection(features=list(combination),
                                  keep_rate=keep_rate,
                                  project_type=project_type)
        if fusion.check_random_projection():
            for d in depths:
                APs = evaluate_class(db, f_instance=fusion, d_type=d_type, depth=d)
                cls_MAPs = []
                for cls, cls_APs in APs.items():
                    MAP = np.mean(cls_APs)
                    cls_MAPs.append(MAP)
                r = "{},{},{},{}".format(",".join(combination), d, d_type,
                                         np.mean(cls_MAPs))
                print(r)
                result.write('\n' + r)
            print()
    result.close()
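
# A minimal sketch of the random-projection step that RandomProjection above
# presumably performs, assuming keep_rate is the fraction of dimensions kept;
# the helper name and seeding are illustrative, not the project's API.
import numpy as np

def random_project(hist, keep_rate=0.5, seed=0):
    d_in = hist.shape[0]
    d_out = max(1, int(d_in * keep_rate))
    rng = np.random.RandomState(seed)
    # Gaussian projection matrix; the 1/sqrt(d_out) scale roughly preserves
    # pairwise distances (Johnson-Lindenstrauss style)
    R = rng.randn(d_out, d_in) / np.sqrt(d_out)
    return R.dot(hist)

# e.g. reduce a hypothetical 1000-d fused histogram to 500 dimensions:
# reduced = random_project(fused_hist, keep_rate=0.5)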
        samples = []
        data = db.get_data()
        for d in data.itertuples():
            d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
            d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
            samples.append({
                'img': d_img,
                'cls': d_cls,
                'hist': d_hist
            })
        cPickle.dump(samples,
                     open(os.path.join(cache_dir, sample_cache), "wb", True))
        return samples


if __name__ == "__main__":
    db = Database()

    # evaluate database
    APs = evaluate_class(db, f_class=Daisy, d_type=d_type, depth=depth)
    cls_MAPs = []
    for cls, cls_APs in APs.items():
        MAP = np.mean(cls_APs)
        print("Class {}, MAP {}".format(cls, MAP))
        cls_MAPs.append(MAP)
    print("MMAP", np.mean(cls_MAPs))
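
# A minimal sketch of what evaluate_class plausibly computes per query,
# assuming depth-limited retrieval and binary relevance (same class =
# relevant); the helper name is illustrative, not the project's API.
import numpy as np

def average_precision(ranked_classes, query_class, depth=None):
    ranked = ranked_classes[:depth] if depth else ranked_classes
    hits, precisions = 0, []
    for rank, cls in enumerate(ranked, start=1):
        if cls == query_class:
            hits += 1
            precisions.append(hits / rank)  # precision at each relevant hit
    return np.mean(precisions) if precisions else 0.

# e.g. AP for a 'beach' query against a ranked result list:
# ap = average_precision(['beach', 'bus', 'beach'], 'beach', depth=3)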
# evaluate every feature combination, from pairs up to 7-feature sets
evaluate_feats(db, N=2, d_type='d1', keep_rate=keep_rate, project_type=project_type)
evaluate_feats(db, N=3, d_type='d1', keep_rate=keep_rate, project_type=project_type)
evaluate_feats(db, N=4, d_type='d1', keep_rate=keep_rate, project_type=project_type)
evaluate_feats(db, N=5, d_type='d1', keep_rate=keep_rate, project_type=project_type)
evaluate_feats(db, N=6, d_type='d1', keep_rate=keep_rate, project_type=project_type)
evaluate_feats(db, N=7, d_type='d1', keep_rate=keep_rate, project_type=project_type)

# evaluate the color feature on its own
d_type = 'd1'
depth = 30
fusion = RandomProjection(features=['color'], keep_rate=keep_rate,
                          project_type=project_type)
APs = evaluate_class(db, f_instance=fusion, d_type=d_type, depth=depth)
cls_MAPs = []
for cls, cls_APs in APs.items():
    MAP = np.mean(cls_APs)
    print("Class {}, MAP {}".format(cls, MAP))
    cls_MAPs.append(MAP)
print("MMAP", np.mean(cls_MAPs))
        except Exception:
            if verbose:
                print("Counting histogram..., config=%s, distance=%s, depth=%s" %
                      (sample_cache, d_type, depth))
            samples = []
            data = db.get_data()
            for d in data.itertuples():
                d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
                d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
                samples.append({'img': d_img, 'cls': d_cls, 'hist': d_hist})
            cPickle.dump(samples,
                         open(os.path.join(cache_dir, sample_cache), "wb", True))
        return samples


if __name__ == "__main__":
    db = Database()

    # evaluate database
    APs = evaluate_class(db, f_class=HOG, d_type=d_type, depth=depth)
    cls_MAPs = []
    for cls, cls_APs in APs.items():
        MAP = np.mean(cls_APs)
        print("Class {}, MAP {}".format(cls, MAP))
        cls_MAPs.append(MAP)
    print("MMAP", np.mean(cls_MAPs))
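
# A minimal sketch of the cache idiom used above (try to load a pickled
# sample list, recompute on a miss); the file path and helper name are
# illustrative assumptions.
import pickle

def load_or_build(cache_path, build_fn):
    try:
        with open(cache_path, "rb") as f:
            return pickle.load(f)        # cache hit
    except (IOError, OSError, EOFError, pickle.UnpicklingError):
        samples = build_fn()             # cache miss: recompute
        with open(cache_path, "wb") as f:
            pickle.dump(samples, f, protocol=pickle.HIGHEST_PROTOCOL)
        return samples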
    IMG = IMG.astype(int)
    hist = color.histogram(IMG, type='global', n_bin=4)
    assert np.equal(np.where(hist > 0)[0],
                    np.array([37, 43, 58, 61])).all(), "global histogram implementation failed"
    hist = color.histogram(IMG, type='region', n_bin=4, n_slice=2)
    assert np.equal(np.where(hist > 0)[0],
                    np.array([58, 125, 165, 235])).all(), "region histogram implementation failed"

    # examine distance
    np.random.seed(1)
    IMG = sigmoid(np.random.randn(4, 4, 3)) * 255
    IMG = IMG.astype(int)
    hist = color.histogram(IMG, type='region', n_bin=4, n_slice=2)
    IMG2 = sigmoid(np.random.randn(4, 4, 3)) * 255
    IMG2 = IMG2.astype(int)
    hist2 = color.histogram(IMG2, type='region', n_bin=4, n_slice=2)
    assert distance(hist, hist2, d_type='d1') == 2, "d1 implementation failed"
    assert distance(hist, hist2, d_type='d2-norm') == 2, "d2-norm implementation failed"

    # evaluate database
    APs = evaluate_class(db, f_class=Color, d_type=d_type, depth=depth)
    cls_MAPs = []
    for cls, cls_APs in APs.items():
        MAP = np.mean(cls_APs)
        print("Class {}, MAP {}".format(cls, MAP))
        cls_MAPs.append(MAP)
    print("MMAP", np.mean(cls_MAPs))
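
# A minimal sketch of the histogram distances exercised by the asserts above,
# assuming d1 is the L1 (Manhattan) distance and d2-norm the squared L2
# distance; the project's distance() may differ in detail. Two unit-mass
# histograms with disjoint support sit at d1 == 2 and d2-norm == 2, which is
# why both asserts above expect exactly 2.
import numpy as np

def d1(h1, h2):
    return np.sum(np.abs(h1 - h2))

def d2_norm(h1, h2):
    return np.sum((h1 - h2) ** 2)

h1 = np.array([1., 0.])
h2 = np.array([0., 1.])
assert d1(h1, h2) == 2 and d2_norm(h1, h2) == 2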
                else:
                    inputs = torch.autograd.Variable(
                        torch.from_numpy(img).float())
                d_hist = res_model(inputs)[pick_layer]
                d_hist = d_hist.data.cpu().numpy().flatten()
                d_hist /= np.sum(d_hist)  # normalize
                samples.append({
                    'img': d_img,
                    'cls': d_cls,
                    'hist': d_hist
                })
            except Exception:
                pass  # skip images that fail to load or run through the network
        cPickle.dump(samples,
                     open(os.path.join(cache_dir, sample_cache), "wb", True))
        return samples


if __name__ == "__main__":
    # evaluate database
    db = Database()
    APs = evaluate_class(db, f_class=ResNetFeat, d_type=d_type, depth=depth)
    cls_MAPs = []
    for cls, cls_APs in APs.items():
        MAP = np.mean(cls_APs)
        print("Class {}, MAP {}".format(cls, MAP))
        cls_MAPs.append(MAP)
    print("MMAP", np.mean(cls_MAPs))
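
# A minimal sketch of pulling a pooled deep feature from a pretrained ResNet,
# analogous to res_model(inputs)[pick_layer] above; the layer choice and the
# random stand-in input are illustrative assumptions, not the project's setup.
import numpy as np
import torch
import torchvision.models as models

model = models.resnet18(pretrained=True)  # weights="IMAGENET1K_V1" on newer torchvision
model.eval()
extractor = torch.nn.Sequential(*list(model.children())[:-1])  # drop the fc layer

with torch.no_grad():
    img = torch.rand(1, 3, 224, 224)          # stand-in for a preprocessed image
    feat = extractor(img).flatten().numpy()   # 512-d global-average-pooled feature
    feat /= np.sum(feat)                      # normalize, as above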
                inputs = torch.autograd.Variable(
                    torch.from_numpy(img).float())
                d_hist = vgg_model(inputs)[pick_layer]
                d_hist = np.sum(d_hist.data.cpu().numpy(), axis=0)
                d_hist /= np.sum(d_hist)  # normalize
                samples.append({
                    'img': d_img,
                    'cls': d_cls,
                    'hist': d_hist
                })
            except Exception:
                pass  # skip images that fail to load or run through the network
        cPickle.dump(samples,
                     open(os.path.join(cache_dir, sample_cache), "wb", True))
        return samples


if __name__ == "__main__":
    # evaluate database
    DB = Database()
    APs = evaluate_class(DB, f_class=VGGNetFeat, d_type=d_type, depth=depth)
    cls_MAPs = []
    for cls, cls_APs in APs.items():
        MAP = np.mean(cls_APs)
        print("Class {}, MAP {}".format(cls, MAP))
        cls_MAPs.append(MAP)
    print("MMAP", np.mean(cls_MAPs))
    combinations = itertools.combinations(feat_pools, N)
    for combination in combinations:
        fusion = FeatureFusion(features=list(combination))
        for d in depths:
            APs = evaluate_class(db, f_instance=fusion, d_type=d_type, depth=d)
            cls_MAPs = []
            for cls, cls_APs in APs.items():  # APs maps class -> list of APs
                MAP = np.mean(cls_APs)
                cls_MAPs.append(MAP)
            r = "{},{},{},{}".format(
                ",".join(combination), d, d_type, np.mean(cls_MAPs))
            print(r)
            result.write('\n' + r)
        print()
    result.close()


if __name__ == "__main__":
    print("Remember to delete the cache folder whenever you use new data.\n")
    dbTrain = Database(DB_dir="CorelDBDataSet/train",
                       DB_csv="CorelDBDataSetTrain.csv")
    fusion = FeatureFusion(features=['color', 'daisy'])
    result = evaluate_class(dbTrain, f_instance=fusion, d_type=d_type, depth=depth)
    print("{} classes classified out of {} available".format(
        result[0], result[1]))
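
# A minimal sketch of what FeatureFusion plausibly does: concatenate each
# feature's normalized histogram into one vector per image; the real class
# also handles per-feature sample caching, which is omitted here.
import numpy as np

def fuse(hists):
    parts = []
    for h in hists:
        h = np.asarray(h, dtype=float)
        s = np.sum(h)
        parts.append(h / s if s > 0 else h)  # normalize each feature first
    return np.concatenate(parts)             # so no single feature dominates

# e.g. fuse a color histogram with a daisy histogram:
# fused = fuse([color_hist, daisy_hist])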
        return samples


if __name__ == "__main__":
    DB_train_dir = "../database/train"
    DB_train_csv = DB_train_dir + "/data_train.csv"
    db1 = Database(DB_train_dir, DB_train_csv)
    print("DB1 length: ", len(db1))
    color = Color()

    DB_test_dir = "../database/test"
    DB_test_csv = DB_test_dir + "/data_test.csv"
    db2 = Database(DB_test_dir, DB_test_csv)
    print("DB2 length: ", len(db2))

    # evaluate database
    APs, res = evaluate_class(db1, db2, color.make_samples, depth=depth, d_type="d1")

    # save each test image under the class predicted for it
    for i in range(len(db2)):
        saveName = "../database/res/" + res[i] + "/" + db2.data.img[i].split('/')[-1]
        bid = imageio.imread(db2.data.img[i])
        if not os.path.exists("../database/res/" + res[i]):
            os.makedirs("../database/res/" + res[i])
        mpimg.imsave(saveName, bid / 255.)
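
# A minimal sketch of the train/test protocol above: each test image is
# labelled with the class of its nearest train sample; the helper name and
# d1 metric are illustrative assumptions, not evaluate_class's signature.
import numpy as np

def predict_classes(train_samples, test_samples):
    preds = []
    for q in test_samples:
        # d1 distance between numpy histogram vectors
        dists = [np.sum(np.abs(q['hist'] - s['hist'])) for s in train_samples]
        preds.append(train_samples[int(np.argmin(dists))]['cls'])
    return preds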
        index.train(vecbase)
        index.add(vecbase)
    else:
        raise ValueError("you should choose a correct retrieval mode")
    cPickle.dump(dicbase,
                 open(os.path.join(cache_dir, dic_addr), "wb", True))
    cPickle.dump(vecbase,
                 open(os.path.join(cache_dir, vec_addr), "wb", True))
    faiss.write_index(index, os.path.join(cache_dir, index_addr))
    return index, dicbase, vecbase


if __name__ == "__main__":
    # evaluate database
    db = Database()
    start = time.time()
    APs = evaluate_class(db, f_class=ModelFeat, depth=depth)
    end = time.time()

    # cls_MAPs = []
    # with open(os.path.join(result_dir, result_csv), 'w', encoding='UTF-8') as f:
    #     f.write("Vgg16-oxf-cosine result: MAP&MMAP")
    #     for cls, cls_APs in APs.items():
    #         MAP = np.mean(cls_APs)
    #         print("Class {}, MAP {}".format(cls, MAP))
    #         f.write("\nClass {}, MAP {}".format(cls, MAP))
    #         cls_MAPs.append(MAP)
    #     print("MMAP", np.mean(cls_MAPs))
    #     f.write("\nMMAP {}".format(np.mean(cls_MAPs)))
    #     print("total time:", end - start)
    #     f.write("\ntotal time:{0:.4f}s".format(end - start))
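
# A minimal sketch of the faiss flow above (build, persist, search), using an
# exact L2 index; the real code picks the index type by retrieval mode, and
# the dimensions and file name here are illustrative.
import numpy as np
import faiss

d = 128
vecbase = np.random.rand(1000, d).astype('float32')
index = faiss.IndexFlatL2(d)         # exact search; no training needed
index.add(vecbase)
faiss.write_index(index, "index.bin")

query = vecbase[:1]
dists, ids = index.search(query, 5)  # top-5 neighbours (first hit is the query itself)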
DB_train_csv = '/content/train.csv'
db = MyDatabase(DB_train_dir, DB_train_csv)

# DB_test_dir = '../database/test'
# DB_test_csv = 'data_test.csv'
DB_test_dir = '/content/Data/test'
DB_test_csv = '/content/test.csv'
db2 = MyDatabase(DB_test_dir, DB_test_csv)

# evaluate database
fusion = FeatureFusion(features=['color', 'edge'])
APs, res = evaluate_class(db, db2, f_instance=fusion, depth=3, d_type=d_type)
cls_MAPs = []
for cls, cls_APs in APs.items():
    MAP = np.mean(cls_APs)
    print("Class {}, MAP {}".format(cls, MAP))
    cls_MAPs.append(MAP)
print("MMAP", np.mean(cls_MAPs))

# save each test image under the class predicted for it
for i in range(len(db2)):
    saveDir = "/content/traitement_images/Data/result_fusion/" + res[i]
    saveName = saveDir + "/" + db2.data.img[i].split('/')[-1]
    bid = imageio.imread(db2.data.img[i])
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)  # create the class folder before saving
    mpimg.imsave(saveName, bid / 255.)
                      (sample_cache, d_type, depth))
            samples = []
            data = db.get_data()
            for d in data.itertuples():
                d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
                d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
                samples.append({'img': d_img, 'cls': d_cls, 'hist': d_hist})
            cPickle.dump(samples,
                         open(os.path.join(cache_dir, sample_cache), "wb", True))
        return samples


if __name__ == "__main__":
    db_train = Database('database/train')
    db_test = Database('database/test')

    # check shape
    assert edge_kernels.shape == (5, 2, 2)

    # evaluate database
    APs = evaluate_class(db_train, db_test, f_class=Edge, d_type=d_type, depth=depth)
    cls_MAPs = []
    for cls, cls_APs in APs.items():
        MAP = np.mean(cls_APs)
        print("Class {}, MAP {}".format(cls, MAP))
        cls_MAPs.append(MAP)
    print("MMAP", np.mean(cls_MAPs))
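
# A minimal sketch of edge-histogram extraction consistent with the
# (5, 2, 2) edge_kernels assert above: five 2x2 kernels in the MPEG-7 style
# (vertical, horizontal, two diagonals, non-directional), with each position
# tagged by its strongest response; kernel values and pooling are assumptions.
import numpy as np
from scipy.signal import convolve2d

edge_kernels = np.array([
    [[1., -1.], [1., -1.]],                  # vertical
    [[1., 1.], [-1., -1.]],                  # horizontal
    [[np.sqrt(2), 0.], [0., -np.sqrt(2)]],   # 45-degree diagonal
    [[0., np.sqrt(2)], [-np.sqrt(2), 0.]],   # 135-degree diagonal
    [[2., -2.], [-2., 2.]],                  # non-directional
])
assert edge_kernels.shape == (5, 2, 2)

gray = np.random.rand(8, 8)  # stand-in for a grayscale image block
responses = np.stack([np.abs(convolve2d(gray, k, mode='valid'))
                      for k in edge_kernels])
hist = np.bincount(responses.argmax(axis=0).ravel(), minlength=5)  # edge-type histogram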
            if verbose:
                print("Counting histogram..., config=%s, distance=%s, depth=%s" %
                      (sample_cache, d_type, depth))
            samples = []
            data = db.get_data()
            for d in data.itertuples():
                d_img, d_cls = getattr(d, "img"), getattr(d, "cls")
                d_hist = self.histogram(d_img, type=h_type, n_slice=n_slice)
                samples.append({'img': d_img, 'cls': d_cls, 'hist': d_hist})
            cPickle.dump(samples,
                         open(os.path.join(cache_dir, sample_cache), "wb", True))
        return samples


if __name__ == "__main__":
    print("Remember to delete the cache folder whenever you use new data.\n")
    dbTrain = Database(DB_dir="CorelDBDataSet/train",
                       DB_csv="CorelDBDataSetTrain.csv")
    result = evaluate_class(dbTrain, f_class=Daisy, d_type=d_type, depth=depth)
    print("{} classes classified out of {} available".format(
        result[0], result[1]))
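
# A minimal sketch of the underlying DAISY descriptor, assuming the Daisy
# class wraps skimage.feature.daisy and pools its output into one histogram
# per image; the parameters and pooling here are illustrative, not the
# project's configuration.
import numpy as np
from skimage.feature import daisy
from skimage import data
from skimage.color import rgb2gray

gray = rgb2gray(data.astronaut())
descs = daisy(gray, step=32, radius=15, rings=3, histograms=8, orientations=8)
hist = descs.reshape(-1, descs.shape[-1]).mean(axis=0)  # pool descriptors per image
hist /= np.sum(hist)                                     # normalize, as above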