def main(config):
    """Train and/or evaluate a hashing network described by *config*.

    Side effects: suppresses Deprecation/Future warnings, silences TF logging,
    pins the visible GPU to ``config.gpu_id``, creates ``config.save_dir`` and
    tees stdout into ``train.log`` there via ``Logger``.

    When ``config.test`` is truthy, previously saved weights are loaded from
    ``save_dir``; otherwise the network is trained first. In both cases the
    retrieval mAP is computed on the query/database split and printed.
    """
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("ignore", category=FutureWarning)
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = '3'
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu_id

    if not os.path.exists(config.save_dir):
        os.makedirs(config.save_dir)
    # Mirror all console output into save_dir/train.log.
    sys.stdout = Logger(os.path.join(config.save_dir, 'train.log'))
    pprint(vars(config))

    # Dataset layout: ../../data/<dataset>/{wordvec,train,test,database}.txt
    data_root = os.path.join('../../data', config.dataset)
    config.wordvec_dict = f'{data_root}/wordvec.txt'
    img_tr = f'{data_root}/train.txt'
    img_te = f'{data_root}/test.txt'
    img_db = f'{data_root}/database.txt'

    # FIX: `config.test == True` replaced with idiomatic truthiness test.
    if config.test:
        # Evaluation-only run: reuse the weights saved by a previous training.
        config.network_weights = os.path.join(config.save_dir,
                                              'network_weights.npy')
    else:
        train_img = dataset.import_train(data_root, img_tr)
        network_weights = model.train(train_img, config)
        config.network_weights = network_weights

    query_img, database_img = dataset.import_validation(data_root, img_te,
                                                        img_db)
    maps = model.validation(database_img, query_img, config)
    for key in maps:
        print(f"{key}: {maps[key]}")
# Parse CLI options, resolve dataset-specific constants and split files,
# then (unless --evaluate) train the model and finally report retrieval mAP.
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus

# Per-dataset label dimensionality and retrieval cutoff R.
label_dims = {'cifar10': 10, 'cub': 200, 'nuswide_81': 81, 'coco': 80}
Rs = {'cifar10': 54000, 'nuswide_81': 5000, 'coco': 5000}
args.R = Rs[args.dataset]
args.label_dim = label_dims[args.dataset]

# Split lists live under <data_dir>/<dataset>/.
args.img_tr = os.path.join(args.data_dir, args.dataset, "train.txt")
args.img_te = os.path.join(args.data_dir, args.dataset, "test.txt")
args.img_db = os.path.join(args.data_dir, args.dataset, "database.txt")
pprint(vars(args))

data_root = os.path.join(args.data_dir, args.dataset)
# test_image, database_image
query_img, database_img = dataset.import_validation(
    data_root, args.img_te, args.img_db)

if not args.evaluate:
    train_img = dataset.import_train(data_root, args.img_tr)
    model_weights = model.train(train_img, database_img, query_img, args)
    args.model_weights = model_weights

maps = model.validation(database_img, query_img, args)
for name in maps:
    print("{}\t{}".format(name, maps[name]))
pprint(vars(args))
def _str2bool(value):
    """Parse a CLI boolean ('true'/'t'/'yes'/'1', case-insensitive)."""
    return str(value).lower() in ('yes', 'true', 't', '1')

# BUG FIX: the original used ``type=bool``; argparse passes the raw string,
# and ``bool('False')`` is True, so ``--finetune-all False`` silently stayed
# True. Parse the string explicitly while keeping the same flag and default.
parser.add_argument('--finetune-all', default=True, type=_str2bool)
parser.add_argument('--save-dir', default="./models/", type=str)
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true')

args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus

# Per-dataset label dimensionality and retrieval cutoff R.
label_dims = {'cifar10': 10, 'cub': 200, 'nuswide_81': 81, 'coco': 80}
Rs = {'cifar10': 54000, 'nuswide_81': 5000, 'coco': 5000}
args.R = Rs[args.dataset]
args.label_dim = label_dims[args.dataset]

# NOTE(review): hard-coded user home path — consider a --data-dir option.
args.img_tr = "/home/caoyue/data/{}/train.txt".format(args.dataset)
args.img_te = "/home/caoyue/data/{}/test.txt".format(args.dataset)
args.img_db = "/home/caoyue/data/{}/database.txt".format(args.dataset)
pprint(vars(args))

query_img, database_img = dataset.import_validation(args.img_te, args.img_db)

if not args.evaluate:
    train_img = dataset.import_train(args.img_tr)
    model_weights = model.train(train_img, database_img, query_img, args)
    args.model_weights = model_weights

maps = model.validation(database_img, query_img, args)
for key in maps:
    print("{}\t{}".format(key, maps[key]))
pprint(vars(args))
# CQ params 'max_iter_update_b': 3, 'max_iter_update_Cb': 1, 'cq_lambda': cq_lambda, 'code_batch_size': 500, 'n_subspace': subspace_num, 'n_subcenter': 256, 'label_dim': label_dims[_dataset], 'img_tr': "/home/caoyue/data/{}/train.txt".format(_dataset), 'img_te': "/home/caoyue/data/{}/test.txt".format(_dataset), 'img_db': "/home/caoyue/data/{}/database.txt".format(_dataset), 'save_dir': "./models/", 'log_dir': log_dir, 'dataset': _dataset } pprint(config) train_img = dataset.import_train(config['img_tr']) model_weights = model.train(train_img, config) config['model_weights'] = model_weights query_img, database_img = dataset.import_validation(config['img_te'], config['img_db']) maps = model.validation(database_img, query_img, config) for key in maps: print(("{}: {}".format(key, maps[key]))) pprint(config)
# CQ params 'max_iter_update_b': 3, 'max_iter_update_Cb': 1, 'cq_lambda': cq_lambda, 'code_batch_size': 500, 'n_subspace': subspace_num, 'n_subcenter': 256, 'label_dim': label_dims[_dataset], 'img_tr': "{}/train.txt".format(data_root), 'img_te': "{}/test.txt".format(data_root), 'img_db': "{}/database.txt".format(data_root), 'save_dir': "./models/", 'log_dir': log_dir, 'dataset': _dataset } pprint(config) train_img = dataset.import_train(data_root, config['img_tr']) model_weights = model.train(train_img, config) config['model_weights'] = model_weights query_img, database_img = dataset.import_validation(data_root, config['img_te'], config['img_db']) maps = model.validation(database_img, query_img, config) for key in maps: print(("{}\t{}".format(key, maps[key]))) pprint(config)
'../../core/architecture/single_model/pretrained_model/reference_pretrain.npy', 'img_model': 'alexnet', 'loss_type': 'normed_cross_entropy', # normed_cross_entropy # TODO # if only finetune last layer 'finetune_all': True, # CQ params 'cq_lambda': cq_lambda, 'label_dim': label_dims[_dataset], 'img_tr': "/home/caoyue/data/{}/train.txt".format(_dataset), 'img_te': "/home/caoyue/data/{}/test.txt".format(_dataset), 'img_db': "/home/caoyue/data/{}/database.txt".format(_dataset), 'save_dir': "./models/", 'log_dir': log_dir, 'dataset': _dataset } pprint(config) train_img = dataset.import_train(config['img_tr']) model_weights = model.train(train_img, config) config['model_weights'] = model_weights query_img, database_img = dataset.import_validation(config['img_te'], config['img_db']) maps = model.validation(database_img, query_img, config) for key in maps: print(("{}: {}".format(key, maps[key]))) pprint(config)
parser.add_argument('--save-dir', default="./models/", type=str)
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true')
parser.add_argument('--val-freq', default=1, type=int)

args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus

# Per-dataset label dimensionality and retrieval cutoff R.
label_dims = {'cifar10': 10, 'nuswide_81': 81, 'coco': 80, 'imagenet': 100}
Rs = {'cifar10': 54000, 'nuswide_81': 5000, 'coco': 5000, 'imagenet': 5000}
args.R = Rs[args.dataset]
args.label_dim = label_dims[args.dataset]

# NOTE(review): hard-coded user home path — consider a --data-dir option.
args.img_tr = "/home/caoyue/data/{}/train.txt".format(args.dataset)
args.img_te = "/home/caoyue/data/{}/test.txt".format(args.dataset)
args.img_db = "/home/caoyue/data/{}/database.txt".format(args.dataset)
pprint(vars(args))

query_img, database_img = dataset.import_validation(args.img_te, args.img_db)

if not args.evaluate:
    # Training validates periodically itself (see --val-freq).
    train_img = dataset.import_train(args.img_tr)
    model_weights = model.train(train_img, database_img, query_img, args)
    args.model_weights = model_weights
else:
    # Evaluation-only run: compute and print retrieval mAP.
    maps = model.validation(database_img, query_img, args)
    for name in maps:
        print("{}\t{}".format(name, maps[name]))
pprint(vars(args))