def main(train_loader, test_loaders, model, logger, file_logger):
    """Run the full train/test loop and (optionally) the W1BS benchmark.

    Relies on module-level state: ``args`` (parsed CLI options),
    ``TEST_ON_W1BS``, ``LOG_DIR``, ``triplet_flag`` and the helpers
    ``create_optimizer``, ``train``, ``test``, ``w1bs`` and
    ``w1bs_extract_descs_and_save``.

    Args:
        train_loader: DataLoader yielding training batches.
        test_loaders: iterable of dicts with keys 'dataloader' and 'name'.
        model: the network to train; moved to GPU when ``args.cuda`` is set.
        logger: tensorboard-style logger passed through to train/test.
        file_logger: plain-text logger used by the W1BS plotting helper.
    """
    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    # if (args.enable_logging):
    #     file_logger.log_string('logs.txt', '\nparsed options:\n{}\n'.format(vars(args)))

    if args.cuda:
        model.cuda()

    optimizer1 = create_optimizer(model.features, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            # Fix: load the checkpoint once (the original deserialized the
            # same file twice back to back).
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs
    for epoch in range(start, end):
        # iterate over test loaders and test results
        # train_loader, test_loaders2 = create_loaders(load_random_triplets=triplet_flag)
        train(train_loader, model, optimizer1, epoch, logger, triplet_flag)
        for test_loader in test_loaders:
            test(test_loader['dataloader'], model, epoch, logger,
                 test_loader['name'])

        if TEST_ON_W1BS:
            # print(weights_path)
            # Extract descriptors for every W1BS patch image, match them,
            # and save/draw the evaluation plots.
            patch_images = w1bs.get_list_of_patch_images(
                DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
            desc_name = 'curr_desc'  # + str(random.randint(0,100))
            DESCS_DIR = LOG_DIR + '/temp_descs/'  # args.w1bsroot.replace('/code', "/data/out_descriptors")
            OUT_DIR = DESCS_DIR.replace('/temp_descs/', "/out_graphs/")
            for img_fname in patch_images:
                w1bs_extract_descs_and_save(img_fname, model, desc_name,
                                            cuda=args.cuda,
                                            mean_img=args.mean_image,
                                            std_img=args.std_image,
                                            out_dir=DESCS_DIR)
            force_rewrite_list = [desc_name]
            w1bs.match_descriptors_and_save_results(
                DESC_DIR=DESCS_DIR,
                do_rewrite=True,
                dist_dict={},
                force_rewrite_list=force_rewrite_list)
            if args.enable_logging:
                w1bs.draw_and_save_plots_with_loggers(
                    DESC_DIR=DESCS_DIR,
                    OUT_DIR=OUT_DIR,
                    methods=["SNN_ratio"],
                    descs_to_draw=[desc_name],
                    logger=file_logger,
                    tensor_logger=logger)
            else:
                w1bs.draw_and_save_plots(DESC_DIR=DESCS_DIR,
                                         OUT_DIR=OUT_DIR,
                                         methods=["SNN_ratio"],
                                         descs_to_draw=[desc_name])
def execute(self, train_loader, test_loaders, model, logger, file_logger):
    """Run the instance-configured train/test loop and optional W1BS eval.

    Uses instance state: ``self.args``, ``self.triplet_flag``,
    ``self.test_on_w1bs``, ``self.log_dir`` and the helpers
    ``self.create_optimizer``, ``self.train``, ``self.test``,
    ``self.create_loaders``, plus the module-level ``w1bs`` utilities.

    Args:
        train_loader: DataLoader for training; re-created each epoch to
            randomize batches.
        test_loaders: iterable of dicts with keys 'dataloader' and 'name'.
        model: the network to train; moved to GPU when ``self.args.cuda``.
        logger: tensorboard-style logger passed through to train/test.
        file_logger: plain-text logger used by the W1BS plotting helper.
    """
    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(self.args)))
    # if (self.args.enable_logging):
    #     file_logger.log_string('logs.txt', '\nparsed options:\n{}\n'.format(vars(self.args)))

    if self.args.cuda:
        model.cuda()

    optimizer1 = self.create_optimizer(model.features, self.args.lr)

    # optionally resume from a checkpoint
    if self.args.resume:
        if os.path.isfile(self.args.resume):
            print('=> loading checkpoint {}'.format(self.args.resume))
            # Fix: load the checkpoint once (the original deserialized the
            # same file twice back to back).
            checkpoint = torch.load(self.args.resume)
            self.args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(self.args.resume))

    start = self.args.start_epoch
    end = start + self.args.epochs
    for epoch in range(start, end):
        # iterate over test loaders and test results
        self.train(train_loader, model, optimizer1, epoch, logger,
                   self.triplet_flag)
        for test_loader in test_loaders:
            self.test(test_loader['dataloader'], model, epoch, logger,
                      test_loader['name'])

        if self.test_on_w1bs:
            print("Saving test_on_w1bs results")
            patch_images = w1bs.get_list_of_patch_images(
                DATASET_DIR=self.args.w1bsroot)
            desc_name = 'curr_desc'  # + str(random.randint(0,100))
            self.descs_dir = self.log_dir + '/temp_descs/'  # self.args.w1bsroot.replace('/code', "/data/out_descriptors")
            OUT_DIR = self.descs_dir.replace('/temp_descs/', "/out_graphs/")
            for img_fname in patch_images:
                w1bs_extract_descs_and_save(img_fname, model, desc_name,
                                            cuda=self.args.cuda,
                                            mean_img=self.args.mean_image,
                                            std_img=self.args.std_image,
                                            out_dir=self.descs_dir)
            force_rewrite_list = [desc_name]
            w1bs.match_descriptors_and_save_results(
                DESC_DIR=self.descs_dir,
                do_rewrite=True,
                dist_dict={},
                force_rewrite_list=force_rewrite_list)
            print("descs_dir", self.descs_dir)
            print("OUT_DIR", OUT_DIR)
            print("Number of patch_images", len(patch_images))
            if self.args.enable_logging:
                w1bs.draw_and_save_plots_with_loggers(
                    DESC_DIR=self.descs_dir,
                    OUT_DIR=OUT_DIR,
                    methods=["SNN_ratio"],
                    descs_to_draw=[desc_name],
                    logger=file_logger,
                    tensor_logger=logger)
            else:
                w1bs.draw_and_save_plots(DESC_DIR=self.descs_dir,
                                         OUT_DIR=OUT_DIR,
                                         methods=["SNN_ratio"],
                                         descs_to_draw=[desc_name])

        # randomize train loader batches
        train_loader, test_loaders2 = self.create_loaders(
            load_random_triplets=self.triplet_flag)
def main(trainPhotoTourDataset, test_loaders, model, logger, file_logger):
    """Train with hard-negative mining over PhotoTour triplets, then test.

    Each epoch: mine hard negatives (currently read from cached ``.npy``
    files on disk — see NOTE below), build a fresh
    ``TripletPhotoTourHardNegatives`` DataLoader, train one epoch, then
    evaluate on every test loader and optionally on the W1BS benchmark.

    Relies on module-level state: ``args``, ``TEST_ON_W1BS``, the transforms
    helpers (``cv2_scale``, ``np_reshape``) and the project helpers
    ``create_optimizer``, ``get_hard_negatives``, ``train``, ``test``,
    ``w1bs`` and ``w1bs_extract_descs_and_save``.

    Args:
        trainPhotoTourDataset: PhotoTour training dataset used for
            hard-negative mining.
        test_loaders: iterable of dicts with keys 'dataloader' and 'name'.
        model: the network to train; moved to GPU when ``args.cuda`` is set.
        logger: tensorboard-style logger passed through to train/test.
        file_logger: plain-text logger for the experiment configuration.
    """
    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    if args.enable_logging:
        file_logger.log_string('logs.txt',
                               '\nparsed options:\n{}\n'.format(vars(args)))

    if args.cuda:
        model.cuda()

    optimizer1 = create_optimizer(model.features, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            # Fix: load the checkpoint once (the original deserialized the
            # same file twice back to back).
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    kwargs = ({'num_workers': args.num_workers, 'pin_memory': args.pin_memory}
              if args.cuda else {})
    transform = transforms.Compose([
        transforms.Lambda(cv2_scale),
        transforms.Lambda(np_reshape),
        transforms.ToTensor(),
        transforms.Normalize((args.mean_image,), (args.std_image,))])

    for epoch in range(start, end):
        model.eval()
        # NOTE(review): descriptor extraction is disabled and descriptors are
        # read from a cached file instead — confirm 'descriptors.npy' exists
        # and matches the current dataset before running.
        # descriptors = get_descriptors_for_dataset(model, trainPhotoTourDataset)
        # np.save('descriptors.npy', descriptors)
        descriptors = np.load('descriptors.npy')

        hard_negatives = get_hard_negatives(trainPhotoTourDataset, descriptors)
        np.save('descriptors_min_dist.npy', hard_negatives)
        hard_negatives = np.load('descriptors_min_dist.npy')
        print(hard_negatives[0])

        trainPhotoTourDatasetWithHardNegatives = TripletPhotoTourHardNegatives(
            train=True,
            negative_indices=hard_negatives,
            batch_size=args.batch_size,
            root=args.dataroot,
            name=args.training_set,
            download=True,
            transform=transform)
        train_loader = torch.utils.data.DataLoader(
            trainPhotoTourDatasetWithHardNegatives,
            batch_size=args.batch_size,
            shuffle=False,
            **kwargs)

        train(train_loader, model, optimizer1, epoch, logger)

        # iterate over test loaders and test results
        for test_loader in test_loaders:
            test(test_loader['dataloader'], model, epoch, logger,
                 test_loader['name'])

        if TEST_ON_W1BS:
            # print(weights_path)
            patch_images = w1bs.get_list_of_patch_images(
                DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
            desc_name = 'curr_desc'
            for img_fname in patch_images:
                w1bs_extract_descs_and_save(img_fname, model, desc_name,
                                            cuda=args.cuda,
                                            mean_img=args.mean_image,
                                            std_img=args.std_image)
            DESCS_DIR = args.w1bsroot.replace('/code', "/data/out_descriptors")
            OUT_DIR = args.w1bsroot.replace('/code', "/data/out_graphs")
            force_rewrite_list = [desc_name]
            w1bs.match_descriptors_and_save_results(
                DESC_DIR=DESCS_DIR,
                do_rewrite=True,
                dist_dict={},
                force_rewrite_list=force_rewrite_list)
            if args.enable_logging:
                w1bs.draw_and_save_plots_with_loggers(
                    DESC_DIR=DESCS_DIR,
                    OUT_DIR=OUT_DIR,
                    methods=["SNN_ratio"],
                    descs_to_draw=[desc_name],
                    logger=file_logger,
                    tensor_logger=None)
            else:
                w1bs.draw_and_save_plots(DESC_DIR=DESCS_DIR,
                                         OUT_DIR=OUT_DIR,
                                         methods=["SNN_ratio"],
                                         descs_to_draw=[desc_name],
                                         really_draw=False)