Example #1
def main(train_loader, test_loaders, model, logger, file_logger):
    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))

    # if (args.enable_logging):
    #    file_logger.log_string('logs.txt', '\nparsed options:\n{}\n'.format(vars(args)))

    if args.cuda:
        model.cuda()

    optimizer1 = create_optimizer(model.features, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs
    for epoch in range(start, end):
        # iterate over test loaders and test results
        #train_loader, test_loaders2 = create_loaders(load_random_triplets=triplet_flag)
        train(train_loader, model, optimizer1, epoch, logger, triplet_flag)
        for test_loader in test_loaders:
            test(test_loader['dataloader'], model, epoch, logger, test_loader['name'])
        if TEST_ON_W1BS:
            # print(weights_path)
            patch_images = w1bs.get_list_of_patch_images(
                DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
            desc_name = 'curr_desc'  # + str(random.randint(0,100))

            DESCS_DIR = LOG_DIR + '/temp_descs/'  # args.w1bsroot.replace('/code', "/data/out_descriptors")
            OUT_DIR = DESCS_DIR.replace('/temp_descs/', "/out_graphs/")

            for img_fname in patch_images:
                w1bs_extract_descs_and_save(img_fname, model, desc_name, cuda=args.cuda,
                                            mean_img=args.mean_image,
                                            std_img=args.std_image, out_dir=DESCS_DIR)

            force_rewrite_list = [desc_name]
            w1bs.match_descriptors_and_save_results(DESC_DIR=DESCS_DIR, do_rewrite=True,
                                                    dist_dict={},
                                                    force_rewrite_list=force_rewrite_list)
            if (args.enable_logging):
                w1bs.draw_and_save_plots_with_loggers(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR,
                                                      methods=["SNN_ratio"],
                                                      descs_to_draw=[desc_name],
                                                      logger=file_logger,
                                                      tensor_logger=logger)
            else:
                w1bs.draw_and_save_plots(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR,
                                         methods=["SNN_ratio"],
                                         descs_to_draw=[desc_name])
Example #2
File: CDbin.py  Project: Shelfcol/CDbin
def main(train_loader, test_loaders, model, logger, file_logger):
    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))

    #if (args.enable_logging):
    #    file_logger.log_string('logs.txt', '\nparsed options:\n{}\n'.format(vars(args)))

    if args.cuda:
        model.cuda()
    # change model.features to model.parameters()
    optimizer1 = create_optimizer(model, args.lr)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            try:
                model.load_state_dict(checkpoint['state_dict'])
            except Exception:
                # fall back to copying the feature-extractor tensors by position
                ckpt_values = list(checkpoint['state_dict'].values())
                own_state = model.state_dict()
                t = 0
                for name in own_state:
                    if name.startswith('features') or name.startswith('feature_L2NET'):
                        own_state[name].copy_(ckpt_values[t])
                        t = t + 1
                        print(name + " is loaded")
                args.start_epoch = 0

            try:
                optimizer1.load_state_dict(checkpoint['optimizer'])
                print('=> optimizer loaded from {}'.format(args.resume))
                print('current lr is {}'.format(optimizer1.param_groups[0]['lr']))
                for group in optimizer1.param_groups:
                    if 'step' not in group:
                        group['step'] = 0.
                    else:
                        print('current step is {}'.format(group['step']))
                # group['step'] = (args.n_triplets) * args.start_epoch // args.batch_size + 1
            except Exception:
                print('=> no optimizer saved in {}'.format(args.resume))
        else:
            print('=> no checkpoint found at {}'.format(args.resume))
            
    
    start = args.start_epoch
    end = min(args.end_epoch, start + args.epochs)
    trainloss = []
    testacc = []
    for epoch in range(start, end):

        # iterate over test loaders and test results
        train(test_loaders[0], train_loader, model, optimizer1, epoch, logger, triplet_flag)
        for test_loader in test_loaders:
            test(test_loader['dataloader'], model, epoch, logger, test_loader['name'])
            if args.testbinary:
                test_binary(test_loader['dataloader'], model, epoch, logger, test_loader['name'])
        # select the alternate dataset name, then immediately restore the original
        # (no net effect as written)
        x = copy.copy(dataset_names)
        x.remove(args.training_set)
        old_trainingset = args.training_set
        args.training_set = x[0]
        args.training_set = old_trainingset
        if TEST_ON_W1BS :
            # print(weights_path)
            patch_images = w1bs.get_list_of_patch_images(
                DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
            desc_name = 'curr_desc'# + str(random.randint(0,100))
            
            DESCS_DIR = LOG_DIR + '/temp_descs/' #args.w1bsroot.replace('/code', "/data/out_descriptors")
            OUT_DIR = DESCS_DIR.replace('/temp_descs/', "/out_graphs/")

            for img_fname in patch_images:
                w1bs_extract_descs_and_save(img_fname, model, desc_name, cuda = args.cuda,
                                            mean_img=args.mean_image,
                                            std_img=args.std_image, out_dir = DESCS_DIR)


            force_rewrite_list = [desc_name]
            w1bs.match_descriptors_and_save_results(DESC_DIR=DESCS_DIR, do_rewrite=True,
                                                    dist_dict={},
                                                    force_rewrite_list=force_rewrite_list)
        train_loader, test_loaders2 = create_loaders(load_random_triplets=triplet_flag)
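The resume branches above expect a checkpoint dictionary with 'epoch', 'state_dict', and 'optimizer' keys. A minimal sketch of the matching save side, assuming standard torch.save usage; save_checkpoint and its file layout are illustrative, not taken from the projects.

import os
import torch

def save_checkpoint(model, optimizer, epoch, out_dir):
    # Illustrative counterpart to the args.resume logic above: writes the
    # keys ('epoch', 'state_dict', 'optimizer') that the resume code reads.
    os.makedirs(out_dir, exist_ok=True)
    path = os.path.join(out_dir, 'checkpoint_{}.pth'.format(epoch))
    torch.save({'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()}, path)
    return path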
Example #3
def main(trainPhotoTourDataset, test_loaders, model, logger, file_logger):
    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))

    if (args.enable_logging):
        file_logger.log_string('logs.txt', '\nparsed options:\n{}\n'.format(vars(args)))

    if args.cuda:
        model.cuda()

    optimizer1 = create_optimizer(model.features, args)

    # optionally resume from a checkpoint

    if args.resume:
        model = resume_pretrained_model(model, args)


    start = args.start_epoch
    end = start + args.epochs

    kwargs = {'num_workers': args.num_workers, 'pin_memory': args.pin_memory} if args.cuda else {}

    transform = transforms.Compose([
            transforms.Lambda(cv2_scale),
            transforms.Lambda(np_reshape),
            transforms.ToTensor(),
            transforms.Normalize((args.mean_image,), (args.std_image,))])

    for epoch in range(start, end):

        model.eval()
        # compute descriptors for the whole training set with the current model
        descriptors = get_descriptors_for_dataset(model, trainPhotoTourDataset)
        np.save('descriptors.npy', descriptors)
        descriptors = np.load('descriptors.npy')
        # mine hard negatives from those descriptors and cache them to disk
        hard_negatives = get_hard_negatives(trainPhotoTourDataset, descriptors)
        np.save('descriptors_min_dist.npy', hard_negatives)
        hard_negatives = np.load('descriptors_min_dist.npy')
        print(hard_negatives[0])

        trainPhotoTourDatasetWithHardNegatives = TripletPhotoTourHardNegatives(train=True,
                                                                               negative_indices=hard_negatives,
                                                                               batch_size=args.batch_size,
                                                                               root=args.dataroot,
                                                                               name=args.training_set,
                                                                               download=True,
                                                                               transform=transform)

        train_loader = torch.utils.data.DataLoader(trainPhotoTourDatasetWithHardNegatives,
                                                   batch_size=args.batch_size,
                                                   shuffle=False, **kwargs)

        train(train_loader, model, optimizer1, epoch, logger)

        # iterate over test loaders and test results
        for test_loader in test_loaders:
            test(test_loader['dataloader'], model, epoch, logger, test_loader['name'])

        if TEST_ON_W1BS :
            # print(weights_path)
            patch_images = w1bs.get_list_of_patch_images(
                DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
            desc_name = 'curr_desc'

            for img_fname in patch_images:
                w1bs_extract_descs_and_save(img_fname, model, desc_name, cuda = args.cuda,
                                            mean_img=args.mean_image,
                                            std_img=args.std_image)

            DESCS_DIR = args.w1bsroot.replace('/code', "/data/out_descriptors")
            OUT_DIR = args.w1bsroot.replace('/code', "/data/out_graphs")

            force_rewrite_list = [desc_name]
            w1bs.match_descriptors_and_save_results(DESC_DIR=DESCS_DIR, do_rewrite=True,
                                                    dist_dict={},
                                                    force_rewrite_list=force_rewrite_list)
            if(args.enable_logging):
                w1bs.draw_and_save_plots_with_loggers(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR,
                                                      methods=["SNN_ratio"],
                                                      descs_to_draw=[desc_name],
                                                      logger=file_logger,
                                                      tensor_logger=None)
            else:
                w1bs.draw_and_save_plots(DESC_DIR=DESCS_DIR, OUT_DIR=OUT_DIR,
                                         methods=["SNN_ratio"],
                                         descs_to_draw=[desc_name],
                                         really_draw = False)
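Example #3 mines hard negatives with get_hard_negatives, which is not shown on this page. A minimal sketch of one common strategy (closest non-matching descriptor in L2 distance); the function below and its arguments are assumptions, not the project's implementation.

import numpy as np

def mine_hard_negatives_sketch(descriptors, anchor_idx, positive_idx):
    # Illustrative only: for each anchor, return the index of the closest
    # descriptor that is neither the anchor itself nor its true positive.
    d = np.asarray(descriptors, dtype=np.float32)
    dists = np.linalg.norm(d[anchor_idx][:, None, :] - d[None, :, :], axis=2)
    for row in range(len(anchor_idx)):
        dists[row, anchor_idx[row]] = np.inf    # exclude the anchor
        dists[row, positive_idx[row]] = np.inf  # exclude the matching patch
    return np.argmin(dists, axis=1)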
Example #4
File: HardNet.py  Project: keeeeenw/hardnet
    def execute(self, train_loader, test_loaders, model, logger, file_logger):
        # print the experiment configuration
        print('\nparsed options:\n{}\n'.format(vars(self.args)))

        #if (self.args.enable_logging):
        #    file_logger.log_string('logs.txt', '\nparsed options:\n{}\n'.format(vars(self.args)))

        if self.args.cuda:
            model.cuda()

        optimizer1 = self.create_optimizer(model.features, self.args.lr)

        # optionally resume from a checkpoint
        if self.args.resume:
            if os.path.isfile(self.args.resume):
                print('=> loading checkpoint {}'.format(self.args.resume))
                checkpoint = torch.load(self.args.resume)
                self.args.start_epoch = checkpoint['epoch']
                model.load_state_dict(checkpoint['state_dict'])
            else:
                print('=> no checkpoint found at {}'.format(self.args.resume))
                
        
        start = self.args.start_epoch
        end = start + self.args.epochs
        for epoch in range(start, end):

            # iterate over test loaders and test results
            self.train(train_loader, model, optimizer1, epoch, logger, self.triplet_flag)
            for test_loader in test_loaders:
                self.test(test_loader['dataloader'], model, epoch, logger, test_loader['name'])

            if self.test_on_w1bs:
                print("Saving test_on_w1bs results")
                patch_images = w1bs.get_list_of_patch_images(
                    DATASET_DIR=self.args.w1bsroot)
                desc_name = 'curr_desc'# + str(random.randint(0,100))
                
                self.descs_dir = self.log_dir + '/temp_descs/' #self.args.w1bsroot.replace('/code', "/data/out_descriptors")
                OUT_DIR = self.descs_dir.replace('/temp_descs/', "/out_graphs/")

                for img_fname in patch_images:
                    w1bs_extract_descs_and_save(img_fname, model, desc_name, cuda = self.args.cuda,
                                                mean_img=self.args.mean_image,
                                                std_img=self.args.std_image, out_dir = self.descs_dir)


                force_rewrite_list = [desc_name]
                w1bs.match_descriptors_and_save_results(DESC_DIR=self.descs_dir, do_rewrite=True,
                                                        dist_dict={},
                                                        force_rewrite_list=force_rewrite_list)
                print("descs_dir", self.descs_dir)
                print("OUT_DIR", OUT_DIR)
                print("Number of patch_images", len(patch_images))
                if(self.args.enable_logging):
                    w1bs.draw_and_save_plots_with_loggers(DESC_DIR=self.descs_dir, OUT_DIR=OUT_DIR,
                                            methods=["SNN_ratio"],
                                            descs_to_draw=[desc_name],
                                            logger=file_logger,
                                            tensor_logger = logger)
                else:
                    w1bs.draw_and_save_plots(DESC_DIR=self.descs_dir, OUT_DIR=OUT_DIR,
                                            methods=["SNN_ratio"],
                                            descs_to_draw=[desc_name])
            #randomize train loader batches
            train_loader, test_loaders2 = self.create_loaders(load_random_triplets=self.triplet_flag)
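Examples #2 and #4 call create_loaders after each epoch so that a fresh set of random triplets is drawn; the helper itself is not shown. A minimal sketch of what it might return, assuming ordinary torch.utils.data datasets; the dataset arguments, worker count, and test-loader name are illustrative.

import torch

def create_loaders_sketch(train_dataset, test_dataset, batch_size, cuda):
    # Illustrative stand-in for create_loaders(): rebuilding the train
    # DataLoader each epoch re-draws triplets when the dataset samples
    # them lazily, and the test loaders keep the dict layout used above.
    kwargs = {'num_workers': 2, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
    test_loaders = [{'name': 'test',
                     'dataloader': torch.utils.data.DataLoader(
                         test_dataset, batch_size=batch_size,
                         shuffle=False, **kwargs)}]
    return train_loader, test_loaders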
Example #5
def main(train_loader, test_loaders, model, logger, file_logger):
    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))

    if (args.enable_logging):
        file_logger.log_string('logs',
                               '\nparsed options:\n{}\n'.format(vars(args)))

    if args.cuda:
        model.cuda()

    optimizer1 = create_optimizer(model.features, args)

    # optionally resume from a checkpoint
    if args.resume:
        model = resume_pretrained_model(model, args, suffix)

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        # iterate over test loaders and test results
        train(train_loader, model, optimizer1, epoch, file_logger,
              triplet_flag)

        for test_loader in test_loaders:
            test(test_loader['dataloader'], model, epoch, file_logger,
                 test_loader['name'])

        if TEST_ON_W1BS:
            # print(weights_path)
            patch_images = w1bs.get_list_of_patch_images(
                DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
            desc_name = 'curr_desc'  # + str(random.randint(0,100))

            DESCS_DIR = log_dir + '/temp_descs/'  #args.w1bsroot.replace('/hardnet', "/data/out_descriptors")
            OUT_DIR = DESCS_DIR.replace('/temp_descs/', "/out_graphs/")

            for img_fname in patch_images:
                # note: a bug can occur in this call
                # saves the (num_patches x feat_dim) descriptor matrix to the descriptor file
                w1bs_extract_descs_and_save(img_fname,
                                            model,
                                            desc_name,
                                            cuda=args.cuda,
                                            mean_img=args.mean_image,
                                            std_img=args.std_image,
                                            out_dir=DESCS_DIR)

            force_rewrite_list = [desc_name]
            w1bs.match_descriptors_and_save_results(
                DESC_DIR=DESCS_DIR,
                do_rewrite=True,
                dist_dict={},
                force_rewrite_list=force_rewrite_list)
            if (args.enable_logging):
                # DESC_DIR, OUT_DIR, methods, colors, lines,
                # descs_to_draw, really_draw, logger, tensor_logger = None
                w1bs.draw_and_save_plots_with_loggers(
                    DESC_DIR=DESCS_DIR,
                    OUT_DIR=OUT_DIR,
                    methods=["SNN_ratio"],
                    descs_to_draw=[desc_name],
                    really_draw=True,
                    logger=file_logger,
                    tensor_logger=logger)
            else:
                w1bs.draw_and_save_plots(DESC_DIR=DESCS_DIR,
                                         OUT_DIR=OUT_DIR,
                                         methods=["SNN_ratio"],
                                         descs_to_draw=[desc_name],
                                         really_draw=True)
        # re-generate the triplets slices
        if epoch + 1 < end:
            train_loader.resample_dataset_triplets()

    print('HardNet train done!')
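The W1BS evaluation block is nearly identical in every example above. Below is a condensed helper equivalent to that block; it assumes the same w1bs module and w1bs_extract_descs_and_save function that the examples already import.

def run_w1bs_eval(model, args, descs_dir, out_dir, logger=None, file_logger=None):
    # Condensed version of the W1BS block repeated in the examples above;
    # relies on the w1bs helpers imported by those scripts.
    patch_images = w1bs.get_list_of_patch_images(
        DATASET_DIR=args.w1bsroot.replace('/code', '/data/W1BS'))
    desc_name = 'curr_desc'
    for img_fname in patch_images:
        w1bs_extract_descs_and_save(img_fname, model, desc_name,
                                    cuda=args.cuda,
                                    mean_img=args.mean_image,
                                    std_img=args.std_image,
                                    out_dir=descs_dir)
    w1bs.match_descriptors_and_save_results(DESC_DIR=descs_dir, do_rewrite=True,
                                            dist_dict={},
                                            force_rewrite_list=[desc_name])
    if args.enable_logging and file_logger is not None:
        w1bs.draw_and_save_plots_with_loggers(DESC_DIR=descs_dir, OUT_DIR=out_dir,
                                              methods=["SNN_ratio"],
                                              descs_to_draw=[desc_name],
                                              logger=file_logger,
                                              tensor_logger=logger)
    else:
        w1bs.draw_and_save_plots(DESC_DIR=descs_dir, OUT_DIR=out_dir,
                                 methods=["SNN_ratio"],
                                 descs_to_draw=[desc_name])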