Example #1
def main():
    sys.stdout = Logger(osp.join(args.save_dir, 'test.log'))
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    multi_gpus = False
    if len(args.gpus.split(',')) > 1:
        multi_gpus = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        print("Currently using GPU: {}".format(args.gpus))
        cudnn.benchmark = True

    print("Creating dataset: {}".format(args.dataset))
    dataset = datasets.create(name=args.dataset, batch_size=args.batch_size, bit=32, tencrop=args.tencrop)
    testloader, databaseloader = dataset.testloader, dataset.databaseloader

    model_path = osp.join(args.save_dir, 'model_best.pth')
    print("load pretrained model: {}".format(model_path))
    model = torch.load(model_path)
    if multi_gpus:
        model = nn.DataParallel(model).to(device)
    else:
        model = model.to(device)

    print("==> Evaluate")
    mAP_feat, mAP_sign, mAP_topK, code_and_labels = evaluate(model, databaseloader, testloader, dataset.R, args.tencrop, device)
    np.save(osp.join(args.save_dir, 'code_and_label.npy'), code_and_labels)
    print(f'mAP_feat:{mAP_feat:.4f}  mAP_sign:{mAP_sign:.4f}  mAP_top1000:{mAP_topK:.4f}')
Example #2
def init_activation_quantization(args, model, w_quantizer, alpha, beta):
    """
    Initialize biases, alphas and betas for activation quantization

    Args:
        args: The args object obtained with argparse.
        model: The model that contains activation quantization modules.
        w_quantizer: The weight quantizer used to temporarily quantize the weights
            while the initialization batch is forwarded (only when args.qw is set).
        alpha, beta: Per-layer scaling factors passed to the weight quantizer.
    """
    freeze_bn(model)
    model = torch.nn.DataParallel(model).cuda()
    init_data_loader, _, _ = datasets.create(
        args.dataset, osp.join(args.data_dir, args.dataset), args.img_size,
        args.scale_size, args.qa_sample_batch_size, args.workers)
    for i, (inputs, targets) in enumerate(init_data_loader):
        # Run the model once to initialize activation quantization parameters
        if args.qw:
            w_quantizer.save_params()
            w_quantizer.quantize_params(T=1,
                                        alpha=alpha,
                                        beta=beta,
                                        train=False)
            model(inputs.cuda(), input_ac_T=1)
            w_quantizer.restore_params()
        else:
            model(inputs.cuda(), input_ac_T=1)
        model.QA_inited = True
        break
Example #3
def main(args):
    train_loader, val_loader, num_classes = datasets.create(
        args.dataset, osp.join(args.data_dir, args.dataset), args.img_size,
        args.scale_size, args.batch_size, args.workers)
    model, count, alpha, beta = init_model(args, num_classes, QA_flag=args.qa)
    w_quantizer = WQuantization(model,
                                alpha,
                                beta,
                                QW_values=get_qw_values(args.wk),
                                initialize_biases=False)
    args.resume = args.checkpoint
    args.resume_epoch = 0
    alpha, beta = resume_checkpoint(args, model, None, None, None, w_quantizer)
    for i in range(len(alpha)):
        # .cpu() returns a copy rather than moving the tensor in place, so reassign
        alpha[i] = alpha[i].cpu()
        beta[i] = beta[i].cpu()

    for i, module in enumerate(w_quantizer.target_modules):
        param = module.data.numpy()
        counts, lower_bounds = count_bins(param, 0.01)
        plt.plot(lower_bounds, counts)
        plt.title('Module {} parameter distribution'.format(i))
        plt.show()

        q_param = w_quantizer.forward(module.data *
                                      beta[i].data.cpu().detach(),
                                      1,
                                      w_quantizer.QW_biases[i],
                                      train=False)
        q_param = q_param.detach().cpu().numpy()
        counts, lower_bounds = count_bins(q_param, 1)
        plt.plot(lower_bounds, counts, 'r')
        plt.title('Quantized module {} parameter distribution'.format(i))
        plt.show()
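
A minimal sketch of the count_bins helper used above (an assumption, since its definition is not part of this example): it histograms a flattened parameter array with a fixed bin width and returns (counts, lower_bounds) so the result can be passed straight to plt.plot.

import numpy as np

def count_bins(param, bin_width):
    # Flatten the parameter array and histogram it with fixed-width bins
    flat = np.asarray(param).ravel()
    edges = np.arange(flat.min(), flat.max() + bin_width, bin_width)
    counts, edges = np.histogram(flat, bins=edges)
    # Return the lower edge of each bin so plt.plot(lower_bounds, counts) works
    return counts, edges[:-1]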
Example #4
def main():

    labels_to_branch_map = {6: 0, 9: 0,
                            8: 1, 4: 1,
                            0: 2,
                            2: 3, 7: 3, 1: 3,
                            5: 4, 3: 4}

    if random_group:
        labels_to_branch_map = choose_random_label_branch()
        print("label random, ", labels_to_branch_map)

    dataset = datasets.create(
            name='mnist', batch_size=batch_size, use_gpu=False,
            num_workers=4,
        )
    trainloader, testloader = dataset.trainloader, dataset.testloader

    model = BranchNet(num_classes)
    optimizer_model = torch.optim.SGD(model.parameters(), lr=lr_model, weight_decay=5e-04, momentum=0.9)
    criterion = nn.CrossEntropyLoss()

    for epoch in range(0, epochs):
        if epoch < epochs/2:
            train(model, trainloader, optimizer_model, epoch, criterion=criterion,
                  labels_to_branch_map=labels_to_branch_map, choose_predefined_branch=True)
        else:
            train(model, trainloader, optimizer_model, epoch, criterion=criterion,
                  labels_to_branch_map=labels_to_branch_map, choose_predefined_branch=False)

        if epoch % 10 == 0:
            test(model, testloader, epoch)

    test(model, testloader, epochs)
Example #5
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir, 'log_' + args.dataset + '.txt'))

    if use_gpu:
        print("Currently using GPU: {}".format(args.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU")

    print("Creating dataset: {}".format(args.dataset))
    dataset = datasets.create(
        name=args.dataset, batch_size=args.batch_size, use_gpu=use_gpu,
        num_workers=args.workers,
    )

    trainloader, testloader = dataset.trainloader, dataset.testloader

    print("Creating model: {}".format(args.model))
    model = models.create(name=args.model, num_classes=dataset.num_classes)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    criterion_xent = nn.CrossEntropyLoss()
    criterion_cent = CenterLoss(num_classes=dataset.num_classes, feat_dim=2, use_gpu=use_gpu)
    optimizer_model = torch.optim.SGD(model.parameters(), lr=args.lr_model, weight_decay=5e-04, momentum=0.9)
    optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(), lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model, step_size=args.stepsize, gamma=args.gamma)

    start_time = time.time()

    for epoch in range(args.max_epoch):
        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
        train(model, criterion_xent, criterion_cent,
              optimizer_model, optimizer_centloss,
              trainloader, use_gpu, dataset.num_classes, epoch)

        if args.stepsize > 0: scheduler.step()

        if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
            print("==> Test")
            acc, err = test(model, testloader, use_gpu, dataset.num_classes, epoch)
            print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #6
    def setup_dataset(self):
        self.tokenizer = Tokenizer(ann_path=args.ann_path,
                                   dataset_name=args.dataset_name)
        self.dataset = datasets.create(
            name=args.dataset_name,
            image_dir=args.image_dir,
            ann_path=args.ann_path,
            tokenizer=self.tokenizer,
            split='train',
            args=args,
        )
Example #7
    def __init__(self, args):
        super(Trainer, self).__init__()
        self.args = args
        if cfg.SEED > 0:
            random.seed(cfg.SEED)
            torch.manual_seed(cfg.SEED)
            torch.cuda.manual_seed_all(cfg.SEED)

        self.num_gpus = torch.cuda.device_count()
        self.distributed = self.num_gpus > 1
        if self.distributed:
            torch.cuda.set_device(args.local_rank)
            torch.distributed.init_process_group(backend="nccl",
                                                 init_method="env://")
        self.device = torch.device("cuda")
        # self.device = 'cpu'

        self.rl_stage = False
        self.setup_logging()
        self.setup_dataset()
        self.setup_network()
        self.val_evaler = Evaler(datasets.create(
            name=args.dataset_name,
            image_dir=args.image_dir,
            ann_path=args.ann_path,
            tokenizer=self.tokenizer,
            split='val',
            args=args,
        ),
                                 tokenizer=self.tokenizer)  # TODO
        self.test_evaler = Evaler(datasets.create(name=args.dataset_name,
                                                  image_dir=args.image_dir,
                                                  ann_path=args.ann_path,
                                                  tokenizer=self.tokenizer,
                                                  split='test',
                                                  args=args),
                                  tokenizer=self.tokenizer)  # TODO
        self.scorer = Scorer()
Example #8
def demo(args):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load the Oxford5k database
    dataset = create('Oxford')
    print(dataset)

    # initialize architecture and load weights
    print('Loading the model...')
    model = resnet50_rank_DA().to(device)
    model.eval()
    print('Done\n')

    # load the precomputed dataset features
    d_feats_file = 'data/features/resnet50-rnk-lm-da_ox.npy'
    try:
        d_feats = np.load(d_feats_file)
    except OSError as e:
        print(
            'ERROR: File {} not found. Please follow the instructions to download the pre-computed features.'
            .format(d_feats_file))
        sys.exit()

    # Load the query image
    img = Image.open(dataset.get_query_filename(args.qidx))
    # Crop the query ROI
    img = img.crop(tuple(dataset.get_query_roi(args.qidx)))
    # Apply transformations
    img = trf.resize_image(img, 800)
    I = trf.to_tensor(img)
    I = trf.normalize(
        I, dict(rgb_means=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    I = I.unsqueeze(0).to(device)
    # Forward pass to extract the features
    with torch.no_grad():
        print('Extracting the representation of the query...')
        q_feat = model(I).cpu().numpy()
    print('Done\n')

    # Rank the database and visualize the top-k most similar images in the database
    dataset.vis_top(d_feats,
                    args.qidx,
                    q_feat=q_feat,
                    topk=args.topk,
                    out_image_file='results/out.png')
Example #9
def setup_dataset(mode, crop_dir, mask_dir=None, mean_mask_dir=None,
                  mean_grid_dir=None, trimap_dir=None, alpha_dir=None,
                  alpha_weight_dir=None):
    # Create dataset
    dataset = datasets.create(mode, crop_dir, mask_dir, mean_mask_dir,
                              mean_grid_dir, trimap_dir, alpha_dir,
                              alpha_weight_dir)

    # Create transform function
    transform = transforms.create(mode)
    transform_random = transforms.transform_random

    # Split into train and test
    train_raw, test_raw = datasets.split_dataset(dataset)

    # Increase data variety
    train_raw = chainer.datasets.TransformDataset(train_raw, transform_random)

    # Transform for network inputs
    train = chainer.datasets.TransformDataset(train_raw, transform)
    test = chainer.datasets.TransformDataset(test_raw, transform)

    return train, test
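
A minimal usage sketch for the setup_dataset helper above (the mode string and directory paths are placeholder assumptions, and iterator setup is not shown in the original example): the returned datasets can be wrapped in standard Chainer iterators.

import chainer

train, test = setup_dataset('train', 'data/crop', mask_dir='data/mask')
# Shuffle the augmented training set; iterate over the test set once, in order
train_iter = chainer.iterators.SerialIterator(train, batch_size=8, shuffle=True)
test_iter = chainer.iterators.SerialIterator(test, batch_size=8,
                                             repeat=False, shuffle=False)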
Example #10
                    help='custom model name')
parser.add_argument('-dataset',
                    default='MNIST',
                    required=True,
                    help='dataset name')
parser.add_argument('-batch_size',
                    default=45,
                    required=True,
                    help='samples per training batch')
parser.add_argument('-visualize',
                    default='none',
                    required=False,
                    help='class to visualize')
args = parser.parse_args()

data = datasets.create(args.dataset)

train_loader = dataloader.create(args.loss_fn, data.train,
                                 int(args.batch_size))
valid_loader = dataloader.create(args.loss_fn, data.valid,
                                 int(args.batch_size))


def train_model(config):
    model = models.create(
        args.model,
        config["loaders"],
        config["loss_fn"],
        config["acc_fn"],
        config["epochs"],
        config["pretrained"],
Example #11
def main():
    setup_seed(args.seed)
    sys.stdout = Logger(osp.join(args.save_dir, 'train.log'))
    # gpu init
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    multi_gpus = False
    if len(args.gpus.split(',')) > 1:
        multi_gpus = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        print("Currently using GPU: {}".format(args.gpus))
    else:
        print("Currently using CPU")

    # define dataset, backbone and margin layer
    print("Creating dataset: {}".format(args.dataset))
    dataset = datasets.create(name=args.dataset, batch_size=args.batch_size, bit=args.feat_dim)
    trainloader, testloader, databaseloader = dataset.trainloader, dataset.testloader, dataset.databaseloader
    hadamard = torch.from_numpy(generate_hadamard_codebook(args.feat_dim, dataset.num_classes)).float().to(device)

    print("Creating net: {}".format(args.backbone))
    net = backbone.create(name=args.backbone, feat_dim=args.feat_dim)

    print("Creating classifier: {}".format(args.classifier))
    classifier = margin.create(name=args.classifier, feat_dim=args.feat_dim, 
                               num_classes=dataset.num_classes, scale=args.scale)

    if args.resume:
        net_path = osp.join(args.net_path, 'model_best.pth')
        classifier_path = osp.join(args.classifier_path, 'classifier_best.pth')
        print('resume the model parameters from: ', net_path, classifier_path)
        net.load_state_dict(torch.load(net_path).state_dict())
        classifier.load_state_dict(torch.load(classifier_path).state_dict())

    # define optimizers for different layer
    if 'nuswide' in args.dataset:
        criterion_xent = nn.BCEWithLogitsLoss()
    else:
        criterion_xent = nn.CrossEntropyLoss()

    parameter_list = [{"params":net.feature_layers.parameters(), "lr":args.lr}, \
                      {"params":net.hash_layer.parameters(), "lr":args.lr}, \
                      {"params":classifier.parameters(), "lr":args.lr}]
    optimizer_model = torch.optim.SGD(parameter_list, lr=args.lr, weight_decay=5e-04, momentum=0.9)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model, step_size=args.stepsize, gamma=args.gamma)

    if multi_gpus:
        net = nn.DataParallel(net).to(device)
        classifier = nn.DataParallel(classifier).to(device)
    else:
        net = net.to(device)
        classifier = classifier.to(device)

    start_time = time.time()

    best_acc, best_mAP_feat, best_mAP_sign = 0.0, 0.0, 0.0
    for epoch in range(args.max_epoch):
        print("==> Epoch {}/{}".format(epoch+1, args.max_epoch))
        train(net, classifier,
              criterion_xent, optimizer_model,
              trainloader, dataset.num_classes, hadamard, epoch, device)

        if args.stepsize > 0: scheduler.step()

        if args.test_freq > 0 and (epoch+1) % args.test_freq == 0 or (epoch+1) == args.max_epoch:
            print("==> Test")
            acc, err = test(net, classifier, testloader, device)
            print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
            if acc > best_acc:
                best_acc = acc

        if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
            print("==> Evaluate")
            code_and_labels = generate_codes(net, databaseloader, testloader, device) 

            print('calculate metrics...')
            mAP_feat = get_mAP(code_and_labels['db_feats'], code_and_labels['db_labels'], 
                            code_and_labels['test_feats'], code_and_labels['test_labels'], dataset.R)
            mAP_sign = get_mAP(code_and_labels['db_codes'], code_and_labels['db_labels'], 
                            code_and_labels['test_codes'], code_and_labels['test_labels'], dataset.R)
            pre_topK = get_precision_top(code_and_labels['db_codes'], code_and_labels['db_labels'].argmax(1), 
                            code_and_labels['test_codes'], code_and_labels['test_labels'].argmax(1), k=500)
            precision, recall = get_pre_recall(code_and_labels['db_codes'], code_and_labels['db_labels'], 
                            code_and_labels['test_codes'], code_and_labels['test_labels'])

            print(f'mAP_feat:{mAP_feat:.4f}  mAP_sign:{mAP_sign:.4f}  precision_top500:{pre_topK:.4f}')
            if mAP_sign > best_mAP_sign:
                best_mAP_sign = mAP_sign
                best_mAP_feat = mAP_feat
                print(f'best mAP updated to {best_mAP_sign:.4f}')
                save_pre_recall(precision, recall, path=args.save_dir)
                np.save(osp.join(args.save_dir, 'code_and_label.npy'), code_and_labels)
                # io.savemat(osp.join(args.save_dir, 'code_and_label.mat'), {'code_and_label':code_and_labels})
                torch.save(net,  osp.join(args.save_dir, 'model_best.pth'))
                torch.save(classifier,  osp.join(args.save_dir, 'classifier_best.pth'))

    print(f"best mAP_feat:{best_mAP_feat:.4f}  best mAP_sign:{best_mAP_sign:.4f}  best Acc:{best_acc}")
    # torch.save(net,  osp.join(args.save_dir, 'model_final.pth'))
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #12
        TP = (score[:, i] > thr) * (label == i)
        FP = (score[:, i] > thr) * (label != i)

        TP_num = len(TP[TP])
        FP_num = len(FP[FP])

        FPR += FP_num / N_neg
        TPR += TP_num / N_pos
    FPR /= 10
    TPR /= 10
    return FPR, TPR


dataset = datasets.create(name='mnist',
                          batch_size=128,
                          use_gpu=True,
                          num_workers=4)
testloader = dataset.testloader
model_list = os.listdir('models')
thr_points = np.linspace(0, 1, 1000)
for name in model_list:
    path = os.path.join('models', name)
    '''
    # for task1
    update_way = name.split('_')[0]
    lr = name.split('_')[1]
    epoch = name.split('_')[2].split('.')[0]
    '''
    '''
    # for task2
    update_way = name.split('_')[0]
Example #13
sys.stdout = Logger(osp.join(args.save_dir, 'log_' + args.dataset + '.txt'))

if use_gpu:
    print("Currently using GPU: {}".format(args.gpu))
    cudnn.benchmark = True
    torch.cuda.manual_seed_all(args.seed)
else:
    print("Currently using CPU")
'''---------------  Get the DataLoaders --------------- '''

print("Creating dataset: {}".format(args.dataset))

if args.dataset.lower() == 'mnist':
    dataset = datasets.create(
        name=args.dataset,
        batch_size=args.batch_size,
        use_gpu=use_gpu,
        num_workers=args.workers,
    )

    #     dataset = MNIST(batch_size=args.batch_size, use_gpu=use_gpu,
    #     num_workers=args.workers,)
    trainloader, testloader = dataset.trainloader, dataset.testloader
    num_classes = dataset.num_classes
elif args.dataset.lower() == 'lfw':

    with open('lfw_splits', 'rb') as f:
        splits = pickle.load(f)
    dataset = LFWDataloaders(batch_size=args.batch_size,
                             use_gpu=use_gpu,
                             num_workers=args.workers,
                             train_transform=train_transform,
Example #14
File: main.py  Project: nihaizai/Research
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False

    sys.stdout = Logger(osp.join(args.save_dir,
                                 'log_' + args.dataset + '.txt'))

    if use_gpu:
        print("Currently using GPU: {}".format(args.gpu))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU")


#==================================dataset loading============================
    print("Creating dataset: {}".format(args.dataset))
    dataset = datasets.create(
        name=args.dataset,
        batch_size=args.batch_size,
        use_gpu=use_gpu,
        num_workers=args.workers,
    )

    trainloader, testloader = dataset.trainloader, dataset.testloader

    print("Creating model: {}".format(args.model))
    model = models.create(name=args.model, num_classes=dataset.num_classes)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    criterion_xent = nn.CrossEntropyLoss()
    criterion_cent = CenterLoss(num_classes=dataset.num_classes,
                                feat_dim=2,
                                use_gpu=use_gpu)
    optimizer_model = torch.optim.SGD(model.parameters(),
                                      lr=args.lr_model,
                                      weight_decay=5e-04,
                                      momentum=0.9)
    optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(),
                                         lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)

    start_time = time.time()

    xent_plot = []
    cent_plot = []
    loss_plot = []

    for epoch in range(args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))
        xent_losses, cent_losses, losses = train(model, criterion_xent,
                                                 criterion_cent,
                                                 optimizer_model,
                                                 optimizer_centloss,
                                                 trainloader, use_gpu,
                                                 dataset.num_classes, epoch)
        xent_plot.append(xent_losses.avg)
        cent_plot.append(cent_losses.avg)
        loss_plot.append(losses.avg)
        if args.stepsize > 0: scheduler.step()

        #        if args.eval_freq > 0 and (epoch+1) % args.eval_freq == 0 or (epoch+1) == args.max_epoch:
        #            print("==> Test")
        #            acc, err = test(model, testloader, use_gpu, dataset.num_classes, epoch)
        #            print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))

        if epoch % 100 == 0:
            state = {'cnn': model.state_dict()}
            torch.save(
                state,
                '/home/mg/code/GEI+PTSN/train/pytorch-center-loss-master/snapshots_512/snapshot_%d.t7'
                % epoch)
            print('model save at epoch %d' % epoch)

    plot_losses(xent_plot, cent_plot, loss_plot, prefix='losses')

    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #15
                sel_ids[view], train_data, untrain_data, pred_y, weights[view])
            train_datas[view] = new_train_data
            nets[view] = models.create(configs[view].model_name).to(view)
        # update model parameter
        pred_probs = parallel_train(nets,
                                    train_datas,
                                    untrain_data,
                                    configs,
                                    iteration=step + 1)
        pred_y = np.argmax(sum(pred_probs), axis=1)
        parallel_test(nets, data['test'], configs)
        add_num += 8000


if __name__ == '__main__':
    mp.set_start_method('spawn')
    config1 = Config(model_name='shake_drop2', loss_name='weight_softmax')
    config2 = Config(model_name='wrn', loss_name='weight_softmax')

    dataset = args.dataset
    cur_path = os.getcwd()
    logs_dir = os.path.join(cur_path, 'logs')
    data_dir = os.path.join(cur_path, 'data', dataset)
    data = datasets.create(dataset, data_dir)

    spaco([config1, config2],
          data,
          gamma=args.gamma,
          iter_steps=args.iter_steps,
          regularizer=args.regularizer)
Example #16
def main():
    setup_seed(args.seed)
    sys.stdout = Logger(osp.join(args.save_dir,
                                 'log_' + args.dataset + '.txt'))
    # gpu init
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    multi_gpus = False
    if len(args.gpus.split(',')) > 1:
        multi_gpus = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        print("Currently using GPU: {}".format(args.gpus))
    else:
        print("Currently using CPU")

    # define dataset, backbone and margin layer
    print("Creating dataset: {}".format(args.dataset))
    dataset = datasets.create(name=args.dataset, batch_size=args.batch_size)
    trainloader, testloader, databaseloader = dataset.trainloader, dataset.testloader, dataset.databaseloader

    print("Creating net: {}".format(args.backbone))
    net = backbone.create(name=args.backbone, feat_dim=args.feat_dim)
    net_vlad = NetVLAD(num_clusters=dataset.num_classes,
                       dim=args.feat_dim,
                       alpha=1.0)
    model = EmbedNet(net, net_vlad)

    # define optimizers for different layer
    optimizer_model = torch.optim.SGD([{
        "params": model.parameters()
    }],
                                      lr=args.lr,
                                      weight_decay=5e-04,
                                      momentum=0.9)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)

    criterion = HardTripletLoss(margin=0.1, hardest=True).to(device)

    if multi_gpus:
        model = nn.DataParallel(model).to(device)
    else:
        model = model.to(device)

    start_time = time.time()

    best_mAP_feat, best_mAP_sign = 0.0, 0.0
    for epoch in range(args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))
        train(net, optimizer_model, criterion, trainloader,
              dataset.num_classes, epoch, device)

        if args.stepsize > 0: scheduler.step()

        if args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Evaluate")
            mAP_feat, mAP_sign, code_and_labels = evaluate(net, databaseloader, testloader, \
                                                dataset.R, dataset.num_classes, epoch, device)
            print(f'mAP_feat:{mAP_feat:.4f}  mAP_sign:{mAP_sign:.4f}')
            if mAP_sign > best_mAP_sign:
                best_mAP_sign = mAP_sign
                best_mAP_feat = mAP_feat
                print(f'best mAP updated to {best_mAP_sign:.4f}')
                np.save(osp.join(args.save_dir, 'code_and_label.npy'),
                        code_and_labels)
                torch.save(net, osp.join(args.save_dir, 'model_best.pth'))

    print(
        f"best mAP_feat:{best_mAP_feat:.4f}  best mAP_sign:{best_mAP_sign:.4f}"
    )
    torch.save(net, osp.join(args.save_dir, 'model_final.pth'))
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))
Example #17
#############################
#### Section 1: Training ####
#############################

# 1a: AlexNet architecture
# 1b: Finetuning on Landmarks
# 1c: Generalized Mean Pooling (GeM)
# 1d: ResNet18 architecture
# 1e: PCA
# 1f: Triplet loss and training for retrieval
# 1g: Data augmentation
# 1h: Multi-resolution
# 1i: Improved architectures

# create Oxford 5k database
dataset = create('Oxford')

# get the label vector
labels = dataset.get_label_vector()
classes = dataset.get_label_names()

# load the dictionary of the available models and features
with open('data/models.json', 'r') as fp:
    models_dict = json.load(fp)

sections = ['1a', '1b', '1c', '1d', '1e', '1f', '1g', '1h', '1i',
            '2a', '2b', '2c', '2d', '2e', '2f', '2g']

if args.sect not in sections:
    print ('Incorrect section name. Please choose a section between 1[a-i] or 2[a-g]. Example: python session.py --sect 1c')
    sys.exit()
Example #18
def main():
    setup_seed(args.seed)
    sys.stdout = Logger(osp.join(args.save_dir,
                                 'log_' + args.dataset + '.txt'))
    # gpu init
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    multi_gpus = False
    if len(args.gpus.split(',')) > 1:
        multi_gpus = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        print("Currently using GPU: {}".format(args.gpus))
    else:
        print("Currently using CPU")

    # define dataset, backbone and margin layer
    print("Creating dataset: {}".format(args.dataset))
    dataset = datasets.create(name=args.dataset, batch_size=args.batch_size)
    trainloader, testloader, databaseloader = dataset.trainloader, dataset.testloader, dataset.databaseloader

    print("Creating net: {}".format(args.backbone))
    net = backbone.create(name=args.backbone, feat_dim=args.feat_dim)

    print("Creating classifier: {}".format(args.classifier))
    classifier = margin.create(name=args.classifier,
                               feat_dim=args.feat_dim,
                               num_classes=dataset.num_classes,
                               scale=args.scale)

    # define optimizers for different layer
    criterion_xent = nn.CrossEntropyLoss()
    criterion_cent = CenterLoss(num_classes=dataset.num_classes,
                                feat_dim=args.feat_dim,
                                device=device)
    optimizer_model = torch.optim.SGD([{
        'params': net.parameters()
    }, {
        'params': classifier.parameters()
    }],
                                      lr=args.lr,
                                      weight_decay=5e-04,
                                      momentum=0.9)
    optimizer_centloss = torch.optim.SGD(criterion_cent.parameters(),
                                         lr=args.lr_cent)

    if args.stepsize > 0:
        scheduler = lr_scheduler.StepLR(optimizer_model,
                                        step_size=args.stepsize,
                                        gamma=args.gamma)

    if multi_gpus:
        net = nn.DataParallel(net).to(device)
        classifier = nn.DataParallel(classifier).to(device)
    else:
        net = net.to(device)
        classifier = classifier.to(device)

    start_time = time.time()

    best_acc, best_mAP_feat, best_mAP_sign = 0.0, 0.0, 0.0
    for epoch in range(args.max_epoch):
        print("==> Epoch {}/{}".format(epoch + 1, args.max_epoch))
        train(net, classifier, criterion_xent, criterion_cent, optimizer_model,
              optimizer_centloss, trainloader, dataset, epoch, device)

        if args.stepsize > 0: scheduler.step()

        if args.test_freq > 0 and (epoch + 1) % args.test_freq == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Test")
            acc, err = test(net, classifier, testloader, device)
            print("Accuracy (%): {}\t Error rate (%): {}".format(acc, err))
            if acc > best_acc:
                best_acc = acc

        if args.eval_freq > 0 and (epoch + 1) % args.eval_freq == 0 or (
                epoch + 1) == args.max_epoch:
            print("==> Evaluate")
            mAP_feat, mAP_sign, code_and_labels = evaluate(net, databaseloader, testloader, \
                                                dataset.R, dataset.num_classes, epoch, device)
            print(f'mAP_feat:{mAP_feat:.4f}  mAP_sign:{mAP_sign:.4f}')
            if mAP_sign > best_mAP_sign:
                best_mAP_sign = mAP_sign
                best_mAP_feat = mAP_feat
                print(f'best mAP updated to {best_mAP_sign:.4f}')
                np.save(osp.join(args.save_dir, 'code_and_label.npy'),
                        code_and_labels)
                torch.save(net, osp.join(args.save_dir, 'model_best.pth'))

    print(
        f"best mAP_feat:{best_mAP_feat:.4f}  best mAP_sign:{best_mAP_sign:.4f}  best Acc:{best_acc}"
    )
    torch.save(net, osp.join(args.save_dir, 'model_final.pth'))
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Finished. Total elapsed time (h:m:s): {}".format(elapsed))