Example #1
def main(args):
    initialization_time = time.time()

    print "#############  Read in Database   ##############"
    train_loader, valid_loader, test_loader = get_data_loaders(
        dataset=args.dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        model=args.model,
        flip=args.flip,
        num_classes=args.num_classes,
        valid=0.0,
        regression=args.regression)

    # initiate metrics
    if args.loss == 'MSE':
        metrics_train = adviser_metrics(train_loader.dataset.kp_dict,
                                        regression=True,
                                        num_classes=args.num_classes)
        metrics_test = adviser_metrics(test_loader.dataset.kp_dict,
                                       num_classes=args.num_classes,
                                       priors=None)
        # metrics_valid = adviser_metrics(valid_loader.dataset.kp_dict, regression=True )
    else:
        metrics_train = adviser_metrics(train_loader.dataset.kp_dict,
                                        num_classes=args.num_classes)
        metrics_test = adviser_metrics(test_loader.dataset.kp_dict,
                                       num_classes=args.num_classes,
                                       priors=None)
        # metrics_valid = adviser_metrics(valid_loader.dataset.kp_dict)

    print "#############  Initiate Model     ##############"
    if args.model == 'alexAdviser':
        assert Paths.clickhere_weights != None, "Error: Set render4cnn weights path in util/Paths.py."
        weights = torch.load(Paths.clickhere_weights)
        # weights = torch.load(Paths.render4cnn_weights)
        model = alexAdviser(weights=weights, num_classes=args.num_classes)
    else:
        assert False, "Error: unknown model choice."

    # Loss functions
    criterion = adviser_loss(num_classes=args.num_classes,
                             weights=train_loader.dataset.loss_weights,
                             loss=args.loss)

    # Optimizer
    params = list(model.parameters())
    if args.optimizer == 'adam':
        optimizer = torch.optim.Adam(params,
                                     lr=args.lr,
                                     betas=(0.9, 0.999),
                                     eps=1e-8,
                                     weight_decay=0)
    elif args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(params,
                                    lr=args.lr,
                                    momentum=0.9,
                                    weight_decay=0.0005)
        scheduler = MultiStepLR(optimizer,
                                milestones=list(range(0, args.num_epochs, 5)),
                                gamma=0.95)
    else:
        assert False, "Error: Unknown choice for optimizer."

    # Train on GPU if available
    if torch.cuda.is_available():
        model.cuda()

    print "Time to initialize take: ", time.time() - initialization_time
    print "#############  Start Training     ##############"
    total_step = len(train_loader)

    for epoch in range(0, args.num_epochs + 1):

        if epoch % args.eval_epoch == 0:
            # _, _ = eval_step(   model       = model,
            #                     data_loader = train_loader,
            #                     criterion   = criterion,
            #                     step        = epoch * total_step,
            #                     results_dict = metrics_train,
            #                     datasplit   = "train")
            #
            # curr_loss, curr_wacc, _ = eval_step(   model       = model,
            #                                     data_loader = valid_loader,
            #                                     criterion   = criterion,
            #                                     step        = epoch * total_step,
            #                                     results_dict = metrics_valid,
            #                                     datasplit   = "valid")

            curr_loss, curr_wacc, qual_dict = eval_step(
                model=model,
                data_loader=test_loader,
                criterion=criterion,
                step=epoch * total_step,
                results_dict=metrics_test,
                datasplit="test")

        if args.evaluate_only:
            exit()

        if epoch % args.save_epoch == 0 and epoch > 0:

            args = save_checkpoint(model=model,
                                   optimizer=optimizer,
                                   curr_epoch=epoch,
                                   curr_step=(total_step * epoch),
                                   args=args,
                                   curr_loss=curr_loss,
                                   curr_acc=curr_wacc,
                                   filename=('model@epoch%d.pkl' % (epoch)))

        if args.optimizer == 'sgd':
            scheduler.step()

        logger.add_scalar_value("Misc/Epoch Number",
                                epoch,
                                step=epoch * total_step)
        train_step(model=model,
                   train_loader=train_loader,
                   criterion=criterion,
                   optimizer=optimizer,
                   epoch=epoch,
                   step=epoch * total_step)

    curr_loss, curr_wacc, qual_dict = eval_step(model=model,
                                                data_loader=test_loader,
                                                criterion=criterion,
                                                step=epoch * total_step,
                                                results_dict=metrics_test,
                                                datasplit="test")
    # Final save of the model
    args = save_checkpoint(model=model,
                           optimizer=optimizer,
                           curr_epoch=epoch,
                           curr_step=(total_step * epoch),
                           args=args,
                           curr_loss=curr_loss,
                           curr_acc=curr_wacc,
                           filename=('model@epoch%d.pkl' % (epoch)))
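
All of these examples call a save_checkpoint helper that is not shown here. A minimal sketch of what such a helper might look like, assuming it serializes model and optimizer state with torch.save and that args carries an output directory (the experiment_path attribute and the exact fields stored are assumptions, not the original implementation):

import os
import torch

def save_checkpoint(model, optimizer, curr_epoch, curr_step, args,
                    curr_loss, curr_acc, filename):
    # Hypothetical sketch: bundle everything needed to resume training.
    # The original helper may store different fields or update `args`.
    state = {
        'epoch':      curr_epoch,
        'step':       curr_step,
        'state_dict': model.state_dict(),
        'optimizer':  optimizer.state_dict(),
        'loss':       curr_loss,
        'acc':        curr_acc,
    }
    os.makedirs(args.experiment_path, exist_ok=True)   # assumed attribute
    torch.save(state, os.path.join(args.experiment_path, filename))
    return args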
Example #2
def main(args):
    curr_time = time.time()

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    print("#############  Read in Database   ##############")
    # Data loading code (From PyTorch example https://github.com/pytorch/examples/blob/master/imagenet/main.py)
    traindir = os.path.join(args.data_path, 'train')
    valdir = os.path.join(args.data_path, 'validation')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    print("Generating Validation Dataset")
    valid_dataset = ImageNet(
        valdir,
        transforms.Compose([
            transforms.Resize((299, 299)),
            # transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    print("Generating Training Dataset")
    train_dataset = ImageNet(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(299),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    print("Generating Data Loaders")
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.workers,
                                               pin_memory=True)

    print("Time taken:  {} seconds".format(time.time() - curr_time))
    curr_time = time.time()

    print("######## Initiate Model and Optimizer   ##############")
    # Model - inception_v3 as specified in the paper
    # Note: This is slightly different to the model used by the paper,
    # however, the differences should be minor in terms of implementation and impact on results
    model = models.inception_v3(pretrained=False)
    # Train on GPU if available
    if not args.distributed:
        model = torch.nn.DataParallel(model).cuda()
    else:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)

    # if torch.cuda.is_available():
    #     model.cuda()

    # Criterion was not specified by the paper; it was assumed to be cross entropy (as commonly used)
    criterion = torch.nn.CrossEntropyLoss().cuda()  # Loss function
    params = list(model.parameters())  # Parameters to train

    # Optimizer -- the optimizer is not specified in the paper, and was assumed to
    # be SGD. The optimizer hyperparameters were also not specified and were set
    # to values commonly used with PyTorch (lr = 0.1, momentum = 0.3, decay = 1e-4)
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # The paper does not specify an annealing factor, we set it to 1.0 (no annealing)
    scheduler = MultiStepLR(optimizer,
                            milestones=list(range(0, args.num_epochs, 1)),
                            gamma=args.annealing_factor)

    print("Time taken:  {} seconds".format(time.time() - curr_time))
    curr_time = time.time()

    print("#############  Start Training     ##############")
    total_step = len(train_loader)

    # curr_loss, curr_wacc = eval_step(   model       = model,
    #                                     data_loader = valid_loader,
    #                                     criterion   = criterion,
    #                                     step        = epoch * total_step,
    #                                     datasplit   = "valid")

    for epoch in range(0, args.num_epochs):

        if args.evaluate_only: exit()
        if args.optimizer == 'sgd': scheduler.step()

        logger.add_scalar("Misc/Epoch Number", epoch, epoch * total_step)
        train_step(model=model,
                   train_loader=train_loader,
                   criterion=criterion,
                   optimizer=optimizer,
                   epoch=epoch,
                   step=epoch * total_step,
                   valid_loader=valid_loader)

        curr_loss, curr_wacc = eval_step(model=model,
                                         data_loader=valid_loader,
                                         criterion=criterion,
                                         step=epoch * total_step,
                                         datasplit="valid")

        args = save_checkpoint(model=model,
                               optimizer=optimizer,
                               curr_epoch=epoch,
                               curr_loss=curr_loss,
                               curr_step=(total_step * epoch),
                               args=args,
                               curr_acc=curr_wacc,
                               filename=('model@epoch%d.pkl' % (epoch)))

    # Final save of the model
    args = save_checkpoint(model=model,
                           optimizer=optimizer,
                           curr_epoch=epoch,
                           curr_loss=curr_loss,
                           curr_step=(total_step * epoch),
                           args=args,
                           curr_acc=curr_wacc,
                           filename=('model@epoch%d.pkl' % (epoch)))
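
Two details are worth noting when reusing this pattern: with a DistributedSampler the sampler owns the shuffling and should be re-seeded with set_epoch() at the start of every epoch, and inception_v3 returns auxiliary logits in training mode. A minimal sketch of the inner loop under those assumptions (train_step itself is not shown in the snippet, so this is illustrative, not the original code):

def train_one_epoch(model, train_loader, train_sampler, criterion, optimizer,
                    epoch, distributed):
    # Illustrative sketch of what a train_step for this setup could look like.
    if distributed and train_sampler is not None:
        # Re-seed the DistributedSampler so every epoch uses a fresh shuffle
        # and each process keeps seeing a disjoint shard of the data.
        train_sampler.set_epoch(epoch)
    model.train()
    for images, targets in train_loader:
        images, targets = images.cuda(), targets.cuda()
        outputs = model(images)
        # inception_v3 with aux_logits=True returns (logits, aux_logits) in train mode
        if isinstance(outputs, tuple):
            outputs = outputs[0]
        loss = criterion(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()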
Example #3
def main(args):
    curr_time = time.time()


    print("#############  Read in Database   ##############")

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True)


    print("Time taken:  {} seconds".format(time.time() - curr_time) )
    curr_time = time.time()

    print("######## Initiate Model and Optimizer   ##############")
    # Model -- a small convolutional network (Net) producing embeddings for the magnet loss
    model   = Net()
    model   = torch.nn.DataParallel(model).cuda()


    # Criterion -- magnet loss, with hyperparameters hard-coded in this example
    criterion = magnet_loss(D = 12, M = 4, alpha = 7.18).cuda()    # Loss function
    params  = list(model.parameters())                                      # Parameters to train


    # Optimizer -- the optimizer is not specified in the paper, and was assumed to
    # be SGD. The optimizer hyperparameters were also not specified and were set
    # to values commonly used with PyTorch (lr = 0.1, momentum = 0.3, decay = 1e-4)
    optimizer = torch.optim.SGD(params, lr=args.lr, momentum = args.momentum, weight_decay = args.weight_decay)
    # The paper does not specify an annealing factor, we set it to 1.0 (no annealing)
    scheduler = MultiStepLR( optimizer,
                             milestones=list(range(0, args.num_epochs, 1)),
                             gamma=args.annealing_factor)


    print("Time taken:  {} seconds".format(time.time() - curr_time) )
    curr_time = time.time()

    print("#############  Start Training     ##############")
    total_step = len(train_loader)


    for epoch in range(0, args.num_epochs):


        if args.evaluate_only:         exit()
        if args.optimizer == 'sgd':    scheduler.step()

        logger.add_scalar("Misc/Epoch Number", epoch, epoch * total_step)
        train_step( model        = model,
                    train_loader = train_loader,
                    criterion    = criterion,
                    optimizer    = optimizer,
                    epoch        = epoch,
                    step         = epoch * total_step,
                    valid_loader = valid_loader)


        #
        # curr_loss, curr_wacc = eval_step(   model       = model,
        #                                     data_loader = valid_loader,
        #                                     criterion   = criterion,
        #                                     step        = epoch * total_step,
        #                                     datasplit   = "valid")

        # args = save_checkpoint(  model      = model,
        #                          optimizer  = optimizer,
        #                          curr_epoch = epoch,
        #                          curr_loss  = curr_loss,
        #                          curr_step  = (total_step * epoch),
        #                          args       = args,
        #                          curr_acc   = curr_wacc,
        #                          filename   = ('model@epoch%d.pkl' %(epoch)))

    # Final evaluation and save of the model
    curr_loss, curr_wacc = eval_step(  model       = model,
                                       data_loader = valid_loader,
                                       criterion   = criterion,
                                       step        = epoch * total_step,
                                       datasplit   = "valid")

    args = save_checkpoint(  model      = model,
                             optimizer  = optimizer,
                             curr_epoch = epoch,
                             curr_loss  = curr_loss,
                             curr_step  = (total_step * epoch),
                             args       = args,
                             curr_acc   = curr_wacc,
                             filename   = ('model@epoch%d.pkl' %(epoch)))
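
Net is not defined in the snippet; for the MNIST/magnet-loss setup it is presumably a small CNN that maps 28x28 grayscale digits to an embedding vector. A hypothetical sketch (the layer sizes and the default embedding size are assumptions):

import torch.nn as nn

class Net(nn.Module):
    # Hypothetical MNIST embedding network; the real Net may differ.
    def __init__(self, embedding_size=64):
        super(Net, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),                                 # 28x28 -> 14x14
            nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2),                                 # 14x14 -> 7x7
        )
        self.embed = nn.Linear(64 * 7 * 7, embedding_size)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)    # flatten to (batch, 64*7*7)
        return self.embed(x)         # embedding consumed by the magnet loss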
Example #4
def main(args):
    curr_time = time.time()


    print("#############  Read in Database   ##############")
    train_loader, valid_loader = get_loaders()

    print("Time taken:  {} seconds".format(time.time() - curr_time) )
    curr_time = time.time()

    print("######## Initiate Model and Optimizer   ##############")
    # Model -- inception_v3 as specified in the paper (a small CNN, Net, is used for MNIST)
    # Note: this is slightly different from the model used by the paper,
    # however, the differences should be minor in terms of implementation and impact on results

    if args.dataset == "MNIST":
        model   = Net(args.embedding_size)
    else:
        model   = magnetInception(args.embedding_size)

    if args.resume is not None:
        print("Loading pretrained Module")
        checkpoint      = torch.load(args.resume)
        state_dict      = checkpoint['state_dict']

        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:] # remove `module.`
            new_state_dict[name] = v
        # load params
        model.load_state_dict(new_state_dict)

    model   = torch.nn.DataParallel(model).cuda()


    # Criterion -- selected by args.loss: magnet loss, triplet loss, or cross entropy (softmax)
    if args.loss == "magnet":
        criterion = magnet_loss(D = args.D, M = args.M, alpha = args.GAP).cuda()    # Loss function
    elif args.loss == "triplet":
        criterion = triplet_loss(alpha = 0.304).cuda()    # Loss function
    elif args.loss == "softmax":
        criterion = torch.nn.CrossEntropyLoss().cuda()    # Loss function
    else:
        print("Undefined Loss Function")
        exit()

    params  = list(model.parameters())                                      # Parameters to train


    # Optimizer -- the optimizer is not specified in the paper, and was assumed to
    # be SGD. The optimizer hyperparameters were also not specified and were set
    # to values commonly used with PyTorch (lr = 0.1, momentum = 0.3, decay = 1e-4)
    optimizer = torch.optim.SGD(params, lr=args.lr, momentum = args.momentum, weight_decay = args.weight_decay)
    # The paper does not specify an annealing factor, we set it to 1.0 (no annealing)
    scheduler = MultiStepLR( optimizer,
                             milestones=list(range(0, args.num_epochs, 1)),
                             gamma=args.annealing_factor)


    print("Time taken:  {} seconds".format(time.time() - curr_time) )
    curr_time = time.time()

    print("#############  Start Training     ##############")
    total_step = len(train_loader)


    cluster_centers, cluster_assignment = indexing_step(    model = model,
                                                            data_loader = train_loader,
                                                            cluster_centers = None)



    loss_vector = 10. * np.ones(args.K * args.num_classes)
    loss_count  = np.ones(args.K * args.num_classes)

    for epoch in range(0, args.num_epochs):


        if args.evaluate_only:         exit()
        if args.optimizer == 'sgd':    scheduler.step()

        order = define_order(cluster_assignment, cluster_centers, loss_vector/loss_count)
        train_loader.dataset.update_read_order(order)


        logger.add_scalar("Misc/Epoch Number", epoch, epoch * total_step)
        loss_vector, loss_count, stdev = train_step(   model        = model,
                                    train_loader = train_loader,
                                    criterion    = criterion,
                                    epoch        = epoch,
                                    optimizer    = optimizer,
                                    step         = epoch * total_step,
                                    valid_loader = valid_loader,
                                    assignment   = cluster_assignment,
                                    loss_vector  = loss_vector,
                                    loss_count   = loss_count)

        logger.add_scalar(args.dataset + "/STDEV ",   stdev,   epoch * total_step)

        if epoch % 3 == 0:
            curr_loss, curr_wacc = eval_step(   model       = model,
                                                data_loader = train_loader,
                                                criterion   = criterion,
                                                step        = epoch * total_step,
                                                datasplit   = "train",
                                                stdev       = stdev,
                                                cluster_centers = cluster_centers)


            curr_loss, curr_wacc = eval_step(   model       = model,
                                                data_loader = valid_loader,
                                                criterion   = criterion,
                                                step        = epoch * total_step,
                                                datasplit   = "valid",
                                                stdev       = stdev,
                                                cluster_centers = cluster_centers)

        cluster_centers, cluster_assignment = indexing_step( model = model,
                                                              data_loader = train_loader,
                                                              cluster_centers = cluster_centers)

        # args = save_checkpoint(  model      = model,
        #                          optimizer  = optimizer,
        #                          curr_epoch = epoch,
        #                          curr_loss  = curr_loss,
        #                          curr_step  = (total_step * epoch),
        #                          args       = args,
        #                          curr_acc   = curr_wacc,
        #                          filename   = ('model@epoch%d.pkl' %(epoch)))

    # Final save of the model
    args = save_checkpoint(  model      = model,
                             optimizer  = optimizer,
                             curr_epoch = epoch,
                             curr_loss  = curr_loss,
                             curr_step  = (total_step * epoch),
                             args       = args,
                             curr_acc   = curr_wacc,
                             filename   = ('model@epoch%d.pkl' %(epoch)))
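
The indexing_step used above is the magnet-loss indexing pass: embed the whole training set, cluster each class's embeddings, and return the cluster centers plus a per-sample cluster assignment. A rough sketch of that idea using scikit-learn K-means (the real helper, its K, and its return format are assumptions; the sketch also assumes the loader iterates the dataset in a fixed, unshuffled order so assignments line up with dataset indices):

import numpy as np
import torch
from sklearn.cluster import KMeans

def indexing_step(model, data_loader, cluster_centers=None, k=8):
    # Hypothetical sketch of the magnet-loss indexing pass.
    model.eval()
    embeddings, labels = [], []
    with torch.no_grad():
        for images, targets in data_loader:
            embeddings.append(model(images.cuda()).cpu().numpy())
            labels.append(targets.numpy())
    embeddings = np.concatenate(embeddings)
    labels = np.concatenate(labels)

    centers = []
    assignment = np.zeros(len(labels), dtype=np.int64)
    for c in np.unique(labels):
        idx = np.where(labels == c)[0]
        km = KMeans(n_clusters=k).fit(embeddings[idx])
        assignment[idx] = len(centers) + km.labels_   # global cluster ids
        centers.extend(km.cluster_centers_)
    return np.stack(centers), assignment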
Example #5
def main(args):
    curr_time = time.time()

    print("#############  Read in Database   ##############")
    train_loader, valid_loader = get_loaders()

    print("Time taken:  {} seconds".format(time.time() - curr_time))
    curr_time = time.time()

    print("######## Initiate Model and Optimizer   ##############")
    # Model -- inception_v3 as specified in the paper (a small CNN, Net, is used for MNIST)
    # Note: this is slightly different from the model used by the paper,
    # however, the differences should be minor in terms of implementation and impact on results

    if args.dataset == "MNIST":
        model = Net(args.embedding_size)
    else:
        model = magnetInception(args.embedding_size)

    model = torch.nn.DataParallel(model).cuda()

    # Criterion -- triplet loss with margin args.GAP
    criterion = triplet_loss(alpha=args.GAP).cuda()  # Loss function

    params = list(model.parameters())  # Parameters to train

    # Optimizer -- the optimizer is not specified in the paper, and was assumed to
    # be SGD. The optimizer hyperparameters were also not specified and were set
    # to values commonly used with PyTorch (lr = 0.1, momentum = 0.3, decay = 1e-4)
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # The paper does not specify an annealing factor, we set it to 1.0 (no annealing)
    scheduler = MultiStepLR(optimizer,
                            milestones=list(range(0, args.num_epochs, 1)),
                            gamma=args.annealing_factor)

    print("Time taken:  {} seconds".format(time.time() - curr_time))
    curr_time = time.time()

    print("#############  Start Training     ##############")
    total_step = len(train_loader)

    for epoch in range(0, args.num_epochs):

        if args.dataset == "oxford":
            order = define_order(train_loader.dataset.fine_class)
        else:
            order = define_order(train_loader.dataset.classes)
        train_loader.dataset.update_read_order(order)

        if args.evaluate_only: exit()
        if args.optimizer == 'sgd': scheduler.step()

        logger.add_scalar("Misc/Epoch Number", epoch, epoch * total_step)
        train_step(model=model,
                   train_loader=train_loader,
                   criterion=criterion,
                   epoch=epoch,
                   optimizer=optimizer,
                   step=epoch * total_step)

        train_loader.dataset.default_read_order()
        if epoch % 3 == 0:
            if args.dataset != "MNIST":
                curr_loss, curr_wacc = eval_step(model=model,
                                                 data_loader=train_loader,
                                                 criterion=criterion,
                                                 step=epoch * total_step,
                                                 datasplit="train")

            curr_loss, curr_wacc = eval_step(model=model,
                                             data_loader=valid_loader,
                                             criterion=criterion,
                                             step=epoch * total_step,
                                             datasplit="valid")

    # Final save of the model
    args = save_checkpoint(model=model,
                           optimizer=optimizer,
                           curr_epoch=epoch,
                           curr_loss=curr_loss,
                           curr_step=(total_step * epoch),
                           args=args,
                           curr_acc=curr_wacc,
                           filename=('model@epoch%d.pkl' % (epoch)))
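
triplet_loss is also not defined in these snippets. Assuming it is the standard margin-based formulation max(0, d(a, p) - d(a, n) + alpha), a hypothetical sketch follows (how anchors, positives, and negatives are actually mined from the ordered batches is not shown in the snippet, so the forward signature here is an assumption):

import torch.nn as nn
import torch.nn.functional as F

class triplet_loss(nn.Module):
    # Hypothetical margin-based triplet loss; the real module may mine
    # triplets from an ordered batch instead of taking them explicitly.
    def __init__(self, alpha=0.2):
        super(triplet_loss, self).__init__()
        self.alpha = alpha  # margin between positive and negative distances

    def forward(self, anchor, positive, negative):
        pos_dist = F.pairwise_distance(anchor, positive)
        neg_dist = F.pairwise_distance(anchor, negative)
        return F.relu(pos_dist - neg_dist + self.alpha).mean()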