Example #1
def main(args):
    datadir = get_data_dir(args.db)
    outputdir = get_output_dir(args.db)

    logger = None
    if args.tensorboard:
        # Create the folder for storing TensorBoard logs
        logging_dir = os.path.join(outputdir, 'runs', 'pretraining')
        if not os.path.exists(logging_dir):
            os.makedirs(logging_dir)
        logging_dir = os.path.join(logging_dir, str(args.id))
        if args.clean_log:
            remove_files_in_dir(logging_dir)
        logger = Logger(logging_dir)

    use_cuda = torch.cuda.is_available()

    # Seed all RNGs so the results can be reproduced
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manualSeed)
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True
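        # Note: cudnn.benchmark=True lets cuDNN auto-tune convolution algorithms
        # for speed; the chosen algorithms can be non-deterministic, so exact
        # run-to-run reproducibility is not guaranteed even with the seeds above.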

    kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
    trainset = DCCPT_data(root=datadir, train=True, h5=args.h5)
    testset = DCCPT_data(root=datadir, train=False, h5=args.h5)

    nepoch = int(np.ceil(float(args.niter * args.batchsize) / len(trainset)))
    step = int(np.ceil(float(args.step * args.batchsize) / len(trainset)))
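    # e.g. (illustrative numbers, not repo defaults): with niter=50000,
    # batchsize=256 and 60000 training samples, nepoch = ceil(50000*256/60000)
    # = 214, i.e. the iteration budget is re-expressed as passes over the data.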

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batchsize,
                                              shuffle=True,
                                              **kwargs)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=100,
                                             shuffle=True,
                                             **kwargs)

    return pretrain(
        args, outputdir, {
            'nlayers': 4,
            'dropout': 0.2,
            'reluslope': 0.0,
            'nepoch': nepoch,
            'lrate': [args.lr],
            'wdecay': [0.0],
            'step': step
        }, use_cuda, trainloader, testloader, logger)
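
For context, a minimal sketch of how this main might be wired to a command line. Only the attribute names (args.db, args.id, args.niter, args.step, args.batchsize, args.lr, args.manualSeed, args.h5, args.tensorboard, args.clean_log) are taken from the function above; the flag defaults and help strings are illustrative assumptions, not values from this repo.

import argparse

def parse_args():
    # Hypothetical CLI wiring for main() above; defaults are placeholders.
    parser = argparse.ArgumentParser(description='SDAE pretraining for DCC')
    parser.add_argument('--db', default='mnist', help='dataset name')
    parser.add_argument('--id', type=int, default=0, help='run id used in the log path')
    parser.add_argument('--niter', type=int, default=50000, help='total SGD iterations')
    parser.add_argument('--step', type=int, default=20000, help='iterations between LR drops')
    parser.add_argument('--batchsize', type=int, default=256)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--manualSeed', type=int, default=0)
    parser.add_argument('--h5', action='store_true', help='load data from HDF5')
    parser.add_argument('--tensorboard', action='store_true')
    parser.add_argument('--clean_log', action='store_true')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())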
Example #2
def main(args, net=None):
    global oldassignment

    datadir = get_data_dir(args.db)
    outputdir = get_output_dir(args.db)

    logger = None
    if args.tensorboard:
        # Create the folder for storing TensorBoard logs
        logging_dir = os.path.join(outputdir, 'runs', 'DCC')
        if not os.path.exists(logging_dir):
            os.makedirs(logging_dir)
        logging_dir = os.path.join(logging_dir, str(args.id))
        if args.clean_log:
            remove_files_in_dir(logging_dir)
        logger = Logger(logging_dir)

    use_cuda = torch.cuda.is_available()

    # Seed all RNGs so the results can be reproduced
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(args.manualSeed)
        torch.backends.cudnn.enabled = True
        cudnn.benchmark = True

    startepoch = 0
    kwargs = {'num_workers': 5, 'pin_memory': True} if use_cuda else {}

    # setting up dataset-specific objects
    trainset = DCCPT_data(root=datadir, train=True, h5=args.h5)
    testset = DCCPT_data(root=datadir, train=False, h5=args.h5)
    numeval = len(trainset) + len(testset)

    # extracting training data from the pretrained.mat file
    data, labels, pairs, Z, sampweight = makeDCCinp(args)

    # For simplicity, a placeholder is created for each dataset and the model
    load_pretraining = net is None
    if net is None:
        net = dp.load_predefined_extract_net(args)

    # reshaping data for some datasets
    if args.db == 'cmnist':
        data = data.reshape((-1, 1, 28, 28))
    elif args.db == 'ccoil100':
        data = data.reshape((-1, 3, 128, 128))
    elif args.db == 'cytf':
        data = data.reshape((-1, 3, 55, 55))
    elif args.db == 'cyale':
        data = data.reshape((-1, 1, 168, 192))
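    # The targets above are (N, C, H, W) tensors, the layout PyTorch's
    # convolutional layers expect; the remaining datasets keep their flat
    # feature vectors.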

    totalset = torch.utils.data.ConcatDataset([trainset, testset])

    # computing and initializing the hyperparams
    _sigma1, _sigma2, _lambda, _delta, _delta1, _delta2, lmdb, lmdb_data = computeHyperParams(pairs, Z)
    oldassignment = np.zeros(len(pairs))
    stopping_threshold = int(math.ceil(cfg.STOPPING_CRITERION * float(len(pairs))))
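    # cfg.STOPPING_CRITERION is a fraction of the pairwise constraints; the
    # threshold above is the corresponding absolute count used in the
    # stopping check inside the training loop below.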

    # Create the dataset and random batch sampler for the fine-tuning stage
    trainset = DCCFT_data(pairs, data, sampweight)
    batch_sampler = DCCSampler(trainset, shuffle=True, batch_size=args.batchsize)

    # copying model params from the pretrained (SDAE) weights file
    if load_pretraining:
        load_weights(args, outputdir, net)

    # creating the loss-function objects; the U's are initialized to Z here
    # Criterion1 corresponds to the reconstruction loss
    criterion1 = DCCWeightedELoss(size_average=True)
    # Criterion2 corresponds to the sum of the pairwise and data loss terms
    criterion2 = DCCLoss(Z.shape[0], Z.shape[1], Z, size_average=True)

    if use_cuda:
        net.cuda()
        criterion1 = criterion1.cuda()
        criterion2 = criterion2.cuda()

    # setting up data loaders for the training and testing phases
    trainloader = torch.utils.data.DataLoader(trainset, batch_sampler=batch_sampler, **kwargs)
    testloader = torch.utils.data.DataLoader(totalset, batch_size=args.batchsize, shuffle=False, **kwargs)
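    # shuffle=False keeps the evaluation order fixed, so per-sample cluster
    # assignments can be compared against oldassignment across epochs.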

    # setting up the optimizer - bias params get twice the learning rate of the weight params
    bias_params = [param for name, param in net.named_parameters() if 'bias' in name]
    nonbias_params = [param for name, param in net.named_parameters() if 'bias' not in name]

    optimizer = optim.Adam([{'params': bias_params, 'lr': 2*args.lr},
                            {'params': nonbias_params},
                            {'params': criterion2.parameters(), 'lr': args.lr},
                            ], lr=args.lr, betas=(0.99, 0.999))
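    # Param groups without an explicit 'lr' fall back to the top-level lr=args.lr,
    # so only the bias group is trained at the doubled rate.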

    # Warm start: optionally resume from a saved fine-tuning checkpoint
    if args.resume:
        filename = outputdir+'/FTcheckpoint_%d.pth.tar' % args.level
        if os.path.isfile(filename):
            print("==> loading checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)
            net.load_state_dict(checkpoint['state_dict'])
            criterion2.load_state_dict(checkpoint['criterion_state_dict'])
            startepoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            _sigma1 = checkpoint['sigma1']
            _sigma2 = checkpoint['sigma2']
            _lambda = checkpoint['lambda']
            _delta = checkpoint['delta']
            _delta1 = checkpoint['delta1']
            _delta2 = checkpoint['delta2']
        else:
            raise ValueError("no checkpoint found at '{}'".format(filename))

    # Main DCC optimization loop
    flag = 0
    for epoch in range(startepoch, args.nepoch):
        if logger:
            logger.log_value('sigma1', _sigma1, epoch)
            logger.log_value('sigma2', _sigma2, epoch)
            logger.log_value('lambda', _lambda, epoch)

        train(trainloader, net, optimizer, criterion1, criterion2, epoch, use_cuda, _sigma1, _sigma2, _lambda, logger)
        Z, U, change_in_assign, assignment = test(testloader, net, criterion2, epoch, use_cuda, _delta, pairs, numeval, flag, logger)

        if flag:
            # DCC continues to run as long as the change in label assignment
            # stays below the threshold. This check always fires in the very
            # first epoch after the flag is set, so that spurious trigger is
            # discounted by requiring the condition to fire twice more
            # (flag reaching 4) before training stops.
            if change_in_assign > stopping_threshold:
                flag += 1
            if flag == 4:
                break

        if (epoch + 1) % args.M == 0:
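            # Graduated non-convexity (continuation) schedule: sigma1/sigma2
            # are halved every M epochs down to the delta floors, sharpening
            # the robust losses from a near-convex start as training proceeds.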
            _sigma1 = max(_delta1, _sigma1 / 2)
            _sigma2 = max(_delta2, _sigma2 / 2)
            if _sigma2 == _delta2 and flag == 0:
                # Start checking for stopping criterion
                flag = 1

        # Save checkpoint
        index = (epoch // args.M) * args.M
        save_checkpoint({'epoch': epoch+1,
                         'state_dict': net.state_dict(),
                         'criterion_state_dict': criterion2.state_dict(),
                         'optimizer': optimizer.state_dict(),
                         'sigma1': _sigma1,
                         'sigma2': _sigma2,
                         'lambda': _lambda,
                         'delta': _delta,
                         'delta1': _delta1,
                         'delta2': _delta2,
                         }, index, filename=outputdir)

    output = {'Z': Z, 'U': U, 'gtlabels': labels, 'w': pairs, 'cluster': assignment}
    sio.savemat(os.path.join(outputdir, 'features'), output)
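
A minimal sketch of reading the exported results back. load_dcc_output is a hypothetical helper; the key names match the output dict above, and scipy's savemat appends the .mat extension that loadmat expects here.

import os
import numpy as np
import scipy.io as sio

def load_dcc_output(outputdir):
    # Hypothetical helper: reads back the file written by sio.savemat above.
    out = sio.loadmat(os.path.join(outputdir, 'features.mat'))
    Z, U = out['Z'], out['U']               # embeddings and cluster representatives
    gtlabels = out['gtlabels'].squeeze()    # ground-truth labels
    assignment = out['cluster'].squeeze()   # DCC cluster assignment per sample
    return Z, U, gtlabels, assignment

# Example usage:
# Z, U, gtlabels, assignment = load_dcc_output('./results')
# print('clusters found:', len(np.unique(assignment)))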