Example #1
0
def run(title, base_batch_size, base_labeled_batch_size, base_lr, n_labels,
        data_seed, **kwargs):
    """Scale hyperparameters by GPU count and launch a single training run.

    Sizes, learning rate and the label-split file are derived from the base
    values; all remaining keyword arguments are forwarded to the arg parser.
    """
    LOG.info('run title: %s, data seed: %d', title, data_seed)

    ngpu = torch.cuda.device_count()
    assert ngpu > 0, "Expecting at least one GPU, found none."

    # Linear scaling rule: batch sizes and lr grow with the number of GPUs.
    adapted_args = {
        'batch_size': base_batch_size * ngpu,
        'labeled_batch_size': base_labeled_batch_size * ngpu,
        'lr': base_lr * ngpu,
        'labels': 'data-local/labels/cifar10/{}_balanced_labels/{:02d}.txt'.format(
            n_labels, data_seed),
        'data_seed': data_seed,
    }

    context = RunContext(__file__, "{}_{}".format(n_labels, data_seed))
    logfile = "{}/{}.log".format(context.result_dir, 'output')

    # Mirror all log output into the run's result directory for this run only.
    fh = logging.FileHandler(logfile)
    LOG.addHandler(fh)
    LOG.info('run title: %s, data seed: %d', title, data_seed)

    main.args = parse_dict_args(LOG, **adapted_args, **kwargs)
    main.main(context, LOG)

    LOG.info('Run finished, closing logfile.')
    LOG.removeHandler(fh)
def run(title, base_batch_size, base_labeled_batch_size, base_lr, data_seed,
        **kwargs):
    """Parse the run arguments and launch a single training run.

    Fix: the original referenced a bare ``args`` name on the RunContext line,
    which was never bound inside this function (the parsed namespace went
    straight to ``main.args``) and would raise NameError at call time.
    The parsed namespace is now kept in a local before being published.
    """
    LOG.info('run title: %s', title)
    args = parse_dict_args(**kwargs)   # keep a local handle for RunContext below
    main.args = args
    context = RunContext(__file__, args.consistency, args.epochs, args.labels)
    main.main(context)
def run(title, data_seed, **kwargs):
    """Parse the run arguments and launch a single training run.

    Fix: the original passed two positional arguments to ``print`` in
    logging-style form (``print('run title: %s', title)``), which printed the
    literal ``%s`` followed by the title instead of interpolating it.
    The unused ``ngpu`` local was also dropped.
    """
    print('run title: %s' % title)
    main.args = parse_dict_args(**kwargs)
    context = RunContext(__file__, kwargs['consistency'], kwargs['epochs'],
                         kwargs['labels'])
    main.main(context)
Example #4
0
    def __init__(self):
        """Parse command-line arguments, set up run logging, and pick a device."""
        self.args = cli.parse_commandline_args()
        self.context = RunContext(logging)

        # Per-run log sinks created by the run context.
        self.training_log = self.context.create_train_log("training")
        self.results_all_log = self.context.create_results_all_log(
            "results_all")

        # Prefer CUDA when available, otherwise fall back to CPU.
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
def run(title, base_batch_size, base_labeled_batch_size, base_lr, n_labels, data_seed, **kwargs):
    """Scale hyperparameters by GPU count and launch a single CIFAR-100 run.

    Fix: guard against ``ngpu == 0`` — without it, ``batch_size``,
    ``labeled_batch_size`` and ``lr`` would all silently become 0 on a
    CPU-only machine (the other launcher variants in this codebase assert
    the same precondition).
    """
    LOG.info('run title: %s', title)
    ngpu = torch.cuda.device_count()
    assert ngpu > 0, "Expecting at least one GPU, found none."
    # Linear scaling rule: batch sizes and lr grow with the number of GPUs.
    adapted_args = {
        'batch_size': base_batch_size * ngpu,
        'labeled_batch_size': base_labeled_batch_size * ngpu,
        'lr': base_lr * ngpu,
        'labels': 'data-local/labels/cifar100/{}_balanced_labels/{:02d}.txt'.format(n_labels, data_seed),
    }
    context = RunContext(__file__, "{}_{}".format(n_labels, data_seed))
    main.args = parse_dict_args(**adapted_args, **kwargs)
    main.main(context)
Example #6
0
def run(title, base_batch_size, base_labeled_batch_size, base_lr, n_labels, data_seed, **kwargs):
    """Scale hyperparameters by GPU count and launch a multi-label CIFAR-10 run."""
    LOG.info('run title: %s, data seed: %d', title, data_seed)

    ngpu = torch.cuda.device_count()
    assert ngpu > 0, "Expecting at least one GPU, found none."

    # Linear scaling rule: batch sizes and lr grow with the number of GPUs;
    # the label split file is selected by label count and seed.
    label_file = 'data-local/labels/cifar10/{}_balanced_labels/{:02d}.txt'.format(n_labels, data_seed)
    adapted_args = {
        'batch_size': base_batch_size * ngpu,
        'labeled_batch_size': base_labeled_batch_size * ngpu,
        'lr': base_lr * ngpu,
        'labels': label_file,
    }

    context = RunContext(__file__, "{}_{}".format(n_labels, data_seed))
    main_cnn_multi_label.args = parse_dict_args(**adapted_args, **kwargs)
    main_cnn_multi_label.main(context)
Example #7
0
def run(title, base_batch_size, base_labeled_batch_size, base_lr, data_seed,
        **kwargs):
    """Scale hyperparameters by GPU count and launch a single ImageNet run.

    Fix: guard against ``ngpu == 0`` — without it, ``batch_size``,
    ``labeled_batch_size`` and ``lr`` would all silently become 0 on a
    CPU-only machine (the other launcher variants in this codebase assert
    the same precondition).
    """
    LOG.info('run title: %s', title)
    ngpu = torch.cuda.device_count()
    assert ngpu > 0, "Expecting at least one GPU, found none."
    # Linear scaling rule: batch sizes and lr grow with the number of GPUs.
    adapted_args = {
        'batch_size': base_batch_size * ngpu,
        'labeled_batch_size': base_labeled_batch_size * ngpu,
        'lr': base_lr * ngpu,
        'labels':
        'data-local/labels/ilsvrc2012/128000_balanced_labels/{:02d}.txt'.
        format(data_seed),
    }
    context = RunContext(__file__, data_seed)
    main_cifar.args = parse_dict_args(**adapted_args, **kwargs)
    main_cifar.main(context)
Example #8
0
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def get_current_consistency_weight(epoch):
    """Return the consistency weight for *epoch*.

    Sigmoid-shaped ramp-up from https://arxiv.org/abs/1610.02242: the weight
    rises from 0 to ``args.consistency`` over ``args.consistency_rampup`` epochs.
    """
    rampup = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup


def accuracy(output, target, topk=(1, )):
    """Compute the precision@k for the specified values of k.

    Entries of *target* equal to ``NO_LABEL`` are excluded from the
    denominator, so the result is precision over labeled examples only
    (the 1e-8 floor avoids division by zero when a batch has no labels).

    Fix: ``correct[:k].view(-1)`` raises ``RuntimeError`` in modern PyTorch
    because the slice of the transposed ``correct`` tensor is non-contiguous;
    ``reshape(-1)`` handles that case and is otherwise equivalent.
    """
    maxk = max(topk)
    labeled_minibatch_size = max(target.ne(NO_LABEL).sum(), 1e-8)

    # pred: (maxk, batch) — top-k predicted class ids per example, transposed
    # so each row k can be compared against the broadcast target row.
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / labeled_minibatch_size.float()))
    return res


if __name__ == '__main__':
    # Script entry point: enable INFO logging, parse CLI flags into the
    # module-level `args`, and start training with run index 0.
    logging.basicConfig(level=logging.INFO)
    args = cli.parse_commandline_args()
    main(RunContext(__file__, 0))
def run(title, data_seed, **kwargs):
    """Launch a single training run identified by *data_seed*.

    All keyword arguments are forwarded unchanged to the argument parser.
    """
    LOG.info('run title: %s', title)
    run_id = "{}".format(data_seed)
    context = RunContext('/scratch/jtb470/ssl_j/', __file__, run_id)
    main.args = parse_dict_args(**kwargs)
    main.main(context)
Example #10
0
    T1 = 10
    T2 = 60
    af = 0.3
    if epoch > T1:
        alpha = (epoch - T1) / (T2 - T1) * af
        if epoch > T2:
            alpha = af
    return alpha


def accuracy(output, target, topk=(1, )):
    """Compute the precision@k for the specified values of k.

    Entries of *target* equal to ``NO_LABEL`` are excluded from the
    denominator, so the result is precision over labeled examples only
    (the 1e-8 floor avoids division by zero when a batch has no labels).

    Fix: ``correct[:k].view(-1)`` raises ``RuntimeError`` in modern PyTorch
    because the slice of the transposed ``correct`` tensor is non-contiguous;
    ``reshape(-1)`` handles that case and is otherwise equivalent.
    """
    maxk = max(topk)
    labeled_minibatch_size = max(target.ne(NO_LABEL).sum(), 1e-8)

    # pred: (maxk, batch) — top-k predicted class ids per example, transposed
    # so each row k can be compared against the broadcast target row.
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / labeled_minibatch_size.float()))
    return res


if __name__ == '__main__':
    # Script entry point: enable INFO logging, parse CLI flags into the
    # module-level `args`, and start training with run index 0, writing
    # results under the directory given by --results-dir.
    logging.basicConfig(level=logging.INFO)  # #
    args = cli.parse_commandline_args()  # #
    main(RunContext(__file__, 0, basepath=args.results_dir))
Example #11
0
#     image_datasets = {
#             x : torchvision.datasets.ImageFolder(os.path.join(data_dir,x),
#                                  data_transforms[x])
#             for x in ['train','val','test']
#         }

#     train_loader = torch.utils.data.DataLoader(image_datasets['train'],     
#                                                         batch_size=args.batch_size, 
#                                                         shuffle=True,
#                                                         num_workers=0) 
													
#     eval_loader = torch.utils.data.DataLoader(image_datasets['val'],     
#                                                         batch_size=args.batch_size, 
#                                                         shuffle=True,
#                                                         num_workers=0)                                                 

#     testImageLoader = torch.utils.data.DataLoader(image_datasets['test'],batch_size=16,shuffle=False)

#     dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val','test']}
#     class_names = image_datasets['train'].classes
#     numOfClasses = len(class_names)

#     return train_loader,eval_loader,testImageLoader,class_names


if __name__ == '__main__':
    # Script entry point: enable INFO logging, parse CLI flags, pin the
    # process to the requested GPU, then start training.
    logging.basicConfig(level=logging.INFO)
    args = cli.parse_commandline_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    main(RunContext(__file__, args.consistency, args.epochs, args.labels))