Example 1
def run():
    # Load configuration
    args = load_config()
    logger.add(os.path.join('logs', '{}.log'.format(args.dataset)), rotation="500 MB", level="INFO")
    logger.info(args)

    # Set seed
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Load dataset
    train_data, _, query_data, query_targets, retrieval_data, retrieval_targets = load_data(args.dataset, args.root)

    # Training
    for code_length in args.code_length:
        checkpoint = sh.train(
            train_data,
            query_data,
            query_targets,
            retrieval_data,
            retrieval_targets,
            code_length,
            args.device,
            args.topk,
        )
        logger.info('[code_length:{}][map:{:.4f}]'.format(code_length, checkpoint['map']))
        torch.save(checkpoint, 'checkpoints/{}_code_{}_map_{:.4f}.pt'.format(args.dataset, code_length, checkpoint['map']))
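For context: load_config() is not shown in any of these snippets. A minimal sketch of what such a helper might look like, assuming an argparse-based CLI that exposes the fields the examples rely on (all argument names and defaults here are hypothetical, not the original implementation):

import argparse

import torch


def load_config():
    # Hypothetical sketch: parse the options used by the run() snippets above.
    parser = argparse.ArgumentParser(description='Hashing experiment configuration (sketch)')
    parser.add_argument('--dataset', required=True, help='Dataset name.')
    parser.add_argument('--root', required=True, help='Dataset root directory.')
    parser.add_argument('--code-length', default='12,24,32,48',
                        help='Comma-separated hash code lengths.')
    parser.add_argument('--topk', default=5000, type=int, help='Compute mAP over the top k results.')
    parser.add_argument('--seed', default=0, type=int, help='Random seed.')
    parser.add_argument('--gpu', default=-1, type=int, help='GPU id, or -1 for CPU.')
    args = parser.parse_args()

    # The snippets iterate over args.code_length, so expose it as a list of ints.
    args.code_length = [int(c) for c in args.code_length.split(',')]
    args.device = torch.device('cpu') if args.gpu == -1 else torch.device('cuda:{}'.format(args.gpu))
    return args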
Example 2
def run():
    # Load configuration
    args = load_config()
    logger.add('logs/{}.log'.format(args.dataset), rotation='500 MB', level='INFO')
    logger.info(args)

    # Set seed
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Load dataset
    _, _, query_data, query_targets, retrieval_data, retrieval_targets = load_data(args.dataset, args.root)

    # Training
    for code_length in args.code_length:
        checkpoint = lsh.train(
            query_data,
            query_targets,
            retrieval_data,
            retrieval_targets,
            code_length,
            args.device,
            args.topk,
        )
        logger.info('[code length:{}][map:{:.4f}]'.format(code_length, checkpoint['map']))
Example 3
def create_eval_loaders(options,
                        eval_type,
                        keyframes,
                        total_batch_size=8,
                        trajectory=''):
    """ create the evaluation loader at different keyframes set-up
    """
    eval_loaders = {}

    if trajectory == '':
        trajectories = eval_trajectories(options.dataset)
    else:
        trajectories = [trajectory]

    for trajectory in trajectories:
        for kf in keyframes:
            np_loader = load_data(options.dataset, [kf], eval_type, trajectory)
            eval_loaders['{:}_keyframe_{:}'.format(
                trajectory,
                kf)] = data.DataLoader(np_loader,
                                       batch_size=int(total_batch_size),
                                       shuffle=False,
                                       num_workers=options.cpu_workers)

    return eval_loaders
Example 4
def run_dpsh(opt):
    """运行DLFH算法

    Parameters
        opt: parser
        程序运行参数

    Returns
        None
    """
    # 加载数据
    query_dataloader, train_dataloader, database_dataloader = dataloader.load_data(
        opt)

    logger.info(opt)

    # DPSH algorithm
    DPSH.dpsh(
        opt,
        train_dataloader,
        query_dataloader,
        database_dataloader,
    )
Example 5
parameters["save final"] = args.save_final
parameters["split threshold"] = args.split_threshold
parameters["margin loss"] = args.margin_loss
parameters["hallucination_epoch"] = args.hallucination_epoch
parameters["data root"] = config['data_root']
parameters["sampler"] = training_opt['sampler']

if not os.path.isdir(training_opt['log_dir']):
    os.makedirs(training_opt['log_dir'])

if not test_mode:
    data = {
        x: dataloader.load_data(data_root=config['data_root'],
                                dataset=dataset,
                                phase=x,
                                batch_size=args.batch_size,
                                use_sampler=parameters["sampler"],
                                num_workers=training_opt['num_workers'],
                                gamma=args.gamma)
        for x in ['train', 'val', 'test']
    }

    training_model = model(config, data, parameters=parameters, test=False)
    training_model.train()
else:
    warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
                            UserWarning)
    print(
        'In the testing phase, the training data is loaded only to count the number of training samples per class.'
    )
    data = {
Example 6
                           if k not in ['type', 'def_file']}
            }
    else:
        sampler_dic = None

    # generated sub-datasets all have test split
    splits = ['train', 'val']
    if dataset not in ['iNaturalist18', 'ImageNet']:
        splits.append('test')
    data = {
        x: dataloader.load_data(
            data_root=data_root[dataset.rstrip('_LT')],
            dataset=dataset,
            phase=x,
            batch_size=training_opt['batch_size'],
            sampler_dic=sampler_dic,
            num_workers=training_opt['num_workers'],
            top_k_class=training_opt['top_k']
            if 'top_k' in training_opt else None,
            cifar_imb_ratio=training_opt['cifar_imb_ratio']
            if 'cifar_imb_ratio' in training_opt else None,
        )
        for x in splits
    }

    training_model = model(config, data, test=False)
    training_model.train()

# ============================================================================
# TESTING
else:
    warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
Example 7
if not test_mode:

    sampler_defs = training_opt['sampler']
    if sampler_defs:
        sampler_dic = {
            'sampler': source_import(sampler_defs['def_file']).get_sampler(),
            'num_samples_cls': sampler_defs['num_samples_cls']
        }
    else:
        sampler_dic = None

    data = {
        x: dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
                                dataset=dataset,
                                phase=x,
                                batch_size=training_opt['batch_size'],
                                sampler_dic=sampler_dic,
                                num_workers=training_opt['num_workers'])
        for x in (['train', 'val', 'train_plain']
                  if relatin_opt['init_centroids'] else ['train', 'val'])
    }

    training_model = model(config, data, test=False)

    training_model.train()

else:

    warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
                            UserWarning)
Example 8
                        default=5000,
                        type=int,
                        help='Compute map of top k (default: 5000)')
    parser.add_argument('--evaluate-freq',
                        default=1,
                        type=int,
                        help='Evaluation frequency (default: 1)')

    return parser.parse_args()


if __name__ == '__main__':
    opt = load_parse()
    if opt.gpu == -1:
        opt.device = torch.device("cpu")
    else:
        opt.device = torch.device("cuda:%d" % opt.gpu)
    # Load data
    query_dataloader, train_dataloader, database_dataloader = load_data(opt)

    # onehot targets
    if opt.dataset == 'cifar10':
        query_targets = torch.FloatTensor(
            encode_onehot(query_dataloader.dataset.targets)).to(opt.device)
        train_targets = torch.FloatTensor(
            encode_onehot(train_dataloader.dataset.targets)).to(opt.device)
        database_targets = torch.FloatTensor(
            encode_onehot(database_dataloader.dataset.targets)).to(opt.device)
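    # One-hot targets are only constructed for cifar10 in this snippet; other
    # datasets are not handled before train() is called.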

    train(opt, query_dataloader, train_dataloader, database_dataloader,
          query_targets, train_targets, database_targets)
Example 9
                           if k not in ['type', 'def_file']}
            }
    else:
        sampler_dic = None

    # generated sub-datasets all have test split
    splits = ['train', 'val']
    if dataset not in ['iNaturalist18', 'ImageNet']:
        splits.append('test')
    data = {
        x: dataloader.load_data(data_root=data_root,
                                dataset=dataset,
                                phase=x,
                                batch_size=training_opt['batch_size'],
                                sampler_dic=sampler_dic,
                                num_workers=training_opt['num_workers'],
                                top_k_class=training_opt['top_k']
                                if 'top_k' in training_opt else None,
                                cifar_imb_ratio=training_opt['cifar_imb_ratio']
                                if 'cifar_imb_ratio' in training_opt else None,
                                reverse=args.train_reverse)
        for x in splits
    }

    training_model = model(config, data, test=False)
    training_model.train()

# ============================================================================
# TESTING
else:
    warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
Example 10
            'num_samples_cls': 4,
            'type': 'ClassAwareSampler'
        }
        sampler_dic_ldam = {
            'sampler': source_import(sampler_defs['def_file']).get_sampler(),
            'params': {
                'num_samples_cls': sampler_defs['num_samples_cls']
            }
        }

    data = {
        x: dataloader.load_data(
            data_root=data_root[dataset.rstrip('_LT')],
            dataset=dataset,
            phase=x,
            batch_size=training_opt['batch_size'],
            sampler_dic=sampler_dic if x != 'train_drw' else sampler_dic_ldam,
            num_workers=training_opt['num_workers'],
            cifar_imb_ratio=training_opt['cifar_imb_ratio']
            if 'cifar_imb_ratio' in training_opt else None)
        for x in (phase_bank)
    }  # if relatin_opt['init_centroids'] else ['train', 'val'])}

    lbs = data['train'].dataset.labels
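    # Count samples per class (assumes labels is a plain Python list supporting .count())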
    counts = []
    for i in range(len(np.unique(lbs))):
        counts.append(lbs.count(i))
    config['label_counts'] = counts

    counts = pd.DataFrame(counts)
    #tail classes
Example 11
def run_dsdh(opt):
    """Run DSDH algorithm

    Parameters
        opt: parser
        Configuration

    Returns
        None
    """
    # Load data
    query_dataloader, train_dataloader, database_dataloader = dataloader.load_data(
        opt)

    # onehot targets
    if opt.dataset == 'cifar10':
        query_targets = torch.FloatTensor(
            encode_onehot(query_dataloader.dataset.targets)).to(opt.device)
        train_targets = torch.FloatTensor(
            encode_onehot(train_dataloader.dataset.targets)).to(opt.device)
        database_targets = torch.FloatTensor(
            encode_onehot(database_dataloader.dataset.targets)).to(opt.device)
    elif opt.dataset == 'nus-wide':
        query_targets = torch.FloatTensor(query_dataloader.dataset.targets).to(
            opt.device)
        train_targets = torch.FloatTensor(train_dataloader.dataset.targets).to(
            opt.device)
        database_targets = torch.FloatTensor(
            database_dataloader.dataset.targets).to(opt.device)

    cl = [12, 24, 32, 48]
    for c in cl:
        opt.code_length = c

        # DSDH algorithm
        logger.info(opt)
        best_model = DSDH.dsdh(
            train_dataloader,
            query_dataloader,
            train_targets,
            query_targets,
            opt.code_length,
            opt.max_iter,
            opt.dcc_iter,
            opt.mu,
            opt.nu,
            opt.eta,
            opt.model,
            opt.multi_gpu,
            opt.device,
            opt.lr,
            opt.evaluate_freq,
            opt.topk,
        )

        # Evaluate whole dataset
        model = torch.load(os.path.join('result', best_model))
        final_map = evaluate(
            model,
            query_dataloader,
            database_dataloader,
            query_targets,
            database_targets,
            opt.code_length,
            opt.device,
            opt.topk,
        )
        logger.info('final_map: {:.4f}'.format(final_map))
Example 12
def train():
    # Load data
    print('Loading data ...')
    start_time = time.time()
    q1_train, q2_train, y_train, q1_dev, q2_dev, y_dev, vocab_size = load_data(
        data_file=args.train_files,
        dev_sample_percentage=args.dev_sample_percentage,
        vocab_path=os.path.join(args.save_dir, "vocab"))
    time_dif = get_time_dif(start_time)
    print('Time usage:', time_dif)

    print('Configuring TensorBoard and Saver ...')
    tensorboard_dir = args.tensorboard_dir
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)

    # MVLSTM model init
    model = MVLSTM(sequence_length=args.max_q_len,
                   num_classes=args.num_classes,
                   embedding_dim=args.embedding_dim,
                   vocab_size=vocab_size,
                   max_length=args.max_q_len,
                   hidden_dim=args.hidden_size,
                   learning_rate=args.learning_rate)

    tf.summary.scalar('loss', model.loss)
    tf.summary.scalar('accuracy', model.accuracy)
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter(tensorboard_dir)

    # Configuring Saver

    saver = tf.train.Saver()
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # Create Session
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    writer.add_graph(session.graph)

    print('Training and evaluating on the dev set ...')
    start_time = time.time()
    total_batch = 0
    best_acc_dev = 0.0
    last_improved = 0
    require_improvement = 30000  # Early stopping

    tag = False
    for epoch in range(args.epochs):
        print('Epoch:', epoch + 1)
        batch_train = batch_iter_per_epoch(q1_train, q2_train, y_train,
                                           args.batch_size)
        for q1_batch, q2_batch, y_batch in batch_train:
            feed_dict = feed_data(q1_batch,
                                  q2_batch,
                                  y_batch,
                                  args.dropout_keep_prob,
                                  model=model)
            if total_batch % args.checkpoint_every == 0:
                # write to tensorboard scalar
                summary = session.run(merged_summary, feed_dict)
                writer.add_summary(summary, total_batch)

            if total_batch % args.evaluate_every == 0:
                # print performance on train set and dev set
                feed_dict[model.dropout_keep_prob] = 1.0
                loss_train, acc_train = session.run(
                    [model.loss, model.accuracy], feed_dict=feed_dict)
                loss_dev, acc_dev = evaluate(q1_dev,
                                             q2_dev,
                                             y_dev,
                                             session,
                                             model=model)

                if acc_dev > best_acc_dev:
                    # save best result
                    best_acc_dev = acc_dev
                    last_improved = total_batch
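                    # 'save_path' is assumed to be defined elsewhere in the module (not shown in this snippet)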
                    saver.save(sess=session, save_path=save_path)
                    improved_str = '*'
                else:
                    improved_str = ''

                time_dif = get_time_dif(start_time)
                print(
                    'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:7.2%}, Val Loss: {3:>6.2}, Val Acc:'
                    '{4:>7.2%}, Time:{5}{6}'.format(total_batch, loss_train,
                                                    acc_train, loss_dev,
                                                    acc_dev, time_dif,
                                                    improved_str))

            session.run(model.optim, feed_dict)
            total_batch += 1

            if total_batch - last_improved > require_improvement:
                # having no improvement for a long time
                print('No optimization for a long time, auto-stopping ...')
                tag = True
                break
        if tag:  # early stopping
            break
Example 13
def train(options):

    if options.time:
        timers = Timers()
    else:
        timers = None

    total_batch_size = options.batch_per_gpu * torch.cuda.device_count()

    checkpoint = train_utils.load_checkpoint_train(options)

    keyframes = [int(x) for x in options.keyframes.split(',')]  # e.g. '1,2,4' -> [1, 2, 4]
    train_loader = load_data(options.dataset, keyframes, load_type = 'train')
    train_loader = data.DataLoader(train_loader,
        batch_size = total_batch_size,
        shuffle = True, num_workers = options.cpu_workers)
    if options.dataset in ['BundleFusion', 'TUM_RGBD']:
        obj_has_mask = False
    else:
        obj_has_mask = True

    eval_loaders = create_train_eval_loaders(options, 'validation', keyframes, total_batch_size)

    logfile_name = '_'.join([
        options.prefix, # the current test version
        options.network,
        options.encoder_name,
        options.mestimator,
        options.solver,
        'lr', str(options.lr),
        'batch', str(total_batch_size),
        'kf', options.keyframes])

    print("Initialize and train the Deep Trust Region Network")
    net = ICtracking.LeastSquareTracking(
        encoder_name    = options.encoder_name,
        max_iter_per_pyr= options.max_iter_per_pyr,
        mEst_type       = options.mestimator,
        solver_type     = options.solver,
        tr_samples      = options.tr_samples,
        no_weight_sharing = options.no_weight_sharing,
        timers          = timers)

    if options.no_weight_sharing:
        logfile_name += '_no_weight_sharing'
    logger = train_utils.initialize_logger(options, logfile_name)

    if options.checkpoint:
        net.load_state_dict(checkpoint['state_dict'])

    if torch.cuda.is_available(): net.cuda()

    net.train()

    if torch.cuda.device_count() > 1:
        print("Use", torch.cuda.device_count(), "GPUs for training!")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        net = nn.DataParallel(net)

    train_objective = ['EPE3D'] # Note: we don't use RPE for training
    eval_objectives = ['EPE3D', 'RPE']

    num_params = train_utils.count_parameters(net)

    if num_params < 1:
        print('There are no learnable parameters in this baseline.')
        print('No training. Only one iteration of evaluation')
        no_training = True
    else:
        print('There are a total of {:} learnable parameters'.format(num_params))
        no_training = False
        optim = train_utils.create_optim(options, net)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optim,
            milestones=options.lr_decay_epochs,
            gamma=options.lr_decay_ratio)

    freq = options.save_checkpoint_freq
    for epoch in range(options.start_epoch, options.epochs):

        if epoch % freq == 0:
            checkpoint_name = 'checkpoint_epoch{:d}.pth.tar'.format(epoch)
            print('save {:}'.format(checkpoint_name))
            state_info = {'epoch': epoch, 'num_param': num_params}
            logger.save_checkpoint(net, state_info, filename=checkpoint_name)

        if options.no_val is False:
            for k, loader in eval_loaders.items():

                eval_name = '{:}_{:}'.format(options.dataset, k)

                eval_info = eval_utils.evaluate_trust_region(
                    loader, net, eval_objectives, 
                    known_mask  = obj_has_mask, 
                    eval_name   = eval_name,
                    timers      = timers)

                display_dict = {"{:}_epe3d".format(eval_name): eval_info['epes'].mean(), 
                    "{:}_rpe_angular".format(eval_name): eval_info['angular_error'].mean(), 
                    "{:}_rpe_translation".format(eval_name): eval_info['translation_error'].mean()}

                logger.write_to_tensorboard(display_dict, epoch)

        if no_training: break

        train_one_epoch(options, train_loader, net, optim, epoch, logger,
            train_objective, known_mask=obj_has_mask, timers=timers)

        scheduler.step()
Example 14
def run():
    # Load configuration
    args = load_config()
    logger.add('logs/{}_beta_{}_lamda_{}.log'.format(args.dataset, args.beta,
                                                     args.lamda),
               rotation='500 MB',
               level='INFO')
    logger.info(args)

    # Set seed
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Load dataset
    train_data, train_targets, query_data, query_targets, retrieval_data, retrieval_targets = load_data(
        args.dataset, args.root)

    # Training
    for code_length in args.code_length:
        checkpoint = lfh.train(
            train_data,
            train_targets,
            query_data,
            query_targets,
            retrieval_data,
            retrieval_targets,
            code_length,
            args.num_samples,
            args.max_iter,
            args.beta,
            args.lamda,
            args.device,
            args.topk,
        )
        logger.info('[code length:{}][map:{:.4f}]'.format(
            code_length, checkpoint['map']))
        torch.save(
            checkpoint,
            'checkpoints/{}_code_{}_beta_{}_lamda_{}_map_{:.4f}.pt'.format(
                args.dataset, code_length, args.beta, args.lamda,
                checkpoint['map']))
Example 15
args = parser.parse_args()

test_mode = args.test
test_open = args.test_open
if test_open:
    test_mode = True
output_logits = args.output_logits

config = source_import(args.config).config
training_opt = config['training_opt']
# change
relatin_opt = config['memory']
dataset = training_opt['dataset']
data_loader = dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
                                   dataset=dataset,
                                   phase='val',
                                   batch_size=1,
                                   num_workers=training_opt['num_workers'])
baseline_model = model(config, data_loader, test=False)
baseline_model.load_model()

for model in baseline_model.networks.values():
    model.eval()

cls_weight = baseline_model.networks['classifier'].module.fc.weight
baseline_model.networks['classifier'].module.fc.bias  # note: accessed but never used

cls_weight = cls_weight.norm(dim=1).tolist()  # per-class L2 norm of each classifier weight row
df = pd.read_csv("./analysis/classifier_weight_norm.csv")
df[args.col] = cls_weight
df.to_csv("./analysis/classifier_weight_norm.csv", index=False)
Example 16
def run_hcoh(args):
    """Run HCOH algorithm

    Parameters
        args: parser
        Configuration

    Returns
        None
    """
    # Load dataset
    train_data, train_targets, query_data, query_targets, database_data, database_targets = dataloader.load_data(args)

    # Preprocess dataset
    # Normalization
    train_data = normalization(train_data)
    query_data = normalization(query_data)
    database_data = normalization(database_data)

    # One-hot
    query_targets = encode_onehot(query_targets, 10)
    database_targets = encode_onehot(database_targets, 10)

    # Convert to Tensor
    train_data = torch.from_numpy(train_data).float().to(args.device)
    query_data = torch.from_numpy(query_data).float().to(args.device)
    database_data = torch.from_numpy(database_data).float().to(args.device)
    train_targets = torch.from_numpy(train_targets).squeeze().to(args.device)
    query_targets = torch.from_numpy(query_targets).to(args.device)
    database_targets = torch.from_numpy(database_targets).to(args.device)

    # HCOH algorithm
    for code_length in [8, 16, 32, 64, 128]:
        args.code_length = code_length
        mAP = 0.0
        precision = 0.0
        for i in range(10):
            m, p = HCOH.hcoh(
                train_data,
                train_targets,
                query_data,
                query_targets,
                database_data,
                database_targets,
                args.code_length,
                args.lr,
                args.num_hadamard,
                args.device,
                args.topk,
            )
            mAP += m
            precision += p
        logger.info('[code_length:{}][map:{:.3f}][precision:{:.3f}]'.format(code_length, mAP / 10, precision / 10))
Example 17
if __name__ == '__main__':
    net = models.__dict__['resnet18'](pretrained=True).cuda()

    net = net.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(net.parameters(),
                          lr=LR,
                          momentum=momentum,
                          weight_decay=weight_decay)
    cudnn.benchmark = True

    # data loading code

    for epoch in range(init_epoch, num_epoch):
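        # Note: load_data is called inside the epoch loop, so the data loaders are rebuilt every epoch.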
        train_loader, test_loader = load_data(data_path=data_path,
                                              batch_size=batch_size,
                                              nb_workers=n_workers)

        print('\nEpoch: %d' % (epoch + 1))
        net.train()
        s1 = time.time()
        for i, data in enumerate(train_loader):
            inputs, labels = data
            inputs, labels = inputs.cuda(), labels.cuda()
            optimizer.zero_grad()

            # forward + backward
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
Example 18
def run():
    # Load configuration
    args = load_config()
    logger.add(
        'logs/{}_code_{}_anchor_{}_lamda_{}_nu_{}_sigma_{}_topk_{}.log'.format(
            args.dataset,
            '_'.join([str(code_length) for code_length in args.code_length]),
            args.num_anchor,
            args.lamda,
            args.nu,
            args.sigma,
            args.topk,
        ),
        rotation='500 MB',
        level='INFO',
    )
    logger.info(args)

    # Set seed
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    # Load data
    train_data, train_targets, query_data, query_targets, retrieval_data, retrieval_targets = load_data(
        args.dataset, args.root)

    # Training
    for code_length in args.code_length:
        checkpoint = sdh.train(
            train_data,
            train_targets,
            query_data,
            query_targets,
            retrieval_data,
            retrieval_targets,
            code_length,
            args.num_anchor,
            args.max_iter,
            args.lamda,
            args.nu,
            args.sigma,
            args.device,
            args.topk,
        )
        logger.info('[code length:{}][map:{:.4f}]'.format(
            code_length, checkpoint['map']))

        # Save checkpoint
        torch.save(
            checkpoint,
            'checkpoints/{}_code_{}_anchor_{}_lamda_{}_nu_{}_sigma_{}_topk_{}_map_{:.4f}.pt'
            .format(
                args.dataset,
                code_length,
                args.num_anchor,
                args.lamda,
                args.nu,
                args.sigma,
                args.topk,
                checkpoint['map'],
            ))
Example 19
    sal = sal.resize((224, 224), resample=Image.LINEAR)
    plt.imshow(transform_forshow(image))
    plt.imshow(np.array(sal), alpha=0.4, cmap='jet')
    plt.savefig('test1.jpg')

if not test_mode:
    sampler_defs = training_opt['sampler']
    if sampler_defs:
        sampler_dic = {'sampler': source_import(sampler_defs['def_file']).get_sampler(),
                       'num_samples_cls': sampler_defs['num_samples_cls']}
    else:
        sampler_dic = None

    data = {x: dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')], dataset=dataset, phase=x,
                                    batch_size=128,
                                    sampler_dic=sampler_dic,
                                    num_workers=training_opt['num_workers'],
                                    shuffle=False) for x in  ['train', 'val']}

    baseline_model = model(config, data, test=False)
    baseline_model.load_model(baseline_path)
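    # Split the backbone: all but its last child form the feature extractor, while the
    # final (pooling) layer plus Flatten is prepended to the original classifier head.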
    feature_b = nn.Sequential(*list(baseline_model.networks['feat_model'].module.children())[:-1])
    avgpool = [list(baseline_model.networks['feat_model'].module.children())[-1],Flatten()]
    classifier_b = nn.Sequential(*(avgpool+list(baseline_model.networks['classifier'].module.children())))

    mixup_model = model(config, data, test=False)
    mixup_model.load_model(mixup_only_path)
    feature_m = nn.Sequential(*list(mixup_model.networks['feat_model'].module.children())[:-1])
    avgpool_m = [list(mixup_model.networks['feat_model'].module.children())[-1],Flatten()]
    classifier_m = nn.Sequential(*(avgpool_m+list(mixup_model.networks['classifier'].module.children())))
Example 20
                'params': {
                    'meta_learner': learner,
                    'batch_size': training_opt['batch_size']
                }
            }
    else:
        sampler_dic = None

    splits = ['train', 'train_plain', 'val']
    if dataset not in ['iNaturalist18', 'ImageNet']:
        splits.append('test')
    data = {
        x: dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
                                dataset=dataset,
                                phase=split2phase(x),
                                batch_size=training_opt['batch_size'],
                                sampler_dic=sampler_dic,
                                num_workers=training_opt['num_workers'],
                                cifar_imb_ratio=training_opt['cifar_imb_ratio']
                                if 'cifar_imb_ratio' in training_opt else None)
        for x in splits
    }

    if sampler_defs and sampler_defs[
            'type'] == 'MetaSampler':  # todo: use meta-sampler
        cbs_file = './data/ClassAwareSampler.py'
        cbs_sampler_dic = {
            'sampler': source_import(cbs_file).get_sampler(),
            'params': {
                'is_infinite': True
            }
        }
Example 21
import torch
import torch.nn as nn

import torchvision
import torchvision.transforms as transforms

import torch.optim as optim

import os

from data.dataloader import load_data
"""
LOAD CIFAR100
"""
trainloader, testloader = load_data()
"""
Define a Loss function
"""

criterion = nn.CrossEntropyLoss()
"""
Training on GPU
"""
net = resnet18()

#For retraining, load the previously trained model

# PATH = "./models/trained"
# net = torch.load(os.path.join(PATH, "resnet18.pth"))