Example #1
# ============================================================================
# LOAD CONFIGURATIONS
with open(args.cfg) as f:
    # recent PyYAML versions require an explicit Loader (plain yaml.load(f) is rejected)
    config = yaml.load(f, Loader=yaml.FullLoader)
config = update(config, args)

test_mode = args.test
val_mode = args.val

if not test_mode and not val_mode:
    opt = config['training_opt']
    if not os.path.isdir(opt['log_dir']):
        os.makedirs(opt['log_dir'])
    pprint.pprint(config)
    data = load_data(opt)
    training_model = model(config, data, test=False)
    training_model.train()
if val_mode:
    opt = config['val_opt']
    data = load_data(opt)
    training_model = model(config, data, test=True)
    # training_model.load_model(args.model_dir)
    # pdb.set_trace()
    training_model.eval('test', save=opt['log_dir'], window=opt['window'])
if test_mode:
    opt = config['testting_opt']
    data = load_data(opt)
    training_model = model(config, data, test=True)
    # load checkpoints
    # training_model.load_model(args.model_dir)
    training_model.eval('test', save=opt['log_dir'], window=opt['window'])
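The snippet above assumes an `args` namespace (args.cfg, args.test, args.val, args.model_dir) and an update(config, args) helper defined elsewhere in the script. A minimal sketch of the implied command-line setup, assuming plain argparse flags named after those attributes; the defaults and the merge rule in update() are illustrative, not the original implementation:

# Illustrative sketch only: flag names mirror the args.* attributes used above;
# the merge logic in update() is an assumption, not the original helper.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, required=True, help='path to the YAML config file')
parser.add_argument('--test', action='store_true', help='run the testing branch')
parser.add_argument('--val', action='store_true', help='run the validation branch')
parser.add_argument('--model_dir', type=str, default=None, help='checkpoint directory to load')
args = parser.parse_args()

def update(config, args):
    """Overwrite matching top-level config entries with non-None command-line values."""
    for key, value in vars(args).items():
        if value is not None and key in config:
            config[key] = value
    return config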
Example #2
        data['meta'] = dataloader.load_data(
            data_root=data_root[dataset.rstrip('_LT')],
            dataset=dataset,
            phase='train' if 'CIFAR' in dataset else 'val',
            batch_size=sampler_defs.get(
                'meta_batch_size',
                training_opt['batch_size'],
            ),
            sampler_dic=cbs_sampler_dic,
            num_workers=training_opt['num_workers'],
            cifar_imb_ratio=training_opt['cifar_imb_ratio']
            if 'cifar_imb_ratio' in training_opt else None,
            meta=True)
        training_model = model(config,
                               data,
                               test=False,
                               meta_sample=True,
                               learner=learner)
    else:
        training_model = model(config, data, test=False)

    training_model.train()

else:

    warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
                            UserWarning)

    print('Under testing phase, we load training data simply to calculate '
          'training data number for each class.')
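Example #2 hands the meta loader a cbs_sampler_dic through the sampler_dic argument. In comparable long-tailed recognition code, that argument is typically a small dict bundling a sampler class with the keyword arguments the loader should construct it with. The sketch below shows one such shape using torch's WeightedRandomSampler; the 'sampler'/'params' key names and the sampler choice are assumptions for illustration, not taken from this snippet.

# Hypothetical shape of the sampler_dic / cbs_sampler_dic argument; the key names
# and the WeightedRandomSampler choice are illustrative assumptions.
import torch
from torch.utils.data import WeightedRandomSampler

# per-sample weights would normally be derived from inverse class frequencies
example_weights = torch.ones(100)

cbs_sampler_dic = {
    'sampler': WeightedRandomSampler,      # sampler class the loader instantiates
    'params': {                            # constructor kwargs forwarded by the loader
        'weights': example_weights,
        'num_samples': len(example_weights),
        'replacement': True,
    },
}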
Example #3
File: main.py Project: JKozerawski/BLT
if not os.path.isdir(training_opt['log_dir']):
    os.makedirs(training_opt['log_dir'])

if not test_mode:
    data = {
        x: dataloader.load_data(data_root=config['data_root'],
                                dataset=dataset,
                                phase=x,
                                batch_size=args.batch_size,
                                use_sampler=parameters["sampler"],
                                num_workers=training_opt['num_workers'],
                                gamma=args.gamma)
        for x in ['train', 'val', 'test']
    }

    training_model = model(config, data, parameters=parameters, test=False)
    training_model.train()
else:
    warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
                            UserWarning)
    print(
        'Under testing phase, we load training data simply to calculate training data number for each class.'
    )
    data = {
        x: dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
                                dataset=dataset,
                                phase=x,
                                batch_size=args.batch_size,
                                num_workers=training_opt['num_workers'],
                                shuffle=False)
        for x in ['train', 'test']
    }
Example #4
    data = {
        x: dataloader.load_data(data_root=data_root,
                                dataset=dataset,
                                phase=x,
                                batch_size=training_opt['batch_size'],
                                sampler_dic=sampler_dic,
                                num_workers=training_opt['num_workers'],
                                top_k_class=training_opt['top_k']
                                if 'top_k' in training_opt else None,
                                cifar_imb_ratio=training_opt['cifar_imb_ratio']
                                if 'cifar_imb_ratio' in training_opt else None,
                                reverse=args.train_reverse)
        for x in splits
    }

    training_model = model(config, data, test=False)
    training_model.train()

# ============================================================================
# TESTING
else:
    warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data",
                            UserWarning)
    print(
        'Under testing phase, we load training data simply to calculate training data number for each class.'
    )

    if 'iNaturalist' in dataset.rstrip('_LT'):
        splits = ['train', 'val']
        test_split = 'val'
    else:
Example #5
test_mode = args.test
test_open = args.test_open
if test_open:
    test_mode = True
output_logits = args.output_logits

config = source_import(args.config).config
training_opt = config['training_opt']
relation_opt = config['memory']  # memory/relation module settings
dataset = training_opt['dataset']
data_loader = dataloader.load_data(data_root=data_root[dataset.rstrip('_LT')],
                                   dataset=dataset,
                                   phase='val',
                                   batch_size=1,
                                   num_workers=training_opt['num_workers'])
baseline_model = model(config, data_loader, test=False)
baseline_model.load_model()

# put every sub-network into evaluation mode
for net in baseline_model.networks.values():
    net.eval()

# per-class weight vectors of the classifier; the bias term is not needed for this analysis
cls_weight = baseline_model.networks['classifier'].module.fc.weight
cls_weight = cls_weight.norm(dim=1).tolist()
df = pd.read_csv("./analysis/classifier_weight_norm.csv")
df[args.col] = cls_weight
df.to_csv("./analysis/classifier_weight_norm.csv", index=False)
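Example #5 appends the per-class classifier weight norms as a new column (named by --col) in ./analysis/classifier_weight_norm.csv. A small follow-up sketch showing how that CSV could be plotted for inspection; it assumes only the file produced above and uses pandas/matplotlib:

# Optional follow-up: visualize the recorded per-class weight norms.
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("./analysis/classifier_weight_norm.csv")
for col in df.columns:
    # in long-tailed setups the class index usually runs from head to tail classes
    plt.plot(df[col].values, label=col)
plt.xlabel('class index')
plt.ylabel('classifier weight L2 norm')
plt.legend()
plt.savefig('./analysis/classifier_weight_norm.png', dpi=150)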
Example #6
def run_experiment(args):

    args.config = './config/config.py'
    config = source_import(args.config).config
    training_opt = config['training_opt']

    training_opt['dataset'] = args.data
    training_opt['log_dir'] = './logs/'+args.data

    dataset = training_opt['dataset']
    training_opt['batch_size'] = args.batch_size
    training_opt['num_epochs'] = args.num_epochs
    if not os.path.isdir(training_opt['log_dir']):
        os.makedirs(training_opt['log_dir'])
    if args.preprocess:
        process_data(dataset, training_opt['add_inverse_relation'])
        return 0

    print('Preparing knowledge graph base!')
    kg = KnowledgeGraph(data_root[dataset], args, config)
    kg.load_all_triples(data_root[dataset], add_reversed_edges=training_opt['add_inverse_relation'])
    pprint.pprint(config)

    if args.train:
        print('Loading dataset from: %s' % data_root[dataset])
        data = {x: dataloader.load_data_kg(data_root=data_root[dataset], dataset=dataset, phase=x,
                                           batch_size=training_opt['batch_size'],kg_base=kg,
                                           add_reversed_edges=training_opt['add_inverse_relation'])
                for x in (['train', 'dev'])}
        config['training_opt']['num_classes'] = len(data['train'].dataset.relation2id.keys())

        training_model = model(config, data, test=False, args=args)
        training_model.train()

    elif args.test:
        warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
        print('Under testing phase, we load training data simply to calculate training data number for each class.')
        data = {x: dataloader.load_data_kg(data_root=data_root[dataset], dataset=dataset, phase=x,
                                        batch_size=training_opt['batch_size'], kg_base=kg,
                                        test_open=False,
                                        shuffle=False,
                                        add_reversed_edges=False)
                for x in ['train', 'dev']}

        config['training_opt']['num_classes'] = len(data['train'].dataset.relation2id.keys())
        training_model = model(config, data, test=True, args=args)
        training_model.load_model()
        training_model.eval_batch(phase='dev', openset=False)

    elif args.test_open:
        warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
        print('Under testing phase, we load training data simply to calculate training data number for each class.')

        data = {x: dataloader.load_data_kg(data_root=data_root[dataset], dataset=dataset, phase=x,
                                        batch_size=training_opt['batch_size'], kg_base=kg,
                                        test_open=True,
                                        shuffle=False,
                                        add_reversed_edges=False)
                for x in ['train','dev']}
        training_model = model(config, data, test=True, args=args, kg=kg)
        training_model.load_model()
        training_model.eval_batch(phase='dev', openset=args.test_open)

    print('ALL COMPLETED.')
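run_experiment(args) reads args.data, args.batch_size, args.num_epochs, args.preprocess, args.train, args.test and args.test_open, and sets args.config itself. A minimal sketch of an entry point that supplies those attributes; the defaults and help texts are illustrative, not taken from the original project:

# Minimal sketch of an entry point; flag names mirror the args.* attributes
# used in run_experiment(), defaults are illustrative.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, required=True, help='dataset name (key of data_root)')
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--num_epochs', type=int, default=50)
    parser.add_argument('--preprocess', action='store_true', help='only preprocess the triples, then exit')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--test_open', action='store_true', help='open-set evaluation on the dev split')
    run_experiment(parser.parse_args())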