parser.add_argument('--eval_batch_size', default=128, type=int)
    # NOTE(review): the indentation jump between the line above and the lines
    # below indicates this chunk was pasted from inside a function whose
    # header is not visible here.
    # Learning-rate settings: two base LRs (presumably for two parameter
    # groups) plus a multiplicative decay factor -- TODO confirm against the
    # Trainer implementation.
    parser.add_argument('--base_lr1', default=0.1, type=float)
    parser.add_argument('--base_lr2', default=0.1, type=float)
    parser.add_argument('--lr_factor', default=0.1, type=float)
    # SGD-style regularization hyperparameters.
    parser.add_argument('--custom_weight_decay', default=5e-4, type=float)
    parser.add_argument('--custom_momentum', default=0.9, type=float)
    # Checkpoint-loading controls; '-' appears to serve as a "not set"
    # sentinel -- TODO confirm how Trainer interprets it.
    parser.add_argument('--load_ckpt_prefix', type=str, default='-')
    parser.add_argument('--load_order', type=str, default='-')
    parser.add_argument('--add_str', default=None, type=str)

    the_args = parser.parse_args()
    # Sanity-check the class-incremental split: the first-phase class count
    # (nb_cl_fg) must be a multiple of, and at least as large as, the
    # per-phase class count (nb_cl).
    # NOTE(review): asserts are stripped under `python -O`; raising
    # ValueError would be more robust for input validation.
    assert (the_args.nb_cl_fg % the_args.nb_cl == 0)
    assert (the_args.nb_cl_fg >= the_args.nb_cl)

    print(the_args)

    # Seeds NumPy only; Python's `random` and torch are not seeded in this
    # fragment.
    np.random.seed(the_args.random_seed)

    # First-run bootstrap: if the expected log/checkpoint directory is
    # missing, fetch pretrained checkpoints via the download script.
    if not os.path.exists('./logs/cifar100_nfg50_ncls2_nproto20_mtl_exp01'):
        print('Download checkpoints from Google Drive.')
        os.system('sh ./script/download_ckpt.sh')

    # Restrict visible GPUs before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = the_args.gpu
    print('Using gpu:', the_args.gpu)

    # Reserve GPU memory up front so concurrent jobs cannot claim it.
    occupy_memory(the_args.gpu)
    print('Occupy GPU memory in advance.')

    # This fragment is evaluation-only: build the trainer and run eval.
    trainer = Trainer(the_args)
    trainer.eval()
# --- Example #2 (示例#2): start of the second pasted snippet; the stray "0" below it was a vote-count scrape artifact ---
                        default='EBL',
                        choices=['HPL', 'EBL'])
    # Which phase of the SIB baseline to run.
    parser.add_argument('--phase_sib',
                        type=str,
                        default='meta_train',
                        choices=['meta_train', 'meta_eval'])
    # Checkpoint consumed when running the meta-eval phase.
    parser.add_argument('--meta_eval_load_path',
                        type=str,
                        default='./ckpts/miniImageNet/e3bm_ckpt.pth')

    args = parser.parse_args()
    pprint(vars(args))
    print('Experiment label: ' + args.label)
    set_gpu(args.gpu)

    # Reserve GPU memory up front so concurrent jobs cannot claim it.
    occupy_memory(args.gpu)
    print('Occupy GPU memory in advance.')

    if args.baseline == 'MTL':
        # seed == 0 means "random mode": enable the cudnn autotuner for
        # speed instead of seeding.
        if args.seed == 0:
            torch.backends.cudnn.benchmark = True
        else:
            # Fixed seed: seed torch (CPU and current GPU) and force
            # deterministic cudnn kernels at the cost of autotuning.
            torch.manual_seed(args.seed)
            torch.cuda.manual_seed(args.seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False

        # Dispatch on phase; the meta_eval branch body is cut off in this
        # fragment.
        if args.phase == 'meta_train':
            trainer = MetaTrainer(args)
            trainer.train()
        elif args.phase == 'meta_eval':
# --- Example #3 (示例#3): start of the third pasted snippet; the stray "0" below it was a vote-count scrape artifact ---
    print('Random mode.')
    torch.backends.cudnn.benchmark = True
else:
    # Fixed seed: seed every RNG source (Python `random`, NumPy, torch CPU
    # and all CUDA devices) and force deterministic cudnn kernels at the
    # cost of autotuning speed.
    import random
    print('Fixed random seed:', args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# NOTE(review): hard-coded to single-GPU runs.
args.num_gpu = 1
if args.gpu_occupy:
    # Reserve GPU memory up front; note this reads Config.gpu_id rather
    # than an args attribute -- TODO confirm the two stay in sync.
    occupy_memory(Config.gpu_id)
    print('Occupy GPU memory in advance.')

# Dispatch on run mode; meta_train also runs the meta-test evaluation
# immediately after training.
trainer = MetaTrainer(args)
if args.mode == 'meta_train':
    print('Start meta-train phase.')
    trainer.train()
    print('Start meta-test phase.')
    trainer.eval()
elif args.mode == 'meta_eval':
    print('Start meta-test phase.')
    trainer.eval()
elif args.mode == 'pre_train':
    print('Start pre-train phase.')
    trainer.pre_train()