Example 1
import argparse
import os

import utils

parser = argparse.ArgumentParser()
# NOTE: earlier argument definitions used below (--arch, --data, --cpb,
# --epo, --bs, --lr, --mom, --wd, --lcr, --tail, --save_dir) are truncated
# from this excerpt.
parser.add_argument('--pretrain_dir',
                    type=str,
                    default=None,
                    help='load pretrained checkpoint for assigning labels')
parser.add_argument('--pretrain_epo',
                    type=int,
                    default=None,
                    help='load pretrained epoch for assigning labels')
args = parser.parse_args()

## Pipeline Setup
model_dir = os.path.join(
    args.save_dir,
    'seqsupce_{}+{}_cpb{}_epo{}_bs{}_lr{}_mom{}_wd{}_lcr{}{}'.format(
        args.arch, args.data, args.cpb, args.epo, args.bs, args.lr, args.mom,
        args.wd, args.lcr, args.tail))
headers = ["label_batch_id", "epoch", "step", "loss"]  # columns of the training log
utils.init_pipeline(model_dir, headers)
utils.save_params(model_dir, vars(args))


## Per-Model Functions
def lr_schedule(epoch, optimizer):
    """decrease the learning rate"""
    lr = args.lr
    if epoch >= 400:
        lr = args.lr * 0.01
    elif epoch >= 200:
        lr = args.lr * 0.1
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
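
A minimal sketch of how this schedule would typically be driven from a training loop. The optimizer settings and the `net`/`trainloader` objects below are illustrative assumptions, not part of this excerpt:

import torch.optim as optim

# hypothetical wiring: `net` and `trainloader` stand in for the model and
# data loader set up elsewhere in the script
optimizer = optim.SGD(net.parameters(), lr=args.lr,
                      momentum=args.mom, weight_decay=args.wd)
for epoch in range(args.epo):
    lr_schedule(epoch, optimizer)  # step the LR down at epochs 200 and 400
    for step, batch in enumerate(trainloader):
        ...  # forward/backward pass and optimizer.step()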

Example 2
import argparse
import os

import utils
import train_func as tf  # assumed module name, inferred from the `tf.` calls below
from augmentloader import AugmentLoader  # assumed module name, inferred from usage below

parser = argparse.ArgumentParser()
# NOTE: earlier argument definitions used below (--arch, --fd, --data, --epo,
# --bs, --aug, --transform, --sampler, --lr, --mom, --wd, --gam1, --gam2,
# --eps, --tail, --pretrain_dir, --pretrain_epo) are truncated from this
# excerpt.
parser.add_argument(
    '--save_dir',
    type=str,
    default='./saved_models/',
    help='base directory for saving PyTorch model. (default: ./saved_models/)')
parser.add_argument(
    '--data_dir',
    type=str,
    default='./data/',
    help='base directory for loading data. (default: ./data/)')
args = parser.parse_args()

## Pipeline Setup
model_dir = os.path.join(
    args.save_dir,
    'selfsup_{}+{}_{}_epo{}_bs{}_aug{}+{}_lr{}_mom{}_wd{}_gam1{}_gam2{}_eps{}{}'
    .format(args.arch, args.fd, args.data, args.epo, args.bs, args.aug,
            args.transform, args.lr, args.mom, args.wd, args.gam1, args.gam2,
            args.eps, args.tail))
utils.init_pipeline(model_dir)

## Prepare for Training
if args.pretrain_dir is not None:
    # warm-start from a pretrained checkpoint and record where it came from
    net, _ = tf.load_checkpoint(args.pretrain_dir, args.pretrain_epo)
    utils.update_params(model_dir, args.pretrain_dir)
else:
    # otherwise instantiate the architecture from scratch
    net = tf.load_architectures(args.arch, args.fd)
transforms = tf.load_transforms(args.transform)
trainset = tf.load_trainset(args.data, path=args.data_dir)
trainloader = AugmentLoader(trainset,
                            transforms=transforms,
                            sampler=args.sampler,
                            batch_size=args.bs,
                            num_aug=args.aug)  # augmented views drawn per sample
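
From here a training loop would typically consume the loader directly. A minimal sketch; the batch structure yielded by AugmentLoader and the optimizer choice are assumptions for illustration, not taken from this excerpt:

import torch.optim as optim

optimizer = optim.SGD(net.parameters(), lr=args.lr,
                      momentum=args.mom, weight_decay=args.wd)
for epoch in range(args.epo):
    for step, batch in enumerate(trainloader):
        # each batch is assumed to carry args.aug augmented views per image
        ...  # forward the batch through net, compute the loss, then
        #     optimizer.zero_grad(), loss.backward(), optimizer.step()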