# Example #1
def inf_train_gen(loader):
    """Yield batches from *loader* forever, restarting it each time it is exhausted."""
    while True:
        yield from loader


# Training setup: device contexts, hyper-parameters, loaders, model and
# LR schedule, all driven by the parsed command-line options in `opt`.

# One MXNet GPU context per comma-separated device id, e.g. "0,1".
ctx = [mx.gpu(int(i)) for i in opt.ctx.split(",")]

batch_size = opt.batch_size
num_iterations = opt.niters

# Scale/margin hyper-parameters (presumably for a margin-softmax loss
# such as ArcFace -- confirm against the loss construction below).
margin_s = opt.margin_s
margin_m = opt.margin_m

train_set = get_recognition_dataset(opt.dataset, transform=transform_train)
# last_batch='discard' drops the trailing partial batch so every training
# batch has exactly `batch_size` samples.
train_data = DataLoader(train_set, batch_size, shuffle=True, num_workers=opt.num_workers, last_batch='discard')
batch_generator = inf_train_gen(train_data)

# One validation loader per comma-separated target dataset name;
# last_batch='keep' so no validation sample is dropped.
targets = opt.target
val_sets = [get_recognition_dataset(name, transform=transform_test) for name in targets.split(",")]
val_datas = [DataLoader(dataset, batch_size, last_batch='keep') for dataset in val_sets]

dtype = opt.dtype
train_net = get_model(opt.model, classes=train_set.num_classes, weight_norm=True, feature_norm=True)
train_net.initialize(init=mx.init.MSRAPrelu(), ctx=ctx)

# Iterations at which the learning rate decays. Loop variable renamed from
# `iter` -- the original shadowed the builtin iter().
lr_period = [int(step) for step in opt.lr_decay_iter.split(",")]
lr_scheduler = IterLRScheduler(mode=opt.lr_mode, baselr=opt.lr, step=lr_period,
                               step_factor=opt.lr_decay, power=2,
                               niters=num_iterations, warmup_iters=opt.lr_warmup_iters)
# Example #2
        'num_iterations={}, lr_warmup_iters={}, loss_warmup_iters={}.'.format(
            num_iterations, lr_warmup_iters, loss_warmup_iters))
    return num_iterations, lr_warmup_iters, loss_warmup_iters


# Resolve compute contexts from the comma-separated GPU id string.
ctx = [mx.gpu(int(gpu_id)) for gpu_id in opt.ctx.split(",")]

# Core schedule hyper-parameters, straight from the CLI options.
batch_size = opt.batch_size
num_iterations = opt.niters
lr_warmup_iters = opt.lr_warmup_iters
loss_warmup_iters = opt.loss_warmup_iters

# Scale/margin settings (presumably for a margin-softmax loss -- confirm).
margin_s, margin_m = opt.margin_s, opt.margin_m

train_set = get_recognition_dataset(opt.dataset, transform=transform_train)
# 'discard' drops the trailing partial batch so batch shape stays fixed.
train_data = DataLoader(
    train_set, batch_size, shuffle=True,
    num_workers=opt.num_workers, last_batch='discard')
batch_generator = inf_train_gen(train_data)

if num_iterations == 0:
    # Auto setting. You should have a large batch size to enable this
    # (512 or larger is recommended).
    # Epochs 25, loss warm up 35%, lr warm up 5%, mixup iters 90%.
    num_iterations, lr_warmup_iters, loss_warmup_iters = auto_train_setting(
        train_data._dataset, epochs=opt.epochs)

targets = opt.target
val_sets = [
    logger.info('use %d gpus' % num_gpu)
    logger.info(ctx)
else:
    ctx = [mx.cpu()]
    logger.info('use cpu')
    logger.info(ctx)

# Effective global batch size: per-device batch times GPU count, falling
# back to the raw option value when running on CPU (num_gpu == 0).
batch_size = (opt.batch_size * num_gpu) if num_gpu > 0 else opt.batch_size
# LR decay epochs are given relative to the end of warm-up, so the
# warm-up length is added to each decay point.
lr_decay_epoch = [
    int(i) + opt.warmup_epochs for i in opt.lr_decay_epoch.split(',')
]

logger.info('loading training and validation data')
image_src_root = opt.data_dir
train_set = get_recognition_dataset("faces_emore",
                                    root=image_src_root,
                                    transform=transform_train)
train_data = DataLoader(train_set,
                        batch_size,
                        shuffle=True,
                        num_workers=opt.num_workers)

# targets = ['lfw', 'cfp_fp', 'agedb_30']
# str.split already returns a list -- the previous identity comprehension
# ([i for i in ...]) made a redundant copy.
targets = opt.validation_targets.split(',')
val_sets = [
    get_recognition_dataset(name,
                            root=os.path.join(image_src_root, 'faces_emore'),
                            transform=transform_test) for name in targets
]
val_datas = [
    DataLoader(dataset, batch_size, num_workers=opt.num_workers)
# Example #4
# Device and loader configuration.
ctx = [mx.gpu(gpu_id) for gpu_id in range(num_gpu)]
batch_size = 128 * num_gpu    # 128 samples per GPU
num_worker = 24
save_period = 500
iters = 200e3
# np.inf as the last step: a sentinel so the final stage never decays again
# (presumably -- confirm against the LR update loop).
lr_steps = [30e3, 60e3, 90e3, np.inf]

# Scale/margin settings (presumably margin-softmax) and embedding width.
scale = 50
margin = 0.5
embedding_size = 256

# SGD hyper-parameters.
lr = 0.001
momentum = 0.9
wd = 4e-5

train_set = get_recognition_dataset("emore", transform=transform_train)
train_data = DataLoader(train_set, batch_size, shuffle=True,
                        num_workers=num_worker)

# Verification targets and one loader per target dataset.
targets = ['lfw']
val_sets = [get_recognition_dataset(name, transform=transform_test)
            for name in targets]
val_datas = [DataLoader(ds, batch_size, num_workers=num_worker)
             for ds in val_sets]

net = get_mobile_facenet(train_set.num_classes,
# Example #5
# Training-run constants for this script.
epochs = 15
save_period = 3000       # checkpoint interval (presumably in iterations -- confirm)
warmup_epochs = 1

# Scale/margin settings (presumably for a margin-softmax loss -- confirm).
scale = 60
margin = 0.5

# SGD hyper-parameters.
lr = 0.1
momentum = 0.9
wd = 4e-5

# Flag passed into Transform (presumably selects float16 pipelines -- confirm).
use_float16 = False
trans = Transform(use_float16)

train_set = get_recognition_dataset(
    "faces_emore", transform=trans.transform_train)
# 'discard' drops the trailing partial batch so batch shape stays fixed.
train_data = DataLoader(
    train_set, batch_size, shuffle=True,
    num_workers=num_worker, last_batch="discard")

# One validation loader per verification target.
targets = ['lfw']
val_sets = [
    get_recognition_dataset(target, transform=trans.transform_test)
    for target in targets
]
val_datas = [DataLoader(ds, batch_size, num_workers=num_worker)
             for ds in val_sets]
# Example #6
    if isf:
        data = nd.transpose(data, (2, 0, 1)).astype('float32')
        flip_data = nd.transpose(flip_data, (2, 0, 1)).astype('float32')
        return data, flip_data
    return transform_test(data), transform_test(flip_data)


# Where to write the exported model: next to the params file unless an
# explicit export path was given on the command line.
if opt.export_path == '':
    export_path = os.path.dirname(opt.model_params)
else:
    export_path = opt.export_path

ctx = [mx.gpu(int(dev)) for dev in opt.ctx.split(",")]

batch_size = opt.batch_size

# One verification loader per comma-separated target name;
# last_batch='keep' retains the final partial batch so no sample is dropped.
targets = opt.target
val_sets = [get_recognition_dataset(name, transform=transform_test_flip)
            for name in targets.split(",")]
val_datas = [DataLoader(ds, batch_size, last_batch='keep')
             for ds in val_sets]

# Build the backbone without the classification head, cast to the requested
# dtype, then load the trained weights (extra params in the file ignored).
test_net = get_model(opt.model, need_cls_layer=False)
test_net.cast(opt.dtype)
test_net.load_parameters(opt.model_params, ctx=ctx, ignore_extra=True)


def validate(nfolds=10):
    metric = FaceVerification(nfolds)
    metric_flip = FaceVerification(nfolds)
    for loader, name in zip(val_datas, targets.split(",")):