import logging

import tensorflow as tf

# get_nas_model, get_cifar10, and Trainer are project-local helpers.

def train():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    )

    logging.info('begin training...')

    model = get_nas_model('mobilenetv2-b0', blocks_type='mix', load_path='')
    logging.debug('built NAS model')

    data = get_cifar10()

    # Restrict both splits to 500 examples (quick debugging run).
    data['train_ds'] = data['train_ds'].take(500)
    data['train_num'] = 500
    data['val_ds'] = data['val_ds'].take(500)
    data['val_num'] = 500

    trainer = Trainer(model,
                      data,
                      optimizer=tf.keras.optimizers.Adam(1e-3),
                      flops_constant=100)
    logging.debug('built trainer')

    trainer.train(90, 128)  # presumably epochs=90, batch_size=128
Example #2
# `data` here is the project's data module (providing get_cifar10,
# get_image_net, and get_loader).
def get_data_loader(config):
    if config.dataset.type == 'CIFAR10':
        dataset = data.get_cifar10(config.dataset)
    elif config.dataset.type == 'ImageNet':
        dataset = data.get_image_net(config.dataset)
    else:
        raise KeyError(f'invalid dataset type: {config.dataset.type}')

    train_loader, val_loader = data.get_loader(config.dataset, config.dataset.batch_size,
                                               config.distributed.enable, *dataset)

    # Derive the scheduler horizon from the total number of training steps.
    max_iter = len(train_loader) * config.training.epoch
    config.lr_scheduler.max_iter = max_iter
    if config.get('arch_scheduler', None) is not None:
        # Architecture training starts halfway through, mirroring the
        # warmup length of the architecture scheduler.
        config.arch_scheduler.max_iter = max_iter
        config.arch.start_train = max_iter // 2
        config.arch_scheduler.warmup_step = max_iter // 2

    return train_loader, val_loader
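
# Usage sketch (an assumption, not from the original repo): get_data_loader
# expects a nested, attribute-accessible config that also supports
# dict-style .get(), e.g. an addict.Dict. The values below are placeholders.
from addict import Dict

config = Dict()
config.dataset.type = 'CIFAR10'
config.dataset.batch_size = 128
config.distributed.enable = False
config.training.epoch = 90
# lr_scheduler.max_iter (and the arch_scheduler fields, when present)
# are filled in by get_data_loader itself.

train_loader, val_loader = get_data_loader(config)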
Example #3
import logging
import math

import tensorflow as tf

# get_nas_model, get_cifar10, and Trainer are project-local helpers.

def train():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')

    logging.info('begin training...')

    model = get_nas_model('mobilenetv2-fairnas', blocks_type='mix', load_path='', num_classes=10)
    logging.debug('built NAS model')

    data = get_cifar10()
    
    # Alternative optimizer, kept for reference:
    # opt = tf.keras.optimizers.SGD(learning_rate=0.002, momentum=0.9, nesterov=True)
    opt = tf.keras.optimizers.Adam(learning_rate=0.001)
    # params_constant=math.inf leaves the parameter count unconstrained.
    trainer = Trainer(model, data, optimizer=opt, flops_constant=100, params_constant=math.inf)
    logging.debug('built trainer')

    trainer.train(90, 128)  # presumably epochs=90, batch_size=128
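
# Usage sketch (not from the original source): a standard entry point,
# assuming this snippet lives in its own module.
if __name__ == '__main__':
    train()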