Example #1
def get_tuned_learning_rate(model, dataset, optimizer):
    """
    Returns the learning rate for the given experiment once the tuning has been made.
    :param model: 'vgg', 'vggnonorm', 'resnet' or 'lstm'
    :param dataset: 'cifar10' or 'cifar100'
    :param optimizer: 'sgdm', 'ssgd' or 'sssgd'
    :return: lr
    """
    name = base_folder + get_experiment_name(model, dataset, optimizer)
    lr_space = load_obj('./results/' + name + 'lr_space')
    losses = load_obj('./results/' + name + 'losses')
    # Pick the rate whose tuning run reached the lowest test loss (NaNs ignored).
    return lr_space[np.nanargmin(losses)]
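
These examples rely on load_obj/save_obj helpers that are not shown. A minimal sketch of what they might look like, assuming plain pickle-based serialization (the actual helpers in the repo may differ):

import pickle

def save_obj(obj, path):
    # Hypothetical pickle-based writer; a sketch only, the real helper may differ.
    with open(path + '.pkl', 'wb') as f:
        pickle.dump(obj, f)

def load_obj(path):
    # Hypothetical counterpart reader for save_obj.
    with open(path + '.pkl', 'rb') as f:
        return pickle.load(f)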
Example #2
def run_experiment(model,
                   dataset,
                   optimizer,
                   prefix='',
                   batch_size=128,
                   num_exp=3,
                   start_at=1):

    base_name = base_folder + 'batchsize-' + str(batch_size) + '/' + prefix \
                + get_experiment_name(model, dataset, optimizer)

    hyperparameters = get_experiment_hyperparameters(model, dataset, optimizer)
    momentum = hyperparameters['momentum']
    weight_decay = hyperparameters['weight_decay']
    comp = hyperparameters['comp']
    noscale = hyperparameters['noscale']
    memory = hyperparameters['memory']
    mnorm = hyperparameters['mnorm']
    mback = hyperparameters['mback']

    num_epochs = [100, 50, 50]  # three phases, 200 epochs total; lr is divided by 10 between phases

    for exp_index in range(start_at, num_exp + start_at):
        resume = False
        name = base_name + str(exp_index) + '/'

        # load the tuned learning rate, scaled linearly with the batch size
        # (128 is the reference batch size used during tuning)
        lr = get_tuned_learning_rate(model, dataset,
                                     optimizer) * batch_size / 128
        print('Tuned lr : {}'.format(lr))

        # train model
        for epochs in num_epochs:
            construct_and_train(name=name,
                                dataset=dataset,
                                model=model,
                                resume=resume,
                                epochs=epochs,
                                lr=lr,
                                batch_size=batch_size,
                                momentum=momentum,
                                weight_decay=weight_decay,
                                comp=comp,
                                noscale=noscale,
                                memory=memory,
                                mnorm=mnorm,
                                mback=mback)
            resume = True
            lr /= 10
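
Two conventions are at work in this loop: the tuned rate (found at batch size 128) is scaled linearly with the batch size, and the rate is divided by 10 after each of the three training phases. A standalone sketch of the resulting schedule, assuming an illustrative tuned rate of 0.1 (not a value from the experiments):

# Illustrative schedule; 0.1 is an assumed tuned rate at batch size 128.
base_lr, batch_size = 0.1, 256
lr = base_lr * batch_size / 128  # linear scaling rule -> 0.2
for phase, epochs in enumerate([100, 50, 50]):
    print('phase {}: {} epochs at lr = {}'.format(phase, epochs, lr))
    lr /= 10  # decay between phases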
Example #3
def tune_learning_rate(model, dataset, optimizer, base_name=None):
    """
    Tune the learning rate for a given experiment (batch size 128)
    The results are saved in base_folder + experiment_name if base_name is None,
    or in base_folder + base_name otherwise
    :param model: 'vgg', 'vggnonorm', 'resnet' or 'lstm'
    :param dataset: 'cifar10' or 'cifar100'
    :param optimizer: 'sgdm', 'ssgd' or 'sssgd'
    :param base_name: If you want to have a custom name for the saving folder
    """
    model = model.lower()
    dataset = dataset.lower()
    optimizer = optimizer.lower()

    if base_name is None:
        base_name = base_folder + get_experiment_name(model, dataset, optimizer)
    else:
        base_name = base_folder + base_name

    hyperparameters = get_experiment_hyperparameters(model, dataset, optimizer)
    momentum = hyperparameters['momentum']
    weight_decay = hyperparameters['weight_decay']
    comp = hyperparameters['comp']
    noscale = hyperparameters['noscale']
    memory = hyperparameters['memory']
    mnorm = hyperparameters['mnorm']
    mback = hyperparameters['mback']

    losses = []
    # lr_space = np.logspace(-5, 1, 9)
    # nine logarithmically spaced candidates between 1e-7 and 1e-1
    lr_space = np.logspace(-7, -1, 9)
    for index, lr in enumerate(lr_space):
        name = base_name + 'lr' + str(index)
        # num_epochs and batch_size are module-level settings here
        # (tuning runs at batch size 128, as noted in the docstring)
        res = construct_and_train(name=name, dataset=dataset, model=model, resume=False, epochs=num_epochs,
                                  lr=lr, batch_size=batch_size, momentum=momentum, weight_decay=weight_decay,
                                  comp=comp, noscale=noscale, memory=memory, mnorm=mnorm, mback=mback)
        best_loss = np.nanmin(res['test_losses'])
        losses.append(best_loss)
    losses = np.array(losses)
    save_obj(lr_space, './results/' + base_name + 'lr_space')
    save_obj(losses, './results/' + base_name + 'losses')
    with open('./results/' + base_name + 'README.md', 'w') as file:
        # the escaped backslash before \n forces a Markdown line break
        file.write('Best learning rate : {}\\\n'.format(lr_space[np.nanargmin(losses)]))
        file.write('Best loss reached over {0} epochs : {1}\n'.format(num_epochs, np.nanmin(losses)))
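
The tuning loop above grid-searches nine logarithmically spaced rates. A quick standalone check of the candidate values:

import numpy as np

# Candidates produced by np.logspace(-7, -1, 9):
# roughly 1e-7, 5.6e-7, 3.2e-6, 1.8e-5, 1e-4, 5.6e-4, 3.2e-3, 1.8e-2, 1e-1
print(np.logspace(-7, -1, 9))
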
def run_experiment(model, dataset, optimizer, prefix='', batch_size=128):
    base_name = base_folder + 'batchsize-' + str(batch_size) + '/' \
                + prefix + get_experiment_name(model, dataset, optimizer)

    hyperparameters = get_experiment_hyperparameters(model, dataset, optimizer)
    momentum = hyperparameters['momentum']
    weight_decay = hyperparameters['weight_decay']
    comp = hyperparameters['comp']
    noscale = hyperparameters['noscale']
    memory = hyperparameters['memory']
    mnorm = hyperparameters['mnorm']
    mback = hyperparameters['mback']
    norm_ratio = True

    num_epochs = [100, 50, 50]

    resume = False
    name = base_name + '/'
    lr = get_tuned_learning_rate(model, dataset, optimizer) * batch_size / 128
    print('Tuned lr : {}'.format(lr))
    for epochs in num_epochs:
        construct_and_train(name=name,
                            dataset=dataset,
                            model=model,
                            resume=resume,
                            epochs=epochs,
                            lr=lr,
                            batch_size=batch_size,
                            momentum=momentum,
                            weight_decay=weight_decay,
                            comp=comp,
                            noscale=noscale,
                            memory=memory,
                            mnorm=mnorm,
                            mback=mback,
                            norm_ratio=norm_ratio)
        resume = True
        lr /= 10
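
Putting the pieces together, a hedged usage sketch (it assumes the module-level globals base_folder, num_epochs and batch_size used by the tuning code are configured elsewhere in the repo):

# Sketch of the workflow implied by the examples above.
for opt in ['sgdm', 'ssgd', 'sssgd']:
    tune_learning_rate('resnet', 'cifar10', opt)              # grid-search lr at batch size 128
    run_experiment('resnet', 'cifar10', opt, batch_size=128)  # train with the tuned, scaled lr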