Example #1
# Imports assumed from the surrounding script: `cmp` is the project's own
# helper module and `options` comes from its argument parsing, neither of
# which is shown in this excerpt.
from visdom import Visdom
import torch

##################### Setup visdom visualization ###############################
viz = None  # placeholder; replaced with a Visdom handle below if requested
if options.show_visdom:
    try:
        viz = Visdom()
    except Exception as e:
        print("The visdom experienced an exception while running: {}\n"
              "The demo displays up-to-date functionality with the GitHub "
              "version, which may not yet be pushed to pip. Please upgrade "
              "using `pip install -e .` or `easy_install .`\n"
              "If this does not resolve the problem, please open an issue on "
              "our GitHub.".format(repr(e)))

########################## Choose architecture #################################
cmp.DEBUGprint("Loading model. \n", options.debug)
model = cmp.init_model(options)

########################### Environment setup ##################################

# Resume from checkpoint option
cmp.DEBUGprint("Loading previous state. \n", options.debug)
if options.run_at_checkpoint:
    model.load_state_dict(torch.load(options.weight_addr))

# Setup log
cmp.DEBUGprint("Setup log. \n", options.debug)
log = open(options.log_addr, "w+")

# Basic runner stuff
cmp.DEBUGprint("Initialize runner. \n", options.debug)
Example #2
# Imports assumed from the surrounding script: Splitter, cmp, train and test
# are the project's own modules/functions and are not shown in this excerpt.
from collections import OrderedDict
from datetime import datetime

import torch
import torch.nn as nn
import torch.optim as optim


def objective(trial, options):
    dataset_generator = Splitter(
        options.data_addr,
        options.split,
        options.seed,
        pretrained_model=options.pretrained_model,
        debug=options.debug,
        positive_case_percent=options.positive_case_percent)

    train_dataset = dataset_generator.generate_training_data()
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=options.batch_size,
                                               shuffle=options.shuffle)

    test_dataset = dataset_generator.generate_validation_data()
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1)

    criterion = cmp.criterion_selection(options.criterion)

    ####################### Hyperparameter autotuning session ######################
    '''
    Remember to comment out the hyperparameters that you are not tuning.
    Each hyperparameter also has a fixed value so that you can comment out the
    Optuna version without disrupting the flow. Option 1 is the fixed value;
    Option 2 is the autotuned version.

    Available hyperparameters so far are:
        Final layers of the model (model.<final layer name>)
        Learning rate (lr)
        Momentum (momentum)
        Optimizer (optimizer)
    '''

    #------------------------------Final layers--------------------------------#
    model = cmp.init_model(options)
    # Option 1:
    # Nothing here

    # Option 2:
    # num_layers = trial.suggest_int('num_layers',1,3)
    # layer_list = []
    # prev_num_neurons = 512
    # for i in range(num_layers):
    #     num_neurons = int(trial.suggest_loguniform('num_neurons_{}'.format(i),4,prev_num_neurons))
    #     layer_list.append(('fc_{}'.format(i), nn.Linear(prev_num_neurons,num_neurons)))
    #     layer_list.append(('relu_{}'.format(i), nn.ReLU()))
    #     prev_num_neurons = num_neurons
    #
    # layer_list.append(('fc_last', nn.Linear(in_features=num_neurons, out_features=2)))
    # layer_list.append(('output', nn.Softmax(dim=1)))
    # fc = nn.Sequential(OrderedDict(layer_list))
    # model.fc = fc

    #--------------------------------------------------------------------------#

    #-------------------------------Learning rate-------------------------------#
    # Option 1:
    # lr = options.lr_fix

    # Option 2:
    lr = trial.suggest_loguniform('lr', options.lr_lower, options.lr_upper)
    #--------------------------------------------------------------------------#

    #-------------------------------Momentum-----------------------------------#
    # Option 1:
    # momentum = options.momentum_fix

    # Option 2:
    momentum = trial.suggest_uniform('momentum', options.momentum_lower,
                                     options.momentum_upper)
    #--------------------------------------------------------------------------#

    #--------------------------------Optimizer---------------------------------#
    optimizer_list = {
        'SGD': optim.SGD,
        'RMSprop': optim.RMSprop,
        'Adam': optim.Adam
    }

    # Option 1:
    # optimizer = optimizer_list['Adam'](model.parameters(), lr=lr)

    # Option 2:
    optimizer_name = trial.suggest_categorical('optimizer',
                                               ['SGD', 'RMSprop', 'Adam'])
    if optimizer_name in ('SGD', 'RMSprop'):
        # SGD and RMSprop take a momentum argument; Adam does not.
        optimizer = optimizer_list[optimizer_name](model.parameters(),
                                                   momentum=momentum,
                                                   lr=lr)
    elif optimizer_name == 'Adam':
        optimizer = optimizer_list['Adam'](model.parameters(), lr=lr)
    else:
        raise ValueError("Unknown optimizer '{}': please check optimizer_list "
                         "and optimizer_name".format(optimizer_name))
    #--------------------------------------------------------------------------#


################################################################################

    # Timestamp used to tag the checkpoint filenames below
    timestamp = datetime.now()
    for epoch in range(options.epoch):
        train(model, train_loader, criterion, optimizer, epoch, options)
        test_score = test(model, test_loader, options)

        if options.checkpoint:
            if epoch % options.save_freq == 0:
                print('Saving model!')
                torch.save(
                    model.state_dict(), options.weight_addr + str(timestamp) +
                    "_epoch_" + str(epoch) + "_score_" + str(test_score))

    return test_score
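
# The objective above takes an extra `options` argument, so it has to be
# wrapped before being handed to Optuna. A minimal sketch (not part of the
# original) of how a study might be launched with it; `direction='maximize'`
# assumes a higher test_score is better, and `n_trials=50` is illustrative.
import functools

import optuna

# `options` is assumed to have been parsed earlier in the original script.
study = optuna.create_study(direction='maximize',
                            study_name='hyperparameter_search')
study.optimize(functools.partial(objective, options=options), n_trials=50)

print('Best score: {}'.format(study.best_value))
print('Best hyperparameters: {}'.format(study.best_params))

# Note: newer Optuna releases prefer `trial.suggest_float(name, low, high,
# log=True)` over `suggest_loguniform`, and `trial.suggest_float(name, low,
# high)` over `suggest_uniform`; the calls used above still work but are
# deprecated in Optuna 3.x.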