Example #1
import math

import torch

# SimpleNet, PedestalDataset, split_dataset, and model_utils are
# project-specific helpers assumed to be importable in this module.


def main(params, config):
    data_pedestal = PedestalDataset(config)

    train_loader, validation_loader = split_dataset(data_pedestal,
                                                    params['batch_size'])

    if config['experiment']['load_model'] is not None:
        PATH = config['experiment']['load_model']
        checkpoint = torch.load(PATH)
        # Load Model
        net = SimpleNet(params, config)
        net.load_state_dict(checkpoint['model_state_dict'])

        # Rebuild the optimizer, then restore its state from the checkpoint;
        # the 0.0 learning rate here is a placeholder that load_state_dict
        # overwrites
        optimizer = model_utils.map_optimizer(params['optimizer'],
                                              net.parameters(), 0.0)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

        # Assign Loss Function
        loss_func = model_utils.map_loss_func(params['loss'])

        # Restore the last epoch and loss so retraining can resume from the checkpoint
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']

    else:
        net = SimpleNet(params, config)
        optimizer = model_utils.map_optimizer(params['optimizer'],
                                              net.parameters(),
                                              params['learning_rate'])
        loss_func = model_utils.map_loss_func(params['loss'])

    epochs = config['epochs']

    last_results = []
    metrics = {}
    for epoch in range(epochs):

        # TRAINING
        net.train()
        for batch in train_loader:
            optimizer.zero_grad()
            inputs, targets = batch['input'], batch['target']
            output = net(inputs)
            loss = loss_func(output, targets)
            loss.backward()
            optimizer.step()

        # VALIDATION (every fifth epoch)
        if epoch % 5 == 4:
            net.eval()
            max_error = 0.0

            with torch.no_grad():
                for batch in validation_loader:
                    inputs, targets = batch['input'], batch['target']
                    output = net(inputs)
                    mse = (torch.sum((output - targets) ** 2)
                           / (len(output) * params['batch_size']))
                    max_error = max(mse.item(), max_error)

            # Score the epoch by the negative log of the worst per-batch MSE
            # (higher is better)
            score = -math.log10(max_error)

        # Record the score over the final few epochs; assumes at least one
        # validation pass has already run, otherwise `score` is undefined
        if epoch > epochs - 5:
            last_results.append(score)

    # The reported metric is the worst (lowest) score among the final epochs
    final_score = min(last_results)
    metrics['default'] = final_score

    if config['experiment']['save_model'] is not None:
        PATH = config['experiment']['save_model']
        # Save model, optimizer, and training state so training can resume
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': net.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': loss
            }, PATH)

    return metrics
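
A minimal sketch of how this entry point might be invoked, assuming illustrative hyperparameters. The key names are taken from the code above; the concrete values, and the omission of any dataset-specific keys that PedestalDataset may expect, are assumptions:

if __name__ == '__main__':
    # Placeholder values, not tuned; 'adam' and 'mse' assume names that
    # model_utils.map_optimizer / map_loss_func understand
    params = {
        'batch_size': 32,
        'optimizer': 'adam',
        'learning_rate': 1e-3,
        'loss': 'mse',
    }
    config = {
        'epochs': 50,
        'experiment': {
            'load_model': None,  # or a path to a checkpoint to resume from
            'save_model': './results/checkpoint.pt',
        },
    }
    metrics = main(params, config)
    print(metrics['default'])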
Example #2
import matplotlib.pyplot as plt
import torch

# PedestalDataset, split_dataset, SimpleNet, model_utils, denormalize,
# and fitted_scale are project-specific helpers assumed to be importable
# in this module.


def main(params, config):
    dataset = PedestalDataset(config)

    train_loader, validation_loader = split_dataset(dataset,
                                                    params['batch_size'])

    if config['experiment']['load_model'] is not None:
        PATH = config['experiment']['load_model']
        checkpoint = torch.load(PATH)
        # Load Model
        net = SimpleNet(params, config)
        net.load_state_dict(checkpoint['model_state_dict'])

        # Rebuild the optimizer, then restore its state from the checkpoint;
        # the 0.0 learning rate here is a placeholder that load_state_dict
        # overwrites
        optimizer = model_utils.map_optimizer(params['optimizer'],
                                              net.parameters(), 0.0)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

        # Assign Loss Function
        loss_func = model_utils.map_loss_func(params['loss'])

        # Restore the last epoch and loss so retraining can resume from the checkpoint
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']

    else:
        net = SimpleNet(params, config)
        optimizer = model_utils.map_optimizer(params['optimizer'],
                                              net.parameters(),
                                              params['learning_rate'])
        loss_func = model_utils.map_loss_func(params['loss'])

    # Normalization constants recorded by the dataset
    target_norms = dataset.targets_norms
    input_norms = dataset.inputs_norms

    target_list = dataset.target_params
    input_list = dataset.input_params

    net.eval()

    outputs = []
    actual_array = []
    scaled_list = []

    save_path = config['experiment']['name']

    for batch in validation_loader:
        inputs, targets = batch['input'], batch['target']
        # Denormalize network predictions for each validation sample
        for val in inputs:
            output = net(val).detach().numpy()
            output = denormalize(output, target_list, target_norms)
            outputs.append(output[0])

        # Denormalize the ground-truth targets
        for tar in targets:
            denorm_targ = denormalize(tar.numpy(), target_list, target_norms)
            actual_array.append(denorm_targ[0])

    # For the density target, also evaluate the fitted scaling law on the
    # (denormalized) inputs so it can be plotted against the network
    if config['experiment']['target'] == 'density':
        for batch in validation_loader:
            inputs, targets = batch['input'], batch['target']
            for val in inputs:
                denorm_vals = denormalize(val.numpy(), input_list, input_norms)
                scaled_vals = fitted_scale(denorm_vals)
                scaled_list.append(scaled_vals)
        plt.scatter(actual_array, scaled_list, label='Scale Law')

    # Identity line (perfect prediction), network predictions, and legend
    plt.scatter(actual_array, actual_array, label='Actual')
    plt.scatter(actual_array, outputs, label='NN')
    plt.legend()
    plt.ylabel('Predicted')
    plt.xlabel('Actual Density Height')
    plt.ylim(0, 12)
    plt.title('Neural Network vs Scaling Law')
    plt.savefig('./results/' + save_path)
    plt.show()
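
Both examples rely on a `split_dataset` helper that is not shown. A plausible sketch, assuming a conventional torch.utils.data split; the 80/20 ratio and shuffling choices are assumptions, not the project's actual implementation:

from torch.utils.data import DataLoader, random_split


def split_dataset(dataset, batch_size, train_fraction=0.8):
    # Partition the dataset into train/validation subsets and wrap each
    # in a DataLoader; samples are dicts with 'input' and 'target' keys,
    # which the default collate function batches as-is
    n_train = int(len(dataset) * train_fraction)
    n_val = len(dataset) - n_train
    train_set, val_set = random_split(dataset, [n_train, n_val])
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    validation_loader = DataLoader(val_set, batch_size=batch_size)
    return train_loader, validation_loader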