Code Example #1
def denormalize(self, model=settings.MODEL):
    if model == "mlp" or model == "test":
        self.images_outer_flat = denormalize_data(self.images_outer_flat)
        self.images_inner_flat = denormalize_data(self.images_inner_flat)
    elif model == "conv_mlp":
        self.images_outer2d = denormalize_data(self.images_outer2d)
        self.images_inner_flat = denormalize_data(self.images_inner_flat)
    elif model == "conv_deconv" or model == "vgg16" or model == "lasagne_conv_deconv":
        self.images_outer2d = denormalize_data(self.images_outer2d)
        self.images_inner2d = denormalize_data(self.images_inner2d)
    elif model == "dcgan" or model == "wgan" or model == "lsgan":
        self.images = denormalize_data(self.images)
        self.images_inner2d = denormalize_data(self.images_inner2d)
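All of these snippets call a project-specific `denormalize_data` helper that is not shown on this page. As a point of reference only, here is a minimal sketch of such a helper, assuming it simply inverts min-max scaling (which is what the `minimum=`/`maximum=` keyword arguments in Example #2 below suggest); it is not the original projects' implementation:

def denormalize_data(data, minimum=0.0, maximum=1.0):
    # Hypothetical helper: maps data normalized to [0, 1] back to the
    # original [minimum, maximum] range. Works elementwise for NumPy
    # arrays and torch tensors alike.
    return data * (maximum - minimum) + minimum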
Code Example #2
input_data[:, :, 2] = paw_norm

models = [load_model_from_json(model_filename)]

output_pred_test = [model.predict(input_data) for model in models]
output_pred_test = sum(output_pred_test) / len(output_pred_test)

err_r = []
err_c = []
err_pmus = []

# R_hat = np.average([denormalize_data(output_pred_test[i, 0], minimum=min_resistances, maximum=max_resistances) for i in range(num_examples)])
# C_hat = np.average([denormalize_data(output_pred_test[i, 1], minimum= min_capacitances, maximum= max_capacitances) for i in range(num_examples)])

R_hat = denormalize_data(output_pred_test[0, 0],
                         minimum=min_resistances,
                         maximum=max_resistances)
C_hat = denormalize_data(output_pred_test[0, 1],
                         minimum=min_capacitances,
                         maximum=max_capacitances)
alpha = 0.2

rr = min(RR)
fs = max(Fs)
time = np.arange(0, np.floor(180.0 / rr * fs) + 1, 1) / fs

err_pmus_hat = []
err_nmsre = []
for i in range(num_examples - 1):
    # R_hat = alpha*denormalize_data(output_pred_test[i, 0], minimum=min_resistances, maximum=max_resistances) + (1-alpha)*R_hat
    # C_hat = alpha*denormalize_data(output_pred_test[i, 1], minimum= min_capacitances, maximum= max_capacitances) + (1-alpha)*C_hat
Code Example #3
# TEST
#######################################################################################################################
"""
model.eval()
logs_test = defaultdict(float)
with torch.no_grad():        
    for x,y in test_loader:
        if torch.cuda.is_available():
            x = x.to(device)
            y = y.to(device)
        else:
            x = x
            y = y
        y_pred, _ = model(x)
        
        y_pred_dnorm = denormalize_data(y_pred.view(-1, opt.n_inp, opt.n_points).cpu(), min_value, max_value)
        y_dnorm = denormalize_data(y.view(-1, opt.n_inp, opt.n_points).cpu(), min_value, max_value)
        
        loss_test = loss_fn(y_pred_dnorm, y_dnorm)
        
        logs_test['mse'] = loss_test.item()
        logs_test['rmse'] = np.sqrt(loss_test.item())
        logs_test['bias'] = bias(y_pred_dnorm, y_dnorm)
        logs_test['err-rel'] = rel_error(y_pred_dnorm, y_dnorm)
        
        logger.log('test', logs_test)
        
print("\n\n================================================")
print(" *  Test MSE: ", logs_test['mse'],
      "\n *  Test RMSE: ", logs_test['rmse'],
      "\n *  Test Bias: ", logs_test['bias'],
Code Example #4
            x_space = x_space
            x_exo = x_exo
            y = y
        y_time = evaluate_temp_att(temporal_encoder, temporal_decoder, x_time,
                                   opt.n_out_sp, device)
        y_space, _ = spatial_model(x_space)
        x = torch.cat((y_time.unsqueeze(2), y_space.squeeze().view(
            -1, opt.n_out_sp, opt.n_points), x_exo),
                      dim=2).view(-1, inputs)
        x = torch.cat(
            (x, x_space[:, -opt.n_ar:].view(-1, opt.n_ar * opt.n_points)),
            dim=1)
        y_pred = model(x).view(-1, opt.n_out_sp, opt.dim_x, opt.dim_y)

        y_dnorm = denormalize_data(
            y.view(-1, opt.n_out_sp, opt.n_points).cpu(), min_value_space,
            max_value_space)
        y_pred_dnorm = denormalize_data(
            y_pred.view(-1, opt.n_out_sp, opt.n_points).cpu(), min_value_space,
            max_value_space)

        loss_test = loss_fn(y_pred_dnorm, y_dnorm)

        logs_test['mse'] = loss_test.item()
        logs_test['rmse'] = np.sqrt(loss_test.item())
        logs_test['bias'] = bias(y_pred_dnorm, y_dnorm)
        logs_test['err-rel'] = rel_error(y_pred_dnorm, y_dnorm)

        logger.log('test', logs_test)

print("\n\n================================================")
Code Example #5
    def train(self,
              dataset,
              num_epochs=1000,
              epochsize=50,
              batchsize=64,
              initial_eta=0.00005,
              architecture=1):
        import lasagne
        import theano.tensor as T
        from theano import shared, function

        # Load the dataset
        log("Fetching data...")
        X_train, X_test, y_train, y_test, ind_train, ind_test = dataset.return_train_data(
        )

        # Prepare Theano variables for inputs and targets
        noise_var = T.matrix('noise')
        input_var = T.tensor4('inputs')

        # Create neural network model
        log("Building model and compiling functions...")
        generator = self.build_generator(noise_var)
        critic = self.build_critic(input_var, architecture)

        # Create expression for passing real data through the critic
        real_out = lasagne.layers.get_output(critic)
        # Create expression for passing fake data through the critic
        fake_out = lasagne.layers.get_output(
            critic, lasagne.layers.get_output(generator))

        # Create loss expressions to be minimized
        # a, b, c = -1, 1, 0  # Equation (8) in the paper
        a, b, c = 0, 1, 1  # Equation (9) in the paper
        generator_loss = lasagne.objectives.squared_error(fake_out, c).mean()
        critic_loss = (lasagne.objectives.squared_error(real_out, b).mean() +
                       lasagne.objectives.squared_error(fake_out, a).mean())
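        # For reference, the least-squares GAN objectives from the LSGAN paper,
        # with labels a (fake), b (real) and c (the value the generator wants
        # the critic to assign to fakes), are:
        #   min_D  1/2 * E_x[(D(x) - b)^2] + 1/2 * E_z[(D(G(z)) - a)^2]
        #   min_G  1/2 * E_z[(D(G(z)) - c)^2]
        # The constant 1/2 factors are omitted here, which does not change the optima.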

        # Create update expressions for training
        generator_params = lasagne.layers.get_all_params(generator,
                                                         trainable=True)
        critic_params = lasagne.layers.get_all_params(critic, trainable=True)
        eta = shared(lasagne.utils.floatX(initial_eta))
        #generator_updates = lasagne.updates.rmsprop(generator_loss, generator_params, learning_rate=eta)
        #critic_updates = lasagne.updates.rmsprop(critic_loss, critic_params, learning_rate=eta)
        generator_updates = lasagne.updates.adam(generator_loss,
                                                 generator_params,
                                                 learning_rate=eta,
                                                 beta1=0.75)
        critic_updates = lasagne.updates.adam(critic_loss,
                                              critic_params,
                                              learning_rate=eta,
                                              beta1=0.75)

        # Instantiate a symbolic noise generator to use for training
        from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
        srng = RandomStreams(seed=np.random.randint(2147462579, size=6))
        noise = srng.uniform((batchsize, 100))

        # Compile functions performing a training step on a mini-batch (according
        # to the updates dictionary) and returning the corresponding score:
        generator_train_fn = function([],
                                      generator_loss,
                                      givens={noise_var: noise},
                                      updates=generator_updates)
        critic_train_fn = function([input_var],
                                   critic_loss,
                                   givens={noise_var: noise},
                                   updates=critic_updates)

        # Compile another function generating some data
        gen_fn = function([noise_var],
                          lasagne.layers.get_output(generator,
                                                    deterministic=True))

        # Finally, launch the training loop.
        log("Starting training...")
        # We create an infinite supply of batches (as an iterable generator):
        batches = self.iterate_minibatches(X_train,
                                           y_train,
                                           batchsize,
                                           shuffle=True,
                                           forever=True)

        # We iterate over epochs:
        epoch_eta_threshold = num_epochs // 5
        generator_runs = 0
        mean_g_loss = 0
        mean_c_loss = 0
        for epoch in range(num_epochs):
            start_time = time.time()

            if self.check_stop_file():
                print_error("Detected a STOP file. Aborting experiment.")
                break

            # In each epoch, we do `epochsize` rounds of updates. Within each
            # round the number of critic and generator updates is adapted to the
            # running mean losses: the critic is trained more often while its
            # loss is small relative to the generator's, and the generator more
            # often while its loss is large relative to the critic's.
            critic_losses = []
            generator_losses = []
            for _ in range(epochsize):
                if mean_c_loss < 0.15:
                    critic_runs = 10
                elif mean_c_loss < mean_g_loss / 5.0:
                    critic_runs = 5
                else:
                    critic_runs = 1
                for _ in range(critic_runs):
                    batch = next(batches)
                    inputs, targets = batch
                    critic_losses.append(critic_train_fn(inputs))
                if mean_g_loss > mean_c_loss * 5.0:
                    generator_runs = 5
                else:
                    generator_runs = 3
                for _ in range(generator_runs):
                    generator_losses.append(generator_train_fn())

            # Then we print the results for this epoch:
            log("Epoch {} of {} took {:.3f}s".format(epoch + 1, num_epochs,
                                                     time.time() - start_time))
            mean_g_loss = np.mean(generator_losses)
            mean_c_loss = np.mean(critic_losses)
            log("  generator loss = {}".format(mean_g_loss))
            log("  critic loss    = {}".format(mean_c_loss))

            # Every few epochs, we plot some generated data, depending on the settings
            if epoch % settings.EPOCHS_PER_SAMPLES == 0:
                from utils import normalize_data, denormalize_data
                # Generate 100 images, which we will output in a 10x10 grid
                samples = np.array(
                    gen_fn(lasagne.utils.floatX(np.random.rand(10 * 10, 100))))
                samples = denormalize_data(samples)
                samples_path = os.path.join(
                    settings.EPOCHS_DIR,
                    'samples_epoch_{0:0>5}.png'.format(epoch + 1))
                try:
                    import PIL.Image as Image
                    Image.fromarray(
                        samples.reshape(10, 10, 3, 64, 64).transpose(
                            0, 3, 1, 4, 2).reshape(10 * 64, 10 * 64,
                                                   3)).save(samples_path)
                except ImportError as e:
                    print_warning(
                        "Cannot import module 'PIL.Image', which is necessary for the LSGAN to output its sample images. You should really install it!"
                    )

            # After the first fifth of the epochs, we start decaying the learning rate towards zero
            if epoch >= epoch_eta_threshold:
                progress = float(epoch -
                                 epoch_eta_threshold) / float(num_epochs)
                eta.set_value(
                    lasagne.utils.floatX(initial_eta *
                                         math.pow(1 - progress, 2)))
            # if epoch >= num_epochs // 2:
            #     progress = float(epoch) / num_epochs
            #     eta.set_value(lasagne.utils.floatX(initial_eta*2*(1 - progress)))

        # Dump the network weights to files so they can be reloaded later:
        np.savez(os.path.join(settings.MODELS_DIR, 'lsgan_gen.npz'),
                 *lasagne.layers.get_all_param_values(generator))
        np.savez(os.path.join(settings.MODELS_DIR, 'lsgan_crit.npz'),
                 *lasagne.layers.get_all_param_values(critic))
        #
        # And load them again later on like this:
        # with np.load('model.npz') as f:
        #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
        # lasagne.layers.set_all_param_values(network, param_values)

        self.generator = generator
        self.critic = critic
        self.generator_train_fn = generator_train_fn
        self.critic_train_fn = critic_train_fn
        self.gen_fn = gen_fn

        return generator, critic, gen_fn
Code Example #6
"""
#######################################################################################################################
# TEST
#######################################################################################################################
"""
encoder.eval()
decoder.eval()
logs_test = defaultdict(float)
with torch.no_grad():
    for x, y in test_loader:
        x = x.view(-1, opt.n_inp, opt.in_dim).to(device)
        y = y.to(device)

        y_pred = evaluate(encoder, decoder, x)

        y_pred_dnorm = denormalize_data(y_pred, min_value, max_value)
        y_dnorm = denormalize_data(y, min_value, max_value)

        loss_test = loss_fn(y_pred_dnorm, y_dnorm)

        logs_test['mse'] = loss_test.item()
        logs_test['rmse'] = np.sqrt(loss_test.item())
        logs_test['bias'] = bias(y_pred_dnorm, y_dnorm)
        logs_test['err-rel'] = rel_error(y_pred_dnorm, y_dnorm)

        logger.log('test', logs_test)

print("\n\n================================================")
print(" *  Test MSE: ", logs_test['mse'], "\n *  Test RMSE: ",
      logs_test['rmse'], "\n *  Test Bias: ", logs_test['bias'],
      "\n *  Test Rel-Err (%): ", logs_test['err-rel'])
Code Example #7
# utils.disp_images(np.concatenate([x_train[-5:], recon_pca[-5:], recon_lae[-5:]]), title="disp", cols=5, cmap='gray')
#
# plt.plot(history.epoch, history.history['mean_squared_error'])
# plt.plot(history.epoch, [mse_pca]*len(history.epoch))
# plt.show()
# plt.close()

# Run non-linear AutoEncoder
x_train_norm, train_mean, train_std = utils.normalize_data(x_train)
ae = NonLinearAutoEncoder(input_shape=(w, h))
# ae.load_weights()
ae.build_model(lr=0.0003)
ae.train(x_train_norm, epochs=100)
ae.save_weights()
result = ae.predict(x_train_norm[:5])
result = utils.denormalize_data(result, train_mean, train_std)
utils.disp_images(np.concatenate([x_train[:5], result]),
                  title="disp",
                  cols=5,
                  cmap='gray')

# Run classifier with frozen pretrained layers
# x_train_norm, train_mean, train_std = normalize_data(x_train)
# ae = NonLinearAutoEncoder(input_shape=(w, h), pcs=50)
# ae.load_weights()
# vecs = ae.extract_features(x_train_norm)
# plot_tsne(2, vecs[:300], y_train[:300])
# cp = Classifier(5, ae.encoder, freeze=True)
# cp.build_model()
# cp.train(x_train_norm, y_train, epochs=10)
#
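Example #7 above passes a mean and a standard deviation instead of a min/max pair, which suggests that this project's `utils` module uses z-score style scaling. A hedged sketch under that assumption (not the original `utils` code):

import numpy as np

def normalize_data(x):
    # Hypothetical z-score normalization returning the statistics needed
    # to undo it later, matching the call pattern in Example #7.
    mean, std = np.mean(x), np.std(x)
    return (x - mean) / std, mean, std

def denormalize_data(x, mean, std):
    # Inverse of the z-score normalization above.
    return x * std + mean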
Code Example #8
    def train(self, dataset):
        log("Fetching data...")
        X_train, X_val, y_train, y_val, ind_train, ind_val = dataset.return_train_data(
        )
        X_test, y_test = dataset.return_test_data()

        # Variance of the prediction can be maximized to obtain sharper images.
        # If this coefficient is set to 0, the loss is just the L2 loss.
        StdevCoef = 0

        # Prepare Theano variables for inputs and targets
        input_var = T.tensor4('inputs')
        target_var = T.tensor4('targets')

        # Create neural network model
        log("Building model and compiling functions...")
        self.build_network(input_var, target_var)

        # See if we can resume from previously saved model
        print_info("Looking for existing model to resume training from...")
        model_path = os.path.join(settings.MODELS_DIR,
                                  settings.EXP_NAME + ".npz")
        if self.load_model(model_path):
            print_positive(
                "Loaded saved model weights found on disk at: {}!".format(
                    model_path))
            print_positive("Resuming training from exisiting weights!")
        else:
            print_info(
                "Unable to find exisiting or load valid model file. Starting training from scratch!"
            )

        # Build loss function
        train_loss = self.build_loss(input_var, target_var)

        # Update expressions
        from theano import shared
        eta = shared(lasagne.utils.floatX(settings.LEARNING_RATE))
        params = lasagne.layers.get_all_params(self.network_out,
                                               trainable=True)
        updates = lasagne.updates.adam(train_loss, params, learning_rate=eta)

        # Train loss function
        train_fn = theano.function([input_var, target_var],
                                   train_loss,
                                   updates=updates)

        # Test/validation Loss expression (disable dropout and so on...)
        test_loss = self.build_loss(input_var, target_var, deterministic=True)

        # Validation loss function
        val_test_fn = theano.function([input_var, target_var], test_loss)

        # Predict function
        predict_fn = theano.function([input_var],
                                     lasagne.layers.get_output(
                                         self.network_out, deterministic=True))

        ##########################################
        # Finally, launch the training loop.
        ##########################################
        keyboard_interrupt = False
        completed_epochs = 0
        try:
            log("Starting training...")
            batch_size = settings.BATCH_SIZE
            best_val_loss = 1.0e30
            best_val_loss_epoch = -1
            for epoch in range(settings.NUM_EPOCHS):
                start_time = time.time()
                train_losses = []
                for batch in self.iterate_minibatches(X_train,
                                                      y_train,
                                                      batch_size,
                                                      shuffle=True):
                    inputs, targets = batch
                    train_losses.append(train_fn(inputs, targets))

                val_losses = []
                for batch in self.iterate_minibatches(X_val,
                                                      y_val,
                                                      batch_size,
                                                      shuffle=False):
                    inputs, targets = batch
                    val_losses.append(val_test_fn(inputs, targets))

                completed_epochs += 1

                # Print the results for this epoch
                mean_train_loss = np.mean(train_losses)
                mean_val_loss = np.mean(val_losses)
                log("Epoch {} of {} took {:.3f}s".format(
                    epoch + 1, settings.NUM_EPOCHS,
                    time.time() - start_time))
                log(" - training loss:    {:.6f}".format(mean_train_loss))
                log(" - validation loss:  {:.6f}".format(mean_val_loss))

                create_checkpoint = False
                STOP_FILE = False

                if self.check_stop_file():
                    STOP_FILE = True
                    create_checkpoint = True

                if epoch >= 8 and mean_val_loss < best_val_loss:
                    best_val_loss_epoch = epoch + 1
                    best_val_loss = mean_val_loss
                    create_checkpoint = True
                    print_positive(
                        "New best val loss = {:.6f}!!! Creating model checkpoint!"
                        .format(best_val_loss))
                elif (epoch + 1) % settings.EPOCHS_PER_CHECKPOINT == 0:
                    create_checkpoint = True
                    print_info(
                        "Time for model checkpoint (every {} epochs)...".
                        format(settings.EPOCHS_PER_CHECKPOINT))

                if create_checkpoint:
                    # Save checkpoint
                    model_checkpoint_filename = "model_checkpoint-val_loss.{:.6f}-epoch.{:0>3}.npz".format(
                        best_val_loss, epoch + 1)
                    model_checkpoint_path = os.path.join(
                        settings.CHECKPOINTS_DIR, model_checkpoint_filename)
                    print_info("Saving model checkpoint: {}".format(
                        model_checkpoint_path))
                    # Save model
                    self.save_model(model_checkpoint_path)

                # Save samples for this epoch
                if (epoch + 1) % settings.EPOCHS_PER_SAMPLES == 0:
                    num_samples = 100
                    num_rows = 10
                    num_cols = 10
                    samples = self.create_samples(X_val, y_val, batch_size,
                                                  num_samples, predict_fn)
                    samples = denormalize_data(samples)
                    samples_path = os.path.join(
                        settings.EPOCHS_DIR,
                        'samples_epoch_{0:0>5}.png'.format(epoch + 1))
                    print_info(
                        "Time for saving sample images (every {} epochs)... ".
                        format(settings.EPOCHS_PER_SAMPLES) +
                        "Saving {} sample images predicted validation dataset input images here: {}"
                        .format(num_samples, samples_path))
                    try:
                        import PIL.Image as Image
                        Image.fromarray(
                            samples.reshape(
                                num_rows, num_cols, 3, 32, 32).transpose(
                                    0, 3, 1, 4,
                                    2).reshape(num_rows * 32, num_cols * 32,
                                               3)).save(samples_path)
                    except ImportError as e:
                        print_warning(
                            "Cannot import module 'PIL.Image', which is necessary for the Lasagne model to output its sample images. You should really install it!"
                        )

                if STOP_FILE:
                    print_critical(
                        "STOP file found. Ending training here! Still producing results..."
                    )
                    break
        except KeyboardInterrupt:
            print_critical("Training interrupted by KeyboardInterrupt (^C)!")
            print_info(
                "Before shutting down, attempt to saving valid checkpoint, produce preliminary results, and so on."
            )
            keyboard_interrupt = True

        print_info("Training complete!")

        # Save model
        self.save_model(
            os.path.join(settings.MODELS_DIR, settings.EXP_NAME + ".npz"))

        # Compute 'num_images' predictions to visualize and print the test error
        num_images = 100
        test_losses = []
        num_iter = 0
        num_predictions = min(X_test.shape[0], num_images)
        indices = np.arange(X_test.shape[0])
        if num_images < X_test.shape[0]:
            indices = indices[0:num_images]
        X_test_small = X_test[indices, :, :, :]
        y_test_small = y_test[indices, :, :, :]
        preds = np.zeros((num_predictions, 3, 32, 32))
        for batch in self.iterate_minibatches(X_test_small,
                                              y_test_small,
                                              batch_size,
                                              shuffle=False):
            inputs, targets = batch
            preds[num_iter * batch_size:(num_iter + 1) *
                  batch_size] = predict_fn(inputs)
            test_losses.append(val_test_fn(inputs, targets))
            num_iter += 1
        log("Final results:")
        log(" - test loss:        {:.6f}".format(np.mean(test_losses)))

        # Save predictions and create HTML page to visualize them
        test_images_original = np.copy(normalize_data(dataset.test_images))
        denormalize_and_save_jpg_results(preds, X_test, y_test,
                                         test_images_original, num_images)
        create_html_results_page(num_images)

        # Save model's performance
        path_model_score = os.path.join(settings.PERF_DIR, "score.txt")
        print_info("Saving performance to file '{}'".format(path_model_score))
        metric = settings.LOSS_FUNCTION
        log("")
        log("Performance statistics")
        log("----------------------")
        log(" * Model = {}".format(settings.MODEL))
        log(" * Number of training epochs = {0}".format(completed_epochs))
        log(" * Final training score (metric: {0: >6})    = {1:.5f}".format(
            metric, np.mean(train_losses)))
        log(" * Final validation score  (metric: {0: >6}) = {1:.5f}".format(
            metric, np.mean(val_losses)))
        log(" * Best validation score   (metric: {0: >6}) = {1:.5f}".format(
            metric, best_val_loss))
        log(" * Epoch for best validation score           = {}".format(
            best_val_loss_epoch))
        log(" * Testing dataset  (metric: {0: >6})        = {1:.5f}".format(
            metric, np.mean(test_losses)))
        log("")
        with open(path_model_score, "w") as fd:
            fd.write("Performance statistics\n")
            fd.write("----------------------\n")
            fd.write("Model = {}\n".format(settings.MODEL))
            fd.write(
                "Number of training epochs = {0}\n".format(completed_epochs))
            fd.write(
                "Final training score (metric: {0: >6})    = {1:.5f}\n".format(
                    metric, np.mean(train_losses)))
            fd.write(
                "Final validation score  (metric: {0: >6}) = {1:.5f}\n".format(
                    metric, np.mean(val_losses)))
            fd.write(
                "Best validation score   (metric: {0: >6}) = {1:.5f}\n".format(
                    metric, best_val_loss))
            fd.write("Epoch for best validation score           = {}\n".format(
                best_val_loss_epoch))
            fd.write(
                "Testing dataset  (metric: {0: >6})        = {1:.5f}\n".format(
                    metric, np.mean(test_losses)))

        if keyboard_interrupt:
            raise KeyboardInterrupt