Example No. 1
    def run(self, nb_epoch=10, batch_size=128, verbose=1):
        data = self.data
        model = self.model
        fig = self.fig
        history = self.fit(nb_epoch=nb_epoch,
                           batch_size=batch_size,
                           verbose=verbose)
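        # Evaluate on the held-out test set; score = [loss, accuracy]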
        score = model.evaluate(data.X_test, data.Y_test, verbose=0)
        print('Confusion Matrix')
        Y_test_pred = model.predict(data.X_test, verbose=0)
        y_test_pred = np.argmax(Y_test_pred, axis=1)
        print(metrics.confusion_matrix(data.y_test, y_test_pred))

        print('Test Score: ', score[0])
        print('Test Accuracy: ', score[1])

        suffix = sfile.unique_filename('datatime')
        foldname = 'output_' + suffix
        os.makedirs(foldname)
        plot.save_history_history('history_history.npy',
                                  history.history,
                                  fold=foldname)
        model.save_weights(os.path.join(foldname, 'dl_model.h5'))
        print('Output results are saved in', foldname)

        if fig:
            plt.figure(figsize=(12, 4))
            plt.subplot(1, 2, 1)
            plot.plot_acc(history)
            plt.subplot(1, 2, 2)
            plot.plot_loss(history)
            plt.show()
        self.history = history
        return foldname
Example No. 2
def train(num_fold):
    isBest = True
    data_frame = pd.read_csv(config.TRAIN_FACIAL_LANDMARKS_CSV_PATH,
                             header=None)

    data_frame = preprocess.customFacialLandmarks(data_frame,
                                                  config.CLASS_LABEL)

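    # All columns except the last are the landmark features; the last column is the class label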
    X = data_frame.iloc[:, :-1].copy().values
    Y = data_frame.iloc[:, -1].copy().values.reshape(-1, 1)
    X_train, X_test, Y_train, Y_test = return_split(X, Y)

    model = model_dispatcher.return_model(num_fold)
    Y_train_enc, Y_test_enc = conver_to_ohe(Y_train, Y_test)
    print(Y_train_enc.shape, Y_test_enc.shape)

    mc, reduce_lr = return_callbacks(num_fold, isBest)

    opt = return_opt('adam', learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(X_train,
                        Y_train_enc,
                        epochs=100,
                        validation_data=(X_test, Y_test_enc),
                        batch_size=config.BATCH_SIZE,
                        callbacks=[mc, reduce_lr])

    plot.plot_loss(history)
    plot.plot_accuracy(history)
Example No. 3
def main():
    x_train, y_train, x_test, y_test = data.mnist(one_hot=True)

    # Define Deep Neural Network structure (input_dim, num_of_nodes)
    layers = [[x_train.shape[1], 256], [256, 128], [128, 64]]

    # Initialize a deep neural network

    dnn = DNN(MODEL_FOLDER, os_slash, layers, params)

    pre_epochs = 100
    train_epochs = 100

    # Create auto-encoders and train them one by one by stacking them in the DNN
    pre_trained_weights = dnn.pre_train(x_train, pre_epochs)

    # Then use the pre-trained weights of these layers as initial weight values for the MLP
    history = dnn.train(x_train,
                        y_train,
                        train_epochs,
                        init_weights=pre_trained_weights)

    plot.plot_loss(history, loss_type='MSE')

    predicted, score = dnn.test(x_test, y_test)

    print("Test accuracy: ", score[1])

    dnn.model.save_weights(MODEL_FOLDER + os_slash + "final_weights.h5")
    dnn.model.save(MODEL_FOLDER + os_slash + "model.h5")
    save_results(score[1])
Example No. 4
def main():
    x_train, y_train, x_test, y_test = data.mnist()

    params = {
        "epochs": 70,
        "num_hid_nodes": int(x_train.shape[1] * 0.9),
        "weight_init": [0.0, 0.1],
        "activations": 'relu',  #relu is much better performance
        "lr": 0.15,
        "decay": 1e-6,
        "momentum": 0.1,
    }

    # noise to training
    x_train_noise = np.copy(x_train).astype(float)
    level = 0.2
    cols = x_train.shape[1]
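    # Zero-mean Gaussian noise with variance `level` (std = sqrt(level)) is added to every training row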
    for row in range(x_train.shape[0]):
        noise = np.random.normal(0, np.sqrt(level), cols)
        x_train_noise[row, :] = x_train_noise[row, :] + noise

    # noise to test
    x_test_noise = np.copy(x_test).astype(float)
    cols = x_test.shape[1]
    for row in range(x_test.shape[0]):
        noise = np.random.normal(0, np.sqrt(level), cols)
        x_test_noise[row, :] = x_test_noise[row, :] + noise

    auto_enc1 = Autoencoder(**params)
    history = auto_enc1.train(x_train_noise, x_train, x_test)
    plot.plot_loss(history, loss_type='MSE')
    x_reconstr = auto_enc1.test(x_test_noise, binary=True)
    plot_traintest(x_test_noise, y_test, x_reconstr)
Example No. 5
def fine_tune(num_fold, data):
    # InceptionResNetV2 - 774
    # Xception - 126
    print('Fine-Tuning')
    model = None
    train_data = None
    val_data = None
    isBest = True

    train_data, val_data = data[num_fold - 1][0], data[num_fold - 1][1]
    model = model_dispatcher.return_model(config.MODELS[num_fold - 1])

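    # Load the model saved during the initial training run before unfreezing layers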
    model = load_model(get_model_name(num_fold))
    #LAYERS_TO_TRAIN = some_arbitrary_value
    print(model.summary())
    layers_to_train_from = config.LAYERS_TO_TRAIN[config.MODELS[num_fold - 1]]
    for layer in model.layers[1].layers[layers_to_train_from:]:
        layer.trainable = True
    print(model.summary())

    mc, reduce_lr = return_callbacks(num_fold, isBest)

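    # Fine-tune the unfrozen layers with a very small learning rate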
    opt = return_opt('adam', learning_rate=1e-5)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(train_data,
                        validation_data=val_data,
                        epochs=20,
                        callbacks=[mc, reduce_lr],
                        steps_per_epoch=len(train_data))
    plot.plot_loss(history)
    plot.plot_accuracy(history)
Example No. 6
def model(features,
          labels,
          experiment,
          lr=0.0001,
          batch_size=32,
          epochs=500,
          verbose=True):

    x_train, x_test, y_train, y_test = train_test_split(features,
                                                        labels,
                                                        test_size=0.20,
                                                        stratify=labels,
                                                        random_state=SEED)

    leaky_relu = tf.keras.layers.LeakyReLU(alpha=0.1)

    input_layer = keras.Input(shape=(11, ))
    x = layers.Dense(128, activation=leaky_relu)(input_layer)
    x = layers.Dense(64, activation=leaky_relu)(x)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(64, activation=leaky_relu)(x)
    x = layers.Dense(32, activation=leaky_relu)(x)
    x = layers.LayerNormalization()(x)
    x = layers.Dense(num_classes)(x)
    output_layer = layers.Softmax()(x)

    model = keras.Model(inputs=input_layer,
                        outputs=output_layer,
                        name='spotify_nn')
    if verbose:
        #keras.utils.plot_model(model, './model_diagram.png', show_shapes=True)
        model.summary()

    optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

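    # Labels are integer class indices, so use sparse categorical cross-entropy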
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=['accuracy'])

    history = model.fit(x_train,
                        y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_split=0.2)

    plot_acc(history, num_classes, experiment)
    plot_loss(history, num_classes, experiment)

    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
    if verbose:
        print('\nTest accuracy: {}'.format(test_acc))

    preds = model.predict(x_test)
    preds = tf.argmax(preds, axis=-1)
    confusion_matrix = tf.math.confusion_matrix(y_test,
                                                preds,
                                                num_classes=num_classes)

    plot_cm(confusion_matrix, classes, num_classes, experiment)
Example No. 7
def contpred(cfg):

    train = cfg.mode == 'train'

    # Collect data
    if not train:
        log.info(f"Collecting new trials")

        exper_data = collect_data(cfg)
        test_data = collect_data(cfg)

        log.info("Saving new default data")
        torch.save((exper_data, test_data),
                   hydra.utils.get_original_cwd() + '/trajectories/reacher/' + 'raw' + cfg.data_dir)
        log.info(f"Saved trajectories to {'/trajectories/reacher/' + 'raw' + cfg.data_dir}")
    # Load data
    else:
        log.info(f"Loading default data")
        # raise ValueError("Current Saved data old format")
        # Todo re-save data
        (exper_data, test_data) = torch.load(
            hydra.utils.get_original_cwd() + '/trajectories/reacher/' + 'raw' + cfg.data_dir)

    if train:
        prob = cfg.model.prob
        traj = cfg.model.traj
        ens = cfg.model.ensemble
        delta = cfg.model.delta

        log.info(f"Training model P:{prob}, T:{traj}, E:{ens}")

        log_hyperparams(cfg)

        if cfg.training.num_traj:
            train_data = exper_data[:cfg.training.num_traj]
        else:
            train_data = exper_data

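        # Build either a trajectory-parameterized dataset or a one-step (state, next-state) dataset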
        if traj:
            dataset = create_dataset_traj(exper_data, control_params=cfg.model.training.control_params,
                                          train_target=cfg.model.training.train_target, threshold=0.95)
        else:
            dataset = create_dataset_step(train_data, delta=delta)

        model = DynamicsModel(cfg)
        train_logs, test_logs = model.train(dataset, cfg)

        setup_plotting({cfg.model.str: model})
        plot_loss(train_logs, test_logs, cfg, save_loc=cfg.env.name + '-' + cfg.model.str, show=False)

        log.info("Saving new default models")
        f = hydra.utils.get_original_cwd() + '/models/reacher/'
        if cfg.exper_dir:
            f = f + cfg.exper_dir + '/'
            if not os.path.exists(f):
                os.mkdir(f)
        f = f + cfg.model.str + '.dat'
        torch.save(model, f)
Example No. 8
def training(x_train, x_test, epochs=1, batch_size=32):
    #Loading Data
    batches = x_train.shape[0] / batch_size

    # Creating GAN
    generator = create_generator()
    discriminator = create_discriminator()
    gan = create_gan(generator, discriminator)

    # Adversarial Labels
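    # One-sided label smoothing: real images are labeled 0.9 instead of 1.0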
    y_valid = np.ones(batch_size) * 0.9
    y_fake = np.zeros(batch_size)
    discriminator_loss, generator_loss = [], []

    for epoch in range(1, epochs + 1):
        print('-' * 15, 'Epoch', epoch, '-' * 15)
        g_loss = 0
        d_loss = 0

        for _ in tqdm(range(int(batches))):
            # Random Noise and Images Set
            noise = generate_noise(batch_size)
            image_batch = x_train[np.random.randint(0,
                                                    x_train.shape[0],
                                                    size=batch_size)]

            # Generate Fake Images
            generated_images = generator.predict(noise)

            # Train Discriminator (Fake and Real)
            discriminator.trainable = True
            d_valid_loss = discriminator.train_on_batch(image_batch, y_valid)
            d_fake_loss = discriminator.train_on_batch(generated_images,
                                                       y_fake)

            d_loss += (d_fake_loss + d_valid_loss) / 2

            # Train Generator
            noise = generate_noise(batch_size)
            discriminator.trainable = False
            g_loss += gan.train_on_batch(noise, y_valid)

        discriminator_loss.append(d_loss / batches)
        generator_loss.append(g_loss / batches)

        if epoch % PLOT_FRECUENCY == 0:
            plot_images(epoch, generator)
            plot_loss(epoch, generator_loss, discriminator_loss)
            plot_test(epoch, x_test, generator)

    generator.save('Save_model/generator.h5')
    discriminator.save('Save_model/discriminator.h5')
    gan.save('Save_model/gan.h5')
Example No. 9
def main():
    x_train, y_train, x_test, y_test = data.mnist()

    params = {
        "epochs": 50,
        "num_hid_nodes": int(x_train.shape[1] * 0.9),
        "weight_init": [0.0, 0.1],
        "activations": 'sigmoid',  #relu is much better performance
        "lr": 0.15,
        "decay": 1e-6,
        "momentum": 0.1,
    }
    auto_enc1 = Autoencoder(**params)
    history = auto_enc1.train(x_train, x_train, x_test)
    plot.plot_loss(history, loss_type='MSE')
    x_reconstr = auto_enc1.test(x_test, binary=True)
    plot_traintest(x_test, y_test, x_reconstr)
Example No. 10
    def train(self, images, epochs, batch_size):
        gan = self.__build()
        batch_count = int(images.shape[0] / batch_size)
        for e in range(1, epochs + 1):
            for _ in tqdm(range(batch_count)):
                noise = np.random.normal(0,
                                         1,
                                         size=[batch_size, self.random_dim])
                image_batch = images[np.random.randint(0,
                                                       images.shape[0],
                                                       size=batch_size)]

                # Generate fake MNIST images
                generated_images = self.generator.predict(noise)
                # Combine generated images and training images
                X = np.concatenate([image_batch, generated_images])

                # Labels for training and generated images
                y_dis = np.zeros(2 * batch_size)
                # 0 is fake, 1 is real
                # Label smoothing (see: https://github.com/aleju/papers/blob/master/neural-nets/Improved_Techniques_for_Training_GANs.md)
                # Because of the concatenation order, the first batch_size images are real
                y_dis[:batch_size] = 0.9
                y_dis[batch_size:] = 0.1

                # Train discriminator
                d_loss = self.discriminator.train_on_batch(X, y_dis)

                # Train generator
                noise = np.random.normal(0,
                                         1,
                                         size=[batch_size, self.random_dim])
                y_gen = np.ones(batch_size)
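                # Train the generator (through the combined GAN) to make the discriminator label fakes as real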
                g_loss = gan.train_on_batch(noise, y_gen)

            self.discriminator_losses.append(d_loss)
            self.generator_losses.append(g_loss)

            plot_images(e, self.generator, self.random_dim, self.img_dir)
            self.save_models(self.model_path, e)

        plot_loss(e, self.discriminator_losses, self.generator_losses,
                  self.loss_dir)
Example No. 11
def main():
    batch_size = 128
    epoch = 15

    data = DATA()
    model = LeNet(data.input_shape, data.num_classes)

    hist = model.fit(data.x_train,
                     data.y_train,
                     batch_size=batch_size,
                     epochs=epoch,
                     validation_split=0.2)
    score = model.evaluate(data.x_test, data.y_test, batch_size=batch_size)

    print()
    print('Test Loss= ', score)

    plot_loss(hist)
    plt.show()
Example No. 12
def run_hc_tsne(
    Z_init, tree, alpha, margin, config, score_logger, seed=2020, rerun=False
):
    Z1_name = f"{Z_dir}/Z1_{seed}.z"
    Z1_test_name = f"{Z_dir}/Z1_test_{seed}.z"
    loss_name = f"{score_dir}/loss-{name_suffix}.json"
    loss_logger = LossLogger(loss_name)

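    # Recompute the embedding only when forced or when no cached result exists on disk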
    if rerun or not os.path.exists(Z1_name):
        print("\n[DEBUG]Run Hierarchical TSNE with ", config["Z_new"])

        Z1 = hc_tsne(
            X_train,
            initialization=Z_init,
            tree=tree,
            alpha=alpha,
            margin=margin,
            loss_logger=loss_logger,
            random_state=seed,
            **config["hc"],
            **config["Z_new"],
        )
        Z1_test = Z1.transform(X_test)
        loss_logger.dump()
        joblib.dump(np.array(Z1), Z1_name)
        joblib.dump(np.array(Z1_test), Z1_test_name)
    else:
        Z1 = joblib.load(Z1_name)
        Z1_test = joblib.load(Z1_test_name)

    fig_name = f"{plot_dir}/HC-{name_suffix}.png"
    scatter(Z1, None, y_train, None, tree=tree, out_name=fig_name)

    loss_logger.load(loss_name)
    plot_loss(loss_logger.loss, out_name=f"{plot_dir}/loss-{name_suffix}.png")

    if score_logger is not None:
        evaluate_scores(
            X_train, y_train, X_test, y_test, Z1, Z1_test, "hc-tsne", score_logger
        )
Example No. 13
    def run(self, epochs=400):
        d = self.data
        X_train, X_test = d.X_train, d.X_test
        y_train, y_test = d.y_train, d.y_test
        X, y = d.X, d.y
        m = self.model

        h = m.fit(X_train, y_train, epochs=epochs, validation_data=[X_test, y_test], verbose=2)
        plot.plot_loss(h)
        plt.title('History of training')
        plt.show()

        yp = m.predict(X_test)
        print('Loss : ', m.evaluate(X_test, y_test))
        plt.plot(yp, label='Prediction')
        plt.plot(y_test, label='Original')
        plt.legend(loc=0)
        plt.title('Validation Results')
        plt.show()

        yp = m.predict(X_test).reshape(-1)
        print('Loss : ', m.evaluate(X_test, y_test))
        print(yp.shape, y_test.shape)

        df = pd.DataFrame()
        df['Sample'] = list(range(len(y_test))) * 2
        df['Normalized #Passengers'] = np.concatenate([y_test, yp], axis=0)
        df['Type'] = ['Original'] * len(y_test) + ['Prediction'] * len(yp)
        plt.figure(figsize=(7,5))
        sns.barplot(x='Sample', y='Normalized #Passengers', hue='Type', data=df)
        plt.ylabel('Normalized #Passengers')
        plt.show()

        yp = m.predict(X)
        plt.plot(yp, label='Prediction')
        plt.plot(y, label='Original')
        plt.legend(loc=0)
        plt.title('All Results')
        plt.show()
Example No. 14
def train_net(model, loss, config, inputs, labels, batch_size, disp_freq):

    iter_counter = 0
    loss_list = []
    acc_list = []

    for input, label in data_iterator(inputs, labels, batch_size):
        target = onehot_encoding(label, 10)
        iter_counter += 1

        # forward net
        output = model.forward(input)
        # calculate loss
        loss_value = loss.forward(output, target)
        # generate gradient w.r.t loss
        grad = loss.backward(output, target)
        # backward gradient

        model.backward(grad)
        # update layers' weights
        model.update(config)

        acc_value = calculate_acc(output, label)
        loss_list.append(loss_value)
        acc_list.append(acc_value)

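        # Every disp_freq iterations, report the running averages and reset the accumulators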
        if iter_counter % disp_freq == 0:
            total_loss = np.mean(loss_list)
            total_acc = np.mean(acc_list)
            plot_loss(total_loss, total_acc)

            msg = '  Training iter %d, batch loss %.4f, batch acc %.4f' % (
                iter_counter, total_loss, total_acc)
            loss_list = []
            acc_list = []
            LOG_INFO(msg)
Example No. 15
model.add(layers.Dense(46, activation='softmax'))



# Validation
x_val = x_train[:1000]
partial_x_train = x_train[1000:]

# y_val = one_hot_train_labels[:1000]
# partial_y_train = one_hot_train_labels[1000:]
# model.compile(optimizer='rmsprop',
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])


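# Raw integer labels with sparse_categorical_crossentropy (instead of the one-hot + categorical_crossentropy variant commented out above)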
partial_y_train = train_labels[1000:]
y_val = train_labels[:1000]
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['acc'])
print(model.summary())
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))



import plot
plot.plot_accuracy(history)
plot.plot_loss(history)
Example No. 16
                                               save_best_only=True)
earlystopper = keras.callbacks.EarlyStopping(monitor='val_loss',
                                             patience=10,
                                             verbose=0)
GRU6_json = GRU6.to_json()
with open("GRU6.json", "w") as jason_file:
    jason_file.write(GRU6_json)
time_callback = TimeHistory()
GRU6_history = GRU6.fit_generator(
    train,
    epochs=100,
    validation_data=valid,
    verbose=0,
    callbacks=[checkpointer, earlystopper, time_callback])

plot_loss(GRU6_history, 'GRU6 - Train & Validation Loss')

print('time for 10 epochs in seconds:', total_time(10, time_callback.times))

prediction = GRU6.predict_generator(test)
pr_y = np.zeros((1, len(prediction)))
t_y = np.zeros((1, len(test)))
for num in range(len(prediction)):
    pr_y[:, num] = prediction[num][0]
    t_y[:, num] = (test[num][1])[0][0]

plt.plot(t_y, pr_y, 'ro')
plt.xlabel('True Values')
plt.ylabel('Predictions')
plt.axis('equal')
plt.show()
Example No. 17
    print('reward:', rewards_c.shape)

    # print(len(rewards))
    reward_batch = np.sum(rewards)
    with critic_sess.as_default():
        with critic_sess.graph.as_default():
            # critic_sess.run(tf.global_variables_initializer())

            baselines, _, loss_, m = critic_sess.run(
                [training_logits_, train_op_, l_, masks_], {
                    input_data_: target_batch,
                    targets_: translate_logits,
                    rewards_: rewards_c,
                    lr_: learning_rate,
                    target_sequence_length_: lens,
                    source_sequence_length_: targets_lengths,
                    keep_prob_: keep_probability
                })
            # print(m)
            print('baseline:', baselines)
            print(baselines.shape)
            print('critic loss:', loss_)
            # print(baselines.shape)
            loss_list.append((count, loss_))
            saver.save(critic_sess, save_path_critic)
            print('Model Trained and Saved')

            util.save_params(save_path_critic, 'params_critic_sup.p')
plot_loss(loss_list)
Example No. 18
def train():
    print('Starting Training')
    data_frame = pd.read_csv(config.TRAIN_CSV_PATH)
    Y = data_frame[['Label']].copy()
    num_fold = 1
    data = dict()
    isBest = False
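    # One model is trained per cross-validation fold; the loop stops after len(config.MODELS) folds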
    for train_idx, val_idx in return_split(Y):
        if (num_fold in config.LIST_OF_FOLD_EXCEPTIONS):
            import train_facialLandmarks
            train_facialLandmarks.train(num_fold)
        else:
            train_df = data_frame.iloc[train_idx]
            val_df = data_frame.iloc[val_idx]
            train_datagen, val_datagen = return_gen(num_fold)

            train_data = train_datagen.flow_from_dataframe(
                dataframe=train_df,
                directory=None,
                x_col="Image",
                y_col="Label",
                target_size=(config.TARGET_SIZE[config.MODELS[num_fold - 1]][0],
                             config.TARGET_SIZE[config.MODELS[num_fold - 1]][1]),
                class_mode="categorical",
                shuffle=True,
                batch_size=config.BATCH_SIZE,
                seed=42)

            val_data = val_datagen.flow_from_dataframe(
                dataframe=val_df,
                directory=None,
                x_col="Image",
                y_col="Label",
                target_size=(config.TARGET_SIZE[config.MODELS[num_fold - 1]][0],
                             config.TARGET_SIZE[config.MODELS[num_fold - 1]][1]),
                class_mode="categorical",
                shuffle=True,
                batch_size=config.BATCH_SIZE,
                seed=42)
            print(train_data.class_indices)
            model = model_dispatcher.return_model(num_fold)
            mc, reduce_lr = return_callbacks(num_fold, isBest)

            opt = return_opt('adam', learning_rate=0.01)
            model.compile(optimizer=opt,
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

            history = model.fit(train_data,
                                validation_data=val_data,
                                epochs=1,
                                callbacks=[mc, reduce_lr],
                                steps_per_epoch=len(train_data))

            plot.plot_loss(history)
            plot.plot_accuracy(history)
            model = load_model(get_model_name(num_fold, isBest))
            results = model.evaluate(val_data)
            results = dict(zip(model.metrics_names, results))

            data[num_fold] = [train_data, val_data]

        num_fold += 1
        tf.keras.backend.clear_session()
        if (num_fold > len(config.MODELS)):
            break

    return data
Example No. 19
import numpy as np

import dataset as ds
from neural_networks import NeuralNetwork
from layers import InputLayer, OutputLayer, DenseLayer
from functions._init_functions import init_functions
from functions._activation_functions import activation_functions, activation_functions_derivatives
from functions._loss_functions import loss_functions
import plot as plt


data = ds.MLCupDataset()

model = NeuralNetwork()
model.add(InputLayer(10))
model.add(DenseLayer(50, fanin=10, activation="sigmoid"))
model.add(DenseLayer(30, fanin=50, activation="sigmoid"))
model.add(OutputLayer(2, fanin=30))

# configuration 322, line 324
model.compile(1143, 600, 0.03, None, 0.000008, 0.3, "mean_squared_error")

loss = model.fit(data.train_data_patterns, data.train_data_targets)

print(loss[-1])
plt.plot_loss(loss)
Example No. 20
def training(epochs=1, batch_size=32):
    #Loading Data
    #x_train = load_imgs("./data256/*png")

    # Rescale -1 to 1
    #x_train = x_train / 127.5 - 1.
    train_datagen = ImageDataGenerator()
    train_generator = train_datagen.flow_from_directory('imgs/',
                                                        batch_size=batch_size,
                                                        shuffle=True)
    batches = 12385. / batch_size

    # Creating GAN
    generator = create_generator()
    discriminator = create_discriminator()
    gan = create_gan(generator, discriminator)

    # Adversarial Labels
    y_valid = np.ones(batch_size) * 0.9
    y_fake = np.zeros(batch_size)
    discriminator_loss, generator_loss = [], []

    for epoch in range(1, epochs + 1):
        print('-' * 15, 'Epoch', epoch, '-' * 15)
        g_loss = 0
        d_loss = 0

        for _ in tqdm(range(int(batches - 1))):
            # Random Noise and Images Set
            noise = generate_noise(batch_size)
            image_batch, _ = train_generator.next()
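            # Rescale pixel values from [0, 255] to [-1, 1]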
            image_batch = image_batch / 127.5 - 1.
            if image_batch.shape[0] == batch_size:
                # Generate Fake Images
                generated_images = generator.predict(noise)

                # Train Discriminator (Fake and Real)
                discriminator.trainable = True
                d_valid_loss = discriminator.train_on_batch(
                    image_batch, y_valid)
                d_fake_loss = discriminator.train_on_batch(
                    generated_images, y_fake)

                d_loss += (d_fake_loss + d_valid_loss) / 2

                # Train Generator
                noise = generate_noise(batch_size)
                discriminator.trainable = False
                g_loss += gan.train_on_batch(noise, y_valid)
        train_generator.on_epoch_end()

        discriminator_loss.append(d_loss / batches)
        generator_loss.append(g_loss / batches)

        if epoch % PLOT_FRECUENCY == 0:
            plot_images(epoch, generator)
            plot_loss(epoch, generator_loss, discriminator_loss)
            save_path = "./models"
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            discriminator.save(save_path + "/discrim_%s.h5" % epoch)
            generator.save(save_path + "/generat_%s.h5" % epoch)
            gan.save(save_path + "/gan_%s.h5" % epoch)

    save_images(generator)
Example No. 21
        #Forward Propagation
        out, net_in, net_in_bias = forwardprop.forward(num_layers,
                                                       out[0].shape[0],
                                                       net_in_bias, out,
                                                       net_in, theta)

        #Backpropagation
        error, dtheta, theta = backprop.back(num_layers, out[0].shape[0],
                                             error, out, hidden, net_in_bias,
                                             theta, dtheta, alpha, temp_Y)

    out, _, _, _, _, _, _ = initialize.init(X, num_layers, hidden, num_batches)
    out, _, _ = forwardprop.forward(num_layers, num_trainset, net_in_bias, out,
                                    net_in, theta)
    loss[epoch] = performance_metrics.loss(Y, out[num_layers - 1], Y.shape[0])
    #plot.plot_boundary(X, Y_nb, out, num_layers, net_in_bias, net_in, theta, epoch, Z, xx, yy)

out, _, _, _, _, _, _ = initialize.init(X, num_layers, hidden, num_batches)
out, _, _ = forwardprop.forward(num_layers, num_trainset, net_in_bias, out,
                                net_in, theta)
print(performance_metrics.confusion_matrix(Y, out[num_layers - 1], num_class))
print("Accuracy: " + str(performance_metrics.accuracy(Y, out[num_layers - 1])))
print("Precision: " +
      str(performance_metrics.precision(Y, out[num_layers - 1], num_class)))
print("Recall: " +
      str(performance_metrics.recall(Y, out[num_layers - 1], num_class)))
print("Error: " + str(performance_metrics.sum_error(error, num_layers)))

#plot.plot_boundary(X, Y_nb, out, num_layers, net_in_bias, net_in, theta, 0)
plot.plot_loss(np.arange(num_epochs), loss)
Example No. 22
from sys import argv

import numpy as np
from neuron import generate_network, get_neighborhood, get_route
from plot import plot_neuron_chain, plot_route, plot_loss
from opts import OPT
from dataloader import dataloader
import pandas as pd
from path import Path
from som import SOM

import matplotlib.pyplot as plt


def main():
    pass


if __name__ == '__main__':
    args = OPT().args()
    SOM(args)

    plot_loss(input_dir=args.out_dir)
    plot_route(input_dir=args.out_dir)
    plot_neuron_chain(input_dir=args.out_dir)
Example No. 23
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000, )))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))

# Validation
x_val = x_train[:1000]
partial_x_train = x_train[1000:]

# y_val = one_hot_train_labels[:1000]
# partial_y_train = one_hot_train_labels[1000:]
# model.compile(optimizer='rmsprop',
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])

partial_y_train = train_labels[1000:]
y_val = train_labels[:1000]
model.compile(optimizer='rmsprop',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
print(model.summary())
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))

import plot
plot.plot_accuracy(history)
plot.plot_loss(history)
Example No. 24
                    rewards_all /= 10

            print('Epoch {:>3} Batch {:>4}/{} - Actor Loss: {:>6.4f} - Critic Loss: {:>6.4f} - Train Reward: {:>6.4f} - Valid Reward: {:>6.4f}'
                  .format(epoch_i, batch_i, len(source_int_text) // batch_size, loss_actor, loss_critic, reward_batch, rewards_all))

            loss_list.append((count, loss_actor))
            loss_list_critic.append((count, loss_critic))
            reward_list.append((count, reward_batch))
            reward_valid_list.append((count, rewards_all))

    with train_sess.as_default():
        with train_sess.graph.as_default():
            # Save Model
            saver_actor = tf.train.Saver()
            saver_actor.save(train_sess, save_path)

    with critic_sess.as_default():
        with critic_sess.graph.as_default():
            # Save Model
            saver_critic = tf.train.Saver()
            saver_critic.save(critic_sess, save_path_critic)

# plot
plot_loss(loss_list, "actor loss", "img/actor_loss.jpg")
plot_loss(loss_list_critic, "critic loss", "img/critic_loss.jpg")
plot_loss(reward_valid_list, "training reward", "img/training_reward.jpg")

#save model
util.save_params(save_path, 'params_actor_reinforce.p')
util.save_params(save_path_critic, 'params_critic_sup.p')
Example No. 25
            if batch_i % display_step == 0 and batch_i > 0:
                rewards_all = 0
                for batch_j, (valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths) in enumerate(
                        util.get_batches(valid_source, valid_target, batch_size,
                                         source_vocab_to_int['<PAD>'],
                                         target_vocab_to_int['<PAD>'])):
                    batch_valid_logits = sess.run(
                        inferencing_logits,
                        {input_data: valid_sources_batch,
                         source_sequence_length: valid_sources_lengths,
                         target_sequence_length: valid_targets_lengths,
                         keep_prob: 1.0})

                    rewards_v, pre_logits = util.get_bleu(valid_targets_batch, batch_valid_logits)
                    rewards_v = np.sum(rewards_v) / batch_size
                    rewards_all += rewards_v
                rewards_all /= 10

                print('Epoch {:>3} Batch {:>4}/{} - Loss: {:>6.4f} - Valid bleu: {:>6.4f}'
                      .format(epoch_i, batch_i, len(source_int_text) // batch_size, loss, rewards_all))

    # Save Model
    saver = tf.train.Saver()
    saver.save(sess, save_path)
    print('Model Trained and Saved')

plot_loss(loss_list, "pretrain loss", "img/pretrain_loss.jpg")

# Save parameters for checkpoint# Save p
util.save_params(save_path, 'params_actor_sup.p')
Example No. 26
def lorenz(cfg):
    # Lorenz parameters and initial conditions
    sigma, beta, rho = cfg.lorenz.sigma, cfg.lorenz.beta, cfg.lorenz.rho
    u0, v0, w0 = cfg.lorenz.ex.u0, cfg.lorenz.ex.v0, cfg.lorenz.ex.w0

    # Maximum time point and total number of time points
    tmax, n = cfg.lorenz.tmax, cfg.lorenz.n

    # Integrate the Lorenz equations on the time grid t
    t = np.linspace(0, tmax, n)
    f = odeint(sim_lorenz, (u0, v0, w0), t, args=(sigma, beta, rho))
    x, y, z = f.T

    num_traj = cfg.lorenz.num_traj
    if cfg.collect_data:
        data_X = np.zeros((1, 3))
        data_Seq = []

        new_init = np.random.uniform(low=[-25, -25, -25],
                                     high=[25, 25, 25],
                                     size=(num_traj, 3))

        for row in new_init:
            u, v, w = row  # row[0], row[1], row[2]
            f = odeint(sim_lorenz, (u, v, w), t, args=(sigma, beta, rho))
            x, y, z = f.T
            l = DotMap()
            l.states = f
            # Add parameters in the same format as the data-generation object
            # TODO take generic parameters rather than only PD Target
            l.P = cfg.lorenz.beta
            l.D = cfg.lorenz.rho
            l.target = cfg.lorenz.sigma

            data_Seq.append(l)

        if cfg.plot: plot_lorenz(data_Seq, cfg, predictions=None)

        if cfg.save_data:
            log.info("Saving new default data")
            torch.save((data_Seq),
                       hydra.utils.get_original_cwd() +
                       '/trajectories/lorenz/' + 'raw' + cfg.data_dir)
            log.info(f"Saved trajectories to {cfg.data_dir}")
    else:
        data_Seq = torch.load(hydra.utils.get_original_cwd() +
                              '/trajectories/lorenz/' + 'raw' + cfg.data_dir)

    # Analysis
    from dynamics_model import DynamicsModel
    from reacher_pd import create_dataset_step, create_dataset_traj
    from plot import plot_loss

    prob = cfg.model.prob
    traj = cfg.model.traj
    ens = cfg.model.ensemble

    if traj:
        dataset = create_dataset_traj(data_Seq, threshold=0.95)
    else:
        dataset = create_dataset_step(data_Seq)

    if cfg.train_models:
        model = DynamicsModel(cfg)
        train_logs, test_logs = model.train(dataset, cfg)
        plot_loss(train_logs,
                  test_logs,
                  cfg,
                  save_loc=cfg.env.name + '-' + cfg.model.str,
                  show=True)
        if cfg.save_models:
            log.info("Saving new default models")
            torch.save(
                model,
                hydra.utils.get_original_cwd() + '/models/lorenz/' +
                cfg.model.str + '.dat')

    models = {}
    for model_type in cfg.models_to_eval:
        models[model_type] = torch.load(hydra.utils.get_original_cwd() +
                                        '/models/lorenz/' + model_type +
                                        ".dat")

    mse_evald = []
    for i in range(cfg.num_eval):
        traj_idx = np.random.randint(num_traj)
        traj = data_Seq[traj_idx]
        MSEs, predictions = test_models([traj], models)

        MSE_avg = {key: np.average(MSEs[key], axis=0) for key in MSEs}

        mse = {key: MSEs[key].squeeze() for key in MSEs}
        mse_sub = {key: mse[key][mse[key] < 10**5] for key in mse}
        pred = {key: predictions[key] for key in predictions}
        mse_evald.append(mse)
        #
        # plot_states(traj.states, pred, save_loc="Predictions; traj-" + str(traj_idx), idx_plot=[0,1,2], show=False)
        # plot_mse(mse_sub, save_loc="Error; traj-" + str(traj_idx), show=False)
        plot_lorenz([traj], cfg, predictions=pred)

    plot_mse_err(mse_evald, save_loc="Err Bar MSE of Predictions", show=True)
Example No. 27
def noise_experiment(args, test_size=0.3):
    du = DataUtils(args.name)
    pos, neg, bk, clauses, lang = du.load_data()
    pos_train, pos_test = train_test_split(pos,
                                           test_size=test_size,
                                           random_state=seed)
    neg_train, neg_test = train_test_split(neg,
                                           test_size=test_size,
                                           random_state=seed)

    noise_rates = [
        0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50
    ]
    baseline_auc = [1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]

    datasets = get_datasets_with_noise(pos_train, neg_train, noise_rates)
    AUCs = []
    AUC_stds = []
    MSEs = []
    MSE_stds = []
    N = 5  # how many times to perform weight learn

    if args.name == 'member':
        T_beam = 3
        N_beam = 3

    elif args.name == 'subtree':
        T_beam = 3
        N_beam = 15
    else:
        T_beam = 5
        N_beam = 10

    N_max = 50

    for i, (pos_train, neg_train) in enumerate(datasets):
        ilp_train = ILPProblem(pos_train, neg_train, bk, lang, name=args.name)
        print('NOISE RATE: ', noise_rates[i])
        ilp_train.print()
        CG = ClauseGenerator(ilp_train,
                             infer_step=args.T,
                             max_depth=1,
                             max_body_len=1)
        solver = ILPSolver(ilp_train,
                           C_0=clauses,
                           CG=CG,
                           m=args.m,
                           infer_step=args.T)
        clauses_, Ws_list, loss_list_list = solver.train_N(N=N,
                                                           gen_mode='beam',
                                                           N_max=N_max,
                                                           T_beam=T_beam,
                                                           N_beam=N_beam,
                                                           epoch=args.epoch,
                                                           lr=args.lr,
                                                           wd=0.0)
        v_list, facts = solver.predict_N(pos_test, neg_test, clauses_, Ws_list)

        auc_list = np.array(
            [compute_auc(pos_test, neg_test, v_, facts) for v_ in v_list])
        auc_mean = np.mean(auc_list)
        auc_std = np.std(auc_list)
        AUCs.append(auc_mean)
        AUC_stds.append(auc_std)

        mse_list = np.array(
            [compute_mse(pos_test, neg_test, v_, facts) for v_ in v_list])
        mse_mean = np.mean(mse_list)
        mse_std = np.std(mse_list)
        MSEs.append(mse_mean)
        MSE_stds.append(mse_std)
        for j in range(N):
            loss_path = 'imgs/noise/loss/' + args.name + \
                '[noise:' + str(noise_rates[i]) + ']-' + str(j) + '.pdf'
            plot_loss(
                loss_path, loss_list_list[j],
                args.name + ':[noise:' + str(noise_rates[i]) + ']-' + str(j))

    # plot AUC with baseline
    path_auc = 'imgs/noise/' + args.name + '_AUC.pdf'
    path_mse = 'imgs/noise/' + args.name + '_MSE.pdf'

    print(AUC_stds)
    print(MSE_stds)

    plot_line_graph_baseline_err(
        path=path_auc,
        xs=noise_rates,
        ys=AUCs,
        err=AUC_stds,
        xlabel='Proportion of mislabeled training data',
        ylabel='AUC',
        title=args.name,
        baseline=baseline_auc)
    # plot MSE with std
    plot_line_graph_err(path=path_mse,
                        xs=noise_rates,
                        ys=MSEs,
                        err=MSE_stds,
                        xlabel='Proportion of mislabeled training data',
                        ylabel='Mean-squared test error',
                        title=args.name)
Example No. 28
def main():
  args = get_args()
  global m
  print("Using "+args.arch_file+" DNN architecture")
  m = __import__(args.arch_file)

  print("Using GPU", args.gpu_id)
  torch.cuda.set_device(args.gpu_id)
  tmp = torch.ByteTensor([0])
  tmp.cuda()

  int_model_dir = args.dirout+'/intermediate_models/'
  plot_dir = args.dirout+'/train_stats/plots/'
  loss_file = args.dirout+'/train_stats/train_loss.txt'
  cv_loss_file = args.dirout+'/train_stats/cv_loss.txt'


  print("loading datset")
  dataset = m.TrainSet(args.data_dir, args.train_copy_location)
  dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True, collate_fn=dataset.collator, num_workers=1)
  if args.cv_data_dir:
    cv_dataset = m.TrainSet(args.cv_data_dir)
    cv_dataloader = DataLoader(cv_dataset, batch_size=args.batch_size, collate_fn=cv_dataset.collator)

  print("initializing model")
  if args.model_config:
    kwargs = dict()
    for line in open(args.model_config):
      kwargs[line.split('=')[0]] = line.rstrip().split('=')[1]
    model = m.SepDNN(args.gpu_id, **kwargs)
  else:
    model = m.SepDNN(args.gpu_id)
  model.cuda()
  optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
  print("using lr="+str(args.learning_rate))

  epoch_losses = [[], []]
  epoch_cv_losses = [[], []]
  lossF = open(loss_file, 'a')
  if args.cv_data_dir:
    cv_lossF = open(cv_loss_file, 'a')

  if args.start_epoch == 0:
    torch.save(model.state_dict(), int_model_dir+'init.mdl')
  else:
    model.load_state_dict(torch.load(int_model_dir+str(args.start_epoch).zfill(3)+'.mdl', map_location=lambda storage, loc: storage.cuda()))
    load_losses(loss_file, epoch_losses)
    if args.cv_data_dir:
      load_losses(cv_loss_file, epoch_cv_losses)

  print("training")
  for epoch in range(args.start_epoch, args.num_epochs):
    epoch_loss = 0.0
    epoch_norm = 0
    for i_batch, sample_batch in enumerate(dataloader):
      loss, norm = m.compute_loss(model, epoch, sample_batch)
      epoch_loss += loss.detach().cpu().numpy() * norm.detach().cpu().numpy()
      epoch_norm += norm.detach().cpu().numpy()
      optimizer.zero_grad()  # clear gradients accumulated from the previous batch
      loss.backward()
      torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)
      optimizer.step()

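    # Evaluate on the cross-validation set every fifth epoch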
    if args.cv_data_dir and epoch % 5 == 4:
      cv_epoch_loss = 0.0
      cv_epoch_norm = 0
      model.eval()
      with torch.no_grad():
        for i_batch_cv, sample_batch_cv in enumerate(cv_dataloader):
          if i_batch_cv == 0:
            cv_loss, cv_norm = m.compute_cv_loss(model, epoch, sample_batch_cv, plot_dir+'epoch'+str(epoch+1).zfill(3))
          else:
            cv_loss, cv_norm = m.compute_cv_loss(model, epoch, sample_batch_cv)
          cv_epoch_loss += cv_loss.detach().cpu().numpy() * cv_norm.detach().cpu().numpy()
          cv_epoch_norm += cv_norm.detach().cpu().numpy()
      model.train()
      print("For epoch: "+str(epoch+1).zfill(3)+" cv set loss is: "+str(cv_epoch_loss/cv_epoch_norm))
      cv_lossF.write(str(epoch+1).zfill(3)+' '+str(cv_epoch_loss/cv_epoch_norm)+'\n')
      cv_lossF.flush()
      epoch_cv_losses[0].append(epoch+1)
      epoch_cv_losses[1].append(cv_epoch_loss/cv_epoch_norm)

    print("For epoch: "+str(epoch+1).zfill(3)+" loss is: "+str(epoch_loss/epoch_norm))
    lossF.write(str(epoch+1).zfill(3)+' '+str(epoch_loss/epoch_norm)+'\n')
    lossF.flush()
    epoch_losses[0].append(epoch+1)
    epoch_losses[1].append(epoch_loss/epoch_norm)
    if epoch % 5 == 4:
      print("Saving model for epoch "+str(epoch+1).zfill(3))
      torch.save(model.state_dict(), int_model_dir+str(epoch+1).zfill(3)+'.mdl')
      os.system("mkdir -p "+plot_dir+'epoch'+str(epoch+1).zfill(3))
      plot.plot_loss(epoch_losses, epoch_cv_losses, plot_dir+'epoch'+str(epoch+1).zfill(3)+'/Loss_'+str(epoch_losses[0][0]).zfill(3)+'-'+str(epoch+1).zfill(3)+'.png')
    sys.stdout.flush()

  torch.save(model.state_dict(), args.dirout+'/final.mdl')
  plot.plot_loss(epoch_losses, epoch_cv_losses, plot_dir+'Loss_'+str(epoch_losses[0][0]).zfill(3)+'-'+str(args.num_epochs).zfill(3)+'.png')
Example No. 29
    # print some metrics
    train_samples_size = len(train_loader) * BATCH_SIZE
    valid_samples_size = len(valid_loader) * BATCH_SIZE
    loss_train_epoch = loss_train / train_samples_size
    loss_valid_epoch = loss_valid / valid_samples_size
    error_train_epoch = 100 - 100 * (acc_train / train_samples_size)
    error_valid_epoch = 100 - 100 * (acc_valid / valid_samples_size)
    error_history.append((error_train_epoch, error_valid_epoch))
    loss_history.append((loss_train_epoch, loss_valid_epoch))
    print(
        'Epoch: {} train loss: {:.5f} valid loss: {:.5f} train error: {:.2f} % valid error: {:.2f} %'
        .format(epoch, loss_train_epoch, loss_valid_epoch, error_train_epoch,
                error_valid_epoch))

    # check if model is better
    if error_valid_epoch < best_error[1]:
        best_error = (epoch, error_valid_epoch)
        snapshot(SAVED_MODELS_DIR, RUN_TIME, RUN_NAME, True, epoch,
                 error_valid_epoch, model.state_dict(),
                 model.optimizer.state_dict())

    # check that the model is not getting worse over time
    if best_error[0] + PATIENCE < epoch:
        print('Overfitting. Stopped at epoch {}.'.format(epoch))
        break
    epoch += 1

    plot_loss(RUN_TIME, RUN_NAME, loss_history)
    plot_error(RUN_TIME, RUN_NAME, error_history)