Example #1
import tensorflow as tf
from reader import reader
from model import dnn
import numpy as np
import pickle


# Load the data reader from Practice (2)
data_reader = reader()

# Load the DNN graph from Practice (3)
model = dnn()

# Create a Session for running the graph
# and initialize the model's parameters
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Saver plus bookkeeping for validation tracking and best-model
# checkpointing (the loop that uses these is truncated in this excerpt)
saver = tf.train.Saver()
save_path = None
val_queue, val_weightedAvg = [], []
count, init_flag, best_val = 0, 0, 0

batch_size = 16
max_steps = 50000
for i in range(max_steps):

  # For each iteration, first fetch a batch of x/y training data
  x_train, y_train, _, _, _ = data_reader.next_batch(batch_size, train=1)

  # Next, construct feed for model's placeholder
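  # (The example is truncated here. A hypothetical sketch of the usual next
  # step; the names model.x, model.y, model.train_op and model.loss are
  # assumptions, not from the original code.)
  feed = {model.x: x_train, model.y: y_train}

  # Run one optimization step and fetch the current training loss
  _, step_loss = sess.run([model.train_op, model.loss], feed_dict=feed)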
Example #2
# Author: Changyu Liu <[email protected]>
# License: MIT
####################################################

from model import dnn

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X_data, y_data = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X_data,
                                                    y_data,
                                                    train_size=0.6,
                                                    test_size=0.4)

# Transpose so that samples are columns: this dnn takes features along
# axis 0 (note that layer_dims starts with X_train.shape[0])
X_train = X_train.T
y_train = y_train.reshape(y_train.shape[0], -1).T

X_test = X_test.T
y_test = y_test.reshape(y_test.shape[0], -1).T

accuracy = dnn(X_train,
               y_train,
               X_test,
               y_test,
               layer_dims=[X_train.shape[0], 10, 5, 1],
               lr=0.001,
               iters=20000)

print(f"Acc: {accuracy}")
Example #3
from model import dnn

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X_data, y_data = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X_data,
                                                    y_data,
                                                    train_size=0.6,
                                                    test_size=0.4,
                                                    random_state=28)

X_train = X_train.T
y_train = y_train.reshape(y_train.shape[0], -1).T

X_test = X_test.T
y_test = y_test.reshape(y_test.shape[0], -1).T

accuracy = dnn(X_train,
               y_train,
               X_test,
               y_test,
               layer_dims=[X_train.shape[0], 10, 5, 1],
               learning_rate=1e-3,
               iters=10000)

print(f"Acc: {accuracy}")

Example #4
def train(filepath):
    """Train model to estimate power.

    Args:
        filepath (str): Path to training set.

    """

    MODELS_PATH.mkdir(parents=True, exist_ok=True)

    # Load parameters
    with open("params.yaml") as params_file:
        params = yaml.safe_load(params_file)["train"]
    net = params["net"]

    # Load training set
    train = np.load(filepath)

    X_train = train["X"]
    y_train = train["y"]

    n_features = X_train.shape[-1]
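    # (Axis convention inferred from the indexing: samples x history window x features)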

    # Create sample weights
    sample_weights = np.ones_like(y_train)

    if params["weigh_samples"]:
        sample_weights[y_train > params["weight_thresh"]] = params["weight"]

    hist_size = X_train.shape[-2]

    # Disabled alternative: hyperparameter search with keras-tuner's
    # Hyperband over a DeepPowerHyperModel (objective="val_loss",
    # max_epochs=50, executions_per_trial=2, directory="model_tuning",
    # project_name="DeepPower"), then training and saving the best model
    # found by the tuner.

    # Build model
    if net == "cnn":
        hist_size = X_train.shape[-2]
        model = cnn(hist_size, n_features, kernel_size=params["kernel_size"])
    elif net == "dnn":
        model = dnn(n_features)
    elif net == "lstm":
        pass
    elif net == "cnndnn":
        pass

    print(model.summary())

    # Save a plot of the model
    PLOTS_PATH.mkdir(parents=True, exist_ok=True)
    plot_model(model,
               to_file=PLOTS_PATH / 'model.png',
               show_shapes=False,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=True,
               dpi=96)

    history = model.fit(X_train,
                        y_train,
                        epochs=params["n_epochs"],
                        batch_size=params["batch_size"],
                        validation_split=0.2,
                        sample_weight=sample_weights)

    model.save(MODELS_FILE_PATH)

    TRAININGLOSS_PLOT_PATH.parent.mkdir(parents=True, exist_ok=True)

    loss = history.history['loss']
    val_loss = history.history['val_loss']

    n_epochs = range(len(loss))

    plt.figure()
    plt.plot(n_epochs, loss, label="Training loss")
    plt.plot(n_epochs, val_loss, label="Validation loss")
    plt.legend()
    plt.savefig(TRAININGLOSS_PLOT_PATH)
Example #5
    def train(self):

        net = dnn().cuda()
        mse = nn.MSELoss()
        l1 = nn.L1Loss()
        opt = torch.optim.Adam(net.parameters(), self.learning_rate,
                               [self.beta1, self.beta2])
        sigmoid = nn.Sigmoid().cuda()

        print('start training')

        train_err = []
        test_err_mse = []
        test_err_rmse = []

        prev_loss = 999.

        # Cross-subject loop: subject `sbj` is held out for validation each pass
        for sbj in range(5):
            traindata = seedvig_loader(data_dir,
                                       mode='train',
                                       batch_size=batch_size,
                                       test_no=sbj,
                                       data_type=data_type)
            testdata = seedvig_loader(data_dir,
                                      mode='val',
                                      batch_size=batch_size,
                                      test_no=sbj,
                                      data_type=data_type)

            for epoch in range(self.max_epoch):

                train_loss = 0.
                test_mse = 0.
                test_rmse = 0.
                n_train = 0.
                n_test = 0.

                net.train()
                for i, data in enumerate(traindata):
                    eeg_x, eeg_y = Variable(data[0]).cuda(), Variable(
                        data[1]).cuda()
                    pred = net(eeg_x)
                    err = torch.sqrt(mse(pred, eeg_y)) + l1(pred, eeg_y)
                    train_loss += err.item()
                    opt.zero_grad()  # clear gradients left over from the previous step
                    err.backward()
                    opt.step()

                if epoch == self.max_epoch - 1:
                    train_err.append(train_loss / (i + 1))  # average over batches
                    n_train = i + 1
                    prev_loss = train_loss
                net.eval()

                if epoch == self.max_epoch - 1:
                    torch.save(net.state_dict(),
                               '../model/dnn_3layers_{:03d}.ckpt'.format(sbj))
                    with torch.no_grad():
                        for i, data in enumerate(testdata):
                            eeg_x, eeg_y = Variable(data[0]).cuda(), Variable(
                                data[1]).cuda()
                            pred = net(eeg_x)
                            err = mse(pred, eeg_y)
                            test_mse += err.item()
                            test_rmse += torch.sqrt(err).item()

                    test_err_mse.append(test_mse / (i + 1))
                    test_err_rmse.append(test_rmse / (i + 1))
                    n_test = i + 1

        print('mse: {:.4f} / mse(std): {:.4f}'.format(
            np.array(test_err_mse).mean(),
            np.array(test_err_mse).std()))
        print('rmse: {:.4f} / rmse(std): {:.4f}'.format(
            np.array(test_err_rmse).mean(),
            np.array(test_err_rmse).std()))
Example #6
data_names = ['data']
label_names = ['softmax_label']
data_shapes = [(batch_size, data_dim)]
label_shapes = [(batch_size, label_dim)]
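# NOTE: the bind call below hardcodes shapes (4, 13) / (4, 2) rather than reusing these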

model_prefix = prefix

symbol, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 1)
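# Rebuild the network symbol and bind a module for inference using the checkpointed parameters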

sym = model.dnn(ndnn=ndnn, dnnsize=dnnsize)
mod = mx.mod.Module(symbol=sym,
                    context=mx.context.gpu(5),
                    data_names=data_names,
                    label_names=label_names)
mod.bind(for_training=False,
         data_shapes=[('data', (4, 13))],
         label_shapes=[('softmax_label', (4, 2))])
mod.set_params(arg_params=arg_params, aux_params=aux_params)
result = []

with codecs.open(dev_feats_scp, 'r') as fin:
    files = fin.readlines()

for F in files:
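    # (The loop body is truncated in the source. A hypothetical sketch, assuming
    # each scp line is "utterance-id feature-path" and the features load as a
    # NumPy array; none of these specifics come from the original code.)
    utt_id, feat_path = F.strip().split()
    feats = mx.nd.array(np.load(feat_path))
    mod.forward(mx.io.DataBatch(data=[feats]), is_train=False)
    result.append(mod.get_outputs()[0].asnumpy())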
Example #7
def train(filepath):
    """Train model to estimate power.

    Args:
        filepath (str): Path to training set.

    """

    MODELS_PATH.mkdir(parents=True, exist_ok=True)

    # Load parameters
    with open("params.yaml") as params_file:
        params = yaml.safe_load(params_file)["train"]
    net = params["net"]
    use_early_stopping = params["early_stopping"]
    patience = params["patience"]

    # Load training set
    train = np.load(filepath)

    X_train = train["X"]
    y_train = train["y"]

    n_features = X_train.shape[-1]

    hist_size = X_train.shape[-2]
    target_size = y_train.shape[-1]

    # Build model
    if net == "cnn":
        hist_size = X_train.shape[-2]
        model = cnn(hist_size,
                    n_features,
                    output_length=target_size,
                    kernel_size=params["kernel_size"])
    elif net == "dnn":
        model = dnn(n_features, output_length=target_size)
    # elif net == "lstm":
    #     pass
    # elif net == "cnndnn":
    #     pass
    else:
        raise NotImplementedError("Only 'cnn' is implemented.")

    print(model.summary())

    # Save a plot of the model
    PLOTS_PATH.mkdir(parents=True, exist_ok=True)
    plot_model(model,
               to_file=PLOTS_PATH / 'model.png',
               show_shapes=False,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=True,
               dpi=96)

    early_stopping = EarlyStopping(monitor="val_loss",
                                   patience=patience,
                                   verbose=1)

    model_checkpoint = ModelCheckpoint(MODELS_FILE_PATH,
                                       monitor="val_loss",
                                       save_best_only=True)
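    # With save_best_only=True, only the weights with the lowest val_loss are written to MODELS_FILE_PATH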

    if use_early_stopping:
        # Train model for 10 epochs before adding early stopping
        history = model.fit(
            X_train,
            y_train,
            epochs=10,
            batch_size=params["batch_size"],
            validation_split=0.25,
        )

        loss = history.history['loss']
        val_loss = history.history['val_loss']

        history = model.fit(X_train,
                            y_train,
                            epochs=params["n_epochs"],
                            batch_size=params["batch_size"],
                            validation_split=0.25,
                            callbacks=[early_stopping, model_checkpoint])

        loss += history.history['loss']
        val_loss += history.history['val_loss']

    else:
        history = model.fit(
            X_train,
            y_train,
            epochs=params["n_epochs"],
            batch_size=params["batch_size"],
            validation_split=0.25,
        )

        loss = history.history['loss']
        val_loss = history.history['val_loss']

        model.save(MODELS_FILE_PATH)

    TRAININGLOSS_PLOT_PATH.parent.mkdir(parents=True, exist_ok=True)

    print(f"Best model in epoch: {np.argmax(np.array(val_loss))}")

    n_epochs = range(len(loss))

    plt.figure()
    plt.plot(n_epochs, loss, label="Training loss")
    plt.plot(n_epochs, val_loss, label="Validation loss")
    plt.legend()
    plt.savefig(TRAININGLOSS_PLOT_PATH)
Example #8
# Reshape flat pixel vectors into 48x48 single-channel grayscale images
x_train = x_train.reshape(x_train.shape[0], 48, 48, 1)
x_val = x_val.reshape(x_val.shape[0], 48, 48, 1)

datagen = ImageDataGenerator(featurewise_center=False,
                             samplewise_center=False,
                             featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             rotation_range=0,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             horizontal_flip=True,
                             vertical_flip=True)

datagen.fit(x_train)
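# Note: datagen.fit() computes statistics only for the featurewise options,
# which are disabled above, so this call is effectively a no-op here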

emotion_classifier = model.dnn()
emotion_classifier.summary()

history = model.History()
tbCallBack = TensorBoard(log_dir=os.path.join('./', 'logs'),
                         write_graph=True,
                         write_images=False)
emotion_classifier.fit_generator(datagen.flow(x_train, y_train,
                                              batch_size=100),
                                 steps_per_epoch=len(x_train) // 100,
                                 epochs=80,
                                 validation_data=(x_val, y_val),
                                 callbacks=[history, tbCallBack])
dump_history('./', history)
emotion_classifier.save('model.h5')