Example #1
def objective_pseudobs(x_train, neurons, drop, activation, lr_opt, optimizer,
                       n_layers):

    n_epochs = 100
    in_features = x_train.shape[1]
    model = Sequential()
    model.add(Dense(neurons, input_dim=in_features, activation=activation))
    model.add(BatchNormalization(epsilon=1e-05, momentum=0.1))
    model.add(Dropout(rate=drop))

    if n_layers > 1:
        model.add(Dense(neurons, activation=activation))
        model.add(BatchNormalization(epsilon=1e-05, momentum=0.1))
        model.add(Dropout(rate=drop))

    if n_layers == 3:
        model.add(Dense(neurons, activation=activation))
        model.add(BatchNormalization(epsilon=1e-05, momentum=0.1))
        model.add(Dropout(rate=drop))

    model.add(Dense(1, activation='sigmoid'))

    if optimizer == "rmsprop":
        optim = optimizers.RMSprop(learning_rate=lr_opt, rho=0.9)
        ca = [Callback()]

    elif optimizer == "adam":
        optim = optimizers.Adam(learning_rate=lr_opt,
                                beta_1=0.9,
                                beta_2=0.999,
                                amsgrad=False)
        ca = [Callback()]

    elif optimizer == "adam_amsgrad":
        optim = optimizers.Adam(learning_rate=lr_opt,
                                beta_1=0.9,
                                beta_2=0.999,
                                amsgrad=True)
        ca = [Callback()]

    elif optimizer == "sgdwr":
        optim = optimizers.SGD(momentum=0.9)
        n_cycles = n_epochs / 50
        ca = [CosineAnnealingLearningRateSchedule(n_epochs, n_cycles, 0.01)]

    model.compile(optimizer=optim, loss='mean_squared_error')

    return (model, ca)
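A hedged usage sketch for the function above, assuming x_train and y_train are NumPy arrays and that CosineAnnealingLearningRateSchedule is defined elsewhere in the project; the hyperparameter values are placeholders, not tuned results.

model, ca = objective_pseudobs(x_train, neurons=64, drop=0.2,
                               activation='relu', lr_opt=1e-3,
                               optimizer='adam', n_layers=2)
# n_epochs is fixed at 100 inside objective_pseudobs, so train for the same count
model.fit(x_train, y_train, epochs=100, batch_size=32, callbacks=ca)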
Example #2
    def train(self, X, Y, epoch_ypred=False, epoch_xtest=None):
        """ Fit the neural network model, save additional stats (as attributes) and return Y predicted values.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.

        Y : array-like, shape = [n_samples, 1]
            Response variables, where n_samples is the number of samples.

        Returns
        -------
        y_pred_train : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """

        # # If using Keras, set tf to 1 core
        # config = K.tf.ConfigProto(intra_op_parallelism_threads=8, inter_op_parallelism_threads=8, allow_soft_placement=True)
        # session = tf.Session(config=config)
        # K.set_session(session)

        # If batch-size is None:
        if self.batch_size is None:
            self.batch_size = len(X)

        self.model = Sequential()
        self.model.add(Dense(self.n_neurons, activation="sigmoid", input_dim=len(X.T)))
        self.model.add(Dense(1, activation="linear"))
        self.model.compile(optimizer=self.optimizer, loss=self.loss, metrics=["accuracy"])

        # If epoch_ypred is True, calculate ypred for each epoch
        if epoch_ypred is True:
            self.epoch = YpredCallback(self.model, X, epoch_xtest)
        else:
            self.epoch = Callback()

        # Fit
        self.model.fit(X, Y, epochs=self.n_epochs, batch_size=self.batch_size, verbose=self.verbose, callbacks=[self.epoch])

        layer1_weight = self.model.layers[0].get_weights()[0]
        layer1_bias = self.model.layers[0].get_weights()[1]
        layer2_weight = self.model.layers[1].get_weights()[0]
        layer2_bias = self.model.layers[1].get_weights()[1]

        # Not sure about the naming scheme (trying to match PLS)
        self.model.x_loadings_ = layer1_weight
        self.model.x_scores_ = np.matmul(X, self.model.x_loadings_) + layer1_bias
        self.model.y_loadings_ = layer2_weight
        self.xcols_num = len(X.T)
        # Percent variance explained per neuron; the earlier np.ones placeholder was dead code
        self.model.pctvar_ = sum(abs(self.model.x_scores_) ** 2) / sum(sum(abs(X) ** 2)) * 100
        y_pred_train = self.model.predict(X).flatten()

        # Storing X, Y, and Y_pred
        self.Y_pred = y_pred_train
        self.X = X
        self.Y = Y
        return y_pred_train
Example #3
    def train(self, X, Y, epoch_ypred=False, epoch_xtest=None):
        """ Fit the neural network model, save additional stats (as attributes) and return Y predicted values.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.

        Y : array-like, shape = [n_samples, 1]
            Response variables, where n_samples is the number of samples.

        Returns
        -------
        y_pred_train : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """

        # If batch-size is None:
        if self.batch_size is None:
            self.batch_size = len(X)

        self.model = Sequential()
        self.model.add(
            Dense(self.n_neurons, activation="sigmoid", input_dim=len(X.T)))
        self.model.add(Dense(len(Y[0]), activation="softmax"))
        self.model.compile(optimizer=self.optimizer,
                           loss=self.loss,
                           metrics=["accuracy"])

        # If epoch_ypred is True, calculate ypred for each epoch
        if epoch_ypred is True:
            self.epoch = YpredCallback(self.model, X, epoch_xtest)
        else:
            self.epoch = Callback()

        # Fit
        self.model.fit(X,
                       Y,
                       epochs=self.n_epochs,
                       batch_size=self.batch_size,
                       verbose=self.verbose,
                       callbacks=[self.epoch])
        y_pred_train = self.model.predict(X)

        # Storing X, Y, and Y_pred
        self.Y_pred = y_pred_train
        self.X = X
        self.Y = Y
        return y_pred_train
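A call sketch, assuming the owning class exposes the attributes that train() reads (n_neurons, optimizer, loss, n_epochs, batch_size, verbose); NeuralNetSoftmax is a purely illustrative name, not the class from the source.

nn = NeuralNetSoftmax(n_neurons=8, optimizer='adam',
                      loss='categorical_crossentropy',
                      n_epochs=200, batch_size=None, verbose=0)  # hypothetical class
y_pred = nn.train(X, Y)  # Y must be one-hot encoded: the output layer width is len(Y[0])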
Example #4
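# Note: Examples #4 and #5 target the Keras 1 API (W_regularizer, Model(input=..., output=...));
# Keras 2 renamed these to kernel_regularizer and Model(inputs=..., outputs=...).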
    def __init__(self, input, output, name='RDNN'):
        # super(Dnn, self).__init__()
        self.name = name
        features_1 = Input(shape=(input, ))

        e1 = Dense(6000,
                   input_dim=input,
                   activation='sigmoid',
                   W_regularizer=regularizers.l2(l=0.))(features_1)
        e1 = normalization.BatchNormalization()(e1)
        f = Dense(3500,
                  activation='sigmoid',
                  W_regularizer=regularizers.l2(l=0.))(e1)
        f = normalization.BatchNormalization()(f)
        out = Dense(output, activation='linear', name="Synsets")(f)
        out = CustomActivation(activation='custom_mixed_activation')(out)

        model = Model(input=features_1, output=out)
        self.check_stop = Callback()
        self.model = model
Example #5
    def __init__(self, input, output, name='DNN'):
        # super(Dnn, self).__init__()

        self.name = name
        model = Sequential()
        model.add(
            Dense(4000,
                  input_dim=input,
                  activation='sigmoid',
                  W_regularizer=regularizers.l2(l=0.)))
        # model.add(advanced_activations.LeakyReLU(alpha=0.1))
        model.add(normalization.BatchNormalization())
        model.add(
            Dense(2000,
                  activation='sigmoid',
                  W_regularizer=regularizers.l2(l=0.)))
        # model.add(advanced_activations.LeakyReLU(alpha=0.1))
        model.add(normalization.BatchNormalization())
        model.add(Dense(output, activation='sigmoid'))
        self.check_stop = Callback()
        self.model = model
Example #6
def model_fit(model, x, y, batch_size=1024, epochs=40):
    s = datetime.datetime.now()
    log = Callback()
    his = model.fit(
        x,
        y,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        callbacks=[log],
        validation_split=0.1,
    )
    t = datetime.datetime.now() - s
    t = str(t).split('.')[0]
    print('cost time {} '.format(t))  # 21:34:00
    tem = his.history
    # Take the final-epoch losses; indexing with len(tem) (the number of history keys) was off
    loss = str(tem['loss'][-1])[:5]
    val_loss = str(tem['val_loss'][-1])[:5]
    his_data = pd.DataFrame()
    his_data['loss'] = tem['loss']
    his_data['val_loss'] = tem['val_loss']
    return model, t, loss, val_loss, his_data
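A minimal usage sketch, assuming a compiled Keras model and NumPy arrays x and y:

model, t, loss, val_loss, his_data = model_fit(model, x, y,
                                               batch_size=512, epochs=10)
his_data.to_csv('history.csv', index=False)  # e.g. persist the loss curves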
Example #7
    def train(self):
        model = self.prepare_model()
        training_info = self.get_training_info()
        epoch_count = training_info['epoch']

        def on_epoch_end(epoch, log):
            nonlocal epoch_count
            epoch_count += 1
            self.store_training_info({
                'epoch': epoch_count,
                'val_acc': log['val_acc'],
                'train_loss': log['loss'],
                'val_loss': log['val_loss']
            })

        training_info_call_back = Callback()
        training_info_call_back.on_epoch_end = on_epoch_end

        model.fit(self.train_x, self.train_y,
                  batch_size=2, epochs=100,
                  validation_data=(self.test_x, self.test_y),
                  initial_epoch=epoch_count, callbacks=[ModelCheckpoint(self.model_weight), training_info_call_back])
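Monkey-patching on_epoch_end onto a bare Callback works, but keras.callbacks.LambdaCallback expresses the same idea directly; a minimal equivalent sketch for the two patching lines inside train():

from keras.callbacks import LambdaCallback

# Wraps the same closure; no monkey-patching needed
training_info_call_back = LambdaCallback(on_epoch_end=on_epoch_end)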
Example #8
tbCallBack = TensorBoard(
    log_dir='C:/Users/baseb/Desktop/MachineLearning1/Building Classifier CNN/logs/',
    histogram_freq=0,
    batch_size=1,
    write_graph=True,
    write_grads=False,
    write_images=True,
    embeddings_freq=0,
    embeddings_layer_names=None,
    embeddings_metadata=None)
a = adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy',
              optimizer=a,
              metrics=['accuracy'])
Callback()  # no-op: the bare base class does nothing and is never registered
earlyStop = EarlyStopping(monitor='loss',
                          min_delta=0,
                          patience=15,
                          verbose=2,
                          mode='auto')
callbacks_list = [tbCallBack, earlyStop]
model.fit(trainData,
          trainLabels,
          batch_size=1,
          epochs=300,
          validation_data=(testData, testLabels),
          verbose=2,
          callbacks=callbacks_list)

#convout1_f = theano.function([model.get_input(train=False)], convout1.get_output(train=False))
Example #9
    def train(self,
              X,
              Y,
              epoch_ypred=False,
              epoch_xtest=None,
              w1=False,
              w2=False):
        """ Fit the neural network model, save additional stats (as attributes) and return Y predicted values.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.

        Y : array-like, shape = [n_samples, 1]
            Response variables, where n_samples is the number of samples.

        Returns
        -------
        y_pred_train : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """

        # # If using Keras, set tf to 1 core
        # config = K.tf.ConfigProto(intra_op_parallelism_threads=8, inter_op_parallelism_threads=8, allow_soft_placement=True)
        # session = tf.Session(config=config)
        # K.set_session(session)

        # If batch-size is None:
        if self.batch_size is None:
            self.batch_size = len(X)
        self.X = X
        self.Y = Y

        # If epoch_ypred is True, calculate ypred for each epoch
        if epoch_ypred is True:
            self.epoch = YpredCallback(self.model, X, epoch_xtest)
        else:
            self.epoch = Callback()

        if not self.compiled:
            np.random.seed(self.seed)
            self.model = Sequential()
            self.model.add(
                Dense(self.n_neurons, activation="linear", input_dim=len(X.T)))
            self.model.add(Dense(1, activation="sigmoid"))
            self.model.compile(optimizer=self.optimizer,
                               loss=self.loss,
                               metrics=["accuracy"])
            self.model.w1 = self.model.layers[0].get_weights()
            self.model.w2 = self.model.layers[1].get_weights()
            self.compiled = True  # was '==': a comparison, so the flag was never actually set
        else:
            self.model.layers[0].set_weights(self.model.w1)
            self.model.layers[1].set_weights(self.model.w2)
        #print("Before: {}".format(self.model.layers[1].get_weights()[0].flatten()))
        # print("Before: {}".format(self.model.layers[1].get_weights()[0]))

        # 'is not False' avoids ambiguous truth-value errors when weights are arrays
        if w1 is not False:
            self.model.layers[0].set_weights(w1)
            self.model.w1 = w1
        if w2 is not False:
            self.model.layers[1].set_weights(w2)
            self.model.w2 = w2

        # Fit
        self.model.fit(X,
                       Y,
                       epochs=self.n_epochs,
                       batch_size=self.batch_size,
                       verbose=self.verbose)

        self.model.pctvar_ = pctvar_calc(self.model, X, Y)
        #print("After: {} .... {}".format(self.model.layers[1].get_weights()[0].flatten(), self.model.pctvar_))

        layer1_weight = self.model.layers[0].get_weights()[0]
        layer1_bias = self.model.layers[0].get_weights()[1]
        layer2_weight = self.model.layers[1].get_weights()[0]
        layer2_bias = self.model.layers[1].get_weights()[1]

        # Coef vip
        self.model.vip_ = garson(layer1_weight, layer2_weight.flatten())
        self.model.coef_ = connectionweight(layer1_weight,
                                            layer2_weight.flatten())

        # Not sure about the naming scheme (trying to match PLS)
        self.model.x_loadings_ = layer1_weight
        self.model.x_scores_ = np.matmul(X,
                                         self.model.x_loadings_) + layer1_bias
        self.model.x_scores_alt = self.model.x_scores_
        self.model.y_loadings_ = layer2_weight
        self.model.y_scores = np.matmul(self.model.x_scores_alt,
                                        self.model.y_loadings_) + layer2_bias
        y_pred_train = self.model.predict(X).flatten()

        # Sort by pctvar
        # if self.compiled == False:
        #     if w1 == False:
        #         if w2 == False:
        #             order = np.argsort(self.model.pctvar_)[::-1]
        #             self.model.x_scores_ = self.model.x_scores_[:, order]
        #             self.model.x_loadings_ = self.model.x_loadings_[:, order]
        #             self.model.y_loadings_ = self.model.y_loadings_[order]
        #             self.model.y_loadings_ = self.model.y_loadings_.T
        #             self.model.pctvar_ = self.model.pctvar_[order]
        #             self.model.w1[0] = self.model.w1[0][:, order]
        #             self.model.w2[0] = self.model.w2[0][order]
        #     self.compiled = True

        self.model.y_loadings_ = layer2_weight.T

        # Calculate pfi
        if self.pfi_nperm == 0:
            self.model.pfi_acc_ = np.zeros((1, len(Y)))
            self.model.pfi_r2q2_ = np.zeros((1, len(Y)))
            self.model.pfi_auc_ = np.zeros((1, len(Y)))
        else:
            pfi_acc, pfi_r2q2, pfi_auc = self.pfi(nperm=self.pfi_nperm,
                                                  metric=self.pfi_metric,
                                                  mean=self.pfi_mean)
            self.model.pfi_acc_ = pfi_acc
            self.model.pfi_r2q2_ = pfi_r2q2
            self.model.pfi_auc_ = pfi_auc

        self.Y_train = Y
        self.Y_pred_train = y_pred_train

        self.Y_pred = y_pred_train
        self.X = X
        self.Y = Y
        self.metrics_key = []
        self.model.eval_metrics_ = []
        bm = binary_evaluation(Y, y_pred_train)
        for key, value in bm.items():
            self.model.eval_metrics_.append(value)
            self.metrics_key.append(key)

        self.model.eval_metrics_ = np.array(self.model.eval_metrics_)

        return y_pred_train
Example #10
def build_model_pseudobs(x_train, neurons, drop, activation, lr_opt, optimizer,
                         n_layers, n_epochs):
    """ Define the structure of the neural network for models with pseudo-observations (optim, continuous, discrete, km)
    # Arguments
        x_train: input data as formated by the function "prepare_data"
        neurons: number of neurons per hidden layer in the neural network
        drop: dropout rate applied after each hidden layer
        activation: activation function applied after each hidden layer
        lr_opt: learning rate chosen for optimization
        optimizer: optimization algorithm 
        n_layers: number of hidden layers 
        n_epochs: number of epochs used for training the model
    # Returns
        model: keras model with the architecture defined previously
        callbacks: callbacks function   
    """
    in_features = x_train.shape[1]

    model = Sequential()
    model.add(Dense(neurons, input_dim=in_features, activation=activation))
    model.add(BatchNormalization(epsilon=1e-05, momentum=0.1))
    model.add(Dropout(rate=drop))

    if n_layers > 1:
        model.add(Dense(neurons, activation=activation))
        model.add(BatchNormalization(epsilon=1e-05, momentum=0.1))
        model.add(Dropout(rate=drop))

    if n_layers == 3:
        model.add(Dense(neurons, activation=activation))
        model.add(BatchNormalization(epsilon=1e-05, momentum=0.1))
        model.add(Dropout(rate=drop))

    model.add(Dense(1, activation='sigmoid'))

    if optimizer == "RMSprop":
        optim = optimizers.RMSprop(learning_rate=lr_opt, rho=0.9)
        callbacks = [Callback()]

    elif optimizer == "Adam":
        optim = optimizers.Adam(learning_rate=lr_opt,
                                beta_1=0.9,
                                beta_2=0.999,
                                amsgrad=False)
        callbacks = [Callback()]

    elif optimizer == "Adam_AMSGrad":
        optim = optimizers.Adam(learning_rate=lr_opt,
                                beta_1=0.9,
                                beta_2=0.999,
                                amsgrad=True)
        callbacks = [Callback()]

    elif optimizer == "SGDWR":
        optim = optimizers.SGD(momentum=0.9)
        n_cycles = n_epochs / 50
        callbacks = [
            CosineAnnealingLearningRateSchedule(n_epochs, n_cycles, 0.01)
        ]

    model.compile(optimizer=optim, loss='mean_squared_error')
    return (model, callbacks)
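A minimal call sketch, assuming x_train comes from prepare_data and that CosineAnnealingLearningRateSchedule is defined in the same module; the hyperparameter values are placeholders.

model, callbacks = build_model_pseudobs(x_train, neurons=32, drop=0.1,
                                        activation='relu', lr_opt=1e-3,
                                        optimizer='Adam', n_layers=2,
                                        n_epochs=100)
# y_pseudo: pseudo-observation targets in [0, 1]
model.fit(x_train, y_pseudo, epochs=100, callbacks=callbacks)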
Example #11
def main(batch_size=32,
         eval_interval=10,
         epochs=100000,
         image_size=224,
         loss_threshold=-0.1,
         color_space='yuv',
         train_data_dir='/mnt/bolbol/raw-data/train',
         log_dir='logs',
         models_save_dir='coloring_models',
         colored_images_save_dir='colored_images',
         vgg=False,
         feature_extractor_model_path=None,
         train_feature_extractor=False,
         classifier=False,
         populate_batches=1000,
         scale_factor=9.,
         colorizer_model_path=None,
         include_target_image=False):
    """ Train Wasserstein gan to colorize black and white images """
    ''' Prepare data generators '''
    image_generator = ImageDataGenerator().flow_from_directory(
        directory=train_data_dir,
        interpolation='bilinear',
        target_size=(image_size, image_size),
        batch_size=batch_size,
        color_mode='rgb',
        class_mode=None)
    data_mapper, class_weights = get_mapping_with_class_weights(
        classifier=classifier,
        color_space=color_space,
        image_generator=image_generator,
        image_size=image_size,
        nb_batches=populate_batches,
        scale_factor=scale_factor,
        calculate_weights=False)
    gray_image_generator = ImageGenerator(
        rgb_generator=image_generator,
        input_processing_function=data_mapper.rgb_to_colorizer_input)
    real_image_generator = ImageGenerator(
        rgb_generator=image_generator,
        input_processing_function=data_mapper.rgb_to_target_image)
    gray_with_target_generator = ImageGenerator(
        rgb_generator=image_generator,
        input_processing_function=data_mapper.rgb_to_colorizer_input,
        label_processing_function=data_mapper.rgb_to_colorizer_target)
    test_data_generator = ImageGenerator(
        rgb_generator=image_generator,
        input_processing_function=data_mapper.rgb_to_colorizer_input,
        label_processing_function=lambda x: x)
    ''' Prepare Models '''
    colorizer = get_colorizer(
        colorizer_model_path=colorizer_model_path,
        image_size=None,
        vgg=vgg,
        feature_extractor_model_path=feature_extractor_model_path,
        train_feature_extractor=train_feature_extractor,
        classifier=classifier,
        classes_per_pixel=data_mapper.nb_classes if classifier else 0)
    critic = get_critic(image_size=image_size)
    critic.compile(optimizer=Adam(lr=0.00001, beta_1=0.5, beta_2=0.9),
                   loss=wasserstein_loss)
    combined = get_combined_gan(
        classifier=classifier,
        class_to_color=data_mapper.class_to_color if classifier else None,
        generator=colorizer,
        critic=critic,
        image_size=image_size,
        include_colorizer_output=include_target_image)
    combined.compile(optimizer=Adam(lr=0.00001, beta_1=0.5, beta_2=0.9),
                     loss=[
                         wasserstein_loss,
                         'categorical_crossentropy' if classifier else 'mse'
                     ] if include_target_image else [wasserstein_loss])
    ''' View summary of the models '''
    print('\n\n\n\nColorizer:')
    colorizer.summary()
    print('\n\n\n\nCritic:')
    critic.summary()
    print('\n\n\n\nCombined:')
    combined.summary()

    logger = keras.callbacks.TensorBoard(
        log_dir=log_dir) if K.backend() == 'tensorflow' else Callback()
    gym = Gym(generator=colorizer,
              critic=critic,
              combined=combined,
              gray_image_generator=gray_image_generator,
              real_image_generator=real_image_generator,
              gray_with_target_generator=gray_with_target_generator,
              test_data_generator=test_data_generator,
              data_mapper=data_mapper,
              logger=logger,
              models_save_dir=models_save_dir,
              colored_images_save_dir=colored_images_save_dir,
              classifier=classifier)
    gym.train(loss_threshold=loss_threshold,
              eval_interval=eval_interval,
              epochs=epochs,
              include_target_image=include_target_image)
Example #12
K.clear_session()
np.random.seed(1337)
set_random_seed(1337)

labels = "bleach_with_non_chlorine, do_not_bleach, do_not_dryclean, do_not_tumble_dry, do_not_wash, double_bar, dryclean, low_temperature_tumble_dry, normal_temperature_tumble_dry, single_bar, tumble_dry, wash_30, wash_40, wash_60, wash_hand"
labels = labels.split(", ")
img2_shape = (360, 360, 3)
X_train, y_train, val_X_2, val_y_2 = get_features_and_labels_validation(
    img2_shape, val_size=0.2)
a = list(range(len(y_train)))
np.random.shuffle(a)
y_train = y_train[a]
X_train = X_train[a]

cb = Callback()
filepath = "weights-improvement-{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_hn_multilabel_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')
reduceLROnPlat = ReduceLROnPlateau(monitor='val_hn_multilabel_loss',
                                   factor=0.1,
                                   patience=3,
                                   verbose=1,
                                   mode='min',
                                   cooldown=2,
                                   min_lr=0.000000001)
erly_s = EarlyStopping(monitor='val_hn_multilabel_loss', patience=7)
cb_list = [cb, checkpoint, erly_s, reduceLROnPlat]
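A sketch of how cb_list would be consumed, assuming a compiled model whose metrics expose val_hn_multilabel_loss (otherwise the monitors above never fire):

model.fit(X_train, y_train,
          validation_data=(val_X_2, val_y_2),
          epochs=50, batch_size=32,
          callbacks=cb_list)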
Example #13
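# Note: this snippet uses the Keras 1 API (nb_filter, filter_length, border_mode, merge, nb_epoch);
# Keras 2 renamed these to filters, kernel_size, padding, concatenate, and epochs.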
paddinglayer = ZeroPadding1D(padding=half_window_size)(word_emb)
conv = Conv1D(nb_filter=50,
              filter_length=(2 * half_window_size + 1),
              border_mode='valid')(paddinglayer)
conv_d = Dropout(0.1)(conv)
dense_conv = TimeDistributed(Dense(50))(conv_d)
rnn_cnn_merge = merge([bilstm_d, dense_conv], mode='concat', concat_axis=2)

dense = TimeDistributed(Dense(nb_tag))(rnn_cnn_merge)
crf = ChainCRF()
crf_output = crf(dense)

model = Model(input=[word_input], output=[crf_output])

model.compile(loss=crf.sparse_loss,
              optimizer=RMSprop(0.00001),
              metrics=['sparse_categorical_accuracy'])

model.summary()

model.load_weights('model.weights')
mCallBack = Callback()
model.fit(X_train,
          Y_train,
          batch_size=batch_size,
          nb_epoch=10,
          callbacks=[mCallBack])

model.save_weights('model.weights')
Example #14
def train_model(model, data, config, include_tensorboard):
	model_history = History()
	model_history.on_train_begin()
	saver = ModelCheckpoint(full_path(config.model_file()), verbose=1, save_best_only=True, period=1)
	saver.set_model(model)
	early_stopping = EarlyStopping(min_delta=config.min_delta, patience=config.patience, verbose=1)
	early_stopping.set_model(model)
	early_stopping.on_train_begin()
	csv_logger = CSVLogger(full_path(config.csv_log_file()))
	csv_logger.on_train_begin()
	if include_tensorboard:
		tensorboard = TensorBoard(histogram_freq=10, write_images=True)
		tensorboard.set_model(model)
	else:
		tensorboard = Callback()  # no-op stand-in when TensorBoard logging is disabled

	epoch = 0
	stop = False
	while epoch <= config.max_epochs and not stop:
		epoch_history = History()
		epoch_history.on_train_begin()
		valid_sizes = []
		train_sizes = []
		print("Epoch:", epoch)
		for dataset in data.datasets:
			print("dataset:", dataset.name)
			model.reset_states()
			dataset.reset_generators()

			valid_sizes.append(dataset.valid_generators[0].size())
			train_sizes.append(dataset.train_generators[0].size())
			fit_history = model.fit_generator(dataset.train_generators[0],
				dataset.train_generators[0].size(), 
				nb_epoch=1, 
				verbose=0, 
				validation_data=dataset.valid_generators[0], 
				nb_val_samples=dataset.valid_generators[0].size())

			epoch_history.on_epoch_end(epoch, last_logs(fit_history))

			train_sizes.append(dataset.train_generators[1].size())
			fit_history = model.fit_generator(dataset.train_generators[1],
				dataset.train_generators[1].size(),
				nb_epoch=1, 
				verbose=0)

			epoch_history.on_epoch_end(epoch, last_logs(fit_history))

		epoch_logs = average_logs(epoch_history, train_sizes, valid_sizes)
		model_history.on_epoch_end(epoch, logs=epoch_logs)
		saver.on_epoch_end(epoch, logs=epoch_logs)
		early_stopping.on_epoch_end(epoch, epoch_logs)
		csv_logger.on_epoch_end(epoch, epoch_logs)
		tensorboard.on_epoch_end(epoch, epoch_logs)
		epoch += 1

		if early_stopping.stopped_epoch > 0:
			stop = True

	early_stopping.on_train_end()
	csv_logger.on_train_end()
	tensorboard.on_train_end({})
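Example #14 drives each callback by hand (set_model, on_train_begin, on_epoch_end, on_train_end), and the same contract applies to user subclasses; a minimal sketch of a custom Callback that would slot into this loop:

from keras.callbacks import Callback

class EpochPrinter(Callback):
    """Illustrative subclass: report the aggregated logs after each epoch."""
    def on_epoch_end(self, epoch, logs=None):
        print('epoch', epoch, logs or {})

# Driven manually like the other callbacks in train_model:
#   printer = EpochPrinter()
#   printer.set_model(model)
#   printer.on_epoch_end(epoch, epoch_logs)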
Example #15
    def train(self, X, Y, epoch_ypred=False, epoch_xtest=None):
        """ Fit the neural network model, save additional stats (as attributes) and return Y predicted values.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.

        Y : array-like, shape = [n_samples, 1]
            Response variables, where n_samples is the number of samples.

        Returns
        -------
        y_pred_train : array-like, shape = [n_samples, 1]
            Predicted y score for samples.
        """

        # If batch-size is None:
        if self.batch_size is None:
            self.batch_size = len(X)

        X1 = X[0]
        X2 = X[1]

        # Layer for X1
        input_X1 = Input(shape=(len(X1.T), ))
        layer1_X1 = Dense(self.n_neurons_l1, activation="linear")(input_X1)
        layer1_X1 = Model(inputs=input_X1, outputs=layer1_X1)

        # Layer for X2
        input_X2 = Input(shape=(len(X2.T), ))
        layer1_X2 = Dense(self.n_neurons_l1, activation="linear")(input_X2)
        layer1_X2 = Model(inputs=input_X2, outputs=layer1_X2)

        # Concatenate
        concat = concatenate([layer1_X1.output, layer1_X2.output])
        #model_concat = Dense(self.n_neurons_l2, activation="sigmoid")(concat)
        model_concat = Dense(1, activation="sigmoid")(concat)

        self.model = Model(inputs=[layer1_X1.input, layer1_X2.input],
                           outputs=model_concat)
        self.model.compile(optimizer=self.optimizer,
                           loss=self.loss,
                           metrics=["accuracy"])

        # If epoch_ypred is True, calculate ypred for each epoch
        if epoch_ypred is True:
            self.epoch = YpredCallback(self.model, X, epoch_xtest)
        else:
            self.epoch = Callback()

        # Fit
        self.model.fit([X1, X2],
                       Y,
                       epochs=self.n_epochs,
                       batch_size=self.batch_size,
                       verbose=self.verbose,
                       callbacks=[self.epoch])

        # Not sure about the naming scheme (trying to match PLS)
        y_pred_train = self.model.predict(X).flatten()

        # Storing X, Y, and Y_pred
        self.Y_pred = y_pred_train
        self.X = X
        self.Y = Y
        return y_pred_train
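A call sketch for this two-block variant, assuming an illustrative class name and two predictor blocks with the same number of rows:

nn2 = NeuralNetTwoBlock(n_neurons_l1=4, optimizer='adam',
                        loss='binary_crossentropy',
                        n_epochs=200, batch_size=None, verbose=0)  # hypothetical class name
y_pred = nn2.train([X1, X2], Y)  # X is a list of two blocks sharing n_samples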
Example #16
    plt.plot(1 + np.arange(len(val_mse)), val_mse)
    plt.title('mse', fontsize=18)
    plt.xlabel('time', fontsize=18)
    plt.ylabel('mse', fontsize=18)
    plt.legend(['Training', 'Validation'], loc='upper left')
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    plt.show()


epochs = 200
batch_size = 64
Nval = 200
control_val = True
save_training_tensorboard = False

callbacks = Callback()

if not control_val:
    history = model.fit(X_train,
                        y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=2)

else:
    acum_tr_mse = []
    acum_val_mse = []
    filepath = "./best_model.h5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_mse',
                                 verbose=2,