Example #1
 def __init__(self, x, keep_prob, num_class):
     self.x = x
     self.keep_prob = keep_prob
     self.num_class = num_class
     self.init = k.initializers.glorot_normal()
     self.regular = regularizers.l1_l2(l1=0.1, l2=0.5)
     self.trainable1 = True
     self.trainable2 = True
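This __init__ only stores the Glorot-normal initializer, the l1_l2 penalty, and two trainable flags; the layers that consume them are not part of the excerpt. A minimal sketch of how such attributes are typically wired into a layer (the dense_block name is hypothetical and `k` is assumed to alias keras, as in the snippet above):

 def dense_block(self, units):
     # Reuse the stored initializer and l1/l2 regularizer when building a Dense layer.
     return k.layers.Dense(units,
                           kernel_initializer=self.init,
                           kernel_regularizer=self.regular,
                           trainable=self.trainable1)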
Example #2
def getModel(conv=False):
    model = Sequential()

    model.add(embeddings.Embedding(63840, 64, input_length=1))

    model.add(Flatten())

    for i in range(1):
        model.add(Dense(64,
                        kernel_regularizer=regularizers.l1_l2(l1=0., l2=0.),
                        activity_regularizer=regularizers.l1_l2(l1=0., l2=0.),
                        bias_regularizer=regularizers.l1_l2(l1=0., l2=0.),
                        # kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=np.sqrt(2 / 128), seed=None),
                        # bias_initializer=initializers.RandomNormal(mean=0.0, stddev=np.sqrt(2 / 128), seed=None),
                        ))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    model.add(Dense(1))

    return model
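Every l1 and l2 factor in this example is set to 0., so the regularizers add no penalty; non-zero factors are what actually constrain the weights. The model is also returned uncompiled, so a compile step is needed before training. A short usage sketch (x_train / y_train are placeholders, not part of the original; x_train would hold integer ids below 63840 with shape (n, 1)):

model = getModel()
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])
# model.fit(x_train, y_train, epochs=10, batch_size=32)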
Example #3
    def __init__(self, input_size: Tuple, optimizer: Optimizer, loss, hidden_layers: Tuple = (3, 3, 1),
                 activation: str = 'relu', output_activation: str = 'relu',
                 dropout: float = 0., batch_normalization: bool = False,
                 weight_decay_l1: float = 0., weight_decay_l2: float = 0.):
        # define model
        self.hidden_layers = hidden_layers

        # create the model
        inputs = x_data = Input(shape=input_size)
        # hidden layers: every entry except the last, which is the output width
        for neurons in hidden_layers[:-1]:
            x_data = Dense(neurons, activation=activation,
                      kernel_regularizer=regularizers.l1_l2(l1=weight_decay_l1, l2=weight_decay_l2),
                      bias_regularizer=regularizers.l1_l2(l1=weight_decay_l1, l2=weight_decay_l2))(x_data)
            if dropout > 0.:
                x_data = Dropout(dropout)(x_data)
            if batch_normalization:
                x_data = BatchNormalization()(x_data)
        predictions = Dense(hidden_layers[-1], activation=output_activation)(x_data)
        self.model = Model(inputs=inputs, outputs=predictions)
        self.model.compile(optimizer=optimizer, loss=loss)
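In this constructor, hidden_layers[:-1] gives the hidden widths and hidden_layers[-1] the output width, so the default (3, 3, 1) builds two hidden layers of 3 units and a single output unit. The enclosing class name is not shown in the excerpt; a hedged instantiation sketch with an assumed class name and keras.optimizers.Adam:

# "RegressorNet" is an assumed name for the class that owns the __init__ above.
net = RegressorNet(input_size=(10,),
                   optimizer=Adam(lr=1e-3),
                   loss='mse',
                   hidden_layers=(32, 16, 1),
                   dropout=0.2,
                   weight_decay_l2=1e-4)
net.model.summary()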
Example #4
def get_model(embedding_size, input_length=1, layers=1, width=64):
    model = Sequential()

    model.add(
        embeddings.Embedding(embedding_size, width, input_length=input_length))

    model.add(Flatten())

    for i in range(layers):
        model.add(
            Dense(
                width,
                kernel_regularizer=regularizers.l1_l2(l1=0., l2=0.),
                activity_regularizer=regularizers.l1_l2(l1=0., l2=0.),
                bias_regularizer=regularizers.l1_l2(l1=0., l2=0.),
                kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                             stddev=np.sqrt(
                                                                 2 / 128),
                                                             seed=None),
                bias_initializer=initializers.RandomNormal(mean=0.0,
                                                           stddev=np.sqrt(2 /
                                                                          128),
                                                           seed=None),
            ))
        # model.add(BatchNormalization())
        model.add(Activation('relu'))

    model.add(Dense(1))

    adam = optimizers.Adam(lr=0.05)
    sgd = optimizers.SGD(lr=0.001, decay=1e-6)
    rms = optimizers.RMSprop()

    # model.compile(loss='mean_squared_error', optimizer=adam, metrics=['mse'])
    # model.compile(loss='mean_squared_error',
    #           optimizer=optimizers.Adam(lr=0.01),
    #           # optimizer=optimizers.Adam(lr=0.008),
    #           metrics=['mse'])
    return model
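Three optimizers are constructed here but every model.compile call is commented out, so get_model returns an uncompiled model and the adam/sgd/rms objects are discarded. A sketch of compiling the result with the Adam settings shown above (the embedding_size value is a placeholder):

model = get_model(embedding_size=10000)
model.compile(loss='mean_squared_error',
              optimizer=optimizers.Adam(lr=0.05),
              metrics=['mse'])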
Example #5
x = np.array([1])

for i in range(len(y)):
    y[i] = (y[i] - mean) / std
# mean = np.mean(y[0])
# std = np.std(y[0])
# y[0] = (y[0] - mean) / std

model = Sequential()

model.add(
    Dense(
        32,
        input_dim=1,
        activation='relu',
        kernel_regularizer=regularizers.l1_l2(l1=0., l2=0.),
        activity_regularizer=regularizers.l1_l2(l1=0., l2=0.),
        bias_regularizer=regularizers.l1_l2(l1=0., l2=0.),
        kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                     stddev=1,
                                                     seed=None),
        bias_initializer=initializers.RandomNormal(mean=0.0,
                                                   stddev=1,
                                                   seed=None),
    ))

model.add(
    Dense(
        380 * 168,
        kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                     stddev=1,
Example #6
    def fitting(self):

        timesteps = self.lags  # time steps
        features = 1  # features or channels (Volume)
        num_classes = 3  # 3 for categorical

        #data = np.random.random((1000, dim_row, dim_col))
        #clas = np.random.randint(3, size=(1000, 1))
        ##print(clas)
        #clas = to_categorical(clas)
        ##print(clas)
        data = self.X_train
        data_test = self.X_test
        print(data)

        data = data.values.reshape(len(data), timesteps, 1)
        data_test = data_test.values.reshape(len(data_test), timesteps, 1)
        print(data)

        clas = self.y_train
        clas_test = self.y_test
        clas = to_categorical(clas)
        clas_test = to_categorical(clas_test)

        cat0 = self.y_train.tolist().count(0)
        cat1 = self.y_train.tolist().count(1)
        cat2 = self.y_train.tolist().count(2)

        print("may: ", cat1, "  ", "menor: ", cat2, " ", "neutro: ", cat0)

        n_samples_0 = cat0
        n_samples_1 = (cat1 + cat2) / 2.0
        n_samples_2 = (cat1 + cat2) / 2.0

        class_weight = {
            0: 1.0,
            1: n_samples_0 / n_samples_1,
            2: n_samples_0 / n_samples_2
        }
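        # The weights above keep class 0 (neutral) at 1.0 and scale classes 1 and 2
        # by the ratio of neutral samples to the average count of the two minority
        # classes, so under-represented classes contribute more to the loss in fit().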

        def class_1_accuracy(y_true, y_pred):
            # taken from: http://www.deepideas.net/unbalanced-classes-machine-learning/
            class_id_true = K.argmax(y_true, axis=-1)
            class_id_preds = K.argmax(y_pred, axis=-1)

            accuracy_mask = K.cast(K.equal(class_id_preds, 1), 'int32')
            class_acc_tensor = K.cast(K.equal(class_id_true, class_id_preds),
                                      'int32') * accuracy_mask

            class_acc = K.sum(class_acc_tensor) / K.maximum(
                K.sum(accuracy_mask), 1)
            return class_acc

        class SecondOpinion(Callback):
            def __init__(self, model, x_test, y_test, N):
                self.model = model
                self.x_test = x_test
                self.y_test = y_test
                self.N = N
                self.epoch = 1

            def on_epoch_end(self, epoch, logs={}):
                if self.epoch % self.N == 0:
                    y_pred = self.model.predict(self.x_test)
                    pred_T = 0
                    pred_F = 0
                    for i in range(len(y_pred)):
                        if np.argmax(y_pred[i]) == 1 and np.argmax(
                                self.y_test[i]) == 1:
                            pred_T += 1
                        if np.argmax(y_pred[i]) == 1 and np.argmax(
                                self.y_test[i]) != 1:
                            pred_F += 1
                    if pred_T + pred_F > 0:
                        Pr_pos = pred_T / (pred_T + pred_F)
                        print("Yoe: epoch, Probabilidad pos: ", self.epoch,
                              Pr_pos)
                    else:
                        print("Yoe Probabilidad pos: 0")
                self.epoch += 1

#################################################################################################################

        model = Sequential()
        if self.nConv == 0:
            model.add(
                LSTM(units=self.lstm_nodes,
                     return_sequences=True,
                     activation='tanh',
                     input_shape=(timesteps, features),
                     kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))
        for i in range(self.nLSTM - 2):
            model.add(
                LSTM(units=self.lstm_nodes,
                     return_sequences=True,
                     activation='tanh',
                     kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)))
        model.add(LSTM(units=self.lstm_nodes, activation='tanh'))
        model.add(Dropout(0.5))
        model.add(
            Dense(num_classes, activation='softmax')
        )  # the dimension of index one will be considered to be the temporal dimension
        #model.add(Activation('sigmoid'))  # for loss = 'binary_crossentropy'

        # Slicing x as x[:, -1, :] removes the second dimension, keeping only
        # the LAST elements (-1) along that dimension:
        # Try this to see:
        # data = np.random.random((5, 3, 4))
        # print(data)
        # print(data[:, -1, :])

        #        model.add(Lambda(lambda x: x[:, -1, :], output_shape = [output_dim]))
        print(model.summary())

        tensorboard_active = False
        val_loss = False
        second_opinion = True
        callbacks = []
        if tensorboard_active:
            callbacks.append(
                TensorBoard(log_dir=self.putmodel + "Tensor_board_data",
                            histogram_freq=0,
                            write_graph=True,
                            write_images=True))
        if val_loss:
            callbacks.append(EarlyStopping(monitor='val_loss', patience=5))
        if second_opinion:
            callbacks.append(SecondOpinion(model, data_test, clas_test, 10))
        #model.compile(loss = 'categorical_crossentropy', optimizer='Adam', metrics = ['categorical_accuracy'])
        #model.compile(loss = 'binary_crossentropy', optimizer=Adam(lr=self.learning), metrics = ['categorical_accuracy'])
        model.compile(loss='categorical_crossentropy',
                      optimizer='Adam',
                      metrics=[class_1_accuracy])

        model.fit(x=data,
                  y=clas,
                  batch_size=self.batch_size,
                  epochs=800,
                  verbose=2,
                  callbacks=callbacks,
                  class_weight=class_weight)
        #validation_data=(data_test, clas_test))

        #####################################################################################################################

        # serialize model to YAML
        model_yaml = model.to_yaml()
        with open("model.yaml", "w") as yaml_file:
            yaml_file.write(model_yaml)
        # serialize weights to HDF5
        model.save_weights("model.h5")
        print("Saved model to disk")

        #        # load YAML and create model
        #        yaml_file = open('model.yaml', 'r')
        #        loaded_model_yaml = yaml_file.read()
        #        yaml_file.close()
        #        loaded_model = model_from_yaml(loaded_model_yaml)
        #        # load weights into new model
        #        loaded_model.load_weights("model.h5")
        #        print("Loaded model from disk")
        #        loaded_model.compile(loss = 'categorical_crossentropy', optimizer='Adam', metrics = [class_1_accuracy])
        #
        print("Computing prediction ...")
        y_pred = model.predict_proba(data_test)

        model.reset_states()
        print("Computing train evaluation ...")
        score_train = model.evaluate(data, clas, verbose=2)
        print('Train loss:', score_train[0])
        print('Train accuracy:', score_train[1])

        model.reset_states()
        #        score_train_loaded = loaded_model.evaluate(data, clas, verbose=2)
        #        loaded_model.reset_states()
        #        print('Train loss loaded:', score_train[0])
        #        print('Train accuracy loaded:', score_train[1])

        print("Computing test evaluation ...")
        score_test = model.evaluate(data_test, clas_test, verbose=2)
        print('Test loss:', score_test[0])
        print('Test accuracy:', score_test[1])

        model.reset_states()
        #        score_test_loaded = loaded_model.evaluate(data_test, clas_test, verbose=2)
        #        loaded_model.reset_states()
        #        print('Test loss loaded:', score_test[0])
        #        print('Test accuracy loaded:', score_test[1])

        pred_T = 0
        pred_F = 0
        for i in range(len(y_pred)):
            if np.argmax(y_pred[i]) == 1 and np.argmax(clas_test[i]) == 1:
                pred_T += 1
#                print(y_pred[i])
            if np.argmax(y_pred[i]) == 1 and np.argmax(clas_test[i]) != 1:
                pred_F += 1
        if pred_T + pred_F > 0:
            Pr_pos = pred_T / (pred_T + pred_F)
            print("Yoe Probabilidad pos: ", Pr_pos)
        else:
            print("Yoe Probabilidad pos: 0")

        history = DataFrame([[
            self.skip, self.nConv, self.nLSTM, self.learning, self.batch_size,
            self.conv_nodes, self.lstm_nodes, score_train[0], score_train[1],
            score_test[0], score_test[1]
        ]],
                            columns=('Skip', 'nConv', 'nLSTM', 'learning',
                                     'batch_size', 'conv_nodes', 'lstm_nodes',
                                     'loss_train', 'acc_train', 'loss_test',
                                     'acc_test'))
        self.history = self.history.append(history)
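Both model.to_yaml() and Sequential.predict_proba() come from older Keras releases and were removed in later TensorFlow/Keras versions; if this snippet is run against a newer release, JSON serialization and plain predict() are the usual replacements (a sketch under that assumption; the final softmax layer already makes predict() return class probabilities):

model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("model.h5")
y_pred = model.predict(data_test)  # per-class probabilities from the softmax output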
Example #7
if architecture == 0:
    #Overfit
    inputs = Input(shape=(9, ))
    layer1 = Dense(64, activation="relu")(inputs)
    layer2 = Dense(64, activation="relu")(layer1)
    outputs = Dense(1, activation="sigmoid")(layer2)
    epochnum = 256
    minimizer = "rmsprop"
    cost = "mean_squared_error"
elif architecture == 1:
    #Underfit
    inputs = Input(shape=(9, ))
    layer1 = Dense(64,
                   activation="relu",
                   activity_regularizer=regularizers.l1_l2(0.0001))(inputs)
    drop1 = Dropout(0.25)(layer1)
    layer2 = Dense(64,
                   activation="relu",
                   activity_regularizer=regularizers.l1_l2(0.0001))(drop1)
    drop2 = Dropout(0.25)(layer2)
    outputs = Dense(1, activation="sigmoid")(drop2)
    epochnum = 256
    minimizer = "nadam"
    cost = "mean_squared_error"
elif architecture == 2:
    #Overfit
    inputs = Input(shape=(9, ))
    layer1 = Dense(64, activation="relu")(inputs)
    layer2 = Dense(64, activation="relu")(layer1)
    outputs = Dense(1, activation="sigmoid")(layer2)
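In the architecture == 1 branch, regularizers.l1_l2(0.0001) passes its argument positionally, so it only overrides the l1 factor and leaves l2 at the function's default (0.01 in Keras 2). If both penalties are meant to be 0.0001, spelling the keywords out makes that explicit (a variant of the lines above, not the original code):

layer1 = Dense(64,
               activation="relu",
               activity_regularizer=regularizers.l1_l2(l1=0.0001, l2=0.0001))(inputs)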
Example #8
 def __init__(self, x):
     self.x = x
     self.init = k.initializers.glorot_normal()
     self.regular = regularizers.l1_l2(l1=0.1, l2=0.5)
     self.trainable1 = True
     self.trainable2 = True
Example #9
def get_deep_bidirectional(num_of_features,
                           reg1=0.01,
                           reg2=0.01,
                           neurons_conv=80,
                           neurons=19,
                           neurons2=20,
                           noise=0.3,
                           dropout=0.15,
                           lr=1.05,
                           rho=0.96):
    model = Sequential()
    model.add(InputLayer(input_shape=(num_of_features, )))

    model.add(Reshape(
        (1,
         num_of_features)))  # reshape into a 3D tensor (samples, 1, num_of_features)

    model.add(
        Conv1D(neurons_conv,
               1,
               activation="relu",
               input_shape=(1, num_of_features),
               padding="same",
               strides=1))

    #
    model.add(GaussianNoise(noise))
    # keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0) 58.77% (+/- 3.81%)
    # keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) 57.32% (+/- 3.70%)
    # keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) 57.91% (+/- 3.34%)
    # keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004) 57.83% (+/- 3.99%)

    optimizer = Adadelta(lr=lr, rho=rho, epsilon=1e-08, decay=0.0)
    model.add(
        Bidirectional(LSTM(neurons,
                           stateful=False,
                           activation="tanh",
                           consume_less='gpu',
                           unroll=True,
                           recurrent_regularizer=regularizers.l1_l2(
                               reg1, reg2),
                           return_sequences=True,
                           go_backwards=True),
                      batch_input_shape=(None, 1, num_of_features)))

    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(neurons2,
                 stateful=False,
                 activation='tanh',
                 consume_less='gpu',
                 unroll=True,
                 batch_input_shape=(None, 1, neurons),
                 recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
                 return_sequences=True,
                 go_backwards=True)))
    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(neurons2,
                 stateful=False,
                 activation='tanh',
                 consume_less='gpu',
                 unroll=True,
                 batch_input_shape=(None, 1, neurons),
                 recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
                 return_sequences=True,
                 go_backwards=True)))
    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(neurons2,
                 stateful=False,
                 activation='linear',
                 consume_less='gpu',
                 unroll=True,
                 batch_input_shape=(None, 1, neurons),
                 recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
                 return_sequences=True,
                 go_backwards=True)))
    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(neurons2,
                 stateful=False,
                 activation='relu',
                 consume_less='gpu',
                 unroll=True,
                 batch_input_shape=(None, 1, neurons),
                 recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
                 return_sequences=True,
                 go_backwards=True)))

    model.add(
        LSTM(1,
             stateful=False,
             activation='linear',
             consume_less='gpu',
             recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
             unroll=True,
             batch_input_shape=(None, 1, 18),
             go_backwards=True))
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=[r2_keras])
    print(model.get_config())
    print("Trained model: bidirectional")
    return model
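Note that consume_less is a Keras 1 argument (renamed implementation in Keras 2; newer releases may reject the legacy name), and l1_l2(reg1, reg2) passes both factors positionally, mapping reg1 to l1 and reg2 to l2. A hedged call sketch (num_of_features must match the width of the training data, and r2_keras has to be defined in the caller's scope, since compile references it):

model = get_deep_bidirectional(num_of_features=18)
# model.fit(x_train, y_train, epochs=50, batch_size=32)  # placeholders; x_train shape (n, 18)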