Example #1
def get_cnn_adv(input_data, num_labels):
    model = Sequential()
    div = 4
    model.add(Conv2D(64, kernel_size=(input_data.shape[0] // div, 1), activation='relu', input_shape=input_data.shape))
    model.add(Conv2D(128, kernel_size=(div, 8), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=KeyConstants.ELR)
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
Example #2
def sequential_nn_model(X_train, y_train):
    model = Sequential([
        Dense(100, activation='relu', input_shape=(X_train.shape[1], )),
        Dense(40, activation='relu'),
        Dense(20, activation='relu'),
        Dense(1, activation='relu')
    ])
    model.compile(optimizer='nadam',
                  loss=rmsle,
                  metrics=['mean_squared_logarithmic_error'])

    hist = model.fit(X_train, y_train, epochs=50)
    return model
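The rmsle loss referenced above is defined elsewhere in the source. A minimal sketch of such a loss, assuming Keras backend ops and non-negative targets:

import tensorflow.keras.backend as K

def rmsle(y_true, y_pred):
    # Root mean squared logarithmic error; the +1 guards against log(0)
    return K.sqrt(K.mean(K.square(K.log(y_pred + 1.0) - K.log(y_true + 1.0))))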
Example #3
def create_model():
    checkpoint = ModelCheckpoint('sdr_model.h5',
                                 monitor='accuracy',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks_list = [checkpoint]

    gray_data = np.load("npy_data/gray_dataset.npy")
    color_data = np.load("npy_data/color_dataset.npy")
    # img_pixel_dataset = np.load("npy_data/img_pixel_dataset.npy")
    label = np.load("npy_data/label.npy")

    dataset = pre_processing.npy_dataset_concatenate(gray_data, color_data)
    # corr_matrix = np.corrcoef(dataset)
    # print(corr_matrix)
    le = preprocessing.LabelEncoder()
    label = le.fit_transform(label)

    x_train, x_test, y_train, y_test = train_test_split(dataset,
                                                        label,
                                                        test_size=0.20,
                                                        shuffle=True)

    model = Sequential()
    model.add(Dense(14, input_dim=14, activation=None))
    model.add(Dense(128, activation='tanh'))
    model.add(Dense(256, activation='sigmoid'))
    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train,
              y_train,
              epochs=150,
              verbose=0,
              batch_size=20,
              shuffle=True,
              callbacks=callbacks_list)

    pred_y_test = np.argmax(model.predict(x_test), axis=1)  # predict_classes was removed from Keras

    acc_model = accuracy_score(y_test, pred_y_test)
    print("Prediction Acc model:", acc_model)
    print("Org. Labels:", y_test[:30])
    print("Pred Labels:", (pred_y_test[:30]))
    # c_report = classification_report(y_test, pred_y_test, zero_division=0)
    # print(c_report)
    print("\n\n")
Example #4
def get_pen_cnn(input_data, num_labels):
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=input_data.shape))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_labels, activation='softmax'))

    opt = Adam(learning_rate=.0003)

    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    return model
Example #5
    def __init__(self,
                 state_size: int,
                 action_size: int,
                 representation_size: int,
                 max_value: int,
                 hidden_neurons: int = 64,
                 weight_decay: float = 1e-4,
                 representation_activation: str = 'tanh'):
        self.state_size = state_size
        self.action_size = action_size
        self.value_support_size = math.ceil(math.sqrt(max_value)) + 1

        regularizer = regularizers.l2(weight_decay)
        representation_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(representation_size,
                  activation=representation_activation,
                  kernel_regularizer=regularizer)
        ])
        value_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(self.value_support_size, kernel_regularizer=regularizer)
        ])
        policy_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(action_size, kernel_regularizer=regularizer)
        ])
        dynamic_network = Sequential([
            Dense(hidden_neurons,
                  activation='relu',
                  kernel_regularizer=regularizer),
            Dense(representation_size,
                  activation=representation_activation,
                  kernel_regularizer=regularizer)
        ])
        reward_network = Sequential([
            Dense(16, activation='relu', kernel_regularizer=regularizer),
            Dense(1, kernel_regularizer=regularizer)
        ])

        super().__init__(representation_network, value_network, policy_network,
                         dynamic_network, reward_network)
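The square root in value_support_size appears to follow the MuZero convention of predicting values on a compressed categorical support. A quick worked example with a hypothetical max_value:

import math

max_value = 100
value_support_size = math.ceil(math.sqrt(max_value)) + 1  # ceil(10.0) + 1 = 11 support atoms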
Example #6
 def __init__(self,
              num_hidden,
              window,
              end_time,
              future_target_size,
              validation_ratio,
              validation_freq,
              effective_factor,
              mean_absolute_percentage_error=None,
              describe=None,
              epoc=10,
              metric_sender=None):
     self.__num_hidden = num_hidden
     self.__future_target = future_target_size
     self.__window = window
     self.__epochs = epoc
     self.__mean_absolute_percentage_error = mean_absolute_percentage_error
     self.__end_time = end_time
     self.__effective_factor = effective_factor
     self.__model = Sequential([
         LSTM(num_hidden,
              input_shape=(window, len(effective_factor))),
         Dense(self.__future_target)
     ])
     self.__model.compile(optimizer='adam', loss='mean_squared_error')
     self.__validation_ratio = validation_ratio
     self.__validation_freq = validation_freq
     self.__fit_model = ModelType.LSTM
     self.__describe = describe
     self.__metric_collector = MetricCollector(epochs=self.__epochs,
                                               metric_sender=metric_sender)
Example #7
def create_model(embeddings_file: str, input_size, input_length, hidden_size):
    """
    Create simple regression model with a single embedding layer.

    :param embeddings_file: embeddings file to load
    :param input_size: size of input layer (vocabulary size)
    :param input_length: length of the input sequences
    :param hidden_size: size of embeddings
    :return: Keras model
    """

    model = Sequential()
    model.add(
        Embedding(input_size,
                  hidden_size,
                  input_length=input_length,
                  name='embedding'))
    model.add(keras.layers.Lambda(lambda x: keras.backend.sum(x, axis=1)))
    #model.add(Flatten())
    model.add(Dense(92, activation="sigmoid"))

    if embeddings_file is not None:
        embeddings = np.loadtxt(embeddings_file)
        model.get_layer("embedding").set_weights([embeddings])

    #model.summary()
    return model
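A hedged usage sketch (the sizes below are hypothetical; an embeddings file, if given, must hold an input_size x hidden_size matrix loadable by np.loadtxt):

# Pass a file path instead of None to initialize the embedding layer with pretrained weights
model = create_model(None, input_size=5000, input_length=10, hidden_size=300)
model.compile(optimizer='adam', loss='binary_crossentropy')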
Example #8
def build_network():
    network = models.Sequential()
    network.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
    network.add(MaxPooling2D((2, 2)))
    network.add(Conv2D(64, (3, 3), activation='relu'))
    network.add(MaxPooling2D((2, 2)))
    network.add(Conv2D(64, (3, 3), activation='relu'))
    network.add(Flatten())
    network.add(Dense(64, activation='relu'))
    # network.add(Dense(32, activation='relu'))
    network.add(Dense(10, activation='softmax'))

    network.compile(optimizer='adam',
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])
    return network
Example #9
def get_cnn(input_data, num_labels):
    model = Sequential()
    model.add(Conv2D(16, kernel_size=2, activation='relu', input_shape=input_data.shape))
    model.add(Conv2D(64, kernel_size=3, activation='relu'))
    model.add(Conv2D(128, kernel_size=3, activation='relu'))
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
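Note that input_data.shape serves as the per-sample input shape, so input_data should be a single example, not a batch. A hedged usage sketch with hypothetical dimensions:

sample = np.zeros((40, 40, 1))          # one 40x40 single-channel example (hypothetical)
model = get_cnn(sample, num_labels=10)  # assumes LEARNING_RATE is defined in the enclosing module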
Example #10
    def __init__(self):
        super(FaceModel, self).__init__()
        self.base_model = InceptionResNetV2(
            input_shape=(IMG_SIZE, IMG_SIZE, 3),
            include_top=False,
            weights='imagenet',
            pooling='avg'
        )  # pooling_avg applies GlobalAveragePooling2D at the end

        # Map inceptionResnetV2 output to embeddings
        self.embedding_layer = Dense(EMB_SIZE)
Example #11
def create_model():
    model = Sequential()

    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    training_data_generator = ImageDataGenerator(
        rescale=1. / 255,
        shear_range=0.1,
        zoom_range=0.1,
        horizontal_flip=True)

    training_generator = training_data_generator.flow_from_directory(
        data_dir,
        target_size=(300, 300),
        batch_size=5,
        class_mode="binary")  # binary labels to match the Dense(1, sigmoid) head and binary_crossentropy

    model.fit(training_generator, epochs=3)  # fit_generator is deprecated; Model.fit accepts generators

    return model
Example #12
def get_stacked_cnn_lstm(input_data, num_labels):
    model = Sequential()
    model.add(TimeDistributed(Conv2D(16, kernel_size=3, activation='relu'), input_shape=input_data.shape))
    model.add(TimeDistributed(Conv2D(64, kernel_size=5, activation='relu')))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(units=2048))
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,  # use the Adamax optimizer built above; passing 'sgd' silently discarded it
                  metrics=['accuracy'])
    return model
Example #13
 def prepare_model(self, number_of_unique_output_values):
     self.model.add(
         CuDNNLSTM(self.first_lstm_layer_size,
                   batch_input_shape=(None, self.sequence_length, 1),
                   return_sequences=True))
     self.model.add(Dropout(self.dropout_rate))
     self.model.add(CuDNNLSTM(self.second_lstm_layer_size))
     self.model.add(Dropout(self.dropout_rate))
     self.model.add(Dense(number_of_unique_output_values))
     self.model.add(Activation('softmax'))
     self.model.compile(loss='categorical_crossentropy',
                        optimizer='adam',
                        metrics=['accuracy'])
Example #14
def get_clstm(input_data, num_labels):
    model = Sequential()
    model.add(ConvLSTM2D(filters=16, kernel_size=(3, 3),
                         input_shape=input_data.shape,
                         padding='same', return_sequences=True))
    model.add(BatchNormalization())
    model.add(ConvLSTM2D(filters=64, kernel_size=(5, 5),
                         padding='same', return_sequences=False))
    model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dense(num_labels, activation='softmax'))
    opt = Adamax(learning_rate=LEARNING_RATE)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
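ConvLSTM2D consumes sequences of frames, so here input_data.shape must be (timesteps, rows, cols, channels) for a single sample. A hedged sketch with hypothetical dimensions:

sample = np.zeros((10, 32, 32, 1))       # 10 frames of 32x32 single-channel data (hypothetical)
model = get_clstm(sample, num_labels=5)  # assumes LEARNING_RATE is defined in the enclosing module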
Example #15
def create_model(step: Tensorflow2ModelStep):
    """
    Create a TensorFlow v2 Multi-Layer-Perceptron Model.

    :param step: The base Neuraxle step for TensorFlow v2 (Tensorflow2ModelStep)
    :return: TensorFlow v2 Keras model
    """
    # shape: (batch_size, input_dim)
    inputs = Input(
        shape=(step.hyperparams['input_dim'],),
        batch_size=None,
        dtype=tf.dtypes.float32,
        name='inputs',
    )

    dense_layers = [
        Dense(units=step.hyperparams['hidden_dim'],
              kernel_initializer=step.hyperparams['kernel_initializer'],
              activation=step.hyperparams['activation'],
              input_shape=(step.hyperparams['input_dim'], ))
    ]

    hidden_dim = step.hyperparams['hidden_dim']
    for i in range(step.hyperparams['n_dense_layers'] - 1):
        hidden_dim *= step.hyperparams['hidden_dim_layer_multiplier']
        dense_layers.append(
            Dense(units=int(hidden_dim),
                  activation=step.hyperparams['activation'],
                  kernel_initializer=step.hyperparams['kernel_initializer']))

    outputs = inputs
    for layer in dense_layers:
        outputs = layer(outputs)  # chain the layers; applying each to `inputs` would keep only the last

    softmax_layer = Dense(step.hyperparams['n_classes'], activation='softmax')
    outputs = softmax_layer(outputs)

    return Model(inputs=inputs, outputs=outputs)
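A hedged sketch of the hyperparameter dictionary this step expects (key names are taken from the code above; the values are hypothetical):

hyperparams = {
    'input_dim': 10,
    'hidden_dim': 32,
    'hidden_dim_layer_multiplier': 1.0,
    'n_dense_layers': 2,
    'n_classes': 3,
    'activation': 'relu',
    'kernel_initializer': 'glorot_uniform',
}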
Example #16
    def _make_layers(self):
        # Create the model
        model = Sequential()

        model.add(
            Conv2D(32,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=(48, 48, 1)))
        model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(1024, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(7, activation='softmax'))
        return model
Example #17
def create_model(embeddings_file: str, input_size, hidden_size, output_size):
    """
    Create simple regression model with a single embedding layer.

    :param embeddings_file: embeddings file to load
    :param input_size: size of input layer
    :param hidden_size: size of embeddings
    :param output_size: size of output layer
    :return: Keras model
    """
    model = Sequential()
    model.add(
        Embedding(input_size, hidden_size, input_length=1, name='embedding'))
    if embeddings_file is not None:
        embeddings = np.loadtxt(embeddings_file)
        model.get_layer("embedding").set_weights([embeddings])
    model.add(Flatten())
    model.add(Dense(output_size, activation="sigmoid"))
    return model
Example #18
contexts = contexts[0:train_size]

vocab_size = np.amax(pairs) + 1

input_target = Input((1, ))
input_context = Input((1, ))
embedding = Embedding(vocab_size, vector_dim, input_length=1, name='embedding')
target = embedding(input_target)
target = Reshape((vector_dim, 1))(target)
context = embedding(input_context)
context = Reshape((vector_dim, 1))(context)
# now perform the dot product operation to get a similarity measure
dot_product = dot([target, context], axes=1)
dot_product = Reshape((1, ))(dot_product)
# add the sigmoid output layer
output = Dense(1, activation='sigmoid')(dot_product)

model = Model(inputs=[input_target, input_context], outputs=output)
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
model.summary()
history = model.fit(x=[targets, contexts],
                    y=labels,
                    batch_size=1,
                    epochs=1,
                    validation_split=0.0)

print("Saving the embeddings layer")

weights = model.get_layer("embedding").get_weights()[0]

np.savetxt(
Example #19
    model.add(MaxPool2D(2, 2))
    model.add(Dropout(0.2))

    model.add(
        Conv2D(256,
               3,
               padding="same",
               activation="relu",
               kernel_initializer="he_uniform"))
    model.add(Conv2D(256, 3, padding="same", activation="relu"))
    model.add(BatchNormalization())
    model.add(MaxPool2D(2, 2))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(200, activation="relu"))
    model.add(Dropout(0.5))
    #model.add(Dense(1,activation="sigmoid"))
    model.add(Dense(len(classes), activation="softmax"))

    model.compile(optimizer="adam",
                  loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    # fit_generator is deprecated in TF2; Model.fit accepts generators directly
    history = model.fit(train_generator,
                        steps_per_epoch=len(train_generator),
                        epochs=8,
                        verbose=1)
    model.save(id_path + "./00.h5")
else:
    model = keras.models.load_model(modelpath)
Example #20
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

model = tf.keras.models.Sequential()

#-------------MOD------------------------
model.add(
    Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))

model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(Dense(10, activation='softmax'))
#-------------------------------------
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=5)

model.evaluate(x_test, y_test, verbose=2)

print("Saving model to disk \n")
mp = "iris_model.h5"
model.save(mp)
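One refinement not in the original snippet: the raw 0-255 pixel values are fed to the network directly; scaling them to [0, 1] before fitting typically speeds up convergence:

x_train = x_train / 255.0
x_test = x_test / 255.0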
Example #21
# print(X_train.shape, X_val.shape, X_test.shape, y_train.shape, y_val.shape, y_test.shape)

# model = Sequential([
#     Dense(100, activation='relu', input_shape=(57,)),
#     Dense(40, activation='relu'),
#     Dense(20, activation='relu'),
#     Dense(1, activation='relu')
# ])
# model.compile(optimizer='adam',
#               loss='mean_squared_logarithmic_error',
#               metrics=['mean_squared_logarithmic_error'])

# hist = model.fit(X, y, epochs=100)
NNmodel = Sequential()

NNmodel.add(Dense(57, kernel_initializer='normal', activation='relu'))

NNmodel.add(Dense(100, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(40, kernel_initializer='normal', activation='relu'))
NNmodel.add(Dense(20, kernel_initializer='normal', activation='relu'))

NNmodel.add(Dense(1, kernel_initializer='normal', activation='relu'))

NNmodel.compile(loss='mean_squared_logarithmic_error',
                optimizer='adam',
                metrics=['mean_squared_logarithmic_error'])

df_test['weather_4'] = 0
df_test = df_test[[
    x for x in all_columns if x.startswith(tuple(train_columns))
]]
Example #22
model = Sequential()
model.add(
    LSTM(hidden_layer_size, activation=tf.nn.relu,
         return_sequences=True,
         input_shape=(X_train.shape[1], X_train.shape[2])),
)
model.add(
    Dropout(0.2)
)
model.add(
    LSTM(hidden_layer_size, activation=tf.nn.relu,
         return_sequences=True,
         input_shape=(X_train.shape[1], X_train.shape[2])),
)
model.add(
    Dropout(0.2)
)
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')

model.load_weights('models/deep_weights.rnn')

predicted = model.predict(X_val)
predicted2 = [undo_normalize(y, petr4_frame['Close']) for y in np.ravel(predicted)]
y_val2 = [undo_normalize(y, petr4_frame['Close']) for y in np.ravel(y_val)]

plt.plot(range(0, past_history), [undo_normalize(x, petr4_frame['Close']) for x in X_val[0]], label='history')
plt.plot(range(len(X_val[0]), len(X_val[0]) + past_history), predicted2, '*', label='predict')
plt.plot(range(len(X_val[0]), len(X_val[0]) + past_history), y_val2, 'o', label='real', alpha=0.5)
plt.legend()
plt.show()

print(model.evaluate(X_val, y_val, verbose=2))
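undo_normalize is defined elsewhere in the source; a plausible sketch, assuming the usual min-max scaling against the reference series:

def undo_normalize(value, series):
    # Hypothetical inverse of min-max normalization over `series`, e.g. petr4_frame['Close']
    return value * (series.max() - series.min()) + series.min()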
Example #23
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]

# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)

# Run training
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit([encoder_input_data, decoder_input_data],
          decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2)
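The comments above note that the decoder states are only needed at inference time. A hedged sketch of that inference setup, following the standard Keras seq2seq recipe and reusing the layers defined above:

# Encoder model: map an input sequence to its final LSTM states
encoder_model = Model(encoder_inputs, encoder_states)

# Decoder model: run one step at a time, fed the previous step's states
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs, state_h, state_c])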