def learn_model(x_train, y_train, x_test, y_test, take_components, save_path=None, do_pca=False):
    # PCA: reduce to the requested number of components / fraction of variance
    if do_pca:
        pca = PCA(n_components=take_components)
        print("Computing PCA features (n_components=" + str(take_components) + ")")
        previous_dims = len(x_train[0])
        x_train = pca.fit_transform(x_train)
        x_test = pca.transform(x_test)
        print(str(len(x_train[0])) + " dims are used from initially " + str(previous_dims))

    # expand dims
    x_train = np.expand_dims(x_train, axis=2)
    x_test = np.expand_dims(x_test, axis=2)

    # change label to categorical
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # build model
    model = Sequential()
    model.add(Conv1D(256, 8, padding='same', input_shape=(x_train.shape[1], 1)))
    model.add(Activation('relu'))
    model.add(Conv1D(256, 8, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.6))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(128, 8, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.6))
    model.add(Conv1D(64, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Conv1D(64, 8, padding='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.6))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.SGD(lr=0.001, momentum=0.9, decay=0.0),
                  metrics=['acc'])

    # fit network
    model.fit(x_train, y_train, batch_size=16, epochs=33)

    # evaluate model
    _, accuracy = model.evaluate(x_test, y_test)

    # save model
    if save_path is not None:
        model.save(save_path)

    return accuracy
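A minimal usage sketch for learn_model (the variable and file names here are illustrative, not from the original): passing a float for take_components makes PCA keep enough components to cover that fraction of the variance.

# Hypothetical call; x_tr/x_te are (n_samples, n_features) arrays, y_tr/y_te hold 0/1 labels.
acc = learn_model(x_tr, y_tr, x_te, y_te,
                  take_components=0.95,  # keep 95% of the variance
                  save_path='cnn1d_model.h5',
                  do_pca=True)
print('test accuracy:', acc)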
Example No. 2
def prueba_2():
	cantidad_twits = 10
	# load and preprocess the test tweets
	test = load_test()
	twits = preprocesing(test[:cantidad_twits, 0])
	print(f"\ntweets:\n{twits}")
	# define class labels
	labels = test[:cantidad_twits, 1].astype('float32')
	print(f"\nlabels:\n{labels}")
	# prepare tokenizer
	t = Tokenizer()
	t.fit_on_texts(twits)
	vocab_size = len(t.word_index) + 1
	# integer encode the documents
	encoded_twits = t.texts_to_sequences(twits)
	print(f"\nencoded_twits:\n{encoded_twits}")
	# pad documents to the length of the longest tweet
	max_length = max(len(seq) for seq in encoded_twits)
	padded_twits = pad_sequences(encoded_twits, maxlen=max_length, padding='post')
	print(f"\npadded_twits:\n{padded_twits}")

	# load the whole embedding into memory
	embeddings_index = dict()
	with open('fasttext.es.300.txt') as f:
		for line in f:
			values = line.split()
			word = values[0]
			coefs = np.asarray(values[1:], dtype='float32')
			embeddings_index[word] = coefs
	print('Loaded %s word vectors.' % len(embeddings_index))

	# create a weight matrix for words in training docs
	embedding_matrix = np.zeros((vocab_size, 300))
	for word, i in t.word_index.items():
		embedding_vector = embeddings_index.get(word)
		if embedding_vector is not None:
			embedding_matrix[i] = embedding_vector

	# define model
	model = Sequential()
	e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=max_length, trainable=False)
	model.add(e)
	model.add(Flatten())
	model.add(Dense(1, activation='sigmoid'))
	# compile the model
	model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
	# summarize the model
	print(model.summary())
	# fit the model
	model.fit(padded_twits, labels, epochs=50, verbose=0)
	# evaluate the model
	loss, accuracy = model.evaluate(padded_twits, labels, verbose=0)
	print('Accuracy: %f' % (accuracy * 100))
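To score unseen tweets with this setup, the fitted tokenizer and the same max_length must be reused; a hedged sketch, assuming t, max_length, and model from prueba_2 are still in scope (the sample strings are illustrative):

new_tweets = ['que buen dia', 'esto es terrible']
seqs = pad_sequences(t.texts_to_sequences(new_tweets), maxlen=max_length, padding='post')
print(model.predict(seqs))  # sigmoid scores in [0, 1]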
Example No. 3
class DNN(BaseTrainer):
    @Decorator.log(True)
    def load_data(self, **kwargs):
        print(kwargs)
        self.traindata = pd.read_csv(kwargs['train'], header=None)
        self.testdata = pd.read_csv(kwargs['test'], header=None)
        X = self.traindata.iloc[:, 1:42]
        Y = self.traindata.iloc[:, 0]
        C = self.testdata.iloc[:, 0]
        T = self.testdata.iloc[:, 1:42]
        trainX = np.array(X)
        testT = np.array(T)
        trainX = trainX.astype(float)
        testT = testT.astype(float)
        scaler = Normalizer().fit(trainX)
        trainX = scaler.transform(trainX)
        scaler = Normalizer().fit(testT)
        testT = scaler.transform(testT)

        self.y_train = np.array(Y)
        self.y_test = np.array(C)

        self.X_train = np.array(trainX)
        self.X_test = np.array(testT)

    def train(self):
        batch_size = 64
        nb_epoch = 100
        if self.has_train:
            nb_epoch = nb_epoch - self.epoch
            print('new epoch', nb_epoch)
            self.model.fit(self.X_train,
                           self.y_train,
                           batch_size=batch_size,
                           epochs=nb_epoch,
                           callbacks=[self.checkpointer, self.csv_logger])
        else:
            # 1. define the network
            self.model = Sequential()
            self.model.add(Dense(1024, input_dim=41, activation='relu'))
            self.model.add(Dropout(0.01))
            self.model.add(Dense(1))
            self.model.add(Activation('sigmoid'))
            self.model.compile(loss='binary_crossentropy',
                               optimizer='adam',
                               metrics=['accuracy'])

            self.model.fit(self.X_train,
                           self.y_train,
                           batch_size=batch_size,
                           epochs=nb_epoch,
                           callbacks=[self.checkpointer, self.csv_logger])
            self.model.save("./dnn1layer_model.hdf5")
        score, acc = self.model.evaluate(self.X_test, self.y_test)
        print('Test score:', score)
        print('Test accuracy', acc)
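Worth noting: sklearn's Normalizer rescales each row to unit norm independently, so fit learns nothing and fitting it separately on train and test (as above) is harmless. A quick check:

import numpy as np
from sklearn.preprocessing import Normalizer

rows = np.array([[3.0, 4.0], [1.0, 0.0]])
norm = Normalizer().fit(rows)        # fit is a no-op for this stateless transformer
print(norm.transform(rows))          # each row scaled to unit L2 norm: [[0.6 0.8] [1. 0.]]
print(norm.transform([[0.0, 5.0]]))  # rows are scaled independently of the fit data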
Example No. 4
def train_net(steps, epochs):
    # Get images
    X = []
    for filename in os.listdir('./color_images/Train/'):
        X.append(img_to_array(load_img('./color_images/Train/' + filename)))
        # print(filename)
    X = np.array(X, dtype=float)
    # Set up training and test data
    split = int(0.95 * len(X))
    Xtrain = X[:split]
    Xtrain = 1.0 / 255 * Xtrain
    # Design the neural network
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(8, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    # Finish model
    model.compile(optimizer='rmsprop', loss='mse')
    # Image transformer
    datagen = ImageDataGenerator(
        shear_range=0.2,
        zoom_range=0.2,
        rotation_range=20,
        horizontal_flip=True)
    # Generate training data
    batch_size = 50

    def image_a_b_gen(batch_size):
        for batch in datagen.flow(Xtrain, batch_size=batch_size):
            lab_batch = rgb2lab(batch)
            X_batch = lab_batch[:, :, :, 0]
            Y_batch = lab_batch[:, :, :, 1:] / 128
            yield (X_batch.reshape(X_batch.shape + (1,)), Y_batch)

    # Train model
    tensorboard = TensorBoard(log_dir='/output')
    model.fit_generator(image_a_b_gen(batch_size), steps_per_epoch=steps, epochs=epochs, callbacks=[tensorboard])
    # Test images
    Xtest = rgb2lab(1.0 / 255 * X[split:])[:, :, :, 0]
    Xtest = Xtest.reshape(Xtest.shape + (1,))
    Ytest = rgb2lab(1.0 / 255 * X[split:])[:, :, :, 1:]
    Ytest = Ytest / 128
    print(model.evaluate(Xtest, Ytest, batch_size=batch_size))
    model.save('./result/network.h5')
    del model
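To turn the network's output back into an RGB image, the predicted a/b channels must be rescaled by 128 and recombined with the L input. A hedged sketch, assuming a model trained by train_net and input dimensions divisible by 8 (the helper name is hypothetical):

from skimage.color import lab2rgb
import numpy as np

def colorize(model, l_channel):
    """l_channel: (h, w, 1) array of Lab L values, h and w divisible by 8."""
    ab = model.predict(l_channel[np.newaxis, ...])[0] * 128  # undo the /128 scaling
    return lab2rgb(np.concatenate([l_channel, ab], axis=-1))  # (h, w, 3) RGB in [0, 1]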
Example No. 5
def model(x_test, x_train, y_test, y_train):
    model = Sequential()
    
    model_choice = {{choice(['one', 'two'])}}
    if model_choice == 'one':
        model.add(Conv2D(64, kernel_size=3, activation='relu',padding='same', input_shape=(img_rows, img_cols, color_type)))
        model.add(Conv2D(128, kernel_size=3, activation='relu',padding='same'))
        model.add(MaxPooling2D(pool_size=2,strides=2))
        model.add(Dropout({{uniform(0, 1)}}))
        model.add(Conv2D(256, kernel_size=3, activation='relu'))
        model.add(Conv2D(512, kernel_size=3, activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=2,strides=2))
        model.add(Dropout({{uniform(0, 1)}}))
    elif model_choice == 'two':
        model.add(Conv2D(64, kernel_size=3, activation='relu',padding='same', input_shape=(img_rows, img_cols, color_type)))
        model.add(Conv2D(128, kernel_size=3, activation='relu',padding='same'))
        model.add(MaxPooling2D(pool_size=2,strides=2))
        model.add(Dropout({{uniform(0, 1)}}))
        model.add(Conv2D(256, kernel_size=3, activation='relu'))
        model.add(Conv2D(512, kernel_size=3, activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=2,strides=2))
        model.add(Dropout({{uniform(0, 1)}}))
    
    model.add(Flatten())
    model.add(Dense({{choice([256, 512,1024])}}, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout({{uniform(0, 1)}}))
    choiceval = {{choice(['one', 'two'])}}
    if choiceval == 'two':
        model.add(Dense({{choice([256, 512,1024])}}, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout({{uniform(0, 1)}}))
    
    model.add(Dense(10, activation='softmax'))
    
    adam = keras.optimizers.Adam(lr=0.001)
    
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
                  optimizer=adam)
    model.fit(x_train, y_train,
              batch_size=256,
              epochs=15,
              verbose=2,
              validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    print('Val accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
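The double-brace expressions ({{choice(...)}}, {{uniform(0, 1)}}) are hyperas template syntax, so this function is meant to be passed to hyperas rather than called directly. A typical driver, assuming a companion data() function that returns x_test, x_train, y_test, y_train:

from hyperas import optim
from hyperopt import Trials, tpe

# data is an assumed companion function matching the argument order of model().
best_run, best_model = optim.minimize(model=model, data=data,
                                      algo=tpe.suggest, max_evals=10, trials=Trials())
print('Best hyperparameters:', best_run)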
Example No. 6
def prueba_1():
	docs = ['Well done!',
			'Good work',
			'Great effort',
			'nice work',
			'Excellent!',
			'Weak',
			'Poor effort!',
			'not good',
			'poor work',
			'Could have done better.']
	# define class labels
	labels = np.array([1,1,1,1,1,0,0,0,0,0])

	# integer encode the documents
	vocab_size = 50
	encoded_docs = [one_hot(d, vocab_size) for d in docs]
	print(encoded_docs)

	# pad documents to a max length of 4 words
	max_length = 4
	padded_docs = pad_sequences(encoded_docs, maxlen=max_length, padding='post')
	print(padded_docs)

	# define the model
	model = Sequential()
	model.add(Embedding(vocab_size, 8, input_length=max_length))
	model.add(Flatten())
	model.add(Dense(1, activation='sigmoid'))
	# compile the model
	model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
	# summarize the model
	print(model.summary())

	# fit the model
	model.fit(padded_docs, labels, epochs=50, verbose=0)
	# evaluate the model
	loss, accuracy = model.evaluate(padded_docs, labels, verbose=0)
	print('Accuracy: %f' % (accuracy*100))
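One caveat: one_hot is a hashing trick, not a vocabulary lookup, so with vocab_size=50 two distinct words can collide. A quick illustration:

print(one_hot('good work', 50))  # two ids in [1, 50)
print(one_hot('poor work', 50))  # 'work' always hashes to the same id; unrelated words may collide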
Example No. 7
class AudioFeaturesModel:
    def __init__(self, model_name, le, layers):
        self.le = le
        self.model = Sequential(name=model_name)
        # Builds layers based on the structure in model_structures
        for layer in layers:
            self.model.add(layer)

    def compile(self):
        """Compile the model and print the structure"""
        self.model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
        self.model.summary()

    def test_model(self, x_data, y_data):
        """Calculate the model's accuracy on the input dataset"""
        score = self.model.evaluate(x_data, y_data, verbose=0)
        accuracy = 100 * score[1]
        return accuracy

    def train_model(self, x_train, y_train, x_val, y_val):
        """Train and save the model"""
        early_stopping = EarlyStopping(monitor='val_loss', patience=sounds_config.patience, mode='min')
        checkpointer = ModelCheckpoint(filepath=f'{sounds_config.sounds_model_dir}/{self.model.name}.hdf5', verbose=1,
                                       save_best_only=True)
        history = self.model.fit(x_train, y_train, batch_size=sounds_config.num_batch_size,
                                 epochs=sounds_config.num_epochs, validation_data=(x_val, y_val),
                                 callbacks=[checkpointer, early_stopping], verbose=1)
        self.le.save(self.model.name)
        return history

    def calculate_confusion_matrix(self, x_test, y_test):
        """Calculate the probabilities required for the confusion matrix and create a dataframe"""
        y_pred = self.model.predict_classes(x_test)
        y_test = argmax(y_test, axis=1)
        con_mat = confusion_matrix(labels=y_test, predictions=y_pred).numpy()
        con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)
        classes = self.le.inverse_transform(list(range(0, self.le.encoded_labels.shape[1])))
        return pd.DataFrame(con_mat_norm, index=classes, columns=classes)
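A minimal usage sketch, assuming a label encoder le with the save/inverse_transform interface used above and a layer list from the project's model_structures (these names come from the surrounding project, not from this snippet):

afm = AudioFeaturesModel('audio_cnn', le, layers)
afm.compile()
history = afm.train_model(x_train, y_train, x_val, y_val)
print('validation accuracy: %.2f%%' % afm.test_model(x_val, y_val))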
Example No. 8
    def trainnet(self):
        classifier = Sequential([
            layers.Dense(4,
                         activation='relu',
                         kernel_initializer='random_normal',
                         input_dim=8),
            layers.Dense(4,
                         activation='relu',
                         kernel_initializer='random_normal'),
            layers.Dense(1,
                         activation='sigmoid',
                         kernel_initializer='random_normal')
        ])
        # Compiling the neural network
        classifier.compile(optimizer='adam',
                           loss='binary_crossentropy',
                           metrics=['accuracy'])
        # Fitting the data to the training dataset
        classifier.fit(self.X_train, self.y_train, batch_size=10, epochs=100)
        eval_model = classifier.evaluate(self.X_train, self.y_train)
        print(eval_model)

        nethandler().savenet(model=classifier)
Example No. 9
class ImageFeaturesModel:
    def __init__(self, model_name, le, layers):
        self.le = le
        self.model = Sequential(name=model_name)

        for layer in layers:
            self.model.add(layer)

    def compile(self):
        self.model.compile(loss='categorical_crossentropy',
                           metrics=['accuracy'],
                           optimizer='adam')
        self.model.summary()

    def test_model(self, x_data, y_data):
        score = self.model.evaluate(x_data, y_data, verbose=0)
        accuracy = 100 * score[1]
        return accuracy

    def train_model(self, x_train, y_train, x_val, y_val):
        early_stop = EarlyStopping(monitor='val_loss',
                                   mode='min',
                                   verbose=1,
                                   patience=5)
        checkpointer = ModelCheckpoint(filepath=f'{self.model.name}.hdf5',
                                       verbose=1,
                                       save_best_only=True)
        history = self.model.fit(x_train,
                                 y_train,
                                 batch_size=config.num_batch_size,
                                 epochs=config.num_epochs,
                                 validation_data=(x_val, y_val),
                                 callbacks=[early_stop, checkpointer],
                                 verbose=1)
        self.le.save(self.model.name)
        return history
Example No. 10
def test_crf_viterbi_accuracy(get_random_data):
    nb_samples = 2
    timesteps = 10
    embedding_dim = 4
    output_dim = 5
    embedding_num = 12

    crf_loss_instance = ConditionalRandomFieldLoss()

    x, y = get_random_data(nb_samples,
                           timesteps,
                           x_high=embedding_num,
                           y_high=output_dim)
    # right padding; left padding is not supported by tf.contrib.crf
    x[0, -4:] = 0

    # test with masking, fix length
    model = Sequential()
    model.add(
        Embedding(embedding_num,
                  embedding_dim,
                  input_length=timesteps,
                  mask_zero=True))
    model.add(CRF(output_dim, name="crf_layer"))
    model.compile(optimizer='rmsprop',
                  loss={"crf_layer": crf_loss_instance},
                  metrics=[crf_viterbi_accuracy])

    model.fit(x, y, epochs=1, batch_size=10)

    # test viterbi_acc
    y_pred = model.predict(x)
    _, v_acc = model.evaluate(x, y)
    np_acc = (y_pred[x > 0] == y[x > 0]).astype('float32').mean()
    print(v_acc, np_acc)
    assert np.abs(v_acc - np_acc) < 1e-4
Example No. 11
    model.add(Embedding(len(vocab), args.embedding_size, input_length=max_answer_len))
    model.add(Dropout(args.dropout))
    if args.flatten:
        model.add(Flatten())
        model.add(Reshape((1, args.embedding_size * max_answer_len)))
    if args.lstm_dim_2:
        model.add(LSTM(args.lstm_dim_1, return_sequences=True))
        model.add(LSTM(args.lstm_dim_2, return_sequences=False))
    else:
        model.add(LSTM(args.lstm_dim_1, return_sequences=False))
    model.add(Dropout(args.dropout))
    model.add(Dense(1, activation="linear"))
    optimizer = AdamOptimizer()
    model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=['acc'])


    # Train the model
    model.fit(train_x, train_y, epochs=args.epochs, verbose=0)
    
    # Validate
    test_y = test_data.iloc[:, 0]
    test_x = test_data.iloc[:, 1:]
    score = model.evaluate(test_x, test_y, verbose=0)
    print(f"Validation_loss:{score[0]};Validation_accuracy:{score[1]};")

    ## --- End of your code  --- ##

    # Save the trained model
    result = tf.contrib.saved_model.save_keras_model(model, os.environ['SM_MODEL_DIR'])
    print(f"Save Keras Model result: {result}")
 
Example No. 12
    # x[train]
    # print(train)

    # 2. model frame config
    model = Sequential()
    model.add(Dense(20, input_dim=60, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    #model.add(Dense(1, activation='sigmoid'))

    # 3. model fitting config
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])


    # 4. model fitting
    #history = model.fit(x, t, epochs=100, batch_size=5, verbose=1)
    history = model.fit(x[mask_train], tf.keras.utils.to_categorical(t[mask_train]), epochs=200, batch_size=5, verbose=1)

    # 5. result
    result = model.evaluate(x[mask_test],tf.keras.utils.to_categorical(t[mask_test]), verbose=0)
    accuracies.append(result[1])

    #loss = history.history['loss']
    #result = model.evaluate(x, t, verbose=0)

    print(f'\n{nfold} fold accuracies : {accuracies}')
    #print(f'\n(Loss, Accuracy) = ({result[0]}, {result[1]})')

Example No. 13
test_images = test_images.astype('float32') / 255

train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten

model = Sequential()

model.add(
    Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))

from tensorflow.keras.optimizers import Adam
optimizer = Adam(lr=0.001)

model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()

model.fit(train_images, train_labels, epochs=5, batch_size=200)

test_loss, test_acc = model.evaluate(test_images, test_labels)

print('test_acc:', test_acc)
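A quick prediction sketch on the evaluated model, assuming test_images keeps the (n, 28, 28, 1) shape used above:

import numpy as np
probs = model.predict(test_images[:5])  # (5, 10) softmax outputs
print('predicted digits:', np.argmax(probs, axis=1))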
Example No. 14
class Predictioner:
    numpy.random.seed(1234)
    set_seed(1234)

    def __init__(self):
        self.model = Sequential()
        self.setup_default_model()
        self.compile_model()

    def save_model(self, path):
        self.model.save(path)

    def load_model(self, path):
        self.model = keras.models.load_model(path)

    def update_input(self, train_x, train_y):
        self.push_train_sets(train_x, train_y)
        self.y_scaler = MinMaxScaler()
        self.x_scaler = MinMaxScaler()
        self.reshape_train_sets()
        self.adjust_scalers()

    def push_train_sets(self, train_x, train_y):
        self.train_x = train_x
        self.train_y = train_y

    def reshape_train_sets(self):
        self.train_x = reshaper(self.train_x, self.train_x.shape[1])
        self.train_y = reshaper(self.train_y, 1)

    def adjust_scalers(self):
        self.train_x = self.x_scaler.fit_transform(self.train_x)
        self.train_y = self.y_scaler.fit_transform(self.train_y)

    def setup_default_model(self):
        self.model.add(Dense(30))
        self.model.add(Dense(90, activation='relu'))
        self.model.add(Dense(45, activation='relu'))
        self.model.add(Dense(20, activation='relu'))
        self.model.add(Dense(10, activation='relu'))
        self.model.add(Dense(1))

    def compile_model(self):
        self.model.compile(
            optimizer=keras.optimizers.Adam(),
            loss=keras.losses.mean_squared_error,
            metrics=[
                keras.metrics.mean_squared_error,
                keras.metrics.mean_squared_logarithmic_error,
                keras.metrics.mean_absolute_percentage_error,
                keras.metrics.mean_absolute_error,
            ]
        )

    def fit_model(self, verbose=0):
        self.model.fit(
            self.train_x,  # [:int(len(self.train_x) * 0.66)],
            self.train_y,  # [:int(len(self.train_y) * 0.66)],
            epochs=300,
            batch_size=10,
            verbose=verbose,
            # validation_data=(self.train_y[int(len(self.train_x) * 0.66):],
            #                 self.train_x[int(len(self.train_x) * 0.66):])
        )

    def evaluate(self, x_test, y_test):
        return self.model.evaluate(x_test, y_test, batch_size=12, verbose=1)

    def predict(self, prediction_interval_x):
        prediction_interval_x = self.x_scaler.transform(prediction_interval_x)
        predicted_y = self.model.predict(prediction_interval_x)

        self.x_plot = self.x_scaler.inverse_transform(self.train_x)
        self.y_plot = self.y_scaler.inverse_transform(self.train_y)
        self.x_pred_plot = self.x_scaler.inverse_transform(prediction_interval_x)
        self.y_pred_plot = self.y_scaler.inverse_transform(predicted_y)
        return self.y_pred_plot

    def visualize(self):
        pyplot.scatter(self.x_pred_plot, self.y_pred_plot, label='Predicted')
        pyplot.scatter(self.x_plot, self.y_plot, label='Actual')
        pyplot.title('Input (x) versus Output (y)')
        pyplot.xlabel('Input Variable (x)')
        pyplot.ylabel('Output Variable (y)')
        pyplot.legend()
        pyplot.show()
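A usage sketch for this class, assuming 1-D input/output series (reshaper and the data variables belong to the surrounding project; treat the names as hypothetical):

p = Predictioner()
p.update_input(train_x, train_y)  # scales and reshapes internally
p.fit_model(verbose=0)
y_hat = p.predict(prediction_interval_x)
p.visualize()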
Example No. 15
          padding='same', activation='relu'))
model.add(layers.MaxPooling1D(pool_size=2))
model.add(layers.LSTM(100, recurrent_activation='sigmoid'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])
model.summary()

filename = os.path.join(current_dir, 'data', 'complain_model.h5')
is_training = False
if is_training:
    model.fit(X_train, Y_train, validation_data=(
        X_test, Y_test), epochs=20, batch_size=64)

    # Evaluate the model
    scores = model.evaluate(X_test, Y_test, verbose=0)
    print("Evaluation Accuracy: %.2f%%" % (scores[1]*100))
    model.save(filename, save_format='tf')
else:
    model.load_weights(filename)

t1 = time.time()

lstm_upstream = tf.keras.Model(
    inputs=model.input, outputs=model.get_layer('max_pooling1d').output)
lstm_input = lstm_upstream.predict(X_test, batch_size=8)
# print(lstm_input.shape)

num_records = lstm_input.shape[0]
quantized_lstm_input = quanti_convert_float_to_int16(
    lstm_input.reshape(num_records * 25*32), in_pos).reshape((num_records, 25*32))
Example No. 16
iris_train_input, iris_test_input, iris_train_output, iris_test_output = train_test_split(input_data, output_data, test_size=0.20)

# Build the model with Keras
iris_model = Sequential()
iris_model.add(Dense(5,input_shape=(4,),activation="relu"))
iris_model.add(Dense(24,activation="relu"))
iris_model.add(Dense(3,activation="softmax"))

sgd = SGD(lr=0.001)

iris_model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy",metrics.mae])
iris_model.fit(x=iris_train_input, y=iris_train_output, batch_size=10, epochs=500, verbose=1)


# Evaluate on the test data
evaluation_results = iris_model.evaluate(iris_test_input, iris_test_output)

print("Loss: {}".format(evaluation_results[0]))
print("Accuracy: {}".format(evaluation_results[1]))
print("Mean Absolute Error: {}".format(evaluation_results[2]))

# Test 
test = np.array([[5.1,3.5,1.4,0.2], [5.9,3.,5.1,1.8], [4.9,3.,1.4,0.2], [5.8,2.7,4.1,1.]])
predictions = iris_model.predict(test)
index_max_predictions = np.argmax(predictions,axis=1)

for i, class_index in enumerate(index_max_predictions):
    print("Iris with features {} belongs to class: {}".format(
        test[i],
        iris_label_array[class_index]))
Example No. 17
from tensorflow.python.keras.layers import InputLayer
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.constraints import maxnorm
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state= 1234)

model = Sequential()
model.add(Dense(1024, input_dim=18098, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(rate=0.2))
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(256, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(128, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(64, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(32, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(16, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss = "binary_crossentropy", optimizer = 'adam', metrics=['accuracy'])

history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=40, batch_size=50)

# evaluate after training; each call returns [loss, accuracy]
train_acc = model.evaluate(X_train, y_train, verbose=0)
test_acc = model.evaluate(X_test, y_test, verbose=0)
Example No. 18
model.add(Dense(1, activation='relu'))
# compile the keras model
#just a test to change the learning rate
#we tried with SGD, it didn't work
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=[
                  'mse', 'mean_absolute_error',
                  'mean_absolute_percentage_error', 'cosine_proximity'
              ])
# fit the keras model on the dataset
#use first one for final model for production
#model.fit(X_train, y_train, epochs=50, batch_size=12)
model.fit(X_train, y_train, epochs=2000, batch_size=12)
#score
score = model.evaluate(X_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
#make a prediction
prediction1 = model.predict(XAle)

print(prediction1)

predplusnormalized = np.column_stack([XAle, prediction1[:, -1]])
inverted = scaler.inverse_transform(predplusnormalized)
result = inverted[:, 17]
print(result)

# RICKY TODO Save the model
import tensorflow as tf
model.save('C:\\Users\\Arturo A\\AguayDrenaje\\modeloDiario')
#RICKY Send export scalar to be used by flask later
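For the scaler export mentioned in the last comment, joblib is a common choice; a hedged sketch (the file name is illustrative):

import joblib
joblib.dump(scaler, 'scaler.joblib')  # persist the fitted scaler for the Flask service
# later, in the Flask app:
scaler = joblib.load('scaler.joblib')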
Example No. 19
model.add(Dense(1, activation='sigmoid'))  # sigmoid instead of relu for a final probability between 0 and 1

# compile the model, adam gradient descent (optimized)
model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=['accuracy'])

# call the function to fit to the data (training the network)
history = model.fit(x_train,
                    y_train,
                    epochs=1000,
                    batch_size=20,
                    validation_data=(x_test, y_test))

scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

plt.figure()
plt.isinteractive()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='best')

plt.figure()

plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
Example No. 20
# First hidden layer: 10 nodes with ReLU activation
# Second hidden layer: 6 nodes with ReLU activation
# Output layer: 1 node with sigmoid activation
# ReLU maps negative values to 0
# Sigmoid maps activations to values between 0 and 1
model = Sequential()
model.add(Dense(10, input_dim=8, activation='relu'))
model.add(Dense(6, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

# Compile the Keras model
# loss: binary cross-entropy
# optimizer: gradient descent via Adam
# report classification accuracy through the metrics argument
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Run the model
#
# Epoch: one pass over all rows in the training dataset.
# Batch: one or more samples considered by the model within an epoch before the weights are updated.
# One epoch consists of one or more batches
model.fit(X, y, epochs=150, batch_size=20)

# Evaluate the model
# evaluate() returns a list with two values:
# the first is the model's loss on the dataset, the second its accuracy on the dataset.
_, accuracy = model.evaluate(X, y)
print('Accuracy: %.2f' % (accuracy * 100))
Example No. 21
          input_dim=200,
          bias_initializer='one',
          kernel_regularizer=l2(0.0003),
          activation='tanh'),
    Dropout(0.4),
    Dense(units=10,
          input_dim=100,
          bias_initializer='one',
          kernel_regularizer=l2(0.0003),
          activation='softmax'),
])
sgd = SGD(lr=0.1)
model.compile(optimizer=sgd,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

#train
model.fit(x_train_data, y_train_data, batch_size=128, epochs=20)
model.save("01_tf_keras/sequential_model/weights/mnist_dnn.h5",
           include_optimizer=False,
           save_format='h5')

#evaluate of test data
loss, accuracy = model.evaluate(x_test_data, y_test_data)
print("test loss: ", loss)
print("test acc: ", accuracy)

#evaluate of train data
loss, accuracy = model.evaluate(x_train_data, y_train_data)
print("train loss:", loss)
print("train loss:", accuracy)
Example No. 22
class NeuralNetwork(object):
    def __init__(self):
        self.model = None

    def createModel(self):
        """Create and compile the keras model. See layers-18pct.cfg and 
           layers-params-18pct.cfg for the network model, 
           and https://code.google.com/archive/p/cuda-convnet/wikis/LayerParams.wiki 
           for documentation on the layer format.
        """
        self.model = Sequential()
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                input_shape=(32, 32, 3),
                                data_format="channels_last",
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.MaxPooling2D(pool_size=(3, 3),
                                      strides=(2, 2),
                                      padding='same'))
        self.model.add(
            keras.layers.BatchNormalization(
                axis=-1,  # data is channels_last, so normalize over the channel axis
                momentum=0.99,
                epsilon=0.001,
            ))
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.AveragePooling2D(pool_size=(3, 3),
                                          strides=(2, 2),
                                          padding='same'))
        self.model.add(
            keras.layers.BatchNormalization(axis=-1,
                                            momentum=0.99,
                                            epsilon=0.001))
        self.model.add(
            keras.layers.Conv2D(filters=32,
                                kernel_size=5,
                                strides=(1, 1),
                                padding='same',
                                dilation_rate=(1, 1),
                                activation=tf.nn.relu))
        self.model.add(
            keras.layers.AveragePooling2D(pool_size=(3, 3),
                                          strides=(2, 2),
                                          padding='same'))
        self.model.add(keras.layers.Flatten())
        self.model.add(keras.layers.Dense(10, activation=tf.nn.softmax))

        self.model.compile(optimizer=keras.optimizers.Adam(),
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])

    def train(self, train_data, train_labels, epochs):
        """Train the keras model
        
        Arguments:
            train_data {np.array} -- The training image data
            train_labels {np.array} -- The training labels
            epochs {int} -- The number of epochs to train for
        """

        self.model.fit(train_data, train_labels, epochs=epochs, batch_size=128)

    def evaluate(self, eval_data, eval_labels):
        """Calculate the accuracy of the model
        
        Arguments:
            eval_data {np.array} -- The evaluation images
            eval_labels {np.array} -- The labels for the evaluation images
        """

        return self.model.evaluate(eval_data, eval_labels)[1]

    def test(self, test_data):
        """Make predictions for a list of images and display the results
        
        Arguments:
            test_data {np.array} -- The test images
        """

        return self.model.predict(test_data)

    ## Exercise 7 Save and load a model using the keras.models API
    def saveModel(self, saveFile="model.h5"):
        """Save a model using the keras.models API
        
        Keyword Arguments:
            saveFile {str} -- The name of the model file (default: {"model.h5"})
        """

        keras.models.save_model(self.model, saveFile)

    def loadModel(self, saveFile="model.h5"):
        """Load a model using the keras.models API
        
        Keyword Arguments:
            saveFile {str} -- The name of the model file (default: {"model.h5"})
        """

        self.model = keras.models.load_model(saveFile)
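A usage sketch on CIFAR-10, which matches the (32, 32, 3) input shape and the sparse integer labels this model expects (the dataset choice is an assumption, not from the original):

(train_x, train_y), (test_x, test_y) = keras.datasets.cifar10.load_data()
net = NeuralNetwork()
net.createModel()
net.train(train_x / 255.0, train_y, epochs=5)
print('test accuracy:', net.evaluate(test_x / 255.0, test_y))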
Example No. 23
#create Model
model = Sequential()
model.add(Dense(22, input_dim=56, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

#Compile Model
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

#fit the model
model.fit(numerical_features, numerical_outcomes, epochs=150, batch_size=10)

#evaluate the model
scores = model.evaluate(numerical_features, numerical_outcomes)
print('\n%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))

#Ask Questions
age = int(input("What's the students age::"))
Medu = int(input("What's Medu::"))
Fedu = int(input("What's Fedu::"))
traveltime = int(input("What's traveltime::"))
studytime = int(input("What's studytime::"))
failures = int(input("How many failures::"))
famrel = int(input("What's famrel::"))
freetime = int(input("What's freetime::"))
goout = int(input("What's goout::"))
Dalc = int(input("What's Dalc::"))
Walc = int(input("What's Walc::"))
health = int(input("What's health::"))
Example No. 24
my_model.add(Dense(2, kernel_initializer='glorot_normal', kernel_regularizer=tf.keras.regularizers.l2(l=0.01)))

#Training model using multi-stage optimiser:
#Stage 1
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.005,beta1=0.85,beta2=0.95), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=100, verbose=VERBOSE, shuffle=True)

#Stage 2
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.001,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=150, verbose=VERBOSE, shuffle=True)# new

#Stage 3
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.0005,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=200, verbose=VERBOSE, shuffle=True)# new

#Stage 4
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.0001,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=250, verbose=VERBOSE, shuffle=True)# new
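Recompiling between stages resets the optimizer state while keeping the learned weights. The same stepped schedule can be written as a single fit call with a LearningRateScheduler callback; a hedged sketch, assuming the model is compiled once with a Keras-native optimizer such as tf.keras.optimizers.Adam (the TF1 tf.train.AdamOptimizer above does not expose the lr attribute the callback needs):

# Epochs 0-99 at 0.005, 100-249 at 0.001, 250-449 at 0.0005, then 0.0001.
def staged_lr(epoch):
    if epoch < 100: return 0.005
    if epoch < 250: return 0.001
    if epoch < 450: return 0.0005
    return 0.0001

my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=700,
             verbose=VERBOSE, shuffle=True,
             callbacks=[tf.keras.callbacks.LearningRateScheduler(staged_lr)])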

#Evaluate model on training data
print("evaluation on training data", my_model.evaluate(x=X_train, y=Y_train, batch_size=my_batch_size))

# Read testing data and convert target to one-hot for use with categorical cross entropy per documentation
ds = np.loadtxt(open(test_dataset_location),delimiter=',')
X_test = ds[:,0:30].astype(np.float64)
Y_test = ds[:,30].astype(int)
Y_test = tf.keras.utils.to_categorical(Y_test)

#Evaluate model on test data
print("evaluation on test data", my_model.evaluate(x=X_test, y=Y_test, batch_size=my_batch_size))
Example No. 25
def train_task(
    endpoint: str, bucket: str, data: str, epochs: int, batch_size: int
) -> NamedTuple('Model', [('filename', str), ('examples', str)]):
    """Train CNN model on MNIST dataset."""

    from pathlib import Path
    from tempfile import TemporaryFile
    from tensorflow.python import keras
    from tensorflow.python.keras import backend as K
    from tensorflow.python.keras import Sequential
    from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
    from tensorflow.python.keras.utils import to_categorical
    import numpy as np
    from minio import Minio

    mclient = Minio(
        endpoint,
        access_key=Path('/secrets/accesskey').read_text(),
        secret_key=Path('/secrets/secretkey').read_text(),
        secure=False,
    )

    with TemporaryFile('w+b') as outp:
        with mclient.get_object(bucket, data) as inp:
            outp.write(inp.read())
        outp.seek(0)
        mnistdata = np.load(outp)

        train_x = mnistdata['train_x']
        train_y = to_categorical(mnistdata['train_y'])
        test_x = mnistdata['test_x']
        test_y = to_categorical(mnistdata['test_y'])

    # For example purposes, we don't need the entire training set, just enough
    # to get reasonable accuracy
    train_x = train_x[:10000, :, :]
    train_y = train_y[:10000]

    num_classes = 10
    img_w = 28
    img_h = 28

    if K.image_data_format() == 'channels_first':
        train_x.shape = (-1, 1, img_h, img_w)
        test_x.shape = (-1, 1, img_h, img_w)
        input_shape = (1, img_h, img_w)
    else:
        train_x.shape = (-1, img_h, img_w, 1)
        test_x.shape = (-1, img_h, img_w, 1)
        input_shape = (img_h, img_w, 1)

    train_x = train_x.astype('float32')
    test_x = test_x.astype('float32')
    train_x /= 255
    test_x /= 255

    model = Sequential(
        [
            Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
            Conv2D(64, (3, 3), activation='relu'),
            MaxPooling2D(pool_size=(2, 2)),
            Dropout(0.25),
            Flatten(),
            Dense(128, activation='relu'),
            Dropout(0.5),
            Dense(num_classes, activation='softmax'),
        ]
    )

    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'],
    )

    model.fit(
        train_x,
        train_y,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(test_x, test_y),
    )

    score = model.evaluate(test_x, test_y)
    print('Test loss & accuracy: %s, %s' % tuple(score))

    model_name = 'model.h5'

    model.save(f'/output/{model_name}')

    mclient.fput_object(bucket, model_name, f'/output/{model_name}')

    examples = 'examples.npz'

    np.savez_compressed(
        f'/output/{examples}',
        **{
            'X': test_x[:10, :, :, :],
            'y': test_y[:10],
        },
    )

    mclient.fput_object(bucket, examples, f'/output/{examples}')

    return model_name, examples
Example No. 26
def train_task(data: InputBinaryFile(str), epochs: int, batch_size: int,
               model_path: OutputBinaryFile(str)):
    """Train CNN model on MNIST dataset."""

    from tensorflow.python import keras
    from tensorflow.python.keras import Sequential, backend as K
    from tensorflow.python.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
    import numpy as np

    mnistdata = np.load(data)

    train_x = mnistdata['train_x']
    train_y = keras.utils.to_categorical(mnistdata['train_y'])  # one-hot for categorical_crossentropy
    test_x = mnistdata['test_x']
    test_y = keras.utils.to_categorical(mnistdata['test_y'])

    num_classes = 10
    img_w = 28
    img_h = 28

    if K.image_data_format() == 'channels_first':
        train_x.shape = (-1, 1, img_h, img_w)
        test_x.shape = (-1, 1, img_h, img_w)
        input_shape = (1, img_h, img_w)
    else:
        train_x.shape = (-1, img_h, img_w, 1)
        test_x.shape = (-1, img_h, img_w, 1)
        input_shape = (img_h, img_w, 1)

    model = Sequential([
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(0.5),
        Dense(num_classes, activation='softmax'),
    ])

    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'],
    )

    model.fit(
        train_x,
        train_y,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        validation_data=(test_x, test_y),
    )

    score = model.evaluate(test_x, test_y)
    print('Test loss & accuracy: %s' % (score, ))

    model.save(model_path)
Example No. 27
model.add(Dense(10, input_dim=60, activation='relu'))
model.add(Dense(2, input_dim=10, activation='softmax'))
#model.add(Dense(1, input_dim=10, activation='sigmoid'))

# 3. model fitting config
#model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])  # when using softmax

# 4. model fitting
history = model.fit(train_x, train_t, epochs=100, batch_size=5, verbose=1)

# 5. result
loss = history.history['loss']
result = model.evaluate(train_x, train_t, verbose=0)
print(f'\n(Train Loss, Train Accuracy) = ({result[0]}, {result[1]})')

# 6. save model
model_directory = os.path.join(os.getcwd(), 'model')
if not os.path.exists(model_directory):
    os.mkdir(model_directory)

model.save(os.path.join(model_directory, 'model.h5'))

# 7. reload the saved model and check generalization on the test set
del model
model = load_model(os.path.join(model_directory, 'model.h5'))

result = model.evaluate(test_x, test_t, verbose=0)
print(f'\n(Test Loss, Test Accuracy) = ({result[0]}, {result[1]})')
Example No. 28
# preprocess data
X_normalized = np.array(X_train / 255.0 - 0.5)

from sklearn.preprocessing import LabelBinarizer

label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_train)

# compile and fit the model
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, epochs=15, validation_split=0.2)

# evaluate model against the test data
with open('small_test_traffic.p', 'rb') as f:
    data_test = pickle.load(f)

X_test = data_test['features']
y_test = data_test['labels']

# preprocess data
X_normalized_test = np.array(X_test / 255.0 - 0.5)
y_one_hot_test = label_binarizer.transform(y_test)  # reuse the binarizer fitted on the training labels

print("Testing")

metrics = model.evaluate(X_normalized_test, y_one_hot_test)
for metric_i in range(len(model.metrics_names)):
    metric_name = model.metrics_names[metric_i]
    metric_value = metrics[metric_i]
    print('{}: {}'.format(metric_name, metric_value))
Example No. 29
model.add(Dense(2, activation='softmax'))

# 3. model fitting config
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# 4. model check point config
model_directory = os.path.join(os.getcwd(), 'model')
if not os.path.exists(model_directory):
    os.mkdir(model_directory)

checkpoint = ModelCheckpoint(
    filepath=os.path.join(model_directory, '{epoch:03d}-{val_loss:.4f}.h5'),
    monitor='val_loss',         # options: val_loss, loss, val_accuracy, accuracy
    verbose=1,
    save_best_only=True
)

# 5. model fitting
model.fit(x, t, validation_split=0.2, epochs=200, batch_size=100, verbose=1, callbacks=[checkpoint])

# 6. result
result = model.evaluate(x, t, verbose=0)
print(f'\n(Loss, Accuracy)=({result[0]}, {result[1]})')

# 7. predict
data = np.array([[8.5, 0.21, 0.26, 9.25, 0.034, 73, 142, 0.9945, 3.05, 0.37, 11.4, 6]])
predict = model.predict(data)
index = np.argmax(predict)
wines = ['Red Wine', 'White Wine']
print(wines[index])
Example No. 30
             epochs=500,
             verbose=VERBOSE,
             shuffle=True)

#Stage 2:
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.0001,
                                                  beta1=0.9,
                                                  beta2=0.99),
                 loss=my_cat_crossentropy,
                 metrics=['accuracy'])
my_model.fit(x=X_train,
             y=Y_train,
             batch_size=my_batch_size,
             epochs=300,
             verbose=VERBOSE,
             shuffle=True)

#Evaluate model on Training Data:
print("evaluation on training data",
      my_model.evaluate(x=X_train, y=Y_train, batch_size=my_batch_size))

#Read the testing dataset and convert target to one-hot as needed for categorical cross entropy per documentation
ds = np.loadtxt(open(test_dataset_location), delimiter=',')
X_test = ds[:, 0:30].astype(np.float64)
Y_test = ds[:, 30].astype(int)
Y_test = tf.keras.utils.to_categorical(Y_test)

#Evaluate model on testing data:
print("evaluation on test data",
      my_model.evaluate(x=X_test, y=Y_test, batch_size=my_batch_size))