Example #1
def entrenamiento_red2():
	cnn = Sequential()
	cnn.add(Convolution2D(filtrosConv1, tamanio_filtro1, padding='same', input_shape=(altura, longitud, 3), activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	cnn.add(Convolution2D(filtrosConv2, tamanio_filtro2, padding='same', activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	# The image now has many feature maps; flatten it
	cnn.add(Flatten()) 
	# After flattening, connect the fully-connected layers
	cnn.add(Dense(256,activation='relu'))
	# Dropout switches off 50% of the dense layer's neurons at each step;
	# this is done to prevent overfitting
	cnn.add(Dropout(0.5))
	# Connect to the output layer
	cnn.add(Dense(clases, activation='softmax'))
	# Parameters for optimizing the algorithm
	cnn.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=lr), metrics=['accuracy'])

	cnn.fit_generator(imagen_entrenamiento_red2, steps_per_epoch=pasos, epochs=epocas, validation_data=imagen_validacion_red2, validation_steps=pasos_validacion)
	dir='./modelo/red2/'

	if not os.path.exists(dir):
		os.mkdir(dir)
	cnn.save('./modelo/red2/modelo.h5')
	cnn.save_weights('./modelo/red2/pesos.h5')
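
The function stores both the full model and its weights. A minimal restore sketch for those two artifacts, assuming the same paths and a tf.keras install; load_model alone is enough, the weights file only matters when rebuilding the architecture by hand:

from tensorflow.keras.models import load_model

cnn = load_model('./modelo/red2/modelo.h5')  # architecture + weights + optimizer state
# Alternatively, rebuild the same architecture and apply the weights file alone:
# cnn.load_weights('./modelo/red2/pesos.h5')
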
Example #2
def train(train_X,
          train_y,
          num_of_features,
          classes,
          layer_det,
          id,
          num_epochs=30,
          batch_size=100):
    # Train a neural network
    # Saves a model to disk

    train_y = to_categorical(train_y)  #converts to one hot

    model = Sequential()
    model.add(Dense(layer_det[0], input_dim=num_of_features,
                    activation='relu'))
    model.add(layers.Dropout(0.3, noise_shape=None, seed=None))
    model.add(Dense(layer_det[1], activation='relu'))
    model.add(layers.Dropout(0.2, noise_shape=None, seed=None))
    model.add(Dense(classes, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])

    # Fit the model
    model.fit(train_X, train_y, epochs=num_epochs, batch_size=batch_size)
    score = model.evaluate(train_X, train_y, verbose=0)
    print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
    model.save('model' + str(id) + '.h5')  # creates a HDF5 file 'my_model.h5'
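
A hypothetical invocation of train with synthetic data; every value below is illustrative, not from the original:

import numpy as np

X = np.random.rand(500, 20)             # 500 samples, 20 features
y = np.random.randint(0, 3, size=500)   # integer labels; train() one-hot encodes them
train(X, y, num_of_features=20, classes=3, layer_det=[64, 32], id=0,
      num_epochs=5, batch_size=32)      # writes model0.h5 to disk
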
Example #3
def runCNN():
    trainImgs = mnist.train_images()
    trainLabels = mnist.train_labels()
    print(trainImgs.shape)
    print(trainLabels.shape)

    testImgs = mnist.test_images()
    testLabels = mnist.test_labels()
    print(testImgs.shape)
    print(testLabels.shape)

    trainImgs = (trainImgs / 255) - 0.5
    testImgs = (testImgs / 255) - 0.5

    trainImgs = np.expand_dims(trainImgs, axis=3)
    testImgs = np.expand_dims(testImgs, axis=3)
    print(trainImgs.shape)
    print(testImgs.shape)

    model = Sequential()
    model.add(Conv2D(8, 3, input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x=trainImgs,
              y=to_categorical(trainLabels),
              epochs=5,
              validation_data=(testImgs, to_categorical(testLabels)))

    model.save("mnist_cnn.h5")
Example #4
def scorensave_Keras(X, y):
    """
    Given a full training set (no splits), this function creates, scores
    and saves a Keras Classifer model for use in predictions
    NOTE:  Intended only for use after proper scoring of model using score_Keras (but not required)
    """
    X_dim = X.shape[1]
    # create model
    model = Sequential()
    model.add(Dense(12, input_dim=X_dim, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # Fit the model
    model.fit(X, y, epochs=150, batch_size=10)
    # evaluate the model
    scores = model.evaluate(X, y)
    print('150 epochs, 10 batch_size')
    print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

    end_time = time.time()
    print("total time: ", end_time - start_time)
    model.save(K_filename)
    print("Saved Keras model to " + K_filename)
Example #5
def main():
    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

    x_train = x_train.reshape(60000, 784)
    x_train = x_train / 255
    x_test = x_test.reshape(10000, 784)
    x_test = x_test / 255
    y_train = utils.to_categorical(y_train, 10)
    y_test = utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(784, input_dim=784, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    model.compile(loss="categorical_crossentropy",
                  optimizer="SGD",
                  metrics=["accuracy"])
    model.summary()
    callback = [
        TensorBoard(log_dir='logs', histogram_freq=1, write_images=True)
    ]

    model.fit(x_train,
              y_train,
              batch_size=200,
              epochs=300,
              verbose=1,
              validation_split=0.2,
              callbacks=callback)

    model.save("fashion_model.h5")

    score = model.evaluate(x_test, y_test, verbose=1)
    print("Accuracy on test data is", score[1] * 100, "percent")
Example #6
def evaluate_model(trainX, trainy, testX, testy):
    global best_accuracy
    verbose, epochs, batch_size = 0, 10, 32
    # n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]
    model = Sequential()
    #     model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,n_features)))  (x_train.shape[1],1)
    model.add(
        Conv1D(filters=64,
               kernel_size=3,
               activation='relu',
               input_shape=(trainX.shape[1], 1)))
    model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    model.add(Dropout(0.5))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(100, activation='relu'))
    model.add(Dense(trainy.shape[1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit network
    model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
    # evaluate model
    _, accuracy = model.evaluate(testX,
                                 testy,
                                 batch_size=batch_size,
                                 verbose=0)
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        model.save(BestModleFilePath)
    return accuracy
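
A hedged harness for evaluate_model with synthetic windows, mainly to show the (samples, timesteps, 1) input shape; best_accuracy and BestModleFilePath are the globals the function expects, and all sizes here are illustrative:

import numpy as np
from tensorflow.keras.utils import to_categorical

best_accuracy = 0.0
BestModleFilePath = 'best_model.h5'  # assumed destination for the best model

trainX = np.random.rand(200, 128, 1)  # 200 windows of 128 timesteps, 1 channel
trainy = to_categorical(np.random.randint(0, 6, 200), num_classes=6)
testX = np.random.rand(50, 128, 1)
testy = to_categorical(np.random.randint(0, 6, 50), num_classes=6)
print(evaluate_model(trainX, trainy, testX, testy))
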
Example #7
    def trainCNN(name,X,y,layerSize,convLayer,denseLayer):

        X = X/255.0

        model = Sequential()

        model.add(Conv2D(layerSize, (3, 3), input_shape=X.shape[1:]))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        for x in range(convLayer-1):
            model.add(Conv2D(layerSize, (3, 3)))
            model.add(Activation('relu'))
            model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())

        for i in range(denseLayer):
            model.add(Dense(layerSize))
            model.add(Activation('relu'))

        model.add(Dense(1))
        model.add(Activation('sigmoid'))

        tensorboard = TensorBoard(log_dir="logs/{}".format(name))

        model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])

        model.fit(X, y, batch_size=32, epochs=10, validation_split=0.3, callbacks=[tensorboard])

        model.save(str(layerSize) + "x" + str(convLayer) + "-CNN.model")
Example #8
    def test_fit(self):
        x = np.random.standard_normal((1024, 3, 5, 5))
        y = (x[:, 1, 2, 3] > 0).astype('int32')

        model = Sequential()
        model.add(
            KernelConv2D(input_shape=(3, 5, 5),
                         filters=4,
                         kernel_size=3,
                         kernel_function=LinearKernel(),
                         data_format='channels_first'))
        model.add(
            KernelConv2D(filters=3,
                         kernel_size=3,
                         kernel_function=LinearKernel(),
                         padding='same',
                         data_format='channels_last'))
        model.add(Flatten())
        model.add(Dense(units=2, activation='softmax'))
        model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
        model.summary()

        model.fit(x, y, epochs=30)

        model_path = os.path.join(tempfile.gettempdir(),
                                  'test_knn_%f.h5' % np.random.random())
        model.save(model_path)
        model = load_model(model_path,
                           custom_objects={
                               'KernelConv2D': KernelConv2D,
                               'LinearKernel': LinearKernel,
                           })

        predicted = model.predict(x).argmax(axis=-1)
        self.assertLess(np.sum(np.abs(y - predicted)), 200)
Example #9
def train_model(X_train,
                X_test,
                y_train,
                y_test,
                sample_weights=None,
                fig_dir='.',
                retrain=True,
                epochs=25):
    """ Train Neural Network classifier and save model. """

    model_filename = os.path.join(fig_dir, "keras_model.hdf5")
    #TODO: Try standard scaling and try normalising by peak and try reinputting such that it always normalises by largest value so far

    # colour = np.log10(X_train[:,:,0]) - np.log10(X_train[:,:,1])
    # X_train = np.dstack((X_train, colour))
    # colour = np.log10(X_test[:,:,0]) - np.log10(X_test[:,:,1])
    # X_test = np.dstack((X_test, colour))
    print("training...")
    if not retrain and os.path.isfile(model_filename):
        model = load_model(model_filename)
    else:
        num_classes = y_test.shape[-1]

        model = Sequential()

        model.add(Masking(mask_value=0.))

        # model.add(Conv1D(filters=32, kernel_size=3))
        # model.add(BatchNormalization())
        # model.add(Activation('relu'))
        # model.add(MaxPooling1D(pool_size=1))
        # model.add(Dropout(0.2, seed=42))

        model.add(LSTM(100, return_sequences=True))
        # model.add(Dropout(0.2, seed=42))
        # model.add(BatchNormalization())

        model.add(LSTM(100, return_sequences=True))
        # model.add(Dropout(0.2, seed=42))
        # model.add(BatchNormalization())
        # model.add(Dropout(0.2, seed=42))

        model.add(TimeDistributed(Dense(num_classes, activation='softmax')))
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.fit(X_train,
                  y_train,
                  validation_data=(X_test, y_test),
                  epochs=epochs,
                  batch_size=500,
                  verbose=2,
                  sample_weight=sample_weights)

        print(model.summary())
        model.save(model_filename)

    return model
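
The network above labels every timestep, so both inputs and targets are 3-D. A shape sketch of what it expects (all sizes illustrative), with zero-padding being exactly what Masking(mask_value=0.) skips:

import numpy as np

X_train = np.random.rand(100, 50, 2)   # 100 sequences, padded to 50 timesteps, 2 features
X_train[:, 40:, :] = 0.                # padded tail, ignored by the Masking layer
y_train = np.eye(3)[np.random.randint(0, 3, (100, 50))]  # one-hot class per timestep
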
Example #10
def train_model(train_data_input, train_coordinates):
    print(train_coordinates.shape[1])
    model = Sequential()
    '''Choose a model to train'''

    # '''1. CNN model'''
    # model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(train_data_input.shape[1], 1)))
    # model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
    # model.add(Dropout(0.5))
    # model.add(MaxPooling1D(pool_size=2))
    # model.add(Flatten())
    # model.add(Dense(100, activation='relu'))
    # model.add(Dense(2, activation='relu'))
    '''2. MLP model'''
    n = 20
    model.add(
        Dense(512,
              input_shape=(train_data_input.shape[1], ),
              activation='relu'))
    for i in range(n):
        model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='relu'))

    # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    optimizer = tf.keras.optimizers.RMSprop(0.001)
    model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse'])

    # fit network
    # history = model.fit(train_data_input,train_coordinates,epochs=EPOCHS,validation_split=0.2,verbose=1,callbacks=[PrintDot()])

    # Fit the network using early stopping:
    # if there is no improvement after a set number of epochs, training stops automatically;
    # the patience value sets how many epochs to wait for an improvement
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
    history = model.fit(train_data_input,
                        train_coordinates,
                        epochs=EPOCHS,
                        validation_split=0.2,
                        verbose=1,
                        callbacks=[early_stop, PrintDot()])
    # Visualize the training progress
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch
    hist.tail()

    plot_history(history)

    # Evaluate on the test set
    loss, mae, mse = model.evaluate(test_data_input,
                                    test_coordinates,
                                    verbose=2)
    print("Testing set Mean Abs Error: {:5.2f} ".format(mae))

    # save the model
    model.save(CurrentModleFile)
Example #11
def create_model():
    import time
    name = "CATS_VS_DOGS_CNN-{}".format(int(time.time()))
    tensorboard = TensorBoard(log_dir='logs\\{}'.format(name))

    x_train = pickle.load(open("x_train.pickle", "rb"))
    y_train = pickle.load(open("y_train.pickle", "rb"))
    x_test = pickle.load(open("x_test.pickle", "rb"))
    y_test = pickle.load(open("y_test.pickle", "rb"))

    x_train = np.array(x_train)
    y_train = np.array(y_train)
    x_test = np.array(x_test)
    y_test = np.array(y_test)

    x_train = x_train / 255.0
    x_test = x_test / 255.0

    model = Sequential()

    model.add(Conv2D(64, (3, 3), input_shape=x_train.shape[1:]))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(256, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dropout(0.5))

    model.add(Dense(512))

    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])
    model.summary()
    start_time = time.time()  # time was already imported above
    model.fit(x_train, y_train, batch_size=32, epochs=15, validation_split=0.35, callbacks=[tensorboard])

    model.save('network.model')
    print("--- %s seconds ---" % (time.time() - start_time))

    print('\n# Evaluate on test data')
    results = model.evaluate(x_test, y_test, batch_size=32, callbacks=[tensorboard])
    print('test loss, test acc:', results)
Example #12
def train_test():
    data_frame = pd.read_csv(os.path.join(os.path.dirname(__file__),
                                          'Crops.csv'),
                             sep=",")
    data_frame = data_frame.reindex(np.random.permutation(data_frame.index))

    selected_features = data_frame[["Temperature", "Humidity"]]

    scaler = MinMaxScaler()
    scaler.fit(selected_features)
    selected_features = scaler.transform(selected_features)

    target_class = data_frame['Crop']

    label_encoder = LabelEncoder()
    label_encoder.fit(target_class)
    labels = label_encoder.transform(target_class)

    encoded_labels = np_utils.to_categorical(labels)

    X_train, X_test, y_train, y_test = train_test_split(
        np.asarray(selected_features), encoded_labels, test_size=0.33)

    checkpoint = ModelCheckpoint('output/{val_acc:.4f}.hdf5',
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 mode='auto')

    model = Sequential()
    model.add(Dense(50, input_dim=2, activation='relu', name="dense_in"))
    model.add(Dense(100, activation='relu', name="dense_in_2"))
    model.add(Dense(3, activation='softmax', name="dense_in_3"))

    optimizer = adam(lr=0.0001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['acc'])

    tbCallBack = TensorBoard(log_dir="Graph",
                             histogram_freq=0,
                             write_graph=True,
                             write_images=True)
    model.fit(X_train,
              y_train,
              epochs=100,
              batch_size=5,
              validation_data=(X_test, y_test),
              verbose=2,
              callbacks=[tbCallBack, checkpoint])

    keras.models.save_model(model, "saved_model.h5", overwrite=True)
    model.save("best.h5", overwrite=True, save_weights_only=True)
    predict = model.predict(np.asarray([0.6, 0.8]).reshape(1, -1))
Example #13
def main():
    samples = load_files("data")

    sequence_dim = 20
    sequence_lag = 1

    samples, labels = make_sequences(samples, sequence_dim, sequence_lag)

    model = Sequential()
    model.add(LSTM(128, input_shape=(sequence_dim, 2), return_sequences=True))
    model.add(LSTM(128))
    model.add(Dense(64))
    model.add(Dense(2))

    print(model.summary())

    (trainSamples, testSamples, trainLabels,
     testLabels) = train_test_split(samples,
                                    labels,
                                    test_size=0.15,
                                    random_state=42)

    imname = "animal-11"
    image = cv2.imread("img/{}.jpg".format(imname))
    # create ground truth image with all train gazes
    for j in range(len(trainLabels)):
        s = trainLabels[j]
        cv2.circle(image, (int(s[0]), int(s[1])), 10, (255, 0, 0), 3)
    cv2.imwrite("img/{}_truth.jpg".format(imname), image)

    model.compile(loss="mean_absolute_error",
                  optimizer="adam",
                  metrics=["mae"])

    EPOCHS = 30
    for e in range(EPOCHS):
        print("=" * 50)
        print("Iteration: {}".format(e))
        model.fit(trainSamples,
                  trainLabels,
                  validation_data=(testSamples, testLabels),
                  epochs=1,
                  batch_size=128,
                  verbose=1)

        predictions = model.predict(testSamples)

        # create and save image with all current predictions
        image = cv2.imread("img/{}.jpg".format(imname))
        cv2.line(image, (0, 0), (200, 200), (255, 255, 255), 2)
        for p in predictions:
            cv2.circle(image, (int(p[0]), int(p[1])), 10, (0, 255, 0), 3)
        cv2.imwrite("img/{}_e{:02d}.jpg".format(imname, e), image)

    model.save("model_rnn.h5")
Example #14
def main():
	DEBUG_MODE = True
	SAVE_DIR = "D:/tfData/"
	SAVE_FILE = SAVE_DIR + "kerasModel.hdf5"
	feature_num = 3
	sequence_length = 20
	train_batch_size = test_batch_size = 2 ** sequence_length // 10000 if DEBUG_MODE else 2 ** sequence_length // 3

	all_possible = None
	if input("Generate datasets?(y/n): ") == 'y':
		all_possible = [f"{i:020b}" for i in range(2 ** sequence_length)]  # Generate all 20-char long sequences of 0s and 1s
		shuffle(all_possible)
		print("all possibilities generated")

		# Input data dimensions : [batch size, sequence_length, input_dim]
		train_input_d = np.array([toList(i) for i in all_possible[:train_batch_size]])  # Take train_batch_size of that randomly as training data
		train_expected_d = [int(sum([sum(x) for x in y])) for y in train_input_d]
		train_expected_onehot_d = np.eye(sequence_length + 1)[train_expected_d]

		test_input_d = np.array([toList(i) for i in all_possible[train_batch_size:train_batch_size + test_batch_size]])  # Take test_batch_size of that again for the test set
		test_expected_d = [int(sum([sum(x) for x in y])) for y in test_input_d]
		test_expected_onehot_d = np.eye(sequence_length + 1)[test_expected_d]
		print("datasets organized")

	if input("Load the model?(y/n): ") == 'y':
		model = load_model(SAVE_FILE) # TODO: resolve "You are trying to load a weight file containing 2 layers into a model with 0 layers."
	else:
		model = Sequential([
			# Giving the first layer an input shape lets the model build immediately,
			# which also addresses the weight-loading TODO above; each timestep
			# carries a single binary feature.
			GRU(16, input_shape=(sequence_length, 1)),
			Dropout(0.4),
			Dense(sequence_length + 1),  # one unit per possible count of ones (0..sequence_length)
			Activation("softmax"),  # softmax pairs with the categorical_crossentropy loss
		])
		model.compile(optimizer="adagrad", loss="categorical_crossentropy", metrics=["accuracy"])
	board = TensorBoard(log_dir=SAVE_DIR, write_graph=True)

	while True:
		print("Train the model(t), predict with it(p) or exit(e).")
		what = input("Choice: ")
		if what == 't':
			if all_possible is None:
				print("Datasets not generated")
			else:
				model.fit(train_input_d, train_expected_onehot_d, epochs=20, batch_size=train_batch_size, callbacks=[board])
				print("eval accuracy: ")
				print(model.evaluate(test_input_d, test_expected_onehot_d))
				model.save(SAVE_FILE)
		elif what == 'p':
			to_p = input("data: ")
			orig_data = np.array(toArray(to_p))
			orig_ground = np.eye(sequence_length + 1)[sum([int(x) for x in to_p])]
			print(model.predict(orig_data, batch_size=1))
		elif what == 'e':
			break
Example #15
def train():

    global seq_length, files_path

    file = read_file("tasks.txt")
    processed_inputs = tokenize_words(file)

    chars = sorted(list(set(processed_inputs)))
    char_to_num = dict((c, i) for i, c in enumerate(chars))

    save_file(",".join(chars), "chars.txt")

    input_len = len(processed_inputs)
    vocab_len = len(chars)

    x_data = []
    y_data = []

    for i in range(0, input_len - seq_length, 1):

        in_seq = processed_inputs[i:i + seq_length]
        out_seq = processed_inputs[i + seq_length]

        x_data.append([char_to_num[char] for char in in_seq])
        y_data.append(char_to_num[out_seq])

    n_patterns = len(x_data)

    X = np.reshape(x_data, (n_patterns, seq_length, 1))
    X = X/float(vocab_len)

    y = np_utils.to_categorical(y_data)

    # Model
    model = Sequential()
    model.add(LSTM(128, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(128, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(64))
    model.add(Dropout(0.2))
    model.add(Dense(y.shape[1], activation="softmax"))

    model.compile(loss="categorical_crossentropy", optimizer="adam")

    # Resume from a previously saved model if one exists.
    try:
        model = load_model(files_path + "task_model.h5")
    except (IOError, OSError):
        pass

    model.fit(X, y, epochs=2000, batch_size=128, verbose=2)

    model.save(files_path + "task_model.h5")
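
After training, the saved model can be reloaded and queried for the next character. A rough sketch, reusing the globals above; 'pattern' is a placeholder for seq_length integer codes produced by the same char_to_num mapping used in training:

from keras.models import load_model
import numpy as np

model = load_model(files_path + "task_model.h5")
chars = open("chars.txt").read().split(",")
pattern = [0] * seq_length  # placeholder seed; real usage feeds encoded text
x = np.reshape(pattern, (1, seq_length, 1)) / float(len(chars))
print(chars[np.argmax(model.predict(x))])  # most likely next character
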
Example #16
    def buildModel(self, model_path=None):
        try:
            if model_path is None:
                model_path = './model_tensorboard_2.h5'
            mymodel = load_model(model_path)
            print('retrain model...........')
            history = mymodel.fit(self.x_train, self.y_train, batch_size=50, epochs=500, verbose=0, validation_split=0.2, callbacks=[TensorBoard('./logs2')])
            self.history = history.history
            mymodel.save('./model_tensorboard_2.h5')
            self.model = mymodel
            self._write_val_loss_to_csv()
        except (IOError, OSError):
            print('train new model.........')
            start = datetime.datetime.now()
            mymodel = Sequential()
            mymodel.add(CuDNNLSTM(50, input_shape=(20, 1), return_sequences=True))
            mymodel.add(Activation('sigmoid'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(CuDNNLSTM(100, return_sequences=True))
            mymodel.add(Activation('sigmoid'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(CuDNNLSTM(100))
            mymodel.add(Activation('tanh'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(Dense(50, activation='sigmoid'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(Dense(20, activation='sigmoid'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(Dense(22, activation='relu'))

            mymodel.compile('adam', 'mae', metrics=['mae'])
            print(mymodel.summary())
            self.model = mymodel
            history = mymodel.fit(self.x_train, self.y_train, batch_size=50, epochs=3000, verbose=2, validation_split=0.2, callbacks=[TensorBoard()])
            self.history = history.history
            mymodel.save('./model_tensorboard_2.h5')
            end = datetime.datetime.now()
            print('elapsed:', end - start)
            self._write_val_loss_to_csv()
Example #17
def build_feedforward_nn(X, y, save_path):
    'building the proposed NN architecture'

    model = Sequential()
    model.add(Dense(15, input_dim=100, activation='relu'))  # input layer
    model.add(Dense(10, activation='relu'))  # hidden layer I
    model.add(Dense(5, activation='relu'))  # hidden layer II
    model.add(Dense(1, activation='sigmoid'))  # output layer
    # compile the keras model
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit the keras model on the dataset
    model.fit(X, y, epochs=150, batch_size=10)
    model.save(save_path)
    return model
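
An illustrative call with synthetic data; the shapes and file name below are assumptions, chosen to match the hard-coded input_dim=100:

import numpy as np

X = np.random.rand(1000, 100)            # 100 features, matching input_dim=100 above
y = np.random.randint(0, 2, size=1000)   # binary labels for the sigmoid output
model = build_feedforward_nn(X, y, save_path='feedforward_nn.h5')
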
Example #18
class Model:
    def __init__(self):
        self.model = Sequential()
        self.model_name = 'model.h5'
        self.train_name = 'train.npy'

    def build(self):
        # add model layers
        self.model.add(
            Conv2D(64,
                   kernel_size=3,
                   activation='relu',
                   input_shape=(28, 28, 1)))
        self.model.add(Conv2D(32, kernel_size=3, activation='relu'))
        self.model.add(Flatten())
        self.model.add(Dense(10, activation='softmax'))

    def load_data(self):
        train_data = np.load(self.train_name)
        indices = np.random.permutation(train_data.shape[0])
        split = int(train_data.shape[0] * valid_percentage)  # cast to int: slice indices must be integers
        valid_idx, train_idx = indices[:split], indices[split:]
        train, valid = train_data[train_idx, :], train_data[valid_idx, :]
        # np.hsplit(arr, 1) returns the array whole in a one-element list; a real
        # feature/label split would pass the boundary column index instead.
        return np.hsplit(train, 1), np.hsplit(valid, 1)

    def train(self, data):
        self.model.compile(optimizer='adam',
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])
        self.model.fit(data[0][0],
                       data[0][1],
                       validation_data=data[1],
                       epochs=3)

    def load_model(self):
        pass
        # self.model = keras.models.load_model(model_name)
        # self.model.summary()

    def save(self):
        self.model.save(self.model_name)

    def predict(self, depth_map):
        return self.model.predict(depth_map)
Example #19
    def build_model(self, epochs=500):
        x, y = self.data_prepare()
        debug("build model")

        model = Sequential()
        model.add(Flatten(input_shape=(IMAGE_SIZE, IMAGE_SIZE)))
        model.add(Dense(128, activation='relu'))
        model.add(Dropout(0.1))
        model.add(Dense(64, activation='relu'))
        model.add(Dense(len(self.target_labels), activation='relu'))

        model.compile(loss='mse',
                      optimizer='adam',
                      metrics=['mae', 'accuracy'])
        model.fit(x, y, batch_size=128, epochs=epochs, validation_split=0.2)

        model.save(self.model_path())
Example #20
class DQNSolver:
    def __init__(self, observation_space, action_space):
        self.exploration_rate = EXPLORATION_MAX

        self.action_space = action_space
        self.memory = deque(maxlen=MEMORY_SIZE)
        self.model = Sequential()
        self.model.add(
            Dense(128, input_dim=observation_space, activation="relu"))
        self.model.add(Dense(128, activation="relu"))
        #self.model.add(Dense(64, activation = "relu"))
        self.model.add(Dense(self.action_space, activation="linear"))
        self.model.compile(loss="mse", optimizer=Adam(lr=LEARNING_RATE))

    def set_model(self, name):
        newmodel = keras.models.load_model(name)
        self.model = newmodel

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))

    def act(self, state):
        if np.random.rand() < self.exploration_rate:
            return random.randrange(self.action_space)
        q_values = self.model.predict(state)
        return np.argmax(q_values[0])

    def experience_replay(self):
        if len(self.memory) < BATCH_SIZE:
            return
        batch = random.sample(self.memory, BATCH_SIZE)
        for state, action, reward, state_next, terminal in batch:
            q_update = reward
            if not terminal:
                q_update = (reward +
                            GAMMA * np.amax(self.model.predict(state_next)[0]))
            q_values = self.model.predict(state)
            q_values[0][action] = q_update
            self.model.fit(state, q_values, verbose=0)
        self.exploration_rate *= EXPLORATION_DECAY
        self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)

    def save(self, name):
        self.model.save(name)
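
A hedged round trip for the solver's save/set_model pair. The observation and action sizes are placeholders (CartPole-like), and the module-level constants (EXPLORATION_MAX, MEMORY_SIZE, LEARNING_RATE) are assumed to be defined as in the original:

solver = DQNSolver(observation_space=4, action_space=2)
solver.save("dqn.h5")                         # persists the underlying Keras model
restored = DQNSolver(observation_space=4, action_space=2)
restored.set_model("dqn.h5")                  # replaces the fresh network with the saved one
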
Example #21
    def test_same(self):
        kernel = np.random.standard_normal((3, 3, 5, 11))
        x = np.random.standard_normal((2, 7, 13, 5))

        model = Sequential()
        model.add(
            KernelConv2D(
                input_shape=(7, 13, 5),
                filters=11,
                kernel_size=3,
                kernel_function=LinearKernel(),
                padding='same',
                use_bias=False,
                weights=[kernel],
            ))
        model.compile(optimizer='adam', loss='mse')
        model.summary()

        model_path = os.path.join(tempfile.gettempdir(),
                                  'test_knn_%f.h5' % np.random.random())
        model.save(model_path)
        model = load_model(model_path,
                           custom_objects={
                               'KernelConv2D': KernelConv2D,
                               'LinearKernel': LinearKernel,
                           })

        kernel_output = model.predict(x)

        model = Sequential()
        model.add(
            Conv2D(
                input_shape=(7, 13, 5),
                filters=11,
                kernel_size=3,
                padding='same',
                use_bias=False,
                weights=[kernel],
            ))
        model.compile(optimizer='adam', loss='mse')
        normal_output = model.predict(x)

        self.assertTrue(np.allclose(normal_output, kernel_output))
Example #22
def entrenamiento_red1():
	cnn = Sequential()
	cnn.add(Convolution2D(filtrosConv1, tamanio_filtro1, padding='same', input_shape=(altura, longitud, 3), activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	cnn.add(Convolution2D(filtrosConv2, tamanio_filtro2, padding='same', activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	cnn.add(Flatten())
	cnn.add(Dense(256,activation='relu'))
	cnn.add(Dropout(0.5))
	cnn.add(Dense(clases, activation='softmax'))
	cnn.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=lr), metrics=['accuracy'])

	cnn.fit_generator(imagen_entrenamiento_red1, steps_per_epoch=pasos, epochs=epocas, validation_data=imagen_validacion_red1, validation_steps=pasos_validacion)
	dir='./modelo/red1/'

	if not os.path.exists(dir):
		os.mkdir(dir)
	cnn.save('./modelo/red1/modelo.h5')
	cnn.save_weights('./modelo/red1/pesos.h5')
Example #23
def build_nn_model(x_train, x_test, y_train, y_test):
    model = Sequential()
    model.add(Dense(input_shape=x_train[0].shape, units=13))
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer=tf.train.AdamOptimizer(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    print(model.summary())
    model.fit(x_train,
              y_train,
              batch_size=32,
              epochs=1000,
              callbacks=[TensorBoard('./logs')])
    model.save('./nn_model.h5')
    test_loss, test_acc = model.evaluate(x_test, y_test)
    print(test_acc)
Example #24
def build_model(save=False):
  print('Building model ....')
  model = Sequential()
    
  model.add(Conv2D(64, kernel_size=3, strides=1, activation='relu', input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)))
  model.add(Conv2D(64, kernel_size=3, strides=2, activation='relu'))
  model.add(Dropout(0.25))
  model.add(Conv2D(128, kernel_size=3, strides=1, activation='relu'))
  model.add(Conv2D(128, kernel_size=3, strides=2, activation='relu'))
  model.add(Dropout(0.25))
  model.add(Conv2D(256, kernel_size=3, strides=1, activation='relu'))
  model.add(Conv2D(256, kernel_size=3, strides=2, activation='relu'))
  model.add(Flatten())
  model.add(Dropout(0.25))
  model.add(Dense(512, activation='relu'))
  model.add(Dense(CATEGORY_NO, activation='softmax'))

  model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

  if save: model.save(MODEL_PATH)
   
  return model
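
build_model reads three module-level constants that the snippet does not define; plausible placeholder values and a call, all illustrative:

IMAGE_SIZE = 48              # assumed square grayscale input size
CATEGORY_NO = 7              # assumed number of output classes
MODEL_PATH = 'cnn_model.h5'  # assumed save destination

model = build_model(save=True)
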
Example #25
    def build_model(self):
        debug("build model")

        x, y = self.data_prep()

        model = Sequential()
        model.add(Conv2D(20,
                         kernel_size=(3, 3),
                         activation='relu',
                         input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1)))
        model.add(Conv2D(20, kernel_size=(3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(NUM_CLASSES, activation='softmax'))

        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer='adam',
                      metrics=['accuracy'])
        model.fit(x, y,
                  batch_size=32,
                  epochs=2,
                  validation_split=0.2)

        model.save(MODEL_PATH)
Example #26
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(84, 84, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(32, activation='relu'))  # hidden layer; softmax is reserved for the output layer
model.add(Dense(valY.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])

# fit network
history = model.fit(trainX,
                    trainY,
                    epochs=1000,
                    batch_size=1000,
                    verbose=2,
                    validation_data=(valX, valY),
                    shuffle=False,
                    callbacks=[es])

model.save('Pong_Action_Conv2D.keras')
print(model.summary())

np.save("history_Pong_Action_Conv2D.npy", history.history, allow_pickle=True)
Example #27
def fitModel(NB_CLASSES, x_train, y_train, datagen, IMG_SIZE, CBP):
    '''
    fitModel() trains the model; all of its parameters are described below:
    :param NB_CLASSES: Number of classes in the dataset
    :param x_train: numpy array of training images for the model
    :param y_train: Labels of the images
    :param datagen: ImageDataGenerator used for augmenting the images; the datagen
                    reference is fit on x_train and its flow() feeds batches to
                    the fit_generator() method
    :param IMG_SIZE: IMG_SIZE = [1080, 608]
                    INPUT_SHAPE = IMG_SIZE + [3]
                    instagram landscape picture size [16:9] i.e. [1080, 608] input size image

    :param CBP: CBP = ['CollegeName', 'Batch', 'Professor']
    :return: return max, min, testScore, accIndex, lossIndex, testIndex, date

    '''
    print("Training the model.")
    date = datetime.datetime.now()
    # Thresholds are fractions, matching the Keras accuracy/metric values in [0, 1].
    max, min, testScore, accIndex, lossIndex, testIndex = 0.70, 4.0, 0.75, 1, 1, 1
    test_score_out = []
    INPUT_SHAPE = IMG_SIZE + [3]  # instagram landscape picture size [16:9] i.e. [1080, 608] input size image

    print('''splitting x_train into train, validation and test sets''')
    # train, validate, test = np.split(df.sample(frac=1), [int(.6 * len(df)), int(.8 * len(df))])
    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=1)
    x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.125, random_state=1)

    for dense_layer in dense_layers:
        for layer_size in layer_sizes:
            for conv_layer in conv_layers:
                NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer,
                                                             f"{datetime.datetime.now():%m-%d-%Hh%Mm%Ss}")

                model = Sequential()
                model.add(Conv2D(layer_size, (8, 8), padding='valid',
                                 input_shape=INPUT_SHAPE, kernel_regularizer=l2(0.001)))
                model.add(Activation('relu'))
                # model.add(BatchNormalization())
                model.add(Dropout(DROPOUT))
                model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

                for l in range(conv_layer - 1):
                    model.add(Conv2D(layer_size, (8, 8), padding='valid',
                                     kernel_regularizer=l2(0.001)))
                    model.add(Activation('relu'))
                    # model.add(BatchNormalization())
                    model.add(Dropout(DROPOUT))
                    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

                model.add(Flatten())

                for _ in range(dense_layer):
                    model.add(Dense(layer_size, kernel_regularizer=l2(0.001)))
                    model.add(Activation('relu'))
                    model.add(Dropout(DROPOUT))

                model.add(Dense(NB_CLASSES))
                model.add(Activation('sigmoid'))
                tensorboard = TensorBoard(log_dir="sample_data/{}".format(NAME))

                model.compile(loss='categorical_crossentropy',
                              optimizer=OPTIMIZER,
                              metrics=['accuracy'],
                              )
                # fit the datagen to the x_train
                datagen.fit(x_train)
                # train the model
                history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=BATCH_SIZE),
                                              steps_per_epoch=len(x_train) // BATCH_SIZE,
                                              epochs=NB_EPOCHS,
                                              verbose=1,
                                              callbacks=[tensorboard],
                                              validation_data=(x_val, y_val),
                                              )

                '''saving the comparatively best one'''
                if history.history.get('val_acc')[-1] > max:
                    if accIndex >= 2:
                        os.remove('ValAcc{}_{}_{}_{}_{}_{}'.format(accIndex - 1, round(max, 4), CBP[0], CBP[1], CBP[2],
                                                                   f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                    max = history.history.get('val_acc')[-1]
                    # model.save() returns None, so pickling its result stores nothing useful;
                    # saving the model directly is all that is needed.
                    model.save('ValAcc{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2],
                                                                f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                    accIndex += 1

                if history.history.get('val_loss')[-1] < min:
                    if lossIndex >= 2:
                        os.remove(
                            'ValLoss{}_{}_{}_{}_{}_{}'.format(lossIndex - 1, round(min, 4), CBP[0], CBP[1], CBP[2],
                                                              f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                    min = history.history.get('val_loss')[-1]
                    model.save('ValLoss{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2],
                                                                 f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                    lossIndex += 1

                score = model.evaluate(x_test, y_test, verbose=1)  # score[testScore, testAccuracy]

                if score[1] > testScore:
                    if testIndex >= 2:
                        os.remove(
                            'TestScore{}_{}_{}_{}_{}_{}'.format(testIndex - 1, round(testScore, 4), CBP[0], CBP[1],
                                                                CBP[2],
                                                                f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                        os.remove('{}_FinestHistory'.format(testIndex - 1))
                    testScore = score[1]
                    model.save(
                        'TestScore{}_{}_{}_{}_{}_{}'.format(testIndex, round(testScore, 4), CBP[0], CBP[1], CBP[2],
                                                            f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
                    '''dumping best scoring history for future use'''
                    # Pickle the plain history dict; the History object itself holds a
                    # reference to the model and does not pickle cleanly.
                    test_score_history = open('{}_FinestHistory'.format(testIndex), "wb")
                    pickle.dump(history.history, test_score_history)
                    test_score_out = copy.copy(score)
                    test_score_history.close()

                    testIndex += 1

    print('''drawing the diagram for accuracy and loss for best saved model''')
    test_score_history_out = open('{}_FinestHistory'.format(testIndex - 1), "rb")  # last index actually saved
    history = pickle.load(test_score_history_out)
    print('Best test score: ', test_score_out[0])
    print('Best test accuracy: ', test_score_out[1])

    # list all data in history
    print(history.keys())
    # summarize history for accuracy
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    # summarize history for loss
    plt.plot(history['loss'])
    plt.plot(history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()

    return max, min, testScore, accIndex, lossIndex, testIndex, date
Example #28
model = Sequential()

model.add(Dense(units=24, input_dim=x_column, activation='relu'))
model.add(Dense(units=10, activation='relu'))
model.add(Dense(units=y_column, activation='sigmoid'))

# metrics : performance measures
# metrics=['accuracy'] : also report accuracy during training.

# Loss functions: mean squared error or (categorical) cross-entropy.
# Cross-entropy takes the logarithm of the output, so it converges quickly
# when the error is large and slows down as the error shrinks.
# MSE : 'mean_squared_error'
# MAE : 'mean_absolute_error'
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])

model.fit(x_train, y_train, epochs=200, batch_size=5, verbose=1)

print('Saving the model to a file.')
model.save('my_model.h5')

# The saved model can be read back using load_model('my_model.h5').

print(model.metrics_names)

# evaluate : returns the loss value and metric values computed in test mode.
score = model.evaluate(x_test, y_test)
print('test loss : %.4f' % (score[0]))
print('test accuracy : %.4f' % (score[1]))

print('finished')
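
As the comment above says, the saved file can be read back with load_model; a minimal sketch, assuming the same keras import style used in this example:

from keras.models import load_model

restored = load_model('my_model.h5')
print(restored.predict(x_test[:1]))  # should match model.predict on the same input
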
Example #29
def main(_):
    # Import data
    print('Reading in data from  ', FLAGS.data_dir)
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    my_model = Sequential()

    # If pre-trained model exists on disk, then just load that
    if os.path.isfile(
            os.path.join(os.getcwd(),
                         'saved_model/cork_ai_model_keras_deep.h5')):
        my_model = load_model("saved_model/cork_ai_model_keras_deep.h5")
        print("Model restored from disk")

    # Build and train a model using keras
    else:
        my_model = deepnn_keras(model_input_shape=(28, 28, 1))
        my_model.compile(loss='categorical_crossentropy',
                         optimizer='adam',
                         metrics=['accuracy'])

        train_images = np.reshape(mnist.train.images, [-1, 28, 28, 1])
        print("train set shape is ", train_images.shape)
        print("train labels shape is ", mnist.train.labels.shape)
        my_model.fit(train_images,
                     mnist.train.labels,
                     epochs=18,
                     batch_size=50)

        # model is trained, let's save it to disk
        if not os.path.exists(os.path.join(os.getcwd(), 'saved_model')):
            os.makedirs(os.path.join(os.getcwd(), 'saved_model'))
        my_model.save("saved_model/cork_ai_model_keras_deep.h5")

    test_images = np.reshape(mnist.test.images, [-1, 28, 28, 1])
    metrics = my_model.evaluate(test_images, mnist.test.labels, batch_size=50)
    print('\n\nevaluation test: loss, accuracy : ', metrics)

    # Test on individual test examples, writing examples of
    # successful and failed classifications to disk
    if FLAGS.write_samples:
        print('Will write sample outputs to output_images folder')
        file_prefix = ''
        if 'fashion' in FLAGS.data_dir:
            print('Using fashion data')
            file_prefix = 'fashion_deep_'
        if not os.path.exists(os.path.join(os.getcwd(), 'output_images')):
            os.makedirs(os.path.join(os.getcwd(), 'output_images'))
        num_each_to_store = 5
        stored_correct = 0
        stored_incorrect = 0
        idx = 0
        while (stored_correct < num_each_to_store or stored_incorrect <
               num_each_to_store) and idx < len(mnist.test.images):
            pred = np.argmax(
                my_model.predict(
                    np.reshape(mnist.test.images[idx], [-1, 28, 28, 1])))
            real_label = np.argmax(mnist.test.labels[idx])
            correct = pred == real_label

            if file_prefix == 'fashion_deep_':  # 'is' compares identity, not string equality
                real_label = fashion_label_to_name(real_label)
                pred = '[' + fashion_label_to_name(pred) + ']'
            else:
                real_label = real_label.astype(str)
                pred = pred.astype(str)

            img = np.reshape(mnist.test.images[idx], [28, 28])
            plt.imshow(img, cmap='gray')

            if correct and stored_correct < num_each_to_store:
                stored_correct += 1
                plt.savefig("output_images/{}success_{}.png".format(
                    file_prefix, real_label))
            elif not correct and stored_incorrect < num_each_to_store:
                stored_incorrect += 1
                plt.savefig("output_images/{}fail_{}_{}.png".format(
                    file_prefix, real_label, pred))
            idx += 1

    # Test on extra test images made from photos of handwritten digits
    # or from digitally created 'hand' written digits
    if FLAGS.extra_test_imgs:
        print('Using manually hand-written digits')
        if not os.path.exists(os.path.join(os.getcwd(), 'output_images')):
            os.makedirs(os.path.join(os.getcwd(), 'output_images'))
        file_prefix = 'extra_'
        for idx in range(1, 7):
            img_file = 'extra_test_digits/{}.jpg'.format(idx)
            img = misc.imread(img_file)
            pred = np.argmax(my_model.predict(np.reshape(img, [1, 28, 28, 1])))

            plt.imshow(img, cmap='gray')
            plt.savefig("output_images/{}{}predicted_{}.png".format(
                file_prefix, idx, pred))
Example #30
# Add the output layer: five neurons with sigmoid activation
model.add(Dense(5, activation='sigmoid'))

# Specify the loss to use, the "optimizer" for the weights of the
# neuron connections, and the metrics to collect
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['binary_accuracy'])

# Train the network
# training_data = inputs, target_data = outputs, epochs = training iterations
model.fit(training_data, target_data, epochs=1000)

# Evaluate the model
scores = model.evaluate(training_data, target_data)

print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
print(model.predict(training_data_modificado).round())

name_file = 'modeloNNA.keras'  # Must use the .keras extension.
model.save(name_file)  # Saves the file in the project root folder.
model.summary()  # Prints the model structure report.

# # Serialize the model to JSON
# model_json = model.to_json()
# with open("model.json", "w") as json_file:
#    json_file.write(model_json)
# # Serialize the weights to HDF5
# model.save_weights("model.h5")
# print("Model saved!")