Example No. 1
def entrenamiento_red2():
	cnn = Sequential()
	cnn.add(Convolution2D(filtrosConv1, tamanio_filtro1, padding='same', input_shape=(altura, longitud, 3), activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	cnn.add(Convolution2D(filtrosConv2, tamanio_filtro2, padding='same', activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	#Now that the convolutional output has many feature maps, flatten it
	cnn.add(Flatten())
	#After flattening, connect a fully-connected (dense) layer
	cnn.add(Dense(256,activation='relu'))
	#Randomly drop 50% of the dense layer's neurons at each step;
	#this is done to avoid overfitting
	cnn.add(Dropout(0.5))
	#Connect to the output layer
	cnn.add(Dense(clases, activation='softmax'))
	#Compilation parameters for optimizing the model
	cnn.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=lr), metrics=['accuracy'])

	cnn.fit_generator(imagen_entrenamiento_red2, steps_per_epoch=pasos, epochs=epocas, validation_data=imagen_validacion_red2, validation_steps=pasos_validacion)
	dir='./modelo/red2/'

	if not os.path.exists(dir):
		os.mkdir(dir)
	cnn.save('./modelo/red2/modelo.h5')
	cnn.save_weights('./modelo/red2/pesos.h5')
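
A minimal reload sketch (an assumption, not part of the original script): load_model restores both the architecture and the weights from the saved .h5 file, so the separate weights file is only needed when the model is rebuilt by hand.

# Sketch: reload the model saved above for inference (assumes the same Keras package used above).
from keras.models import load_model

cnn = load_model('./modelo/red2/modelo.h5')
cnn.load_weights('./modelo/red2/pesos.h5')  # optional here; load_model already restored the weights
# prediccion = cnn.predict(lote)            # 'lote' is a hypothetical batch shaped (N, altura, longitud, 3)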
Example No. 2
def runRNN():
    # Assumes you're in the root level of the dataset directory.
    # If you aren't, you'll need to change the relative paths here.
    train_data = prepareData('./train')
    test_data = prepareData('./test')

    for text_batch, label_batch in train_data.take(1):
        print(text_batch.numpy()[0])
        print(label_batch.numpy()[0])  # 0 = negative, 1 = positive

    model = Sequential()

    # ----- 1. INPUT
    # We need this to use the TextVectorization layer next.
    model.add(Input(shape=(1,), dtype="string"))

    # ----- 2. TEXT VECTORIZATION
    # This layer processes the input string and turns it into a sequence of
    # max_len integers, each of which maps to a certain token.
    max_tokens = 1000
    max_len = 100
    vectorize_layer = TextVectorization(
        # Max vocab size. Any words outside of the max_tokens most common ones
        # will be treated the same way: as "out of vocabulary" (OOV) tokens.
        max_tokens=max_tokens,
        # Output integer indices, one per string token
        output_mode="int",
        # Always pad or truncate to exactly this many tokens
        output_sequence_length=max_len,
    )

    # Call adapt(), which fits the TextVectorization layer to our text dataset.
    # This is when the max_tokens most common words (i.e. the vocabulary) are selected.
    train_texts = train_data.map(lambda text, label: text)
    vectorize_layer.adapt(train_texts)

    model.add(vectorize_layer)

    # ----- 3. EMBEDDING
    # This layer turns each integer (representing a token) from the previous layer
    # into an embedding. Note that we're using max_tokens + 1 here, since there's an
    # out-of-vocabulary (OOV) token that gets added to the vocab.
    model.add(Embedding(max_tokens + 1, 128))

    # ----- 4. RECURRENT LAYER
    model.add(LSTM(64))

    # ----- 5. DENSE HIDDEN LAYER
    model.add(Dense(64, activation="relu"))

    # ----- 6. OUTPUT
    model.add(Dense(1, activation="sigmoid"))

    # Compile and train the model.
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    model.fit(train_data, epochs=1)

    model.save_weights('rnn')
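
A brief usage sketch (an assumption, not part of the original function): because the Input layer ingests raw strings, the trained model can score plain text directly.

    # Sketch: still inside runRNN(), right after saving the weights.
    print(model.predict([["this movie was absolutely terrific"]]))  # close to 1.0 => positive
    print(model.predict([["this movie was utterly awful"]]))        # close to 0.0 => negative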
Example No. 3
class ColorsModel:
    def __init__(self, tokenizer_file_name, num_classes=28, maxlen=25, saved_model=None):
        self.maxlen = maxlen
        self.num_classes = num_classes
        self.history = None
        self.__initialize_model()
        self.callback = []
        self.__initialize_callback()
        self.data_transformer = DataTransformer(maxlen, num_classes)
        if saved_model is not None:
            self.data_transformer.tokenizer = self.load_tokenizer(tokenizer_file_name)
            self.__load_saved_model(saved_model)

    def __load_saved_model(self, saved_model):
        self.model.load_weights(saved_model)

    def __initialize_callback(self):
        log_dir = "logs/fit/color_model/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        self.callback.append(tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1))

    def __initialize_model(self):
        self.model = Sequential()
        self.model.add(LSTM(256, return_sequences=True, input_shape=(self.maxlen, self.num_classes)))
        self.model.add(LSTM(128))
        self.model.add(Dense(128, activation='relu'))
        self.model.add(Dense(3, activation='sigmoid'))
        self.model.compile(optimizer='adam', loss='mse', metrics=['acc'])
        # self.model.summary()

    def __scale(self, n):
        return int(n * 255)

    def predict(self, color):
        one_hot = self.data_transformer.transform_prediction_data(name=color)
        pred = self.model.predict(one_hot)[0]
        return [self.__scale(pred[0]), self.__scale(pred[1]), self.__scale(pred[2])]

    def fit(self, data, colors, epohs=40, batch_size=32, validation_split=0.1):
        normalized_values, one_hot_names = self.data_transformer.transform_colors_input_data(data=data, colors=colors)
        self.save_tokenizer('./tokenizer/tokenizer_2.pickle')
        self.history = self.model.fit(x=one_hot_names, y=normalized_values, epochs=epohs, batch_size=batch_size,
                                      validation_split=validation_split, verbose=1, callbacks=self.callback)

    def save_model(self, file_name):
        self.model.save_weights(file_name)
    
    def save_tokenizer(self, file_name):
        with open(file_name, 'wb') as handle:
            pickle.dump(self.data_transformer.tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
    
    def load_tokenizer(self, file_name):
        with open(file_name, 'rb') as handle:
            return pickle.load(handle)
    
    def save_progress(self, model_file_name, tokenizer_file_name):
        self.save_model(model_file_name)
        self.save_tokenizer(tokenizer_file_name)
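
A minimal usage sketch (assumptions: DataTransformer and the pickled tokenizer exist as referenced above; the weights path shown here is hypothetical):

# Sketch: restore a trained ColorsModel and predict an RGB triple for a color name.
colors_model = ColorsModel('./tokenizer/tokenizer_2.pickle', saved_model='./models/colors_weights.h5')
print(colors_model.predict('midnight blue'))  # a [R, G, B] list with each channel in 0..255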
Example No. 4
    def save_model(self, model: Sequential):
        if not self.is_neural_network_ready_to_save:
            raise Exception('Could not save Neural Network, train model before save')

        print('\033[1;36m*' * 50)
        print('*', '\t' * 4, 'Saving the Model', '\t' * 4, '*')
        print('\033[1;36m*\033[m' * 50)
        json = model.to_json()
        with open(self.ESTRUTURA_JSON_FILE_PATH, 'w') as arquivo_json:
            arquivo_json.write(json)
            arquivo_json.close()

        model.save_weights(self.WEIGHTS_FILE_PATH)
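
A sketch of the matching load step (an assumption; it simply mirrors the save paths above using Keras's model_from_json):

    def load_model(self) -> Sequential:
        # Sketch: rebuild the architecture from the saved JSON, then restore the trained weights.
        from keras.models import model_from_json
        with open(self.ESTRUTURA_JSON_FILE_PATH, 'r') as arquivo_json:
            model = model_from_json(arquivo_json.read())
        model.load_weights(self.WEIGHTS_FILE_PATH)
        return model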
Example No. 5
def run():
    trainImgs = mnist.train_images()
    trainLabels = mnist.train_labels()
    print(trainImgs.shape)
    print(trainLabels.shape)

    testImgs = mnist.test_images()
    testLabels = mnist.test_labels()
    print(testImgs.shape)
    print(testLabels.shape)

    trainImgs = (trainImgs / 255) - 0.5
    testImgs = (testImgs / 255) - 0.5

    trainImgs = trainImgs.reshape((-1, 784))
    testImgs = testImgs.reshape((-1, 784))
    print(trainImgs.shape)
    print(testImgs.shape)

    model = Sequential()
    model.add(Dense(64, activation='relu', input_shape=(784, )))
    model.add(Dropout(0.1))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))

    model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )

    model.fit(
        trainImgs,  # training data
        to_categorical(trainLabels),  # training targets
        epochs=3,
        batch_size=32,
        validation_data=(testImgs, to_categorical(testLabels)))

    # evaluate test data
    model.evaluate(testImgs, to_categorical(testLabels))

    # save model
    model.save_weights('mnist_weight.h5')
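
Because only the weights are saved here, reloading requires rebuilding the identical architecture first; a minimal sketch (an assumption, not in the original script):

# Sketch: rebuild the same architecture, then restore the saved weights.
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784, )))
model.add(Dropout(0.1))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.load_weights('mnist_weight.h5')
# predictions = model.predict(testImgs[:5])  # np.argmax over axis 1 gives the predicted digit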
Example No. 6
def entrenamiento_red1():
	cnn = Sequential()
	cnn.add(Convolution2D(filtrosConv1, tamanio_filtro1, padding='same', input_shape=(altura, longitud, 3), activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	cnn.add(Convolution2D(filtrosConv2, tamanio_filtro2, padding='same', activation='relu'))
	cnn.add(MaxPooling2D(pool_size=tamanio_pool))
	cnn.add(Flatten())
	cnn.add(Dense(256,activation='relu'))
	cnn.add(Dropout(0.5))
	cnn.add(Dense(clases, activation='softmax'))
	cnn.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=lr), metrics=['accuracy'])

	cnn.fit_generator(imagen_entrenamiento_red1, steps_per_epoch=pasos, epochs=epocas, validation_data=imagen_validacion_red1, validation_steps=pasos_validacion)
	dir='./modelo/red1/'

	if not os.path.exists(dir):
		os.mkdir(dir)
	cnn.save('./modelo/red1/modelo.h5')
	cnn.save_weights('./modelo/red1/pesos.h5')
Example No. 7
def train_regressor(filename, epochs, neuron_num):
    dataset = []
    with open(filename, 'r') as f:
        for line in f:
            jsonfile = json.loads(line[:-2])
            dataset.append(jsonfile)
    f.close()

    cars = pd.DataFrame(dataset)
    cars = cars.drop(cars[(cars['price'] > 30000)
                          & (cars['price'] < 300000)].index)
    cars_prices = cars["price"].copy()
    cars = cars.drop("price", axis=1)
    cars_prepared = full_pipeline.fit_transform(cars)

    model = Sequential()
    model.add(
        Dense(8, input_dim=10, kernel_initializer='normal', activation='relu'))
    model.add(Dense(neuron_num, activation='relu'))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    history = model.fit(cars_prepared,
                        cars_prices,
                        epochs=epochs,
                        batch_size=50,
                        verbose=1,
                        validation_split=0.2)
    model_json = model.to_json()

    plt.plot(history.history['mean_absolute_error'])
    plt.plot(history.history['val_mean_absolute_error'])
    plt.title('model mean_absolute_error')
    plt.ylabel('mean_absolute_error')
    plt.xlabel('epok')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig('app/web/static/history.png')

    with open('neuralnetregressor.json', 'w') as json_file:
        json_file.write(model_json)
        model.save_weights('neuralnetregressor.h5')
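
A restore sketch for the saved regressor (assumptions: keras's model_from_json is importable and the fitted full_pipeline is still available):

# Sketch: restore the architecture from JSON and the weights from HDF5, then predict prices.
from keras.models import model_from_json

with open('neuralnetregressor.json', 'r') as json_file:
    regressor = model_from_json(json_file.read())
regressor.load_weights('neuralnetregressor.h5')
# predicted_prices = regressor.predict(full_pipeline.transform(new_cars))  # 'new_cars' is hypothetical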
Example No. 8
    filepath=checkpoint_path,
    verbose=1,
    save_weights_only=True,
    period=5)

num_classes = 2
resnet_weights_path = 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'

model = Sequential()
model.add(ResNet50(include_top=False, pooling='avg', weights=resnet_weights_path))
model.add(Dense(num_classes, activation='softmax'))

# Do not train the first layer (ResNet); it is already trained
model.layers[0].trainable = False

# Compile model
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])

# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))

model.fit_generator(
        train_generator,
        steps_per_epoch=26,
        epochs=15,
        validation_data=validation_generator,
        callbacks=[cp_callback],
        validation_steps=6)

model.save_weights(checkpoint_path.format(epoch=10))
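
A restore sketch (assumptions: `import tensorflow as tf` and `import os` are available, and checkpoint_path uses the TensorFlow checkpoint format, which is what save_weights_only=True with an epoch-formatted path normally produces):

# Sketch: load the most recent checkpoint written by the callback above.
latest = tf.train.latest_checkpoint(os.path.dirname(checkpoint_path))
model.load_weights(latest)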
Example No. 9
def rnn(trainset, testset, all_sample_list, activation, loss, modelName):
    with open(trainset, 'rb') as f:
        data = pickle.load(f)
    f.close()
    with open(testset, 'rb') as f:
        testdata = pickle.load(f)
    f.close()
    dataset = []
    for txt, label in data:
        dataset.append(txt)
    for txt, label in testdata:
        dataset.append(txt)
    with open('data_files/' + all_sample_list, 'wb') as f:
        pickle.dump(dataset, f)
    f.close()
    tokenizer = Tokenizer(num_words=None)
    tokenizer.fit_on_texts(dataset)
    print(len(tokenizer.word_index))
    print(len(dataset))
    train_set = data
    x_train = []
    y_train = []
    for txt, label in train_set:
        x_train.append(txt)
        y_train.append(label)
    test_set = testdata
    x_test = []
    y_test = []
    for txt, label in test_set:
        x_test.append(txt)
        y_test.append(label)
    print(len(train_set))
    print(len(test_set) + len(train_set))
    x_train_tokens = tokenizer.texts_to_sequences(x_train)
    print(np.array(x_train_tokens[1]))
    x_test_tokens = tokenizer.texts_to_sequences(x_test)
    print(np.array(x_test_tokens[1]))
    num_tokens = [len(tokens) for tokens in x_train_tokens + x_test_tokens]
    num_tokens = np.array(num_tokens)
    print('vector of lengths of comments:')
    print(num_tokens)
    print('mean value of length of comments:')
    print(np.mean(num_tokens))
    print('max number of tokens in a sequence:')
    print(np.max(num_tokens))
    max_tokens = np.mean(num_tokens) + 2 * np.std(num_tokens)
    max_tokens = int(max_tokens)
    print(
        'The max number of tokens we will allow is set to the average plus 2 standard deviations'
    )
    print(max_tokens)
    print('this will cover ' +
          str(100 * np.sum(num_tokens < max_tokens) / len(num_tokens)) +
          ' percent of the dataset')
    pad = 'pre'
    x_train_pad = pad_sequences(x_train_tokens,
                                maxlen=max_tokens,
                                padding=pad,
                                truncating=pad)
    x_test_pad = pad_sequences(x_test_tokens,
                               maxlen=max_tokens,
                               padding=pad,
                               truncating=pad)
    print('shape of train set')
    print(x_train_pad.shape)
    print('shape of test set')
    print(x_test_pad.shape)
    print('a padded train element looks as follow:')
    print(x_train_pad[1])

    #the following allows us to get the text back from its array representation
    idx = tokenizer.word_index
    inverse_map = dict(zip(idx.values(), idx.keys()))

    def tokens_to_string(tokens):
        # Map from tokens back to words.
        words = [inverse_map[token] for token in tokens if token != 0]

        # Concatenate all words.
        text = " ".join(words)

        return text
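
    # Usage sketch (assumption, not in the original): recover readable text from a padded, tokenized sample.
    print(tokens_to_string(x_train_pad[1]))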

    y_train = tf.keras.utils.to_categorical(y_train, num_classes=3)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=3)

    #we now create the RNN
    num_words = len(
        tokenizer.word_index)  # num_words=None was chosen when building the dictionary
    model = Sequential()
    embedding_size = 8
    model.add(
        Embedding(input_dim=num_words + 1,
                  output_dim=embedding_size,
                  input_length=max_tokens,
                  name='layer_embedding'))
    model.add(GRU(units=16,
                  return_sequences=True))  # first GRU layer with 16 output units
    model.add(GRU(units=8, return_sequences=True))
    model.add(GRU(units=4))  # third GRU layer, followed by a dense layer
    model.add(Dense(3, activation=activation))
    optimizer = Adam(lr=1e-3)  #this gives the learning rate
    model.compile(
        loss=loss,  #compile the keras model
        optimizer=optimizer,
        metrics=['accuracy'])
    print(model.summary())
    tbCallBack = tf.keras.callbacks.TensorBoard(log_dir='./Graph',
                                                histogram_freq=0,
                                                write_graph=True,
                                                write_images=True)
    model.fit(x_train_pad,
              y_train,
              validation_split=0.05,
              epochs=100,
              verbose=1,
              batch_size=64,
              shuffle=True,
              callbacks=[tbCallBack])
    result = model.evaluate(x_test_pad, y_test)
    print("Accuracy: {0:.2%}".format(result[1]))
    print(type(model))
    tf.keras.models.save_model(model,
                               'saved_networks/' + modelName + '.hdf5',
                               overwrite=True,
                               include_optimizer=True)
    model.save_weights('saved_networks/' + modelName + 'net.h5')
    gc.collect()  # avoid an error caused by garbage-collection order: if Python collects the session
    # first, the program exits cleanly; if it collects the SWIG memory (tf_session) first, it exits with a failure.
Example No. 10
# train the model using generators
model.fit_generator(train_generator, steps_per_epoch=nb_train_samples // batch_size,
                    epochs=epochs,
                    validation_data=val_generator,
                    validation_steps=nb_validation_samples // batch_size)

# evaluate the network on the training data
scores = model.evaluate_generator(train_generator, nb_train_samples // batch_size)
print("Accuracy on the training data: %.2f%%" % (scores[1]*100))

# evaluate the network on the validation data
scores = model.evaluate_generator(val_generator, nb_validation_samples // batch_size)
print("Accuracy on the validation data: %.2f%%" % (scores[1]*100))

# evaluate the network on the test data
scores = model.evaluate_generator(test_generator, nb_test_samples // batch_size)
print("Accuracy on the test data: %.2f%%" % (scores[1]*100))

# save the trained network
# generate the model description in JSON format
model_json = model.to_json()
json_file = open("faces_NN.json", "w")
# write the network architecture to a file
json_file.write(model_json)
json_file.close()
# write the weights to a file
model.save_weights("faces_NN.h5")
print("Network saving complete")

Example No. 11
    Convolution2D(filtrosConv1,
                  tamano_filtro1,
                  padding="same",
                  input_shape=(longitud, altura, 3),
                  activation='relu'))
cnn.add(MaxPooling2D(pool_size=tamano_pool))

cnn.add(Convolution2D(filtrosConv2, tamano_filtro2, padding="same"))
cnn.add(MaxPooling2D(pool_size=tamano_pool))

cnn.add(Flatten())
cnn.add(Dense(256, activation='relu'))
cnn.add(Dropout(0.5))
cnn.add(Dense(clases, activation='softmax'))

cnn.compile(loss='categorical_crossentropy',
            optimizer=optimizers.Adam(lr=lr),
            metrics=['accuracy'])

cnn.fit_generator(entrenamiento_generador,
                  steps_per_epoch=pasos,
                  epochs=epocas,
                  validation_data=validacion_generador,
                  validation_steps=validation_steps)
#Save the model and the weights
target_dir = './modelo/'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
cnn.save('./modelo/modelo.h5')
cnn.save_weights('./modelo/pesos.h5')
Example No. 12
import tensorflow as tf
from tensorflow.python.keras.backend import set_session

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
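
A side note (an assumption, not part of the original script): on TensorFlow 2.x the ConfigProto/Session block above is replaced by enabling per-GPU memory growth.

# TF 2.x sketch of the same allow_growth behaviour (assumes at least one visible GPU).
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)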
# train
history = AccuracyHistory()
model.compile(
    loss=keras.losses.categorical_crossentropy,
    # optimizer=keras.optimizers.SGD(lr=0.01),
    optimizer=keras.optimizers.Adam(lr=learning_rate),
    metrics=['accuracy'])
model.summary()
model.fit(x_train,
          y_train,
          batch_size=mini_batch_size,
          epochs=n_epoch,
          verbose=2,
          validation_data=(x_val, y_val),
          callbacks=[history])
model.save_weights(train_path + '/model_weight.h5')
model.save(train_path + '/model.h5')
score = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
plt.plot(range(1, n_epoch + 1), history.acc)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
Example No. 13
                  tamano_filtro2,
                  padding='same',
                  activation='relu'))
cnn.add(MaxPooling2D(pool_size=tamano_pool))
#flatten the data before the dense layers
cnn.add(Flatten())
cnn.add(Dense(256, activation='relu'))  #256 neurons
cnn.add(Dropout(0.5))
cnn.add(Dense(clases, activation='softmax'))
#compile the model with the Adam optimizer and learning rate lr
cnn.compile(loss='categorical_crossentropy',
            optimizer=Adam(lr=lr),
            metrics=['accuracy'])
#fit the model to the data
cnn.fit(imagen_entrenamiento,
        epochs=epocas,
        steps_per_epoch=pasos,
        validation_data=imagen_validacion,
        validation_steps=pasos_validacion)

archivo_historia = open("historia_deep_learning.pckl", "wb")
pickle.dump(cnn.history.history, archivo_historia)  # pickle the metrics dict (the History callback itself is not picklable)
archivo_historia.close()
print(imagen_entrenamiento.class_indices)
#save the model
dir = './modelo/'
if not os.path.exists(dir):
    os.mkdir(dir)
cnn.save('./modelo/modelo.h5')  #model structure
cnn.save_weights('./modelo/pesos.h5')  #layer weights
Example No. 14
#model.fit_generator(train_generator(), steps_per_epoch=30, epochs=10, verbose=1)
num_batch_in_epoch = round(datasize / batch_size) + 1
#model.fit_generator(train_generator(), steps_per_epoch=num_batch_in_epoch, epochs=30, verbose=1)
history_callback = model.fit_generator(train_generator(), steps_per_epoch=30, epochs=epoch_size, verbose=1)
print(history_callback.history.keys())

fi_stem = "./model_sym%d_batch%d_epoch%d_1st%d_2nd%d" % (len(symbol_list), batch_size, epoch_size, first_node, second_node)
save_file = "%s.h5"%(fi_stem)
model.save(save_file)

model_json = model.to_json()
js_file = "./deploy_%s.json" % (fi_stem)
with open( js_file,  "w") as json_file:
  json_file.write(model_json)
weight_file = "./deploy_%s_weight.h5" % (fi_stem)
model.save_weights(weight_file)

loss_file = "%s_loss.txt"%(fi_stem)
acc_file = "%s_acc.txt"%(fi_stem)
mse_file = "%s_mse.txt"%(fi_stem)

loss_history = history_callback.history['loss']
numpy_loss_history = np.array(loss_history)
np.savetxt(loss_file, numpy_loss_history, delimiter=",")

acc_history = history_callback.history['acc']
numpy_acc_history = np.array(acc_history)
np.savetxt(acc_file, numpy_acc_history, delimiter=",")

mse_history = history_callback.history['mean_squared_error']
numpy_mse_history = np.array(mse_history)
np.savetxt(mse_file, numpy_mse_history, delimiter=",")
Example No. 15
output = model.predict(test_input)

#plot a histogram of the output
hist(output, 'Pt', title='Network output')

#save labels to csv files
root = 'ne_' + str(n_epochs) + '_lr_' + str(learning_rate) + '_nl_' + str(
    no_layers) + '_' + str(layer1) + '_' + str(layer2) + '_' + str(
        layer3) + '_'

# serialize model to JSON
model_json = model.to_json()
with open(root + "model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(root + "model.h5")
print("Saved model to disk")

fname_train_lss = root + 'train_loss.csv'
fname_test_lss = root + 'val_loss.csv'
fname_train_res = root + 'train_res.csv'
fname_test_res = root + 'val_res.csv'
fname_output = root + 'output.csv'
fname_target = root + 'target.csv'

np.savetxt(fname_train_lss, history.history['loss'], delimiter=',')
np.savetxt(fname_test_lss, history.history['val_loss'], delimiter=',')
np.savetxt(fname_train_res, history.history['resolution'], delimiter=',')
np.savetxt(fname_test_res, history.history['val_resolution'], delimiter=',')
np.savetxt(fname_output, np.asarray(output), delimiter=',')
np.savetxt(fname_target, np.asarray(test_target), delimiter=',')
Example No. 16
def train():
    start_time = time.time()
    print("Starting The Training Process")
    print("Loading The Preprocessed Images Data set")
    datasets = np.load('preprocessed_train_data.npy',
                       allow_pickle=True)  #Preprocessed Train Data
    print("Preprocessed Data Set Loaded Successfully.")
    print("Shuffling The Data set")
    np.random.shuffle(datasets)
    print(datasets.shape)
    pixels = []
    labels = []
    for pixel, label in datasets:
        pixels.append(pixel)
        labels.append(label)
    pixels = np.array(pixels).reshape(-1, 32, 32, 1)
    # print('number of inputs:')
    # Building The Model:
    print("creating The Sequential Model.")
    model = Sequential()
    print("Adding First Convolution Layer To The Model.")
    model.add(
        Conv2D(16, (5, 5),
               padding="same",
               input_shape=(32, 32, 1),
               activation="relu"))
    print("Adding Pooling Layer To The First Convolution Layer.")
    model.add(MaxPooling2D(pool_size=(2, 2)))
    print("Adding The Second Convolution Layer To The Model.")
    model.add(Conv2D(64, (5, 5), padding="same", activation="relu"))
    print("Adding Pooling Layer To The Second Convolution Layer.")
    model.add(MaxPooling2D(pool_size=(2, 2)))
    print("Flattening The Output From Second Convolution Layer.")
    model.add(Flatten())
    print("Adding The Hidden Dense Layer With 1000 Neurons.")
    model.add(Dense(1000))
    print("Adding The Output Layer With 36 Neurons For 36 Classes.")
    model.add(Dense(36))
    print("Adding The Softmax Activation At The Output Layer.")
    model.add(Activation("softmax"))
    print("Using The Cross Entropy As The Loss Function With Adam Optimizer")
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print("Fitting The Model.")
    print("Training Begins...........")
    history = model.fit(pixels,
                        labels,
                        batch_size=256,
                        validation_split=0.15,
                        epochs=30)
    end_time = time.time()
    total_time = round(end_time - start_time)
    time_msg = "Training Completed Successfully in {Time}".format(
        Time=str(datetime.timedelta(seconds=total_time)))
    print(time_msg)
    print("Saving the Model in Hard Drive For Later Use")
    # serialize model to JSON
    model_json = model.to_json()
    with open("Trained_model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("Weights_model.h5")
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
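
A reload sketch (an assumption, mirroring the save calls above and assuming the same Keras package):

# Sketch: rebuild the classifier from the saved JSON and weights, then classify one image.
from keras.models import model_from_json

with open("Trained_model.json", "r") as json_file:
    loaded = model_from_json(json_file.read())
loaded.load_weights("Weights_model.h5")
# pred = loaded.predict(one_image)   # 'one_image' is a hypothetical array shaped (1, 32, 32, 1)
# print(np.argmax(pred))             # index of the predicted class (0..35)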
Example No. 17
    )

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(EPOCHS)

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training accuracy')
plt.plot(epochs_range, val_acc, label='Test accuracy')
plt.legend(loc='lower right')
plt.title('Training and test accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training loss')
plt.plot(epochs_range, val_loss, label='Test loss')
plt.legend(loc='upper right')
plt.title('Training and test loss')
plt.show()

savepath1 = os.path.join(Base_path, r'MvsK_model.h5')
savepath2 = os.path.join(Base_path, r'MvsK_model_weights.h5')
tf.keras.models.save_model(
    classifier, savepath1
)
classifier.save_weights(savepath2)
Example No. 18
                  padding="same",
                  input_shape=(longitud, altura, 3),
                  strides=(2, 2),
                  activation='relu'))
cnn.add(MaxPooling2D(pool_size=(7, 7)))
#cnn.add(Convolution2D(filtrosConv3, tamano_filtro2, padding ="same"))
#cnn.add(MaxPooling2D(pool_size=tamano_pool))
cnn.add(Convolution2D(filtrosConv2, tamano_filtro2, padding="same"))
cnn.add(MaxPooling2D(pool_size=(3, 3)))
cnn.add(Flatten())
cnn.add(Dense(1000, activation='relu'))
cnn.add(Dense(clases, activation='softmax'))

cnn.compile(loss='categorical_crossentropy',
            optimizer=optimizers.Adam(lr=lr),
            metrics=['accuracy'])

#Train the algorithm
cnn.fit_generator(entrenamiento_generador,
                  steps_per_epoch=pasos,
                  epochs=epocas,
                  validation_data=validacion_generador,
                  validation_steps=validation_steps)

target_dir = '/home/angel/Escritorio/ProyectoFinal/modelo/'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
cnn.save('/home/angel/Escritorio/ProyectoFinal/modelo/modelo.h5')
cnn.save_weights('/home/angel/Escritorio/ProyectoFinal/modelo/pesos.h5')
#os.system('poweroff')
Example No. 19
    'data2': [0, 1],
    'data3': [1, 0],
    'data4': [1, 1],
    'data5': [0, 1],
    'data6': [1, 0],
    'data7': [1, 1],
    'data8': [0, 1]
}
labels = DataFrame(Data_l,
                   columns=[
                       'data1', 'data2', 'data3', 'data4', 'data5', 'data6',
                       'data7', 'data8'
                   ])

model = Sequential()
model.add(Dense(30, input_dim=20, activation='sigmoid'))
model.add(Dense(8, activation='sigmoid'))
model.summary()

model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])

model.fit(features, labels)

# serialize model to JSON
model_json = model.to_json()
with open("memocode_wolf1.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("memocode_wolf1.h5")
print("Saved model to disk")
Example No. 20
    # optimize the model
    myChatModel.compile(loss='categorical_crossentropy',
                        optimizer='adam',
                        metrics=['accuracy'])

    # train the model
    myChatModel.fit(training, output, epochs=1000, batch_size=8)

    # serialize model to yaml and save it to disk
    model_yaml = myChatModel.to_yaml()
    with open("chatbotmodel.yaml", "w") as y_file:
        y_file.write(model_yaml)

    # serialize weights to HDF5
    myChatModel.save_weights("chatbotmodel.h5")
    print("Saved model from disk")


def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]

    s_words = nltk.word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words]

    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1

    return numpy.array(bag)
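
A usage sketch (assumptions: `words` is the stemmed vocabulary built during training, `labels` is the list of intent tags, and `myChatModel` is accessible; none of these are shown in this snippet):

# Sketch: classify a user sentence with the trained chatbot model.
bag = bag_of_words("hello, how are you today?", words)
results = myChatModel.predict(numpy.array([bag]))[0]
print(labels[numpy.argmax(results)])  # most likely intent tag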
Example No. 21
          X,
          batch_size=50,
          epochs=5,
          validation_split=0.3,
          callbacks=callbacks_list,
          verbose=1,
          shuffle=True)

model.save(current_output_local + '/my_checkpoint_test',
           overwrite=True)  # creates a HDF5 file 'my_model.h5'
#del model  # deletes the existing model
#model = load_model('output/my_model.h5')

######## save method two https://jovianlin.io/saving-loading-keras-models/
# Save the weights
model.save_weights(current_output_local + 'model_weights.h5', overwrite=True)

# Save the model architecture
with open(current_output_local + 'model_architecture.json', 'w') as f:
    f.write(model.to_json())
#######
############save method three#################
saver = tf.train.Saver()
sess = backend.get_session()
saver.save(sess, current_output_local)

model.save(current_output_local + 'my_model.h5')
#print(model.get_weights())

###########################prediction####################################
print(X.shape)
Example No. 22
    LSTM(hidden_neurons,
         batch_input_shape=(None, length_of_sequences, in_out_neurons),
         return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(
    loss="mean_squared_error",
    optimizer="adam",
)

#Run the training
early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=0)
history = model.fit(X_train,
                    y_train,
                    batch_size=600,
                    epochs=10,
                    validation_split=0.1,
                    callbacks=[early_stopping])

json_string = model.to_json()
open('keras_lstm_model.json', 'w').write(json_string)

model.save_weights('keras_lstm_weights.h5')

#Plot the results
pred_data = model.predict(X_train)
plt.plot(y_train, label='train')
plt.plot(pred_data, label='pred')
plt.legend(loc='upper left')
plt.show()
Example No. 23
class Model():
    def __init__(self, train_x, train_y,
        validation_x , validation_y, seq_info:str,
        *,
        max_epochs = 100, batch_size = 1024, hidden_layers = 2,
        neurons_per_layer = 64, architecture = Architecture.LSTM.value,
        dropout = 0.1, is_bidirectional = False, initial_learn_rate = 0.001,
        early_stop_patience = 6, is_classification=False):
        """
        INFO GOES HERE
        """

        ## Param member vars
        self.max_epochs = max_epochs
        self.batch_size = batch_size
        self.hidden_layers = hidden_layers
        self.neurons_per_layer = neurons_per_layer
        self.architecture = architecture
        self.dropout = dropout
        self.is_bidirectional = is_bidirectional
        self.initial_learn_rate = initial_learn_rate
        self.seq_info = seq_info
        self.is_classification = is_classification
        self.early_stop_patience = early_stop_patience
        self.train_time = 0

        self.train_x = train_x
        self.train_y = train_y
        self.validation_x = validation_x
        self.validation_y = validation_y

        ## Other member vars
        self.model = Sequential()
        self.training_history = None
        self.score: dict = {}

        self._create_model()
        
        
    ### PUBLIC FUNCTIONS

    def get_model(self):
        return self.model

    def train(self):
        start = time.time()
        early_stop = EarlyStopping(monitor='val_loss', patience=self.early_stop_patience, restore_best_weights=True)
        tensorboard = TensorBoard(log_dir=f"{os.environ['WORKSPACE']}/logs/{self.seq_info}__{self.get_model_info_str()}__{datetime.now().timestamp()}")

        # Train model
        self.training_history = self.model.fit(
            self.train_x, self.train_y,
            batch_size=self.batch_size,
            epochs=self.max_epochs,
            validation_data=(self.validation_x, self.validation_y),
            callbacks=[tensorboard, early_stop],
            shuffle=True
        )

        # Score model
        self.score = self.model.evaluate(self.validation_x, self.validation_y, verbose=0)
        self.score = {out: self.score[i] for i, out in enumerate(self.model.metrics_names)}
        print('Scores:', self.score)
        end = time.time()
        self.train_time = end - start


    def save_model(self):
        self._save_model_config()
        self._save_model_weights()

    def get_model_info_str(self):
        return f"{'Bi' if self.is_bidirectional else ''}{self.architecture.__name__}-HidLayers{self.hidden_layers}-Neurons{self.neurons_per_layer}-Bat{self.batch_size}-Drop{self.dropout}"

    ### PRIVATE FUNCTIONS

    def _create_model(self):
        """
        Creates and compiles the model
        """
        self._use_gpu_if_available()

        ##### Create the model ####
        self.model = Sequential()
        
        if self.is_bidirectional:
            self.model.add(Bidirectional(self.architecture(self.neurons_per_layer, input_shape=(self.train_x.shape[1:]), return_sequences=True)))
        else:
            self.model.add(self.architecture(self.neurons_per_layer, input_shape=(self.train_x.shape[1:]), return_sequences=True))
        self.model.add(Dropout(self.dropout))
        self.model.add(BatchNormalization())
        
        for i in range(self.hidden_layers):
            return_sequences = i != self.hidden_layers - 1 # False on last iter
            if self.is_bidirectional:
                self.model.add(Bidirectional(self.architecture(self.neurons_per_layer, return_sequences=return_sequences)))
            else:
                self.model.add(self.architecture(self.neurons_per_layer, return_sequences=return_sequences))
            self.model.add(Dropout(self.dropout))
            self.model.add(BatchNormalization())
            
        if self.is_classification:
            self.model.add(Dense(2, activation="sigmoid"))
        else:
            self.model.add(Dense(1))

        adam = adam_v2.Adam(learning_rate=self.initial_learn_rate)


        if self.is_classification:
            self.model.compile(
                loss="sparse_categorical_crossentropy", 
                optimizer=adam,
                metrics=["sparse_categorical_crossentropy", "accuracy"]
            )
        else:
            self.model.compile(
                loss=RSquaredMetricNeg, 
                optimizer=adam,
                metrics=["mae", RSquaredMetric]
            )
            


    def _use_gpu_if_available(self):
        ## Utilise GPU if GPU is available
        local_devices = device_lib.list_local_devices()
        gpus = [x.name for x in local_devices if x.device_type == 'GPU']
        if len(gpus) != 0:
            if self.architecture == GRU:
                self.architecture = CuDNNGRU
            elif self.architecture == LSTM:
                self.architecture = CuDNNLSTM

    
    def _save_model_weights(self):
        file_path = ""
        if self.is_classification:
            file_path = f"{os.environ['WORKSPACE']}/models/final/{self.seq_info}__{self.get_model_info_str()}__{self.max_epochs}-{self.score['sparse_categorical_crossentropy']:.3f}.h5"
        else:
            file_path = f"{os.environ['WORKSPACE']}/models/final/{self.seq_info}__{self.get_model_info_str()}__{self.max_epochs}-{self.score['RSquaredMetric']:.3f}.h5"
        self.model.save_weights(file_path)
        print(f"Saved model weights to: {file_path}")

    def _save_model_config(self):
        json_config = self.model.to_json()
        file_path = f'{os.environ["WORKSPACE"]}/model_config/{self.get_model_info_str()}.json'
        with open(file_path, "w+") as file:
            file.write(json_config)
        print(f"Saved model config to: {file_path}")
Example No. 24
    print(params)
'''

fit_history = model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
    epochs=NUM_EPOCHS,
    validation_data=validation_generator,
    validation_steps=STEPS_PER_EPOCH_VALIDATION,
    callbacks=[cb_checkpointer, cb_early_stopper])

model_json = model.to_json()
with open("model_resnet.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model_resnet.h5")

print("Saved model to disk")

# model.load_weights("../working/resnet_model.hdf5")

print(fit_history.history.keys())

# plt.figure(1, figsize = (15,8))

# plt.subplot(221)
# plt.plot(fit_history.history['acc'])
# plt.plot(fit_history.history['val_acc'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
Example No. 25
# k = 10
# step = math.floor(len(labels)/k)
# for i in range(0,k-1):
#     train_images= general_images[:i*step-1]
#     train_labels= general_labels[:i*step-1]
#     validation_images= general_images[i*step:(i+1)*step-1]
#     validation_labels= general_labels[i*step:(i+1)*step-1]
#     train_images= np.concatenate((train_images, general_images[(i+1)*step:]), axis=0)
#     train_labels= np.concatenate((train_labels, general_labels[(i+1)*step:]), axis=0)
#     model.fit(x=train_images,
#           y=train_labels,
#           epochs=5, batch_size=100,verbose=2) #,validation_split=0.2

model.fit(images_train, labels_train, batch_size=5, epochs=5, verbose=1, validation_split=0.1)
# model.save('last1_model.h5') 
model.save_weights('model_weights.h5')

# Model evaluation

result = model.evaluate(images_test, labels_test, verbose=0)

print ('Testing set accuracy:', result[1]) 

# Print loss and accuracy
# for name, value in zip(model.metrics_names, result):
#     print(name, value)

# #Print only the accuracy as a percentage (%)
# print("{0}: {1:.2%}".format(model.metrics_names[1], result[1]))

Example No. 26
                                            target_size=(14, 14),
                                            batch_size=4,
                                            class_mode='categorical')

test_generator = datagen.flow_from_directory(test_dir,
                                             target_size=(14, 14),
                                             batch_size=4,
                                             class_mode='categorical')

model.fit_generator(train_generator,
                    steps_per_epoch=nb_train_samples // batch_size,
                    epochs=epochs,
                    validation_data=val_generator,
                    validation_steps=nb_validation_samples // batch_size)

scores = model.evaluate_generator(test_generator,
                                  nb_test_samples // batch_size)

print("Аккуратность на тестовых данных: %.2f%%" % (scores[1] * 100))

#Сохранение параметров нейросети
model_json = model.to_json()
json_file = open('golosovalka.json', 'w')
json_file.write(model_json)
json_file.close()

#Save the weights obtained during training
model.save_weights('golosovalka.h5')

print('Saving complete')
Example No. 27
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(10, activation='softmax'))

    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    
    # Get summary of model
    print('Global model')
    model.summary()
    
    # Training logger
    direc = './logs2'
    logger = KerasTrainingLogger(data=X_test[:10],
                                 name=args['transformer_type'],
                                 log_dir=direc, 
                                 trans_layer=0)
    
    # Fit model
    model.fit(X_train, y_train,
              batch_size=args['batch_size'],
              epochs=args['num_epochs'],
              validation_data=(X_test, y_test),
              callbacks=[logger])    

    # Save weights
    model.save_weights(direc + '/' + args['transformer_type'] + '/weights')
    
    
Example No. 28
    512,
    activation='relu',
))

cnn.add(Dropout(0.5))

cnn.summary()

cnn.add(Dense(clases, activation='softmax'))

cnn.compile(loss='categorical_crossentropy',
            optimizer=optimizers.Adam(lr=lr),
            metrics=['accuracy'])

#Train and save
#In dir, set the path where the directory will be created;
#in save and save_weights, set the paths where the model and its weights will be stored.
cnn.fit(imagen_entrenamiento,
        steps_per_epoch=pasos,
        epochs=epocas,
        validation_data=imagen_validacion,
        validation_steps=pasos_validacion)

dir = ''

if not os.path.exists(dir):
    os.mkdir(dir)

cnn.save('')
cnn.save_weights('')
Example No. 29
    ytrain = np.zeros(shape=((NB_CLASSES + 1) * NB_VIDEOS_BY_CLASS_TRAIN,
                             NB_CLASSES + 1))

    print("Session " + str(serie))
    xtrain, ytrain = data_generation(xtrain, ytrain)

    if (MIXED_DATA == True):
        # manage several data files
        for i in range(len(REAL_VIDEO_DATASET)):
            data = np.load(str(REAL_VIDEO_DATASET[i]))
            xtrain = np.concatenate((xtrain, data['a']), axis=0)
            ytrain = np.concatenate((ytrain, data['b']), axis=0)

        indices = np.arange(xtrain.shape[0])
        np.random.shuffle(indices)
        xtrain = xtrain[indices]
        ytrain = ytrain[indices]

    #start training
    model.fit(xtrain,
              ytrain,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              verbose=VERBOSE)

    #save data
    model_json = model.to_json()
    open(f'{RESULTS_PATH}/model_conv3D.json', 'w').write(model_json)
    model.save_weights(f'{RESULTS_PATH}/weights_conv3D.h5', overwrite=True)
    print('A new model has been saved!\n')
Example No. 30
    'data7': [1, 1],
    'data8': [0, 1],
    'data9': [1, 0],
    'data10': [1, 0]
}
features = DataFrame(Data_f,
                     columns=[
                         'data1', 'data2', 'data3', 'data4', 'data5', 'data6',
                         'data7', 'data8', 'data9', 'data10'
                     ])

Data_l = {'output1': [0, 0]}
labels = DataFrame(Data_l, columns=['output1'])

model = Sequential()
model.add(Dense(5, input_dim=10, activation='tanh'))
model.add(Dense(1, activation='tanh'))
model.summary()

model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])

model.fit(features, labels)

# serialize model to JSON
model_json = model.to_json()
with open("memocode_ess_price.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("memocode_ess_price.h5")
print("Saved model to disk")