Example #1
    def save_model(self, model: Sequential):
        if not self.is_neural_network_ready_to_save:
            raise Exception('Could not save the neural network; train the model before saving')

        print('\033[1;36m' + '*' * 50)
        print('*', '\t' * 4, 'Saving the Model', '\t' * 4, '*')
        print('\033[1;36m' + '*' * 50 + '\033[m')
        model_json = model.to_json()
        with open(self.ESTRUTURA_JSON_FILE_PATH, 'w') as arquivo_json:
            arquivo_json.write(model_json)

        model.save_weights(self.WEIGHTS_FILE_PATH)
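For symmetry with the save path above, a minimal reload sketch (an assumption, not part of the original class; it mirrors the load pattern in Example #13 below and reuses the same class constants):

    def load_model(self) -> Sequential:
        # Hedged sketch: rebuild the architecture from the saved JSON,
        # then load the weights written by save_model() above.
        # Assumes tf.keras; swap the import if plain Keras is used.
        from tensorflow.keras.models import model_from_json
        with open(self.ESTRUTURA_JSON_FILE_PATH, 'r') as arquivo_json:
            model = model_from_json(arquivo_json.read())
        model.load_weights(self.WEIGHTS_FILE_PATH)
        return model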
Example #2
def train_regressor(filename, epochs, neuron_num):
    dataset = []
    with open(filename, 'r') as f:
        for line in f:
            jsonfile = json.loads(line[:-2])
            dataset.append(jsonfile)

    cars = pd.DataFrame(dataset)
    cars = cars.drop(cars[(cars['price'] > 30000)
                          & (cars['price'] < 300000)].index)
    cars_prices = cars["price"].copy()
    cars = cars.drop("price", axis=1)
    cars_prepared = full_pipeline.fit_transform(cars)

    model = Sequential()
    model.add(
        Dense(8, input_dim=10, kernel_initializer='normal', activation='relu'))
    model.add(Dense(neuron_num, activation='relu'))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    history = model.fit(cars_prepared,
                        cars_prices,
                        epochs=epochs,
                        batch_size=50,
                        verbose=1,
                        validation_split=0.2)
    model_json = model.to_json()

    plt.plot(history.history['mean_absolute_error'])
    plt.plot(history.history['val_mean_absolute_error'])
    plt.title('model mean_absolute_error')
    plt.ylabel('mean_absolute_error')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig('app/web/static/history.png')

    with open('neuralnetregressor.json', 'w') as json_file:
        json_file.write(model_json)
    model.save_weights('neuralnetregressor.h5')
Example #3
def train(x_train, y_train, x_test, y_test, epochs):

    #  calculate classes
    if np.unique(y_train).shape[0] == np.unique(y_test).shape[0]:
        #
        num_classes = np.unique(y_train).shape[0]
    else:
        print('Error in class data...')
        return -2

    # set validation data
    '''val_size = int(0.1 * x_train.shape[0])
    r = np.random.randint(0, x_train.shape[0], size=val_size)
    x_val = x_train[r, :, :]
    y_val = y_train[r]
    x_train = np.delete(x_train, r, axis=0)
    y_train = np.delete(y_train, r, axis=0)'''
    step = int(x_train.shape[0] * 0.005)
    length = int(x_train.shape[0] * 0.1 * 0.005)
    r = []
    for i in range(0, x_train.shape[0] - length, step):
        r.extend(range(i, i + length))
    x_val = x_train[r, :, :]
    y_val = y_train[r]
    x_train = np.delete(x_train, r, axis=0)
    y_train = np.delete(y_train, r, axis=0)

    print('\nInitializing CNN2D...')
    print('\nclasses:', num_classes)
    print('x train shape:', x_train.shape)
    print('x val shape:', x_val.shape)
    print('x test shape:', x_test.shape)
    print('y train shape:', y_train.shape)
    print('y val shape:', y_val.shape)
    print('y test shape:', y_test.shape)
    print("\nTrain split with mean|std {:.2f}|{:.2f}".format(
        np.mean(x_train), np.std(x_train)))
    print("Test split with mean|std {:.2f}|{:.2f}".format(
        np.mean(x_test), np.std(x_test)))

    # shape data
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1],
                              x_train.shape[2], 1)
    x_val = x_val.reshape(x_val.shape[0], x_val.shape[1], x_val.shape[2], 1)
    x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                            1)
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_val = tf.keras.utils.to_categorical(y_val, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)

    # define the model
    activation = 'elu'
    regularizer = 0.0000
    dropout = 0.25

    # preprocessing
    '''
    offset = 1.0 * np.std(x_train)
    dc0 = (x)
    dc1 = GaussianNoise(offset*0.1)(x)
    dc2 = GaussianDropout(dropout)(x)
    dc3 = Lambda(lambda r: r + __import__('keras').backend.random_uniform((1,), -offset, offset))(x)
    dc4 = Lambda(lambda r: r + __import__('keras').backend.random_uniform((1,), -offset, offset))(x)
    m = Concatenate()([dc0, dc1, dc2, dc3, dc4])
    m = Lambda(lambda r: r - __import__('keras').backend.mean(r))(x)
    '''

    # sequential

    model = Sequential()
    model.add(
        Conv2D(16,
               kernel_size=(3, 3),
               strides=(2, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer),
               input_shape=(x_train.shape[1], x_train.shape[2], 1)))
    model.add(EntropyPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    model.add(
        Conv2D(64,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation='elu',
               kernel_regularizer=regularizers.l2(regularizer)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    # model.add(Conv2D(128, kernel_size=(3, 3), strides=(1, 1), activation='elu', kernel_regularizer=regularizers.l2(regularizer)))
    # model.add(MaxPooling2D(pool_size=(1, 2)))
    # model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(
        Dense(64,
              activation='elu',
              kernel_regularizer=regularizers.l2(regularizer)))
    model.add(Dropout(dropout))
    model.add(Dense(num_classes, activation='softmax'))

    # functional
    '''
    x = Input((x_train.shape[1], x_train.shape[2], x_train.shape[3]))
    m = Conv2D(16, 3, activation=activation , kernel_regularizer=regularizers.l2(regularizer))(x)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    m = Conv2D(32, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    m = Conv2D(64, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
    m = EntropyPooling2D((2, 2))(m)
    m = Dropout(dropout)(m)
    if x_train.shape[1] < 50:
        #
        m = Flatten()(m)
    else:
        m = Conv2D(128, 3, activation=activation, kernel_regularizer=regularizers.l2(regularizer))(m)
        m = GlobalAveragePooling2D()(m)
        m = Dropout(dropout)(m)
    m = (Dense(64, activation=activation, kernel_regularizer=regularizers.l2(regularizer)))(m)
    m = Dropout(dropout)(m)
    y = Dense(num_classes, activation='softmax')(m)
    model = Model(inputs=[x], outputs=[y])
    '''

    # summarize model
    for i in range(0, len(model.layers)):
        if i == 0:
            plot_model(model, to_file='Models\\model_cnn2d.png')
            # f = open('Models\\model_cnn2d.txt', 'w')
            # print(' ')
        # print('{}. Layer {} with input / output shapes: {} / {}'.format(i, model.layers[i].name, model.layers[i].input_shape, model.layers[i].output_shape))
        # f.write('{}. Layer {} with input / output shapes: {} / {} \n'.format(i, model.layers[i].name, model.layers[i].input_shape, model.layers[i].output_shape))
        if i == len(model.layers) - 1:
            # f.close()
            print(' ')
            model.summary()

    # compile, fit evaluate
    callback = [
        callbacks.EarlyStopping(monitor='val_acc',
                                min_delta=0.01,
                                patience=10,
                                restore_best_weights=True)
    ]
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])
    model.fit(x_train,
              y_train,
              batch_size=256,
              epochs=epochs,
              verbose=2,
              validation_data=(x_val, y_val),
              callbacks=callback)
    score = model.evaluate(x_test, y_test, verbose=2)

    # evaluate on larger frames
    aggr_size = 5
    for i in range(0, y_test.shape[0] - aggr_size, aggr_size):
        if i == 0:
            y_pred = model.predict(x_test)
            y_pred = np.argmax(y_pred, axis=1)
            y_test = np.argmax(y_test, axis=1)
            y_aggr_test = []
            y_aggr_pred = []
        if np.unique(y_test[i:i + aggr_size]).shape[0] == 1:
            y_aggr_test.append(stats.mode(y_test[i:i + aggr_size])[0][0])
            y_aggr_pred.append(stats.mode(y_pred[i:i + aggr_size])[0][0])
    # print(confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1)))
    scipy_score = classification_report(y_aggr_test,
                                        y_aggr_pred,
                                        output_dict=True)['accuracy']
    print('short {:.2f} and aggr {:.2f}'.format(score[1], scipy_score))

    # save model
    open("Models\\model_cnn2d.json", "w").write(model.to_json())
    pickle.dump(model.get_config(), open("Models\\model_cnn2d.pickle", "wb"))
    model.save_weights("Models\\model_cnn2d.h5")

    # results
    return score[1]
Example #4
def example_1():

    simulation_parameters = {
        "PWM_file":
        "/home/qan/Desktop/DeepEpitif/DeepMetif/JASPAR2018_CORE_vertebrates_non-redundant_pfms_jaspar/MA0835.1.jaspar",
        "seq_length": 100,
        "center_pos": 20,
        "motif_width": 14,
        "metif_level": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    }

    [train_X, train_Y, valid_X, valid_Y, test_X,
     test_Y] = get_simulated_dataset(parameters=simulation_parameters,
                                     train_size=16000,
                                     valid_size=2000,
                                     test_size=20)

    #print(train_X.dtype)
    #print(train_Y.dtype)
    #print(train_X[2,:,:,:])
    #print(train_Y)
    #print(train_X.shape[1::])

    #exit()
    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv2D(filters=5,
               kernel_size=(1, 15),
               padding="same",
               input_shape=train_X.shape[1::]))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 35)))
    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))
    one_filter_keras_model.summary()

    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy')

    metrics_callback = MetricsCallback(train_data=(train_X, train_Y),
                                       validation_data=(valid_X, valid_Y))

    print(one_filter_keras_model.get_weights())

    history_one_filter = one_filter_keras_model.fit(
        x=train_X,
        y=train_Y,
        batch_size=10,
        epochs=50,
        verbose=1,
        callbacks=[History(), metrics_callback],
        validation_data=(valid_X, valid_Y))
    #print(one_filter_keras_model.get_weights())

    one_filter_keras_model_json = one_filter_keras_model.to_json()
    with open("one_filter_keras_model.json", "w") as json_file:
        json_file.write(one_filter_keras_model_json)

    one_filter_keras_model.save_weights("one_filter_keras_model.h5")
    print("Saved model to disk")
Example #5
try:
    model.fit(x=train_generator, 
              validation_data=test_generator, 
              steps_per_epoch=training_len//batch_size,
              validation_steps=testing_len//batch_size,
              workers=2, 
              use_multiprocessing=True, 
              epochs=num_epochs)
    #save model upon successful completion of running model.fit
    model.save('deepfake_model_compare_frames.h5')
except Exception as e:
    print(e)
    #if there is an exception, want to automatically save the model 
    model.save('compare_frames_model_train_exception.h5') #uncomment in production
    #consider updating this to save to a json file
    print(model.to_json())
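    # Hedged addition (an assumption prompted by the comment above; the
    # filename is illustrative): also persist the architecture to JSON,
    # matching the save pattern used in the other examples here.
    with open('compare_frames_model_train_exception.json', 'w') as json_file:
        json_file.write(model.to_json())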

    
    


# In[ ]:


model.metrics_names


# In[ ]:


# x, _ = get_video('xpzfhhwkwb.mp4') # fake video
prediction += "\n dog-" + str(result[0][2])
prediction += "\n kitchen-" + str(result[0][3])
prediction += "\n knife-" + str(result[0][4])
print("Prediction - \n", prediction)

# Input image
plt.figure(1)
plt.title('Knife')
plt.axis('off')
plt.imshow(
    image.load_img(
        'Dataset_Multy/single_prediction/knife-darkfinal2Color.jpeg'))
#knife-dark.jpeg
#knife-darkfinal2Color.jpeg
#chair2.jpeg
#chair2final2Color.jpeg

# ---*-----*---Part 4 - Saving the trained model and weights---*------*----

# serialize model to JSON
model_json = classifier.to_json()
with open("model_cnn_multy.json", "w") as json_file:
    json_file.write(model_json)

# serialize weights to hdf5
classifier.save_weights("weights_cnn_multy.h5")

# ---*-----*---Part 5 - Loading the trained model weights---*------*----

classifier.load_weights("weights_cnn_multy.h5")
class Model():
    def __init__(self, train_x, train_y,
        validation_x , validation_y, seq_info:str,
        *,
        max_epochs = 100, batch_size = 1024, hidden_layers = 2,
        neurons_per_layer = 64, architecture = Architecture.LSTM.value,
        dropout = 0.1, is_bidirectional = False, initial_learn_rate = 0.001,
        early_stop_patience = 6, is_classification=False):
        """
        INFO GOES HERE
        """

        ## Param member vars
        self.max_epochs = max_epochs
        self.batch_size = batch_size
        self.hidden_layers = hidden_layers
        self.neurons_per_layer = neurons_per_layer
        self.architecture = architecture
        self.dropout = dropout
        self.is_bidirectional = is_bidirectional
        self.initial_learn_rate = initial_learn_rate
        self.seq_info = seq_info
        self.is_classification = is_classification
        self.early_stop_patience = early_stop_patience
        self.train_time = 0

        self.train_x = train_x
        self.train_y = train_y
        self.validation_x = validation_x
        self.validation_y = validation_y

        ## Other member vars
        self.model = Sequential()
        self.training_history = None
        self.score: dict = {}

        self._create_model()
        
        
    ### PUBLIC FUNCTIONS

    def get_model(self):
        return self.model

    def train(self):
        start = time.time()
        early_stop = EarlyStopping(monitor='val_loss', patience=self.early_stop_patience, restore_best_weights=True)
        tensorboard = TensorBoard(log_dir=f"{os.environ['WORKSPACE']}/logs/{self.seq_info}__{self.get_model_info_str()}__{datetime.now().timestamp()}")

        # Train model
        self.training_history = self.model.fit(
            self.train_x, self.train_y,
            batch_size=self.batch_size,
            epochs=self.max_epochs,
            validation_data=(self.validation_x, self.validation_y),
            callbacks=[tensorboard, early_stop],
            shuffle=True
        )

        # Score model
        self.score = self.model.evaluate(self.validation_x, self.validation_y, verbose=0)
        self.score = {out: self.score[i] for i, out in enumerate(self.model.metrics_names)}
        print('Scores:', self.score)
        end = time.time()
        self.train_time = end - start


    def save_model(self):
        self._save_model_config()
        self._save_model_weights()

    def get_model_info_str(self):
        return f"{'Bi' if self.is_bidirectional else ''}{self.architecture.__name__}-HidLayers{self.hidden_layers}-Neurons{self.neurons_per_layer}-Bat{self.batch_size}-Drop{self.dropout}"

    ### PRIVATE FUNCTIONS

    def _create_model(self):
        """
        Creates and compiles the model
        """
        self._use_gpu_if_available()

        ##### Create the model ####
        self.model = Sequential()
        
        if self.is_bidirectional:
            self.model.add(Bidirectional(self.architecture(self.neurons_per_layer, input_shape=(self.train_x.shape[1:]), return_sequences=True)))
        else:
            self.model.add(self.architecture(self.neurons_per_layer, input_shape=(self.train_x.shape[1:]), return_sequences=True))
        self.model.add(Dropout(self.dropout))
        self.model.add(BatchNormalization())
        
        for i in range(self.hidden_layers):
            return_sequences = i != self.hidden_layers - 1 # False on last iter
            if self.is_bidirectional:
                self.model.add(Bidirectional(self.architecture(self.neurons_per_layer, return_sequences=return_sequences)))
            else:
                self.model.add(self.architecture(self.neurons_per_layer, return_sequences=return_sequences))
            self.model.add(Dropout(self.dropout))
            self.model.add(BatchNormalization())
            
        if self.is_classification:
            self.model.add(Dense(2, activation="sigmoid"))
        else:
            self.model.add(Dense(1))

        adam = adam_v2.Adam(learning_rate=self.initial_learn_rate)


        if self.is_classification:
            self.model.compile(
                loss="sparse_categorical_crossentropy", 
                optimizer=adam,
                metrics=["sparse_categorical_crossentropy", "accuracy"]
            )
        else:
            self.model.compile(
                loss=RSquaredMetricNeg, 
                optimizer=adam,
                metrics=["mae", RSquaredMetric]
            )
            


    def _use_gpu_if_available(self):
        ## Utilise GPU if GPU is available
        local_devices = device_lib.list_local_devices()
        gpus = [x.name for x in local_devices if x.device_type == 'GPU']
        if len(gpus) != 0:
            if self.architecture == GRU:
                self.architecture = CuDNNGRU
            elif self.architecture == LSTM:
                self.architecture = CuDNNLSTM

    
    def _save_model_weights(self):
        file_path = ""
        if self.is_classification:
            file_path = f"{os.environ['WORKSPACE']}/models/final/{self.seq_info}__{self.get_model_info_str()}__{self.max_epochs}-{self.score['sparse_categorical_crossentropy']:.3f}.h5"
        else:
            file_path = f"{os.environ['WORKSPACE']}/models/final/{self.seq_info}__{self.get_model_info_str()}__{self.max_epochs}-{self.score['RSquaredMetric']:.3f}.h5"
        self.model.save_weights(file_path)
        print(f"Saved model weights to: {file_path}")

    def _save_model_config(self):
        json_config = self.model.to_json()
        file_path = f'{os.environ["WORKSPACE"]}/model_config/{self.get_model_info_str()}.json'
        with open(file_path, "w+") as file:
            file.write(json_config)
        print(f"Saved model config to: {file_path}")
Example #8
def train():
    start_time = time.time()
    print("Starting The Training Process")
    print("Loading The Preprocessed Images Data set")
    datasets = np.load('preprocessed_train_data.npy',
                       allow_pickle=True)  #Preprocessed Train Data
    print("Preprocessed Data ser Loaded Successfully.")
    print("Shuffling The Data set")
    np.random.shuffle(datasets)
    print(datasets.shape)
    pixels = []
    labels = []
    for pixel, label in datasets:
        pixels.append(pixel)
        labels.append(label)
    pixels = np.array(pixels).reshape(-1, 32, 32, 1)
    labels = np.array(labels)
    # print('number of inputs:')
    # Building The Model:
    print("creating The Sequential Model.")
    model = Sequential()
    print("Adding First Convolution Layer To The Model.")
    model.add(
        Conv2D(16, (5, 5),
               padding="same",
               input_shape=(32, 32, 1),
               activation="relu"))
    print("Adding Pooling Layer To The First Convolution Layer.")
    model.add(MaxPooling2D(pool_size=(2, 2)))
    print("Adding The Second Convolution Layer To The Model.")
    model.add(Conv2D(64, (5, 5), padding="same", activation="relu"))
    print("Adding Pooling Layer To The Second Convolution Layer.")
    model.add(MaxPooling2D(pool_size=(2, 2)))
    print("Flattening The Output From Second Convolution Layer.")
    model.add(Flatten())
    print("Adding The Hidden Dense Layer With 1000 Neurons.")
    model.add(Dense(1000))
    print("Adding The Output Layer With 36 Neurons For 36 Classes.")
    model.add(Dense(36))
    print("Adding The Softmax Activation At The Output Layer.")
    model.add(Activation("softmax"))
    print("Using The Cross Entropy As The Loss Function With Adam Optimizer")
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print("Fitting The Model.")
    print("Training Begins...........")
    history = model.fit(pixels,
                        labels,
                        batch_size=256,
                        validation_split=0.15,
                        epochs=30)
    end_time = time.time()
    total_time = round(end_time - start_time)
    time_msg = "Training Completed Successfully in {Time}".format(
        Time=str(datetime.timedelta(seconds=total_time)))
    print(time_msg)
    print("Saving the Model in Hard Drive For Later Use")
    # serialize model to JSON
    model_json = model.to_json()
    with open("Trained_model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("Weights_model.h5")
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'val'], loc='upper left')
    plt.show()
Example #9
                       optimizer=sgd,
                       metrics=[metrics.mae])
addition_model.fit(input_data,
                   output_data,
                   batch_size=1,
                   epochs=100,
                   verbose=1)

# Save the model
addition_model.save("addition_model.h5")

# And also for TensorFlow.js!
# tfjs.converters.save_keras_model(addition_model, "./addition_model")

print("== Modell als JSON-Struktur ==")
pprint(addition_model.to_json())
pprint("== Modell als YAML-Struktur ==")
pprint(addition_model.to_yaml())

# Save the weights
addition_model.save_weights("addition_weights.h5")

# Save the model structure as JSON
json_str = addition_model.to_json()

with open("addition_model.json", "w") as json_file:
    json_file.write(json_str)

# Reload the model (from the .h5 file)
model = load_model('addition_model.h5')
result = model.predict([[[5,
                                                          img_height),
                                             batch_size=batch_size,
                                             class_mode='binary')

# In[9]:

model.fit_generator(train_generator,
                    steps_per_epoch=nb_train_samples // batch_size,
                    epochs=epochs,
                    validation_data=val_generator,
                    validation_steps=nb_validation_samples // batch_size)

# In[10]:

scores = model.evaluate_generator(test_generator,
                                  nb_test_samples // batch_size)

# In[11]:

print("Accuracy: %.2f%%" % (scores[1] * 100))

# In[12]:

import pandas as pd

# In[13]:

model.to_json()

# In[ ]:
Example #11
def main():
    parser = argparse.ArgumentParser(description='Tensorflow MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=128,
                        metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    args = parser.parse_args()
    print(args)

    use_cuda = not args.no_cuda

    num_classes = 10
    # input image dimensions
    img_rows, img_cols = 28, 28

    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data(
        path=str(PurePath(data_dir, "mnist.npz")))

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    cp_callback = MyCallback.get_cp_callback(checkpoint_dir)
    mq_callback = MyCallback.get_mq_callback()

    if use_cuda:
        # support multiple gpu
        available_devices = _get_available_devices()
        available_devices = [
            _normalize_device_name(name) for name in available_devices
        ]
        gpu_names = [x for x in available_devices if '/gpu:' in x]
        num_gpus = len(gpu_names)

        if num_gpus <= 0:
            raise ValueError('Unable to find any gpu device ')

        print("Let's use gpus: " + str(gpu_names))

        if num_gpus > 1:
            model = tf.keras.utils.multi_gpu_model(model, gpus=num_gpus)
    else:
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = ""

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adadelta(lr=args.lr),
                  metrics=['accuracy'])

    start_time = time.time()
    model.fit(x_train,
              y_train,
              batch_size=args.batch_size,
              epochs=args.epochs,
              verbose=1,
              validation_data=(x_test, y_test),
              callbacks=[cp_callback, mq_callback])
    duration = (time.time() - start_time) / 60
    print("Train finished. Time cost: %.2f minutes" % (duration))

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])

    # save model
    model_json = model.to_json()
    with open(str(PurePath(model_path, 'mnist_arch.json')), 'w') as arch_file:
        arch_file.write(model_json)

    output_weight = str(PurePath(model_path, 'mnist_weights.h5'))
    model.save_weights(output_weight, overwrite=True)
    print("Weight saved in path: %s" % output_weight)
Example #12
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES))
model.add(Activation('softmax'))

sgd = optimizers.SGD(lr=LEARNING_RATE)

model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

model.summary()

# Save the model
model_json_str = model.to_json()
with open(backup_dir + '/mnist_deep_model.json', 'w') as f:
    f.write(model_json_str)

# Back up the weight data
cb_cp = tf.keras.callbacks.ModelCheckpoint(backup_dir + '/weights.{epoch:02d}.hdf5', verbose=1, save_weights_only=True)
# Data for TensorBoard
cb_tf = tf.keras.callbacks.TensorBoard(log_dir=backup_dir + '/tensorBoard', histogram_freq=0)

start = time.time()
history = model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
                    validation_data=(X_test, y_test), verbose=1, callbacks=[cb_cp, cb_tf])
elapsed_time = time.time() - start
print ("elapsed_time:{0}".format(elapsed_time) + "[sec]")

plt.plot(history.history['acc'])
Example #13
class RNN:
    def __init__(self, cash_available, csv_train, csv_test, stock):
        self.cash_available = cash_available
        self.csv_train = csv_train
        self.csv_test = csv_test
        self.stock = stock

        self.model_json = "rnn.json"

        self.date = "2017-00-00T00:00:00.000Z" # date specified here only for testing purposes

        self.sc = MinMaxScaler(feature_range=(0, 1))
        self.api = tradeapi.REST(key_id='PKXVRTVRHQWTL50AYFKA',
                                 secret_key='',
                                 base_url='https://paper-api.alpaca.markets')

        self.dataset, self.training_set, self.X_train, self.y_train, \
            self.regressor, self.dataset_test, self.test_set, self.real_stock_price, \
            self.inputs, self.dataset_total, self.X_test, self.predicted_stock_price, \
            self.data = [], [], [], [], [], [], [], [], [], [], [], [], []

    def loadTrainData(self):
        """ Loads data for training. """
        self.dataset = pd.read_csv(self.csv_train, index_col="Date", parse_dates=True)

        training_set = self.dataset["Open"]
        self.training_set = pd.DataFrame(training_set)

    def loadTestData(self):
        """ Loads the data for testing. """
        self.dataset_test = pd.read_csv(self.csv_test, index_col="Date", parse_dates=True)
        self.real_stock_price = self.dataset_test.iloc[:, 1:2].values
        self.test_set = self.dataset_test['Open']
        self.test_set = pd.DataFrame(self.test_set)

    def updateData(self):
        self.stock_dataset = self.api.get_barset(self.stock, "1D", start=self.date)
        self.stock_data = self.stock_dataset[self.stock]

        for data_row in self.stock_data:
            date = data_row.t
            open = data_row.o
            high = data_row.h
            low = data_row.l
            close = data_row.c
            volume = data_row.v

            df_row = pd.DataFrame(np.array([[date, open, high, low, close, volume]]), columns=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
            df_row = df_row.set_index('Date')

            self.dataset = self.dataset.append(df_row)

    def scaleTrainData(self):
        """ Scales the training data using sklearn MinMaxScaler. """
        self.loadTrainData()

        # Feature Scaling
        self.training_set_scaled = self.sc.fit_transform(self.training_set)

        # Creating a data structure with 60 timesteps and 1 output
        self.X_train = []
        self.y_train = []
        for i in range(60, 1258):
            self.X_train.append(self.training_set_scaled[i-60:i, 0])
            self.y_train.append(self.training_set_scaled[i, 0])
        self.X_train, self.y_train = np.array(self.X_train), np.array(self.y_train)

        # Reshaping
        self.X_train = np.reshape(self.X_train, (self.X_train.shape[0], self.X_train.shape[1], 1))

    def buildRNN(self):
        """ Builds the RNN. """
        self.scaleTrainData()

        # Initialising the RNN
        self.regressor = Sequential()

        # Adding the first LSTM layer and some Dropout regularisation
        self.regressor.add(LSTM(units=50, return_sequences=True,
                        input_shape=(self.X_train.shape[1], 1)))
        self.regressor.add(Dropout(0.2))

        # Adding a second LSTM layer and some Dropout regularisation
        self.regressor.add(LSTM(units=50, return_sequences=True))
        self.regressor.add(Dropout(0.2))

        # Adding a third LSTM layer and some Dropout regularisation
        self.regressor.add(LSTM(units=50, return_sequences=True))
        self.regressor.add(Dropout(0.2))

        # Adding a fourth LSTM layer and some Dropout regularisation
        self.regressor.add(LSTM(units=50))
        self.regressor.add(Dropout(0.2))

        # Adding the output layer
        self.regressor.add(Dense(units=1))

    def fitRNN(self):
        """ Fits the RNN to the data. """
        self.buildRNN()

        # Compiling the RNN
        self.regressor.compile(optimizer='adam', loss='mean_squared_error')

        # Fitting the RNN to the Training set
        self.regressor.fit(self.X_train, self.y_train, epochs=100, batch_size=32)

    def getPredictions_TestDataset(self):
        """ Gets predictions for the test dataset. """
        self.loadTestData()
        self.fitRNN()

        # Getting the predicted stock price
        self.dataset_total = pd.concat((self.dataset['Open'], self.dataset_test['Open']), axis=0)

        self.inputs = self.dataset_total[len(self.dataset_total) - len(self.dataset_test) - 60:].values
        self.inputs = self.inputs.reshape(-1, 1)
        self.inputs = self.sc.transform(self.inputs)

        for i in range(60, len(self.dataset_test) + 60):
            self.X_test.append(self.inputs[i-60:i, 0])

        self.X_test = np.array(self.X_test)
        self.X_test = np.reshape(self.X_test, (self.X_test.shape[0], self.X_test.shape[1], 1))

        self.predicted_stock_price = self.regressor.predict(self.X_test)
        self.predicted_stock_price = self.sc.inverse_transform(self.predicted_stock_price)

    def visualize(self):
        """ Visualize the results of the real stock price versus the predicted stock price. """
        plt.plot(self.real_stock_price, color='red', label='Real Google Stock Price')
        plt.plot(self.predicted_stock_price, color='blue',
                label='Predicted Google Stock Price')
        plt.title('Google Stock Price Prediction')
        plt.xlabel('Time')
        plt.ylabel('Google Stock Price')
        plt.legend()
        plt.xlim(0, 360)
        plt.ylim(750, 875)
        plt.show()

    def predictTomorrow(self):
        """ Predicts the stock price for tomorrow. """
        self.loadTrainData()
        self.updateData()
        self.fitRNN()

        self.inputs = self.dataset[:].values
        self.inputs = self.inputs.reshape(-1, 1)
        self.inputs = self.sc.transform(self.inputs)

        self.inputs = np.array(self.inputs)
        self.inputs = np.reshape(self.inputs, (self.inputs.shape[0], self.inputs.shape[1], 1))

        self.predicted_stock_price = self.regressor.predict(self.inputs)
        self.predicted_stock_price = self.sc.inverse_transform(self.predicted_stock_price)
        print("previous 2 days", self.api.get_barset(self.stock, 'day', limit=2))
        print("prediction for tomorrow:", self.predicted_stock_price)

    def saveModel(self):
        """ Saves RNN to JSON file """
        self.model_json = self.regressor.to_json() # serialize model to JSON
        with open("model.json", "w") as json_file:
            json_file.write(self.model_json)
        self.regressor.save_weights("model.h5")  # serialize weights to HDF5

    def loadModel(self):
        self.json_file = open('model.json', 'r') # load json and create model
        self.loaded_model_json = self.json_file.read()
        self.json_file.close()
        self.regressor = model_from_json(self.loaded_model_json)
        self.regressor.load_weights("model.h5") # load weights into new model

    def buy(self):
        # Submit a market order to buy 1 share of stock at market price
        self.api.submit_order(
            symbol=self.stock,
            qty=1,
            side='buy',
            type='market',
            time_in_force='gtc'
        )

    def sell(self):
        # Submit a market order to sell 1 share of stock at market price
        self.api.submit_order(
            symbol=self.stock,
            qty=1,
            side='sell',
            type='market',
            time_in_force='gtc'
        )
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))

model.add(Dense(num_labels, activation='softmax'))

model.summary()

callbacks = TensorBoard(log_dir='./graph')

# Compiling the model
model.compile(loss=categorical_crossentropy,
              optimizer=Adam(),
              metrics=['accuracy'])

# Training the model
model.fit(train_x, train_y,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(test_x, test_y),
          shuffle=True,
          callbacks=[callbacks])

# Saving the model to use it later on
fer_json = model.to_json()

with open("emotion_classification_cnn_5_emotions.json", "w") as json_file:
    json_file.write(fer_json)

model.save_weights("emotion_classification_cnn_5_emotions.h5")
Example #15
def main():
    counting_dataset_path = 'counting_data_UCF'
    counting_dataset = list()
    train_labels = {}
    val_labels = {}
    for im_path in glob.glob(os.path.join(counting_dataset_path, '*.jpg')):
        counting_dataset.append(im_path)
        img = image.load_img(im_path)
        gt_file = im_path.replace('.jpg', '_ann.mat')
        h, w = img.size
        dmap, crowd_number = load_gt_from_mat(gt_file, (w, h))
        train_labels[im_path] = dmap
        val_labels[im_path] = crowd_number

    mae_sum = 0.0
    mse_sum = 0.0

    # create folder to save results
    date = str(datetime.datetime.now())
    d = date.split()
    d1 = d[0]
    d2 = d[1].split(':')
    results_folder = 'Results-' + d1 + '-' + d2[0] + '.' + d2[1]
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)

    # 5-fold cross validation
    epochs = int(round(iterations / iterations_per_epoch))
    n_fold = 5
    for f in range(0, n_fold):
        print('\nFold ' + str(f))

        vgg = VGG16(include_top=False,
                    weights=None,
                    input_shape=(None, None, 3))
        transfer_layer = vgg.get_layer('block5_conv3')
        vgg_partial = Model(inputs=vgg.input,
                            outputs=transfer_layer.output,
                            name='vgg_partial')

        # Start a new Keras Sequential model.
        train_model = Sequential()

        # Add the convolutional part of the VGG16 model from above.
        train_model.add(vgg_partial)

        train_model.add(
            Conv2D(1, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   data_format=None,
                   dilation_rate=(1, 1),
                   activation=None,
                   use_bias=True,
                   kernel_initializer='glorot_uniform',
                   bias_initializer='zeros',
                   kernel_regularizer=None,
                   bias_regularizer=None,
                   activity_regularizer=None,
                   kernel_constraint=None,
                   bias_constraint=None,
                   name='counting_output'))
        train_model.summary()

        # l2 weight decay
        for layer in train_model.layers:
            if hasattr(layer, 'kernel_regularizer'):
                layer.kernel_regularizer = regularizers.l2(5e-4)
            elif layer.name == 'vgg_partial':
                for l in layer.layers:
                    if hasattr(l, 'kernel_regularizer'):
                        l.kernel_regularizer = regularizers.l2(5e-4)

        optimizer = SGD(lr=0.0, decay=0.0, momentum=0.9, nesterov=False)
        loss = {'counting_output': euclideanDistanceCountingLoss}
        train_model.compile(optimizer=optimizer, loss=loss)

        if f == 0:
            split_train = [
                'counting_data_UCF/25.jpg', 'counting_data_UCF/49.jpg',
                'counting_data_UCF/18.jpg', 'counting_data_UCF/13.jpg',
                'counting_data_UCF/28.jpg', 'counting_data_UCF/34.jpg',
                'counting_data_UCF/17.jpg', 'counting_data_UCF/3.jpg',
                'counting_data_UCF/26.jpg', 'counting_data_UCF/15.jpg',
                'counting_data_UCF/31.jpg', 'counting_data_UCF/6.jpg',
                'counting_data_UCF/33.jpg', 'counting_data_UCF/2.jpg',
                'counting_data_UCF/30.jpg', 'counting_data_UCF/36.jpg',
                'counting_data_UCF/42.jpg', 'counting_data_UCF/20.jpg',
                'counting_data_UCF/38.jpg', 'counting_data_UCF/11.jpg',
                'counting_data_UCF/5.jpg', 'counting_data_UCF/7.jpg',
                'counting_data_UCF/4.jpg', 'counting_data_UCF/21.jpg',
                'counting_data_UCF/27.jpg', 'counting_data_UCF/39.jpg',
                'counting_data_UCF/22.jpg', 'counting_data_UCF/43.jpg',
                'counting_data_UCF/32.jpg', 'counting_data_UCF/35.jpg',
                'counting_data_UCF/8.jpg', 'counting_data_UCF/50.jpg',
                'counting_data_UCF/12.jpg', 'counting_data_UCF/19.jpg',
                'counting_data_UCF/44.jpg', 'counting_data_UCF/23.jpg',
                'counting_data_UCF/9.jpg', 'counting_data_UCF/46.jpg',
                'counting_data_UCF/16.jpg', 'counting_data_UCF/41.jpg'
            ]
            split_val = [
                'counting_data_UCF/37.jpg', 'counting_data_UCF/48.jpg',
                'counting_data_UCF/29.jpg', 'counting_data_UCF/10.jpg',
                'counting_data_UCF/14.jpg', 'counting_data_UCF/1.jpg',
                'counting_data_UCF/45.jpg', 'counting_data_UCF/47.jpg',
                'counting_data_UCF/40.jpg', 'counting_data_UCF/24.jpg'
            ]
            split_val_labels = {
                k: val_labels[k]
                for k in split_val
            }  # dictionary with 10 image_path-->crowd_number mappings
            split_train_labels = {
                k: train_labels[k]
                for k in split_train
            }  # dictionary with 40 image_path-->dmap mappings
        elif f == 1:
            split_train = [
                'counting_data_UCF/37.jpg', 'counting_data_UCF/48.jpg',
                'counting_data_UCF/29.jpg', 'counting_data_UCF/10.jpg',
                'counting_data_UCF/14.jpg', 'counting_data_UCF/1.jpg',
                'counting_data_UCF/45.jpg', 'counting_data_UCF/47.jpg',
                'counting_data_UCF/40.jpg', 'counting_data_UCF/24.jpg',
                'counting_data_UCF/31.jpg', 'counting_data_UCF/6.jpg',
                'counting_data_UCF/33.jpg', 'counting_data_UCF/2.jpg',
                'counting_data_UCF/30.jpg', 'counting_data_UCF/36.jpg',
                'counting_data_UCF/42.jpg', 'counting_data_UCF/20.jpg',
                'counting_data_UCF/38.jpg', 'counting_data_UCF/11.jpg',
                'counting_data_UCF/5.jpg', 'counting_data_UCF/7.jpg',
                'counting_data_UCF/4.jpg', 'counting_data_UCF/21.jpg',
                'counting_data_UCF/27.jpg', 'counting_data_UCF/39.jpg',
                'counting_data_UCF/22.jpg', 'counting_data_UCF/43.jpg',
                'counting_data_UCF/32.jpg', 'counting_data_UCF/35.jpg',
                'counting_data_UCF/8.jpg', 'counting_data_UCF/50.jpg',
                'counting_data_UCF/12.jpg', 'counting_data_UCF/19.jpg',
                'counting_data_UCF/44.jpg', 'counting_data_UCF/23.jpg',
                'counting_data_UCF/9.jpg', 'counting_data_UCF/46.jpg',
                'counting_data_UCF/16.jpg', 'counting_data_UCF/41.jpg'
            ]
            split_val = [
                'counting_data_UCF/25.jpg', 'counting_data_UCF/49.jpg',
                'counting_data_UCF/18.jpg', 'counting_data_UCF/13.jpg',
                'counting_data_UCF/28.jpg', 'counting_data_UCF/34.jpg',
                'counting_data_UCF/17.jpg', 'counting_data_UCF/3.jpg',
                'counting_data_UCF/26.jpg', 'counting_data_UCF/15.jpg'
            ]
            split_val_labels = {
                k: val_labels[k]
                for k in split_val
            }  # dictionary with 10 image_path-->crowd_number mappings
            split_train_labels = {
                k: train_labels[k]
                for k in split_train
            }  # dictionary with 40 image_path-->dmap mappings
        elif f == 2:
            split_train = [
                'counting_data_UCF/37.jpg', 'counting_data_UCF/48.jpg',
                'counting_data_UCF/29.jpg', 'counting_data_UCF/10.jpg',
                'counting_data_UCF/14.jpg', 'counting_data_UCF/1.jpg',
                'counting_data_UCF/45.jpg', 'counting_data_UCF/47.jpg',
                'counting_data_UCF/40.jpg', 'counting_data_UCF/24.jpg',
                'counting_data_UCF/25.jpg', 'counting_data_UCF/49.jpg',
                'counting_data_UCF/18.jpg', 'counting_data_UCF/13.jpg',
                'counting_data_UCF/28.jpg', 'counting_data_UCF/34.jpg',
                'counting_data_UCF/17.jpg', 'counting_data_UCF/3.jpg',
                'counting_data_UCF/26.jpg', 'counting_data_UCF/15.jpg',
                'counting_data_UCF/5.jpg', 'counting_data_UCF/7.jpg',
                'counting_data_UCF/4.jpg', 'counting_data_UCF/21.jpg',
                'counting_data_UCF/27.jpg', 'counting_data_UCF/39.jpg',
                'counting_data_UCF/22.jpg', 'counting_data_UCF/43.jpg',
                'counting_data_UCF/32.jpg', 'counting_data_UCF/35.jpg',
                'counting_data_UCF/8.jpg', 'counting_data_UCF/50.jpg',
                'counting_data_UCF/12.jpg', 'counting_data_UCF/19.jpg',
                'counting_data_UCF/44.jpg', 'counting_data_UCF/23.jpg',
                'counting_data_UCF/9.jpg', 'counting_data_UCF/46.jpg',
                'counting_data_UCF/16.jpg', 'counting_data_UCF/41.jpg'
            ]
            split_val = [
                'counting_data_UCF/31.jpg', 'counting_data_UCF/6.jpg',
                'counting_data_UCF/33.jpg', 'counting_data_UCF/2.jpg',
                'counting_data_UCF/30.jpg', 'counting_data_UCF/36.jpg',
                'counting_data_UCF/42.jpg', 'counting_data_UCF/20.jpg',
                'counting_data_UCF/38.jpg', 'counting_data_UCF/11.jpg'
            ]
            split_val_labels = {
                k: val_labels[k]
                for k in split_val
            }  # dictionary with 10 image_path-->crowd_number mappings
            split_train_labels = {
                k: train_labels[k]
                for k in split_train
            }  # dictionary with 40 image_path-->dmap mappings
        elif f == 3:
            split_train = [
                'counting_data_UCF/37.jpg', 'counting_data_UCF/48.jpg',
                'counting_data_UCF/29.jpg', 'counting_data_UCF/10.jpg',
                'counting_data_UCF/14.jpg', 'counting_data_UCF/1.jpg',
                'counting_data_UCF/45.jpg', 'counting_data_UCF/47.jpg',
                'counting_data_UCF/40.jpg', 'counting_data_UCF/24.jpg',
                'counting_data_UCF/25.jpg', 'counting_data_UCF/49.jpg',
                'counting_data_UCF/18.jpg', 'counting_data_UCF/13.jpg',
                'counting_data_UCF/28.jpg', 'counting_data_UCF/34.jpg',
                'counting_data_UCF/17.jpg', 'counting_data_UCF/3.jpg',
                'counting_data_UCF/26.jpg', 'counting_data_UCF/15.jpg',
                'counting_data_UCF/31.jpg', 'counting_data_UCF/6.jpg',
                'counting_data_UCF/33.jpg', 'counting_data_UCF/2.jpg',
                'counting_data_UCF/30.jpg', 'counting_data_UCF/36.jpg',
                'counting_data_UCF/42.jpg', 'counting_data_UCF/20.jpg',
                'counting_data_UCF/38.jpg', 'counting_data_UCF/11.jpg',
                'counting_data_UCF/8.jpg', 'counting_data_UCF/50.jpg',
                'counting_data_UCF/12.jpg', 'counting_data_UCF/19.jpg',
                'counting_data_UCF/44.jpg', 'counting_data_UCF/23.jpg',
                'counting_data_UCF/9.jpg', 'counting_data_UCF/46.jpg',
                'counting_data_UCF/16.jpg', 'counting_data_UCF/41.jpg'
            ]
            split_val = [
                'counting_data_UCF/5.jpg', 'counting_data_UCF/7.jpg',
                'counting_data_UCF/4.jpg', 'counting_data_UCF/21.jpg',
                'counting_data_UCF/27.jpg', 'counting_data_UCF/39.jpg',
                'counting_data_UCF/22.jpg', 'counting_data_UCF/43.jpg',
                'counting_data_UCF/32.jpg', 'counting_data_UCF/35.jpg'
            ]
            split_val_labels = {
                k: val_labels[k]
                for k in split_val
            }  # dictionary with 10 image_path-->crowd_number mappings
            split_train_labels = {
                k: train_labels[k]
                for k in split_train
            }  # dictionary with 40 image_path-->dmap mappings
        elif f == 4:
            split_train = [
                'counting_data_UCF/37.jpg', 'counting_data_UCF/48.jpg',
                'counting_data_UCF/29.jpg', 'counting_data_UCF/10.jpg',
                'counting_data_UCF/14.jpg', 'counting_data_UCF/1.jpg',
                'counting_data_UCF/45.jpg', 'counting_data_UCF/47.jpg',
                'counting_data_UCF/40.jpg', 'counting_data_UCF/24.jpg',
                'counting_data_UCF/25.jpg', 'counting_data_UCF/49.jpg',
                'counting_data_UCF/18.jpg', 'counting_data_UCF/13.jpg',
                'counting_data_UCF/28.jpg', 'counting_data_UCF/34.jpg',
                'counting_data_UCF/17.jpg', 'counting_data_UCF/3.jpg',
                'counting_data_UCF/26.jpg', 'counting_data_UCF/15.jpg',
                'counting_data_UCF/31.jpg', 'counting_data_UCF/6.jpg',
                'counting_data_UCF/33.jpg', 'counting_data_UCF/2.jpg',
                'counting_data_UCF/30.jpg', 'counting_data_UCF/36.jpg',
                'counting_data_UCF/42.jpg', 'counting_data_UCF/20.jpg',
                'counting_data_UCF/38.jpg', 'counting_data_UCF/11.jpg',
                'counting_data_UCF/5.jpg', 'counting_data_UCF/7.jpg',
                'counting_data_UCF/4.jpg', 'counting_data_UCF/21.jpg',
                'counting_data_UCF/27.jpg', 'counting_data_UCF/39.jpg',
                'counting_data_UCF/22.jpg', 'counting_data_UCF/43.jpg',
                'counting_data_UCF/32.jpg', 'counting_data_UCF/35.jpg'
            ]
            split_val = [
                'counting_data_UCF/8.jpg', 'counting_data_UCF/50.jpg',
                'counting_data_UCF/12.jpg', 'counting_data_UCF/19.jpg',
                'counting_data_UCF/44.jpg', 'counting_data_UCF/23.jpg',
                'counting_data_UCF/9.jpg', 'counting_data_UCF/46.jpg',
                'counting_data_UCF/16.jpg', 'counting_data_UCF/41.jpg'
            ]
            split_val_labels = {
                k: val_labels[k]
                for k in split_val
            }  # dictionary with 10 image_path-->crowd_number mappings
            split_train_labels = {
                k: train_labels[k]
                for k in split_train
            }  # dictionary with 40 image_path-->dmap mappings

        X_counting = np.empty((len(split_train), 224, 224, 3))
        y_counting = np.empty((len(split_train), 14, 14, 1))
        y_tmp = np.empty(
            (len(split_train), 14,
             14))  # to temporarily save the resized counting target
        for i, imgpath in enumerate(split_train):
            counting_img = image.load_img(imgpath)
            crop_resized_img = counting_img.resize((224, 224),
                                                   PIL.Image.BILINEAR)
            crop_resized_array_img = image.img_to_array(crop_resized_img)
            X_counting[i, ] = crop_resized_array_img

            dmap = split_train_labels[imgpath]
            y_tmp[i] = downsample(dmap, (14, 14))
            y_counting[i] = np.resize(y_tmp[i], (14, 14, 1))

        lrate = LearningRateScheduler(step_decay)
        callbacks_list = [lrate]
        train_model.fit(x=X_counting,
                        y=y_counting,
                        batch_size=batch_size,
                        epochs=epochs,
                        callbacks=callbacks_list)

        predictions = np.empty((len(split_val), 1))
        y_validation = np.empty((len(split_val), 1))
        for i in range(len(split_val)):
            img = image.load_img(split_val[i])  # test image original size
            #            img = image.load_img(split_val[i], target_size=(224, 224)) # test image 224x224
            img_to_array = image.img_to_array(img)
            img_to_array = np.expand_dims(img_to_array, axis=0)

            pred_test = train_model.predict(img_to_array)
            predictions[i] = np.sum(pred_test)
            y_validation[i] = split_val_labels[split_val[i]]

        mean_abs_err = mae(predictions, y_validation)
        mean_sqr_err = mse(predictions, y_validation)

        # serialize model to JSON
        model_json = train_model.to_json()
        model_json_name = "test_model_" + str(f) + ".json"
        with open(model_json_name, "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model_h5_name = "test_model_" + str(f) + ".h5"
        train_model.save_weights(model_h5_name)
        print("Saved model to disk")

        print('\n######################')
        print('Results on TEST SPLIT:')
        print(' MAE: {}'.format(mean_abs_err))
        print(' MSE: {}'.format(mean_sqr_err))
        print("Took %f seconds" % (time.time() - s))
        path1 = results_folder + '/test_split_results_fold-' + str(f) + '.txt'
        with open(path1, 'w') as f:
            f.write('mae: %f,\nmse: %f, \nTook %f seconds' %
                    (mean_abs_err, mean_sqr_err, time.time() - s))

        mae_sum = mae_sum + mean_abs_err
        mse_sum = mse_sum + mean_sqr_err

    print('\n################################')
    print('Average Results on TEST SPLIT:')
    print(' AVE MAE: {}'.format(mae_sum / n_fold))
    print(' AVE MSE: {}'.format(mse_sum / n_fold))
    print("Took %f seconds" % (time.time() - s))
    path2 = results_folder + '/test_split_results_avg.txt'
    with open(path2, 'w') as f:
        f.write('avg_mae: %f, \navg_mse: %f, \nTook %f seconds' %
                (mae_sum / n_fold, mse_sum / n_fold, time.time() - s))
Example #16
# In[25]:

categories = {0: 'with_mask', 1: 'without_mask'}


def mapping(value):
    return categories[value]


# In[26]:

prediction = model.predict(prediction_image)
value = np.argmax(prediction)
predicted = mapping(value)
print("PREDICTION IS {}".format(predicted))

# In[27]:

# model.save("mask_detection.h5")
from tensorflow.keras.models import model_from_json

# In[28]:

# prediction_image.shape
json_file = model.to_json()
with open("mask_detection.json", "w") as file:
    file.write(json_file)
model.save_weights("mask_detection.h5")

# In[ ]:
Example #17
class GRUTheano:
    def __init__(self,
                 input_x,
                 hidden_layer,
                 epoch=20,
                 lr=1e-3,
                 optimasi='SGD'):
        self.input = input_x
        self.hidden_layer = hidden_layer
        self.epoch = epoch
        self.lr = lr
        self.feat_max = 50
        self.optim = optimasi
        self.__build_model__()

    def __build_model__(self):
        self.model = Sequential()
        self.model.add(
            GRU(units=self.hidden_layer,
                return_sequences=True,
                input_shape=(self.input, self.feat_max)))
        self.model.add(Flatten())
        self.model.add(Dense(1, activation='sigmoid'))
        if self.optim == 'Adam':
            optimasi = Adam(lr=self.lr)
        else:
            optimasi = SGD(lr=self.lr)
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimasi,
                           metrics=['accuracy'])

    def feedpassward(self, data_train, label_train, batc=100):
        history = self.model.fit(data_train,
                                 label_train,
                                 validation_split=0.05,
                                 epochs=self.epoch,
                                 batch_size=batc)
        return history

    def evaluasimodel(self, data_test, label_test):
        result = self.model.evaluate(data_test, label_test)
        #   print(result)
        print("Accuracy: {0:.2%}".format(result[1]))
        if result[1] > 0.90:
            self.simpan_model()

    def prediksi(self, data_prediksi, label_prediksi):
        y_predik = self.model.predict(x=data_prediksi)
        y_predik = y_predik.T[0]

        cls_pred = np.array([1. if p > 0.5 else 0. for p in y_predik])
        cls_true = np.array(label_prediksi)

        # indices of the misclassified samples
        incorrect = np.where(cls_pred != cls_true)[0]
        print("incorrect:", incorrect)
        print(len(incorrect))

    def simpan_model(self):
        # serialize model to JSON
        model_json = self.model.to_json()
        with open('berat/model.json', "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        self.model.save_weights("berat/model.h5")
        print("Model saved to disk")
Пример #18
0
model = Sequential()
model.add(
    LSTM(hidden_neurons,
         batch_input_shape=(None, length_of_sequences, in_out_neurons),
         return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(
    loss="mean_squared_error",
    optimizer="adam",
)

# run training
early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=0)
history = model.fit(X_train,
                    y_train,
                    batch_size=600,
                    epochs=10,
                    validation_split=0.1,
                    callbacks=[early_stopping])

json_string = model.to_json()
with open('keras_lstm_model.json', 'w') as json_file:
    json_file.write(json_string)

model.save_weights('keras_lstm_weights.h5')

# plot the results
pred_data = model.predict(X_train)
plt.plot(y_train, label='train')
plt.plot(pred_data, label='pred')
plt.legend(loc='upper left')
plt.show()
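The snippet begins after X_train / y_train have already been windowed; a minimal sketch of one common way to build them from a univariate series, assuming in_out_neurons = 1 and that each window of length_of_sequences points predicts the next point (this preprocessing is an assumption, not the original one):

import numpy as np

def make_windows(series, length_of_sequences):
    # hypothetical windowing: each row of X is a window, y is the value that follows it
    X, y = [], []
    for i in range(len(series) - length_of_sequences):
        X.append(series[i:i + length_of_sequences])
        y.append(series[i + length_of_sequences])
    X = np.array(X).reshape(-1, length_of_sequences, 1)  # (samples, timesteps, in_out_neurons)
    y = np.array(y).reshape(-1, 1)
    return X, y

series = np.sin(np.linspace(0, 50, 2000))    # toy signal
X_train, y_train = make_windows(series, 25)  # length_of_sequences = 25 here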
def trainModel(modelName, train_X, train_Y, test_X, test_Y, layers, inputDime,
               outputDim, transfnc, optimizer, epochs, batch):
    # create a TensorBoard callback
    tb = TensorBoard(log_dir='./logs',
                     histogram_freq=0,
                     write_graph=True,
                     write_images=True)
    # create a csv logger callback
    csv_logger = CSVLogger("logs/" + modelName + 'training.log')

    ## create model
    model = Sequential()
    model.add(Dense(layers[0], input_dim=inputDime, activation=transfnc))

    # set up the model's architecture
    for i in range(1, len(layers)):
        model.add(Dense(layers[i], activation=transfnc))
    model.add(Dense(outputDim, activation='softmax'))

    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy', 'mse'])
    start_time = time.time()
    # Fit the model
    model.fit(
        train_X,
        train_Y,
        epochs=epochs,
        shuffle=True,
        batch_size=batch,
        callbacks=[tb, csv_logger],
        verbose=1,
        validation_split=0.15,
    )

    # evaluate the model
    scores = evaluateModel(model, test_X, test_Y, batch)
    elapsed_time = time.time() - start_time

    # Saving the model
    architecture = "-".join(str(layer) for layer in layers)

    stats = open(statsFile, 'a')
    stats.write(architecture + "," + transfnc + "," + optimizer + "," +
                str(epochs) + "," + str(batch) + "," + str(scores[1] * 100) +
                "," + str(scores[2]) + "," +
                (time.strftime("%H:%M:%S.", time.gmtime(elapsed_time))) + "\n")
    stats.close()
    # serialize model to JSON
    model_json = model.to_json()
    with open("Models/JSON/" + modelName + "_Architecure.json",
              "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save("Models/Weights/" + modelName + ".hd5")
    print("SAVING " + modelName + " TO DISK")
    print("FINISHED TRAINING MODEL : " + modelName)

    # sleep(30)
    return model
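A minimal sketch of calling trainModel; the data, layer sizes and training settings are placeholders, and it assumes the logs/, Models/JSON/ and Models/Weights/ folders plus the statsFile and evaluateModel names from the original module are available.

import numpy as np
from tensorflow.keras.utils import to_categorical

# hypothetical toy data: 20 features, 3 classes
train_X = np.random.rand(500, 20)
train_Y = to_categorical(np.random.randint(0, 3, 500), num_classes=3)
test_X = np.random.rand(100, 20)
test_Y = to_categorical(np.random.randint(0, 3, 100), num_classes=3)

demo_model = trainModel("demo_model", train_X, train_Y, test_X, test_Y,
                        layers=[64, 32], inputDime=20, outputDim=3,
                        transfnc="relu", optimizer="adam", epochs=5, batch=32)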
          callbacks=callbacks_list,
          verbose=1,
          shuffle=True)

model.save(current_output_local + '/my_checkpoint_test',
           overwrite=True)  # creates an HDF5 file at this path
#del model  # deletes the existing model
#model = load_model('output/my_model.h5')

######## save method two https://jovianlin.io/saving-loading-keras-models/
# Save the weights
model.save_weights(current_output_local + 'model_weights.h5', overwrite=True)

# Save the model architecture
with open(current_output_local + 'model_architecture.json', 'w') as f:
    f.write(model.to_json())
#######
############save method three#################
saver = tf.train.Saver()
sess = backend.get_session()
saver.save(sess, current_output_local)

model.save(current_output_local + 'my_model.h5')
#print(model.get_weights())
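For completeness, a minimal sketch of the loading counterparts to the three save methods above; the paths mirror the ones written here, and the tf.train.Saver variant assumes the same TF1-style session handling as the save:

from tensorflow.keras.models import load_model, model_from_json

# method one: reload the full model saved with model.save(...)
restored_full = load_model(current_output_local + 'my_model.h5')

# method two: rebuild from the JSON architecture and reload the weights
with open(current_output_local + 'model_architecture.json') as f:
    restored_json = model_from_json(f.read())
restored_json.load_weights(current_output_local + 'model_weights.h5')

# method three: restore the TF1 checkpoint into the current session
# saver.restore(backend.get_session(), current_output_local)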

########################### prediction ####################################
print(X.shape)
#temp=temp
data = X[0:100, :, :, :]
del X
data = np.array(data).reshape(-1, IMG_SIZE_UP, IMG_SIZE_UP, 3)
Пример #21
0
print('done')

from tensorflow.python.keras.applications.resnet50 import preprocess_input
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
image_size = 224
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)

train_generator = data_generator.flow_from_directory('Images/train',
                                                     target_size=(image_size,
                                                                  image_size),
                                                     batch_size=10,
                                                     class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
    'Images/val',
    target_size=(image_size, image_size),
    batch_size=10,
    class_mode='categorical')

my_new_model.compile(optimizer='sgd',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
my_new_model.fit_generator(train_generator,
                           steps_per_epoch=900,
                           epochs=2,
                           validation_data=validation_generator,
                           validation_steps=400)

model_json = my_new_model.to_json()
with open("model_arch.json", "w") as json_file:
    json_file.write(model_json)
my_new_model.save_weights("my_model_weights.h5")
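A minimal sketch of reloading the architecture and weights saved above and classifying one image; the image path is a placeholder, and the mapping from class index to label is assumed to follow train_generator.class_indices.

import numpy as np
from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.image import load_img, img_to_array

with open("model_arch.json") as f:
    restored_model = model_from_json(f.read())
restored_model.load_weights("my_model_weights.h5")

img = load_img("Images/val/example.jpg", target_size=(image_size, image_size))  # hypothetical path
x = preprocess_input(np.expand_dims(img_to_array(img), axis=0))
probs = restored_model.predict(x)[0]
print(np.argmax(probs), probs)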
    ytrain = np.zeros(shape=((NB_CLASSES + 1) * NB_VIDEOS_BY_CLASS_TRAIN,
                             NB_CLASSES + 1))

    print("Session " + str(serie))
    xtrain, ytrain = data_generation(xtrain, ytrain)

    if MIXED_DATA:
        # manage several data files
        for i in range(len(REAL_VIDEO_DATASET)):
            data = np.load(str(REAL_VIDEO_DATASET[i]))
            xtrain = np.concatenate((xtrain, data['a']), axis=0)
            ytrain = np.concatenate((ytrain, data['b']), axis=0)

        indices = np.arange(xtrain.shape[0])
        np.random.shuffle(indices)
        xtrain = xtrain[indices]
        ytrain = ytrain[indices]

    # start training
    model.fit(xtrain,
              ytrain,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              verbose=VERBOSE)

    # save the model architecture and weights
    model_json = model.to_json()
    with open(f'{RESULTS_PATH}/model_conv3D.json', 'w') as json_file:
        json_file.write(model_json)
    model.save_weights(f'{RESULTS_PATH}/weights_conv3D.h5', overwrite=True)
    print('A new model has been saved!\n')
Пример #23
0
class FaceRecogPredictor:
    def __init__(self, labels, from_json=False):
        self.from_json = from_json
        self.labels = labels
        if from_json:
            self.load()
        else:
            self.amt_labels = len(self.labels)
            self.dirs = [PHOTO_DIR + label + "/" for label in self.labels]
            self.model = Sequential()
            self.model.add(
                Conv2D(64,
                       kernel_size=1,
                       activation="relu",
                       input_shape=(1, PHOTO_ROWS, PHOTO_COLS)))
            self.model.add(Conv2D(32, kernel_size=1, activation="relu"))
            self.model.add(Flatten())
            self.model.add(Dense(16, activation="relu"))
            self.model.add(Dense(self.amt_labels, activation="softmax"))
            self.model.compile(optimizer="adam",
                               loss="categorical_crossentropy",
                               metrics=["accuracy"])

    def fetch_data(self):
        xs = []
        ys = []
        for ix, direc in enumerate(self.dirs):
            y = np.array([0 for i in range(self.amt_labels)])
            y[ix] = 1
            photos = [direc + fname for fname in os.listdir(direc)]
            for p in photos:
                photo = Image.open(p)
                resized = photo.resize((PHOTO_ROWS, PHOTO_COLS))
                gray = resized.convert("L")
                xs.append(np.array([np.array(gray)]))
                ys.append(y)
        return (xs, ys)

    def get_train_test(self, xs, ys, split=0.7):
        sh_xs, sh_ys = shuffle(xs, ys)
        len_xs = len(xs)
        train_size = int(len_xs * split)
        x_train, y_train = sh_xs[:train_size], sh_ys[:train_size]
        # start the test split at train_size so no sample is dropped
        x_test, y_test = sh_xs[train_size:], sh_ys[train_size:]
        return (x_train, x_test, y_train, y_test)

    def train(self):
        xs, ys = self.fetch_data()
        x_train, x_test, y_train, y_test = self.get_train_test(xs, ys)
        self.model.fit(np.array(x_train),
                       np.array(y_train),
                       validation_data=(np.array(x_test), np.array(y_test)),
                       epochs=100)

    def predict(self, x):
        prediction = self.model.predict(x)
        ix = np.argmax(prediction)
        return self.labels[ix]

    def save(self):
        with open(MODEL_DIR + "model.json", "w") as json_file:
            json_file.write(self.model.to_json())
        # keep the weights next to the JSON so load() can find them
        self.model.save_weights(MODEL_DIR + "model.h5")

    def load(self):
        inside = os.listdir(MODEL_DIR)
        if "model.json" not in inside or "model.h5" not in inside:
            raise ValueError(
                "Make sure both model.json and model.h5 are inside \'models/\'"
            )
        # open for reading; "w" here would truncate the saved architecture
        with open(MODEL_DIR + "model.json", "r") as json_file:
            self.model = model_from_json(json_file.read())
        self.model.load_weights(MODEL_DIR + "model.h5")
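A minimal sketch of using FaceRecogPredictor; the label names and photo path are placeholders, and PHOTO_DIR / MODEL_DIR are assumed to point at the per-label photo folders and the saved-model directory respectively.

import numpy as np
from PIL import Image

# first run: train on the photos under PHOTO_DIR/<label>/ and save the model
predictor = FaceRecogPredictor(["alice", "bob"])  # hypothetical labels
predictor.train()
predictor.save()

# later run: reload the saved model and classify a new photo
restored = FaceRecogPredictor(["alice", "bob"], from_json=True)
photo = Image.open("new_photo.jpg").resize((PHOTO_ROWS, PHOTO_COLS)).convert("L")  # hypothetical file
x = np.array([[np.array(photo)]])  # batch of one grayscale photo, matching the training layout
print(restored.predict(x))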