Example #1
def generate_second_model(num_classes, batch_size, epochs, x_train, y_train):
    model = Sequential()
    # 1st convolution layer
    model.add(
        Conv2D(64, (5, 5),
               input_shape=(48, 48, 1),
               activation='relu',
               padding='same'))
    model.add(MaxPooling2D(pool_size=(5, 5)))

    # 2nd convolution layer
    model.add(Conv2D(128, (5, 5), activation='relu', padding='same'))
    model.add(Conv2D(128, (5, 5), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())

    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))

    model.add(Dense(num_classes, activation='softmax'))
    # ------------------------------
    # batch process
    gen = ImageDataGenerator()
    train_generator = gen.flow(x_train, y_train, batch_size=batch_size)

    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    model.fit_generator(train_generator,
                        steps_per_epoch=batch_size,
                        epochs=epochs)
    return model
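A minimal usage sketch, assuming 48x48 grayscale inputs already loaded as NumPy arrays; the shapes and class count below are illustrative, not from the original:

import numpy as np

x_train = np.random.rand(1000, 48, 48, 1).astype('float32')             # illustrative data
y_train = keras.utils.to_categorical(np.random.randint(0, 7, 1000), 7)  # hypothetical 7 classes

model = generate_second_model(num_classes=7, batch_size=256, epochs=5,
                              x_train=x_train, y_train=y_train)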
Example #2
def train(data_set, steps_per_epoch):
    model = Sequential()
    model.add(Conv2D(8, (3, 3),
                     activation='relu',
                     input_shape=(816, 816, 3)))
    model.add(Conv2D(8, (3, 3),
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(rate=0.5))

    model.add(Flatten())
    model.add(Dense(units=32, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(labels_size, activation='softmax'))

    # defined but unused below; Adam is what the model is actually compiled with
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=keras.optimizers.Adam(),
                  loss=keras.losses.categorical_crossentropy,
                  metrics=['accuracy'])

    model.fit_generator(batch_generator(data_set, batch_size, img_w, img_h, channels),
                        steps_per_epoch=steps_per_epoch,
                        epochs=3,
                        verbose=1)
    # score = model.evaluate(x_test, y_test)
    model.save(model_path)
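train() leans on several module-level names (batch_generator, labels_size, batch_size, img_w, img_h, channels, model_path) defined elsewhere; a minimal sketch of plausible stand-ins, purely illustrative:

import numpy as np

labels_size = 10                                    # hypothetical class count
batch_size, img_w, img_h, channels = 8, 816, 816, 3
model_path = 'model.h5'                             # hypothetical output path

def batch_generator(data_set, batch_size, img_w, img_h, channels):
    # endless stream of random (images, one-hot labels) batches; ignores data_set (stub only)
    while True:
        x = np.random.rand(batch_size, img_h, img_w, channels).astype('float32')
        y = np.eye(labels_size)[np.random.randint(0, labels_size, batch_size)]
        yield x, y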
Example #3
def run_state_model(state_name, batch_size=5, n_input=5, num_epochs=25, num_per_epoch=100, qverbose=2, graph=0, base_path="Models/"):
#   path = f"{base_path}{state_name}"
  path = str(base_path) + str(state_name)
  model = None
  #This try catch block either gets a preexisting model or creates a new one
  try:
    model = load_model(path)
    print("Model {} found, continuing training".format(path))
  except (IOError, OSError):
    print("Path {} not found, training new model for {}".format(path, state_name))
  
  if model is None:
    model = Sequential()
    model.add(LSTM(100, activation='relu', input_shape=(n_input,1), return_sequences=True))
    model.add(LSTM(100, activation='relu', input_shape=(1, n_input)))
    model.add(Dropout(0.3))

    model.add(Dense(1))
    model.compile(loss="mse", optimizer="adam")

  #Data preprocessing block
  phase_time, cases, scaler = get_cases(state_name)
#   plt.plot(phase_time, cases)
  print(max(scaler.inverse_transform(cases)))

  phase_time = phase_time.reshape((len(phase_time), 1))
  cases = cases.reshape((len(cases), 1))
  print(len(cases), len(phase_time))
  #print(cases)
  data_gen = TimeseriesGenerator(phase_time, cases,
                                 length=n_input, batch_size=batch_size)

  print("RUNNING TRAINING FOR {}".format(state_name))

  model.fit_generator(data_gen, steps_per_epoch=num_per_epoch,
                      epochs=num_epochs, verbose=qverbose)


#   filename = f"{state_name}"
  filename = str(state_name)
  print("TRAINING FOR {} FINISHED, WRITING OUT MODEL TO FILE".format(state_name))
  model.save(str(base_path) + str(filename))  # f"{base_path}{filename}" on Python 3.6+
  delt = abs(phase_time[1] - phase_time[0])
  if graph == 1:
      graph_cases(model, phase_time, cases, delt, scaler)
  
 
  predictslist = scaler.inverse_transform(
      [single_predict(i, model, delt, scaler)
       for i in np.arange(phase_time[0],
                          phase_time[0] + (len(cases) + 30) * delt,
                          delt)])

#   new_data[f'{state_name} Predict'] = np.squeeze(predictslist)
  new_data[state_name + ' Predict'] = np.squeeze(predictslist)
Example #4
class ModelFive:
    def __init__(self, train_loc, val_loc, batch_size):
        self.train_loc = train_loc
        self.val_loc = val_loc
        self.batch_size = batch_size

    def load_and_prep_data(self):
        with open(self.train_loc, 'rb') as f:
            self.t_data = pickle.load(f)
        with open(self.val_loc, 'rb') as f:
            self.v_data = pickle.load(f)
        global train_step, val_step
        train_step = 0
        val_step = 0
        for t in self.t_data.X_list:
            train_step += int(len(t) / self.batch_size) + 1
        for t in self.v_data.X_list:
            val_step += int(len(t) / self.batch_size) + 1

    def data_generator(self, mode):
        while True:
            if mode == 'train':
                data = self.t_data
            elif mode == 'validation':
                data = self.v_data
            for X, Y in zip(data.X_list, data.Y_list):
                i = 0
                j = self.batch_size
                x, y = X[i:j], Y[i:j]
                yield (x, y)
                length = len(X)
                while j < length:
                    i += self.batch_size
                    j += self.batch_size
                    x, y = X[i:j], Y[i:j]
                    yield (x, y)

    def define_model(self, hidden_size):
        input_size = self.t_data.X_list[0].shape[2]
        self.model = Sequential()
        self.model.add(Masking(input_shape=(None, input_size)))
        self.model.add(Bidirectional(LSTM(hidden_size, return_sequences=True)))
        self.model.add(Bidirectional(LSTM(hidden_size, return_sequences=True)))
        self.model.add(TimeDistributed(Dense(12, activation='sigmoid')))
        print('compiling')
        self.model.compile(loss='mean_squared_error',
                           optimizer='rmsprop',
                           metrics=['mse'])

    def train(self, num_epochs):
        train_gen = self.data_generator('train')
        val_gen = self.data_generator('validation')
        self.model.fit_generator(train_gen,
                                 steps_per_epoch=train_step,
                                 validation_data=val_gen,
                                 validation_steps=val_step,
                                 epochs=num_epochs)
Example #5
def train_features_integration_layer(train_features_path, test_features_path, model_path):
    tr_data = h5py.File(train_features_path, 'r')
    te_data = h5py.File(test_features_path, 'r')

    labels = meta.get_train_labels_list()
    class_weights = compute_class_weight('balanced', np.unique(labels), labels)

    batch_size = 64
    epochs = 200

    create_folder(os.path.dirname(model_path))
    mc_top = ModelCheckpoint(model_path, monitor='val_loss', verbose=1, save_best_only=True,
                             save_weights_only=False, mode='auto', period=1)

    input_shape = tr_data['x'].shape[1:]

    model = Sequential([
        BatchNormalization(input_shape=input_shape),
        Dropout(0.5),
        #Dense(2048, activation='tanh'),
        #BatchNormalization(),
        #Dropout(0.5),
        Dense(1024, activation='tanh'),
        BatchNormalization(),
        Dropout(0.5),
        Dense(512, activation='tanh'),
        BatchNormalization(),
        Dropout(0.5),
        Dense(256, activation='tanh'),
        BatchNormalization(),
        Dropout(0.5),
      #  Dense(256, activation='relu'),
      #  BatchNormalization(),
     #   Dropout(0.6),
        Dense(128, activation='tanh'),
        BatchNormalization(),
        Dropout(0.5),
     #   Dense(32, activation='relu'),
     #   BatchNormalization(),
     #   Dropout(0.6),
        Dense(17, activation='sigmoid')
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    gen = RatioDataGenerator(batch_size=batch_size, type='train')

    model.fit_generator(generator=gen.generate(tr_data),
                        steps_per_epoch=100,
                        epochs=epochs,
                        verbose=1,
                        callbacks=[mc_top],
                        validation_data=(te_data['x'], te_data['y']),
                        class_weight=class_weights)
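A hedged usage sketch; the indexing above implies each HDF5 file exposes 'x' (features) and 'y' (labels) datasets, and the paths here are hypothetical:

train_features_integration_layer('features/train.h5',   # hypothetical paths
                                 'features/test.h5',
                                 'models/integration_top.h5')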
Example #6
def train():
    print('Training started...')
    maxsize = (100, 100)

    # train_labels_data = prepare_label_data('/Users/mehrdad/Documents/Dev/TheGreenBots/data/labels.json')

    (train_images,
     train_labels) = load_image_dataset(os.path.join(PROJECT_PATH, 'images/'),
                                        maxsize)

    # display_images(train_images, train_labels, class_names)
    # plt.show()

    # Setting up the layers.
    classifier = Sequential()
    classifier.add(
        keras.layers.Conv2D(32, (3, 3),
                            input_shape=(100, 100, 3),
                            activation='relu'))
    classifier.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
    classifier.add(keras.layers.Flatten())
    classifier.add(keras.layers.Dense(units=128, activation='relu'))
    classifier.add(keras.layers.Dense(units=1, activation='sigmoid'))

    classifier.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])

    train_datagen = ImageDataGenerator(rescale=1. / 255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    training_set = train_datagen.flow_from_directory(os.path.join(
        PROJECT_PATH, 'images/'),
                                                     batch_size=32,
                                                     class_mode='binary',
                                                     color_mode='rgb')
    test_set = test_datagen.flow_from_directory(os.path.join(
        PROJECT_PATH, 'straight/'),
                                                batch_size=32,
                                                class_mode='binary',
                                                color_mode='rgb')

    classifier.fit_generator(
        training_set,
        steps_per_epoch=10,  # 8000
        epochs=5,
        validation_data=test_set,
        validation_steps=20  # 2000
    )
    return classifier
Example #7
def network(train_generator, validation_generator, test_generator,
            callback_list):
    model = Sequential()
    model.add(
        layers.Conv2D(32, (3, 3),
                      activation="relu",
                      input_shape=(150, 150, 3)))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dense(1, activation="sigmoid"))
    model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=50,
                                  epochs=50,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=50)
    model.save('cats_and_dogs_small_2.h5')
    return history
Example #8
def main():
    # Name of this script
    script_name = os.path.basename(__file__)[0:-3]
    # Construct folder name using name of this script
    output_path_name = '_{}_outputs'.format(script_name)
    # Try to create a new folder
    try:
        # Make the output folder
        os.mkdir(output_path_name)
    except FileExistsError:
        pass

    # Model below this line ================================================

    custom_callback = get_custom_callback('multi_label', './{}'.format(output_path_name))
    callbacks_list = [custom_callback]


    x_train, y_train, x_test, y_test = get_values()

    y_train = to_categorical(y_train, NUM_CLASSES)
    y_test = to_categorical(y_test, NUM_CLASSES)

    datagen = ImageDataGenerator(
        horizontal_flip=True,
        vertical_flip=True,
        rotation_range=360
    )

    model = Sequential()

    densenet = DenseNet121(
        weights='./DenseNet-BC-121-32-no-top.h5',
        include_top=False,
        input_shape=(IMG_SIZE, IMG_SIZE, 3)
    )

    model.add(densenet)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dropout(0.5))
    # model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
    model.add(layers.Dense(NUM_CLASSES, activation='sigmoid'))

    model.summary()

    model.compile(loss='binary_crossentropy',
                  # optimizer=optimizers.Adam(lr=0.0001,decay=1e-6),
                  optimizer=optimizers.SGD(lr=0.0001, momentum=0.9),
                  metrics=['accuracy'])

    # fits the model on batches with real-time data augmentation:
    history = model.fit_generator(
        datagen.flow(x_train, y_train, batch_size=BATCH_SIZE, seed=1),
        steps_per_epoch=len(x_train) // BATCH_SIZE,
        epochs=NUM_EPOCHS,
        validation_data=(x_test, y_test),
        callbacks=callbacks_list,
        max_queue_size=2
    )
Example #9
class Model():
    def __init__(self):
        self.X_train = []
        self.Y_train = []
        self.model = Sequential()

    def add_training_files(self, path):
        with open(path, 'r') as f:
            training_files = f.readlines()
        pattern = re.compile(r'mngu0_s1_(\d{4})\n')
        for filename in training_files:
            num = int(pattern.match(filename).group(1))
            self.X_train.append(self.lsfs[num])
            self.Y_train.append(self.emas[num])

    def load_lsf_data(self, path):
        self.lsfs = get_all_data(path, 'lsf')

    def load_ema_data(self, path):
        self.emas = get_all_data(path, 'ema')

    def define_model(self, hidden_size, input_size):
        self.model.add(
            LSTM(hidden_size,
                 return_sequences=True,
                 input_shape=(None, input_size)))
        self.model.add(Activation('sigmoid'))
        print('compiling')
        self.model.compile(loss='mean_squared_error',
                           optimizer='adam',
                           metrics=['accuracy'])

    def data_generator(self, test=False):
        while True:
            if test:
                x_list, y_list = self.X_test, self.Y_test
            else:
                x_list, y_list = self.X_train, self.Y_train
            for x, y in zip(x_list, y_list):
                yield (x.reshape(1, len(x),
                                 len(x[0])), y.reshape(1, len(y), len(y[0])))

    def train_model(self):
        data_gen = self.data_generator()
        self.model.fit_generator(data_gen, steps_per_epoch=100, epochs=1)
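A hedged usage sketch showing the call order the class appears to expect (the LSF/EMA data must be loaded before add_training_files; paths are hypothetical):

m = Model()
m.load_lsf_data('data/lsf/')                  # hypothetical paths
m.load_ema_data('data/ema/')
m.add_training_files('data/train_files.txt')
# input_size is the per-frame feature width of the LSF arrays
m.define_model(hidden_size=128, input_size=m.X_train[0].shape[1])
m.train_model()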
Example #10
def generate_first_model(num_classes, batch_size, epochs, x_train, y_train):
    model = Sequential()

    # 1st convolution layer
    model.add(Conv2D(64, (5, 5), activation='relu', input_shape=(48, 48, 1)))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(2, 2)))

    # 2nd convolution layer
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    # 3rd convolution layer
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(AveragePooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Flatten())

    # fully connected neural networks
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.2))

    model.add(Dense(num_classes, activation='softmax'))
    # ------------------------------
    # batch process
    gen = ImageDataGenerator()
    train_generator = gen.flow(x_train, y_train, batch_size=batch_size)

    # ------------------------------

    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])

    # model.fit_generator(x_train, y_train, epochs=epochs) #train for all trainset
    model.fit_generator(train_generator,
                        steps_per_epoch=batch_size,
                        epochs=epochs)  # train for randomly selected one

    return model
Example #11
def get_basic_gru_model():
    model = Sequential()
    model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
    model.add(layers.Dense(1))

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen,
                                  steps_per_epoch=500,
                                  epochs=20,
                                  validation_data=val_gen,
                                  validation_steps=val_steps)
    return history
Example #12
def get_basic_ml_model():
    model = Sequential()
    model.add(
        layers.Flatten(input_shape=(lookback // step, float_data.shape[-1])))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(1))

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen,
                                  steps_per_epoch=500,
                                  epochs=20,
                                  validation_data=val_gen,
                                  validation_steps=val_steps)
    return history
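Both functions above (like the forecasting models in Examples 15 and 16 below) rely on module-level names -- float_data, train_gen, val_gen, val_steps, lookback, step -- defined elsewhere; a minimal sketch of plausible stand-ins, assuming a 2-D (timesteps, features) array:

import numpy as np

float_data = np.random.rand(10000, 14).astype('float32')  # illustrative (timesteps, features)
lookback, step, batch_size = 240, 6, 128

def make_gen(data, min_i, max_i):
    # yields (samples, targets) batches with lookback // step timesteps per sample
    while True:
        rows = np.random.randint(min_i + lookback, max_i, size=batch_size)
        samples = np.array([data[r - lookback:r:step] for r in rows])
        targets = data[rows][:, 1]  # predict one feature one step ahead (illustrative)
        yield samples, targets

train_gen = make_gen(float_data, 0, 8000)
val_gen = make_gen(float_data, 8000, 10000)
val_steps = (10000 - 8000 - lookback) // batch_size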
Example #13
    def train(self):
        rnn_type = LSTM

        rnn_dropout = 0.0
        rnn_units = 128
        rnn_timesteps = 128
        rnn_features = 32

        output_size = 1

        batch_size = 512
        epochs = 10000000  # effectively train-until-interrupted

        input_shape = (rnn_timesteps, rnn_features)

        if os.path.isfile(self.model_path):
            profiler = Profiler()
            model = load_model(self.model_path,
                               custom_objects={
                                   'root_mean_squared_error':
                                   root_mean_squared_error
                               })
            profiler.stop(f'Loaded model from "{self.model_path}".')
        else:
            model = Sequential()
            model.add(
                rnn_type(rnn_units,
                         dropout=rnn_dropout,
                         return_sequences=False,
                         input_shape=input_shape))
            model.add(Dense(output_size))
            model.add(Activation('tanh'))
            optimizer = Adam(lr=0.01)
            model.compile(optimizer=optimizer, loss=root_mean_squared_error)

        training_generator = SlidingWindowGenerator(self.x_training_wav,
                                                    self.y_training_wav,
                                                    input_shape, output_size,
                                                    batch_size)
        validation_generator = SlidingWindowGenerator(self.x_validation_wav,
                                                      self.y_validation_wav,
                                                      input_shape, output_size,
                                                      batch_size)
        save_callback = SaveCallback(self.model_path)
        history = model.fit_generator(generator=training_generator,
                                      epochs=epochs,
                                      verbose=1,
                                      validation_data=validation_generator,
                                      callbacks=[save_callback])
Example #14
def training():
    train_generator, validation_generator = prepare_data()
    # train_generator, validation_generator, test_generator = prepare_data()

    resnet50_network = ResNet50(include_top=False,
                                weights='imagenet',
                                input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

    for layer in resnet50_network.layers[:-3]:
        layer.trainable = False

    model = Sequential()
    model.add(resnet50_network)
    model.add(AveragePooling2D((7, 7), name='avg_pool'))
    model.add(Flatten())
    model.add(
        Dense(len(train_generator.class_indices),
              activation='softmax'))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.001),
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(patience=10)
    checkpointer = ModelCheckpoint(filepath='car_resNet50_best.h5',
                                   verbose=0,
                                   save_best_only=True)

    csv_logger = CSVLogger('ResNet50_training_log.csv',
                           append=True,
                           separator=';')

    history = model.fit_generator(
        train_generator,
        steps_per_epoch=train_generator.samples / train_generator.batch_size,
        epochs=EPOCHS,
        callbacks=[early_stopping, checkpointer, csv_logger],
        validation_data=validation_generator,
        validation_steps=validation_generator.samples /
        validation_generator.batch_size)

    # evaluate_scores = model.evaluate_generator(generator=test_generator)

    model.save('car_resNet50_final.h5')
    model.save_weights('resNet50_weights')
    show_history(history)
Example #15
def get_1d_convnet_model():
    model = Sequential()
    model.add(layers.Conv1D(32, 5, activation='relu',
                            input_shape=(None, float_data.shape[-1])))
    model.add(layers.MaxPooling1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.MaxPooling1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.GlobalMaxPooling1D())
    model.add(layers.Dense(1))
    model.summary()

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen,
                                  steps_per_epoch=500,
                                  epochs=20,
                                  validation_data=val_gen,
                                  validation_steps=val_steps)
    return history
Example #16
def get_rec_stacked_model():
    model = Sequential()
    model.add(
        layers.GRU(32,
                   input_shape=(None, float_data.shape[-1]),
                   dropout=0.1,
                   recurrent_dropout=0.5,
                   return_sequences=True))
    model.add(
        layers.GRU(64,
                   dropout=0.1,
                   recurrent_dropout=0.5,
                   activation='relu'))
    model.add(layers.Dense(1))

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen,
                                  steps_per_epoch=500,
                                  epochs=40,
                                  validation_data=val_gen,
                                  validation_steps=val_steps)
    return history
Example #17
learner.summary()

learner.compile(loss=keras.losses.categorical_crossentropy,
                optimizer=Adam(amsgrad=True),
                metrics=['accuracy'])

checkpoint = ModelCheckpoint(filepath='fashion_learner.hdf5',
                             verbose=1,
                             save_best_only=True,
                             monitor='val_acc')

for i in range(0, 20):
    learner.fit(x_train,
                y_train,
                batch_size=batch_size,
                epochs=25,
                shuffle=True,
                verbose=1,
                validation_data=(x_test, y_test),
                callbacks=[checkpoint, reduce_lr])

    learner.fit_generator(data_gen.flow(x_train,
                                        y_train,
                                        batch_size=batch_size),
                          steps_per_epoch=len(x_train) / batch_size,
                          epochs=25,
                          shuffle=True,
                          verbose=1,
                          validation_data=(x_test, y_test),
                          callbacks=[checkpoint, reduce_lr])
Example #18
top_model.add(Dense(256, activation='relu'))
top_model.add(Dense(64, activation='relu'))
top_model.add(Dense(1))

top_model.compile(loss='mean_squared_error', optimizer='adam', metrics=[rmse])

filepath = 'resnet50-freeze-{epoch:02d}-{val_loss:.2f}.h5'
checkpoint = ModelCheckpoint(filepath,
                             monitor='val_loss',
                             save_best_only=True,
                             verbose=1,
                             period=2)

train_df = label_file.iloc[:int(np.floor(label_file.shape[0] * 0.7)), :]
valid_df = label_file.iloc[int(np.floor(label_file.shape[0] * 0.7)):, :]

train_gen = generator_from_df(train_df, batch_size, target_size)
valid_gen = generator_from_df(valid_df, batch_size, target_size)
nbatches_train, mod = divmod(train_df.shape[0], batch_size)
nbatches_valid, mod = divmod(valid_df.shape[0], batch_size)
nworkers = 8

print("Training model...")
top_model.fit_generator(train_gen,
                        steps_per_epoch=nbatches_train,
                        epochs=epochs,
                        validation_data=valid_gen,
                        validation_steps=nbatches_valid,
                        workers=nworkers,
                        verbose=2)
Example #19
class ImageClassifier(object):
    """
    Image classifier class
    """
    def __init__(self, args):
        """
        Initializes the required variables
        """
        self.args = args
        self.model = None
        self.data = None
        self.df = None
        self.x = None
        self.y = None
        self.xTrain = None
        self.xTest = None
        self.yTrain = None
        self.yTest = None
        self.numClasses = 3
        self.activation = 'relu'
        self.tensorBoard = TensorBoard(log_dir='./tensorboard',
                                       histogram_freq=0,
                                       write_graph=True,
                                       write_images=True)
        self.trainDataGen = ImageDataGenerator(rescale=1. / 255,
                                               shear_range=0.2,
                                               zoom_range=0.2,
                                               horizontal_flip=True)
        self.testDataGen = ImageDataGenerator(rescale=1. / 255)

    def createModel(self):
        """
        Creates a Conv2D sequential Keras model
        One can tweak the number of conv layers and other settings, except the input shape.
        I've tuned the numbers below around my hardware limitations.
        """
        self.model = Sequential()
        self.model.add(
            Conv2D(16,
                   kernel_size=5,
                   padding='same',
                   input_shape=(
                       250,
                       250,
                       3,
                   ),
                   activation=self.activation))
        self.model.add(MaxPool2D(pool_size=(2, 2)))
        self.model.add(
            Conv2D(8,
                   kernel_size=5,
                   padding='same',
                   activation=self.activation))
        self.model.add(MaxPool2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.3))
        self.model.add(Flatten())
        self.model.add(Dense(300, activation=self.activation))
        self.model.add(Dense(self.numClasses, activation='softmax'))
        self.model.summary()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=SGD(lr=0.001),
                           metrics=['accuracy'])

    def loadAndPrepareData(self):
        """
        Loads the data from npz file and prepares it for feeding it to network
        """
        # The data file is an .npz archive; images are stored as (250 x 250 x 1), i.e. cv2 grayscale format
        self.data = np.load(self.args.data)

        # Loading the data in dataFrame of pandas
        self.df = pd.DataFrame(list(self.data.items())[0][1])

        # Normalizing the data by dividing the individual element by 255
        self.x = np.array([np.divide(item, 255) for item in self.df[0]])

        # Loading the label data in y
        self.y = np.array([item for item in self.df[1]])
        # Reshaping for the model. New shape = (num_samples, 250, 250, 1)
        self.x = self.x.reshape(self.x.shape[0], self.x.shape[1],
                                self.x.shape[2], 1)

        # Splitting the xTrain, xTest, yTrain and yTest from x and y
        self.xTrain, self.xTest, self.yTrain, self.yTest = train_test_split(
            self.x, self.y, test_size=0.2)

        # The classes are:
        # 1. Myself (Siddhesh)
        # 2. My beautiful wife (Ketaki)
        # 3. Unknown (neither of us / people the model has never seen)
        self.numClasses = self.y.shape[1]

    def dataGenerator(self):
        self.trainGenerator = self.trainDataGen.flow_from_directory(
            directory=self.args.data,
            target_size=(250, 250),
            color_mode='rgb',
            batch_size=30,
            class_mode='categorical',
            shuffle=True)

    def train(self):
        #history = self.model.fit(self.xTrain, self.yTrain, batch_size=self.args.batchSize, epochs=self.args.epochs, verbose=1, callbacks=[self.tensorBoard])
        history = self.model.fit_generator(self.trainGenerator,
                                           steps_per_epoch=50,
                                           epochs=self.args.epochs,
                                           callbacks=[self.tensorBoard])

        if self.args.save:
            print('Saving the model to {}'.format(self.args.save))
            self.model.save(self.args.save)

        print(history)
        fig1, ax_acc = plt.subplots()
        plt.plot(history.history['acc'])
        plt.plot(history.history['loss'])
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy / Loss')
        # This is a blocking call, user will have to manually close the window opened by matplotlib showing accuracy vs loss data.
        plt.show()

    def evaluate(self):
        score = self.model.evaluate(self.xTest, self.yTest)
        print(score)
Example #20
            # trim image to only see section with road
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)


# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)

ch, row, col = 3, 80, 320  # Trimmed image format

model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(
    Lambda(lambda x: x / 127.5 - 1.,
           input_shape=(ch, row, col),
           output_shape=(ch, row, col)))
# ... finish defining the rest of your model architecture here ...
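# One possible completion -- a hedged sketch, not the original author's network
# (assumes `from keras.layers import Flatten, Dense` alongside the Lambda import):
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(1))  # single steering-angle output, matching the 'mse' loss below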

model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator,
                    samples_per_epoch=len(train_samples),
                    validation_data=validation_generator,
                    nb_val_samples=len(validation_samples),
                    nb_epoch=3)
"""
If the above code throw exceptions, try 
model.fit_generator(train_generator, steps_per_epoch= len(train_samples),
validation_data=validation_generator, validation_steps=len(validation_samples), epochs=5, verbose = 1)
"""
Example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir',
                        default='../data/FIDS30/',
                        help='Root folder for the (unprocessed) data set.')
    parser.add_argument(
        '--log_dir',
        default=
        'C:\\Users\\Patrick\\Documents\\TU\\2019S\\ML\\ML_Exercise3\\ML_Exercise3',
        help='Root folder for TensorBoard logging.')
    dir = parser.parse_args().dir
    log_dir = parser.parse_args().log_dir

    os.chdir(dir)
    fileNames = glob.glob("*/*.jpg")
    targetLabels = []
    imageList = []
    for fileName in fileNames:
        pathSepIndex = fileName.index(os.path.sep)
        targetLabels.append(fileName[:pathSepIndex])
        # print(np.array(Image.open(fileName)).shape)
        image = cv2.resize(np.array(Image.open(fileName)), image_size)
        imageList.append(np.array(image))

    # find and drop the one RGBA image (a 4-channel entry breaks stacking into one array)
    toDelete = np.where(np.array([x.shape for x in imageList]) == 4)[0][0]
    del imageList[toDelete]
    imageArr = np.array(imageList)
    #imageArr = imageArr / 255.0

    le = preprocessing.LabelEncoder()
    le.fit(targetLabels)
    target = le.transform(targetLabels)
    target = np.delete(target, toDelete, 0)
    target_C = to_categorical(target)

    # imageArr = np.array(imageList)
    X_train, X_test, y_train, y_test = train_test_split(imageArr,
                                                        target_C,
                                                        random_state=42)

    datagen_train = ImageDataGenerator(
        rescale=1. / 255,
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=10,
        #width_shift_range=0.1,
        #height_shift_range=0.1,
        #shear_range=0.1,
        #zoom_range=0.1,
        horizontal_flip=True,
        #vertical_flip=True
    )
    datagen_train.fit(X_train)
    generator_train = datagen_train.flow(X_train,
                                         y_train,
                                         batch_size=batch_size)

    datagen_test = ImageDataGenerator(rescale=1. / 255,
                                      featurewise_center=True,
                                      featurewise_std_normalization=True)
    datagen_test.fit(X_train)
    generator_test = datagen_test.flow(X_test, y_test, batch_size=batch_size)

    # Instantiate an empty model
    model = Sequential()

    # 1st Convolutional Layer
    model.add(
        Conv2D(filters=96,
               input_shape=(224, 224, 3),
               kernel_size=(11, 11),
               strides=(4, 4),
               padding='valid'))
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # 2nd Convolutional Layer
    model.add(
        Conv2D(filters=256,
               kernel_size=(11, 11),
               strides=(1, 1),
               padding='valid'))
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # 3rd Convolutional Layer
    model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid'))
    model.add(Activation('relu'))

    # 4th Convolutional Layer
    model.add(
        Conv2D(filters=384,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid'))
    model.add(Activation('relu'))

    # 5th Convolutional Layer
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='valid'))
    model.add(Activation('relu'))
    # Max Pooling
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))

    # Passing it to a Fully Connected layer
    model.add(Flatten())
    # 1st Fully Connected Layer
    model.add(Dense(4096))  # input shape is inferred from the flattened conv output
    model.add(Activation('relu'))
    # Add Dropout to prevent overfitting
    # model.add(Dropout(0.2))

    # 2nd Fully Connected Layer
    model.add(Dense(4096))
    model.add(Activation('relu'))
    # Add Dropout
    # model.add(Dropout(0.2))

    # 3rd Fully Connected Layer
    model.add(Dense(1000))
    model.add(Activation('relu'))
    # Add Dropout
    # model.add(Dropout(0.2))

    # Output Layer
    model.add(Dense(30))
    model.add(Activation('softmax'))

    model.summary()

    # Compile the model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    now = time.strftime("%b%d_%H-%M")
    model.fit_generator(generator_train,
                        steps_per_epoch=512 // batch_size,
                        epochs=50,
                        validation_data=generator_test,
                        validation_steps=500 // batch_size,
                        callbacks=[
                            TensorBoard(histogram_freq=0,
                                        log_dir=os.path.join(
                                            log_dir, 'logs', now + '-' + NAME),
                                        write_graph=True)
                        ])
Example #22
class Model(object):
    def __init__(self, Config):
        self.config = Config
        self.model = None
        self.word2numF, self.num2word, \
        self.words, self.poems \
            = prepare_data()
        self.poem = self.poems.split(']')
        self.poem_num = len(self.poem)
        if os.path.exists(self.config.model_path):
            self.model = load_model(self.config.model_path)
        else:
            self.train()

    def sample(self, preds, diversity=1.0):
        preds = np.asarray(preds).astype('float64')
        exp_preds = np.power(preds, 1. / diversity)
        preds = exp_preds / np.sum(exp_preds)
        pro = np.random.choice(range(len(preds)), 1, p=preds)
        return int(pro.squeeze())

    def _preds(self, sentence, length=18, diversity=1.0):
        '''Generate a string of `length` characters'''
        sentence = sentence[:self.config.max_len]
        generate = ''
        for i in range(length):
            pred = self._pred(sentence, diversity)
            generate += pred
            sentence = sentence[1:] + pred
        return generate

    def _pred(self, sentence, diversity=1.0):
        '''Predict the next character'''
        sentence = sentence[-self.config.max_len:]
        x_pred = np.zeros(shape=(1, self.config.max_len), dtype=np.int32)
        for t, char in enumerate(sentence):
            x_pred[0, t] = self.word2numF(char)
        preds = self.model.predict(x_pred)[0]
        next_index = self.sample(preds, diversity=diversity)
        next_char = self.num2word[next_index]
        return next_char

    def predict_sen(self, sen, diversity=1.0):
        '''Given the first line (the first 6 characters)'''
        sentence = sen[-self.config.max_len:]
        generate = str(sentence)
        generate += self._preds(sentence,
                                length=self.config.poem_len -
                                self.config.max_len,
                                diversity=diversity)
        return generate

    def predict_random(self, diversity=1.0):
        '''Random generation: take a random first line as the seed'''
        index = random.randint(0, self.poem_num - 1)
        sentence = self.poem[index][:self.config.max_len]
        generate = self.predict_sen(sentence, diversity=diversity)
        return generate

    def generate_sample(self, epoch, logs):
        if epoch % 10 != 0:
            return
        with open(self.config.output_path, 'a', encoding='utf-8') as file:
            file.write('=============Epoch {}============='.format(epoch))
        print('\n=============Epoch {}============='.format(epoch) + '\n')
        for diversity in [0.5, 1.0, 1.5]:
            print('-------------Diversity {}-------------'.format(diversity))
            generate = self.predict_random(diversity=diversity)
            print(generate)
            with open(self.config.output_path, 'a', encoding='utf-8') as file:
                file.write(generate + '\n')

    def data_generator(self):
        i = 0
        while 1:
            x = self.poems[i:i + self.config.max_len]
            y = self.poems[i + self.config.max_len]
            if ']' in x or ']' in y:
                i += 1
                continue

            x_vec = np.zeros(shape=(1, self.config.max_len), dtype=np.int32)
            for t, char in enumerate(x):
                x_vec[0, t] = self.word2numF(char)

            y_vec = np.zeros(shape=(1, len(self.words)), dtype=bool)
            y_vec[0, self.word2numF(y)] = 1

            yield x_vec, y_vec
            i += 1

    def train(self):
        epoch_num = len(self.poems) // self.config.batch_size

        if not self.model:
            print('building model...')
            self.model = Sequential()
            self.model.add(
                Embedding(len(self.words) + 2,
                          300,
                          input_length=self.config.max_len))
            self.model.add(LSTM(512, return_sequences=True))
            self.model.add(Dropout(0.6))
            self.model.add(LSTM(256))
            self.model.add(Dropout(0.6))
            self.model.add(Dense(len(self.words), activation='softmax'))
            optimizer = Adam(lr=self.config.learning_rate)
            self.model.compile(optimizer=optimizer,
                               loss='categorical_crossentropy')
        self.model.summary()

        print('training model...')
        self.model.fit_generator(
            generator=self.data_generator(),
            verbose=True,
            steps_per_epoch=self.config.batch_size,
            epochs=self.config.epoch_num,
            callbacks=[
                ModelCheckpoint(self.config.model_path,
                                save_weights_only=False),
                LambdaCallback(on_epoch_end=self.generate_sample)
            ])
Example #23
class myModel(object):
    def __init__(self):
        self.model = Sequential()
        self.model.add(Conv2D(32, (3, 3), input_shape=(100, 100, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.85))
        self.model.add(Dense(2))
        self.model.add(Activation('sigmoid'))

    def train(self, dataset):
        batch_size = dataset.batch_size
        nb_epoch = dataset.nb_epoch
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.fit_generator(
            dataset.train_data_generate(),
            steps_per_epoch=dataset.total_train // batch_size,
            epochs=nb_epoch,
            validation_data=dataset.val_data_generate(),
            validation_steps=dataset.total_val // batch_size)

    def save(self, file_path="model.h5"):
        print('Model Saved.')
        self.model.save_weights(file_path)

    def load(self, file_path="model.h5"):
        print('Model Loaded.')
        self.model.load_weights(file_path)

    def predict(self, image):
        # predict the class of a single sample
        img = np.resize(image, (1, IMAGE_SIZE, IMAGE_SIZE, 3))
        img = img.astype('float32')
        img /= 255  # normalize to [0, 1]

        result = self.model.predict(img)
        print(result)  # class probabilities

        result = self.model.predict_classes(img)
        print(result)  # 0 / 1

        return result[0]

    def evaluate(self, dataset):
        # accuracy on held-out samples
        score = self.model.evaluate_generator(dataset.valid, steps=2)
        print("Sample accuracy (%s): %.2f%%" %
              (self.model.metrics_names[1], score[1] * 100))
Example #24
# the opening of this constructor was truncated in the original; rescale is an assumption
train_image_data_generator = ImageDataGenerator(rescale=1. / 255,
                                                zoom_range=0.2)

train_image_data_flow = train_image_data_generator.flow_from_directory(
    "./Images/Training",
    target_size=(dimensions[0], dimensions[1]),
    batch_size=batch_size,
    class_mode="categorical")

validation_image_data_generator = ImageDataGenerator(rescale=1. / 255)

validation_image_data_flow = validation_image_data_generator.flow_from_directory(
    "./Images/Validation",
    target_size=(dimensions[0], dimensions[1]),
    batch_size=batch_size,
    class_mode="categorical")

sample_amount = 3399
validation_sample_amount = 1021
epochs = 50
epoch_steps = sample_amount // batch_size
validation_steps = validation_sample_amount // batch_size

history = model.fit_generator(train_image_data_flow,
                              steps_per_epoch=epoch_steps,
                              epochs=epochs,
                              validation_data=validation_image_data_flow,
                              validation_steps=validation_steps)

print(history.history)

model.save("cnn_model_v10_50epoch.h5")
Example #25
print('TESTING DATASET IS PREPARED')

tb_callback = TensorBoard(log_dir=TF_LOG_PATH,
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)

cp_callback = ModelCheckpoint(monitor='val_accuracy',
                              save_best_only=True,
                              filepath=os.path.join(
                                  CHECK_DIR,
                                  'model_{epoch:02d}_{val_accuracy:.3f}.h5'))

model.fit_generator(train_generator,
                    epochs=12,
                    steps_per_epoch=1024,
                    validation_data=test_generator,
                    validation_steps=64,
                    callbacks=[tb_callback, cp_callback])

print('TRAINING COMPLETE')
model.save(MODEL_STRUCT_PATH)

for dp in glob(os.path.join(TEST_DATA_PATH, '*')):
    for fp in glob(os.path.join(dp, '*')):  # dp already includes TEST_DATA_PATH
        (fn, _) = os.path.splitext(fp)
        arr = numpy.array(
            load_img(fp,
                     target_size=(SIGN_IMG_HEIGHT, SIGN_IMG_WIDTH),
                     grayscale=False,
                     color_mode='rgb',
                     interpolation='nearest'))
Example #26
# e.g. flip horizontally/vertically, rotate, zoom in/out, vary color, crop
# may reduce overfitting

# insert code-block
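# A hedged sketch of the augmentation generator the note above calls for; the
# parameter values and directory path are illustrative, not the original notebook's:
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=20,        # rotate
                                   zoom_range=0.2,           # zoom in/out
                                   channel_shift_range=30.,  # vary color
                                   horizontal_flip=True,     # flip horizontally
                                   vertical_flip=True)       # flip vertically
# train_batches = train_datagen.flow_from_directory('data/train',  # hypothetical path
#                                                   target_size=(224, 224),
#                                                   batch_size=10)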

"""---------------------------------------------------------------------------------------------------------------------------------------------

# Train the new vgg16 model
"""

# compile the new model using Adam optimization function with learning rate 0.0001 and provided functions and metrics
model.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy'])

# pass batches from the training image data generator; steps_per_epoch = (images in dataset) / (batch size);
# the validation set comes from its own image generator (its code block is missing here); verbose level 2
model.fit_generator(train_batches, steps_per_epoch=7,
                    validation_data=valid_batches, validation_steps=4, epochs=9, verbose=2)

# observe the training metrics and compare with the 3x64-node CNN created previously; use the better one
# also check for overfitting

"""# Predictions"""

#
test_imgs, test_labels = next(test_batches)
plots(test_imgs, titles=test_labels)

# 0th index so that empty/occupied gets values 0/1 or vice versa
test_labels = test_labels[:,0]

# set steps according to : test set image count (eg. 10) and batch size: (eg. 10), so it takes 1 step to run through the batch of imgs
predictions = model.predict_generator(test_batches, steps=1, verbose=0)
Example #27
	
	model.add(Dense(units=500, \
		kernel_regularizer=regularizers.l2(1e-4)))
	model.add(Activation('relu'))
	model.add(BatchNormalization())
	model.add(Dropout(0.5))
	
	model.add(Dense(units=len(TrainLabl[0])))
	model.add(Activation('softmax'))
	
	ADAM = optimizers.Adam(lr=0.001)

	model.compile(loss='categorical_crossentropy',
				optimizer=ADAM, metrics=['accuracy'])

	history = model.fit_generator(
		datagen.flow(TrainFeat, TrainLabl, batch_size=128),
		validation_data=(ValiFeat, ValiLabl),
		steps_per_epoch=len(TrainFeat) * 10 // 128,
		epochs=40, verbose=1)
	
	print('End of fitting!!')

	score = model.evaluate(ValiFeat, ValiLabl)
	print('Total loss on validation set: ', score[0])
	print('Accuracy on validation set: ', score[1])

	model.summary()
	model.save('CNN.h5')

Example #28
def ResNet_fineturning(aug, trainX, trainY, testX, testY):
    from keras.applications.resnet50 import ResNet50
    from keras.preprocessing import image
    from keras.models import Model
    from keras import Sequential
    from keras.layers import Dense, GlobalAveragePooling2D, Softmax, Dropout
    from keras import backend as K

    # build the pre-trained base model without its classifier head
    base_model = ResNet50(weights='imagenet', include_top=False)

    model = Sequential([
        base_model,
        GlobalAveragePooling2D(),
        Dropout(rate=0.5),
        Dense(1024, activation="relu"),
        Dropout(rate=0.5),
        Dense(5),
        Softmax()
    ])

    # we only train the top few (randomly initialized) layers
    # freeze all convolutional layers
    for layer in base_model.layers:
        layer.trainable = False

    # compile the model (must be done after freezing the layers)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=["accuracy"])

    # load previously trained weights
    checkpoint_path = './save_weights/ResNet.ckpt'
    # model.load_weights(checkpoint_path)

    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='acc',
                                 save_weights_only=True,
                                 verbose=1,
                                 save_best_only=True,
                                 period=1)

    # train for a few epochs on the new dataset
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS,
                            verbose=1,
                            callbacks=[checkpoint])

    # save the model to disk
    print("[INFO] serializing network...")
    model.save_weights("./save_weights/ResNet.ckpt")

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy on traffic-sign classifier")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig("plot.png")
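A hedged usage sketch; BS and EPOCHS are module-level names the function expects, and the data here is random, purely to show the call shape:

import numpy as np
from keras.preprocessing.image import ImageDataGenerator

BS, EPOCHS = 32, 10                                          # hypothetical settings
trainX = np.random.rand(100, 224, 224, 3).astype('float32')  # illustrative data
trainY = np.eye(5)[np.random.randint(0, 5, 100)]             # 5 one-hot classes
testX, testY = trainX[:20], trainY[:20]

aug = ImageDataGenerator(rotation_range=15, horizontal_flip=True)
ResNet_fineturning(aug, trainX, trainY, testX, testY)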
Example #29
    model.add(Dense(128, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(64, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(Dense(10, activation='softmax'))

    #3. Compile, Train
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=Adam(lr=0.002, epsilon=None),
                  metrics=['acc'])
    # epsilon: avoids division by zero
    learning_hist = model.fit_generator(train_generator,
                                        epochs=1000,
                                        validation_data=valid_generator,
                                        callbacks=[es, mc, reLR])
    # model.load_weights('../data/DACON_vision1/cp/0203_4_cp.hdf5')

    #4. Evaluate, Predict
    loss, acc = model.evaluate(test_generator)
    print("loss : ", loss)
    print("acc : ", acc)

    result += model.predict_generator(pred_generator, verbose=True) / 40

    # save val_loss
    hist = pd.DataFrame(learning_hist.history)
    val_loss_min.append(hist['val_loss'].min())
    val_acc_max.append(hist['val_acc'].max())
Example #30
class CNNmodel7:
    def __init__(self, img_size=(256, 256), dump_path='dump/'):
        # Random parameters
        conv1_filters = np.random.randint(1, 65)
        conv2_filters = np.random.randint(1, 65)
        conv3_filters = np.random.randint(1, 65)
        conv1_kernel = np.random.randint(2, 10)
        conv2_kernel = np.random.randint(2, 10)
        conv3_kernel = np.random.randint(2, 10)
        conv1_strides = np.random.randint(1, conv1_kernel // 2 + 1)
        conv2_strides = np.random.randint(1, conv2_kernel // 2 + 1)
        conv3_strides = np.random.randint(1, conv3_kernel // 2 + 1)
        maxpool1_size = np.random.randint(2, 8)
        maxpool2_size = np.random.randint(2, 8)
        maxpool3_size = np.random.randint(2, 8)
        fc1_units = 2**np.random.randint(6, 11)
        fc2_units = 2**np.random.randint(6, 11)

        # Model architecture
        self.model = Sequential()
        self.model.add(
            Conv2D(filters=conv1_filters,
                   kernel_size=(conv1_kernel, conv1_kernel),
                   strides=(conv1_strides, conv1_strides),
                   activation='relu',
                   input_shape=(img_size[0], img_size[1], 3),
                   name='conv1'))
        self.model.add(
            MaxPooling2D(pool_size=(maxpool1_size, maxpool1_size),
                         strides=None,
                         name='maxpool1'))
        self.model.add(
            Conv2D(filters=conv2_filters,
                   kernel_size=(conv2_kernel, conv2_kernel),
                   strides=(conv2_strides, conv2_strides),
                   activation='relu',
                   name='conv2'))
        self.model.add(
            MaxPooling2D(pool_size=(maxpool2_size, maxpool2_size),
                         strides=None,
                         name='maxpool2'))
        self.model.add(
            Conv2D(filters=conv3_filters,
                   kernel_size=(conv3_kernel, conv3_kernel),
                   strides=(conv3_strides, conv3_strides),
                   activation='relu',
                   name='conv3'))
        self.model.add(
            MaxPooling2D(pool_size=(maxpool3_size, maxpool3_size),
                         strides=None,
                         name='maxpool3'))
        self.model.add(Flatten())
        self.model.add(Dense(units=fc1_units, activation='relu', name='fc1'))
        self.model.add(Dense(units=fc2_units, activation='relu', name='fc2'))
        self.model.add(Dense(units=8, activation='softmax', name='classif'))

        # Optimizer
        optimizer = Adam()

        # Compile
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
        # Parameters
        self.born_time = time.strftime('%Y%m%d%H%M%S', time.gmtime())
        self.identifier = str(hash(str(self.model.get_config())))
        self.dump_path = os.path.join(
            dump_path,
            str(self.born_time) + '_' + self.identifier)
        self.input_img_size = img_size

        # Print
        if not os.path.exists(self.dump_path):
            os.makedirs(self.dump_path)
        self.model.summary()
        print('Current model: ' + self.identifier)
        plot_model(self.model,
                   show_shapes=True,
                   show_layer_names=True,
                   to_file=os.path.join(self.dump_path,
                                        self.identifier + '.png'))

    def _train_generator(self, path, batch_size):
        datagen = ImageDataGenerator(
            preprocessing_function=self._preprocess_input,
            rotation_range=0,
            width_shift_range=0.,
            height_shift_range=0.,
            shear_range=0.,
            zoom_range=0.,
            channel_shift_range=0.,
            fill_mode='reflect',
            cval=0.,
            horizontal_flip=False,
            vertical_flip=False)
        return datagen.flow_from_directory(path,
                                           target_size=self.input_img_size,
                                           batch_size=batch_size,
                                           class_mode='categorical')

    def _test_val_generator(self, path, batch_size):
        datagen = ImageDataGenerator(
            preprocessing_function=self._preprocess_input)
        return datagen.flow_from_directory(path,
                                           target_size=self.input_img_size,
                                           batch_size=batch_size,
                                           class_mode='categorical',
                                           shuffle=False)

    def fit_directory(self,
                      path,
                      batch_size,
                      epochs,
                      val_path=None,
                      save_weights=False):
        train_generator = self._train_generator(path, batch_size)
        if val_path is None:
            validation_generator = None
            validation_steps = None
        else:
            validation_generator = self._test_val_generator(
                val_path, batch_size)
            validation_steps = validation_generator.samples / batch_size

        history = self.model.fit_generator(
            train_generator,
            steps_per_epoch=train_generator.samples / batch_size,
            epochs=epochs,
            validation_data=validation_generator,
            validation_steps=validation_steps)
        utils.plot_history(history,
                           self.dump_path,
                           identifier='e' + str(epochs) + '_b' +
                           str(batch_size))
        with open(
                os.path.join(
                    self.dump_path, 'e' + str(epochs) + '_b' +
                    str(batch_size) + '_history.pklz'), 'wb') as f:
            cPickle.dump((history.epoch, history.history, history.params,
                          history.validation_data, self.model.get_config()), f,
                         cPickle.HIGHEST_PROTOCOL)
        if save_weights:
            self.model.save_weights(
                os.path.join(
                    self.dump_path, 'e' + str(epochs) + '_b' +
                    str(batch_size) + '_weights.h5'))
        return history

    def evaluate(self, path):
        test_generator = self._test_val_generator(path, batch_size=32)
        return self.model.evaluate_generator(test_generator)

    def _preprocess_input(self, x, dim_ordering='default'):
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        assert dim_ordering in {'tf', 'th'}

        mean = [109.07621812, 115.45609435, 114.70990406]
        std = [56.91689916, 55.4694083, 59.14847488]
        if dim_ordering == 'th':
            # Zero-center by mean pixel
            x[0, :, :] -= mean[0]
            x[1, :, :] -= mean[1]
            x[2, :, :] -= mean[2]
            # Normalize by std
            x[0, :, :] /= std[0]
            x[1, :, :] /= std[1]
            x[2, :, :] /= std[2]
        else:
            # Zero-center by mean pixel
            x[:, :, 0] -= mean[0]
            x[:, :, 1] -= mean[1]
            x[:, :, 2] -= mean[2]
            # Normalize by std
            x[:, :, 0] /= std[0]
            x[:, :, 1] /= std[1]
            x[:, :, 2] /= std[2]
        return x
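A hedged usage sketch, assuming directory-per-class image folders; the paths are hypothetical:

model = CNNmodel7(img_size=(256, 256), dump_path='dump/')
history = model.fit_directory('data/train',        # hypothetical paths
                              batch_size=32,
                              epochs=10,
                              val_path='data/val',
                              save_weights=True)
print(model.evaluate('data/test'))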