Example #1
# Imports assumed by this excerpt (standalone Keras 2.x; CuDNNLSTM needs a
# GPU-enabled TensorFlow backend).
import pickle
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, TimeDistributed, Bidirectional, LSTM, CuDNNLSTM
from keras.callbacks import EarlyStopping


class FBB:
    def __init__(self, use_gpu=False):
        self.use_cudnn = use_gpu

    def load_data(self, x_train_loc, y_train_loc, x_val_loc, y_val_loc):
        self.X_train = np.load(x_train_loc)
        self.Y_train = np.load(y_train_loc)
        self.val = (np.load(x_val_loc), np.load(y_val_loc))

    def define_model(self, hidden_size, test=False):
        if test:
            input_size = self.X_test[0].shape[1]
        else:
            input_size = self.X_train.shape[2]
        self.model = Sequential()
        self.model.add(
            TimeDistributed(Dense(hidden_size, activation='sigmoid'),
                            input_shape=(None, input_size)))
        #        self.model.add(Masking(input_shape=(None, input_size)))
        if self.use_cudnn:
            self.model.add(
                Bidirectional(CuDNNLSTM(hidden_size, return_sequences=True)))
            self.model.add(Bidirectional(CuDNNLSTM(6, return_sequences=True)))
        else:
            self.model.add(
                Bidirectional(LSTM(hidden_size, return_sequences=True),
                              input_shape=(None, input_size)))
            self.model.add(Bidirectional(LSTM(6, return_sequences=True)))

        print('compiling')
        self.model.compile(loss='mean_squared_error',
                           optimizer='rmsprop',
                           metrics=['mse'])

    def load_test_data(self, x_test_loc, y_test_loc):
        with open(x_test_loc, 'rb') as f:
            self.X_test = pickle.load(f)
        with open(y_test_loc, 'rb') as f:
            self.Y_test = pickle.load(f)

    def test_generator(self):
        while True:
            for x, y in zip(self.X_test, self.Y_test):
                yield (x.reshape(1, x.shape[0], x.shape[1]),
                       y.reshape(1, y.shape[0], y.shape[1]))

    def train(self, batch_size, num_epochs):
        early_stopping = EarlyStopping(monitor='val_loss', patience=7)
        self.model.fit(self.X_train,
                       self.Y_train,
                       batch_size=batch_size,
                       epochs=num_epochs,
                       validation_data=self.val,
                       callbacks=[early_stopping])

    def test(self):
        gen = self.test_generator()
        score = self.model.evaluate_generator(gen, steps=len(self.X_test))
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))
callbacks = [
    TensorBoard(log_dir="logs/{}".format(NAME)),
    EarlyStopping(monitor='val_loss',
                  min_delta=0,
                  patience=2,
                  verbose=0,
                  mode='auto',
                  baseline=None,
                  restore_best_weights=False)
]

model.fit_generator(train_generator,
                    callbacks=callbacks,
                    steps_per_epoch=TRAIN_STEP,
                    epochs=EPOCHS,
                    validation_data=validation_generator,
                    validation_steps=VALIDATION_STEP)

# steps must be an integer count of batches
score = model.evaluate_generator(validation_generator,
                                 VALIDATION_STEP // BATCH_SIZE,
                                 workers=12)
scores = model.predict_generator(validation_generator,
                                 VALIDATION_STEP // BATCH_SIZE,
                                 workers=12)
model.save(FILE_NAME)

with open("logs/mylog.txt", "a") as f:
    f.write("\n\n--------" + NAME + "----------\n")
    f.write(str(score) + "\n")
Example #3
File: lstm.py Project: mcai/heo
    look_back = 10
    n_features = 2

    train_data_gen = TimeseriesGenerator(train,
                                         train,
                                         length=look_back,
                                         sampling_rate=1,
                                         stride=1,
                                         batch_size=3)
    test_data_gen = TimeseriesGenerator(test,
                                        test,
                                        length=look_back,
                                        sampling_rate=1,
                                        stride=1,
                                        batch_size=1)

    model = Sequential()
    model.add(LSTM(25, input_shape=(look_back, n_features)))
    model.add(Dense(n_features, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])
    model.summary()

    model.fit_generator(train_data_gen, epochs=100, steps_per_epoch=10)

    print(model.evaluate_generator(test_data_gen))  # [loss, acc]

    print()
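TimeseriesGenerator batches are indexable, which makes it easy to sanity-check the window shapes; a quick sketch, assuming `train` is a 2-D array of shape (num_samples, n_features):

# Inspect the first batch yielded by the generator above.
x_batch, y_batch = train_data_gen[0]
print(x_batch.shape)        # (3, look_back, n_features) == (3, 10, 2)
print(y_batch.shape)        # (3, n_features) == (3, 2)
print(len(train_data_gen))  # number of batches per epoch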
Example #4
# Unfreeze the convolutional base from 'block5_conv1' onward; all earlier
# layers stay frozen during fine-tuning.
set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    layer.trainable = set_trainable

model_2.compile(loss='binary_crossentropy',
                optimizer=optimizers.RMSprop(lr=1e-5),
                metrics=['acc'])

history = model_2.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=100,
    validation_data=validation_generator,
    validation_steps=50)

plot_history(history, smooth=True)

test_generator = test_datagen.flow_from_directory(test_dir,
                                                  target_size=(150, 150),
                                                  batch_size=BATCH_SIZE,
                                                  class_mode='binary')

test_loss, test_acc = model_2.evaluate_generator(test_generator, steps=50)
print('test acc:', test_acc)
Example #5
model = Sequential()


model.add(layers.Conv2D(64, kernel_size=3, activation='relu',
                        input_shape=(img_height, img_width, 1)))
model.add(layers.Conv2D(64, kernel_size=1, activation='relu'))
model.add(layers.Conv2D(64, kernel_size=3, activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(128, kernel_size=3, activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(256, kernel_size=3, activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))

model.add(layers.Flatten())

model.add(layers.Dense(500, activation='relu'))
model.add(Dropout(0.5))

num_classes = train_generator.num_classes
model.add(layers.Dense(num_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.load_weights('../output/first_try.h5')

model.save('../output/jmatt_best_sketches.h5')


acc = model.evaluate_generator(validation_generator,
                               steps=validation_generator.n // batch_size,
                               verbose=1)

print(acc)
Example #6
# Head of the accuracy plot, assumed to mirror the loss block below.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(output_dir + '/accuracy.png')

plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.savefig(output_dir + '/loss.png')

print("evaluation time")
evaluation = model.evaluate_generator(test_generator,
                                      steps=test_generator.n //
                                      test_generator.batch_size,
                                      verbose=1)

print(evaluation)
with open(output_dir + '/evaluation.txt', 'w') as f:
    f.write(str(evaluation[0]) + "\n")
    f.write(str(evaluation[1]))

print("prediction time")
test_generator.reset()

pred = model.predict_generator(test_generator,
                               steps=test_generator.n //
                               test_generator.batch_size,
                               verbose=1)
Example #7
# Imports assumed by this excerpt.
from keras.models import Sequential
from keras.layers import (Conv2D, MaxPooling2D, Activation, Flatten, Dense,
                          Dropout)
from keras.losses import categorical_crossentropy
from keras.optimizers import SGD


class Model:
    def __init__(self,
                 train_generator,
                 validation_generator,
                 test_generator,
                 epochs=10):
        self.train_generator = train_generator
        self.validation_generator = validation_generator
        self.test_generator = test_generator
        self.batch_size = len(train_generator)
        self.epochs = epochs

    def create_model(self):
        # Assumes the generator exposes a .shape attribute (i.e. it is backed
        # by an array); a plain Python generator would not.
        input_shape_ = self.train_generator.shape[1:]
        self.model = Sequential()
        self.model.add(
            Conv2D(32, (5, 5),
                   strides=(1, 1),
                   padding='same',
                   input_shape=input_shape_))
        self.model.add(Activation('relu'))
        #self.model.add(BatchNormalization(axis=chanDim))
        self.model.add(Conv2D(32, (5, 5), strides=(2, 2), padding='valid'))
        self.model.add(Activation('relu'))
        self.model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='valid'))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Activation('relu'))
        self.model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='valid'))
        self.model.add(Activation('relu'))
        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dense(40))
        self.model.add(Dropout(0.5))
        self.model.add(Activation('relu'))
        self.model.add(Dense(20))
        self.model.add(Activation('relu'))
        self.model.add(Dense(2))
        self.model.add(Activation('softmax'))
        self.summary()
        opt = SGD(0.01)
        self.model.compile(optimizer=opt,
                           loss=categorical_crossentropy,
                           metrics=['accuracy'])

    def summary(self):
        self.model.summary()

    def fit(self):
        # checkpoint, reduce_lr and early_stopping are assumed to be Keras
        # callback objects defined elsewhere in the module.
        self.model.fit_generator(
            generator=self.train_generator,
            epochs=self.epochs,
            validation_data=self.validation_generator,
            use_multiprocessing=True,
            workers=4,
            callbacks=[checkpoint, reduce_lr, early_stopping])
        self.model.save("save")

    def evaluate(self):
        score, acc = self.model.evaluate_generator(
            generator=self.test_generator,
            max_queue_size=10,
            workers=1,
            use_multiprocessing=False,
            verbose=0)
        print('Test score:', score)
        print('Test accuracy:', acc)
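A sketch of how this Model class might be driven; train_gen, val_gen and test_gen stand in for the project's real generators, and create_model() expects the training generator to expose a .shape attribute.

# Hypothetical driver for the Model class above.
m = Model(train_gen, val_gen, test_gen, epochs=20)
m.create_model()
m.fit()       # requires checkpoint, reduce_lr and early_stopping in scope
m.evaluate()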
Example #8
# Imports assumed by this excerpt; `logger` is taken to be a module-level
# logging.Logger.
import logging
from typing import Tuple

import matplotlib.pyplot as plt
from keras.models import Sequential, load_model
from keras.layers import (Conv2D, MaxPool2D, BatchNormalization, Flatten,
                          Dense, Dropout)
from keras.preprocessing.image import ImageDataGenerator

logger = logging.getLogger(__name__)


class Model:
    def __init__(self, dataset, model_path):
        self.dataset = dataset
        self.model = Sequential()
        self.model_path = model_path

    def create(self, tile_size: Tuple[int, int]):
        input_shape = (*tile_size, 3)
        model = Sequential()
        model.add(Conv2D(filters=32, kernel_size=3, activation='relu',
                         padding='same', input_shape=input_shape))
        # model.add(Conv2D(filters=32, kernel_size=3,
        #                  padding='same', activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPool2D())
        model.add(Conv2D(filters=64, kernel_size=3,
                         padding='same', activation='relu'))
        # model.add(Conv2D(filters=64, kernel_size=3,
        #                  padding='same', activation='relu'))
        model.add(BatchNormalization())
        model.add(MaxPool2D())
        model.add(Flatten())
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.3))
        model.add(Dense(16, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(
            loss='binary_crossentropy',
            optimizer='adam',
            metrics=['accuracy'])
        model.summary()

        self.model = model

    def train(self, epochs, verbose=2):
        train_datagen = self._get_scaling_generator(self.dataset.X_train,
                                                    self.dataset.Y_train)
        val_datagen = self._get_scaling_generator(self.dataset.X_test,
                                                  self.dataset.Y_test)

        train_history = self.model.fit_generator(train_datagen,
                                                 epochs=epochs,
                                                 validation_data=val_datagen,
                                                 verbose=verbose)

        fig, axes = plt.subplots(1, 2, sharex='all')
        axes[0].plot(train_history.history['loss'])
        axes[0].plot(train_history.history['val_loss'])
        axes[0].set_ylabel('loss')
        axes[1].plot(train_history.history['acc'])
        axes[1].plot(train_history.history['val_acc'])
        axes[1].set_ylabel('accuracy')
        axes[0].legend(['train loss', 'validation loss'], loc='best')
        axes[1].legend(['train accuracy', 'validation accuracy'], loc='best')
        fig.text(0.5, 0.02, "Number of epochs", horizontalalignment='center')

        plt.show()

    def evaluate(self):
        test_datagen = self._get_scaling_generator(self.dataset.X_test,
                                                   self.dataset.Y_test)
        outputs = self.model.evaluate_generator(test_datagen)
        logger.info("Results: Loss: %.3f, Accuracy: %.3f", *outputs)

    def predict(self, verbose=1):
        test_datagen = self._get_scaling_generator(self.dataset.X_test, shuffle=False)
        results = self.model.predict_generator(test_datagen, verbose=verbose)
        return results

    def save(self):
        model_path = self.model_path
        self.model.save(model_path)

    def load(self):
        model_path = self.model_path
        self.model = load_model(model_path)

    @staticmethod
    def _get_scaling_generator(x, y=None, shuffle=True):
        datagen = ImageDataGenerator(rescale=1. / 255)
        return datagen.flow(x, y, shuffle=shuffle)
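Sketch of the expected call sequence for this Model; `dataset` is a stand-in object exposing the X_train/Y_train/X_test/Y_test arrays the methods above read, and the model path is a placeholder.

# Hypothetical usage of the Model class above.
m = Model(dataset, model_path='tiles_cnn.h5')
m.create(tile_size=(64, 64))
m.train(epochs=15)
m.evaluate()
m.save()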
Example #9
                  metrics=['accuracy'])

model.summary()

print(train_generator.class_indices, end='\n\n')

early_stopping = tf.keras.callbacks.EarlyStopping(patience=2,
                                                  restore_best_weights=True)

model.fit_generator(train_generator,
                    epochs=7,
                    verbose=2,
                    callbacks=[early_stopping],
                    validation_data=validation_generator)

_, acc = model.evaluate_generator(test_generator)
print('Test Accuracy: %.3f' % (acc * 100))

if model_name:
    model.save('models/%.2f_%s' % (acc, model_name))
else:
    import time

    ts = time.gmtime()
    # "%s" (seconds since the epoch) is a platform-specific strftime extension.
    simple_ts = time.strftime("%s", ts)
    model.save('models/%.2f_%s.h5' % (acc, simple_ts))

if suspend_on_finished:
    import os

    os.system("systemctl suspend")
Example #10
model = Sequential()  # assumed preceding line, implied by this excerpt
model.add(
    Convolution2D(32,
                  3,
                  3,
                  activation='relu',
                  input_shape=(IMG_SIZE, IMG_SIZE, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
# A one-unit softmax always outputs 1.0; sigmoid is the correct activation
# for a single binary output trained with binary_crossentropy.
model.add(Dense(1, activation='sigmoid'))

# Print out model definition
model.summary()

plot_model(model, to_file='model.png')

# Prepare the model for training
model.compile(loss='binary_crossentropy', optimizer='adam')

# Train the model
model.fit_generator(generator=trainGen,
                    epochs=20,
                    steps_per_epoch=10,
                    validation_steps=5,
                    validation_data=valGen)

# Save the model
model.save("model1.h5")

# Evaluate the model
model.evaluate_generator(valGen, steps=5)
Example #11
model.compile(loss=custom_loss, optimizer='adam')
model.summary()

print("model compiled")
# generators
data_generator = ImageDataGenerator()
train_generator = data_generator.flow_from_directory(
    TRAIN_DATASET,
    target_size=image_template.shape[0:2],
    batch_size=4,
    color_mode="rgb")
test_generator = data_generator.flow_from_directory(
    TEST_DATASET,
    target_size=image_template.shape[0:2],
    batch_size=4,
    color_mode="rgb")

# fitting
print(len(train_generator))
# train on the training generator; the held-out test set is evaluated below
model.fit_generator(train_generator,
                    steps_per_epoch=16,
                    epochs=20,
                    verbose=1,
                    callbacks=[tb_call_back])

# Saving the model
model.save_weights('pklot_1.h5')

scores = model.evaluate_generator(test_generator)
print(scores)
Example #12
                                                  target_size=(224, 224),
                                                  batch_size=20,
                                                  class_mode='categorical')

model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["acc"])
history = model.fit_generator(train_generator,
                              steps_per_epoch=100,
                              validation_data=validation_generator,
                              validation_steps=50,
                              epochs=10)

#model.save("dogCatFineTune.h5")

predictions = model.evaluate_generator(test_generator, steps=34, verbose=0)
print(predictions)

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Dog Cat Fine Tune\nTraining and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Dog Cat Fine Tune\nTraining and validation loss')
plt.legend()
plt.show()
Example #13
import numpy as np
import tensorflow as tf
from keras import Sequential
from keras.layers import Dense

print("Tensorflow version", tf.__version__)

model1 = Sequential()
model1.add(Dense(10, input_shape=(1000,)))

model1.add(Dense(3, activation='relu'))
model1.compile('sgd', 'mse')


def gen():
    while True:
        yield np.zeros([10, 1000]), np.ones([10, 3])


import os
import psutil

process = psutil.Process(os.getpid())
g = gen()
while True:
    # Print resident memory (MiB) each round to watch for generator-driven leaks.
    print(process.memory_info().rss / float(2 ** 20))
    model1.fit_generator(g, 100, 2, use_multiprocessing=True, verbose=0)
    model1.evaluate_generator(gen(), 100, use_multiprocessing=True, verbose=0)



Example #14
# The head of this call is reconstructed by symmetry with the
# test_generator construction below.
train_generator = TimeseriesGenerator(X_train,
                                      y_train,
                                      length=window_size,
                                      batch_size=batch_size,
                                      shuffle=False)
test_generator = TimeseriesGenerator(X_test,
                                     y_test,
                                     length=window_size,
                                     batch_size=1,
                                     shuffle=False)

model = Sequential()
model.add(CuDNNGRU(128, input_shape=(window_size, X_train.shape[1])))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(y_train.shape[1], activation='softmax'))

# Run training
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit_generator(train_generator, epochs=epochs)
print(model.evaluate_generator(test_generator))

y_true = np.argmax(y_test[window_size:], axis=1)
y_pred = np.argmax(model.predict_generator(test_generator), axis=1)

print('Confusion matrix')
print(confusion_matrix(y_true, y_pred))
Example #15
# Imports assumed by this excerpt; IMAGE_SIZE is a module-level constant
# defined elsewhere in the project.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense, Dropout


class myModel(object):
    def __init__(self):
        self.model = Sequential()
        self.model.add(Conv2D(32, (3, 3), input_shape=(100, 100, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(32, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(64, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(64))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.85))
        self.model.add(Dense(2))
        self.model.add(Activation('sigmoid'))

    def train(self, dataset):
        batch_size = dataset.batch_size
        nb_epoch = dataset.nb_epoch
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.fit_generator(
            dataset.train_data_generate(),
            steps_per_epoch=dataset.total_train // batch_size,
            epochs=nb_epoch,
            validation_data=dataset.val_data_generate(),
            validation_steps=dataset.total_val // batch_size)

    def save(self, file_path="model.h5"):
        print('Model Saved.')
        self.model.save_weights(file_path)

    def load(self, file_path="model.h5"):
        print('Model Loaded.')
        self.model.load_weights(file_path)

    def predict(self, image):
        # Predict the class of a single sample; the input is assumed to be
        # an IMAGE_SIZE x IMAGE_SIZE x 3 array.
        img = image.reshape((1, IMAGE_SIZE, IMAGE_SIZE, 3))
        img = img.astype('float32')
        img /= 255  # normalize to [0, 1]

        result = self.model.predict(img)
        print(result)  # class probabilities
        result = self.model.predict_classes(img)
        print(result)  # predicted label (0/1)

        return result[0]

    def evaluate(self, dataset):
        # Accuracy on held-out samples
        score = self.model.evaluate_generator(dataset.valid, steps=2)
        print("sample accuracy %s: %.2f%%" %
              (self.model.metrics_names[1], score[1] * 100))
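Sketch of a training run with myModel; `dataset` is assumed to provide batch_size, nb_epoch, the two generator methods, the total_train/total_val counts, and a `valid` generator for evaluate().

# Hypothetical driver for the myModel class above.
m = myModel()
m.train(dataset)
m.save('model.h5')
m.evaluate(dataset)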
Example #16
for i in range(test_pred.shape[0]):
    # The target file depends only on the shop index i, so load it once per shop
    # rather than once per day.
    temp = np.load('data/data_rows_target/' + 'shop_' + str(i) +
                   '_target' + '.npy')
    for j in range(test_pred.shape[1]):
        sum_per_day_y = 0
        sum_per_day_pred = 0
        for n in range(test_pred.shape[2]):
            sum_per_day_y = sum_per_day_y + temp[j, n]
            sum_per_day_pred = sum_per_day_pred + test_pred[i, j, n]
        test_y_list.append(sum_per_day_y)
        test_pred_list.append(sum_per_day_pred)

plot_shop = 25

pyplot.plot(x_labels,
            test_y_list[plot_shop * 10:(plot_shop * 10) + 10],
            label='target')
pyplot.plot(x_labels,
            test_pred_list[plot_shop * 10:(plot_shop * 10) + 10],
            label='predicted')
pyplot.xticks(rotation=45)
pyplot.legend()
pyplot.show()

# evaluate model (val generator, steps (batches of samples) to yield from generator before stopping)
score = model.evaluate_generator(validation_generator, 1000, verbose=2)
print(score)

print("--- %s s ---" % (time.time() - start_time))
Example #17
# Imports assumed by this excerpt (Python 2-era Keras, given cPickle and
# K.image_dim_ordering below); `utils` is a project-local helper module.
import os
import time
import cPickle
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model
import utils


class CNNmodel7:
    def __init__(self, img_size=(256, 256), dump_path='dump/'):
        # Random parameters
        conv1_filters = np.random.randint(1, 65)
        conv2_filters = np.random.randint(1, 65)
        conv3_filters = np.random.randint(1, 65)
        conv1_kernel = np.random.randint(2, 10)
        conv2_kernel = np.random.randint(2, 10)
        conv3_kernel = np.random.randint(2, 10)
        conv1_strides = np.random.randint(1, conv1_kernel / 2 + 1)
        conv2_strides = np.random.randint(1, conv2_kernel / 2 + 1)
        conv3_strides = np.random.randint(1, conv3_kernel / 2 + 1)
        maxpool1_size = np.random.randint(2, 8)
        maxpool2_size = np.random.randint(2, 8)
        maxpool3_size = np.random.randint(2, 8)
        fc1_units = 2**np.random.randint(6, 11)
        fc2_units = 2**np.random.randint(6, 11)

        # Model architecture
        self.model = Sequential()
        self.model.add(
            Conv2D(filters=conv1_filters,
                   kernel_size=(conv1_kernel, conv1_kernel),
                   strides=(conv1_strides, conv1_strides),
                   activation='relu',
                   input_shape=(img_size[0], img_size[1], 3),
                   name='conv1'))
        self.model.add(
            MaxPooling2D(pool_size=(maxpool1_size, maxpool1_size),
                         strides=None,
                         name='maxpool1'))
        self.model.add(
            Conv2D(filters=conv2_filters,
                   kernel_size=(conv2_kernel, conv2_kernel),
                   strides=(conv2_strides, conv2_strides),
                   activation='relu',
                   name='conv2'))
        self.model.add(
            MaxPooling2D(pool_size=(maxpool2_size, maxpool2_size),
                         strides=None,
                         name='maxpool2'))
        self.model.add(
            Conv2D(filters=conv3_filters,
                   kernel_size=(conv3_kernel, conv3_kernel),
                   strides=(conv3_strides, conv3_strides),
                   activation='relu',
                   name='conv3'))
        self.model.add(
            MaxPooling2D(pool_size=(maxpool3_size, maxpool3_size),
                         strides=None,
                         name='maxpool3'))
        self.model.add(Flatten())
        self.model.add(Dense(units=fc1_units, activation='relu', name='fc1'))
        self.model.add(Dense(units=fc2_units, activation='relu', name='fc2'))
        self.model.add(Dense(units=8, activation='softmax', name='classif'))

        # Optimizer
        optimizer = Adam()

        # Compile
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
        # Parameters
        self.born_time = time.strftime('%Y%m%d%H%M%S', time.gmtime())
        self.identifier = str(hash(str(self.model.get_config())))
        self.dump_path = os.path.join(
            dump_path,
            str(self.born_time) + '_' + self.identifier)
        self.input_img_size = img_size

        # Print
        if not os.path.exists(self.dump_path):
            os.makedirs(self.dump_path)
        self.model.summary()
        print('Current model: ' + self.identifier)
        plot_model(self.model,
                   show_shapes=True,
                   show_layer_names=True,
                   to_file=os.path.join(self.dump_path,
                                        self.identifier + '.png'))

    def _train_generator(self, path, batch_size):
        datagen = ImageDataGenerator(
            preprocessing_function=self._preprocess_input,
            rotation_range=0,
            width_shift_range=0.,
            height_shift_range=0.,
            shear_range=0.,
            zoom_range=0.,
            channel_shift_range=0.,
            fill_mode='reflect',
            cval=0.,
            horizontal_flip=False,
            vertical_flip=False)
        return datagen.flow_from_directory(path,
                                           target_size=self.input_img_size,
                                           batch_size=batch_size,
                                           class_mode='categorical')

    def _test_val_generator(self, path, batch_size):
        datagen = ImageDataGenerator(
            preprocessing_function=self._preprocess_input)
        return datagen.flow_from_directory(path,
                                           target_size=self.input_img_size,
                                           batch_size=batch_size,
                                           class_mode='categorical',
                                           shuffle=False)

    def fit_directory(self,
                      path,
                      batch_size,
                      epochs,
                      val_path=None,
                      save_weights=False):
        train_generator = self._train_generator(path, batch_size)
        if val_path is None:
            validation_generator = None
            validation_steps = None
        else:
            validation_generator = self._test_val_generator(
                val_path, batch_size)
            validation_steps = validation_generator.samples / batch_size

        history = self.model.fit_generator(
            train_generator,
            steps_per_epoch=train_generator.samples / batch_size,
            epochs=epochs,
            validation_data=validation_generator,
            validation_steps=validation_steps)
        utils.plot_history(history,
                           self.dump_path,
                           identifier='e' + str(epochs) + '_b' +
                           str(batch_size))
        with open(
                os.path.join(
                    self.dump_path, 'e' + str(epochs) + '_b' +
                    str(batch_size) + '_history.pklz'), 'wb') as f:
            cPickle.dump((history.epoch, history.history, history.params,
                          history.validation_data, self.model.get_config()), f,
                         cPickle.HIGHEST_PROTOCOL)
        if save_weights:
            self.model.save_weights(
                os.path.join(
                    self.dump_path, 'e' + str(epochs) + '_b' +
                    str(batch_size) + '_weights.h5'))
        return history

    def evaluate(self, path):
        test_generator = self._test_val_generator(path, batch_size=32)
        return self.model.evaluate_generator(test_generator)

    def _preprocess_input(self, x, dim_ordering='default'):
        if dim_ordering == 'default':
            dim_ordering = K.image_dim_ordering()
        assert dim_ordering in {'tf', 'th'}

        mean = [109.07621812, 115.45609435, 114.70990406]
        std = [56.91689916, 55.4694083, 59.14847488]
        if dim_ordering == 'th':
            # Zero-center by mean pixel
            x[0, :, :] -= mean[0]
            x[1, :, :] -= mean[1]
            x[2, :, :] -= mean[2]
            # Normalize by std
            x[0, :, :] /= std[0]
            x[1, :, :] /= std[1]
            x[2, :, :] /= std[2]
        else:
            # Zero-center by mean pixel
            x[:, :, 0] -= mean[0]
            x[:, :, 1] -= mean[1]
            x[:, :, 2] -= mean[2]
            # Normalize by std
            x[:, :, 0] /= std[0]
            x[:, :, 1] /= std[1]
            x[:, :, 2] /= std[2]
        return x
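A usage sketch for CNNmodel7, which draws a random architecture at construction time; the directory paths are hypothetical placeholders.

# Hypothetical driver for CNNmodel7 (paths are placeholders).
cnn = CNNmodel7(img_size=(256, 256), dump_path='dump/')
history = cnn.fit_directory('data/train', batch_size=32, epochs=30,
                            val_path='data/val', save_weights=True)
print(cnn.evaluate('data/test'))  # [loss, accuracy]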
Example #18
# Imports assumed by this excerpt.
import pickle
from keras.models import Sequential
from keras.layers import Dense, TimeDistributed, Bidirectional, LSTM, CuDNNLSTM


class ModelSix:
    def __init__(self,
                 x_train_loc,
                 y_train_loc,
                 x_val_loc,
                 y_val_loc,
                 use_cudnn=False):
        with open(x_train_loc, 'rb') as f:
            self.X_train = pickle.load(f)
        with open(y_train_loc, 'rb') as f:
            self.Y_train = pickle.load(f)
        with open(x_val_loc, 'rb') as f:
            self.X_val = pickle.load(f)
        with open(y_val_loc, 'rb') as f:
            self.Y_val = pickle.load(f)

        self.use_cudnn = use_cudnn

    def define_model(self, hidden_size):
        input_size = self.X_train[0].shape[1]
        self.model = Sequential()
        #        self.model.add(Masking(input_shape=(None, input_size)))
        if self.use_cudnn:
            self.model.add(
                Bidirectional(CuDNNLSTM(hidden_size, return_sequences=True),
                              input_shape=(None, input_size)))
            self.model.add(
                Bidirectional(CuDNNLSTM(hidden_size, return_sequences=True)))
        else:
            self.model.add(
                Bidirectional(LSTM(hidden_size, return_sequences=True),
                              input_shape=(None, input_size)))
            self.model.add(
                Bidirectional(LSTM(hidden_size, return_sequences=True)))
        self.model.add(TimeDistributed(Dense(12, activation=None)))
        print('compiling')
        self.model.compile(loss='mean_squared_error',
                           optimizer='rmsprop',
                           metrics=['mse'])

    def load_model(self, loc_h5):
        self.define_model(200)
        self.model.load_weights(loc_h5)
        #self.model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['rmsprop'])
        print(self.model.summary())

    def load_test_data(self, x_test_loc, y_test_loc):
        with open(x_test_loc, 'rb') as f:
            self.X_test = pickle.load(f)
        with open(y_test_loc, 'rb') as f:
            self.Y_test = pickle.load(f)

    def data_generator(self, mode):
        while True:
            if mode == 'train':
                for x, y in zip(self.X_train, self.Y_train):
                    yield (x.reshape(1, x.shape[0], x.shape[1]),
                           y.reshape(1, y.shape[0], y.shape[1]))
            if mode == 'val':
                for x, y in zip(self.X_val, self.Y_val):
                    yield (x.reshape(1, x.shape[0], x.shape[1]),
                           y.reshape(1, y.shape[0], y.shape[1]))
            if mode == 'test':
                for x, y in zip(self.X_test, self.Y_test):
                    yield (x.reshape(1, x.shape[0], x.shape[1]),
                           y.reshape(1, y.shape[0], y.shape[1]))

    def train(self, num_epochs):
        train_gen = self.data_generator('train')
        val_gen = self.data_generator('val')
        self.model.fit_generator(train_gen,
                                 steps_per_epoch=len(self.X_train),
                                 epochs=num_epochs,
                                 validation_data=val_gen,
                                 validation_steps=len(self.X_val))

    def test(self):
        test_gen = self.data_generator('test')
        score = self.model.evaluate_generator(test_gen, steps=len(self.X_test))
        print("%s: %.2f%%" % (self.model.metrics_names[1], score[1] * 100))