Example #1
# Classifier head on top of the convolutional base `model` (whose definition
# is truncated above); `top_model` here starts from the base's output tensor.
top_model = Flatten()(top_model)
top_model = Dense(600)(top_model)
top_model = BatchNormalization()(top_model)
top_model = Activation('relu', name='relu_conv9')(top_model)
# top_model = Dense(600)(top_model)
# top_model = Activation('relu', name='relu_conv10')(top_model)
top_model = Dense(10)(top_model)
top_model = BatchNormalization()(top_model)
top_model = Activation('softmax', name='loss')(top_model)
top_model = Model(model.input, top_model, name='top_net')

top_model.load_weights(top_model_weights_path)
# configure the model (loss, optimizer, metrics)
top_model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
# set up training-history logging (LossHistory is a custom callback; see the sketch below)
history = LossHistory(model, x_test, y_test)
# start training
top_model.fit_generator(
            generator=train_flow,
            steps_per_epoch=30000 // 20,
            verbose=2,
            # NOTE: this validates on the training flow; substitute a
            # held-out generator here if one is available.
            validation_data=train_flow,
            validation_steps=5000 // 32,
            epochs=30,
            callbacks=[history]
            )

#pre=model.predict(test_data)
#print(np.argmax(pre, axis=1))
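
The LossHistory callback used above is user-defined and not shown in this snippet. A minimal sketch of what it might look like, assuming the (model, x_test, y_test) constructor signature used above; the attribute names and per-epoch evaluation are my assumptions, not the original implementation:

import keras

class LossHistory(keras.callbacks.Callback):
    # Hypothetical callback: evaluates a model on held-out data each epoch.
    def __init__(self, eval_model, x_test, y_test):
        super().__init__()
        self.eval_model = eval_model
        self.x_test = x_test
        self.y_test = y_test
        self.records = []

    def on_epoch_end(self, epoch, logs=None):
        loss, acc = self.eval_model.evaluate(self.x_test, self.y_test, verbose=0)
        self.records.append((loss, acc))
        print('epoch %d: test loss %.4f, test acc %.4f' % (epoch, loss, acc))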
Example #2
# Augmentation settings for the training data (train_gen feeds
# train_gen.flow() below).
train_gen = ImageDataGenerator(rotation_range=35,
                               width_shift_range=0.22,
                               height_shift_range=0.22)
inp = Input(shape=(28, 28, 1), dtype='float32')
model = Conv2D(16, (3, 3), padding='same', activation='relu')(inp)
model = MaxPooling2D((2, 2))(model)
model = Dropout(0.3)(model)
model = Conv2D(32, (3, 3), padding='same', activation='relu')(model)
model = MaxPooling2D((2, 2))(model)
model = Conv2D(64, (3, 3), padding='same', activation='relu')(model)
model = MaxPooling2D((2, 2))(model)
model = Flatten()(model)
model = Dropout(0.2)(model)
output = Dense(10, activation='softmax')(model)
model = Model(inputs=inp, outputs=output)
model.summary()
train_generator = train_gen.flow(train[:40000], labels[:40000], batch_size=128)
model.compile(loss='categorical_crossentropy',
              optimizer='Adam',
              metrics=['accuracy'])
# ModelCheckpoint writes to ./models, which must exist before training starts.
checkpt = ModelCheckpoint(
    filepath=r'./models/model.{epoch:02d}-{val_loss:.2f}.hdf5',
    monitor='val_loss',
    save_best_only=True)
csv_logger = CSVLogger('history.log')
model.fit_generator(train_generator,
                    steps_per_epoch=int(40000 / 128),
                    epochs=3000,
                    callbacks=[csv_logger, checkpt],
                    validation_data=(train[40000:], labels[40000:]))
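
After training, the best checkpoint (lowest val_loss) can be reloaded for inference. A minimal sketch, assuming the standalone Keras API used above; the file name is illustrative, so substitute whichever file ModelCheckpoint actually wrote:

from keras.models import load_model
import numpy as np

best = load_model('./models/model.42-0.03.hdf5')  # illustrative file name
pred = best.predict(train[40000:])
print(np.argmax(pred, axis=1)[:10])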
Example #3
class MultiClassDecisionDriverLayer(BaseDriverLayer):
    def build_model(self):
        inp = Input(shape=(200, 200, 3))
        # Build the feature extractor on a local tensor; keep the compiled
        # Model (not an intermediate tensor) on self so that train() and
        # predict() can use it.
        x = Conv2D(32, (3, 3), padding='same')(inp)
        x = Activation('relu')(x)
        x = Conv2D(32, (3, 3))(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(0.25)(x)
        x = Conv2D(64, (3, 3), padding='same')(x)
        x = Activation('relu')(x)
        x = Conv2D(64, (3, 3))(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Dropout(0.25)(x)
        x = Flatten()(x)
        x = Dense(512)(x)
        x = Activation('relu')(x)
        x = Dropout(0.5)(x)
        # Three independent binary heads, one per decision.
        output1 = Dense(1, activation='sigmoid')(x)
        output2 = Dense(1, activation='sigmoid')(x)
        output3 = Dense(1, activation='sigmoid')(x)
        self.model = Model(inp, [output1, output2, output3])
        self.model.compile(optimizers.RMSprop(lr=0.0001, decay=1e-6),
                           loss=[
                               "binary_crossentropy",
                               "binary_crossentropy",
                               "binary_crossentropy",
                           ],
                           metrics=["accuracy"])
        return self.model

    def train(self):
        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)
        # Validation and test data should only be rescaled, not augmented.
        valid_datagen = ImageDataGenerator(rescale=1. / 255)

        test_datagen = ImageDataGenerator(rescale=1. / 255)

        train_generator = train_datagen.flow_from_directory(
            directory=os.path.join(self.data_path, 'train/'),
            target_size=(200, 200),
            color_mode="rgb",
            batch_size=32,
            class_mode='categorical',
            shuffle=True,
            seed=42)

        valid_generator = valid_datagen.flow_from_directory(
            directory=os.path.join(self.data_path, 'valid/'),
            target_size=(200, 200),
            color_mode="rgb",
            batch_size=32,
            class_mode="categorical",
            shuffle=True,
            seed=42)

        test_generator = test_datagen.flow_from_directory(
            directory=os.path.join(self.data_path, 'test/'),
            target_size=(200, 200),
            color_mode="rgb",
            batch_size=32,
            class_mode='categorical')

        STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
        STEP_SIZE_VALID = valid_generator.n // valid_generator.batch_size

        # NOTE: class_mode='categorical' yields a single one-hot label array
        # per batch, while this model expects a list of three binary targets;
        # see the generator-wrapper sketch after this example.
        self.model.fit_generator(generator=train_generator,
                                 steps_per_epoch=STEP_SIZE_TRAIN,
                                 validation_data=valid_generator,
                                 validation_steps=STEP_SIZE_VALID,
                                 epochs=10)

        self.model.evaluate_generator(generator=valid_generator,
                                      steps=STEP_SIZE_VALID)

        STEP_SIZE_TEST = test_generator.n // test_generator.batch_size
        test_generator.reset()
        pred = self.model.predict_generator(test_generator,
                                            steps=STEP_SIZE_TEST,
                                            verbose=1)
        # A multi-output model returns a list of three (N, 1) arrays; stack
        # them so argmax picks the strongest head per sample.
        pred = np.concatenate(pred, axis=1)
        predicted_class_indices = np.argmax(pred, axis=1)
        labels = train_generator.class_indices
        self.labels = dict((v, k) for k, v in labels.items())
        predictions = [self.labels[k] for k in predicted_class_indices]
        print(predictions)

    def predict(self, image_data):
        image_array = image.img_to_array(image_data)
        # Match the 1/255 rescaling applied by the generators at train time.
        image_array = np.expand_dims(image_array, axis=0) / 255.0
        # Functional models have no predict_classes(); take the argmax over
        # the three sigmoid heads instead.
        pred = np.concatenate(self.model.predict(image_array), axis=1)
        return self.labels.get(int(np.argmax(pred, axis=1)[0]))
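
flow_from_directory with class_mode='categorical' yields one one-hot array per batch, but the model above has three separate binary heads. A minimal adapter sketch, assuming exactly three classes; the wrapper name is mine, not part of the original code:

def split_labels(generator):
    # Hypothetical adapter: turns (x, one_hot) batches into the
    # (x, [y1, y2, y3]) form a three-output model expects.
    while True:
        x, y = next(generator)  # y has shape (batch, 3)
        yield x, [y[:, 0], y[:, 1], y[:, 2]]

# Usage sketch:
# self.model.fit_generator(generator=split_labels(train_generator), ...)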
Example #4
    vgg = cnn_algo()
    # vgg.summary()

    for layer in vgg.layers:  # keep the weights frozen; the base was already trained on ImageNet
        layer.trainable = False  # do not retrain any of the base layers

    x = Flatten()(
        vgg.output
    )  # flatten the base output so a classification layer can be attached
    folders = glob.glob(
        cwd + '/train/*')  # one class per folder inside the training directory
    print(folders)

    prediction = Dense(len(folders), activation='softmax')(
        x)  # softmax head, one unit per class
    # Tie the new softmax head to the frozen VGG base.
    model = Model(inputs=vgg.input, outputs=prediction)
    model.summary()  # view the structure of the model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    training_set = aggumentation_training()
    # NOTE: reuses the training-augmentation helper for the test set.
    test_set = aggumentation_training()
    r = model.fit_generator(training_set,
                            epochs=3,
                            steps_per_epoch=len(training_set),
                            validation_data=test_set,
                            validation_steps=len(test_set))
    model.save('My_face_features_model.h5')
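
A minimal usage sketch for the saved model. The 224x224 input size is an assumption (VGG-style bases usually take 224x224 RGB), the image path is illustrative, and the 1/255 rescale assumes the augmentation helper rescaled inputs during training:

from keras.models import load_model
from keras.preprocessing import image
import numpy as np

model = load_model('My_face_features_model.h5')
img = image.load_img('face.jpg', target_size=(224, 224))  # illustrative path
x = np.expand_dims(image.img_to_array(img), axis=0) / 255.0
pred = model.predict(x)
print(np.argmax(pred, axis=1))  # index of the predicted class folder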