Example #1
# Assumed imports for this snippet (not shown in the original);
# validation_file and model_file are assumed to be path variables defined elsewhere.
import h5py
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adadelta
from tensorflow.keras.callbacks import TensorBoard
from time import time

test = h5py.File(validation_file, 'r')
images = test['images'][:]  # Dataset.value is deprecated and removed in h5py 3.0; use [:]
labels = test['labels'][:]

y_test = tf.keras.utils.to_categorical(labels, 7)
x_test = images / 255

inputs = Input(shape=(112, 112, 3))
x = Conv2D(32, (3, 3), activation='relu')(inputs)
x = Conv2D(64, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
y = Dense(7, activation='softmax')(x)

model = Model(inputs=inputs, outputs=y)

model.compile(optimizer=Adadelta(),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

tensorboard = TensorBoard(log_dir='../logs/{0}'.format(time()))

# x_train and y_train are assumed to come from a companion training file (see the sketch after this example)
model.fit(x_train, y_train, batch_size=32, epochs=25, verbose=1, callbacks=[tensorboard])


model.save(model_file)
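
The fit call above consumes x_train and y_train, which this snippet never defines; they presumably come from a companion training file with the same images/labels layout as the validation file. A minimal sketch under that assumption (training_file is a hypothetical path):

train = h5py.File(training_file, 'r')  # hypothetical counterpart to validation_file
x_train = train['images'][:] / 255
y_train = tf.keras.utils.to_categorical(train['labels'][:], 7)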

Example #2
class FusionModel:
    # Relies on Keras/TensorFlow imports and project helpers defined elsewhere:
    # instance_network, fusion_network, discriminator_network, the dummy losses,
    # RandomWeightedAverage, write_log, Data, MAX_INSTANCES, GRADIENT_PENALTY_WEIGHT.
    def __init__(self, config, load_weight_path=None, ab_loss='mse'):
        img_shape = (config.IMAGE_SIZE, config.IMAGE_SIZE)

        # Creating generator and discriminator
        optimizer = Adam(0.00002, 0.5)

        self.foreground_generator = instance_network(img_shape)
        self.foreground_generator.compile(
            loss=[
                constant_loss_dummy, constant_loss_dummy, constant_loss_dummy,
                constant_loss_dummy, constant_loss_dummy, ab_loss
            ],
            optimizer=optimizer)

        self.fusion_discriminator = discriminator_network(img_shape)
        self.fusion_discriminator.compile(loss=wasserstein_loss_dummy,
                                          optimizer=optimizer)
        self.fusion_generator = fusion_network(img_shape, config.BATCH_SIZE)
        self.fusion_generator.compile(loss=[ab_loss, 'kld'],
                                      optimizer=optimizer)

        if load_weight_path:
            chroma_gan = load_model(load_weight_path)
            chroma_gan_layers = [layer.name for layer in chroma_gan.layers]

            print('Loading chroma GAN parameters into instance network...')
            instance_layer_names = [
                layer.name for layer in self.foreground_generator.layers
            ]
            for i, layer_name in enumerate(instance_layer_names):
                if layer_name == 'fg_model_3':
                    print('model 3 skip')
                    continue
                if len(layer_name) < 2:
                    continue
                if layer_name.startswith('fg_'):
                    try:
                        j = chroma_gan_layers.index(layer_name[3:])
                        self.foreground_generator.layers[i].set_weights(
                            chroma_gan.layers[j].get_weights())
                        print(f'Successfully set weights for layer {layer_name}')
                    except ValueError:
                        print(f'Layer {layer_name} not found in chroma gan.')
                    except Exception as e:
                        print(e)

            print('Loading chroma GAN parameters into fusion network...')
            fusion_layer_names = [
                layer.name for layer in self.fusion_generator.layers
            ]
            for i, layer_name in enumerate(fusion_layer_names):
                if layer_name == 'model_3':
                    print('model 3 skip')
                    continue
                try:
                    j = chroma_gan_layers.index(layer_name)
                    self.fusion_generator.layers[i].set_weights(
                        chroma_gan.layers[j].get_weights())
                    print(f'Successfully set weights for layer {layer_name}')
                except ValueError:
                    print(f'Layer {layer_name} not found in chroma gan.')
                except Exception as e:
                    print(e)

        # Foreground (Fg) instance prediction
        fg_img_l = Input(shape=(*img_shape, 1, MAX_INSTANCES), name='fg_img_l')

        self.foreground_generator.trainable = False
        fg_model_3, fg_conv2d_11, fg_conv2d_13, fg_conv2d_15, fg_conv2d_17, up_sampling2d_3 = self.foreground_generator(
            fg_img_l)

        # Fusion prediction
        fusion_img_l = Input(shape=(*img_shape, 1), name='fusion_img_l')
        fusion_img_real_ab = Input(shape=(*img_shape, 2),
                                   name='fusion_img_real_ab')
        fg_bbox = Input(shape=(4, MAX_INSTANCES), name='fg_bbox')
        fg_mask = Input(shape=(*img_shape, MAX_INSTANCES), name='fg_mask')

        self.fusion_generator.trainable = False
        fusion_img_pred_ab, fusion_class_vec = self.fusion_generator([
            fusion_img_l, fg_model_3, fg_conv2d_11, fg_conv2d_13, fg_conv2d_15,
            fg_conv2d_17, fg_bbox, fg_mask
        ])

        dis_pred_ab = self.fusion_discriminator(
            [fusion_img_pred_ab, fusion_img_l])
        dis_real_ab = self.fusion_discriminator(
            [fusion_img_real_ab, fusion_img_l])

        # Sample the gradient penalty (standard WGAN-GP; a sketch of these helpers follows this example)
        img_ab_interp_samples = RandomWeightedAverage()(
            [fusion_img_pred_ab, fusion_img_real_ab])
        dis_interp_ab = self.fusion_discriminator(
            [img_ab_interp_samples, fusion_img_l])
        partial_gp_loss = partial(
            gradient_penalty_loss,
            averaged_samples=img_ab_interp_samples,
            gradient_penalty_weight=GRADIENT_PENALTY_WEIGHT)
        partial_gp_loss.__name__ = 'gradient_penalty'

        # Compile D and G as well as combined
        self.discriminator_model = Model(
            inputs=[
                fusion_img_l, fusion_img_real_ab, fg_img_l, fg_bbox, fg_mask
            ],
            outputs=[dis_real_ab, dis_pred_ab, dis_interp_ab],
            name='discriminator')

        self.discriminator_model.compile(optimizer=optimizer,
                                         loss=[
                                             wasserstein_loss_dummy,
                                             wasserstein_loss_dummy,
                                             partial_gp_loss
                                         ],
                                         loss_weights=[-1.0, 1.0, 1.0])

        self.fusion_generator.trainable = True
        self.fusion_discriminator.trainable = False
        self.combined = Model(
            inputs=[fusion_img_l, fg_img_l, fg_bbox, fg_mask],
            outputs=[
                fusion_img_pred_ab, up_sampling2d_3, fusion_class_vec,
                dis_pred_ab
            ],
            name='combined')
        self.combined.compile(
            loss=[ab_loss, ab_loss, 'kld', wasserstein_loss_dummy],
            loss_weights=[1.0, 0.5, 0.003, -0.1],
            optimizer=optimizer)

        # Monitor stuff
        self.callback = TensorBoard(config.LOG_DIR)
        self.callback.set_model(self.combined)
        self.train_names = [
            'loss', 'mse_loss', 'msei_loss', 'kullback_loss',
            'wasserstein_loss'
        ]
        self.disc_names = ['disc_loss', 'disc_valid', 'disc_fake', 'disc_gp']

        self.test_loss_array = []
        self.g_loss_array = []

    def train(self,
              data: Data,
              test_data,
              log,
              config,
              skip_to_after_epoch=None):
        # Load VGG network
        VGG_modelF = applications.vgg16.VGG16(weights='imagenet',
                                              include_top=True)

        # Real, Fake and Dummy for Discriminator
        positive_y = np.ones((data.batch_size, 1), dtype=np.float32)
        negative_y = -positive_y
        dummy_y = np.zeros((data.batch_size, 1), dtype=np.float32)

        # total number of batches in one epoch
        total_batch = int(data.size / data.batch_size)
        print(f'batch_size={data.batch_size} * total_batch={total_batch}')

        def save_path(model_type, epoch):  # plain def instead of a lambda shadowing the built-in 'type'
            return os.path.join(config.MODEL_DIR,
                                f"fusion_{model_type}Epoch{epoch}.h5")

        if skip_to_after_epoch:
            start_epoch = skip_to_after_epoch + 1
            print(f"Loading weights from epoch {skip_to_after_epoch}")
            self.combined.load_weights(
                save_path("combined", skip_to_after_epoch))
            self.fusion_discriminator.load_weights(
                save_path("discriminator", skip_to_after_epoch))
        else:
            start_epoch = 0

        for epoch in range(start_epoch, config.NUM_EPOCHS):
            for batch in tqdm(range(total_batch)):
                train_batch = data.generate_batch()
                resized_l = train_batch.resized_images.l
                resized_ab = train_batch.resized_images.ab

                # GT vgg
                predictVGG = VGG_modelF.predict(
                    np.tile(resized_l, [1, 1, 1, 3]))

                # train generator
                g_loss = self.combined.train_on_batch([
                    resized_l, train_batch.instances.l,
                    train_batch.instances.bbox, train_batch.instances.mask
                ], [
                    resized_ab, train_batch.instances.ab, predictVGG,
                    positive_y
                ])
                # train discriminator
                d_loss = self.discriminator_model.train_on_batch([
                    resized_l, resized_ab, train_batch.instances.l,
                    train_batch.instances.bbox, train_batch.instances.mask
                ], [positive_y, negative_y, dummy_y])

                # update log files
                write_log(self.callback, self.train_names, g_loss,
                          (epoch * total_batch + batch + 1))
                write_log(self.callback, self.disc_names, d_loss,
                          (epoch * total_batch + batch + 1))

                if batch % 10 == 0:
                    print(
                        f"[Epoch {epoch}] [Batch {batch}/{total_batch}] [generator loss: {g_loss[0]:08f}] [discriminator loss: {d_loss[0]:08f}]"
                    )

            print('Saving models...')
            self.combined.save(save_path("combined", epoch))
            self.fusion_discriminator.save(save_path("discriminator", epoch))
            print('Models saved.')

            print('Sampling test images...')
            # sample images after each epoch
            self.sample_images(test_data, epoch, config)

    def sample_images(self, test_data: Data, epoch, config):
        total_batch = int(ceil(test_data.size / test_data.batch_size))
        for _ in range(total_batch):
            # load test data
            test_batch = test_data.generate_batch()

            # predict AB channels
            fg_model_3, fg_conv2d_11, fg_conv2d_13, fg_conv2d_15, fg_conv2d_17, up_sampling2d_3 = self.foreground_generator.predict(
                test_batch.instances.l)

            fusion_img_pred_ab, _ = self.fusion_generator.predict([
                test_batch.resized_images.l, fg_model_3, fg_conv2d_11,
                fg_conv2d_13, fg_conv2d_15, fg_conv2d_17,
                test_batch.instances.bbox, test_batch.instances.mask
            ])

            # print results
            for i in range(test_data.batch_size):
                original_full_img = test_batch.images.full[i]
                height, width, _ = original_full_img.shape
                pred_ab = cv2.resize(
                    deprocess_float2int(fusion_img_pred_ab[i]),
                    (width, height))
                reconstruct_and_save(
                    test_batch.images.l[i], pred_ab,
                    f'epoch{epoch}_{test_batch.file_names[i]}', config)
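
Example #2 leaves several helpers undefined: RandomWeightedAverage, gradient_penalty_loss, wasserstein_loss_dummy, and write_log. They follow the standard Keras WGAN-GP pattern, so the sketch below shows typical definitions; it assumes the TF1-style Keras backend this code targets (K.gradients, tf.Summary, and callback.writer are unavailable under TF2 eager execution), and the project's actual definitions may differ.

import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer

class RandomWeightedAverage(Layer):
    # Interpolates real and fake samples with per-sample random weights.
    def call(self, inputs):
        alpha = K.random_uniform((K.shape(inputs[0])[0], 1, 1, 1))
        return alpha * inputs[0] + (1 - alpha) * inputs[1]

def wasserstein_loss_dummy(y_true, y_pred):
    # Wasserstein critic loss in its usual Keras form.
    return K.mean(y_true * y_pred)

def gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):
    # WGAN-GP penalty: penalizes critic gradients on the interpolated
    # samples for deviating from unit norm.
    gradients = K.gradients(y_pred, averaged_samples)[0]
    gradients_sqr = K.square(gradients)
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    return K.mean(gradient_penalty_weight * K.square(1 - gradient_l2_norm))

def write_log(callback, names, logs, batch_no):
    # Pushes scalar values through the TensorBoard callback's writer (TF1-style summaries).
    for name, value in zip(names, logs):
        summary = tf.Summary(value=[tf.Summary.Value(tag=name, simple_value=value)])
        callback.writer.add_summary(summary, batch_no)
        callback.writer.flush()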

Example #3

class Trainer:
    # INIT_LR, EPOCHS, BS, insert_base_images(), Messages, and the connectors used
    # below are assumed to be defined elsewhere; standard imports (numpy, matplotlib,
    # scikit-learn, tensorflow.keras) are likewise assumed.

    @staticmethod
    def train():
        print(Messages.TRAINER_START)

        data = []
        labels = []

        Trainer.insert_base_images()

        for person in an_connector.persons:
            for entry in database_connector.get(person.objectGUID):
                image = ImageUtility.bin_to_np_arr(entry['data'])
                data.append(image)
                labels.append(person.objectGUID)

        data = np.array(data, dtype='float32')
        labels = np.array(labels)

        lb = LabelBinarizer()
        labels = lb.fit_transform(labels)

        (trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)

        # If SSL exception occurs, run `Install Certificates.command` inside Applications/Python X
        aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15, width_shift_range=0.2, height_shift_range=0.2,
                                 shear_range=0.15, horizontal_flip=True, fill_mode='nearest')
        base_model = MobileNetV2(weights='imagenet', include_top=False, input_tensor=Input(shape=(224, 224, 3)))

        head_model = base_model.output
        head_model = AveragePooling2D(pool_size=(7, 7))(head_model)
        head_model = Flatten(name='flatten')(head_model)
        head_model = Dense(128, activation='relu')(head_model)
        head_model = Dropout(0.5)(head_model)
        head_model = Dense(len(lb.classes_), activation='softmax')(head_model)  # len(labels) would count samples, not classes

        model = Model(inputs=base_model.input, outputs=head_model)
        # print(model.summary())

        for layer in base_model.layers:
            layer.trainable = False

        print('Compiling model ... \n')
        opt = Adam(learning_rate=Trainer.INIT_LR, decay=Trainer.INIT_LR / Trainer.EPOCHS)  # 'lr' is deprecated in favor of 'learning_rate'
        model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

        # Needs depth of 3, received 1
        print('Training head ... \n')
        head = model.fit(x=aug.flow(trainX, trainY, batch_size=Trainer.BS), validation_data=(testX, testY),
                         steps_per_epoch=len(trainX) // Trainer.BS, epochs=Trainer.EPOCHS)

        print('Evaluating network ... \n')
        pred_idxs = model.predict(testX, batch_size=Trainer.BS)
        pred_idxs = np.argmax(pred_idxs, axis=1)

        # print(classification_report(testY.argmax(axis=1), pred_idxs, target_names=lb.classes_))

        print('Saving mask detector model ... \n')
        model.save('mask_detector.model', save_format='h5')

        plt.style.use('ggplot')
        plt.figure()
        plt.plot(np.arange(0, Trainer.EPOCHS), head.history['loss'], label='train_loss')
        plt.plot(np.arange(0, Trainer.EPOCHS), head.history['val_loss'], label='validation_loss')
        plt.plot(np.arange(0, Trainer.EPOCHS), head.history['accuracy'], label='train_acc')
        plt.plot(np.arange(0, Trainer.EPOCHS), head.history['val_accuracy'], label='validation_acc')
        plt.title('Training Loss and Accuracy')
        plt.xlabel('Epoch #')
        plt.ylabel('Loss/Accuracy')
        plt.legend(loc='lower left')
        plt.savefig('plot.png')

        print(Messages.TRAINER_FINISH)
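
To sanity-check the saved detector, a minimal sketch (the file name matches the save call above; the random array is only a stand-in for a real preprocessed 224x224 face crop):

from tensorflow.keras.models import load_model
import numpy as np

detector = load_model('mask_detector.model')
face = np.random.rand(1, 224, 224, 3).astype('float32')  # stand-in for a preprocessed face crop
print(detector.predict(face))  # one softmax probability per class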
Example #4
# Assumed imports for this snippet (not shown in the original).
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Input, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import CSVLogger

x_train2, x_test2, y_train2, y_test2 = train_test_split(images,
                                                        races,
                                                        random_state=100)

inputs = Input(shape=(200, 200, 3))
flt = Flatten()(inputs)

gender_l = Dense(128, activation="relu")(flt)
gender_l = Dense(80, activation="relu")(gender_l)
gender_l = Dense(64, activation="relu")(gender_l)
gender_l = Dense(32, activation="relu")(gender_l)
gender_l = Dense(2, activation="softmax")(gender_l)

race_l = Dense(128, activation="relu")(flt)
race_l = Dense(80, activation="relu")(race_l)
race_l = Dense(64, activation="relu")(race_l)
race_l = Dense(32, activation="relu")(race_l)
race_l = Dense(7, activation="softmax")(race_l)

model = Model(inputs=inputs, outputs=[gender_l, race_l])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics='accuracy')

csv_logger = CSVLogger("plots/AWE.csv", append=True)
# x_train, y_train, x_test, y_test come from an earlier split on the gender labels (not shown here)
save = model.fit(x_train, [y_train, y_train2],
                 validation_data=(x_test, [y_test, y_test2]),
                 epochs=100,
                 callbacks=[csv_logger])
model.save("AWEdataset.h5")