Example #1
0
# model hyperparameters
dropout_rate = 0.2
initializer = 'he_normal'
weight_decay = 5e-4
regularizer = l2(weight_decay)

# training parameters
epochs = 200
batch_size = 32
learning_rate = 0.01
max_learning_rate = 0.1

# One-cycle learning-rate schedule spanning the full training run.
clr = OneCycleLR(num_samples=X_train.shape[0],
                 batch_size=batch_size,
                 max_lr=max_learning_rate)
# Keep only the weights of the epoch with the lowest validation loss.
chk = ModelCheckpoint(filepath='results/wrn1028',
                      save_weights_only=True,
                      monitor='val_loss',
                      mode='min',
                      save_best_only=True)

# build and compile the model
model = WideResNet(width, depth, classes, filters, input_shape, activation,
                   dropout_rate, initializer, regularizer).get_model()
# 'learning_rate' replaces the deprecated 'lr' keyword (removed in newer TF 2.x).
model.compile(optimizer=SGD(learning_rate=learning_rate),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# NOTE: batch_size must NOT be passed to fit() when the input is a generator;
# the generator already yields batches and Keras raises a ValueError otherwise.
model.fit(generator.flow(X_train, Y_train, batch_size=batch_size),
          epochs=epochs,
          verbose=2,
          validation_data=(X_test, Y_test),
          callbacks=[clr, chk])
Example #2
0
class TrainerV2(object):
    """Build, train and evaluate an ordinal image classifier.

    The trainer wraps one of two backbones ('efficient_net' or
    'wide_res_net') compiled with either the EMD loss or categorical
    cross-entropy, and feeds it through ``DataGenerator`` instances.
    """

    def __init__(self,
                 input_shape,
                 encode_dim,
                 output_dim,
                 model='efficient_net',
                 loss='emd'):
        """Construct and compile the model.

        Args:
            input_shape: shape of one input image (passed to the backbone).
            encode_dim: embedding size used by the EfficientNet backbone.
            output_dim: number of output classes.
            model: backbone name, 'efficient_net' or 'wide_res_net'.
            loss: 'emd' or 'categorical_crossentropy'.

        Raises:
            ValueError: if ``model`` or ``loss`` is not a recognized name.
        """
        # Select the backbone architecture.
        if model == 'efficient_net':
            self.model = EfficientNet(input_shape, encode_dim, output_dim)
        elif model == 'wide_res_net':
            self.model = WideResNet(input_shape, output_dim)
        else:
            raise ValueError('no match model name: {!r}'.format(model))

        optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.1)

        # Select the training loss.
        if loss == 'emd':
            loss_func = EMD
        elif loss == 'categorical_crossentropy':
            loss_func = 'categorical_crossentropy'
        else:
            raise ValueError('no match loss function: {!r}'.format(loss))

        self.model.compile(optimizer=optimizer,
                           loss=loss_func,
                           metrics=['acc'])

    def train(self, x_train, t_train, x_val, t_val, epochs, batch_size,
              image_path, save_name):
        """Fit the model, checkpointing the best weights by validation loss.

        The training history is stored on ``self.history``.
        """
        train_gen = DataGenerator(x_train,
                                  t_train,
                                  image_path=image_path,
                                  batch_size=batch_size)
        val_gen = DataGenerator(x_val,
                                t_val,
                                image_path=image_path,
                                batch_size=batch_size)

        callbacks = [
            tf.keras.callbacks.ModelCheckpoint(save_name,
                                               monitor='val_loss',
                                               verbose=1,
                                               save_best_only=True,
                                               mode='min')
        ]

        # BUG FIX: the caller-supplied `epochs` was previously ignored in
        # favor of a hard-coded 30.
        self.history = self.model.fit_generator(
            train_gen,
            len(train_gen),
            epochs=epochs,
            validation_data=val_gen,
            validation_steps=len(val_gen),
            callbacks=callbacks,
        )

    def evaluate(
        self,
        x_test,
        t_test,
        batch_size,
        image_path,
    ):
        """Evaluate on a test set.

        Returns:
            (acc1, acc2, cm): accuracy of the argmax decode, accuracy of
            the expected-value decode, and the argmax confusion matrix.
        """
        test_gen = DataGenerator(x_test,
                                 t_test,
                                 image_path=image_path,
                                 batch_size=batch_size)

        preds = self.model.predict_generator(
            test_gen,
            len(test_gen),
        )
        true_labels = np.argmax(t_test, axis=1)
        hard_labels = np.argmax(preds, axis=1)

        # Class indices derived from the prediction width instead of a
        # hard-coded 5-class array, so any output_dim works.
        idx = np.arange(preds.shape[1])
        # Expected-value decode: round to the nearest class. The previous
        # astype(np.int32) truncated toward zero (e.g. EV 2.9 -> class 2),
        # biasing predictions downward.
        ev_labels = np.rint(np.sum(preds * idx, axis=1)).astype(np.int32)

        acc1 = accuracy_score(true_labels, hard_labels)
        acc2 = accuracy_score(true_labels, ev_labels)
        cm = confusion_matrix(true_labels, hard_labels)
        print(acc1, acc2, cm)
        return (acc1, acc2, cm)