Example #1
    def test(self, gen_test, output_dir=None):
        if self.model is None:
            raise Exception("Model is not defined.")

        images, labels = gen_test.get_all()

        logger.debug("Predicting... ")

        pred = self.predict(images)

        df_pred = self.get_pred_df(pred)

        # Collapse one-hot ground-truth labels to class indices
        df_ans = pd.DataFrame(labels)
        df_ans = df_ans.applymap(np.argmax)

        # Boolean DataFrame: True wherever the prediction matches the ground truth
        df_right = (df_pred == df_ans)

        for num_klass, klass_name in enumerate(df_right):
            logger.debug("Label {} acc: {}/{} = {:.4f}".format(klass_name, np.sum(df_right[klass_name]), len(df_right),
                                                               np.sum(df_right[klass_name]) / len(df_right)))

            # Show or save each wrongly predicted sample
            for i in df_pred[klass_name].loc[df_ans[klass_name] != df_pred[klass_name]].index:
                annotation = gen_test.dataset.annotations[i]
                title = f"ans({self.list_class_names[num_klass][df_ans[i]]})_pred({self.list_class_names[num_klass][df_pred[i]]})"

                image = image_processing.draw_annotation(annotation)
                if output_dir is None:
                    image_processing.show(image, title=f"{annotation.filename}\n{title}")
                else:
                    image_processing.save_image(image, os_path.join(output_dir, f"{annotation.filename}_{title}.png"))

        logger.debug(
            "Total acc {}/{} = {:.4f}".format(np.sum(df_right.all(axis=1)), len(df_right),
                                              np.sum(df_right.all(axis=1)) / len(df_right)))
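
The accuracy bookkeeping above is plain pandas/NumPy rather than anything project-specific. A minimal self-contained sketch of the same (df_pred == df_ans) comparison, using made-up predictions for two hypothetical label columns:

import pandas as pd

# Toy data: 4 samples, two label columns (names are hypothetical)
df_pred = pd.DataFrame({"color": [0, 1, 2, 1], "shape": [1, 1, 0, 0]})
df_ans = pd.DataFrame({"color": [0, 1, 1, 1], "shape": [1, 0, 0, 0]})

df_right = (df_pred == df_ans)           # element-wise boolean DataFrame
per_class_acc = df_right.mean()          # column-wise accuracy: color 0.75, shape 0.75
total_acc = df_right.all(axis=1).mean()  # a sample counts only if every column matches: 0.5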
Example #2
 def __init__(self, model_dir, version=None):
     super(TFServeModel, self).__init__()
     self.model_dir = model_dir
     if version is None:
         # Default to the most recently created version directory
         version = os_path.list_dir(model_dir, sort=True, sort_key=os_path.getctime, full_path=False)[-1]
     self.version = version
     # Load the SavedModel and adopt its attributes so this wrapper behaves like it
     model = tf.saved_model.load(os_path.join(self.model_dir, self.version))
     self.__dict__.update(model.__dict__)
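
The self.__dict__.update(model.__dict__) line is a plain-Python trick rather than a TensorFlow API: it copies every instance attribute of the loaded object onto the wrapper, so the wrapper then exposes those attributes as its own. A minimal sketch of the pattern, independent of TensorFlow:

class Loaded:
    def __init__(self):
        self.version = "0001"
        self.weights = [1, 2, 3]

class Wrapper:
    def __init__(self, loaded):
        # Adopt all instance attributes of the loaded object
        self.__dict__.update(loaded.__dict__)

w = Wrapper(Loaded())
print(w.version, w.weights)  # 0001 [1, 2, 3]

One caveat: only instance attributes are copied; methods and properties defined on the loaded object's class are not.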
Example #3
 def load(self, model_dir=None, model_path=None):
     if model_dir is None and model_path is None:
         raise Exception("No model path can be loaded")
     if model_path is None:
         model_path = os_path.join(model_dir, f"{self.model_name}.h5")
     if not os_path.exists(model_path):
         logger.warning("Pretrained model does not exist in {}.".format(model_path))
         return
     logger.debug('Load model {}.'.format(model_path))
     # compile=False restores architecture and weights without optimizer state;
     # the loaded model's attributes are then adopted onto this instance
     model = load_model(model_path, compile=False)
     self.__dict__.update(model.__dict__)
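
compile=False is the standard Keras option for restoring a model without its optimizer and loss configuration, which is enough for inference and avoids deserialization errors with custom losses. A minimal usage sketch, with a hypothetical my_model.h5 and input shape:

import numpy as np
from tensorflow.keras.models import load_model

model = load_model("my_model.h5", compile=False)   # hypothetical path
preds = model.predict(np.zeros((1, 224, 224, 3)))  # dummy batch; shape depends on the model

# Recompile only if further training is needed
model.compile(optimizer="adam", loss="categorical_crossentropy")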
Example #4
 def display(image_path, bbox, pred_classes, true_classes, title):
     image_org = image_processing.imread(image_path)
     # Run the image through the model's augmentation pipeline before drawing
     image = model.img_aug.aug([image_org])[0][0]
     image = image_processing.draw_box(image,
                                       bbox,
                                       label=get_label(pred_classes))
     image_path = os_path.join(
         os_path.dirname(image_path, depth=2, full_path=False),
         os_path.get_filename(image_path))
     image_processing.show(
         image,
         title=f"{title} true={get_label(true_classes)}\n {image_path}")
Example #5
    def load_weights(self, weight_dir=None, weight_path=None, force=False, **kwargs):
        if not force:  # with force=True the caller must pass weight_path explicitly
            if weight_dir is None and weight_path is None:
                raise Exception("No model_weight path can be loaded")
            if weight_path is None:
                weight_path = os_path.join(weight_dir, f"{self.model_name}.h5")
            if not os_path.exists(weight_path):
                logger.warning("Pretrained weights does not exist in {}.".format(weight_path))
                return

        logger.debug('Load weights {}.'.format(weight_path))
        super(CNNModel, self).load_weights(weight_path, **kwargs)
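
A hedged usage sketch for this method, assuming a CNNModel instance whose model_name is "my_cnn": pass a directory and let the method resolve <model_name>.h5, or pass an explicit file path.

# Resolves to weights/my_cnn.h5; logs a warning and returns if the file is missing
model.load_weights(weight_dir="weights")

# Or point at an exact file
model.load_weights(weight_path="weights/backup/my_cnn.h5")

# force=True skips resolution and the existence check, so weight_path must be given
model.load_weights(weight_path="weights/my_cnn.h5", force=True)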
Example #6
def run_eager_fit(model, train_dataset, val_dataset, optimizer, loss):
    # Eager mode is great for debugging
    # Non eager graph mode is recommended for real training
    avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
    avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

    for epoch in range(1, FLAGS.epochs + 1):
        for batch, (images, labels) in enumerate(train_dataset):
            with tf.GradientTape() as tape:
                outputs = model(images, training=True)
                regularization_loss = tf.reduce_sum(model.losses)
                pred_loss = []
                for output, label, loss_fn in zip(outputs, labels, loss):
                    pred_loss.append(loss_fn(label, output))
                total_loss = tf.reduce_sum(pred_loss) + regularization_loss

            grads = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            logger.info(
                f"[Epoch {epoch}/{FLAGS.epochs}][Train][Batch {batch + 1}] "
                f"Loss: {total_loss.numpy()}, {list(map(lambda x: np.sum(x.numpy()), pred_loss))}"
            )
            avg_loss.update_state(total_loss)

        for batch, (images, labels) in enumerate(val_dataset):
            outputs = model(images)
            regularization_loss = tf.reduce_sum(model.losses)
            pred_loss = []
            for output, label, loss_fn in zip(outputs, labels, loss):
                pred_loss.append(loss_fn(label, output))
            total_loss = tf.reduce_sum(pred_loss) + regularization_loss

            logger.info(
                f"[Epoch {epoch}/{FLAGS.epochs}][Val][Batch {batch + 1}] "
                f"Loss: {total_loss.numpy()}, {list(map(lambda x: np.sum(x.numpy()), pred_loss))}"
            )
            avg_val_loss.update_state(total_loss)

        logger.info(
            f"[Epoch {epoch}][Summary] train: {avg_loss.result().numpy()}, val: {avg_val_loss.result().numpy()}"
        )
        model.save_weights(
            os_path.join(
                os_path.dirname(FLAGS.weights), "checkpoints",
                f'ep{epoch:03d}-loss{avg_loss.result().numpy():.3f}'
                f'-val_loss{avg_val_loss.result().numpy():.3f}.h5'))
        avg_loss.reset_states()
        avg_val_loss.reset_states()
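
Stripped of the YOLO-specific losses, the inner step above is the canonical TF2 eager training loop: a forward pass under tf.GradientTape, gradients of the total loss with respect to the trainable variables, then optimizer.apply_gradients. A minimal self-contained sketch with a toy model:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(1, kernel_regularizer=tf.keras.regularizers.l2(1e-4))
])
optimizer = tf.keras.optimizers.Adam(1e-3)
loss_fn = tf.keras.losses.MeanSquaredError()
avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)

x = tf.random.normal((32, 4))
y = tf.random.normal((32, 1))

with tf.GradientTape() as tape:
    pred = model(x, training=True)
    # model.losses collects the layer regularization terms, as in the loop above
    total_loss = loss_fn(y, pred) + tf.reduce_sum(model.losses)

grads = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
avg_loss.update_state(total_loss)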
Example #7
def train(model, train_dataset, val_dataset):
    flag_utils.log_flag()

    # Configure the model for transfer learning
    if FLAGS.transfer == 'none':
        pass  # Nothing to do
    elif FLAGS.transfer in ['darknet', 'no_output']:
        if FLAGS.tiny:
            model_pretrained = YoloV3Tiny()
        else:
            model_pretrained = YoloV3()
        model_pretrained.load_weights(FLAGS.weights, by_name=True)

        if FLAGS.transfer == 'darknet':
            model.get_layer('yolo_darknet').set_weights(
                model_pretrained.get_layer('yolo_darknet').get_weights())
            yolo_utils.freeze_all(model.get_layer('yolo_darknet'),
                                  until_layer=FLAGS.freeze)

        elif FLAGS.transfer == 'no_output':
            for layer in model.layers:
                if not layer.name.startswith('yolo_output'):
                    layer.set_weights(
                        model_pretrained.get_layer(layer.name).get_weights())
                    yolo_utils.freeze_all(layer, until_layer=FLAGS.freeze)

    else:
        # All other transfer modes require matching classes
        model.load_weights(weight_path=FLAGS.weights)
        if FLAGS.transfer == 'fine_tune':
            # freeze darknet and fine tune other layers
            darknet = model.get_layer('yolo_darknet')
            yolo_utils.freeze_all(darknet, until_layer=FLAGS.freeze)
        elif FLAGS.transfer == 'frozen':
            # freeze everything
            yolo_utils.freeze_all(model, until_layer=FLAGS.freeze)

    lr_scheduler = CosineDecay(initial_learning_rate=FLAGS.learning_rate,
                               decay_steps=FLAGS.decay_steps,
                               alpha=FLAGS.lr_alpha)
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler)
    loss = [
        YoloLoss(model.anchors[mask], classes=model.num_classes)
        for mask in model.anchor_masks
    ]
    """Start training"""
    if FLAGS.fit == 'eager_tf':
        run_eager_fit(model, train_dataset, val_dataset, optimizer, loss)
    else:
        model.compile(optimizer=optimizer,
                      loss=loss,
                      run_eagerly=(FLAGS.fit == 'eager_fit'))
        checkpoint_path = os_path.join(
            os_path.dirname(FLAGS.weights), "checkpoints",
            'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5')
        os_path.make_dir(os_path.dirname(checkpoint_path))
        plt_his = PlotHistory()
        callbacks = [
            # ReduceLROnPlateau(verbose=1, patience=FLAGS.lr_patience),
            EarlyStopping(patience=FLAGS.early_stop, verbose=1),
            ModelCheckpoint(checkpoint_path,
                            monitor='val_loss',
                            verbose=0,
                            save_best_only=False,
                            save_weights_only=True,
                            mode='auto',
                            save_freq='epoch'),
            # TensorBoard(log_dir='../logs'),
            LogHistory(),
            plt_his
        ]
        try:
            history = model.fit(train_dataset,
                                epochs=FLAGS.epochs,
                                callbacks=callbacks,
                                validation_data=val_dataset)
        finally:
            plt_his.plot(save_path=os_path.join(
                os_path.dirname(FLAGS.weights),
                f"{datetime.datetime.now().strftime('%Y%m%d-%H%M')}.png"))
Example #8
 def save_to_serve(self, model_dir, version="0001", overwrite=True):
     # Layout expected by TF Serving: <model_dir>/<model_name>/<version>/
     output_dir = os_path.join(model_dir, self.model_name, version)
     tf.saved_model.save(self, output_dir)
     logger.debug(f"model has been exported to {output_dir}")