Example #1
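            # optionally stream training metrics to Weights & Biases,
            # without uploading model checkpoints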
            wandb_callback = WandbCallback(save_model=False)
            frozen_callbacks.append(wandb_callback)

        # use custom yolo_loss Lambda layer.
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                "yolo_loss": lambda y_true, y_pred: y_pred
            },
        )
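        # Why the loss above is just "y_pred": in keras-yolo3-style graphs the
        # YOLO loss is computed inside the model by a Lambda layer, roughly:
        #
        #     model_loss = Lambda(yolo_loss, output_shape=(1,), name="yolo_loss",
        #                         arguments={"anchors": anchors,
        #                                    "num_classes": num_classes,
        #                                    "ignore_thresh": 0.5})(
        #         [*model_body.output, *y_true])
        #     model = Model([model_body.input, *y_true], model_loss)
        #
        # so compile() only needs an identity function that echoes that output.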

        print("Train on {} samples, val on {} samples, with batch size {}.".
              format(num_train, num_val, BATCH_SIZE))

        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], BATCH_SIZE, INPUT_SHAPE,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // BATCH_SIZE),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   BATCH_SIZE, INPUT_SHAPE,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // BATCH_SIZE),
            epochs=epoch1,
            initial_epoch=0,
            callbacks=frozen_callbacks,
        )
        model.save_weights(os.path.join(log_dir, "trained_weights_stage_1.h5"))

        # Unfreeze and continue training, to fine-tune.
        # Train longer if the result is unsatisfactory.

        full_callbacks = [logging, checkpoint, reduce_lr, early_stopping]
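The callbacks referenced above (`logging`, `checkpoint`, `reduce_lr`, `early_stopping`) are created earlier in this kind of script. A minimal sketch of how keras-yolo3-style trainers usually define them; the checkpoint filename pattern and patience values are illustrative assumptions, not taken from this example:

import os
from keras.callbacks import (TensorBoard, ModelCheckpoint,
                             ReduceLROnPlateau, EarlyStopping)

logging = TensorBoard(log_dir=log_dir)
# keep only checkpoints that improve val_loss, saved every few epochs
checkpoint = ModelCheckpoint(
    os.path.join(log_dir, "ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5"),
    monitor="val_loss", save_weights_only=True, save_best_only=True, period=3)
# cut the learning rate by 10x when val_loss plateaus
reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=3, verbose=1)
# stop the unfrozen fine-tuning stage once val_loss stops improving
early_stopping = EarlyStopping(monitor="val_loss", min_delta=0, patience=10, verbose=1)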
Example #2
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                "yolo_loss": lambda y_true, y_pred: y_pred
            },
        )

        batch_size = 32
        print(
            "Train on {} samples, val on {} samples, with batch size {}.".format(
                num_train, num_val, batch_size
            )
        )
        history = model.fit_generator(
            data_generator_wrapper(
                lines[:num_train], batch_size, input_shape, anchors, num_classes
            ),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(
                lines[num_train:], batch_size, input_shape, anchors, num_classes
            ),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch1,
            initial_epoch=0,
            callbacks=[logging, checkpoint],
        )
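        # note: unlike save_weights() in the other examples, save() writes the
        # full model (architecture + weights + optimizer state) to the .h5 file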
        model.save(os.path.join(log_dir, "trained_weights_stage_1.h5"))

        step1_train_loss = history.history["loss"]

        file = open(os.path.join(log_dir_time, "step1_loss.npy"), "w")
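Example #2 is truncated after opening the loss file. A plausible continuation (a hypothetical reconstruction; the original keeps the `.npy` name even though the handle is opened in text mode):

        # hypothetical continuation: dump the per-epoch training loss as text
        file.write(str(step1_train_loss))
        file.close()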
Example #3
    # Adjust num epochs to your dataset. This step is enough to obtain a decent model.
    if FLAGS.train_step1:
        model.compile(
            optimizer=Adam(lr=1e-4),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })

        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train],
                                   batch_size,
                                   input_shape,
                                   anchors,
                                   num_classes,
                                   random=FLAGS.is_augment),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size,
                                                   input_shape,
                                                   anchors,
                                                   num_classes,
                                                   random=False),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch1,
            initial_epoch=0,
            callbacks=[logging, checkpoint])
        model.save_weights(os.path.join(log_dir, 'trained_weights_stage_1.h5'))
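All three examples feed `model.fit_generator` through `data_generator_wrapper`. In keras-yolo3-style code this is a thin guard around the real batch generator; a sketch matching the signature in Example #3, where `data_generator` (not shown here) yields batches indefinitely and `random` toggles augmentation:

def data_generator_wrapper(annotation_lines, batch_size, input_shape,
                           anchors, num_classes, random=True):
    # refuse to build a generator for empty data or a non-positive batch size
    n = len(annotation_lines)
    if n == 0 or batch_size <= 0:
        return None
    # delegate to the real generator, which loops over the annotation lines
    # forever, yielding ([image_data, *y_true], dummy_loss_target) batches
    return data_generator(annotation_lines, batch_size, input_shape,
                          anchors, num_classes, random)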