Example #1
0
def main():
    """Command-line entry point.

    Parses CLI flags, configures the environment, then dispatches to
    exactly one action: label preprocessing (--tolabel), dataset
    augmentation (--augmentation N), training (--train), or prediction
    (--test).  Augmentation/train/test require an existing dataset
    directory; otherwise a "Dataset not found" message is printed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tolabel",
        help="Preprocess images to create labels (out/tolabel)",
        action="store_true",
        default=False)
    parser.add_argument("--augmentation",
                        help="Dataset augmentation (pass quantity)",
                        type=int)
    parser.add_argument("--dataset",
                        help="Dataset name",
                        type=str,
                        default=constant.DATASET)
    parser.add_argument("--train",
                        help="Train",
                        action="store_true",
                        default=False)
    parser.add_argument("--test",
                        help="Predict",
                        action="store_true",
                        default=False)
    parser.add_argument("--arch",
                        help="Neural Network architecture",
                        type=str,
                        default=constant.MODEL)
    parser.add_argument("--dip",
                        help="Method for image processing",
                        type=str,
                        default=constant.IMG_PROCESSING)
    parser.add_argument("--gpu",
                        help="Enable GPU mode",
                        action="store_true",
                        default=False)
    args = parser.parse_args()

    environment.setup(args)

    def dataset_exists(name):
        # Non-empty name whose data directory already exists (mkdir=False
        # avoids creating it as a side effect of the check).
        # NOTE(review): `path.exist` is a project helper — presumably a
        # boolean existence test; confirm the spelling is intentional.
        return len(name) > 0 and path.exist(path.data(name, mkdir=False))

    if args.tolabel:
        generator.tolabel()
    elif args.dataset is not None and dataset_exists(args.dataset):
        if args.augmentation:
            generator.augmentation(args.augmentation)
        elif args.train:
            nn.train()
        elif args.test:
            nn.test()
    else:
        print("\n>> Dataset not found\n")
Example #2
0
def train():
    """Train the network in repeated rounds while the monitored metric
    keeps improving.

    Implements an outer "retrain" loop with its own patience counter on
    top of Keras' per-epoch EarlyStopping: after each `fit_generator`
    round the best monitored value is compared against the best seen so
    far; on improvement the model is evaluated via `test(nn)`, otherwise
    patience is consumed until the loop breaks.

    NOTE(review): `fit_generator` is the legacy Keras API (deprecated in
    modern tf.keras, where `fit` accepts generators directly).
    """
    nn = NeuralNetwork()

    # Pad the dataset up to the next multiple of 100; `q` is how many
    # extra (augmented) samples are needed to get there.
    total = data.length_from_path(nn.dn_image, nn.dn_aug_image)
    q = misc.round_up(total, 100) - total

    if (q > 0):
        print("Dataset augmentation (%s increase) is necessary (only once)\n" %
              q)
        gen.augmentation(q)

    # Load image/label pairs from both the original and augmented dirs,
    # then carve off a validation split of proportion p_VALIDATION.
    images, labels = data.fetch_from_paths([nn.dn_image, nn.dn_aug_image],
                                           [nn.dn_label, nn.dn_aug_label])
    images, labels, v_images, v_labels = misc.random_split_dataset(
        images, labels, const.p_VALIDATION)

    epochs, steps_per_epoch, validation_steps = misc.epochs_and_steps(
        len(images), len(v_images))

    print(
        "Train size:\t\t%s |\tSteps per epoch: \t%s\nValidation size:\t%s |\tValidation steps:\t%s\n"
        % misc.str_center(len(images), steps_per_epoch, len(v_images),
                          validation_steps))

    # Outer-loop patience (const.PATIENCE) is separate from the inner
    # EarlyStopping patience (25% of the epoch budget).
    patience, patience_early = const.PATIENCE, int(epochs * 0.25)
    # past_monitor starts at +inf so the first round always counts as an
    # improvement (see the abs(improve) == inf check below).
    loop, past_monitor = 0, float('inf')

    # Keep only the best weights on disk, stop a round early if the
    # monitored metric stalls, and append metrics to a CSV log.
    checkpoint = ModelCheckpoint(nn.fn_checkpoint,
                                 monitor=const.MONITOR,
                                 save_best_only=True,
                                 verbose=1)
    early_stopping = EarlyStopping(monitor=const.MONITOR,
                                   min_delta=const.MIN_DELTA,
                                   patience=patience_early,
                                   restore_best_weights=True,
                                   verbose=1)
    logger = CSVLogger(nn.fn_logger, append=True)

    while True:
        loop += 1
        # NOTE(review): workers=0 runs the generator on the main thread,
        # which makes use_multiprocessing=True effectively a no-op —
        # confirm this combination is intentional.
        h = nn.model.fit_generator(
            shuffle=True,
            generator=nn.prepare_data(images, labels),
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            validation_steps=validation_steps,
            validation_data=nn.prepare_data(v_images, v_labels),
            use_multiprocessing=True,
            workers=0,
            callbacks=[checkpoint, early_stopping, logger])

        # Per-epoch history for the monitored metric over this round.
        val_monitor = h.history[const.MONITOR]

        # Loss-like metrics improve downward; everything else upward.
        if ("loss" in const.MONITOR):
            val_monitor = min(val_monitor)
            improve = (past_monitor - val_monitor)
        else:
            val_monitor = max(val_monitor)
            improve = (val_monitor - past_monitor)

        print("\n##################")
        print("Finished epoch (%s) with %s: %f" %
              (loop, const.MONITOR, val_monitor))

        # First round: improve is infinite (past_monitor == inf), so it
        # is always accepted; afterwards require more than MIN_DELTA.
        if (abs(improve) == float("inf") or improve > const.MIN_DELTA):
            print("Improved from %f to %f" % (past_monitor, val_monitor))
            past_monitor = val_monitor
            patience = const.PATIENCE  # reset outer patience on improvement
            test(nn)                   # evaluate the improved model
        elif (patience > 0):
            print("Did not improve from %f" % (past_monitor))
            print("Current patience: %s" % (patience))
            patience -= 1
        else:
            break
        print("##################\n")