Example No. 1
0
def main():
    """Generate hot maps for every class directory of the dataset."""
    args = get_args()

    # Make sure the output directory exists before anything is written.
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    network = nn_builder.get_network(args.nntype, args.cls_num,
                                     args.input_size)
    if args.ckpt is not None:
        # expect_partial(): silence warnings about training-only state in
        # the checkpoint — only the weights matter for prediction.
        network.load_weights(args.ckpt).expect_partial()

    helper = HotMapHelper(network, tf.keras.losses.categorical_crossentropy)
    helper.creates_hotmap_for_classes_directories(args.ds_path,
                                                  args.input_size,
                                                  args.output_path,
                                                  args.kernel_size,
                                                  args.stride)
def main():
    """Train the selected network on the training split."""
    tf.keras.backend.set_floatx('float32')
    args = get_args()

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    loader = DataLoader("train_dataset",
                        args.train_file,
                        args.cls_num,
                        args.input_size,
                        args.output_path,
                        augment=True)
    # val_dataloader = DataLoader("val_dataset", args.val_file, args.cls_num, args.input_size, args.output_path, augment=False)

    network = nn_builder.get_network(args.nntype)
    # network.update_classes(args.cls_num, args.input_size)

    adam = tf.keras.optimizers.Adam(learning_rate=args.learning_rate1)
    ce_loss = tf.keras.losses.CategoricalCrossentropy()
    trainer = TrainTestHelper(network, adam, ce_loss, training=True)

    train(args.num_epochs1, args.batch_size, trainer, loader)
Example No. 3
0
def main():
    """Evaluate a trained model on the test split and dump hot maps.

    Loads a checkpointed model, prints loss/accuracy for one test batch,
    then writes a sliding-window hot map for every logged test image.
    """
    tf.keras.backend.set_floatx('float32')
    args = get_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    dataloader = DataLoader("dataloader",
                            args.train_path,
                            args.val_path,
                            args.test_path,
                            args.cls_num,
                            args.input_size,
                            output_path=args.output_path)
    network = nn_builder.get_network(args.nntype, args.cls_num,
                                     args.input_size)
    network.load_model(args.ckpt_dir)

    batch_size = 32
    batch_x, batch_y = dataloader.read_batch(batch_size, "test")
    loss_func = tf.keras.losses.SparseCategoricalCrossentropy()

    loss, accuracy = traintest.get_accuracy_and_loss(batch_x, batch_y, network,
                                                     loss_func)
    print("loss: {}, accuracy: {}".format(loss, accuracy))

    hot_map_creator = traintest.HotMapHelper(network, args.input_size,
                                             loss_func)
    paths = dataloader.paths_logger
    labels = dataloader.labels_logger

    # BUG FIX: iterate over the number of *test samples*. The original used
    # range(len(paths)), i.e. the number of keys in the split dict, which
    # capped the loop at the number of splits instead of the test-set size.
    for i in range(len(paths["test"])):
        hot_map_creator.test_with_square(paths["test"][i], labels["test"][i],
                                         args.kernel_size, args.stride,
                                         args.output_path)
Example No. 4
0
File: DOC.py  Project: LotanLevy/DOC
def main():
    """Build reference/target data pipelines and train the DOC model.

    NOTE(review): the loss names (d_loss / c_loss / features_loss) and the
    reference-vs-target datasets suggest the DOC one-class training setup
    (descriptiveness + compactness losses) — confirm against the project.
    """
    tf.keras.backend.set_floatx('float32')

    args = get_args()
    print(args)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # data loaders #

    # Reference dataset: 80/20 train/validation split, no augmentation.
    ref_gen = tf.keras.preprocessing.image.ImageDataGenerator(
        validation_split=0.2)
    ref_classes = [str(i) for i in range(1000)]  # NOTE(review): unused below
    ref_train_datagen = ref_gen.flow_from_directory(
        args.ref_path,
        subset="training",
        class_mode="categorical",
        target_size=args.input_size,
        batch_size=args.ref_batch_size)
    ref_val_datagen = ref_gen.flow_from_directory(
        args.ref_path,
        subset="validation",
        class_mode="categorical",
        target_size=args.input_size,
        batch_size=args.ref_batch_size)

    # Target dataset: same split, with geometric augmentation.
    tar_gen = tf.keras.preprocessing.image.ImageDataGenerator(
        zoom_range=0.15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.15,
        horizontal_flip=True,
        fill_mode="nearest",
        validation_split=0.2)

    tar_train_datagen = tar_gen.flow_from_directory(
        args.tar_path,
        subset="training",
        class_mode="categorical",
        target_size=args.input_size,
        batch_size=args.tar_batch_size)
    tar_val_datagen = tar_gen.flow_from_directory(
        args.tar_path,
        subset="validation",
        class_mode="categorical",
        target_size=args.input_size,
        batch_size=args.tar_batch_size)

    # Record which files landed in each validation split.
    save_paths("ref_val", args.output_path, ref_val_datagen)
    save_paths("tar_val", args.output_path, tar_val_datagen)

    model = nn_builder.get_network(args.nntype, args.cls_num, args.input_size)
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)

    # Template images: args.templates_num batches drawn from the target
    # training generator, concatenated along the batch axis.
    templates_images = np.concatenate(
        [tar_train_datagen.next()[0] for i in range(args.templates_num)])

    features_loss = FeaturesLoss(templates_images, model, args.cls_num,
                                 args.tar_batch_size)

    losses = {
        "d_loss": tf.keras.losses.CategoricalCrossentropy(),
        "c_loss": compactnessLoss(args.cls_num, args.tar_batch_size),
        "features_loss": features_loss
    }
    # Metric *classes*, not instances — presumably instantiated inside the
    # model's training setup; verify against set_ready_for_train.
    metrics = {
        "accuracy": tf.keras.metrics.CategoricalAccuracy,
        "total": tf.keras.metrics.Mean,
        "pred_val": tf.keras.metrics.Mean
    }

    aoc_helper = None

    # Optional "alien" (outlier) data enables AOC evaluation during training.
    if args.alien_path is not None:
        target_data = np.concatenate([
            tar_val_datagen.next()[0]
            for _ in range(int(args.test_size / args.tar_batch_size))
        ])

        alien_gen = tf.keras.preprocessing.image.ImageDataGenerator()

        # Single batch sized to match the collected target data exactly.
        alien_datagen = alien_gen.flow_from_directory(
            args.alien_path,
            subset="training",
            class_mode="categorical",
            target_size=args.input_size,
            batch_size=target_data.shape[0])
        alien_data, _ = alien_datagen.next()
        aoc_helper = AOC_helper(templates_images, target_data, alien_data,
                                args.cls_num, args.tar_batch_size)

        print("target_test {} alien size {}".format(len(target_data),
                                                    len(alien_data)))

    # Timestamped TensorBoard log directory under the output path.
    log_dir = os.path.join(
        os.path.join(
            args.output_path,
            "logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")))
    summary_writer = tf.summary.create_file_writer(logdir=log_dir)

    model.set_ready_for_train(optimizer,
                              args.lambd,
                              losses=losses,
                              metrics=metrics)

    train(model, (ref_train_datagen, tar_train_datagen),
          args.iter, (ref_val_datagen, tar_val_datagen),
          args.val_size,
          1,
          print_freq=args.print,
          summary_writer=summary_writer,
          aoc_helper=aoc_helper,
          output_path=args.output_path)
Example No. 5
0
def main():
    """Fine-tune a classifier on a directory dataset with Keras generators.

    Splits args.ds_path 80/20 into train/validation, trains the network
    selected by args.nntype, and writes checkpoints, a CSV log and
    TensorBoard summaries under args.output_path.
    """
    # Fixed seeds for reproducible runs.
    random.seed(1234)
    np.random.seed(1234)
    tf.random.set_seed(1234)

    tf.keras.backend.set_floatx('float32')
    args = get_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # check_corrupted_images(args)

    # Augmentation intentionally disabled; only a validation split is used.
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        validation_split=0.2)

    train_generator = train_datagen.flow_from_directory(
        args.ds_path,
        subset="training",
        seed=123,
        shuffle=True,
        class_mode="categorical",
        target_size=args.input_size,
        batch_size=args.batch_size)

    validation_generator = train_datagen.flow_from_directory(
        args.ds_path,
        subset="validation",
        seed=123,
        shuffle=True,
        class_mode="categorical",
        target_size=args.input_size,
        batch_size=args.batch_size)

    # Class count comes from the directory structure, not from args.
    cls_num = len(train_generator.class_indices)

    network = nn_builder.get_network(args.nntype, cls_num, args.input_size)
    network.update_output_path(args.output_path)
    network.freeze_status()

    if args.ckpt is not None:
        # expect_partial(): ignore training-only state in the checkpoint.
        network.load_weights(args.ckpt).expect_partial()

    # FIX: build the log path portably; the original hard-coded Windows
    # '\\' separators inside the directory name.
    log_dir = os.path.join(args.output_path, "amazon_logs", "fit",
                           datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    # 'learning_rate' replaces the deprecated 'lr' keyword.
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.001,
                                        decay=1e-6,
                                        momentum=0.5,
                                        nesterov=True)

    network.compile(optimizer=optimizer,
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

    checkpoint_path = os.path.join(args.output_path, "checkpoint")

    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='val_accuracy',
                                 save_best_only=True,
                                 save_weights_only=False,
                                 mode='max',
                                 verbose=1)
    early = EarlyStopping(monitor='val_accuracy',
                          min_delta=0,
                          patience=20,
                          verbose=1,
                          mode='auto')

    csv_logger = CSVLogger(os.path.join(args.output_path, 'log.csv'),
                           append=True,
                           separator=';')

    # Model.fit accepts generators directly; fit_generator is deprecated.
    network.fit(
        train_generator,
        steps_per_epoch=len(train_generator),
        validation_data=validation_generator,
        validation_steps=10,
        epochs=args.num_epochs,
        callbacks=[checkpoint, early, csv_logger, tensorboard_callback],
        workers=args.workers,
        use_multiprocessing=True)
Example No. 6
0
def main():
    """Train a classifier on CIFAR-10 using custom DataGenerator pipelines.

    Loads CIFAR-10 via tf.keras.datasets, wraps the arrays in the
    project's DataGenerator, trains the network selected by args.nntype,
    and writes checkpoints, a CSV log and TensorBoard summaries under
    args.output_path.
    """
    # Fixed seeds for reproducible runs.
    random.seed(1234)
    np.random.seed(1234)
    tf.random.set_seed(1234)

    tf.keras.backend.set_floatx('float32')
    args = get_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # FIX: build the log path portably; the original hard-coded Windows
    # '\\' separators inside the directory name.
    log_dir = os.path.join(args.output_path, "logs", "fit",
                           datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    (x_train, y_train), (x_test,
                         y_test) = tf.keras.datasets.cifar10.load_data()

    cls_num = len(np.unique(y_train))

    train_generator = DataGenerator(images=x_train,
                                    labels=y_train,
                                    batch_size=args.batch_size,
                                    dim=args.input_size,
                                    n_channels=3,
                                    n_classes=cls_num,
                                    shuffle=True)

    validation_generator = DataGenerator(images=x_test,
                                         labels=y_test,
                                         batch_size=args.batch_size,
                                         dim=args.input_size,
                                         n_channels=3,
                                         n_classes=cls_num,
                                         shuffle=True)

    network = nn_builder.get_network(args.nntype, cls_num, args.input_size)
    network.update_output_path(args.output_path)
    network.freeze_status()

    # 'learning_rate' replaces the deprecated 'lr' keyword.
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.001,
                                        decay=1e-6,
                                        momentum=0.5,
                                        nesterov=True)

    network.compile(optimizer=optimizer,
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

    checkpoint_path = os.path.join(args.output_path, "vgg16_1.h5")

    # FIX: with metrics=['accuracy'] TF2 reports 'val_accuracy', so the
    # original monitor='val_acc' was never found and the best model was
    # never saved; save_freq='epoch' replaces save_freq=1 because the
    # validation metric only exists at epoch end, not per batch.
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='val_accuracy',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=False,
                                 mode='auto',
                                 save_freq='epoch')
    early = EarlyStopping(monitor='val_accuracy',
                          min_delta=0,
                          patience=20,
                          verbose=1,
                          mode='auto')

    csv_logger = CSVLogger(os.path.join(args.output_path, 'log.csv'),
                           append=True,
                           separator=';')

    # Model.fit accepts generators directly; fit_generator is deprecated.
    network.fit(train_generator,
                steps_per_epoch=len(train_generator),
                validation_data=validation_generator,
                validation_steps=10,
                epochs=args.num_epochs,
                callbacks=[checkpoint, tensorboard_callback])