Example #1
def get_data_loaders(cfg, args):
    # Wrap the DataManager train/val datasets in a shuffled training loader
    # and a deterministic validation loader.
    data_man = DataManager(cfg)
    train_loader = DataLoader(data_man.train_ds,
                              batch_size=args.batch_size,
                              num_workers=cfg.NUM_WORKERS,
                              pin_memory=cfg.PIN_MEMORY,
                              shuffle=True)

    val_loader = DataLoader(data_man.val_ds,
                            batch_size=args.batch_size,
                            num_workers=cfg.NUM_WORKERS,
                            pin_memory=cfg.PIN_MEMORY,
                            shuffle=False)
    return train_loader, val_loader
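
The args object used above is not constructed in this snippet; a minimal, hypothetical CLI wiring (only batch_size is assumed here, everything else comes from cfg):

import argparse

def get_args():
    # Hypothetical parser; the real project may expose more options.
    parser = argparse.ArgumentParser(description="Build train/val loaders")
    parser.add_argument("--batch-size", type=int, default=32)
    return parser.parse_args()

# usage sketch: train_loader, val_loader = get_data_loaders(Config(), get_args())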
Example #2
def main():

    cfg = Config()
    data_man = DataManager(cfg)
    tf = A.Compose([
        A.Resize(width=cfg.IMG_WIDTH * 2, height=cfg.IMG_HEIGHT * 2),
        A.ToFloat(),
        ToTensorV2(),
    ])

    im_folder = ImageFolder(data_man.data, tf)

    loader = DataLoader(im_folder,
                        batch_size=64,
                        num_workers=cfg.NUM_WORKERS,
                        pin_memory=cfg.PIN_MEMORY,
                        shuffle=False)

    mean, std = get_mean_std(loader, cfg)

    print('mean', mean)
    print('std', std)
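
get_mean_std is not defined in this snippet; a minimal sketch of the per-channel statistics pass it presumably performs, assuming 3-channel batches shaped (N, C, H, W) from the loader above:

import torch

def get_mean_std(loader, cfg):
    # cfg is accepted to match the call above but is not needed here.
    # Accumulate per-channel sums of x and x^2 over all pixels, then derive
    # mean and std without holding the whole dataset in memory.
    channel_sum = torch.zeros(3)
    channel_sq_sum = torch.zeros(3)
    n_pixels = 0
    for images, _ in loader:
        n_pixels += images.numel() / images.shape[1]
        channel_sum += images.sum(dim=[0, 2, 3])
        channel_sq_sum += (images ** 2).sum(dim=[0, 2, 3])
    mean = channel_sum / n_pixels
    std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
    return mean, std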
Example #3
                image_patches = get_image_patches(image, self.patch_size)
                image_patches['orig'] = cv2.resize(image, self.patch_size)

                img_probas = classify_image(model, image_patches)

                name_arch = re.search('b[0-9]', model_arch).group(0)
                image_preds_to_dataFrame(img_probas, name_arch, image_name)

        self.data_frame.to_csv(
            '/Users/eugeneolkhovik/python_files/ML/melanoma/derma_classifier/meta_study/ensemble_pred.csv'
        )


if __name__ == "__main__":
    cfg = ConfigTwoClasses()
    data_manager = DataManager(cfg)
    _, data_loader = get_data_loaders(cfg, )  # TODO: pass the args object (providing batch_size) expected by get_data_loaders
    patches_name = ['tl', 'tr', 'bl', 'br', 'center', 'orig']

    writer = PredictionWriter()
    writer.create_dataframe(patches_name)
    writer.run()
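
get_image_patches is not shown here; given the patch names listed above ('tl', 'tr', 'bl', 'br', 'center', plus the caller-added 'orig'), a plausible sketch that crops the four quadrants and a central crop and resizes each to patch_size:

import cv2

def get_image_patches(image, patch_size):
    # Corner quadrants and a central crop, each resized to patch_size (w, h).
    h, w = image.shape[:2]
    half_h, half_w = h // 2, w // 2
    crops = {
        'tl': image[:half_h, :half_w],
        'tr': image[:half_h, half_w:],
        'bl': image[half_h:, :half_w],
        'br': image[half_h:, half_w:],
        'center': image[h // 4:h // 4 + half_h, w // 4:w // 4 + half_w],
    }
    return {name: cv2.resize(crop, patch_size) for name, crop in crops.items()}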
"""
### experiment -> models directories

all_dirs = os.walk(experiments_dir)
models_res = []
model_path = []
for i in next(all_dirs)[1]:  # immediate subdirectories of experiments_dir
    model_results_dir = os.path.join(experiments_dir, i)
    models_res.append(model_results_dir)
Example #4
def load_all_images(input_path):
    # Load the 'imdb' dataset annotations through DataManager.
    dataset_name = 'imdb'
    data_loader = DataManager(dataset_name, input_path)
    data = data_loader.get_data()
    return data
Example #5
def main():
    args = get_args()
    input_path = args.input
    images_path = args.dataset_path
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    validation_split = args.validation_split

    logging.debug("Loading data...")

    dataset_name = 'imdb'
    data_loader = DataManager(dataset_name, dataset_path=input_path)
    ground_truth_data = data_loader.get_data()
    train_keys, val_keys = split_imdb_data(ground_truth_data, validation_split)

    print("Samples: Training - {}, Validation - {}".format(
        len(train_keys), len(val_keys)))
    input_shape = (IMG_SIZE, IMG_SIZE, 3)
    # images_path = 'data/imdb_crop/'

    image_generator = ImageGenerator(ground_truth_data,
                                     batch_size,
                                     input_shape[:2],
                                     train_keys,
                                     val_keys,
                                     path_prefix=images_path,
                                     vertical_flip_probability=0)

    n_age_bins = 21
    alpha = 1
    model = MobileNetDeepEstimator(input_shape[0],
                                   alpha,
                                   n_age_bins,
                                   weights='imagenet')()

    opt = SGD(lr=0.001)

    model.compile(
        optimizer=opt,
        loss=["binary_crossentropy", "categorical_crossentropy"],
        metrics={
            'gender': 'accuracy',
            'age': 'accuracy'
        },
    )

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    logging.debug("Saving model...")
    mk_dir("models")
    with open(os.path.join("models", "MobileNet.json"), "w") as f:
        f.write(model.to_json())

    mk_dir("checkpoints")

    run_id = "MobileNet - " + str(batch_size) + " " + '' \
        .join(random
              .SystemRandom()
              .choice(string.ascii_uppercase) for _ in
              range(10)
              )
    print(run_id)

    reduce_lr = ReduceLROnPlateau(verbose=1, epsilon=0.001, patience=4)

    callbacks = [
        LearningRateScheduler(schedule=Schedule(nb_epochs)), reduce_lr,
        ModelCheckpoint(os.path.join(
            'checkpoints', 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto"),
        TensorBoard(log_dir='logs/' + run_id)
    ]

    logging.debug("Running training...")

    logging.debug("steps : " + str(int((len(val_keys) / batch_size))))

    hist = model.fit_generator(
        image_generator.flow(mode='train'),
        steps_per_epoch=int(len(train_keys) / batch_size),
        epochs=nb_epochs,
        callbacks=callbacks,
        validation_data=image_generator.flow('val'),
        validation_steps=int(len(val_keys) / batch_size))

    logging.debug("Saving weights...")
    model.save(os.path.join("models", "MobileNet_model.h5"))
    model.save_weights(os.path.join("models", FINAL_WEIGHTS_PATH),
                       overwrite=True)
    pd.DataFrame(hist.history).to_hdf(os.path.join("models", "history.h5"),
                                      "history")
Example #6
def main():

    args = get_args()
    input_path = args.input
    eps = args.eps
    batch_size = args.batch_size
    validation_split = args.validation_split
    model_weights = args.model_weights

    logging.debug("Loading data...")

    dataset_name = 'imdb'
    data_loader = DataManager(dataset_name, dataset_path=input_path)
    ground_truth_data = data_loader.get_data()
    train_keys, val_keys = split_imdb_data(ground_truth_data, validation_split)

    print("Samples: Training - {}, Validation - {}".format(len(train_keys), len(val_keys)))
    input_shape = (IMG_SIZE, IMG_SIZE, 3)
    images_path = 'data/imdb_crop/'

    n_age_bins = 21
    alpha = 1
    model = MobileNetDeepEstimator(input_shape[0], alpha, n_age_bins, weights='imagenet')()

    image_generator = ImageGenerator(ground_truth_data, batch_size,
                                     input_shape[:2],
                                     train_keys, val_keys,
                                     path_prefix=images_path,
                                     vertical_flip_probability=0
                                     )


    opt = SGD(lr=0.001)
    adv_acc_metric_gender = get_adversarial_acc_metric_gender(model, eps=eps)
    adv_acc_metric_age = get_adversarial_acc_metric_age(model, eps=eps)

    model.compile(
        optimizer=opt,
        loss={'gender': 'binary_crossentropy',
              'age': 'categorical_crossentropy'},
        metrics={'gender': adv_acc_metric_gender,
                 'age': adv_acc_metric_age}
    )
    model.load_weights(model_weights)

    eval_list = model.evaluate_generator(image_generator.flow(mode='val'),
                                         steps=int(len(val_keys) / batch_size))

    print(eval_list)

    del model
    K.clear_session()

    model = MobileNetDeepEstimator(input_shape[0], alpha, n_age_bins, weights='imagenet')()
    model.load_weights(model_weights)
    model.compile(
        optimizer=opt,
        loss={'gender': 'binary_crossentropy',
              'age': 'categorical_crossentropy'},
        metrics={'gender': 'accuracy',
                 'age': 'accuracy'}
    )

    eval_list = model.evaluate_generator(image_generator.flow(mode='val'),
                                         steps=int(len(val_keys) / batch_size))

    print(eval_list)
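
The get_adversarial_acc_metric_gender/age helpers are not part of this snippet. A hypothetical sketch of the idea for a single categorical head (graph-mode Keras/TF1): build one FGSM perturbation symbolically inside the metric and report accuracy on the perturbed batch. The names and signatures below are assumptions, not the project's actual helpers.

import keras.backend as K
from keras.losses import categorical_crossentropy

def make_adversarial_acc_metric(model, eps):
    def adv_acc(y_true, y_pred):
        # One FGSM step: perturb the input in the direction of the loss gradient.
        loss = categorical_crossentropy(y_true, y_pred)
        grad = K.gradients(loss, model.inputs[0])[0]
        x_adv = K.stop_gradient(model.inputs[0] + eps * K.sign(grad))
        preds_adv = model(x_adv)
        if isinstance(preds_adv, list):  # multi-output model: take the age head
            preds_adv = preds_adv[-1]
        return K.mean(K.cast(K.equal(K.argmax(y_true, axis=-1),
                                     K.argmax(preds_adv, axis=-1)), K.floatx()))
    return adv_acc

# usage sketch: metrics={'age': make_adversarial_acc_metric(model, eps)}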
Example #7
def main():
    # Set GPU memory usage
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    K.set_session(session)
    tf.logging.set_verbosity(tf.logging.ERROR)

    args = get_args()
    IMG_SIZE = args.image_size
    input_path = args.input
    batch_size = args.batch_size
    nb_epochs = args.nb_epochs
    validation_split = args.validation_split
    dataset_name = args.dataset_name
    n_age_bins = args.class_num
    embdding = args.embdding
    lr = args.lr

    logging.debug("[INFO] Loading data...")

    data_loader = DataManager(dataset_name)
    ground_truth_data = data_loader.get_data()
    train_keys, val_keys = split_data(ground_truth_data,
                                      validation_split=validation_split,
                                      do_shuffle=True)

    print("Samples: Training - {}, Validation - {}".format(
        len(train_keys), len(val_keys)))
    input_shape = (IMG_SIZE, IMG_SIZE, 3)

    image_generator = ImageGenerator(ground_truth_data,
                                     batch_size,
                                     input_shape[:2],
                                     train_keys,
                                     val_keys,
                                     path_prefix=input_path,
                                     vertical_flip_probability=0,
                                     eraser_probability=0,
                                     bins=n_age_bins)

    model = facenet_resnet(nb_class=n_age_bins,
                           embdding=embdding,
                           is_train=True,
                           weights="./models/facenet_keras_weights.h5")
    model.compile(optimizer=optimizers.SGD(lr=lr,
                                           momentum=0.9,
                                           decay=5e-4,
                                           nesterov=False),
                  loss={
                      'pred_g': focal_loss(alpha=.4, gamma=2),
                      'pred_a': mae,
                      "pred_e": "categorical_crossentropy"
                  },
                  loss_weights={
                      'pred_g': 0.2,
                      'pred_a': 1,
                      'pred_e': 0.4
                  },
                  metrics={
                      'pred_g': 'accuracy',
                      'pred_a': mae,
                      'pred_e': 'accuracy'
                  })

    logging.debug("[INFO] Saving model...")

    mk_dir("checkpoints")

    callbacks = [
        CSVLogger(os.path.join('checkpoints', 'train.csv'), append=False),
        ModelCheckpoint(os.path.join(
            'checkpoints',
            'weights.{epoch:02d}-{val_pred_g_acc:.3f}-{val_pred_a_mae:.3f}-{val_pred_e_acc:.3f}.h5'
        ),
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="min"),
        # Use Stochastic Gradient Descent with Restart
        # https://github.com/emrul/Learning-Rate
        # Based on paper SGDR: STOCHASTIC GRADIENT DESCENT WITH WARM RESTARTS
        # SGDRScheduler(min_lr=lr*((0.1)**3), max_lr=lr, steps_per_epoch=np.ceil(len(train_keys) /
        #                                                                        batch_size), lr_decay=0.9, cycle_length=5, mult_factor=1.5)

        # Use Cyclical Learning Rate
        # CyclicLR(mode='triangular', step_size=np.ceil(
        #     len(train_keys)/batch_size), base_lr=lr*((0.1)**3), max_lr=lr)
        LearningRateScheduler(PolyDecay(lr, 0.9, nb_epochs).scheduler)
    ]

    logging.debug("[INFO] Running training...")

    history = model.fit_generator(
        image_generator.flow(mode='train'),
        steps_per_epoch=np.ceil(len(train_keys) / batch_size),
        epochs=nb_epochs,
        callbacks=callbacks,
        validation_data=image_generator.flow('val'),
        validation_steps=np.ceil(len(val_keys) / batch_size))

    logging.debug("[INFO] Saving weights...")

    K.clear_session()
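
PolyDecay is referenced above but not defined in the snippet; a minimal polynomial-decay sketch consistent only with how it is constructed (PolyDecay(lr, 0.9, nb_epochs)) and used (.scheduler passed to LearningRateScheduler):

class PolyDecay:
    # Hypothetical polynomial decay: lr * (1 - epoch / n_epochs) ** power.
    # The exact formula is an assumption; only the constructor arguments and
    # the .scheduler attribute are taken from the snippet above.
    def __init__(self, initial_lr, power, n_epochs):
        self.initial_lr = initial_lr
        self.power = power
        self.n_epochs = n_epochs

    def scheduler(self, epoch):
        return self.initial_lr * (1.0 - epoch / float(self.n_epochs)) ** self.power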