Example #1
def main():
    config = utils.load_config(args.config)
    config["weighted"] = "weighted" in config.keys()

    # copy args to config
    config["mode"] = args.mode
    config["fold"] = args.fold
    config["apex"] = args.apex
    config["output"] = args.output
    config["snapshot"] = args.snapshot
    # config["resume_from"] = args.resume_from if args.resume_from

    utils.set_seed(SEED)
    device = torch.device(DEVICE)

    log(f"Fold {args.fold}")

    model = factory.get_model(config).to(device)

    log(f"Model type: {model.__class__.__name__}")
    if config["mode"] == 'train':
        train(config, model)
        valid(config, model)
    elif config["mode"] == 'valid':
        valid(config, model)
    elif config["mode"] == 'test':
        valid(config, model, all_exam=True)
Example #2
def main():
    args = get_args()
    weight_file = args.weight_file
    margin = args.margin
    image_dir = args.image_dir

    if not weight_file:
        weight_file = get_file("EfficientNetB3_224_weights.11-3.44.hdf5", pretrained_model, cache_subdir="pretrained_models",
                               file_hash=modhash, cache_dir=str(Path(__file__).resolve().parent))

    # for face detection
    detector = dlib.get_frontal_face_detector()

    # load model and weights
    model_name, img_size = Path(weight_file).stem.split("_")[:2]
    img_size = int(img_size)
    cfg = OmegaConf.from_dotlist([f"model.model_name={model_name}", f"model.img_size={img_size}"])
    model = get_model(cfg)
    model.load_weights(weight_file)

    image_generator = yield_images_from_dir(image_dir) if image_dir else yield_images()

    for img in image_generator:
        input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_h, img_w, _ = np.shape(input_img)

        # detect faces using dlib detector
        detected = detector(input_img, 1)
        faces = np.empty((len(detected), img_size, img_size, 3))

        if len(detected) > 0:
            for i, d in enumerate(detected):
                x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()
                xw1 = max(int(x1 - margin * w), 0)
                yw1 = max(int(y1 - margin * h), 0)
                xw2 = min(int(x2 + margin * w), img_w - 1)
                yw2 = min(int(y2 + margin * h), img_h - 1)
                faces[i] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))
                # uncomment the line below to save each cropped face ROI (cast to uint8 first; faces is a float array)
                # cv2.imwrite(f"results/face-{i}.jpg", faces[i].astype(np.uint8))

            # predict ages and genders of the detected faces
            results = model.predict(faces)
            predicted_genders = results[0]
            ages = np.arange(0, 101).reshape(101, 1)
            predicted_ages = results[1].dot(ages).flatten()

            # draw results
            for i, d in enumerate(detected):
                label = "{}, {}".format(int(predicted_ages[i]),
                                        "M" if predicted_genders[i][0] < 0.5 else "F")
                cv2.rectangle(img, (d.left(), d.top()), (d.right(), d.bottom()), (255, 0, 0), 2)
                draw_label(img, (d.left(), d.top()), label)

        cv2.imshow("result", img)
        key = cv2.waitKey(-1) if image_dir else cv2.waitKey(30)

        if key == 27:  # ESC
            break
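
The age head in these examples emits a softmax over 101 bins (ages 0 through 100), and `results[1].dot(ages)` turns each distribution into its expected value. A minimal standalone sketch of that reduction, with a made-up probability vector for one face:

import numpy as np

# made-up softmax output for a single face: probability mass on ages 28-30
probs = np.zeros((1, 101))
probs[0, 28], probs[0, 29], probs[0, 30] = 0.2, 0.5, 0.3

ages = np.arange(0, 101).reshape(101, 1)
expected_age = probs.dot(ages).flatten()  # sum_i p_i * i, per face
print(expected_age)  # [29.1] = 0.2*28 + 0.5*29 + 0.3*30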
Example #3
def main(cfg):
    if cfg.wandb.project:
        import wandb
        from wandb.keras import WandbCallback
        wandb.init(project=cfg.wandb.project)
        callbacks = [WandbCallback()]
    else:
        callbacks = []

    csv_path = Path(to_absolute_path(__file__)).parent.joinpath(
        "meta", f"{cfg.data.db}.csv")
    df = pd.read_csv(str(csv_path))
    train, val = train_test_split(df, random_state=42, test_size=0.1)
    train_gen = ImageSequence(cfg, train, "train")
    val_gen = ImageSequence(cfg, val, "val")

    strategy = tf.distribute.MirroredStrategy()

    with strategy.scope():
        model = get_model(cfg)
        opt = get_optimizer(cfg)
        scheduler = get_scheduler(cfg)
        model.compile(optimizer=opt,
                      loss=[
                          "sparse_categorical_crossentropy",
                          "sparse_categorical_crossentropy"
                      ],
                      metrics=['accuracy'])

    dir_parent_checkpoint = "/content/drive/My Drive/deep_learning/age-gender-estimation"
    if os.path.exists(dir_parent_checkpoint):
        checkpoint_dir = Path(dir_parent_checkpoint).joinpath("checkpoint")
    else:
        checkpoint_dir = Path(
            to_absolute_path(__file__)).parent.joinpath("checkpoint")

    checkpoint_dir.mkdir(exist_ok=True)
    print(f"checkpoint_dir: {checkpoint_dir}")

    filename = "_".join([
        cfg.model.model_name,
        str(cfg.model.img_size), "weights.{epoch:02d}-{val_loss:.2f}.hdf5"
    ])
    callbacks.extend([
        LearningRateScheduler(schedule=scheduler),
        ModelCheckpoint(str(checkpoint_dir) + "/" + filename,
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ])

    model.fit(train_gen,
              epochs=cfg.train.epochs,
              callbacks=callbacks,
              validation_data=val_gen,
              workers=multiprocessing.cpu_count())
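
`ImageSequence` is defined elsewhere in the repository; assuming it follows the standard `tf.keras.utils.Sequence` protocol, a minimal sketch of such a generator is shown below. The class name, the `img_path`/`gender`/`age` columns, and the two-output label layout are assumptions for illustration, chosen to match the two sparse-categorical losses compiled above:

import math
import cv2
import numpy as np
import tensorflow as tf

class ImageSequenceSketch(tf.keras.utils.Sequence):
    # yields (images, [gender_labels, age_labels]) batches for a two-head model
    def __init__(self, df, img_size, batch_size):
        self.df = df.reset_index(drop=True)
        self.img_size = img_size
        self.batch_size = batch_size

    def __len__(self):
        return math.ceil(len(self.df) / self.batch_size)

    def __getitem__(self, idx):
        rows = self.df.iloc[idx * self.batch_size:(idx + 1) * self.batch_size]
        imgs = np.stack([cv2.resize(cv2.imread(p), (self.img_size, self.img_size))
                         for p in rows["img_path"]])
        return imgs, [rows["gender"].values, rows["age"].values]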
Example #4
def main():
    args = get_args()
    weight_file = args.weight_file

    if not weight_file:
        weight_file = get_file("EfficientNetB3_224_weights.11-3.44.hdf5",
                               pretrained_model,
                               cache_subdir="pretrained_models",
                               file_hash=modhash,
                               cache_dir=os.path.dirname(
                                   os.path.abspath(__file__)))

    # load model and weights
    model_name, img_size = Path(weight_file).stem.split("_")[:2]
    img_size = int(img_size)
    cfg = OmegaConf.from_dotlist(
        [f"model.model_name={model_name}", f"model.img_size={img_size}"])
    model = get_model(cfg)
    model.load_weights(weight_file)

    dataset_root = Path(__file__).parent.joinpath("megaage_asian")
    gt_valid_path = dataset_root.joinpath("file_names.txt")
    image_paths = []
    with open(str(gt_valid_path)) as f:
        reader = f.read().strip().split('\n')
        for temp_path in reader:
            pre, post = temp_path.split('/')
            post = '_'.join(post.split('_')[1:])
            image_paths.append(pre + '/' + post)
        real_ages = [
            int(temp_path.split('/')[-1].split('_')[1]) for temp_path in reader
        ]
    batch_size = 8

    faces = np.empty((batch_size, img_size, img_size, 3))
    ages = []
    for i, image_path in tqdm(enumerate(image_paths), total=len(image_paths)):
        try:
            # paths in file_names.txt are relative to the dataset root
            img = cv2.imread(str(dataset_root.joinpath(image_path)), 1)
            faces[i % batch_size] = cv2.resize(img, (img_size, img_size))
            if (i + 1) % batch_size == 0 or i == len(image_paths) - 1:
                results = model.predict(faces)
                ages_out = np.arange(0, 101).reshape(101, 1)
                predicted_ages = results[1].dot(ages_out).flatten()
                ages += list(predicted_ages)
                # len(ages) can be larger than len(image_paths) because the last batch is padded, which is fine here.
        except Exception:
            # skip unreadable images; note that a skip misaligns ages with real_ages
            continue

    real_abs_error = 0.0

    for i, real_age in enumerate(real_ages):
        real_abs_error += abs(ages[i] - real_age)
    print("MAE Real: {}".format(real_abs_error / len(real_ages)))
Example #5
def main(cfg):
    if cfg.wandb.project:
        import wandb
        from wandb.keras import WandbCallback
        wandb.init(project=cfg.wandb.project)
        callbacks = [WandbCallback()]
    else:
        callbacks = []
    weight_file = cfg.train.weight_file

    csv_path = Path(to_absolute_path(__file__)).parent.joinpath("meta", f"{cfg.data.db}.csv")
    df = pd.read_csv(str(csv_path))
    train, val = train_test_split(df, random_state=42, test_size=0.2)
    train_gen = ImageSequence(cfg, train, "train")
    val_gen = ImageSequence(cfg, val, "val")

    strategy = tf.distribute.MirroredStrategy()
    initial_epoch = 0
    if weight_file:
        # recover the starting epoch from a checkpoint name like
        # "<model>_<size>_weights.<prev:02d>-<epoch:02d>-<val_loss:.2f>.hdf5"
        _, file_meta, *_ = weight_file.split('.')
        prev_epoch, new_epoch, _ = file_meta.split('-')
        initial_epoch = int(prev_epoch) + int(new_epoch)
    with strategy.scope():
        model = get_model(cfg)
        opt = get_optimizer(cfg)
        scheduler = get_scheduler(cfg, initial_epoch)
        model.compile(optimizer=opt,
                      loss=["sparse_categorical_crossentropy", "sparse_categorical_crossentropy"],
                      metrics=['accuracy'])
    if cfg.train.is_collab:
        checkpoint_dir = Path(to_absolute_path(__file__)).parent.parent.joinpath('drive', 'MyDrive', 'AgeGenderCheckpoint')
    else:
        checkpoint_dir = Path(to_absolute_path(__file__)).parent.joinpath('checkpoints')
    checkpoint_dir.mkdir(exist_ok=True)

    filename = "_".join([cfg.model.model_name,
                         str(cfg.model.img_size),
                         f"weights.{initial_epoch:02d}-" + "{epoch:02d}-{val_loss:.2f}.hdf5"])
    callbacks.extend([
        LearningRateScheduler(schedule=scheduler),
        get_logger(checkpoint_dir, initial_epoch, cfg.train.lr),
        ModelCheckpoint(str(checkpoint_dir) + "/" + filename,
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ])

    if weight_file:
        model.load_weights(str(checkpoint_dir) + "/" + weight_file)
    model.fit(train_gen, epochs=cfg.train.epochs, callbacks=callbacks, validation_data=val_gen,
              workers=multiprocessing.cpu_count())
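
The `initial_epoch` bookkeeping above round-trips through the checkpoint filename written by `ModelCheckpoint`. A quick standalone check of that parsing, using a hypothetical filename that follows the pattern built above:

# hypothetical checkpoint name following the pattern built above
weight_file = "EfficientNetB3_224_weights.03-11-0.85.hdf5"

_, file_meta, *_ = weight_file.split('.')        # "03-11-0"
prev_epoch, new_epoch, _ = file_meta.split('-')  # "03", "11" (loss integer part dropped)
initial_epoch = int(prev_epoch) + int(new_epoch)
print(initial_epoch)  # 14: the next run resumes its epoch count here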
Example #6
def run_train():
    args = get_args()
    cfg = Config.fromfile(args.config)

    # copy command line args to cfg
    cfg.mode = args.mode
    cfg.debug = args.debug
    cfg.fold = args.fold
    cfg.snapshot = args.snapshot
    cfg.output = args.output
    cfg.n_tta = args.n_tta
    cfg.gpu = args.gpu
    cfg.device = device

    # print setting
    show_config(cfg)

    # torch.cuda.set_device(cfg.gpu)
    set_seed(cfg.seed)

    # setup -------------------------------------
    for f in ['checkpoint', 'train', 'valid', 'test', 'backup']:
        os.makedirs(cfg.workdir + '/' + f, exist_ok=True)
    if False:  # disabled: the project backup below does not work reliably
        backup_project_as_zip(
            PROJECT_PATH,
            cfg.workdir + '/backup/code.train.%s.zip' % IDENTIFIER)

    ## model ------------------------------------
    log.info('\n')
    log.info('** model setting **')
    model = factory.get_model(cfg)

    # multi-gpu----------------------------------
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    model.to(device)

    ## ------------------------------------------
    if cfg.mode == 'train':
        do_train(cfg, model)
    elif cfg.mode == 'valid':
        do_valid(cfg, model)
    elif cfg.mode == 'test':
        do_test(cfg, model)
    else:
        log.error(f"mode '{cfg.mode}' is not in [train, valid, test]")
        exit(1)
Example #7
def main():
    args = get_args()
    weight_file = args.weight_file

    if not weight_file:
        weight_file = get_file("EfficientNetB3_224_weights.11-3.44.hdf5", pretrained_model, cache_subdir="pretrained_models",
                               file_hash=modhash, cache_dir=os.path.dirname(os.path.abspath(__file__)))

    # load model and weights
    model_name, img_size = Path(weight_file).stem.split("_")[:2]
    img_size = int(img_size)
    cfg = OmegaConf.from_dotlist([f"model.model_name={model_name}", f"model.img_size={img_size}"])
    model = get_model(cfg)
    model.load_weights(weight_file)

    dataset_root = Path(__file__).parent.joinpath("appa-real", "appa-real-release")
    validation_image_dir = dataset_root.joinpath("valid")
    gt_valid_path = dataset_root.joinpath("gt_avg_valid.csv")
    image_paths = list(validation_image_dir.glob("*_face.jpg"))
    batch_size = 8

    faces = np.empty((batch_size, img_size, img_size, 3))
    ages = []
    image_names = []

    for i, image_path in tqdm(enumerate(image_paths), total=len(image_paths)):
        faces[i % batch_size] = cv2.resize(cv2.imread(str(image_path), 1), (img_size, img_size))
        image_names.append(image_path.name[:-9])

        if (i + 1) % batch_size == 0 or i == len(image_paths) - 1:
            results = model.predict(faces)
            ages_out = np.arange(0, 101).reshape(101, 1)
            predicted_ages = results[1].dot(ages_out).flatten()
            ages += list(predicted_ages)
            # len(ages) can be larger than len(image_names) due to the last batch, but it's ok.

    name2age = {image_names[i]: ages[i] for i in range(len(image_names))}
    df = pd.read_csv(str(gt_valid_path))
    appa_abs_error = 0.0
    real_abs_error = 0.0

    for i, row in df.iterrows():
        appa_abs_error += abs(name2age[row.file_name] - row.apparent_age_avg)
        real_abs_error += abs(name2age[row.file_name] - row.real_age)

    print("MAE Apparent: {}".format(appa_abs_error / len(image_names)))
    print("MAE Real: {}".format(real_abs_error / len(image_names)))
Example #8
def main():
    weight_file = get_file("EfficientNetB3_224_weights.11-3.44.hdf5",
                           pretrained_model,
                           cache_subdir="pretrained_models",
                           file_hash=modhash,
                           cache_dir=str(Path(__file__).resolve().parent))

    # for face detection
    # detector = dlib.get_frontal_face_detector()

    # load model and weights
    model_name, img_size = Path(weight_file).stem.split("_")[:2]
    img_size = int(img_size)
    cfg = OmegaConf.from_dotlist(
        [f"model.model_name={model_name}", f"model.img_size={img_size}"])
    model = get_model(cfg)
    model.load_weights(weight_file)

    image_generator = yield_images_from_dir(IMG_PATH)  # IMG_PATH: assumed module-level constant

    faces = []
    for img in image_generator:
        image = np.array(
            cv2.resize(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                       (img_size, img_size)))
        faces.append(image)

    # predict ages and genders of the detected faces
    inp = np.array(faces)
    results = model.predict(inp)
    predicted_genders = results[0]
Example #9
def main(cfg):
    if cfg.wandb.project:
        import wandb
        from wandb.keras import WandbCallback
        wandb.init(project=cfg.wandb.project)
        callbacks = [WandbCallback()]
    else:
        callbacks = []
        
    data_path = Path("/pfs/faces/data/imdb_crop")
    #data_path = Path("/home/raoulfasel/Documents/pachyderm/age_gender_estimation/data/imdb_crop")
    
    csv_path = Path(to_absolute_path("./")).joinpath("meta", f"{cfg.data.db}.csv")
    #csv_path = Path(to_absolute_path("/pfs/faces")).joinpath("meta", f"{cfg.data.db}.csv")
    print(csv_path)
    df = pd.read_csv(str(csv_path))
    train, val = train_test_split(df, random_state=42, test_size=0.1)
    train_gen = ImageSequence(cfg, train, "train", data_path)
    val_gen = ImageSequence(cfg, val, "val", data_path)

    strategy = tf.distribute.MirroredStrategy()

    with strategy.scope():
        model = get_model(cfg)
        opt = get_optimizer(cfg)
        scheduler = get_scheduler(cfg)
        model.compile(optimizer=opt,
                      loss=["sparse_categorical_crossentropy", "sparse_categorical_crossentropy"],
                      metrics=['accuracy'])

    #checkpoint_dir = Path(to_absolute_path("age_gender_estimation")).joinpath("checkpoint")
    checkpoint_dir = Path(to_absolute_path("/pfs/build")).joinpath("checkpoint")

    print(checkpoint_dir)
    checkpoint_dir.mkdir(exist_ok=True)
    filename = "_".join([cfg.model.model_name,
                         str(cfg.model.img_size),
                         "weights.{epoch:02d}-{val_loss:.2f}.hdf5"])
    callbacks.extend([
        LearningRateScheduler(schedule=scheduler),
        ModelCheckpoint(str(checkpoint_dir) + "/" + filename,
                        monitor="val_loss",
                        verbose=1,
                        save_best_only=True,
                        mode="auto")
    ])

    model.fit(train_gen, epochs=cfg.train.epochs, callbacks=callbacks, validation_data=val_gen,
              workers=multiprocessing.cpu_count())
    
    model.save("tensorflow_deployment_package/tensorflow_model.h5")

    with open('/opt/ubiops/token', 'r') as reader:
        API_TOKEN = reader.read()
    client = ubiops.ApiClient(ubiops.Configuration(api_key={'Authorization': API_TOKEN},
                                                   host='https://api.ubiops.com/v2.1'))
    api = ubiops.CoreApi(client)
    
    # Create the deployment
    deployment_template = ubiops.DeploymentCreate(
        name=DEPLOYMENT_NAME,
        description='Tensorflow deployment',
        input_type='structured',
        output_type='structured',
        input_fields=[
            ubiops.DeploymentInputFieldCreate(
                name='input_image',
                data_type='blob',
            ),
        ],
        output_fields=[
            ubiops.DeploymentOutputFieldCreate(
                name='output_image',
                data_type='blob'
            ),
        ],
        labels={"demo": "tensorflow"}
    )

    api.deployments_create(
        project_name=PROJECT_NAME,
        data=deployment_template
    )

    # Create the version
    version_template = ubiops.DeploymentVersionCreate(
        version=DEPLOYMENT_VERSION,
        language='python3.8',
        instance_type="2048mb",
        minimum_instances=0,
        maximum_instances=1,
        maximum_idle_time=1800 # = 30 minutes
    )

    api.deployment_versions_create(
        project_name=PROJECT_NAME,
        deployment_name=DEPLOYMENT_NAME,
        data=version_template
    )

    # Zip the deployment package
    shutil.make_archive('tensorflow_deployment_package', 'zip', '.', 'tensorflow_deployment_package')

    # Upload the zipped deployment package
    file_upload_result = api.revisions_file_upload(
        project_name=PROJECT_NAME,
        deployment_name=DEPLOYMENT_NAME,
        version=DEPLOYMENT_VERSION,
        file='tensorflow_deployment_package.zip'
    )