Example #1
def main():
    args = get_args()
    if args.opts:
        cfg.merge_from_list(args.opts)
    cfg.freeze()
    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)
    # load checkpoint
    resume_path = args.resume
    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))
    if device == "cuda":
        cudnn.benchmark = True
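    # test data: fixed order, no augmentation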
    test_dataset = FaceDataset(args.data_dir,
                               "test",
                               img_size=cfg.MODEL.IMG_SIZE,
                               augment=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=cfg.TEST.BATCH_SIZE,
                             shuffle=False,
                             num_workers=cfg.TRAIN.WORKERS,
                             drop_last=False)
    ####################################################### KE LIANG #######################################################
    print("=> start testing")
    _, _, test_mae = train(test_loader, model, None, None, 0, device)
    print(f"test mae: {test_mae:.3f}")
Example #2
def parse_args():
    parser = argparse.ArgumentParser(description='Train keypoints network')
    # general
    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        required=False,
                        type=str)

    parser.add_argument('--batchsize',
                        help='batchsize',
                        type=int,
                        required=False)

    parser.add_argument('--epochs', help='epochs', type=int, required=False)

    parser.add_argument('opts',
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

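    # merge the optional config file and any command-line overrides into the global cfg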
    if args.cfg is not None:
        cfg.merge_from_file(args.cfg)
    if args.batchsize is not None:
        cfg.BATCHSIZE = args.batchsize
    if args.epochs is not None:
        cfg.EPOCHS = args.epochs
    cfg.merge_from_list(args.opts)

    return args
Example #3
                    pred_per_level
                }]

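    # report average per-iteration time and peak GPU memory (GiB)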
    logger.info('batch_time: {time:.4f} batch_memory: {memory:.2f}'.format(
        time=(time.time() - end) / iteration,
        memory=torch.cuda.max_memory_allocated() / 1024.0**3))

    return pred_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="inference")
    parser.add_argument("opts", default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    config.merge_from_list(args.opts)
    config.freeze()

    save_dir = config.save_dir
    mkdir(save_dir)
    logger = setup_logger("inference", save_dir, 0)
    logger.info("Running with config:\n{}".format(config))

    device = torch.device(config.device)
    num_types = len(config.boundaries) + 2

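    # build the generator from a BertConfig sized by the configured boundaries and load its checkpoint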
    generator = Generator(BertConfig(type_vocab_size=num_types))
    generator = generator.to(device)
    g_checkpointer = Checkpointer(model=generator, logger=logger)
    g_checkpointer.load(config.model_path, True)
Example #4
def main():
    args = get_args()
    if args.opts:
        cfg.merge_from_list(args.opts)
    cfg.freeze()
    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)
    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH)
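    # pick the optimizer from the config: SGD with momentum or Adam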
    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)
    # optionally resume from a checkpoint
    resume_path = args.resume
    if resume_path:
        if Path(resume_path).is_file():
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = torch.load(resume_path, map_location="cpu")
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_path, checkpoint['epoch']))
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(resume_path))
    if args.multi_gpu:
        model = nn.DataParallel(model)
    if device == "cuda":
        cudnn.benchmark = True
############################################### KE LIANG ###############################################################
    criterion = nn.CrossEntropyLoss().to(device)
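    # training data with augmentation enabled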
    train_dataset = FaceDataset(args.data_dir,
                                "train",
                                img_size=cfg.MODEL.IMG_SIZE,
                                augment=True,
                                age_stddev=cfg.TRAIN.AGE_STDDEV)
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.TRAIN.WORKERS,
                              drop_last=True)
    #for i, (input, target) in enumerate(train_loader):
    #   print("target: {}".format(target))

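    # step-wise learning-rate decay; last_epoch keeps the schedule aligned when resuming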
    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
    best_train_mae = 10000.0
    train_writer = None
    if args.tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                     opts_prefix + "_train")
    for epoch in range(start_epoch, cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc, train_mae = train(train_loader, model,
                                                 criterion, optimizer, epoch,
                                                 device)
        if args.tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            train_writer.add_scalar("mae", train_mae, epoch)
        # checkpoint
        if train_mae < best_train_mae:
            print(
                f"=> [epoch {epoch:03d}] best train mae was improved from {best_train_mae:.3f} to {train_mae:.3f}"
            )
            model_state_dict = model.module.state_dict() if args.multi_gpu else model.state_dict()
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': cfg.MODEL.ARCH,
                    'state_dict': model_state_dict,
                    'optimizer_state_dict': optimizer.state_dict()
                },
                str(
                    checkpoint_dir.joinpath(
                        "epoch{:03d}_{:.5f}_{:.4f}.pth".format(
                            epoch, train_loss, train_mae))))
            best_train_mae = train_mae
        else:
            print(
                f"=> [epoch {epoch:03d}] best train mae was not improved from {best_train_mae:.3f} ({train_mae:.3f})"
            )
        # adjust learning rate
        scheduler.step()
    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best train mae: {best_train_mae:.3f}")
Example #5
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    if args.output_dir is not None:
        if args.img_dir is None:
            raise ValueError("=> --img_dir argument is required if --output_dir is used")

        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = args.resume

    if resume_path is None:
        resume_path = Path(__file__).resolve().parent.joinpath("misc", "epoch044_0.02343_3.9984.pth")

        if not resume_path.is_file():
            print(f"=> model path is not set; start downloading trained model to {resume_path}")
            url = "https://github.com/yu4u/age-estimation-pytorch/releases/download/v1.0/epoch044_0.02343_3.9984.pth"
            urllib.request.urlretrieve(url, str(resume_path))
            print("=> download finished")

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

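    # switch the model to evaluation mode and set up the dlib face detector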
    model.eval()
    margin = args.margin
    img_dir = args.img_dir
    detector = dlib.get_frontal_face_detector()
    img_size = cfg.MODEL.IMG_SIZE
    image_generator = yield_images_from_dir(img_dir) if img_dir else yield_images()

    with torch.no_grad():
        for img, name in image_generator:
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img_h, img_w, _ = np.shape(input_img)

            # detect faces using dlib detector
            detected = detector(input_img, 1)
            faces = np.empty((len(detected), img_size, img_size, 3))

            if len(detected) > 0:
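                # expand each detection by the margin, crop the face, and resize to the model input size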
                for i, d in enumerate(detected):
                    x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()
                    xw1 = max(int(x1 - margin * w), 0)
                    yw1 = max(int(y1 - margin * h), 0)
                    xw2 = min(int(x2 + margin * w), img_w - 1)
                    yw2 = min(int(y2 + margin * h), img_h - 1)
                    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
                    cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))

                # predict ages
                inputs = torch.from_numpy(np.transpose(faces.astype(np.float32), (0, 3, 1, 2))).to(device)
                outputs = F.softmax(model(inputs), dim=-1).cpu().numpy()
                ages = np.arange(0, 101)
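                # expected age: probability-weighted average over the 0-100 range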
                predicted_ages = (outputs * ages).sum(axis=-1)

                # draw results
                for i, d in enumerate(detected):
                    label = "{}".format(int(predicted_ages[i]))
                    draw_label(img, (d.left(), d.top()), label)

            if args.output_dir is not None:
                output_path = output_dir.joinpath(name)
                cv2.imwrite(str(output_path), img)
            else:
                cv2.imshow("result", img)
                key = cv2.waitKey(-1) if img_dir else cv2.waitKey(30)

                if key == 27:  # ESC
                    break