Example #1
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = args.resume

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
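        # let cuDNN benchmark convolution algorithms and cache the fastest ones (input size is fixed here)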
        cudnn.benchmark = True

    test_dataset = FaceDataset(args.data_dir, "test", img_size=cfg.MODEL.IMG_SIZE, augment=False)
    test_loader = DataLoader(test_dataset, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
                             num_workers=cfg.TRAIN.WORKERS, drop_last=False)

    print("=> start testing")
    _, _, test_mae = validate(test_loader, model, None, 0, device)
    print(f"test mae: {test_mae:.3f}")
Example #2
def main_cs(mydict):
    # extra command-line arguments for this script
    args = get_args()
    # parameters passed in via the mydict argument
    my_data_dir = mydict["data_dir"]
    my_ifSE = mydict["ifSE"]
    my_l1loss = mydict["l1loss"]
    my_resume = mydict["resume"]
    if my_l1loss:
        l1loss = 0.1
    else:
        l1loss = 0.0

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    # model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    model = my_model(my_ifSE)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = my_resume

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=>ckpt loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError(
            "=> no checkpoTrueint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    test_dataset = FaceDataset_morph2(my_data_dir,
                                      "test",
                                      img_size=cfg.MODEL.IMG_SIZE,
                                      augment=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=cfg.BATCH_SIZE,
                             shuffle=False,
                             num_workers=cfg.TRAIN.WORKERS,
                             drop_last=False)

    print("=> start testing")
    _, _, test_cs = validate_cs(test_loader, model, None, 0, device, l1loss)
    print(f"test cs list: {test_cs}")
    return test_cs
Example #3
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # TODO: delete
    if torch.cuda.device_count() > 1:
        print("Let's use [1,2,4,5] GPUs!")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        model = nn.DataParallel(model, device_ids=[1, 2, 4, 5])
    model.to(device)

    # load checkpoint
    resume_path = args.resume

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    test_dataset = FaceDataset(args.data_dir,
                               "test",
                               img_size=cfg.MODEL.IMG_SIZE,
                               augment=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=cfg.TEST.BATCH_SIZE,
                             shuffle=False,
                             num_workers=cfg.TRAIN.WORKERS,
                             drop_last=False)
    criterion = nn.CrossEntropyLoss().to(device)

    print("=> start testing")
    _, _, test_mae, gen_acc = validate(test_loader, model, criterion, 0,
                                       device)
    print(f"Test age mae: {test_mae:.3f}")
    print(f"Test gender accuracy: {gen_acc:.2f}")
Example #4
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, homosedastic=cfg.MODEL.HOMOSCEDASTIC)

    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
Example #5
def main():
    start_time = smtp.print_time("Start testing!!!")
    # extra command-line arguments for this script
    args = get_args()
    # parameters passed into main
    img_path = args.img_path
    my_resume = args.my_resume

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = my_model()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = my_resume

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=>ckpt loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError(
            "=> no checkpoTrueint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    print("=> start testing")
    # img_path, img_size, model, device
    predict_age = validate_age_estimation(img_path, cfg.MODEL.IMG_SIZE, model,
                                          device)
    print(f"predict_age: {predict_age:.2f}")
    end_time = smtp.print_time("Testing finished!!!")
    print(smtp.date_gap_abs(start_time, end_time))
    return int(round(predict_age))
Example #6
def main():
    parser = argparse.ArgumentParser(description="YAML parser")
    parser.add_argument(
        "--config-file",
        default="./setting.yaml",
        metavar="FILE",
        help="path to config file",
    )
    # this makes sure you can use a command like
    # python main.py --config-file setting.yaml MODEL.ARCH PSP
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    print(cfg.MODEL.ARCH)
Example #7
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    group = {
        0: "  0-5",
        1: " 6-10",
        2: "11-20",
        3: "21-30",
        4: "31-40",
        5: "41-60",
        6: "  61-"
    }
    group_count = torch.zeros(7)
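    # per-age-group sample counts, presumably accumulated inside validate() and used to average the group MAEs printed below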
    get_ca = True
    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH)

    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # optionally resume from a checkpoint
    resume_path = args.resume

    if resume_path:
        if Path(resume_path).is_file():
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = torch.load(resume_path, map_location="cpu")
            start_epoch = checkpoint['epoch'] - 1
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_path, checkpoint['epoch']))
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(resume_path))

    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True

    criterion = nn.CrossEntropyLoss().to(device)

    gender = False
    gender_count = "False"
    if args.dataset == "Morph" or args.dataset == "imdb_wiki":
        gender = True
        gender_count = torch.zeros(2)

    val_dataset = FaceVal(args.data_dir,
                          "valid",
                          args.dataset,
                          img_size=cfg.MODEL.IMG_SIZE,
                          augment=False,
                          label=True,
                          gender=gender,
                          expand=args.expand)
    val_loader = DataLoader(val_dataset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.TRAIN.WORKERS,
                            drop_last=False)
    print(len(val_dataset))
    # validate
    if args.ldl:
        val_loss, val_acc, val_mae, maes, df = validate_ldl(
            val_loader, model, criterion, start_epoch, device, group_count,
            gender_count, get_ca)
    else:
        val_loss, val_acc, val_mae, maes, df = validate(
            val_loader, model, criterion, start_epoch, device, group_count,
            gender_count, get_ca)

    print("=> Validation finished")
    print(f"additional opts: {args.opts}")
    print(f"Val MAE: {val_mae:.4f}")

    group_mae = maes[0]
    print("Group MAE:")
    for ind, interval in enumerate(group.values()):
        print(interval + ":", (group_mae[ind] / group_count[ind]).item(), "/",
              group_count[ind].item())

    ca = maes[1]

    if gender:
        gender_mae = maes[1]
        ca = maes[2]
        for ind, gen in enumerate(["  Male", "Female"]):
            print(gen + ":", (gender_mae[ind] / gender_count[ind]).item(), "/",
                  gender_count[ind].item())

    if get_ca:
        print("CA3: {:.2f} CA5: {:.2f} CA7: {:.2f}".format(
            ca[3] * 100, ca[5] * 100, ca[7] * 100))
    csv_path = resume_path.split("/")[-1]
    csv_path = csv_path[:-4]
    df.to_csv("csv/" + csv_path + ".csv", index=False)
Example #8
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    if args.output_dir is not None:
        if args.img_dir is None:
            raise ValueError(
                "=> --img_dir argument is required if --output_dir is used")

        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = args.resume

    if resume_path is None:
        resume_path = Path(__file__).resolve().parent.joinpath(
            "misc", "megaage_fusion.pth")

        if not resume_path.is_file():
            print(
                f"=> model path is not set; start downloading trained model to {resume_path}"
            )
            url = "https://github.com/yjl450/age-estimation-ldl-pytorch/releases/download/v1.0/megaage_fusion.pth"
            urllib.request.urlretrieve(url, str(resume_path))
            print("=> download finished")

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    model.eval()
    img_dir = args.img_dir
    # detector = dlib.get_frontal_face_detector()
    mtcnn = MTCNN(device=device, post_process=False, keep_all=False)
    img_size = cfg.MODEL.IMG_SIZE
    image_generator = yield_images_from_dir(
        img_dir) if img_dir else yield_images()
    rank = torch.Tensor([i for i in range(101)]).to(device)
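    # ages 0-100; with --ldl the predicted age is the expectation of the softmax output over these ages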

    with torch.no_grad():
        for img, name in image_generator:  # start processing image
            start = perf_counter()
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(input_img)
            # image.show()
            detected, _, landmarks = mtcnn.detect(image, landmarks=True)
            if img_dir and landmarks is not None:
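                # estimate the in-plane rotation from the first face's landmarks (presumably the two eye points) and rotate the face upright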
                deg = angel(landmarks[0][0], landmarks[0][1])
                image = image.rotate(deg, resample=Image.BICUBIC, expand=True)
                aligned, _ = mtcnn.detect(image, landmarks=False)
            else:
                aligned = detected

            if aligned is not None and len(detected) > 0:
                detected = detected.astype(int)
                if args.expand > 0:
                    box = expand_bbox(image.size,
                                      detected[0],
                                      ratio=args.expand)
                else:
                    box = detected[0]
                cv2.rectangle(img, (detected[0][0], detected[0][1]),
                              (detected[0][2], detected[0][3]),
                              (255, 255, 255), 2)
                cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]),
                              (255, 0, 0), 2)

                if img_dir:
                    aligned = aligned.astype(int)
                    if args.expand > 0:
                        box = expand_bbox(image.size,
                                          aligned[0],
                                          ratio=args.expand)
                    else:
                        box = aligned[0]
                image = image.crop(box)
                # image.show()
                image = image.resize((img_size, img_size))  # PIL resize returns a new image
                image = torchvision.transforms.ToTensor()(image)
                image = image.unsqueeze(0).to(device)

                # predict ages
                outputs = model(image)
                outputs = F.softmax(outputs, dim=1)
                if args.ldl:
                    predicted_ages = torch.sum(outputs * rank, dim=1)
                else:
                    _, predicted_ages = outputs.max(1)

                # draw results
                # for i, d in enumerate(detected):
                label = "{}".format(int(predicted_ages[0]))
                draw_label(img, (detected[0][0], detected[0][1]), label)

                # faces = np.array(faces.permute(1, 2, 0)).astype(np.uint8)
                # faces = cv2.cvtColor(faces, cv2.COLOR_RGB2BGR)

            if args.output_dir is not None:
                output_path = output_dir.joinpath(name)
                cv2.imwrite(str(output_path), img)
            else:
                elapsed = perf_counter() - start
                cv2.putText(
                    img,
                    "FPS: " + "{:.1f} Press ESC to exit".format(60 / elapsed),
                    (10, 20), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1)
                cv2.imshow("result", img)
                key = cv2.waitKey(-1) if img_dir else cv2.waitKey(30)

                if key == 27:  # ESC
                    break
Example #9
def main():
    args = get_args()
    prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
    weightsPath = os.path.sep.join(
        ["face_detector", "res10_300x300_ssd_iter_140000.caffemodel"])
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    if args.output_dir is not None:
        if args.img_dir is None:
            raise ValueError(
                "=> --img_dir argument is required if --output_dir is used")

        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH,
                      pretrained=cfg.MODEL.PRETRAINED)
    device = args.device
    model = model.to(device)

    # load checkpoint
    resume_path = args.resume

    if resume_path is None:
        resume_path = Path(__file__).resolve().parent.joinpath("misc", MODEL)

        if not resume_path.is_file():
            print(
                f"=> model path is not set; start downloading trained model to {resume_path}"
            )
            url = f"https://github.com/anhlnt/age-estimation-pytorch/releases/download/v{VER}/{MODEL}"
            urllib.request.urlretrieve(url, str(resume_path))
            print("=> download finished")

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    model.eval()
    margin = args.margin
    img_dir = args.img_dir
    img_size = cfg.MODEL.IMG_SIZE
    image_generator = yield_images_from_dir(
        img_dir) if img_dir else yield_images()
    start = time.time()

    with torch.no_grad():
        for img, name in image_generator:
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img_h, img_w, _ = np.shape(input_img)

            # detect faces using ssd
            detected = detect_mask(img, faceNet)
            faces = np.empty((len(detected), img_size, img_size, 3))

            if len(detected) > 0:
                for i, d in enumerate(detected):
                    x1, y1, x2, y2 = d.left(), d.top(), d.right() + 1, d.bottom() + 1
                    w, h = d.width(), d.height()
                    xw1 = max(int(x1 - margin * w), 0)
                    yw1 = max(int(y1 - margin * h), 0)
                    xw2 = min(int(x2 + margin * w), img_w - 1)
                    yw2 = min(int(y2 + margin * h), img_h - 1)
                    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
                    cv2.imwrite("test/test.jpg", img)
                    print("name: ", name)
                    print("face: ", img[yw1:yw2 + 1, xw1:xw2 + 1])
                    print("x1, y1, x2, y2: ", x1, y1, x2, y2)
                    print("xw1, yw1, xw2, yw2; ", xw1, yw1, xw2, yw2)
                    print("img_w, img_h: ", img_w, img_h)
                    print("len(detected): ", len(detected))
                    face = img[yw1:yw2 + 1, xw1:xw2 + 1]
                    if len(face):
                        faces[i] = cv2.resize(face, (img_size, img_size))

                # predict ages
                inputs = torch.from_numpy(
                    np.transpose(faces.astype(np.float32),
                                 (0, 3, 1, 2))).to(device)
                outputs = F.softmax(model(inputs), dim=-1).cpu().numpy()
                # print("outputs: ", outputs)
                ages = np.arange(0, 101)
                predicted_ages = (outputs * ages).sum(axis=-1)
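                # predicted age = expectation of the 101-way softmax distribution over ages 0-100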

                # draw results
                for i, d in enumerate(detected):
                    try:
                        label = "{}".format(int(predicted_ages[i]))
                    except BaseException:
                        label = "-"
                    draw_label(img, (d.left(), d.top()), label)

            draw_label(img, (50, 50),
                       "{:.2f}fps".format(1.0 / (time.time() - start)))
            if args.output_dir is not None:
                output_path = output_dir.joinpath(name)
                cv2.imwrite(str(output_path), img)
            else:
                cv2.imshow("result", img)
                key = cv2.waitKey(-1) if img_dir else cv2.waitKey(30)

                if key == 27:  # ESC
                    break

            start = time.time()
Example #10
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # display nb of workers
    print(f"number of train workers {cfg.TRAIN.WORKERS}")

    # fetch features
    with open('train_features.pkl', 'rb') as f:
        train_features = pickle.load(f)
    with open('train_labels.pkl', 'rb') as f:
        train_labels = pickle.load(f)

    with open('valid_features.pkl', 'rb') as f:
        valid_features = pickle.load(f)
    with open('valid_labels.pkl', 'rb') as f:
        valid_labels = pickle.load(f)

    n_features = (train_features.shape[1] * train_features.shape[2] *
                  train_features.shape[3])
    n_classes = 101

    # create model
    if cfg.MODEL.ARCH_STYLE == 'classifier':
        print("=> creating classifier")
        model = get_classifier(n_features=n_features, n_classes=n_classes)
    else:
        print("=> creating regressor")
        model = get_regressor(n_features=n_features, n_classes=n_classes)

    # Create optimizer
    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # GPU config
    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True

    # criterion
    if cfg.MODEL.ARCH_STYLE == 'classifier':
        if cfg.MODEL.SMOOTHING:
            print("=> using label smoothing")
            criterion = LabelSmoothingLoss(
                std_smoothing=cfg.MODEL.STD_SMOOTHING,
                n_classes=n_classes).to(device)
        else:
            criterion = nn.CrossEntropyLoss().to(device)
    else:
        if cfg.MODEL.ALEATORIC:
            criterion = HeteroscedasticGaussianLoss().to(device)
        else:
            criterion = nn.L1Loss(reduction="sum").to(device)

    # loaders
    train_loader = get_feature_loader(train_features,
                                      train_labels,
                                      batch_size=cfg.TEST.BATCH_SIZE,
                                      shuffle=True,
                                      num_workers=cfg.TRAIN.WORKERS,
                                      drop_last=True)

    val_loader = get_feature_loader(valid_features,
                                    valid_labels,
                                    batch_size=cfg.TEST.BATCH_SIZE,
                                    shuffle=False,
                                    num_workers=cfg.TRAIN.WORKERS,
                                    drop_last=False)

    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
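    # last_epoch=start_epoch - 1 keeps the step-decay schedule aligned when resuming from a checkpoint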
    best_val_mae = 10000.0
    train_writer = None

    if args.tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                     opts_prefix + "_train")
        val_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                   opts_prefix + "_val")

    print('=> Start training')
    for epoch in range(start_epoch, cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, device)

        # validate
        val_loss, val_acc, val_mae = validate(val_loader, model, criterion,
                                              epoch, device)

        if args.tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            val_writer.add_scalar("loss", val_loss, epoch)
            val_writer.add_scalar("acc", val_acc, epoch)
            val_writer.add_scalar("mae", val_mae, epoch)

        # checkpoint
        if val_mae < best_val_mae:
            print(
                f"=> [epoch {epoch:03d}] best val mae was improved from {best_val_mae:.3f} to {val_mae:.3f}"
            )
            model_state_dict = (model.module.state_dict()
                                if args.multi_gpu else model.state_dict())
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': cfg.MODEL.ARCH,
                    'state_dict': model_state_dict,
                    'optimizer_state_dict': optimizer.state_dict()
                },
                str(
                    checkpoint_dir.joinpath(
                        "epoch{:03d}_{:.5f}_{:.4f}.pth".format(
                            epoch, val_loss, val_mae))))
            best_val_mae = val_mae
        else:
            print(
                f"=> [epoch {epoch:03d}] best val mae was not improved from {best_val_mae:.3f} ({val_mae:.3f})"
            )

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best val mae: {best_val_mae:.3f}")
Example #11
    rand_movie = random.sample(range(movie_len), cfg.DEMO.PRODUCED_MOVIES)
    for i in rand_movie:
        # for i in range(0, movie_len):
        ML_dict = {col: MLs.iloc[i][col] for col in MLs.columns}
        d = {'age': f'{age}', 'gender': gender_out}
        d.update(ML_dict)
        data.append(d)
    data_df = pd.DataFrame(data)
    return data_df


####################          GLOBAL           ####################

args = get_args()
if args.opts:
    cfg.merge_from_list(args.opts)

cfg.freeze()

if args.output_dir is not None:
    if args.img_dir is None:
        raise ValueError(
            "=> --img_dir argument is required if --output_dir is used")

    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

# create model
print("=> creating model '{}'".format(cfg.MODEL.ARCH))
model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
device = "cuda" if torch.cuda.is_available() else "cpu"
Example #12
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    if args.output_dir is not None:
        if args.img_dir is None:
            raise ValueError(
                "=> --img_dir argument is required if --output_dir is used")

        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = args.resume

    if resume_path is None:
        resume_path = Path(__file__).resolve().parent.joinpath(
            "misc", "epoch044_0.02343_3.9984.pth")

        if not resume_path.is_file():
            print(
                f"=> model path is not set; start downloading trained model to {resume_path}"
            )
            url = "https://github.com/yu4u/age-estimation-pytorch/releases/download/v1.0/epoch044_0.02343_3.9984.pth"
            urllib.request.urlretrieve(url, str(resume_path))
            print("=> download finished")

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    model.eval()
    margin = args.margin
    img_dir = args.img_dir
    cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()

    detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)
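    # dlib CNN (MMOD) face detector; detector(input_img, 1) below upsamples the image once before detecting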
    img_size = cfg.MODEL.IMG_SIZE
    image_generator = yield_images_from_dir(
        img_dir) if img_dir else yield_images()

    with torch.no_grad():
        for img, name in image_generator:
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img_h, img_w, _ = np.shape(input_img)

            # detect faces using dlib detector
            detected = [r.rect for r in detector(input_img, 1)]
            faces = np.empty((len(detected), img_size, img_size, 3))

            if len(detected) > 0:
                for i, d in enumerate(detected):
                    x1, y1, x2, y2 = d.left(), d.top(), d.right() + 1, d.bottom() + 1
                    w, h = d.width(), d.height()
                    xw1 = max(int(x1 - margin * w), 0)
                    yw1 = max(int(y1 - margin * h), 0)
                    xw2 = min(int(x2 + margin * w), img_w - 1)
                    yw2 = min(int(y2 + margin * h), img_h - 1)
                    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
                    cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1],
                                          (img_size, img_size))

                # predict ages
                inputs = torch.from_numpy(
                    np.transpose(faces.astype(np.float32),
                                 (0, 3, 1, 2))).to(device)
                outputs = F.softmax(model(inputs), dim=-1).cpu().numpy()
                ages = np.arange(0, 101)
                predicted_ages = (outputs * ages).sum(axis=-1)

                # draw results
                for i, d in enumerate(detected):
                    label = "{}".format(int(predicted_ages[i]))
                    draw_label(img, (d.left(), d.top()), label)

            if args.output_dir is not None:
                output_path = output_dir.joinpath(name)
                cv2.imwrite(str(output_path), img)
            else:
                cv2.imshow("result", img)
                key = cv2.waitKey(-1) if img_dir else cv2.waitKey(30)

                if key == 27:  # ESC
                    break
Example #13
def main(mydict):
    print("开始训练时间:")
    start_time = time.strftime('%Y-%m-%d %H:%M:%S',
                               time.localtime(time.time()))
    print(start_time)
    # extra command-line arguments for this script
    args = get_args()
    # parameters passed in via the mydict argument
    my_data_dir = mydict["data_dir"]
    my_tensorboard = mydict["tensorboard"]
    my_checkpoint = mydict["checkpoint"]
    my_ifSE = mydict["ifSE"]
    my_l1loss = mydict["l1loss"]
    if my_l1loss:
        l1loss = 0.1  # 0.1
        # l1loss = my_l1value
    else:
        l1loss = 0.0

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0

    # checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir = Path(my_checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format("se_resnext50_32x4d"))
    # model = get_model(model_name="se_resnext50_32x4d")
    model = my_model(my_ifSE)

    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # optionally resume from a checkpoint
    resume_path = args.resume
    if resume_path:
        print(Path(resume_path).is_file())
        if Path(resume_path).is_file():
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = torch.load(resume_path, map_location="cpu")
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_path, checkpoint['epoch']))
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(resume_path))

    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True

    # loss criterion
    criterion = nn.CrossEntropyLoss().to(device)
    train_dataset = FaceDataset_ceface(my_data_dir,
                                       "train",
                                       img_size=cfg.MODEL.IMG_SIZE,
                                       augment=True,
                                       age_stddev=cfg.TRAIN.AGE_STDDEV)
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.TRAIN.WORKERS,
                              drop_last=True)

    val_dataset = FaceDataset_ceface(my_data_dir,
                                     "valid",
                                     img_size=cfg.MODEL.IMG_SIZE,
                                     augment=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=cfg.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.TRAIN.WORKERS,
                            drop_last=False)

    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
    best_val_mae = 10000.0
    train_writer = None
    val_mae_list = []
    train_loss_list = []
    val_loss_list = []

    if my_tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(log_dir=my_tensorboard + "/" +
                                     opts_prefix + "_train")
        val_writer = SummaryWriter(log_dir=my_tensorboard + "/" + opts_prefix +
                                   "_val")

    for epoch in range(start_epoch, 80):  # cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, device, l1loss)
        train_loss_list.append(train_loss)

        # validate
        val_loss, val_acc, val_mae = validate(val_loader, model, criterion,
                                              epoch, device, l1loss)
        val_mae_list.append(val_mae)
        val_loss_list.append(val_loss)

        if my_tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            val_writer.add_scalar("loss", val_loss, epoch)
            val_writer.add_scalar("acc", val_acc, epoch)
            val_writer.add_scalar("mae", val_mae, epoch)

        if val_mae < best_val_mae or val_mae > 0:
            print(
                f"=> [epoch {epoch:03d}] best val mae was improved from {best_val_mae:.3f} to {val_mae:.3f}"
            )
            best_val_mae = val_mae
            # checkpoint
            if val_mae < 4.0:
                model_state_dict = (model.module.state_dict()
                                    if args.multi_gpu else model.state_dict())
                torch.save(
                    {
                        'epoch': epoch + 1,
                        'arch': cfg.MODEL.ARCH,
                        'state_dict': model_state_dict,
                        'optimizer_state_dict': optimizer.state_dict()
                    },
                    str(
                        checkpoint_dir.joinpath(
                            "epoch{:03d}_{:.5f}_{:.4f}.pth".format(
                                epoch, val_loss, val_mae))))
        else:
            print(
                f"=> [epoch {epoch:03d}] best val mae was not improved from {best_val_mae:.3f} ({val_mae:.3f})"
            )

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best val mae: {best_val_mae:.3f}")
    print("结束训练时间:")
    end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print(end_time)
    print("训练耗时: " + smtp.date_gap(start_time, end_time))
    # 发邮件
    smtp.main(
        dict_={
            "共训练epochs: ": cfg.TRAIN.EPOCHS,
            "训练耗时: ": smtp.date_gap(start_time, end_time),
            "最低val_mae: ": best_val_mae,
            "平均val_mae: ": np.array(val_mae_list).mean(),
            "vale_mae_list: ": val_mae_list,
            "train_loss_list: ": train_loss_list,
            "val_loss_list: ": val_loss_list,
            "MODEL.IMG_SIZE: ": cfg.MODEL.IMG_SIZE,
            "BATCH_SIZE: ": cfg.BATCH_SIZE,
            "LOSS.l1: ": l1loss,
            "TRAIN.LR: ": cfg.TRAIN.LR,
            "TRAIN.LR_DECAY_STEP: ": cfg.TRAIN.LR_DECAY_STEP,
            "TRAIN.LR_DECAY_RATE:": cfg.TRAIN.LR_DECAY_RATE,
            "TRAIN.OPT: ": cfg.TRAIN.OPT,
            "MODEL.ARCH:": cfg.MODEL.ARCH
        })
    return best_val_mae
Example #14
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    if args.output_dir is not None:
        if args.img_dir is None:
            raise ValueError(
                "=> --img_dir argument is required if --output_dir is used")

        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = args.resume

    # load the author's pretrained model
    if resume_path is None:
        resume_path = Path(__file__).resolve().parent.joinpath(
            "misc", "epoch044_0.02343_3.9984.pth")

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    model.eval()
    margin = args.margin
    img_dir = args.img_dir
    detector = dlib.get_frontal_face_detector()
    img_size = cfg.MODEL.IMG_SIZE
    image_generator = yield_images_from_dir(
        img_dir) if img_dir else yield_images()

    starttime_without = time.time()  ### delete

    with torch.no_grad():
        for img, name in image_generator:
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img_h, img_w, _ = np.shape(input_img)

            # detect faces using dlib detector
            detected = detector(input_img, 1)
            faces = np.empty((len(detected), img_size, img_size, 3))

            if len(detected) > 0:
                for i, d in enumerate(detected):
                    x1, y1, x2, y2 = d.left(), d.top(), d.right() + 1, d.bottom() + 1
                    w, h = d.width(), d.height()
                    xw1 = max(int(x1 - margin * w), 0)
                    yw1 = max(int(y1 - margin * h), 0)
                    xw2 = min(int(x2 + margin * w), img_w - 1)
                    yw2 = min(int(y2 + margin * h), img_h - 1)
                    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
                    cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1],
                                          (img_size, img_size))

                # predict ages
                inputs = torch.from_numpy(
                    np.transpose(faces.astype(np.float32),
                                 (0, 3, 1, 2))).to(device)

                starttime = time.time()  ### delete

                age_out, gender_out = model(inputs)

                ### delete
                endtime = time.time()
                print('Only age model. Executed Time:', (endtime - starttime),
                      'sec')

                age_prob = F.softmax(age_out, dim=-1).cpu().numpy()
                ages = np.arange(0, 101)
                predicted_ages = (age_prob * ages).sum(axis=-1)
                predicted_gender = gender_out.max(1)[1]
                print(
                    f'predicted_ages:{predicted_ages}, predicted_gender:{predicted_gender}'
                )

                for i, (d, ages, gender) in enumerate(
                        zip(detected, predicted_ages, predicted_gender)):
                    starttime = time.time()  ### delete

                    df = produce_data(ages, gender)
                    # print(df)

                    ### delete
                    endtime = time.time()
                    print('produce_data. Executed Time:',
                          (endtime - starttime), 'sec')
                    starttime = time.time()  ### delete

                    test_model_input, linear_feature_columns, dnn_feature_columns = data_preprocess(
                        df)
                    # FIXED!!!! crashed because df's shape differed from training (e.g. df containing only male samples => crash)
                    pred_movie_list, pred_rating = recommend_movies(
                        './recommend_system/save_model/xDeepFM_MSE1.0181.h5',
                        test_model_input, linear_feature_columns,
                        dnn_feature_columns, 'cuda:0', df)

                    ### delete
                    endtime = time.time()
                    print('recommend model. Executed Time:',
                          (endtime - starttime), 'sec')

                    gender_out = 'M' if gender == 0 else 'F'
                    print(
                        f'age_out: {int(round(ages))}, gender_out: {gender_out}\n, pred_movie_list: {pred_movie_list}'
                    )

                    label = f"{int(round(ages))}, {gender_out}, {pred_rating[0] :.2f}\n{pred_movie_list[0]}"
                    # draw_label(img, (d.left(), d.top()), label)
                    draw_label(img, (d.left(), d.bottom()), label)

            if args.output_dir is not None:
                output_path = output_dir.joinpath(name)
                cv2.imwrite(str(output_path), img)
            else:
                cv2.imshow("result", img)
                key = cv2.waitKey(-1) if img_dir else cv2.waitKey(30)

                if key == 27:  # ESC
                    break

    ### delete
    endtime_without = time.time()
    print('Without loading model. Executed Time:',
          (endtime_without - starttime_without), 'sec')
Example #15
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = args.resume

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    if device == "cuda":
        cudnn.benchmark = True

    
    
    # preds = []
    # gt = []
    test_list_1 = []
    for i, j, k in os.walk('./gdrive/MyDrive/data/26953_Bébé'):
        test_list_1 = np.char.add(
            np.array([args.data_dir + '/26953_Bébé/'] * len(k)), k)
        break

    for i, j, k in os.walk(args.data_dir + '/2112_Epicerie_salee'):
        test_list_2 = np.char.add(
            np.array([args.data_dir + '/2112_Epicerie_salee/'] * len(k)), k)
        break
    # test_dataset1 = ProductDataset(args.data_dir, "all", img_size=cfg.MODEL.IMG_SIZE, augment=False, name_list=test_list_1)
    # test_loader1 = DataLoader(test_dataset1, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
    #                           num_workers=cfg.TRAIN.WORKERS, drop_last=False)
    test_dataset2 = ProductDataset(args.data_dir, "all", img_size=cfg.MODEL.IMG_SIZE,
                                   augment=False, name_list=test_list_2)
    test_loader2 = DataLoader(test_dataset2, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
                              num_workers=cfg.TRAIN.WORKERS, drop_last=False)
    model.eval()
    #loss_monitor = AverageMeter()
    #accuracy_monitor = AverageMeter()
    true_label = []
    predicted_label = []
    with torch.no_grad():
        with tqdm(test_loader2) as _tqdm:
            for i, (x, y) in enumerate(_tqdm):
                x = x.to(device)
                y = y.to(device)

                # compute output
                _,predicted = model(x).max(1)
                true_label += y.tolist()
                predicted_label += predicted.tolist()

                correct_num = predicted.eq(y).sum().item()
                sample_num = x.size(0)
                #loss_monitor.update(cur_loss, sample_num)
                #accuracy_monitor.update(correct_num, sample_num)
                

    pd.DataFrame(predicted_label).to_csv('./gdrive/MyDrive/data/pred2.csv')
    pd.DataFrame(true_label).to_csv('./gdrive/MyDrive/data/true2.csv')
Example #16
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH)

    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # optionally resume from a checkpoint
    resume_path = args.resume

    if resume_path:
        if Path(resume_path).is_file():
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = torch.load(resume_path, map_location="cpu")
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_path, checkpoint['epoch']))
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(resume_path))

    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True

    train_list = []
    test_list = []
    if args.class_name == 'all':
        for name in ["26953_Bébé", '2112_Epicerie_salee']:
            train_list_t = []
            test_list_t = []
            for i, j, k in os.walk(args.data_dir + '/' + name):

                train_list_t = np.array([args.data_dir + '/' + name + '/'] *
                                        int(len(k) * 0.9))
                test_list_t = np.array([args.data_dir + '/' + name + '/'] *
                                       (len(k) - int(len(k) * 0.9)))
                t = np.random.choice(k, int(len(k) * 0.9), False)
                v = np.setdiff1d(k, t, True)
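                # random 90/10 split of the file names into train / test (assume_unique=True since names are unique)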

                train_list += list(np.char.add(train_list_t, t))
                test_list += list(np.char.add(test_list_t, v))
                break
    else:
        for i, j, k in os.walk(args.data_dir + '/' + args.class_name):
            train_list_t = np.array(
                [args.data_dir + '/' + args.class_name + '/'] *
                int(len(k) * 0.8))
            test_list_t = np.array(
                [args.data_dir + '/' + args.class_name + '/'] *
                (len(k) - int(len(k) * 0.8)))
            t = np.random.choice(k, int(len(k) * 0.8), False)
            v = np.setdiff1d(k, t, True)

            train_list += list(np.char.add(train_list_t, t))
            test_list += list(np.char.add(test_list_t, v))

            break

    criterion = nn.CrossEntropyLoss().to(device)
    train_dataset = ProductDataset(args.data_dir,
                                   args.class_name,
                                   img_size=cfg.MODEL.IMG_SIZE,
                                   augment=True,
                                   name_list=train_list)
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.TRAIN.WORKERS,
                              drop_last=True)

    test_dataset = ProductDataset(args.data_dir,
                                  args.class_name,
                                  img_size=cfg.MODEL.IMG_SIZE,
                                  augment=False,
                                  name_list=test_list)
    test_loader = DataLoader(test_dataset,
                             batch_size=cfg.TEST.BATCH_SIZE,
                             shuffle=False,
                             num_workers=cfg.TRAIN.WORKERS,
                             drop_last=False)

    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)

    train_writer = None

    if args.tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                     opts_prefix + "_train")
        test_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                    opts_prefix + "_test")

    best_test_acc = 0
    for epoch in range(start_epoch, cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, device)
        #train_loss, train_acc = 0,0
        # test
        test_loss, test_acc = validate(test_loader, model, criterion, epoch,
                                       device)
        #test_loss, test_acc = 0,0
        if args.tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            test_writer.add_scalar("loss", test_loss, epoch)
            test_writer.add_scalar("acc", test_acc, epoch)

        # checkpoint

        if test_acc > best_test_acc:
            print(
                f"=> [epoch {epoch:03d}] best test acc was improved from {best_test_acc:.3f} to {test_acc:.3f}"
            )
            model_state_dict = (model.module.state_dict()
                                if args.multi_gpu else model.state_dict())
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': cfg.MODEL.ARCH,
                    'state_dict': model_state_dict,
                    'optimizer_state_dict': optimizer.state_dict()
                },
                str(
                    Path(args.data_dir).joinpath(
                        "epoch{:03d}_{:.5f}_{:.4f}.pth".format(
                            epoch, test_loss, test_acc))))
            best_test_acc = test_acc
        else:
            print(
                f"=> [epoch {epoch:03d}] best test acc was not improved from {best_test_acc:.3f} ({test_acc:.3f})"
            )

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best test acc: {best_test_acc:.3f}")
Example #17
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # display nb of workers
    print(f"number of train workers {cfg.TRAIN.WORKERS}")

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH)

    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # optionally resume from a checkpoint
    resume_path = args.resume

    if resume_path:
        if Path(resume_path).is_file():
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = torch.load(resume_path, map_location="cpu")
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_path, checkpoint['epoch']))
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(resume_path))

    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True

    criterion = nn.CrossEntropyLoss().to(device)
    train_dataset = FaceDataset(args.data_dir,
                                "train",
                                img_size=cfg.MODEL.IMG_SIZE,
                                augment=True,
                                age_stddev=cfg.TRAIN.AGE_STDDEV)
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.TRAIN.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.TRAIN.WORKERS,
                              drop_last=True)

    val_dataset = FaceDataset(args.data_dir,
                              "valid",
                              img_size=cfg.MODEL.IMG_SIZE,
                              augment=False)
    val_loader = DataLoader(val_dataset,
                            batch_size=cfg.TEST.BATCH_SIZE,
                            shuffle=False,
                            num_workers=cfg.TRAIN.WORKERS,
                            drop_last=False)

    scheduler = StepLR(optimizer,
                       step_size=cfg.TRAIN.LR_DECAY_STEP,
                       gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
    best_val_mae = 10000.0
    train_writer = None

    if args.tensorboard is not None:
        opts_prefix = "_".join(args.opts)
        train_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                     opts_prefix + "_train")
        val_writer = SummaryWriter(log_dir=args.tensorboard + "/" +
                                   opts_prefix + "_val")

    for epoch in range(start_epoch, cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc = train(train_loader, model, criterion,
                                      optimizer, epoch, device)

        # validate
        val_loss, val_acc, val_mae = validate(val_loader, model, criterion,
                                              epoch, device)

        if args.tensorboard is not None:
            train_writer.add_scalar("loss", train_loss, epoch)
            train_writer.add_scalar("acc", train_acc, epoch)
            val_writer.add_scalar("loss", val_loss, epoch)
            val_writer.add_scalar("acc", val_acc, epoch)
            val_writer.add_scalar("mae", val_mae, epoch)

        # checkpoint
        if val_mae < best_val_mae:
            print(
                f"=> [epoch {epoch:03d}] best val mae was improved from {best_val_mae:.3f} to {val_mae:.3f}"
            )
            model_state_dict = model.module.state_dict() if args.multi_gpu else model.state_dict()
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': cfg.MODEL.ARCH,
                    'state_dict': model_state_dict,
                    'optimizer_state_dict': optimizer.state_dict()
                },
                str(
                    checkpoint_dir.joinpath(
                        "epoch{:03d}_{:.5f}_{:.4f}.pth".format(
                            epoch, val_loss, val_mae))))
            best_val_mae = val_mae
        else:
            print(
                f"=> [epoch {epoch:03d}] best val mae was not improved from {best_val_mae:.3f} ({val_mae:.3f})"
            )

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best val mae: {best_val_mae:.3f}")
Ejemplo n.º 18
0
def main():
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()
    start_epoch = 0
    checkpoint_dir = Path(args.checkpoint)
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH)
    multitask(model)

    if cfg.TRAIN.OPT == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=cfg.TRAIN.LR,
                                    momentum=cfg.TRAIN.MOMENTUM,
                                    weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # optionally resume from a checkpoint
    resume_path = args.resume

    if resume_path:
        if Path(resume_path).is_file():
            print("=> loading checkpoint '{}'".format(resume_path))
            checkpoint = torch.load(resume_path, map_location="cpu")
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(resume_path, checkpoint['epoch']))
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(resume_path))

    if args.multi_gpu:
        model = nn.DataParallel(model)

    if device == "cuda":
        cudnn.benchmark = True
    
    get_ca = True if "megaage" in args.dataset.lower() else True # display cummulative acuracy 
    value_ca = True if "megaage" in args.dataset.lower() else False # use CA to update saved model
    if get_ca:
        print("Cummulative Accuracy will be calculated for", args.dataset)
    if value_ca:
        print("Cummulative Accuracy will be compared to update saved model")

    criterion = nn.CrossEntropyLoss().to(device)
    train_dataset = FaceDataset(args.data_dir, "train", args.dataset, img_size=cfg.MODEL.IMG_SIZE, augment=args.aug,
                                age_stddev=cfg.TRAIN.AGE_STDDEV, label=True, expand=args.expand, gender=True)
    train_loader = DataLoader(train_dataset, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=True,
                              num_workers=cfg.TRAIN.WORKERS, drop_last=False)

    val_dataset = FaceDataset(args.data_dir, "valid", args.dataset,
                              img_size=cfg.MODEL.IMG_SIZE, augment=False, label=True, expand=args.expand, gender=True)
    val_loader = DataLoader(val_dataset, batch_size=cfg.TEST.BATCH_SIZE, shuffle=False,
                            num_workers=cfg.TRAIN.WORKERS, drop_last=False)

    scheduler = StepLR(optimizer, step_size=cfg.TRAIN.LR_DECAY_STEP, gamma=cfg.TRAIN.LR_DECAY_RATE,
                       last_epoch=start_epoch - 1)
    best_val_mae = 10000.0
    best_checkpoint = None
    train_writer = None
    global_ca = {3: 0.0, 5: 0.0, 7: 0.0}
    train_count = len(train_dataset)
    val_count = len(val_dataset)

    all_train_loss = []
    all_train_accu = []
    all_val_loss = []
    all_val_mae = []

    # note: training always runs the full cfg.TRAIN.EPOCHS even when resuming
    # (the original loop was range(start_epoch, cfg.TRAIN.EPOCHS))
    for epoch in range(cfg.TRAIN.EPOCHS):
        # train
        train_loss, train_acc = train(
            train_loader, model, criterion, optimizer, epoch, device, train_count)

        # validate
        val_loss, val_acc, val_mae, new_ca = validate(
            val_loader, model, criterion, epoch, device, val_count, get_ca)

        all_train_loss.append(float(train_loss))
        all_train_accu.append(float(train_acc))
        all_val_loss.append(float(val_loss))
        all_val_mae.append(float(val_mae))

        # checkpoint
        if ((not value_ca) and (val_mae < best_val_mae)) or ((get_ca and value_ca) and (new_ca[3] > global_ca[3])):
            print(
                f"=> [epoch {epoch:03d}] best val mae was improved from {best_val_mae:.3f} to {val_mae:.3f}")
            model_state_dict = model.module.state_dict() if args.multi_gpu else model.state_dict()
            checkpoint_path = str(checkpoint_dir.joinpath(
                "epoch{:03d}_{}_{:.5f}_{:.4f}_{}_{}_ldl.pth".format(
                    epoch, args.dataset, val_loss, val_mae,
                    datetime.now().strftime("%Y%m%d"), cfg.MODEL.ARCH)))
            torch.save(
                {
                    'epoch': epoch + 1,
                    'arch': cfg.MODEL.ARCH,
                    'state_dict': model_state_dict,
                    'optimizer_state_dict': optimizer.state_dict()
                },
                checkpoint_path)
            best_val_mae = val_mae
            best_checkpoint = checkpoint_path
            if get_ca:
                global_ca = new_ca
        else:
            print(
                f"=> [epoch {epoch:03d}] best val mae was not improved from {best_val_mae:.3f} ({val_mae:.3f})")

        # adjust learning rate
        scheduler.step()

    print("=> training finished")
    print(f"additional opts: {args.opts}")
    print(f"best val mae: {best_val_mae:.3f}")
    if get_ca:
        print("CA3: {:.2f} CA5: {:.2f} CA7: {:.2f}".format(global_ca[3] * 100, global_ca[5]*100, global_ca[7]*100))
    print("best mae saved model:", best_checkpoint)

    x = np.arange(cfg.TRAIN.EPOCHS)
    plt.xlabel("Epoch")

    plt.ylabel("Train Loss")
    plt.plot(x, all_train_loss)
    plt.savefig("savefig/{}_{}_{}_train_loss.png".format(args.dataset,
                                                         cfg.MODEL.ARCH, datetime.now().strftime("%Y%m%d")))
    plt.clf()

    plt.ylabel("Train Accuracy")
    plt.plot(x, all_train_accu)
    plt.savefig("savefig/{}_{}_{}_train_accu.png".format(args.dataset,
                                                         cfg.MODEL.ARCH, datetime.now().strftime("%Y%m%d")))
    plt.clf()

    plt.ylabel("Validation Loss")
    plt.plot(x, all_val_loss)
    plt.savefig("savefig/{}_{}_{}_val_loss.png".format(args.dataset,
                                                       cfg.MODEL.ARCH, datetime.now().strftime("%Y%m%d")))
    plt.clf()

    plt.ylabel("Validation Accuracy")
    plt.plot(x, all_val_accu)
    plt.savefig("savefig/{}_{}_{}_val_mae.png".format(args.dataset,
                                                      cfg.MODEL.ARCH, datetime.now().strftime("%Y%m%d")))
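The CA3/CA5/CA7 figures reported above are, by the usual convention, the fraction of samples whose absolute age error stays within 3, 5, or 7 years. A small sketch of that metric under this assumed definition (the repository's validate() may differ in detail):

import numpy as np

def cumulative_accuracy(pred_ages, true_ages, thresholds=(3, 5, 7)):
    """CA@k = share of samples with |prediction - ground truth| <= k years."""
    errors = np.abs(np.asarray(pred_ages, dtype=float) - np.asarray(true_ages, dtype=float))
    return {k: float((errors <= k).mean()) for k in thresholds}

# example usage
print(cumulative_accuracy([24.2, 31.0, 58.7], [25, 37, 60]))
# -> {3: 0.666..., 5: 0.666..., 7: 1.0}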
Ejemplo n.º 19
0
def main_fd_flask(file_path, fileName):
    start = timeit.default_timer()
    args = get_args()

    if args.opts:
        cfg.merge_from_list(args.opts)

    cfg.freeze()

    output_dir = None

    if args.output_dir is not None:
        output_dir = Path(args.output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    # create model
    print("=> creating model '{}'".format(cfg.MODEL.ARCH))
    model = get_model(model_name=cfg.MODEL.ARCH, pretrained=None)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)

    # load checkpoint
    resume_path = args.resume

    # if resume_path is None:
    #     resume_path = Path(__file__).resolve().parent.joinpath("misc", "epoch044_0.02343_3.9984.pth")
    #
    #     if not resume_path.is_file():
    #         print(f"=> model path is not set; start downloading trained model to {resume_path}")
    #         url = "https://github.com/yu4u/age-estimation-pytorch/releases/download/v1.0/epoch044_0.02343_3.9984.pth"
    #         urllib.request.urlretrieve(url, str(resume_path))
    #         print("=> download finished")

    if Path(resume_path).is_file():
        print("=> loading checkpoint '{}'".format(resume_path))
        checkpoint = torch.load(resume_path, map_location="cpu")
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}'".format(resume_path))
    else:
        raise ValueError("=> no checkpoint found at '{}'".format(resume_path))

    print("=> device '{}'".format(device))
    if device == "cuda":
        cudnn.benchmark = True

    model.eval()
    margin = args.margin
    # img_dir = args.img_dir
    detector = dlib.get_frontal_face_detector()
    img_size = cfg.MODEL.IMG_SIZE
    image_generator = yield_images_from_dir_flask(file_path, fileName)

    with torch.no_grad():
        for img, name in image_generator:
            # print(name)
            input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img_h, img_w, _ = np.shape(input_img)

            # detect faces using dlib detector
            detected = detector(input_img, 1)
            # print((len(detected), img_size, img_size, 3))
            faces = np.empty((len(detected), img_size, img_size, 3))
            # print("faces= " , len(detected))

            if len(detected) > 0:
                for i, d in enumerate(detected):
                    x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, d.bottom() + 1, d.width(), d.height()
                    xw1 = max(int(x1 - margin * w), 0)
                    yw1 = max(int(y1 - margin * h), 0)
                    xw2 = min(int(x2 + margin * w), img_w - 1)
                    yw2 = min(int(y2 + margin * h), img_h - 1)
                    cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255), 2)
                    cv2.rectangle(img, (xw1, yw1), (xw2, yw2), (255, 0, 0), 2)
                    faces[i] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))

                # predict ages
                inputs = torch.from_numpy(np.transpose(faces.astype(np.float32), (0, 3, 1, 2))).to(device)
                outputs = F.softmax(model(inputs), dim=-1).cpu().numpy()
                ages = np.arange(0, 101)
                predicted_ages = (outputs * ages).sum(axis=-1)

                # draw results
                for i, d in enumerate(detected):
                    age_label = "{}".format(int(predicted_ages[i]))
                    # print(d, age_label)
                    draw_label(img, (d.left(), d.top()), age_label)

            if args.output_dir is not None:
                # newfilename = datetime.today().strftime('%Y%m%d_%H%M%S') + "_" + f.filename
                # print(name)
                output_path = output_dir.joinpath(datetime.today().strftime('%Y%m%d_%H%M%S') + "_" + name)
                # print(str(output_path))
                cv2.imwrite(str(output_path), img)
            # else:
            #     cv2.imshow("result", img)
            #     key = cv2.waitKey(-1) if img_dir else cv2.waitKey(30)
            #
            #     if key == 27:  # ESC
            #         break
            stop = timeit.default_timer()
            print("exec time: {:.3f} s, fileName= {}".format(stop - start, fileName))
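draw_label() above is assumed to be a small OpenCV helper that stamps the predicted age next to a detected face. A minimal sketch of such a helper; the actual helper in this codebase may differ:

import cv2

def draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX,
               font_scale=0.8, thickness=1):
    """Draw `label` on a filled rectangle anchored at `point` = (x, y)."""
    (w, h), _ = cv2.getTextSize(label, font, font_scale, thickness)
    x, y = point
    cv2.rectangle(image, (x, y - h), (x + w, y), (255, 0, 0), cv2.FILLED)
    cv2.putText(image, label, (x, y), font, font_scale,
                (255, 255, 255), thickness, lineType=cv2.LINE_AA)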
Ejemplo n.º 20
0
def config_choise(args, flag=9):
    '''
    Select a (config yaml, decoder weights) pair by index:
    0:    mobilenetv2 dilated - c1_deepsup
    1:    resnet18 dilated - ppm_deepsup
    2:    resnet50 dilated - ppm_deepsup
    3:    resnet101 - upernet
    4:    resnet101 dilated - ppm_deepsup
    5:    remo resnet50 dilated - ppm_deepsup (psp)
    6:    remo resnet50 - upernet
    7:    remo resnet50 dilated - ppm_deepsup (person/stuff)
    8:    remo resnet18 dilated - ppm_deepsup
    9:    remo resnet50 dilated - ppm_deepsup (kk)
    '''
    cfg_list = [
        # 0
        'config/ade20k-mobilenetv2dilated-c1_deepsup.yaml',
        # 1
        'config/ade20k-resnet18dilated-ppm_deepsup.yaml',
        # 2
        'config/ade20k-resnet50dilated-ppm_deepsup.yaml',
        # 3
        'config/ade20k-resnet101-upernet.yaml',
        # 4
        'config/ade20k-resnet101dilated-ppm_deepsup.yaml',
        # 5 remo psp
        'config/remo_ade20k-resnet50dilated-ppm_deepsup.yaml',
        # 6 remo uper
        'config/ade20k-resnet50-upernet.yaml',
        # 7 remo person stuff
        'config/remo_ade20k-resnet50dilated-ppm_deepsup_person_stuff.yaml',
        # 8 18
        'config/remo_ade20k-resnet18dilated-ppm_deepsup.yaml',
        # 9
        '/home/zhangming/work/mask/scene/semantic-segmentation-pytorch/config/remo_ade20k-resnet50dilated-ppm_deepsup_kk.yaml',
    ]
    weight_list = [
        # 0
        '',
        # 1
        'Module/baseline-resnet18dilated-ppm_deepsup/decoder_epoch_20.pth',
        # 2
        'Module/baseline-resnet50dilated-ppm_deepsup/decoder_epoch_20.pth',
        # 3
        'Module/baseline-resnet101-upernet/decoder_epoch_50.pth',
        # 4
        # 'Module/baseline-resnet50-upernet/decoder_epoch_30.pth',
        'Module/baseline-resnet101dilated-ppm_deepsup/decoder_epoch_25.pth',
        # 5  remo psp
        'Module/resnet50_ade20k_20/decoder_epoch_20.pth',
        # 6 remo uper
        'Module/resnet50-upernet_40/decoder_epoch_40.pth',
        # 7 remo person stuff
        'Module/ade20k-resnet50dilated-ppm_deepsup_person_stuff/decoder_epoch_40.pth',
        # 8
        'Module/baseline-resnet18dilated-ppm_deepsup/decoder_epoch_20.pth',
        # 9
        'remo_ade20k-resnet50dilated-ppm_deepsup_kk/decoder_epoch_15.pth',
    ]

    args.gpu = 0
    args.cfg = cfg_list[flag]
    args.imgs = '/home/xjx/data/videos/mask/save_img'

    cfg.merge_from_file(args.cfg)
    cfg.merge_from_list(args.opts)

    cfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()
    cfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()

    cfg.MODEL.weights_encoder = weight_list[flag].replace('decoder', 'encoder')
    cfg.MODEL.weights_decoder = weight_list[flag]

    abs_path = '/home/zhangming/work/mask/scene/semantic-segmentation-pytorch/ckpt'
    cfg.MODEL.weights_encoder = os.path.join(abs_path,
                                             cfg.MODEL.weights_encoder)
    cfg.MODEL.weights_decoder = os.path.join(abs_path,
                                             cfg.MODEL.weights_decoder)
    cfg.list_test = ['x']

    # cfg.DATASET.imgSizes=  (300, 375, 450, 525, 600)
    cfg.DATASET.imgSizes = [450]
    cfg.DATASET.imgMaxSize = 1000
    cfg.DATASET.random_flip = False
    return args, cfg
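A brief usage sketch for the helper above; it assumes get_args() supplies the usual argparse namespace with an opts attribute, and that the config/weight paths listed in the function exist on disk:

# hypothetical call site
args = get_args()                        # must at least provide args.opts
args, cfg = config_choise(args, flag=2)  # resnet50 dilated - ppm_deepsup
print(cfg.MODEL.arch_encoder, cfg.MODEL.weights_decoder)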