Example #1
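All of the examples below share the same set of imports. A minimal preamble, assuming `pmr` is the `pytorch_mask_rcnn` package (the alias used throughout these snippets):

import bisect
import glob
import logging
import os
import re
import time

import matplotlib.pyplot as plt
import torch

import pytorch_mask_rcnn as pmr  # assumed origin of the `pmr` alias
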
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() and args.useCuda else "cpu")

    ds = pmr.datasets(args.dataset, args.dataDir, "val", train=True)
    indices = torch.randperm(len(ds)).tolist()
    d = torch.utils.data.Subset(ds, indices)

    model = pmr.maskrcnn_resnet50(True, len(ds.classes) + 1).to(device)
    model.eval()
    model.head.score_thresh = 0.3

    if args.chptPath:
        checkpoint = torch.load(args.chptPath, map_location=device)
        model.load_state_dict(checkpoint)
        
    for p in model.parameters():
        p.requires_grad_(False)

    iters = 3

    for i, (image, target) in enumerate(d):
        image = image.to(device)
        target = {k: v.to(device) for k, v in target.items()}
        
        with torch.no_grad():
            result = model(image)

            
        plt.figure(figsize=(12, 15))
        pmr.show(image, result, ds.classes)

        if i >= iters - 1:
            break
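
A minimal sketch of how this entry point might be driven; the `argparse` setup is hypothetical, with attribute names (`useCuda`, `dataDir`, `chptPath`) chosen to match the ones the function reads:

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--useCuda", action="store_true")
    parser.add_argument("--dataset", default="voc")  # illustrative default
    parser.add_argument("--dataDir", required=True)
    parser.add_argument("--chptPath", default=None)
    main(parser.parse_args())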
Example #2
def main(args):
    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.use_cuda else "cpu")
    if device.type == "cuda":
        pmr.get_gpu_prop(show=True)
    print("\ndevice: {}".format(device))

    d_test = pmr.datasets(args.dataset, args.data_dir, "val2017",
                          train=True)  # set train=True for eval

    print(args)
    num_classes = len(d_test.classes) + 1
    model = pmr.maskrcnn_resnet50(False, num_classes).to(device)

    checkpoint = torch.load(args.ckpt_path, map_location=device)
    model.load_state_dict(checkpoint["model"])
    print(checkpoint["eval_info"])
    del checkpoint
    torch.cuda.empty_cache()

    print("evaluating only...")
    B = time.time()
    eval_output, iter_eval = pmr.evaluate(model, d_test, device, args)
    B = time.time() - B
    print(eval_output)
    print(
        "\ntotal time of this evaluation: {:.2f} s, speed: {:.2f} FPS".format(
            B, args.batch_size / iter_eval))
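
Example #2 (and the resume logic in the training examples below) expects the checkpoint to be a dict with specific keys. A minimal sketch of the assumed layout, inferred from the keys these snippets read, using a stand-in model:

import torch

model = torch.nn.Linear(2, 2)  # stand-in module, just to show the layout
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

checkpoint = {
    "model": model.state_dict(),          # loaded via checkpoint["model"]
    "optimizer": optimizer.state_dict(),  # restored when resuming training
    "epochs": 1,                          # becomes start_epoch on resume
    "eval_info": "",                      # printed after loading
}
torch.save(checkpoint, "ckpt-1.pth")  # "-<epoch>" suffix the resume glob expects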
Example #3
def main(args):
    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.useCuda else "cpu")

    dataset_train = pmr.datasets(args.dataset,
                                 args.dataDir,
                                 "train",
                                 train=True)
    indices = torch.randperm(len(dataset_train)).tolist()
    d_train = torch.utils.data.Subset(dataset_train, indices)
    d_test = pmr.datasets(args.dataset, args.dataDir, "val",
                          train=True)  # set train=True for eval

    print(args)
    num_classes = len(
        d_train.dataset.classes) + 1  # including background class
    model = pmr.maskrcnn_resnet50(True, num_classes).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    if args.chptPath is not None:
        checkpoint = torch.load(args.chptPath, map_location=device)
        model.load_state_dict(checkpoint)

    start_epoch = 0

    since = time.time()
    print("\nalready trained: {} epochs; to {} epochs".format(
        start_epoch, args.epochs))

    for epoch in range(start_epoch, args.epochs):
        print("\nepoch: {}".format(epoch + 1))
        A = time.time()
        pmr.train_one_epoch(model, optimizer, d_train, device, epoch, args)
        A = time.time() - A
        torch.save(model.state_dict(), "result/best.pth")
Example #4
def main(args):
    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.use_cuda else "cpu")
    if device.type == "cuda":
        pmr.get_gpu_prop(show=True)
    print("\ndevice: {}".format(device))

    # ---------------------- prepare data loader ------------------------------- #

    dataset_train = pmr.datasets(args.dataset,
                                 args.data_dir,
                                 "train2017",
                                 train=True)
    indices = torch.randperm(len(dataset_train)).tolist()
    d_train = torch.utils.data.Subset(dataset_train, indices)

    d_test = pmr.datasets(args.dataset, args.data_dir, "val2017",
                          train=True)  # set train=True for eval

    args.warmup_iters = max(1000, len(d_train))

    # -------------------------------------------------------------------------- #

    print(args)
    num_classes = len(
        d_train.dataset.classes) + 1  # including background class
    model = pmr.maskrcnn_resnet50(True, num_classes).to(device)

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
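    # step decay: factor 1.0 before epoch 22, 0.1 from epoch 22, 0.01 from
    # epoch 26 (bisect counts how many milestones the epoch has passed)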
    lr_lambda = lambda x: 0.1**bisect.bisect([22, 26], x)

    start_epoch = 0
    prefix, ext = os.path.splitext(args.ckpt_path)
    ckpts = glob.glob(prefix + "-*" + ext)
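    # sort checkpoints by the epoch number embedded in the file name,
    # e.g. "ckpt-12.pth" -> 12, so ckpts[-1] is the most recent one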
    ckpts.sort(key=lambda x: int(
        re.search(r"-(\d+){}".format(ext),
                  os.path.split(x)[1]).group(1)))
    if ckpts:
        checkpoint = torch.load(ckpts[-1],
                                map_location=device)  # load last checkpoint
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        start_epoch = checkpoint["epochs"]
        del checkpoint
        torch.cuda.empty_cache()

    since = time.time()
    print("\nalready trained: {} epochs; to {} epochs".format(
        start_epoch, args.epochs))

    # ------------------------------- train ------------------------------------ #

    for epoch in range(start_epoch, args.epochs):
        print("\nepoch: {}".format(epoch + 1))

        A = time.time()
        args.lr_epoch = lr_lambda(epoch) * args.lr
        print("lr_epoch: {:.4f}, factor: {:.4f}".format(
            args.lr_epoch, lr_lambda(epoch)))
        iter_train = pmr.train_one_epoch(model, optimizer, d_train, device,
                                         epoch, args)
        A = time.time() - A

        B = time.time()
        eval_output, iter_eval = pmr.evaluate(model, d_test, device, args)
        B = time.time() - B

        trained_epoch = epoch + 1
        print("training: {:.2f} s, evaluation: {:.2f} s".format(A, B))
        pmr.collect_gpu_info("maskrcnn", [1 / iter_train, 1 / iter_eval])
        print(eval_output.get_AP())

        pmr.save_ckpt(model,
                      optimizer,
                      trained_epoch,
                      args.ckpt_path,
                      eval_info=str(eval_output))

        # it will create many checkpoint files during training, so delete some.
        prefix, ext = os.path.splitext(args.ckpt_path)
        ckpts = glob.glob(prefix + "-*" + ext)
        ckpts.sort(key=lambda x: int(
            re.search(r"-(\d+){}".format(ext),
                      os.path.split(x)[1]).group(1)))
        n = 5
        if len(ckpts) > n:
            for i in range(len(ckpts) - n):
                os.remove(ckpts[i])

    # -------------------------------------------------------------------------- #

    print("\ntotal time of this training: {:.2f} s".format(time.time() -
                                                           since))
    if start_epoch < args.epochs:
        print("already trained: {} epochs\n".format(trained_epoch))
Example #5
# NOTE: `dataset`, `use_cuda` and `ckpt_path` are not defined in this cell;
# the assignments below are illustrative placeholders for a VOC setup.
dataset = "voc"
use_cuda = True
ckpt_path = None  # point this at a checkpoint file to load trained weights
data_dir = "/mingback/students/jincheng/data/VOC2012/VOCdevkit/VOC2012"

device = torch.device(
    "cuda" if torch.cuda.is_available() and use_cuda else "cpu")
if device.type == "cuda":
    pmr.get_gpu_prop(show=True)
print("\ndevice: {}".format(device))

ds = pmr.datasets(dataset,
                  data_dir,
                  "val2017" if dataset == "coco" else "val",
                  train=True)
indices = torch.randperm(len(ds)).tolist()
d = torch.utils.data.Subset(ds, indices)

model = pmr.maskrcnn_resnet50(True, len(ds.classes) + 1).to(device)
model.eval()
model.head.score_thresh = 0.3

if ckpt_path:
    checkpoint = torch.load(ckpt_path, map_location=device)
    model.load_state_dict(checkpoint["model"])
    print(checkpoint["eval_info"])
    del checkpoint
    torch.cuda.empty_cache()

for p in model.parameters():
    p.requires_grad_(False)

# %%
iters = 3
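
The cell ends before the inference loop; presumably the next cell mirrors the loop from Example #1. A sketch under that assumption:

# %%
for i, (image, target) in enumerate(d):
    image = image.to(device)
    with torch.no_grad():
        result = model(image)

    plt.figure(figsize=(12, 15))
    pmr.show(image, result, ds.classes)

    if i >= iters - 1:
        break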
Example #6
def main(args, config):
    log_dir = "./logs"
    log_path = os.path.join(
        log_dir, time.strftime("%Y-%m-%d-%H%M.log", time.localtime(time.time()))
    )
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[logging.FileHandler(log_path), logging.StreamHandler()],
    )
    logger = logging.getLogger()
    device = torch.device(
        "cuda" if torch.cuda.is_available() and args.use_cuda else "cpu"
    )
    if device.type == "cuda":
        pmr.get_gpu_prop(show=True)
    logger.info("\ndevice: {}".format(device))

    # ---------------------- prepare data loader ------------------------------- #

    dataset_train = pmr.datasets(
        config["dataset"],
        config["data_dir"],
        "train2017" if config["dataset"] == "coco" else "train",
        train=True,
    )
    print("classes = ", dataset_train.classes)
    indices = torch.randperm(len(dataset_train)).tolist()
    d_train = torch.utils.data.Subset(dataset_train, indices)

    d_test = pmr.datasets(
        config["dataset"],
        config["data_dir"],
        "val2017" if config["dataset"] == "coco" else "val",
        train=True,
    )  # set train=True for eval

    args.warmup_iters = max(1000, len(d_train))

    # -------------------------------------------------------------------------- #

    print(args)
    num_classes = len(d_train.dataset.classes) + 1  # including background class
    model = pmr.maskrcnn_resnet50(True, num_classes).to(device)

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(
        params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay
    )
    lr_lambda = lambda x: 0.1 ** bisect.bisect([22, 26], x)

    logger.info(model)
    logger.info("-" * 50)

    start_epoch = 0
    prefix, ext = os.path.splitext(args.ckpt_path)
    ckpts = glob.glob(prefix + "-*" + ext)
    ckpts.sort(
        key=lambda x: int(
            re.search(r"-(\d+){}".format(ext), os.path.split(x)[1]).group(1)
        )
    )
    if ckpts:
        checkpoint = torch.load(ckpts[-1], map_location=device)  # load last checkpoint
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        start_epoch = checkpoint["epochs"]
        del checkpoint
        torch.cuda.empty_cache()

    since = time.time()
    logger.info(
        "\nalready trained: {} epochs; to {} epochs".format(
            start_epoch, config["epochs"]
        )
    )

    # ------------------------------- train ------------------------------------ #

    for epoch in range(start_epoch, config["epochs"]):
        logger.info("epoch: {}".format(epoch + 1))

        A = time.time()
        args.lr_epoch = lr_lambda(epoch) * args.lr
        logger.info(
            "lr_epoch: {:.4f}, factor: {:.4f}".format(args.lr_epoch, lr_lambda(epoch))
        )
        iter_train = pmr.train_one_epoch(
            model, optimizer, d_train, device, epoch, args, logger=logger
        )
        A = time.time() - A

        B = time.time()
        eval_output, iter_eval = pmr.evaluate(model, d_test, device, args)
        B = time.time() - B

        trained_epoch = epoch + 1
        print("training: {:.2f} s, evaluation: {:.2f} s".format(A, B))
        pmr.collect_gpu_info("maskrcnn", [1 / iter_train, 1 / iter_eval])
        if len(list(eval_output.buffer)) > 0:
            # print(eval_output.get_AP())
            logger.info(
                "bbox AP: {}; mask AP: {}".format(
                    float(eval_output.get_AP().get("bbox AP")),
                    float(eval_output.get_AP().get("mask AP")),
                )
            )

        pmr.save_ckpt(
            model, optimizer, trained_epoch, args.ckpt_path, eval_info=str(eval_output)
        )

        # it will create many checkpoint files during training, so delete some.
        prefix, ext = os.path.splitext(args.ckpt_path)
        ckpts = glob.glob(prefix + "-*" + ext)
        ckpts.sort(
            key=lambda x: int(
                re.search(r"-(\d+){}".format(ext), os.path.split(x)[1]).group(1)
            )
        )
        n = 5
        if len(ckpts) > n:
            for i in range(len(ckpts) - n):
                os.remove(ckpts[i])

    # -------------------------------------------------------------------------- #

    logger.info("\ntotal time of this training: {:.2f} s".format(time.time() - since))
    if start_epoch < config["epochs"]:
        logger.info("already trained: {} epochs\n".format(trained_epoch))