Example #1
def test_train():
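    # Load a fine-tuned Mask R-CNN checkpoint (7 classes, presumably background +
    # 6 SeaShips ship categories) and run one training-mode forward pass on two samples.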
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    print(device)

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.load_state_dict(torch.load("./mask_rcnn_2.pth"))
    model.to(device)
    model.train()

    data = SeashipDataset("../SeaShips", None)

    imgs = []
    targets = []

    for i in range(2):
        img, target = data[i]
        imgs.append(F.to_tensor(img).to(device))
        target = {k: v.to(device) for k, v in target.items()}
        targets.append(target)

    # In train mode the model returns the loss dict for these images and targets.
    result = model(imgs, targets)
Example #2
def test():
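    # Batched inference over the test split: predictions and targets are moved to the
    # CPU and collected so the Evaluator can compute AP at the end.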
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.load_state_dict(torch.load("../model/mask_rcnn_5_2_002.pth"))
    model.to(device)
    model.eval()

    data = SeashipDataset("../../SeaShips", None)
    evaluator = Evaluator(7, 0.8)

    with open("./test_data", "r") as f:
        lines = f.readlines()
        test_list = [int(line) for line in lines]
    print(len(test_list))

    result_list = []
    target_list = []
    batch_size = 2
    for idx in range(0, len(test_list), batch_size):
        imgs = []
        targets = []
        try:
            for i in range(idx, idx + batch_size):
                img, target = data[test_list[i] - 1]
                imgs.append(F.to_tensor(img).to(device))
                target = {k: v.to(device) for k, v in target.items()}
                targets.append(target)

            results = model(imgs)

            #result_list.extend(results)
            #target_list.extend(targets)
            for result, target in zip(results, targets):
                target = {k: v.cpu().detach() for k, v in target.items()}
                result = {k: v.cpu().detach() for k, v in result.items()}
                result_list.append(result)
                target_list.append(target)
                #evaluator.get_label(result, target)
        except Exception:
            print(str(traceback.format_exc()))

        if idx % 12 == 0:
            print(idx)

    evaluator.get_ap(result_list, target_list)
Example #3
def res_mask():
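    # Run inference on the training images and save, for each image, one binary PNG
    # mask that combines all detections with score above 0.9.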
    os.environ["CUDA_VISIBLE_DEVICES"] = "3"
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    print(device)

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.load_state_dict(torch.load("../model/mask_rcnn_10_2_005.pth"))
    model.to(device)
    model.eval()

    data = SeashipDataset("../SeaShips", None)

    with open("./train_data", "r") as f:
        lines = f.readlines()
        train_list = [int(line) for line in lines]

    print(len(train_list))

    batch_size = 2

    for idx in range(0, len(train_list), batch_size):
        imgs = []
        for i in range(idx, idx + batch_size):
            img, target = data[train_list[i]]
            imgs.append(F.to_tensor(img).to(device))

        original_image_sizes = [img.shape[-2:] for img in imgs]
        result = model(imgs)

        for j, res in enumerate(result):
            scores = res['scores'].cpu().detach().numpy()
            masks = res["masks"].cpu()
            index = np.where(scores > 0.9)  # keep only detections with score above 0.9
            masks = masks[index]
            masks = torch.where(masks > 0.5, torch.full_like(masks, 1),
                                torch.full_like(masks, 0))
            m = torch.zeros(original_image_sizes[j])
            for mask in masks:
                m += mask[0]
            m = torch.where(m > 0.5, torch.full_like(m, 1),
                            torch.full_like(m, 0))
            img_mask = TensorToPIL(m)
            mask_name = "{:0>6d}.png".format(idx + j + 1)
            path = os.path.join("./res_mask/", mask_name)
            img_mask.save(path)
            print(path)
Example #4
def get_model(num_classes):
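    # Build a Mask R-CNN (ResNet-50 FPN backbone) and replace its box and mask heads
    # so they predict num_classes categories.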
    model = maskrcnn_resnet50_fpn()
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = torchvision.models.detection.faster_rcnn.FastRCNNPredictor(
        in_features, num_classes)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer,
                                                       num_classes)

    return model
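
A minimal usage sketch for get_model; the class count and dummy input shape below are illustrative assumptions, not taken from the examples on this page:

import torch

# Build a 7-class model (e.g. background + 6 ship categories) and run one dummy image.
model = get_model(num_classes=7)
model.eval()
with torch.no_grad():
    dummy = [torch.rand(3, 300, 400)]   # list of CHW float tensors in [0, 1]
    predictions = model(dummy)          # list of dicts with 'boxes', 'labels', 'scores', 'masks'
print(predictions[0]['boxes'].shape)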
Example #5
def test_one(idx):
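    # Single-image inference: feed the prediction to the Evaluator, then save a
    # visualization with the predicted boxes and the combined mask overlaid.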
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.load_state_dict(torch.load("../model/mask_rcnn_5_2_002.pth"))
    model.to(device)
    model.eval()

    data = SeashipDataset("../../SeaShips", None)
    evaluator = Evaluator(7, 0.5)
    targets = []
    image, target = data[idx]
    img_var = [F.to_tensor(image).to(device)]
    original_image_sizes = [img.shape[-2:] for img in img_var]
    targets.append(target)
    targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
    result = model(img_var)
    #print(target['labels'])
    #print(result)
    evaluator.get_label(result[0], target)
    boxes = result[0]['boxes'].cpu().detach().numpy()
    scores = result[0]['scores'].cpu().detach().numpy()
    masks = result[0]['masks'].cpu()

    index = np.where(scores > 0.5)
    boxes = boxes[index]
    masks = masks[index]
    masks = torch.where(masks > 0.5, torch.full_like(masks, 1),
                        torch.full_like(masks, 0))
    m = torch.zeros(original_image_sizes[0])
    for mask in masks:
        m += mask[0]
    m = torch.where(m > 0.5, torch.full_like(m, 1), torch.full_like(m, 0))

    image = image.convert("RGBA")
    mask = TensorToPIL(m)
    mask.convert("RGBA")
    drawRectangle(image, boxes)
    image = drawMasks(image, mask)

    res_name = "../result/test{:0>6d}.png".format(idx)
    image.save(res_name)
Example #6
def main(args):
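    # Distributed evaluation entry point: build the test loader, restore a checkpoint,
    # and run the test() routine.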
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    # Data loading code
    print("Loading data")
    dataset_test, num_classes = get_dataset(args.dataset, 'test',
                                            args.root_path, args.use_channel)
    print("Creating data loaders")
    if args.distributed:
        sampler_test = torch.utils.data.distributed.DistributedSampler(
            dataset_test, shuffle=False)
    else:
        sampler_test = torch.utils.data.SequentialSampler(dataset_test)
    dataloader_test = torch.utils.data.DataLoader(dataset_test,
                                                  batch_size=1,
                                                  sampler=sampler_test,
                                                  num_workers=args.workers)
    print("Creating model")
    # maskrcnn_resnet50_fpn
    model = maskrcnn_resnet50_fpn(num_classes=num_classes,
                                  pretrained=args.pretrained)
    # IoU threshold for NMS between predicted boxes (default 0.5, lowered to 0.3)
    model.roi_heads.nms_thresh = 0.3
    # maximum number of detections kept per image (default 100)
    model.roi_heads.detections_per_img = 1000
    # minimum score for a detection to be kept (default 0.05)
    model.roi_heads.score_thresh = 0.05
    model.to(device)
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module
    checkpoint = torch.load(args.resume, map_location='cpu')
    model_without_ddp.load_state_dict(checkpoint['model'])

    test(model,
         dataloader_test,
         device,
         is_vis=args.vis,
         draw_bbox=False,
         vis_dir=args.vis_dir)
Example #7
def main(args):
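    # Distributed training entry point: build train/valid/test loaders, train with SGD
    # plus warmup and a MultiStepLR schedule, and keep the checkpoint with the best
    # segmentation mAP on the validation set.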
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")
    dataset_train, num_classes = get_dataset(args.dataset,
                                             'train',
                                             get_transform(is_train=True),
                                             args.root_path,
                                             use_channel=args.use_channel,
                                             augmentation=args.augmentation,
                                             train=True)
    dataset_valid, _ = get_dataset(args.dataset,
                                   'valid',
                                   get_transform(is_train=False),
                                   args.root_path,
                                   use_channel=args.use_channel,
                                   augmentation='none',
                                   train=False,
                                   testdomain=args.testdomain)
    dataset_test, _ = get_dataset(args.dataset,
                                  'test',
                                  get_transform(is_train=False),
                                  args.root_path,
                                  use_channel=args.use_channel,
                                  augmentation='none',
                                  train=False,
                                  testdomain=args.testdomain)
    print("Creating data loaders")
    if args.distributed:
        sampler_train = torch.utils.data.distributed.DistributedSampler(
            dataset_train)
        sampler_valid = torch.utils.data.distributed.DistributedSampler(
            dataset_valid)
        sampler_test = torch.utils.data.distributed.DistributedSampler(
            dataset_test)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_valid = torch.utils.data.SequentialSampler(dataset_valid)
        sampler_test = torch.utils.data.SequentialSampler(dataset_test)
    batchsampler_train = torch.utils.data.BatchSampler(
        sampler_train, args.batch_size, drop_last=True)
    dataloader_train = torch.utils.data.DataLoader(
        dataset_train, batch_sampler=batchsampler_train,
        num_workers=args.workers, collate_fn=utils.collate_fn)
    dataloader_valid = torch.utils.data.DataLoader(
        dataset_valid, batch_size=1,
        sampler=sampler_valid, num_workers=args.workers,
        collate_fn=utils.collate_fn)
    dataloader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1,
        sampler=sampler_test, num_workers=args.workers,
        collate_fn=utils.collate_fn)

    print("Creating model")
    model = maskrcnn_resnet50_fpn(num_classes=num_classes,
                                  pretrained=args.pretrained)
    model.roi_heads.nms_thresh = 0.3
    model.roi_heads.detections_per_img = 1000
    model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(
        params, lr=args.lr, momentum=args.momentum,
        weight_decay=args.weight_decay)

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        vis_dir = 'results/{}/{}'.format(args.resume.split('/')[-2],
                                         args.testdomain)
        evaluate(model, dataloader_test, device, is_test=True,
                 is_vis=args.vis, draw_bbox=False, vis_dir=vis_dir)
        return

    print("Start training")
    start_time = time.time()
    best_score = 0
    iter_count = 0
    warmup_factor = 1. / 1000
    warmup_iters = 1000
    warmup_scheduler = utils.warmup_lr_scheduler(
        optimizer, warmup_iters, warmup_factor)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            sampler_train.set_epoch(epoch)
        iter_count, _ = train_one_epoch(model, optimizer, warmup_scheduler,
                                        dataloader_train, device, epoch,
                                        iter_count, args.print_freq)
        lr_scheduler.step()
        if args.output_dir:
            mAP_scores = evaluate(model, dataloader_valid, device=device)
            if best_score < mAP_scores['segm']:
                best_score = mAP_scores['segm']
                utils.save_on_master({
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'args': args,
                    'epoch': epoch},
                    os.path.join(args.output_dir, 'best_model.pth'))
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
Example #8
def weakly_supervision_train():
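    # Weakly-supervised fine-tuning: every parameter outside the mask branch is frozen,
    # and high-confidence predicted masks are written back as pseudo labels each epoch.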
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    print(device)

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.load_state_dict(
        torch.load("../model/mask_rcnn_5_2_002.pth"))  # load the previously trained model
    model.to(device)
    model.open_weakly_supervision_train()  # enable the weakly-supervised training mode

    for name, params in model.named_parameters():
        if 'mask' not in name:  # freeze all parameters outside the mask branch
            params.requires_grad = False

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    num_epochs = 5
    batch_size = 2

    loss_sum = 0

    data = SeashipDataset("../../SeaShips", None)

    with open("./train_data", "r") as f:
        lines = f.readlines()
        train_list = [int(line) for line in lines]

    print(len(train_list))

    for epoch in range(num_epochs):
        for idx in range(0, len(train_list), batch_size):
            try:
                imgs = []
                targets = []
                for i in range(idx, idx + batch_size):
                    img, target = data.getitem2(train_list[i] - 1, epoch)
                    imgs.append(F.to_tensor(img).to(device))
                    target = {k: v.to(device) for k, v in target.items()}
                    targets.append(target)

                original_image_sizes = [img.shape[-2:] for img in imgs]
                loss_dict, result = model(imgs, targets)
                losses = sum(loss for loss in loss_dict.values())
                loss_sum += losses.item()  # use .item() so the graph is not kept alive

                #print(result)

                optimizer.zero_grad()
                losses.backward()
                optimizer.step()

                for j, res in enumerate(result):
                    scores = res['scores'].cpu().detach().numpy()
                    masks = res["masks"].cpu()
                    #print(masks[0].shape)
                    index = np.where(scores > 0.9)  # keep only detections with score above 0.9
                    masks = masks[index]
                    masks = torch.where(masks > 0.5, torch.full_like(masks, 1),
                                        torch.full_like(masks, 0))
                    m = torch.zeros(original_image_sizes[j])
                    for mask in masks:
                        m += mask[0]
                    m = torch.where(m > 0.5, torch.full_like(m, 1),
                                    torch.full_like(m, 0))
                    img_mask = TensorToPIL(m)
                    data.updatemask(idx + j, img_mask, epoch)
            except Exception:
                logger.error(str(traceback.format_exc()))

            if idx % 10 == 0:
                #print("[%d]rpn_loss: %f" %(idx, loss_sum))
                logger.debug("[%d]total_loss: %f" % (idx, loss_sum))
                loss_sum = 0

    torch.save(model.state_dict(), "./mask_rcnn_weakly_5_2_002.pth")
Example #9
def train_mask():
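    # Supervised fine-tuning on the SeaShips training split: SGD with a warmup schedule
    # for the first epoch and a StepLR schedule across epochs.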
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    print(device)

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.to(device)
    model.train()

    data = SeashipDataset("../../SeaShips", None)

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=0.002,
                                momentum=0.9,
                                weight_decay=0.0005)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)
    num_epochs = 5
    batch_size = 2

    loss_sum = 0
    loss_classifier = 0
    loss_box_reg = 0
    loss_mask = 0

    with open("./train_data", "r") as f:
        lines = f.readlines()
        train_list = [int(line) for line in lines]

    print(len(train_list))

    for epoch in range(num_epochs):
        # Use a separate name for the warmup scheduler so the per-epoch StepLR
        # defined above is not shadowed.
        warmup_scheduler = None
        if epoch == 0:
            warmup_factor = 1. / 1000
            warmup_iters = min(1000, 5000 - 1)
            warmup_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters,
                                                         warmup_factor)

        for idx in range(0, len(train_list), batch_size):
            try:
                imgs = []
                targets = []
                for i in range(idx, idx + batch_size):
                    img, target = data[train_list[i] - 1]
                    imgs.append(F.to_tensor(img).to(device))
                    target = {k: v.to(device) for k, v in target.items()}
                    targets.append(target)

                loss_dict = model(imgs, targets)
                losses = sum(loss for loss in loss_dict.values())
                loss_sum += losses.item()  # use .item() so the graph is not kept alive

                #loss_classifier += loss_dict['loss_classifier'].values()
                #loss_box_reg += loss_dict['loss_box_reg'].values()
                #loss_mask += loss_dict['loss_mask'].values()

                optimizer.zero_grad()
                losses.backward()
                optimizer.step()

                if warmup_scheduler is not None:
                    warmup_scheduler.step()
            except Exception:
                logger.error(str(traceback.format_exc()))

            if idx % 12 == 0:
                logger.debug("[%d]total_loss: %f" % (idx, loss_sum))
                #logger.debug("[%d]loss: %f loss_classifier: %f loss_box_reg: %f loss_mask: %f" %(idx, loss_sum, loss_classifier, loss_box_reg, loss_mask))
                loss_sum = 0
                #loss_classifier = 0
                #loss_box_reg = 0
                #loss_mask = 0
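
        lr_scheduler.step()  # step the per-epoch StepLR once the epoch's batches are done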

    torch.save(model.state_dict(), "../model/mask_rcnn_5_2_002.pth")
    logger.debug("train successfully!")\
Example #10
def test_mask():
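    # Sanity-check inference on a single image (index 1888): print the prediction and
    # save the combined binary mask of detections with score above 0.9.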
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    print(device)

    model = maskrcnn_resnet50_fpn(pretrained=True)
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 7)
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer, 7)

    model.load_state_dict(torch.load("./mask_rcnn_2.pth"))
    model.to(device)
    #model.train()
    model.eval()
    data = SeashipDataset("../SeaShips", None)

    targets = []

    image, target = data[1888]
    img_var = [F.to_tensor(image).to(device)]
    original_image_sizes = [img.shape[-2:] for img in img_var]
    targets.append(target)
    targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
    #result = model.forward(img_var, targets)
    result = model(img_var)
    '''
    batch_size = 2

    label_struct = dict()

    for idx in range(5000,7000,batch_size):
        imgs = []
        targets = []
        for i in range(idx, idx+batch_size):
            img, target = data.__getitem__(i)
            imgs.append(F.to_tensor(img).to(device))
            target = {k : v.to(device) for k, v in target.items()}
            targets.append(target)
            
        result = model.forward(imgs)

        for res in result:
            bbox

    '''

    print(result)
    print(target['labels'])
    boxes = result[0]['boxes'].cpu().detach().numpy()
    scores = result[0]['scores'].cpu().detach().numpy()
    masks = result[0]['masks'].cpu()

    index = np.where(scores > 0.9)
    boxes = boxes[index]
    masks = masks[index]
    masks = torch.where(masks > 0.5, torch.full_like(masks, 1),
                        torch.full_like(masks, 0))
    m = torch.zeros(original_image_sizes[0])
    print(m.shape)
    for mask in masks:
        m += mask[0]
    m = torch.where(m > 0.5, torch.full_like(m, 1), torch.full_like(m, 0))
    img_mask = TensorToPIL(m)
    img_mask.save("./result/res11.png")
    #masks[pos] = 1
    #print(boxes)
    #print(masks[0][0][int(boxes[0][1]):int(boxes[0][3]),int(boxes[0][0]):int(boxes[0][2])])