Ejemplo n.º 1
0
    root_path=data_path,
    train_transforms=train_transform_fn,
    val_transforms=val_transform_fn,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=batch_size * 2,
    limit_train_num_samples=250 if debug else None,
    limit_val_num_samples=250 if debug else None,
    random_seed=seed)

# Batch-preparation callable handed to the training engine; the fp32
# variant — presumably the non-AMP path. TODO confirm against the engine setup.
prepare_batch = prepare_batch_fp32

# Run validation every `val_interval` epochs.
val_interval = 5

# 21 classes — presumably Pascal VOC (20 objects + background); verify dataset.
num_classes = 21
model = DeepLabV3(build_resnet18_backbone, num_classes=num_classes)

criterion = nn.CrossEntropyLoss()

# Linear LR scaling: base LR 0.007 was chosen for batch size 4, so scale
# proportionally with the actual batch size.
lr = 0.007 / 4.0 * batch_size
weight_decay = 5e-4
momentum = 0.9

optimizer = optim.SGD(
    [{
        'params': model.backbone.parameters()
    }, {
        'params': chain(model.aspp.parameters(), model.decoder.parameters())
    }],
    lr=lr,
    momentum=momentum,
from train_gf15.measure import SegmentationMetric
from tqdm import tqdm, trange
from train.LovaszSoftmax import lovasz_softmax

# ---- run configuration / hyperparameters ----
batch_size = 8
niter = 100  # number of training iterations/epochs — TODO confirm unit in the loop
class_num = 15  # segmentation classes for the GF15 dataset
learning_rate = 0.0001 * 1
beta1 = 0.5  # presumably Adam beta1 — verify against the optimizer construction
cuda = True
num_workers = 1
size_h = 256  # input height
size_w = 256  # input width
flip = 0  # flip-augmentation toggle (0 = off)
band = 3  # input channel count (RGB)
net = DeepLabV3(band, class_num)
train_path = '../dataset/GF15/train/'
val_path = '../dataset/GF15/val/'
test_path = '../dataset/GF15/test/'
out_file = './checkpoint/' + net.name  # checkpoint directory named after the net
save_epoch = 1  # checkpoint every N epochs
test_step = 300
log_step = 1
num_GPU = 1
index = 2000
pre_trained = True
torch.cuda.set_device(0)  # pin all CUDA work to GPU 0

try:
    import os
    os.makedirs(out_file)
Ejemplo n.º 3
0
def main():
    """Train a DeepLabV3 model for change detection / segmentation.

    All paths and hyperparameters come from the module-level ``cfg``.
    Optionally resumes weights from ``cfg.TRAINED_LAST_MODEL``, trains with
    ``calc_loss`` and Adam, reduces the LR on validation-loss plateaus, and
    checkpoints every 10 epochs plus once at the end.
    """
    best_metric = 0  # NOTE(review): never updated or read below — dead variable
    # Both splits only get a rescale here; further augmentation is presumably
    # done inside the Dataset when transform=True — confirm in my_dataset.
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])

    train_data = my_dataset.Dataset(cfg.TRAIN_DATA_PATH,
                                    cfg.TRAIN_LABEL_PATH,
                                    cfg.TRAIN_TXT_PATH,
                                    'train',
                                    transform=True,
                                    transform_med=train_transform_det)
    val_data = my_dataset.Dataset(cfg.VAL_DATA_PATH,
                                  cfg.VAL_LABEL_PATH,
                                  cfg.VAL_TXT_PATH,
                                  'val',
                                  transform=True,
                                  transform_med=val_transform_det)
    train_dataloader = DataLoader(train_data,
                                  batch_size=cfg.BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=1,
                                  pin_memory=True)
    val_dataloader = DataLoader(val_data,
                                batch_size=cfg.BATCH_SIZE,
                                shuffle=False,
                                num_workers=1,
                                pin_memory=True)

    model = DeepLabV3(model_id=1, project_dir=cfg.BASE_PATH)
    if cfg.RESUME:
        # Resumes weights only; optimizer/scheduler state is not restored.
        checkpoint = torch.load(cfg.TRAINED_LAST_MODEL)
        model.load_state_dict(checkpoint['state_dict'])
        print('resume success \n')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        model = nn.DataParallel(model)

    if torch.cuda.is_available():
        model.cuda()

    # if torch.cuda.is_available():
    #     model.cuda()

    # params = [{'params': md.parameters()} for md in model.children() if md in [model.classifier]]
    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.INIT_LEARNING_RATE,
                           weight_decay=cfg.DECAY)
    # Multiply LR by 0.2 after `patience` validation checks without improvement.
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode='min',
                                  factor=0.2,
                                  patience=5,
                                  verbose=True,
                                  threshold=0.0001,
                                  threshold_mode='rel',
                                  cooldown=2,
                                  eps=1e-08)
    fl = FocalLoss2d(gamma=cfg.FOCAL_LOSS_GAMMA)  # NOTE(review): constructed but never used (calc_loss is used instead)
    Loss_list = []  # NOTE(review): never appended to
    Accuracy_list = []  # NOTE(review): never appended to
    for epoch in range(cfg.EPOCH):
        print('epoch {}'.format(epoch + 1))
        # ---- training ----
        train_loss = 0  # NOTE(review): never accumulated (accumulation is commented out below)
        train_acc = 0
        for batch_idx, train_batch in enumerate(train_dataloader):
            model.train()
            # Dataset yields (image, label, *metadata); the metadata is unused here.
            batch_det_img, batch_y, _, _, _, _, _ = train_batch
            batch_det_img, batch_y = Variable(batch_det_img).cuda(), Variable(
                batch_y).cuda()
            output = model(batch_det_img)
            del batch_det_img  # free GPU memory before backward
            loss = calc_loss(output, batch_y)
            # train_loss += loss.data[0]
            #should change after
            # pred = torch.max(out, 1)[0]
            # train_correct = (pred == batch_y).sum()
            # train_acc += train_correct.data[0]
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Full validation pass every 5 training batches.
            # NOTE(review): scheduler.step() therefore fires many times per
            # epoch, and val_loss is a sum (not a mean) over val batches.
            if (batch_idx) % 5 == 0:
                model.eval()
                val_loss = 0
                for v_batch_idx, val_batch in enumerate(val_dataloader):
                    v_batch_det_img, v_batch_y, _, _, _, _, _ = val_batch
                    v_batch_det_img, v_batch_y = Variable(
                        v_batch_det_img).cuda(), Variable(v_batch_y).cuda()
                    val_out = model(v_batch_det_img)
                    del v_batch_det_img
                    val_loss += float(calc_loss(val_out, v_batch_y))
                scheduler.step(val_loss)
                # NOTE(review): NameError if val_dataloader is empty — the
                # names below are only bound inside the loop.
                del val_out, v_batch_y
                print("Train Loss: {:.6f}  Val Loss: {:.10f}".format(
                    loss, val_loss))

        # Periodic checkpoint every 10 epochs.
        if (epoch + 1) % 10 == 0:
            torch.save({'state_dict': model.state_dict()},
                       os.path.join(
                           cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                           'model_tif_deeplab18_bce_240*240_' +
                           str(epoch + 1) + '.pth'))
    # Final checkpoint after the last epoch.
    torch.save({'state_dict': model.state_dict()},
               os.path.join(cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                            'model_tif_deeplab18_bce_240*240_last.pth'))
Ejemplo n.º 4
0
def main():
    """Adversarial (optionally semi-supervised) segmentation training loop.

    Builds the segmentation network selected by ``args.mod`` and an
    ``FCDiscriminator``, then for each of ``args.num_steps`` iterations:

    * G step — supervised cross-entropy on a labeled batch, plus an
      adversarial BCE term once ``i_iter > args.semi_start_adv``, plus
      optional semi-supervised losses on unlabeled TCGA batches
      (self-training targets masked by the discriminator's confidence).
    * D step — every 3rd iteration after ``semi_start_adv``, BCE on
      softmax predictions vs. one-hot ground truth with noisy labels.

    Validates and checkpoints every 1000 iterations.  All paths, sizes and
    loss weights come from the module-level ``args``.
    """
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)
    cudnn.enabled = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # initialize parameters
    num_steps = args.num_steps
    batch_size = args.batch_size
    lr = args.lr
    save_cp = args.save_cp
    img_scale = args.scale
    val_percent = args.val / 100

    # data input: labeled set split into train/val, plus an unlabeled pool.
    dataset = BasicDataset(IMG_DIRECTORY, MASK_DIRECTORY, img_scale)
    n_val = int(len(dataset) * val_percent)
    n_train = len(dataset) - n_val
    train, val = random_split(dataset, [n_train, n_val])
    tcga_dataset = UnlabeledDataset(TCGA_DIRECTORY)
    n_unlabeled = len(tcga_dataset)

    # create network
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    #logger.addHandler(logging.StreamHandler())
    logging.info('Using device %s' % str(device))
    logging.info('Network %s' % args.mod)
    # Fixed: the learning-rate placeholder read '%.4f_transform' (paste
    # artifact), garbling the log line.
    logging.info('''Starting training:
            Num_steps:          %.2f
            Batch size:      %.2f
            Learning rate:   %.4f
            Training size:   %.0f
            Validation size: %.0f
            Unlabeled size:  %.0f
            Checkpoints:     %s
            Device:          %s
            Scale:           %.2f
        ''' % (num_steps, batch_size, lr, n_train, n_val, n_unlabeled,
               str(save_cp), str(device.type), img_scale))
    if args.mod == 'unet':
        net = UNet(n_channels=3, n_classes=NUM_CLASSES)
        print('channels = %d , classes = %d' % (net.n_channels, net.n_classes))
    elif args.mod == 'modified_unet':
        net = modified_UNet(n_channels=3, n_classes=NUM_CLASSES)
        print('channels = %d , classes = %d' % (net.n_channels, net.n_classes))
    elif args.mod == 'deeplabv3':
        net = DeepLabV3(nclass=NUM_CLASSES, pretrained_base=False)
        print('channels = 3 , classes = %d' % net.nclass)
    elif args.mod == 'deeplabv3plus':
        net = DeepLabV3Plus(nclass=NUM_CLASSES, pretrained_base=False)
        print('channels = 3 , classes = %d' % net.nclass)
    elif args.mod == 'nestedunet':
        net = NestedUNet(nclass=NUM_CLASSES, deep_supervision=False)
        # Fixed: was `net.nlass` (typo) — raised AttributeError for this model.
        print('channels = 3 , classes = %d' % net.nclass)
    elif args.mod == 'inception3':
        net = Inception3(n_classes=4,
                         inception_blocks=None,
                         init_weights=True,
                         bilinear=True)
        print('channels = 3 , classes = %d' % net.n_classes)

    net.to(device=device)
    net.train()

    cudnn.benchmark = True

    # init D (discriminator), optionally restored from a snapshot.
    model_D = FCDiscriminator(num_classes=args.num_classes)
    if args.restore_from_D is not None:
        model_D.load_state_dict(torch.load(args.restore_from_D))
    model_D.train()
    model_D.cuda()

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    if args.semi_train is None:
        train_loader = DataLoader(train,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=8,
                                  pin_memory=True)
        val_loader = DataLoader(val,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=8,
                                pin_memory=True)
    else:
        # read unlabeled data and labeled data
        train_loader = DataLoader(train,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=True)
        val_loader = DataLoader(val,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=4,
                                pin_memory=True)

        trainloader_remain = DataLoader(tcga_dataset,
                                        batch_size=batch_size,
                                        shuffle=True,
                                        num_workers=4,
                                        pin_memory=True)
        #trainloader_gt = data.DataLoader(train_gt_dataset,
        #batch_size=args.batch_size, sampler=train_gt_sampler, num_workers=3, pin_memory=True)

        trainloader_remain_iter = enumerate(trainloader_remain)

    trainloader_iter = enumerate(train_loader)

    # implement model.optim_parameters(args) to handle different models' lr setting

    # optimizer for segmentation network
    #optimizer = optim.SGD(net.optim_parameters(args),
    #lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)
    optimizer = optim.Adam(net.parameters(),
                           lr=args.lr,
                           weight_decay=args.weight_decay)
    optimizer.zero_grad()
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                           10000,
                                                           eta_min=1e-6,
                                                           last_epoch=-1)

    # optimizer for discriminator network
    optimizer_D = optim.Adam(model_D.parameters(),
                             lr=args.learning_rate_D,
                             betas=(0.9, 0.99))
    #optimizer_D = optim.SGD(model_D.parameters(), lr=args.learning_rate_D, momentum=args.momentum,weight_decay=args.weight_decay)
    optimizer_D.zero_grad()

    # loss / bilinear upsampling
    # NOTE(review): size is (w, h); torch APIs expect (h, w). Harmless for
    # square inputs — confirm for non-square sizes.
    bce_loss = BCEWithLogitsLoss2d()
    interp = nn.Upsample(size=(input_size[1], input_size[0]),
                         mode='bilinear',
                         align_corners=True)
    '''
    if version.parse(torch.__version__) >= version.parse('0.4.0'):
        interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear', align_corners=True)
    else:
        interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear')
    '''

    # labels for adversarial training
    pred_label = 0
    gt_label = 1

    # Fixed: best_acc must persist for the whole run — it was re-initialized
    # to 0 at the top of every iteration, defeating best-accuracy tracking.
    best_acc = 0

    for i_iter in range(args.num_steps):

        loss_seg_value = 0
        loss_adv_pred_value = 0
        loss_D_value = 0
        loss_semi_value = 0
        loss_semi_adv_value = 0

        optimizer.zero_grad()
        #adjust_learning_rate(optimizer, i_iter)
        optimizer_D.zero_grad()
        adjust_learning_rate_D(optimizer_D, i_iter)

        for sub_i in range(args.iter_size):

            # train G

            # don't accumulate grads in D
            for param in model_D.parameters():
                param.requires_grad = False
            for param in net.parameters():
                param.requires_grad = True

            # do semi first: adversarial + self-training losses on unlabeled data
            if (args.lambda_semi > 0 or args.lambda_semi_adv > 0
                ) and i_iter >= args.semi_start_adv:
                try:
                    _, batch = trainloader_remain_iter.__next__()
                except StopIteration:
                    # restart the unlabeled loader when exhausted
                    trainloader_remain_iter = enumerate(trainloader_remain)
                    _, batch = trainloader_remain_iter.__next__()

                # only access to img
                images = batch['image']
                images = images.type(torch.FloatTensor)
                images = Variable(images).cuda()

                pred = net(images)
                pred_remain = pred.detach()

                D_out = interp(model_D(F.softmax(pred, dim=1)))
                D_out_sigmoid = torch.sigmoid(
                    D_out).data.cpu().numpy().squeeze(axis=1)
                #D_out_sigmoid = torch.sigmoid(D_out).data.cpu().numpy()

                #ignore_mask_remain = np.zeros(D_out_sigmoid.shape).astype(np.bool)

                # fool D: push unlabeled predictions toward the "real" label
                targetr = Variable(torch.ones(D_out.shape))
                targetr = Variable(torch.FloatTensor(targetr)).cuda()
                loss_semi_adv = args.lambda_semi_adv * bce_loss(D_out, targetr)
                loss_semi_adv = loss_semi_adv / args.iter_size

                #loss_semi_adv.backward()
                #loss_semi_adv_value += loss_semi_adv.data.cpu().numpy()[0]/args.lambda_semi_adv
                loss_semi_adv_value += loss_semi_adv.cpu().detach().numpy(
                ).item() / args.lambda_semi_adv

                if args.lambda_semi <= 0 or i_iter < args.semi_start:
                    loss_semi_adv.backward()
                    loss_semi_value = 0
                else:
                    # produce ignore mask: drop pixels D considers unconfident
                    semi_ignore_mask = (D_out_sigmoid < args.mask_T)

                    semi_gt = pred.data.cpu().numpy().argmax(axis=1)
                    semi_gt[semi_ignore_mask] = 255

                    semi_ratio = 1.0 - float(
                        semi_ignore_mask.sum()) / semi_ignore_mask.size
                    print('semi ratio: {:.4f}'.format(semi_ratio))

                    if semi_ratio == 0.0:
                        loss_semi_value += 0
                    else:
                        semi_gt = torch.FloatTensor(semi_gt)

                        loss_semi = args.lambda_semi * loss_calc(pred, semi_gt)
                        loss_semi = loss_semi / args.iter_size
                        loss_semi_value += loss_semi.cpu().detach().numpy(
                        ).item() / args.lambda_semi
                        loss_semi += loss_semi_adv
                        loss_semi.backward()

            else:
                loss_semi = None
                loss_semi_adv = None

            # train with source (labeled batch)

            try:
                _, batch = trainloader_iter.__next__()
            except StopIteration:
                trainloader_iter = enumerate(train_loader)
                _, batch = trainloader_iter.__next__()

            images = batch['image']
            labels = batch['mask']
            images = images.to(device=device, dtype=torch.float32)
            labels = labels.to(device=device, dtype=torch.long)
            labels = labels.squeeze(1)
            ignore_mask = (labels.cpu().numpy() == 255)
            #pred = interp(net(images))

            pred = net(images)
            criterion = nn.CrossEntropyLoss()
            loss_seg = criterion(pred, labels)
            #loss_seg = loss_calc(pred, labels)

            D_out = interp(model_D(F.softmax(pred, dim=1)))

            targetr = Variable(torch.ones(D_out.shape))
            targetr = Variable(torch.FloatTensor(targetr)).cuda()
            #loss_adv_pred = bce_loss(D_out, targetr)

            # Adversarial term only after the warm-up phase.
            if i_iter > args.semi_start_adv:
                loss_adv_pred = bce_loss(D_out, targetr)
                loss = loss_seg + args.lambda_adv_pred * loss_adv_pred
                loss_adv_pred_value += loss_adv_pred.cpu().detach().numpy(
                ).item() / args.iter_size
            else:
                loss = loss_seg

            # proper normalization
            loss = loss / args.iter_size
            loss.backward()
            optimizer.step()
            loss_seg_value += loss_seg.cpu().detach().numpy().item(
            ) / args.iter_size
            #loss_adv_pred_value += loss_adv_pred.cpu().detach().numpy().item()/args.iter_size

            # train D (every 3rd iteration after the warm-up phase)

            # bring back requires_grad
            if i_iter > args.semi_start_adv and i_iter % 3 == 0:
                for param in net.parameters():
                    param.requires_grad = False
                for param in model_D.parameters():
                    param.requires_grad = True

                # train with pred (detached so G gets no gradients)
                pred = pred.detach()

                if args.D_remain:
                    pred = torch.cat((pred, pred_remain), 0)
                #ignore_mask = np.concatenate((ignore_mask,ignore_mask_remain), axis = 0)

                D_out = interp(model_D(F.softmax(pred, dim=1)))
                # noisy "fake" labels in [0, 0.1) for label smoothing
                #targetf = Variable(torch.zeros(D_out.shape))
                targetf = 0.1 * np.random.rand(D_out.shape[0], D_out.shape[1],
                                               D_out.shape[2], D_out.shape[3])
                targetf = Variable(torch.FloatTensor(targetf)).cuda()
                loss_D = bce_loss(D_out, targetf)
                loss_D = loss_D / args.iter_size / 2
                loss_D.backward()
                loss_D_value += loss_D.data.cpu().detach().numpy().item()

                # train with gt
                # get gt labels
                try:
                    _, batch = trainloader_iter.__next__()
                except StopIteration:
                    trainloader_iter = enumerate(train_loader)
                    _, batch = trainloader_iter.__next__()

                labels_gt = batch['mask']
                D_gt_v = Variable(one_hot(labels_gt)).cuda()
                ignore_mask_gt = (labels_gt.numpy() == 255).squeeze(axis=1)

                D_out = interp(model_D(D_gt_v))
                # noisy "real" labels in [0.9, 1.0)
                #targetr = Variable(torch.ones(D_out.shape))
                targetr = 0.1 * np.random.rand(D_out.shape[0], D_out.shape[1],
                                               D_out.shape[2],
                                               D_out.shape[3]) + 0.9
                targetr = Variable(torch.FloatTensor(targetr)).cuda()
                loss_D = bce_loss(D_out, targetr)
                loss_D = loss_D / args.iter_size / 2
                loss_D.backward()
                optimizer_D.step()
                loss_D_value += loss_D.cpu().detach().numpy().item()
        scheduler.step()

        print(
            'iter = {0:8d}/{1:8d}, loss_seg = {2:.3f}, loss_adv_p = {3:.3f}, loss_D = {4:.3f}, loss_semi = {5:.3f}, loss_semi_adv = {6:.3f}'
            .format(i_iter, args.num_steps, loss_seg_value,
                    loss_adv_pred_value, loss_D_value, loss_semi_value,
                    loss_semi_adv_value))
        '''
        if i_iter >= args.num_steps-1:
            print 'save model ...'
            torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'VOC_'+str(args.num_steps)+'.pth'))
            torch.save(model_D.state_dict(),osp.join(args.snapshot_dir, 'VOC_'+str(args.num_steps)+'_D.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter!=0:
            print 'taking snapshot ...'
            torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'VOC_'+str(i_iter)+'.pth'))
            torch.save(model_D.state_dict(),osp.join(args.snapshot_dir, 'VOC_'+str(i_iter)+'_D.pth'))
        '''
        # save checkpoints
        if save_cp and (i_iter % 1000) == 0 and (i_iter != 0):
            try:
                os.mkdir(DIR_CHECKPOINTS)
                logging.info('Created checkpoint directory')
            except OSError:
                pass
            torch.save(net.state_dict(),
                       DIR_CHECKPOINTS + 'i_iter_%d.pth' % (i_iter + 1))
            logging.info('Checkpoint %d saved !' % (i_iter + 1))

        # periodic validation + best-accuracy bookkeeping
        if (i_iter % 1000 == 0) and (i_iter != 0):
            val_score, accuracy, dice_avr, dice_panck, dice_nuclei, dice_lcell = eval_net(
                net, val_loader, device, n_val)
            logging.info('Validation cross entropy: {}'.format(val_score))
            if accuracy > best_acc:
                best_acc = accuracy
            # Fixed: use a context manager; the original wrote
            # `result_file.close` (no call), so the file was never closed.
            with open('result.txt', 'a', encoding='utf-8') as result_file:
                result_file.write('best_acc = ' + str(best_acc) + '\n' +
                                  'iter = ' + str(i_iter) + '\n')
Ejemplo n.º 5
0
def train(train_loader, val_loader, class_weights, class_encoding):
    """Train a DeepLabV3 segmentation model and return it.

    Args:
        train_loader: DataLoader over the training split.
        val_loader: DataLoader over the validation split.
        class_weights: per-class weight tensor for the cross-entropy loss.
        class_encoding: ordered mapping of class name -> color; its length
            defines the number of output classes.

    Returns:
        The trained model (best checkpoint is also saved via utils).
    """
    print("\nTraining...\n")

    num_classes = len(class_encoding)

    # Build the network (original comment said "ENet", but a DeepLabV3 is built).
    model = DeepLabV3(num_classes)
    # model.load_state_dict(torch.load("save/model_13_2_2_2_epoch_580.pth"))
    # model.aspp.conv_1x1_4 = torch.nn.Conv2d(256, num_classes, kernel_size=1)
    # Print the model to sanity-check its structure.
    print(model)

    # Weighted cross-entropy loss.
    criterion = nn.CrossEntropyLoss(weight=class_weights)

    # Adam as the optimizer
    optimizer = optim.Adam(model.parameters(),
                           lr=args.learning_rate,
                           weight_decay=args.weight_decay)

    # Learning-rate decay:
    # lr_decay_epochs: period of the step decay.
    # lr_decay: multiplicative decay factor.
    lr_updater = lr_scheduler.StepLR(optimizer, args.lr_decay_epochs,
                                     args.lr_decay)

    # Evaluation metric (IoU); no label index is ignored.
    # if not args.ignore_unlabeled:
    #     ignore_index = list(class_encoding).index('unlabeled')
    # else:
    #     ignore_index = None
    ignore_index = None
    metric = IoU(num_classes, ignore_index=ignore_index)

    if use_cuda:
        print("model使用GPU")
        model = model.cuda()
        criterion = criterion.cuda()

    # Optionally resume from a checkpoint.
    if args.resume:
        model, optimizer, start_epoch, best_miou = utils.load_checkpoint(
            model, optimizer, args.save_dir, args.name)
        print("Resuming from model: Start epoch = {0} "
              "| Best mean IoU = {1:.4f}".format(start_epoch, best_miou))
    else:
        start_epoch = 0
        best_miou = 0

    # Start training.
    print()
    train = Train(model, train_loader, optimizer, criterion, metric, use_cuda)
    val = Test(model, val_loader, criterion, metric, use_cuda)
    for epoch in range(start_epoch, args.epochs):
        print(">>>> [Epoch: {0:d}] Training".format(epoch))

        # Step the LR schedule, then run one training epoch.
        # NOTE(review): PyTorch >= 1.1 expects scheduler.step() AFTER the
        # optimizer steps — confirm against the installed torch version.
        lr_updater.step()
        epoch_loss, (iou, miou) = train.run_epoch(args.print_step)

        # Report epoch loss and mean IoU.
        print(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
              format(epoch, epoch_loss, miou))

        # Validate every 10 epochs and on the final epoch.
        if (epoch + 1) % 10 == 0 or epoch + 1 == args.epochs:
            print(">>>> [Epoch: {0:d}] Validation".format(epoch))

            loss, (iou, miou) = val.run_epoch(args.print_step)

            print(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
                  format(epoch, loss, miou))

            # Print per class IoU on last epoch or if best iou
            if epoch + 1 == args.epochs or miou > best_miou:
                for key, class_iou in zip(class_encoding.keys(), iou):
                    print("{0}: {1:.4f}".format(key, class_iou))

            # Save the model if it's the best thus far
            if miou > best_miou:
                print("\nBest model thus far. Saving...\n")
                best_miou = miou
                utils.save_checkpoint(model, optimizer, epoch + 1, best_miou,
                                      args)

    return model
Ejemplo n.º 6
0
    # 导入需要的数据
    if args.dataset.lower() == 'lane':
        from data_provider.Lane_dataset import Lane_dataset as dataset

    loaders, w_class, class_encoding = load_dataset(dataset)
    train_loader, val_loader, test_loader = loaders

    if args.mode.lower() in {'train', 'full'}:
        model = train(train_loader, val_loader, w_class, class_encoding)
        if args.mode.lower() == 'full':
            test(model, test_loader, w_class, class_encoding)
    elif args.mode.lower() == 'test':
        # 初始化新的 ENet model
        num_classes = len(class_encoding)
        model = DeepLabV3(num_classes)
        # model.load_state_dict(torch.load("save/model_13_2_2_2_epoch_580.pth"))
        # model.aspp.conv_1x1_4 = torch.nn.Conv2d(256, num_classes, kernel_size=1)
        if use_cuda:
            model = model.cuda()

        # 初始化优化器
        # checkpoint
        optimizer = optim.Adam(model.parameters())

        # 加载以前存储过的ENet模型
        model = utils.load_checkpoint(model, optimizer, args.save_dir,
                                      args.name)[0]
        print(model)
        test(model, test_loader, w_class, class_encoding)
    else:
Ejemplo n.º 7
0
def prediction( weight):
    """Run inference with a trained change-detection DeepLabV3 checkpoint.

    Loads the checkpoint at ``weight`` (expects a dict with 'state_dict'),
    runs the model over the validation split rescaled to 960x960, thresholds
    the sigmoid output at cfg.THRESH, and writes binary masks to ./change/.

    Args:
        weight: path to the .pth checkpoint file to load.
    """
    best_metric = 0  # NOTE(review): unused
    # NOTE(review): these two transforms are built but never used below —
    # only test_transform_det is passed to the dataset.
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    test_transform_det = trans.Compose([
        trans.Scale((960,960)),
    ])
    model = DeepLabV3(model_id=1,project_dir=cfg.BASE_PATH)
    # model=torch.nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()
    # model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(weight).items()})
    # model.load_state_dict(torch.load(weight))
    checkpoint = torch.load(weight)
    model.load_state_dict(checkpoint['state_dict'])

    # Inference is run on the *validation* split here (test/train variants
    # are kept commented out below for quick switching).
    # test_data = my_dataset.Dataset(cfg.TEST_DATA_PATH, '',cfg.TEST_TXT_PATH, 'test', transform=True, transform_med=test_transform_det)
    test_data = my_dataset.Dataset(cfg.VAL_DATA_PATH, cfg.VAL_LABEL_PATH,cfg.VAL_TXT_PATH, 'val', transform=True, transform_med=test_transform_det)
    # test_data = my_dataset.Dataset(cfg.TRAIN_DATA_PATH, cfg.TRAIN_LABEL_PATH,cfg.TRAIN_TXT_PATH, 'train', transform=True, transform_med=test_transform_det)
    test_dataloader = DataLoader(test_data, batch_size=cfg.TEST_BATCH_SIZE, shuffle=False, num_workers=8, pin_memory=True)
    crop = 0  # tiled-crop inference path is disabled (see commented block below)

    for batch_idx, val_batch in enumerate(test_dataloader):
        model.eval()
        #
        # batch_x1, batch_x2, _, filename, h, w, green_mask1, green_mask2 = val_batch
        batch_det_img, _, filename, h, w,_,green_mask2 = val_batch
        # green_mask1 = green_mask1.view(output_w, output_h, -1).data.cpu().numpy()
        # Derive the output mask filename from the input image path.
        filename = filename[0].split('/')[-1].replace('image','mask_2017')
        if crop:
            pass
            # outputs = np.zeros((cfg.TEST_BATCH_SIZE,1,960, 960))
            #
            # while (i + w // rows <= w):
            #     j = 0
            #     while (j + h // cols <= h):
            #         batch_x1_ij = batch_x1[0, :, i:i + w // rows, j:j + h // cols]
            #         batch_x2_ij = batch_x2[0, :, i:i + w // rows, j:j + h // cols]
            #         # batch_y_ij = batch_y[batch_idx,: , i:i + w // rows, j:j + h // cols]
            #         batch_x1_ij = np.expand_dims(batch_x1_ij, axis=0)
            #         batch_x2_ij = np.expand_dims(batch_x2_ij, axis=0)
            #         batch_x1_ij, batch_x2_ij = Variable(torch.from_numpy(batch_x1_ij)).cuda(), Variable(
            #             torch.from_numpy(batch_x2_ij)).cuda()
            #         with torch.no_grad():
            #             output = model(batch_x1_ij, batch_x2_ij)
            #         output_w, output_h = output.shape[-2:]
            #         output = torch.sigmoid(output).view(-1, output_w, output_h)
            #
            #         output = output.data.cpu().numpy()  # .resize([80, 80, 1])
            #         output = np.where(output > cfg.THRESH, 255, 0)
            #         outputs[0, :, i:i + w // rows, j:j + h // cols] = output
            #
            #         j += h // cols
            #     i += w // rows
            #
            #
            # if not os.path.exists('./change'):
            #     os.mkdir('./change')
            # print('./change/{}'.format(filename))
            # cv2.imwrite('./change/crop_{}'.format(filename), outputs[0,0,:,:])
        else:
            batch_det_img = Variable(batch_det_img).cuda()
            with torch.no_grad():
                outputs = model(batch_det_img)

            output_w, output_h = outputs[0].shape[-2:]

            # green_mask2 = green_mask2.view(output_w, output_h, -1).data.cpu().numpy()

            # Sigmoid -> HxWxC numpy array -> binary {0, 255} mask.
            output = torch.sigmoid(outputs).view(output_w, output_h, -1).data.cpu().numpy()
            # print(output.min(),output.max())
            output = np.where((output  > cfg.THRESH) , 255, 0)
            if not os.path.exists('./change'):
                os.mkdir('./change')

            print('./change/{}'.format(filename))
            cv2.imwrite('./change/{}'.format(filename), output)
Ejemplo n.º 8
0
def train(FLAGS):
    """Train DeepLabV3 on a CamVid-style segmentation dataset.

    Reads all hyperparameters and data paths from ``FLAGS``, computes
    class weights over the full training set, then runs the train/eval
    loop, checkpointing every ``save_every`` epochs.

    Args:
        FLAGS: parsed argument namespace providing cuda (device),
            batch_size, epochs, learning_rate, print_every, eval_every,
            save_every, num_classes, weight_decay and the four train/val
            input/label paths.
    """
    # Defining the hyperparameters
    device = FLAGS.cuda
    batch_size = FLAGS.batch_size
    epochs = FLAGS.epochs
    lr = FLAGS.learning_rate
    print_every = FLAGS.print_every
    eval_every = FLAGS.eval_every
    save_every = FLAGS.save_every
    nc = FLAGS.num_classes
    wd = FLAGS.weight_decay
    ip = FLAGS.input_path_train
    lp = FLAGS.label_path_train
    ipv = FLAGS.input_path_val
    lpv = FLAGS.label_path_val
    print('[INFO]Defined all the hyperparameters successfully!')

    # Class weights are computed once over the entire training set.
    print('[INFO]Starting to define the class weights...')
    pipe = loader(ip, lp, batch_size='all')
    class_weights = get_class_weights(pipe, nc)
    print('[INFO]Fetched all class weights successfully!')

    # Get an instance of the model
    deeplabv3 = DeepLabV3(nc)
    print('[INFO]Model Instantiated!')

    # Move the model to cuda if available
    deeplabv3 = deeplabv3.to(device)

    # Weighted cross-entropy loss + Adam optimizer.
    criterion = nn.CrossEntropyLoss(
        weight=torch.FloatTensor(class_weights).to(device))
    optimizer = torch.optim.Adam(deeplabv3.parameters(),
                                 lr=lr,
                                 weight_decay=wd)
    print('[INFO]Defined the loss function and the optimizer')

    # Training Loop starts
    print('[INFO]Staring Training...')
    print()

    train_losses = []
    eval_losses = []

    # Batch counts assume the CamVid split sizes (367 train / 101 val).
    bc_train = 367 // batch_size
    bc_eval = 101 // batch_size

    pipe = loader(ip, lp, batch_size)
    eval_pipe = loader(ipv, lpv, batch_size)

    # (removed the no-op `epochs = epochs` self-assignment)
    for e in range(1, epochs + 1):

        train_loss = 0
        print('-' * 15, 'Epoch %d' % e, '-' * 15)

        deeplabv3.train()

        for _ in tqdm(range(bc_train)):
            X_batch, mask_batch = next(pipe)

            #assert (X_batch >= 0. and X_batch <= 1.0).all()

            X_batch, mask_batch = X_batch.to(device), mask_batch.to(device)

            optimizer.zero_grad()

            out = deeplabv3(X_batch.float())

            loss = criterion(out, mask_batch.long())
            loss.backward()
            optimizer.step()

            train_loss += loss.item()

        print()
        train_losses.append(train_loss)

        # Fixed off-by-one: epochs are 1-based, so test `e % print_every`
        # (was `(e + 1) % print_every`, inconsistent with eval/save below).
        if e % print_every == 0:
            print('Epoch {}/{}...'.format(e, epochs),
                  'Loss {:6f}'.format(train_loss))

        if e % eval_every == 0:
            with torch.no_grad():
                deeplabv3.eval()

                eval_loss = 0

                for _ in tqdm(range(bc_eval)):
                    inputs, labels = next(eval_pipe)

                    inputs, labels = inputs.to(device), labels.to(device)
                    out = deeplabv3(inputs)

                    loss = criterion(out, labels.long())

                    eval_loss += loss.item()

                print()
                print('Loss {:6f}'.format(eval_loss))

                eval_losses.append(eval_loss)

        if e % save_every == 0:
            checkpoint = {'epochs': e, 'state_dict': deeplabv3.state_dict()}
            torch.save(checkpoint,
                       './ckpt-deeplabv3-{}-{}.pth'.format(e, train_loss))
            print('Model saved!')

        # Fixed: report the mean over epochs completed so far (was divided
        # by the total planned epoch count) and the current epoch number
        # (was `e + 1`).
        print('Epoch {}/{}...'.format(e, epochs),
              'Total Mean Loss: {:6f}'.format(
                  sum(train_losses) / len(train_losses)))

    print('[INFO]Training Process complete!')
Ejemplo n.º 9
0
                    type=str,
                    default='./data/Training/',
                    help='Image range for test')

# Parse CLI arguments (the parser is defined above this chunk).
args = parser.parse_args()

DATA_FOLDER = args.data_folder
SAVE_TEST_IMAGES = False  # flip to True to dump per-slice test images
SAVE_TEST_3D = True  # write 3-D results
TEST_SAVE_DIR = './results/'

print(args.test_img_range)
# Image-index range used for testing — presumably (start, end); confirm
# against the argparse definition above.
mrange = tuple(args.test_img_range)

# Load the model: one output channel per class plus one for background.
model = DeepLabV3(num_classes=len(CLASSES) + 1)
model.load_state_dict(torch.load(args.model_folder))
model.cuda()
model.eval()


def save_images(img,
                subject_num,
                slice_depth,
                multiplier=1.0,
                real_img=False,
                tag=''):
    print("SAVING IMAGES")
    #for i in range(img.size()[0]):

    new_arr = (img[:, :] * multiplier).cpu().byte()