Exemple #1
0
def prediction(img1, img2, label, weight):
    """Run change-detection inference with a SiamUNetU model.

    Loads the checkpoint at ``weight``, runs the val split (resized to
    960x960) through the model, thresholds the sigmoid output at
    ``cfg.THRESH`` and writes a 0/255 mask per image under ./change/.

    Parameters
    ----------
    img1, img2, label : unused; kept so existing callers keep working.
    weight : path to a checkpoint dict containing a 'state_dict' entry.
    """
    print(weight)  # bug fix: original printed the literal string "weight"

    test_transform_det = trans.Compose([
        trans.Scale((960, 960)),
    ])
    model = SiamUNetU(in_ch=3)
    model = torch.nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()
    checkpoint = torch.load(weight)
    model.load_state_dict(checkpoint['state_dict'])

    test_data = my_dataset.Dataset(cfg.VAL_DATA_PATH,
                                   cfg.VAL_LABEL_PATH,
                                   cfg.VAL_TXT_PATH,
                                   'val',
                                   transform=True,
                                   transform_med=test_transform_det)
    test_dataloader = DataLoader(test_data,
                                 batch_size=cfg.TEST_BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=8,
                                 pin_memory=True)
    crop = 0  # tiled inference path; disabled by default
    rows = 12
    cols = 12
    model.eval()  # hoisted: no need to switch mode on every batch
    for batch_idx, val_batch in enumerate(test_dataloader):
        batch_x1, batch_x2, _, filename, h, w = val_batch
        # image_xxx.png -> mask_2017_xxx.tif
        filename = filename[0].split('/')[-1].replace('image',
                                                      'mask_2017').replace(
                                                          '.png', '.tif')
        if crop:
            outputs = np.zeros((cfg.TEST_BATCH_SIZE, 1, 960, 960))
            # bug fix: i is reset for every batch (the original initialised
            # it once outside the loop, so only the first batch was tiled).
            # bug fix: the in-batch index is 0, not batch_idx — batch_idx is
            # the global batch counter and exceeds the batch dimension once
            # batch_idx >= TEST_BATCH_SIZE (matches the commented-out
            # reference implementation elsewhere in this file's siblings).
            i = 0
            while i + w // rows <= w:
                j = 0
                while j + h // cols <= h:
                    batch_x1_ij = batch_x1[0, :, i:i + w // rows,
                                           j:j + h // cols]
                    batch_x2_ij = batch_x2[0, :, i:i + w // rows,
                                           j:j + h // cols]
                    batch_x1_ij = np.expand_dims(batch_x1_ij, axis=0)
                    batch_x2_ij = np.expand_dims(batch_x2_ij, axis=0)
                    batch_x1_ij = Variable(
                        torch.from_numpy(batch_x1_ij)).cuda()
                    batch_x2_ij = Variable(
                        torch.from_numpy(batch_x2_ij)).cuda()
                    with torch.no_grad():
                        output = model(batch_x1_ij, batch_x2_ij)
                    output_w, output_h = output.shape[-2:]
                    output = torch.sigmoid(output).view(output_w, output_h, -1)
                    output = output.data.cpu().numpy()
                    # binarise: probability above THRESH becomes white (255)
                    output = np.where(output > cfg.THRESH, 255, 0)
                    outputs[0, :, i:i + w // rows,
                            j:j + h // cols] = output
                    j += h // cols
                i += w // rows

            print(batch_idx)
            if not os.path.exists('./change'):
                os.mkdir('./change')
            print('./change/{}'.format(filename))
            cv2.imwrite('./change/crop_{}'.format(filename),
                        outputs[0, 0, :, :])
        else:
            batch_x1, batch_x2 = Variable(batch_x1).cuda(), Variable(
                batch_x2).cuda()
            with torch.no_grad():
                output = model(batch_x1, batch_x2)
            output_w, output_h = output.shape[-2:]
            # NOTE(review): view(w, h, -1) only works for batch size 1 —
            # confirm TEST_BATCH_SIZE == 1
            output = torch.sigmoid(output).view(output_w, output_h, -1)
            output = output.data.cpu().numpy()
            output = np.where(output > cfg.THRESH, 255, 0)
            if not os.path.exists('./change'):
                os.mkdir('./change')
            print('./change/{}'.format(filename))
            cv2.imwrite('./change/{}'.format(filename), output)
Exemple #2
0
def prediction(weight):
    """Run SiamUNet change-detection on the test split and write each
    predicted 960x960 mask as a single-band GeoTIFF.

    Parameters
    ----------
    weight : path to a checkpoint dict containing a 'state_dict' entry.
    """
    print(weight)  # bug fix: original printed the literal string "weight"

    test_transform_det = trans.Compose([
        trans.Scale(cfg.TEST_TRANSFROM_SCALES),
    ])
    model = SiamUNet()
    if torch.cuda.is_available():
        model.cuda()
        print('gpu')

    checkpoint = torch.load(weight)
    model.load_state_dict(checkpoint['state_dict'])
    test_data = my_dataset.Dataset(cfg.TEST_DATA_PATH,
                                   cfg.TEST_LABEL_PATH,
                                   cfg.TEST_TXT_PATH,
                                   'val',
                                   transform=True,
                                   transform_med=test_transform_det)
    test_dataloader = DataLoader(test_data,
                                 batch_size=cfg.TEST_BATCH_SIZE,
                                 shuffle=False,
                                 num_workers=8,
                                 pin_memory=True)

    model.eval()  # hoisted: no need to switch mode on every batch
    for batch_idx, val_batch in enumerate(test_dataloader):
        batch_x1, batch_x2, mask, im_name, h, w = val_batch
        print('mask_type{}'.format(mask.type))

        output = None
        with torch.no_grad():
            batch_x1, batch_x2 = Variable(batch_x1).cuda(), Variable(
                batch_x2).cuda()
            try:
                print('try')
                output = model(batch_x1, batch_x2)
                del batch_x1, batch_x2
            except RuntimeError as exception:
                if 'out of memory' in str(exception):
                    print('WARNING: out of memory')
                    if hasattr(torch.cuda, 'empty_cache'):
                        torch.cuda.empty_cache()
                else:
                    print('exception')
                    raise exception
        if output is None:
            # bug fix: after an OOM the original fell through with `output`
            # unbound and crashed with NameError below; skip the batch.
            continue

        output_w, output_h = output.shape[-2:]
        # NOTE(review): view(w, h, -1) only works for batch size 1 —
        # confirm TEST_BATCH_SIZE == 1
        output = torch.sigmoid(output).view(output_w, output_h, -1)
        output = output.data.cpu().numpy()
        output = np.where(output > cfg.THRESH, 255, 0)

        print(im_name)
        # NOTE(review): assumes paths like 'dir/a_b_c_d_<id>.ext' so that
        # the 5th '_'-separated token is the tile id — confirm with dataset
        im_n = im_name[0].split('/')[1].split('.')[0].split('_')
        im__path = 'final_result/weight50_dmc/mask_2017_2018_960_960_' + im_n[
            4] + '.tif'

        im_data = np.squeeze(output)
        print(im_data.shape)
        im_data = np.array([im_data])  # add band axis: (1, H, W)
        print(im_data.shape)
        im_geotrans = (0.0, 1.0, 0.0, 0.0, 0.0, 1.0)  # identity geotransform
        im_proj = ''  # no projection metadata available here
        im_width = 960
        im_height = 960
        im_bands = 1
        datatype = gdal.GDT_Byte
        driver = gdal.GetDriverByName("GTiff")
        dataset = driver.Create(im__path, im_width, im_height, im_bands,
                                datatype)
        # bug fix: only write when Create() succeeded — GDAL returns None on
        # failure and the original still called GetRasterBand on it; the band
        # loop also no longer shadows an outer `i`.
        if dataset is not None:
            print("----{}".format(im__path))
            dataset.SetGeoTransform(im_geotrans)
            dataset.SetProjection(im_proj)
            for band in range(im_bands):
                dataset.GetRasterBand(band + 1).WriteArray(im_data[band])
        del dataset  # closes/flushes the GDAL dataset
Exemple #3
0
#sys.path.insert(0, os.path.join('exps', 'voc'))
import config as cfg

# Training requires a CUDA-capable PyTorch build; fail fast otherwise.
assert torch.cuda.is_available(), 'Error: CUDA not found!'
best_loss = float('inf')  # best validation loss seen so far
start_epoch = 0  # starting epoch index (resume support would override this)
lr = cfg.lr

print('Preparing data..')

# Training transform: optional resize is inserted FIRST (before ToTensor),
# then tensor conversion and per-channel normalization.
train_transform_list = [
    transforms.ToTensor(),
    transforms.Normalize(cfg.mean, cfg.std)
]
if cfg.scale is not None:
    train_transform_list.insert(0, transforms.Scale(cfg.scale))
train_transform = transforms.Compose(train_transform_list)
# Validation transform: no resize, only tensor conversion + normalization.
val_transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(cfg.mean, cfg.std)])

# VOC-style detection dataset.
# NOTE(review): DataEncoder() presumably encodes ground-truth boxes into
# anchor targets — confirm against the VocLikeDataset implementation.
trainset = VocLikeDataset(image_dir=cfg.image_dir,
                          annotation_file=cfg.annotation_file,
                          imageset_fn=cfg.train_imageset_fn,
                          image_ext=cfg.image_ext,
                          classes=cfg.classes,
                          encoder=DataEncoder(),
                          transform=train_transform)
valset = VocLikeDataset(image_dir=cfg.image_dir,
                        annotation_file=cfg.annotation_file,
                        imageset_fn=cfg.val_imageset_fn,
Exemple #4
0
def main():
    """Train a DeepLabV3 change-detection model.

    Runs a quick validation pass every 5 training batches, drives a
    ReduceLROnPlateau scheduler from the validation loss, and saves a
    checkpoint every 10 epochs plus a final one.
    """
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])

    train_data = my_dataset.Dataset(cfg.TRAIN_DATA_PATH,
                                    cfg.TRAIN_LABEL_PATH,
                                    cfg.TRAIN_TXT_PATH,
                                    'train',
                                    transform=True,
                                    transform_med=train_transform_det)
    val_data = my_dataset.Dataset(cfg.VAL_DATA_PATH,
                                  cfg.VAL_LABEL_PATH,
                                  cfg.VAL_TXT_PATH,
                                  'val',
                                  transform=True,
                                  transform_med=val_transform_det)
    train_dataloader = DataLoader(train_data,
                                  batch_size=cfg.BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=1,
                                  pin_memory=True)
    val_dataloader = DataLoader(val_data,
                                batch_size=cfg.BATCH_SIZE,
                                shuffle=False,
                                num_workers=1,
                                pin_memory=True)

    model = DeepLabV3(model_id=1, project_dir=cfg.BASE_PATH)
    if cfg.RESUME:
        checkpoint = torch.load(cfg.TRAINED_LAST_MODEL)
        model.load_state_dict(checkpoint['state_dict'])
        print('resume success \n')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # DataParallel splits dim 0 across the available GPUs.
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()

    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.INIT_LEARNING_RATE,
                           weight_decay=cfg.DECAY)
    # LR is reduced 5x when the (sub-sampled) validation loss plateaus.
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode='min',
                                  factor=0.2,
                                  patience=5,
                                  verbose=True,
                                  threshold=0.0001,
                                  threshold_mode='rel',
                                  cooldown=2,
                                  eps=1e-08)
    for epoch in range(cfg.EPOCH):
        print('epoch {}'.format(epoch + 1))
        for batch_idx, train_batch in enumerate(train_dataloader):
            model.train()  # restore train mode after any validation pass
            batch_det_img, batch_y, _, _, _, _, _ = train_batch
            batch_det_img, batch_y = Variable(batch_det_img).cuda(), Variable(
                batch_y).cuda()
            output = model(batch_det_img)
            del batch_det_img  # free GPU memory before the backward pass
            loss = calc_loss(output, batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Cheap "is it learning" signal: full val pass every 5 batches.
            if batch_idx % 5 == 0:
                model.eval()
                val_loss = 0
                # bug fix: validation now runs under no_grad so the forward
                # passes do not build autograd graphs (GPU memory blow-up);
                # the dangling `del val_out, v_batch_y` (NameError on an
                # empty val loader) was dropped.
                with torch.no_grad():
                    for v_batch_idx, val_batch in enumerate(val_dataloader):
                        v_batch_det_img, v_batch_y, _, _, _, _, _ = val_batch
                        v_batch_det_img, v_batch_y = Variable(
                            v_batch_det_img).cuda(), Variable(v_batch_y).cuda()
                        val_out = model(v_batch_det_img)
                        del v_batch_det_img
                        val_loss += float(calc_loss(val_out, v_batch_y))
                scheduler.step(val_loss)
                print("Train Loss: {:.6f}  Val Loss: {:.10f}".format(
                    loss, val_loss))

        if (epoch + 1) % 10 == 0:
            torch.save({'state_dict': model.state_dict()},
                       os.path.join(
                           cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                           'model_tif_deeplab18_bce_240*240_' +
                           str(epoch + 1) + '.pth'))
    torch.save({'state_dict': model.state_dict()},
               os.path.join(cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                            'model_tif_deeplab18_bce_240*240_last.pth'))
Exemple #5
0
def main():
    """Train a SiamUNet change-detection model.

    The model returns four deep-supervision outputs combined by
    calc_loss_L4; a quick validation pass runs every 5 training batches,
    and a StepLR schedule decays the learning rate 10x every 8 epochs.
    Checkpoints are saved every 5 epochs plus a final one.
    """
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])

    train_data = my_dataset.Dataset(cfg.TRAIN_DATA_PATH,
                                    cfg.TRAIN_LABEL_PATH,
                                    cfg.TRAIN_TXT_PATH,
                                    'train',
                                    transform=True,
                                    transform_med=train_transform_det)
    val_data = my_dataset.Dataset(cfg.VAL_DATA_PATH,
                                  cfg.VAL_LABEL_PATH,
                                  cfg.VAL_TXT_PATH,
                                  'val',
                                  transform=True,
                                  transform_med=val_transform_det)
    train_dataloader = DataLoader(train_data,
                                  batch_size=cfg.BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=8,
                                  pin_memory=True)
    val_dataloader = DataLoader(val_data,
                                batch_size=cfg.BATCH_SIZE,
                                shuffle=False,
                                num_workers=8,
                                pin_memory=True)

    model = SiamUNet(in_ch=3)
    if cfg.RESUME:
        checkpoint = torch.load(cfg.TRAINED_LAST_MODEL)
        model.load_state_dict(checkpoint['state_dict'])
        print('resume success \n')
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # DataParallel splits dim 0 across the available GPUs.
        model = nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()

    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.INIT_LEARNING_RATE,
                           weight_decay=cfg.DECAY)
    scheduler = StepLR(optimizer, step_size=8, gamma=0.1)
    for epoch in range(cfg.EPOCH):
        print('epoch {}'.format(epoch + 1))
        for batch_idx, train_batch in enumerate(train_dataloader):
            model.train()  # restore train mode after any validation pass
            batch_x1, batch_x2, batch_y, _, _, _ = train_batch
            batch_x1, batch_x2, batch_y = Variable(batch_x1).cuda(), Variable(
                batch_x2).cuda(), Variable(batch_y).cuda()
            outputs = model(batch_x1, batch_x2)
            del batch_x1, batch_x2  # free GPU memory before the backward pass
            loss = calc_loss_L4(outputs[0], outputs[1], outputs[2], outputs[3],
                                batch_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Cheap "is it learning" signal: full val pass every 5 batches.
            if batch_idx % 5 == 0:
                model.eval()
                val_loss = 0
                # bug fix: validation now runs under no_grad so the forward
                # passes do not build autograd graphs (GPU memory blow-up);
                # the dangling `del val_outs, v_batch_y` (NameError on an
                # empty val loader) was dropped.
                with torch.no_grad():
                    for v_batch_idx, val_batch in enumerate(val_dataloader):
                        v_batch_x1, v_batch_x2, v_batch_y, _, _, _ = val_batch
                        v_batch_x1, v_batch_x2, v_batch_y = Variable(
                            v_batch_x1).cuda(), Variable(
                                v_batch_x2).cuda(), Variable(v_batch_y).cuda()
                        val_outs = model(v_batch_x1, v_batch_x2)
                        del v_batch_x1, v_batch_x2
                        val_loss += float(
                            calc_loss_L4(val_outs[0], val_outs[1], val_outs[2],
                                         val_outs[3], v_batch_y))
                print("Train Loss: {:.6f}  Val Loss: {:.10f}".format(
                    loss, val_loss))

        # bug fix: since PyTorch 1.1 the LR scheduler must step AFTER the
        # epoch's optimizer updates; the original stepped at the top of the
        # loop, which skips the initial learning rate and warns.
        scheduler.step()

        if (epoch + 1) % 5 == 0:
            torch.save({'state_dict': model.state_dict()},
                       os.path.join(cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                                    'model_tif_' + str(epoch + 1) + '.pth'))
    torch.save({'state_dict': model.state_dict()},
               os.path.join(cfg.SAVE_MODEL_PATH, cfg.TRAIN_LOSS,
                            'model_tif_last.pth'))
Exemple #6
0
def prediction( weight):
    """Run single-stream DeepLabV3 change prediction and write masks.

    Loads the checkpoint at `weight`, runs the val split resized to
    960x960 through the model, thresholds the sigmoid output at
    cfg.THRESH and writes a 0/255 mask per image under ./change/.
    """

    best_metric = 0  # NOTE(review): unused in this function
    train_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    val_transform_det = trans.Compose([
        trans.Scale(cfg.TRANSFROM_SCALES),
    ])
    # Test-time resize; (960, 960) matches the full tile size.
    test_transform_det = trans.Compose([
        trans.Scale((960,960)),
    ])
    model = DeepLabV3(model_id=1,project_dir=cfg.BASE_PATH)
    # model=torch.nn.DataParallel(model)
    if torch.cuda.is_available():
        model.cuda()
    # model.load_state_dict({k.replace('module.', ''): v for k, v in torch.load(weight).items()})
    # model.load_state_dict(torch.load(weight))
    checkpoint = torch.load(weight)
    model.load_state_dict(checkpoint['state_dict'])

    # test_data = my_dataset.Dataset(cfg.TEST_DATA_PATH, '',cfg.TEST_TXT_PATH, 'test', transform=True, transform_med=test_transform_det)
    test_data = my_dataset.Dataset(cfg.VAL_DATA_PATH, cfg.VAL_LABEL_PATH,cfg.VAL_TXT_PATH, 'val', transform=True, transform_med=test_transform_det)
    # test_data = my_dataset.Dataset(cfg.TRAIN_DATA_PATH, cfg.TRAIN_LABEL_PATH,cfg.TRAIN_TXT_PATH, 'train', transform=True, transform_med=test_transform_det)
    test_dataloader = DataLoader(test_data, batch_size=cfg.TEST_BATCH_SIZE, shuffle=False, num_workers=8, pin_memory=True)
    crop = 0  # tiled (crop) inference is disabled; its code is commented out below

    for batch_idx, val_batch in enumerate(test_dataloader):
        model.eval()
        #
        # batch_x1, batch_x2, _, filename, h, w, green_mask1, green_mask2 = val_batch
        # NOTE(review): batch layout inferred from this unpacking only —
        # confirm against my_dataset.Dataset for the 'val' split.
        batch_det_img, _, filename, h, w,_,green_mask2 = val_batch
        # green_mask1 = green_mask1.view(output_w, output_h, -1).data.cpu().numpy()
        # image_xxx -> mask_2017_xxx (file extension left unchanged here)
        filename = filename[0].split('/')[-1].replace('image','mask_2017')
        if crop:
            pass
            # outputs = np.zeros((cfg.TEST_BATCH_SIZE,1,960, 960))
            #
            # while (i + w // rows <= w):
            #     j = 0
            #     while (j + h // cols <= h):
            #         batch_x1_ij = batch_x1[0, :, i:i + w // rows, j:j + h // cols]
            #         batch_x2_ij = batch_x2[0, :, i:i + w // rows, j:j + h // cols]
            #         # batch_y_ij = batch_y[batch_idx,: , i:i + w // rows, j:j + h // cols]
            #         batch_x1_ij = np.expand_dims(batch_x1_ij, axis=0)
            #         batch_x2_ij = np.expand_dims(batch_x2_ij, axis=0)
            #         batch_x1_ij, batch_x2_ij = Variable(torch.from_numpy(batch_x1_ij)).cuda(), Variable(
            #             torch.from_numpy(batch_x2_ij)).cuda()
            #         with torch.no_grad():
            #             output = model(batch_x1_ij, batch_x2_ij)
            #         output_w, output_h = output.shape[-2:]
            #         output = torch.sigmoid(output).view(-1, output_w, output_h)
            #
            #         output = output.data.cpu().numpy()  # .resize([80, 80, 1])
            #         output = np.where(output > cfg.THRESH, 255, 0)
            #         outputs[0, :, i:i + w // rows, j:j + h // cols] = output
            #
            #         j += h // cols
            #     i += w // rows
            #
            #
            # if not os.path.exists('./change'):
            #     os.mkdir('./change')
            # print('./change/{}'.format(filename))
            # cv2.imwrite('./change/crop_{}'.format(filename), outputs[0,0,:,:])
        else:
            batch_det_img = Variable(batch_det_img).cuda()
            # Inference only: no autograd graph needed.
            with torch.no_grad():
                outputs = model(batch_det_img)

            # Spatial size taken from the first sample of the batch.
            output_w, output_h = outputs[0].shape[-2:]

            # green_mask2 = green_mask2.view(output_w, output_h, -1).data.cpu().numpy()

            # NOTE(review): view(output_w, output_h, -1) over the whole batch
            # only works when TEST_BATCH_SIZE == 1 — confirm.
            output = torch.sigmoid(outputs).view(output_w, output_h, -1).data.cpu().numpy()
            # print(output.min(),output.max())
            # Binarise: probability above THRESH becomes white (255).
            output = np.where((output  > cfg.THRESH) , 255, 0)
            if not os.path.exists('./change'):
                os.mkdir('./change')

            print('./change/{}'.format(filename))
            cv2.imwrite('./change/{}'.format(filename), output)