Example #1
def build_validation_set(train_df, data_path):
    """
    Build a validation dataset from the train dataset. We are
    using stratified sampling to split the dataset and it is sampled
    by taking into accound the coverage of salt beds in the images
    """
    load_func = lambda x: load_mask(x, data_path + 'train/masks')
    train_df['coverage'] = train_df.id.apply(load_func) / pow(101, 2)
    train_df['cuts'] = pd.cut(train_df.coverage,
                              bins=10,
                              labels=list(range(10)))
    train_df = train_df.set_index('id')

    val_ids = stratified_split(train_df)
    train_set = train_df.drop(val_ids).reset_index().id
    val_set = train_df.loc[val_ids].reset_index().id

    return train_set, val_set
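
Note: stratified_split is not defined in this snippet. A minimal sketch of such a helper, assuming it returns the index labels of the validation rows and that scikit-learn is available (both are assumptions, not part of the original code):

from sklearn.model_selection import train_test_split

def stratified_split(df, val_fraction=0.2, seed=42):
    # Hypothetical helper: pick a validation subset stratified on the
    # coverage bins stored in the 'cuts' column and return its index labels.
    _, val_idx = train_test_split(df.index.values,
                                  test_size=val_fraction,
                                  stratify=df['cuts'],
                                  random_state=seed)
    return val_idx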
Example #2
    def __getitem__(self, index):

        img_id = self.train_test_id[index]

        ### load image
        image_file = self.image_path + '%s.h5' % img_id
        img_np = load_image(image_file)
        ### load masks
        mask_np = load_mask(self.image_path, img_id, self.attribute)

        if self.train:
            img_np, mask_np = self.transform_fn(img_np, mask_np)

        img_np = img_np.astype('float32')
        ind = self.mask_ind[index, :]

        return img_np, mask_np, ind
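
A minimal usage sketch, assuming the class above implements the PyTorch Dataset protocol; the construction of 'dataset' is not shown in the snippet and is left as an assumption:

from torch.utils.data import DataLoader

# 'dataset' stands for an instance of the class above.
loader = DataLoader(dataset, batch_size=8, shuffle=True)
for img_batch, mask_batch, ind_batch in loader:
    # the default collate_fn converts the per-sample numpy arrays returned by
    # __getitem__ into stacked torch tensors
    pass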
Example #3
    def __data_generation(self, ids):

        X = np.empty((0, *self.img_size, self.no_channels))  # noqa
        y = np.empty((0, *self.img_size, self.n_classes))  # noqa

        for index, id_ in enumerate(ids):

            # imread returns None for unreadable files, so check before use
            img = cv2.imread(self.img_fps[id_])
            if img is None:
                continue
            img = img[:, :, ::-1]  # BGR -> RGB

            mask = load_mask(img, self.labels[id_])
            if mask is None:
                continue
            mask = cv2.resize(mask, self.img_size)

            # binarize the mask around the threshold
            mask[mask >= self.mask_thres] = 1
            mask[mask < self.mask_thres] = 0

            img = load_image(img, img_size=self.img_size, expand=False)
            if img is None:
                continue

            # add augmentation
            if self.augment is not None:
                aug = self.augment(image=img, mask=mask)
                img = aug["image"]
                mask = aug["mask"]

            img = img.astype(np.float32)
            mask = mask.astype(np.float32)

            X = np.vstack((X, np.expand_dims(img, axis=0)))
            y = np.vstack((y, np.expand_dims(mask, axis=0)))

        X = X.astype(np.float32)
        y = y.astype(np.float32)

        assert X.shape[0] == y.shape[0]
        return X, y
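
A sketch of how such a generator is typically wired into a keras.utils.Sequence; that the class subclasses Sequence, and the self.indexes and self.batch_size attributes, are assumptions not shown in the original class:

    def __len__(self):
        # number of batches per epoch (assumes a self.batch_size attribute)
        return int(np.ceil(len(self.img_fps) / self.batch_size))

    def __getitem__(self, index):
        # slice one batch of ids and delegate to __data_generation
        # (assumes a self.indexes list of ids, e.g. reshuffled in on_epoch_end)
        batch_ids = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        return self.__data_generation(batch_ids)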
Example #4
d_img = path.join(args.target_ds, "images") + "/*"
d_mask = path.join(args.target_ds, "masks") + "/*"
img_paths = sorted(glob.glob(d_img))
mask_paths = sorted(glob.glob(d_mask))

j, wrong_count = 0, 0
y_pred, y_true = [], []
print(img_paths)
for img_path, mask_path in zip(img_paths, mask_paths):

    img = load_test_image(img_path, dsize=(IMG_HEIGHT, IMG_WIDTH))
    img_hsv = rgb2hsv(img, normalization=True)
    mask = pb_predict_mask(sess, img) > CLOUD_THRES
    #mask = pb_predict_window_mask(sess, img_hsv, window_size = SLIDING_WINDOW_SIZE) > CLOUD_THRES
    #mask = np.zeros_like(img_hsv[...,0])
    mask_pct = np.sum(mask) / (mask.shape[0] * mask.shape[1]) * 100
    gt_mask = load_mask(mask_path,
                        dsize=(IMG_HEIGHT, IMG_WIDTH)) / 255.0 > CLOUD_THRES
    gt_mask_pct = np.sum(gt_mask) / (gt_mask.shape[0] * gt_mask.shape[1]) * 100
    y_pred.append(mask_pct)
    y_true.append(gt_mask_pct)

    if pct_to_label(mask_pct) != pct_to_label(gt_mask_pct):
        wrong_count += 1
        print("Wrong Prediction. {} has around {}% of sky area. Predicted:{}%".
              format(img_path, PCT_LVL[pct_to_label(gt_mask_pct)],
                     PCT_LVL[pct_to_label(mask_pct)]))
        print("More info on unsampled pct. GT:{}% Predicted:{}%".format(
            gt_mask_pct, mask_pct))

    if not path.exists('./results_cloud'):
        makedirs('./results_cloud')
    visualize(img,
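
pct_to_label and PCT_LVL are used in this snippet and in Example #6 below but are never defined here. A hypothetical sketch of the quantization they imply; the bucket values are an assumption, not taken from the original code:

import numpy as np

PCT_LVL = [0, 25, 50, 75, 100]  # assumed coverage levels, in percent

def pct_to_label(pct):
    # map a coverage percentage to the index of the closest PCT_LVL bucket
    return int(np.argmin([abs(pct - lvl) for lvl in PCT_LVL]))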
Example #5
def main(args):
    files = sorted([
        x for x in os.listdir(args.img_dir)
        if '.png' in x or '.jpg' in x.lower()
    ])

    # jump over processed images
    idx = 0
    # while idx < len(files) and osp.exists(osp.join(args.save_dir, files[idx])):
    #     idx += 1

    cv2.namedWindow(args.windows_name)
    cv2.setMouseCallback(args.windows_name, on_mouse)
    cv2.createTrackbar('brush size', args.windows_name, 3, args.max_radius,
                       lambda x: None)

    global draw_color, mask, show_img, radius
    print('after global', id(show_img))
    while idx < len(files):
        img_path = osp.join(args.img_dir, files[idx])
        mask_path = osp.join(args.save_dir, files[idx])
        print('process %s' % files[idx])

        img = load_image(img_path, args.max_height, args.max_width)
        mask = load_mask(mask_path, img.shape, args.use_prev_mask)
        mask[mask == 0] = 2  # convert GC_BGD to GC_PR_BGD
        # mask[mask == 1] = 3  # convert GC_FGD to GC_PR_FGD
        print('after load mask', id(show_img))

        bgd_model = np.zeros((1, 65), np.float64)
        fgd_model = np.zeros((1, 65), np.float64)

        while True:
            radius = cv2.getTrackbarPos('brush size', args.windows_name)
            show_img = linear_combine(img, mask2color(mask),
                                      [0, 0.7, 1][draw_color]).astype('uint8')

            cv2.circle(show_img, cur_mouse, radius, (200, 200, 200),
                       (2 if left_mouse_down else 1))
            cv2.imshow(args.windows_name, show_img)
            key = cv2.waitKey(100)

            if key == ord('w'):
                draw_color = (draw_color + 1) % 3
            elif key == ord('e'):
                draw_color = (draw_color - 1) % 3
            elif key == 32:  # space
                print('segmenting...', end='')
                cv2.waitKey(1)
                # mask enum
                # GC_BGD    = 0,  // definite background
                # GC_FGD    = 1,  // definite foreground
                # GC_PR_BGD = 2,  // probable background
                # GC_PR_FGD = 3   // probable foreground
                hist, _ = np.histogram(mask, [0, 1, 2, 3, 4])
                if hist[0] + hist[2] != 0 and hist[1] + hist[3] != 0:
                    print('grabcut: ', id(show_img))
                    cv2.grabCut(img, mask, None, bgd_model, fgd_model,
                                args.iter_count, cv2.GC_INIT_WITH_MASK)
                print('done')
            elif key == ord('s') or key == 10:
                cv2.imwrite(mask_path, mask2color(mask))
                print('save label %s.' % mask_path)
                idx += 1
                break
            elif key == ord('p') and idx > 0:
                idx -= 1
                break
            elif key == ord('n'):  # next image (space is consumed by the segmentation branch above)
                idx += 1
                break
            elif key == ord('q') or key == 27:
                return
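
mask2color is assumed above to map the four GrabCut class ids to display colors for blending and saving. A possible sketch; the specific palette is an assumption, only the class values follow the OpenCV enum commented earlier:

import numpy as np

# Hypothetical palette: one BGR color per GrabCut class value 0..3.
GC_PALETTE = np.array([[0, 0, 0],         # GC_BGD    -> black
                       [255, 255, 255],   # GC_FGD    -> white
                       [64, 64, 64],      # GC_PR_BGD -> dark gray
                       [192, 192, 192]],  # GC_PR_FGD -> light gray
                      dtype=np.uint8)

def mask2color(mask):
    # index the palette with the per-pixel class ids of an (H, W) mask
    return GC_PALETTE[mask]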
Example #6
print("model loaded.")

d_img = path.join(args.target_ds, "images") + "/*"
d_mask = path.join(args.target_ds, "masks") + "/*"
img_paths = sorted(glob.glob(d_img))
mask_paths = sorted(glob.glob(d_mask))

j, wrong_count = 0, 0
y_pred, y_true = [], []

for img_path, mask_path in zip(img_paths, mask_paths):
    img = load_test_image(img_path, dsize=(IMG_HEIGHT, IMG_WIDTH))
    img_gray = np.expand_dims(rgb2gray(img), axis=-1)
    mask = pb_predict_mask(sess, img_gray) < 0.3
    mask_pct = np.sum(mask) / (mask.shape[0] * mask.shape[1]) * 100
    gt_mask = load_mask(mask_path, dsize=(IMG_HEIGHT, IMG_WIDTH)) / 255.0 > 0.5
    gt_mask_pct = np.sum(gt_mask) / (gt_mask.shape[0] * gt_mask.shape[1]) * 100
    y_pred.append(mask_pct)
    y_true.append(gt_mask_pct)

    if pct_to_label(mask_pct) != pct_to_label(gt_mask_pct):
        wrong_count += 1
        print("Wrong Prediction. {} has around {}% of sky area. Predicted:{}%".
              format(img_path, PCT_LVL[pct_to_label(gt_mask_pct)],
                     PCT_LVL[pct_to_label(mask_pct)]))
        print("More info on unsampled pct. GT:{}% Predicted:{}%".format(
            gt_mask_pct, mask_pct))

    if not path.exists('./results_shadow'):
        makedirs('./results_shadow')
    visualize(img,
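
Both evaluation loops (Examples #4 and #6) accumulate y_pred, y_true and wrong_count, but the closing summary is not shown in these truncated snippets. A sketch of a typical report, labeled as an assumption rather than the original code:

import numpy as np

n_images = len(y_true)
label_acc = 1.0 - wrong_count / n_images  # fraction of images with the correct level
mae = float(np.mean(np.abs(np.array(y_true) - np.array(y_pred))))  # error in percentage points
print("level accuracy: {:.2%}, MAE: {:.2f}%".format(label_acc, mae))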
Example #7
img_dir = sys.argv[1]
anno_dir = sys.argv[2]
output_dir = sys.argv[3]

for f in os.listdir(img_dir):
    if not ('.png' in f.lower() or '.jpg' in f.lower()):
        continue

    print('processing {}'.format(f))

    fn_im = os.path.join(img_dir, f)
    fn_anno = os.path.join(anno_dir, f)
    fn_output = os.path.join(output_dir, f)

    # Read images and annotation
    img = load_image(fn_im, 400, 600)
    mask = load_mask(fn_anno, img.shape)

    assert img.shape[:2] == mask.shape

    labels = mask.flatten()
    n_labels = 2

    # Setup the CRF model
    d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], n_labels)
    U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=False)
    d.setUnaryEnergy(U)
    d.addPairwiseGaussian(sxy=(3, 3),
                          compat=3,
                          kernel=dcrf.DIAG_KERNEL,
                          normalization=dcrf.NORMALIZE_SYMMETRIC)
    d.addPairwiseBilateral(sxy=(80, 80),
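
The snippet cuts off inside the addPairwiseBilateral call. A sketch of how a pydensecrf DenseCRF2D refinement like this typically finishes, continuing the variables above; the parameter values and the 0/255 output encoding are assumptions, and cv2 plus numpy as np are assumed to be imported:

d.addPairwiseBilateral(sxy=(80, 80), srgb=(13, 13, 13), rgbim=img,
                       compat=10,
                       kernel=dcrf.DIAG_KERNEL,
                       normalization=dcrf.NORMALIZE_SYMMETRIC)

# run mean-field inference and take the per-pixel MAP label (0 or 1)
Q = d.inference(5)
refined = np.argmax(Q, axis=0).reshape(img.shape[:2]).astype(np.uint8)

# save the refined binary mask as 0/255
cv2.imwrite(fn_output, refined * 255)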