def selective_search(I, color_spaces=None, ks=None, feature_masks=None, eraseMap=None, n_jobs=-1):
    """Run selective search over every (color space, k, feature mask) combination.

    Parameters
    ----------
    I : ndarray
        Input image (rows, columns, channels).
    color_spaces : list of str, optional
        Color spaces to try; defaults to ['rgb'].
    ks : list of int, optional
        Thresholds for the initial segmentation; defaults to [100].
    feature_masks : list of features.SimilarityMask, optional
        Similarity-component masks to try; defaults to all components enabled.
    eraseMap : ndarray or None
        Optional mask forwarded to each worker; regions under it are excluded.
    n_jobs : int
        joblib parallelism (-1 = all cores).

    Returns
    -------
    list
        One `_selective_search_one` result per parameter combination.
    """
    # Avoid mutable default arguments (shared across calls) and avoid
    # constructing a features.SimilarityMask at import/def time.
    if color_spaces is None:
        color_spaces = ['rgb']
    if ks is None:
        ks = [100]
    if feature_masks is None:
        feature_masks = [features.SimilarityMask(1, 1, 1, 1)]

    parameters = itertools.product(color_spaces, ks, feature_masks)
    region_set = joblib.Parallel(n_jobs=n_jobs)(
        joblib.delayed(_selective_search_one)(I, color, k, mask, eraseMap)
        for (color, k, mask) in parameters)
    return region_set
 def test_similarity_user_all(self, monkeypatch):
     # Stub all four private similarity components to return 1, so that
     # similarity(i, j) under SimilarityMask(1, 1, 1, 1) sums to exactly 4.
     for attr in ('_Features__sim_size', '_Features__sim_texture',
                  '_Features__sim_color', '_Features__sim_fill'):
         monkeypatch.setattr(features.Features, attr, lambda self, i, j: 1)
     mask = features.SimilarityMask(1, 1, 1, 1)
     feats = features.Features(self.dummy_image, self.dummy_label, 1, mask)
     assert feats.similarity(0, 1) == 4
    def __parameter_changed(self):
        """Slot: re-run selective search with the parameters chosen in the UI."""
        selected_colors = [c.lower() for c in self.chosen_colors]
        selected_ks = [float(k) for k in self.chosen_ks]
        # Each similarity string encodes enabled components by letter:
        # S(ize), C(olor), T(exture), F(ill).
        selected_masks = [
            features.SimilarityMask(
                'S' in mask, 'C' in mask, 'T' in mask, 'F' in mask)
            for mask in self.chosen_similarities
        ]

        self.regions = selective_search.selective_search(
            self.ndimg, selected_colors, selected_ks, selected_masks)
        self.slider.setMaximum(len(self.regions))
        # Start the slider a quarter of the way through the hierarchy.
        self.slider.setValue(int(len(self.regions) / 4))
        self.__draw()
def hierarchical_segmentation(I,
                              k=100,
                              feature_mask=features.SimilarityMask(1, 1, 1,
                                                                   1)):
    """Greedy hierarchical grouping on top of an initial graph-based segmentation.

    Parameters
    ----------
    I : ndarray
        Input image (rows, columns, channels).
    k : int
        Threshold for the initial Felzenszwalb-style segmentation.
    feature_mask : features.SimilarityMask
        Enables/disables the size/color/texture/fill similarity components.

    Returns
    -------
    (R, F, L)
        R -- merge history: parent label -> (child1, child2), larger child
             first; initial regions map to an empty tuple.
        F -- list of label images, one per merge step (F[0] is the initial
             segmentation).
        L -- bounding boxes for every region label.
    """
    F0, n_region = segment.segment_label(I, 0.8, k, 100)
    adj_mat, A0 = _calc_adjacency_matrix(F0, n_region)
    # BUGFIX: feature_mask was accepted but never forwarded, so the caller's
    # similarity weighting was silently ignored.  Other variants in this
    # file pass it as the 4th positional argument to features.Features.
    feature_extractor = features.Features(I, F0, n_region, feature_mask)

    # stores list of regions sorted by their similarity
    S = _build_initial_similarity_set(A0, feature_extractor)

    # stores region label and its parent (empty if initial).
    R = {i: () for i in range(n_region)}

    A = [A0]  # stores adjacency relation for each step
    F = [F0]  # stores label image for each step

    # greedy hierarchical grouping loop
    while len(S):
        # Most-similar adjacent pair merges into a brand-new label t.
        (s, (i, j)) = S.pop()
        t = feature_extractor.merge(i, j)

        # record merged region (larger region should come first)
        R[t] = (
            i,
            j) if feature_extractor.size[j] < feature_extractor.size[i] else (
                j, i)

        Ak = _new_adjacency_dict(A[-1], i, j, t)
        A.append(Ak)

        # Drop pairs involving i/j and insert pairs involving the new label t.
        S = _merge_similarity_set(feature_extractor, Ak, S, i, j, t)

        F.append(_new_label_image(F[-1], i, j, t))

    # bounding boxes for each hierarchy
    L = feature_extractor.bbox

    return (R, F, L)
Beispiel #5
0
def gen_regions(image, dims, pad, ks):
    """
    Generates candidate regions for object detection using selective search.

    image: HxWxC image (rows, columns, channels).
    dims:  3-tuple (height, width, channels) target size for each resized crop.
    pad:   pixels of context added on each side of a region when the padded
           box still fits within the bounds.
    ks:    threshold k for the initial segmentation.

    Returns a list of (confidence, resized_crop, raw_region, (x0, y0, x1, y1)).
    """

    # Single-argument print() is valid in both Python 2 and Python 3.
    print("Generating cropped regions...")
    assert (len(dims) == 3)
    regions = selective_search(image,
                               ks=[ks],
                               feature_masks=[
                                   features.SimilarityMask(
                                       size=1,
                                       color=1,
                                       texture=1,
                                       fill=1,
                                   )
                               ])

    crops = []
    for conf, (y0, x0, y1, x1) in regions:
        if x0 - pad >= 0:
            x0 = x0 - pad
        if y0 - pad >= 0:
            y0 = y0 - pad
        # BUGFIX: x is the column coordinate, so its upper bound is the width
        # dims[1]; the original checked dims[0] (height) for both axes.
        # NOTE(review): these bounds assume image size == dims; image.shape
        # would be the stricter check -- confirm against callers.
        if x1 + pad <= dims[1]:
            x1 = x1 + pad
        if y1 + pad <= dims[0]:
            y1 = y1 + pad
        # Images are rows, then columns, then channels.
        region = image[y0:y1, x0:x1, :]
        candidate = resize(region, dims)
        crops.append((conf, candidate, region, (x0, y0, x1, y1)))

    print("Generated {} crops".format(len(crops)))

    return crops
Beispiel #6
0
    # Demo-script CLI (fragment of an enclosing main not visible here).
    parser.add_argument('-k', '--k',        type=int,   default=100, help='threshold k for initial segmentation')
    parser.add_argument('-c', '--color',    nargs=1,    default='rgb', choices=['rgb', 'lab', 'rgi', 'hsv', 'nrgb', 'hue'], help='color space')
    parser.add_argument('-f', '--feature',  nargs="+",  default=['texture', 'fill'], choices=['size', 'color', 'texture', 'fill'], help='feature for similarity calculation')
    parser.add_argument('-o', '--output',   type=str,   default='result', help='prefix of resulting images')
    parser.add_argument('-a', '--alpha',    type=float, default=1.0, help='alpha value for compositing result image with input image')
    args = parser.parse_args()

    # Load the input image and force 3 channels for grayscale inputs.
    img = skimage.io.imread(args.image)
    if len(img.shape) == 2:
        img = skimage.color.gray2rgb(img)

    print('k:', args.k)
    print('color:', args.color)
    print('feature:', ' '.join(args.feature))

    # Build the similarity mask from the requested feature names.
    mask = features.SimilarityMask('size' in args.feature, 'color' in args.feature, 'texture' in args.feature, 'fill' in args.feature)
    (R, F, L) = selective_search.hierarchical_segmentation(img, args.k, mask)
    print('result filename: %s_[0000-%04d].png' % (args.output, len(F) - 1))

    # suppress warning when saving result images
    warnings.filterwarnings("ignore", category = UserWarning)

    step_title = "[INFO] Generating segments: "
    widgets = [step_title, progressbar.Percentage(),
               " ", progressbar.Bar(), " ", progressbar.ETA()]
    pbar = progressbar.ProgressBar(maxval=len(F), widgets=widgets).start()

    # Blend each hierarchy level's random-color label image with the input.
    colors = generate_color_table(R)
    for depth, label in enumerate(F):
        result = colors[label]
        result = (result * args.alpha + img * (1. - args.alpha)).astype(numpy.uint8)
Beispiel #7
0
def hierarchical_segmentation(I,
                              feature_mask=features.SimilarityMask(1, 1, 1, 1),
                              F0=None,
                              k=100,
                              return_stacks=False):
    """
    Greedy hierarchical grouping over a precomputed label image, recording
    the merge tree as a networkx DiGraph.

    I: Image
    feature_mask: enables/disables size/color/texture/fill similarity terms.
    F0: initial label image. NOTE(review): despite the None default,
        np.unique(F0) below fails if it is not supplied -- confirm callers
        always pass it.
    k: unused here; presumably kept for signature parity with the other
       variants -- TODO confirm.
    return_stacks: when True, also return the merge record and label stacks.

    Returns:
    g (when return_stacks is False): DiGraph whose nodes carry 'n_elems'
        (leaf count) and 'stack' (merge step), with edges parent -> child.
    (R, F, g) (when return_stacks is True):
    R: Merge dictionary (key: label of parent, value: (label of child1, label of child2))
    F: list of label images, one per merge step (F[0] is the input labels).
    """
    relabeled = False

    # Remap if labels are not contiguous: the merge bookkeeping below
    # assumes labels 0..n_region-1.
    sorted_labels = np.asarray(sorted(np.unique(F0).ravel()))
    if (np.any((sorted_labels[1:] - sorted_labels[0:-1]) > 1)):
        relabeled = True
        map_dict = {sorted_labels[i]: i for i in range(sorted_labels.shape[0])}
        F0 = relabel(F0, map_dict)

    n_region = np.unique(F0.ravel()).shape[0]
    adj_mat, A0 = _calc_adjacency_matrix(F0, n_region)
    feature_extractor = features.Features(I, F0, n_region, feature_mask)

    # stores list of regions sorted by their similarity
    S = _build_initial_similarity_set(A0, feature_extractor)

    # Initialize the merge-tree graph: one node per initial region, each
    # holding its element (leaf) count and the merge step ("stack") index.
    unique_labels = np.unique(F0.ravel()).tolist()

    g = nx.DiGraph()
    g.add_nodes_from(unique_labels, n_elems=1, stack=0)

    # stores region label and its parent (empty if initial).
    R = {i: () for i in range(n_region)}

    A = [A0]  # stores adjacency relation for each step
    F = [F0]  # stores label image for each step

    # greedy hierarchical grouping loop
    stack = 0
    while len(S):
        stack += 1
        (s, (i, j)) = S.pop()
        t = feature_extractor.merge(i, j)
        # New parent node t aggregates the element counts of its children.
        n_elems_left = nx.get_node_attributes(g, 'n_elems')[i]
        n_elems_right = nx.get_node_attributes(g, 'n_elems')[j]
        new_n_elems = n_elems_left + n_elems_right
        g.add_node(t, n_elems=new_n_elems, stack=stack)
        g.add_edge(t, i, diff_n_elems=new_n_elems - n_elems_left)
        g.add_edge(t, j, diff_n_elems=new_n_elems - n_elems_right)

        # record merged region (larger region should come first)
        R[t] = (
            i,
            j) if feature_extractor.size[j] < feature_extractor.size[i] else (
                j, i)

        Ak = _new_adjacency_dict(A[-1], i, j, t)
        A.append(Ak)

        S = _merge_similarity_set(feature_extractor, Ak, S, i, j, t)

        F.append(_new_label_image(F[-1], i, j, t))

    # bounding boxes for each hierarchy
    L = feature_extractor.bbox

    # Restore the caller's original (non-contiguous) labels in the graph.
    if (relabeled):
        inv_map = {v: k for k, v in map_dict.items()}
        g = nx.relabel_nodes(g, inv_map, copy=True)

    # Also restore the original labels in every stacked label image.
    if (relabeled and return_stacks):
        print('relabeling stacks')
        with progressbar.ProgressBar(maxval=len(F)) as bar:
            for i in range(len(F)):
                bar.update(i)
                F[i] = relabel(F[i], inv_map)

    if (return_stacks):
        return (R, F, g)
    else:
        return g
def hierarchical_segmentation(I, k = 100, feature_mask = features.SimilarityMask(1, 1, 1, 1), eraseMap=None):
    """Hierarchical grouping that can exclude regions under a binary mask.

    Python 2 code (uses dict.iteritems below).

    I: input image.
    k: threshold for the initial graph-based segmentation.
    feature_mask: similarity components to use. NOTE(review): not forwarded
        to features.Features below -- confirm whether that is intended.
    eraseMap: optional binary mask; initial regions containing any pixel
        where eraseMap == 1 are kept out of the merging process.

    Returns (R, F, L, L_regions, eraseLabels):
    R: parent label -> (child1, child2) merge record (empty tuple for leaves).
    F: label image per merge step (F[0] is the initial segmentation).
    L: bounding boxes per region label.
    L_regions: label -> list of initial (leaf) labels it covers.
    eraseLabels: set of excluded initial labels ([] when eraseMap is None).
    """
    # F0, n_region = segment.segment_label(I, 0.8, k, 100)
    F0, n_region = segment.segment_label(I, 0.5, k, 500)
    # Recompute the region count from the labels actually present.
    n_region = F0.max() + 1
    # ++ calculate outside region labels
    if eraseMap is not None:
        eraseLabels = set(list(F0[numpy.where(eraseMap == 1)].flatten()))
    else:
        eraseLabels = []
    adj_mat, A0 = _calc_adjacency_matrix(F0, n_region)
    feature_extractor = features.Features(I, F0, n_region)

    # stores list of regions sorted by their similarity
    S = _build_initial_similarity_set(A0, feature_extractor)
    # ++ pop out regions outside circle: walk S backwards so pops do not
    # disturb the indices still to be visited.
    if eraseMap is not None:
        ii = len(S) - 1
        while ii >= 0:
            if (S[ii][1][0] in eraseLabels) or (S[ii][1][1] in eraseLabels):
                S.pop(ii)
            ii -= 1
    # stores region label and its parent (empty if initial).
    R = {i : () for i in range(n_region)}

    A = [A0]    # stores adjacency relation for each step
    F = [F0]    # stores label image for each step

    # greedy hierarchical grouping loop
    while len(S):
        (s, (i, j)) = S.pop()
        t = feature_extractor.merge(i, j)

        # record merged region (larger region should come first)
        R[t] = (i, j) if feature_extractor.size[j] < feature_extractor.size[i] else (j, i)

        Ak = _new_adjacency_dict(A[-1], i, j, t)
        A.append(Ak)

        S = _merge_similarity_set(feature_extractor, Ak, S, i, j, t)
        # ++ pop out regions outside circle (again, after new pairs with the
        # merged label t were inserted)
        if eraseMap is not None:
            ii = len(S) - 1
            while ii >= 0:
                if (S[ii][1][0] in eraseLabels) or (S[ii][1][1] in eraseLabels):
                    S.pop(ii)
                ii -= 1

        F.append(_new_label_image(F[-1], i, j, t))

    # bounding boxes for each hierarchy
    L = feature_extractor.bbox

    # Expand every merged label into the list of initial (leaf) labels it
    # contains; relies on children having smaller labels than parents so the
    # children's entries already exist when a parent is processed.
    L_regions = {}
    for r, l in R.iteritems():
        if r < n_region:
            L_regions[r] = [r]
        else:
            ll = []
            if l[0] >= n_region:
                ll = ll + L_regions[l[0]]
            else:
                ll.append(l[0])

            if l[1] >= n_region:
                ll = ll + L_regions[l[1]]
            else:
                ll.append(l[1])
            L_regions[r] = ll
    return (R, F, L, L_regions, eraseLabels)
def hierarchical_segmentation_M(paras, feature_mask = features.SimilarityMask(1, 1, 1, 1)):
    """Hierarchical grouping driven by a parameter dict (Python 2 code).

    paras: dict providing at least 'train', 'is_rotate', 'im', the inputs
        consumed by rcm.region_special_map, and (in train mode)
        'eraseRegionLabels'.
    feature_mask: NOTE(review): accepted but never used in this function --
        confirm whether it should be forwarded to features.Features.

    Train mode returns (Rd, Fd, Ld, L_regionsd, eraseRegions_fb) -- one
    merge result per 'fg'/'bg' grouping.  Otherwise returns
    (R, F, L, L_regions, eraseLabels) plus a PCA-derived rotation angle when
    paras['is_rotate'] is set.
    """
    # F0, n_region = segment.segment_label(I, 0.8, k, 100)
    # F0, n_region = segment.segment_label(I, 0.5, k, 500)
    # ++ calculate outside region labels
    train = paras['train']
    is_rotate = paras['is_rotate']
    if train:
        eraseRegionLabels = paras['eraseRegionLabels']
        F0, region_labels, eraseLabels = rcm.region_special_map(paras)
        # regions_special = []
        # regions_rest = []
        # regions_common = []
        # for _, v in region_labels.iteritems():
        #     regions_special = regions_special + v[0]
        #     regions_rest = regions_rest + v[1]
        #     regions_common = regions_common + v[2]
        # Collect the pixel coordinates of all "special" regions and fit a
        # PCA to estimate their dominant orientation.
        # NOTE(review): region_labels.values()[0] is Python 2 only (indexing
        # dict.values()); presumably values are ([special], [rest], [common])
        # label lists -- confirm against rcm.region_special_map.
        coordinates_special = numpy.zeros((0, 2))
        print region_labels
        for ls in region_labels.values()[0][0]:
            # print 'ls: ' + str(ls)
            coordinates_special = numpy.vstack([coordinates_special, numpy.argwhere(F0==ls)])
        pca = PCA()
        pca.fit(coordinates_special)

        # Angle (degrees) of the first principal axis; computed but only the
        # rotation code below (commented out) ever used it.
        components = pca.components_
        main_ax = components[0]
        angle = math.atan(main_ax[0] / main_ax[1]) * (180.0 / math.pi)
        # print 'F0 before: '
        # print F0.dtype
        # eraseLabels = list(eraseLabels)
        # el = eraseLabels[0]
        # for ell in eraseLabels:
        #     F0[numpy.where(F0==ell)] = el
        # F0 = numpy.array(rotate(F0, angle).round(), dtype='i')
        # print 'F0 after: '
        # print F0.dtype
        n_region = len(set(list(F0.flatten())))

        I = paras['im']
        # print 'I before: '
        # print I.dtype
        # I = numpy.array(rotate(I, angle), dtype='i')
        # print 'I after: '
        # print I.dtype

        # Split the three region groups into foreground/background erase
        # sets depending on which group indices are in eraseRegionLabels.
        eraseRegions_fb = {'fg': [], 'bg': []}
        for rl in range(3):
            if rl in eraseRegionLabels:
                eraseRegions_fb['fg'] = eraseRegions_fb['fg'] + region_labels.values()[0][rl]
            else:
                eraseRegions_fb['bg'] = eraseRegions_fb['bg'] + region_labels.values()[0][rl]
        # Run one full merge pass per fg/bg erase set and collect results.
        Rd = {}
        Fd = {}
        Ld = {}
        L_regionsd = {}
        for fb_k, fb_v in eraseRegions_fb.iteritems():
            print 'n_region', n_region
            print fb_k, fb_v
            eraseLabels_k = set(fb_v + list(eraseLabels))

            adj_mat, A0 = _calc_adjacency_matrix(F0, n_region)
            feature_extractor = features.Features(I, F0, n_region)

            # stores list of regions sorted by their similarity
            S = _build_initial_similarity_set(A0, feature_extractor)
            # ++ pop out regions outside circle and selected common
            # (iterate backwards so pops don't shift unvisited indices)
            ii = len(S) - 1
            while ii >= 0:
                if (S[ii][1][0] in eraseLabels_k) or (S[ii][1][1] in eraseLabels_k):
                    S.pop(ii)
                ii -= 1
            # stores region label and its parent (empty if initial).
            R = {i : () for i in range(n_region)}

            A = [A0]    # stores adjacency relation for each step
            F = [F0]    # stores label image for each step

            # greedy hierarchical grouping loop
            while len(S):
                (s, (i, j)) = S.pop()
                t = feature_extractor.merge(i, j)

                # record merged region (larger region should come first)
                R[t] = (i, j) if feature_extractor.size[j] < feature_extractor.size[i] else (j, i)

                Ak = _new_adjacency_dict(A[-1], i, j, t)
                A.append(Ak)

                S = _merge_similarity_set(feature_extractor, Ak, S, i, j, t)
                # ++ pop out regions outside circle and selected common
                ii = len(S) - 1
                while ii >= 0:
                    if (S[ii][1][0] in eraseLabels_k) or (S[ii][1][1] in eraseLabels_k):
                        S.pop(ii)
                    ii -= 1

                F.append(_new_label_image(F[-1], i, j, t))

            # bounding boxes for each hierarchy
            L = feature_extractor.bbox

            # Expand each merged label into its list of initial (leaf) labels.
            L_regions = {}
            for r, l in R.iteritems():
                if r < n_region:
                    L_regions[r] = [r]
                else:
                    ll = []
                    if l[0] >= n_region:
                        ll = ll + L_regions[l[0]]
                    else:
                        ll.append(l[0])

                    if l[1] >= n_region:
                        ll = ll + L_regions[l[1]]
                    else:
                        ll.append(l[1])
                    L_regions[r] = ll
            Rd[fb_k] = R
            Fd[fb_k] = F
            Ld[fb_k] = L
            L_regionsd[fb_k] = L_regions
        return (Rd, Fd, Ld, L_regionsd, eraseRegions_fb)
    else:
        # if is_rotate:
        #     F0, region_labels, eraseLabels, special_labels = rcm.region_special_map(paras)
        #     coordinates_special = numpy.zeros((0, 2))
        #     for ls in special_labels:
        #         print 'ls: ' + str(ls)
        #         coordinates_special = numpy.vstack([coordinates_special, numpy.argwhere(F0 == ls)])
        #     print 'coordinates_special', coordinates_special
        #     pca = PCA()
        #     pca.fit(coordinates_special)
        #
        #     components = pca.components_
        #     main_ax = components[0]
        #     angle = math.atan(main_ax[0] / main_ax[1]) * (180.0 / math.pi)
        # else:
        #     F0, region_labels, eraseLabels = rcm.region_special_map(paras)

        # Inference path: here region_labels is a flat list (4-value return),
        # unlike the dict used in the train branch above.
        F0, region_labels, eraseLabels, _ = rcm.region_special_map(paras)
        eraseLabels = set(region_labels + list(eraseLabels))
        n_region = len(set(list(F0.flatten())))
        I = paras['im']

        # Labels NOT being erased are the "special" ones; estimate their
        # dominant orientation via PCA (angle stays 0 when none remain).
        special_labels = [x for x in range(n_region) if x not in eraseLabels]
        # print special_labels
        if len(special_labels) == 0:
            angle = 0
        else:
            coordinates_special = numpy.zeros((0, 2))
            for ls in special_labels:
                # print 'ls: ' + str(ls)
                coordinates_special = numpy.vstack([coordinates_special, numpy.argwhere(F0 == ls)])
            # print 'coordinates_special', coordinates_special
            pca = PCA()
            pca.fit(coordinates_special)

            components = pca.components_
            main_ax = components[0]
            angle = math.atan(main_ax[0] / main_ax[1]) * (180.0 / math.pi)
        # if eraseMap is not None:
        #     eraseLabels = set(list(F0[numpy.where(eraseMap == 1)].flatten()))
        adj_mat, A0 = _calc_adjacency_matrix(F0, n_region)
        feature_extractor = features.Features(I, F0, n_region)

        # stores list of regions sorted by their similarity
        S = _build_initial_similarity_set(A0, feature_extractor)
        # ++ pop out regions outside circle and selected common
        ii = len(S) - 1
        while ii >= 0:
            if (S[ii][1][0] in eraseLabels) or (S[ii][1][1] in eraseLabels):
                S.pop(ii)
            ii -= 1
        # stores region label and its parent (empty if initial).
        R = {i : () for i in range(n_region)}

        A = [A0]    # stores adjacency relation for each step
        F = [F0]    # stores label image for each step

        # greedy hierarchical grouping loop
        while len(S):
            (s, (i, j)) = S.pop()
            t = feature_extractor.merge(i, j)

            # record merged region (larger region should come first)
            R[t] = (i, j) if feature_extractor.size[j] < feature_extractor.size[i] else (j, i)

            Ak = _new_adjacency_dict(A[-1], i, j, t)
            A.append(Ak)

            S = _merge_similarity_set(feature_extractor, Ak, S, i, j, t)
            # ++ pop out regions outside circle and selected common
            ii = len(S) - 1
            while ii >= 0:
                if (S[ii][1][0] in eraseLabels) or (S[ii][1][1] in eraseLabels):
                    S.pop(ii)
                ii -= 1

            F.append(_new_label_image(F[-1], i, j, t))

        # bounding boxes for each hierarchy
        L = feature_extractor.bbox

        # Expand each merged label into its list of initial (leaf) labels.
        L_regions = {}
        for r, l in R.iteritems():
            if r < n_region:
                L_regions[r] = [r]
            else:
                ll = []
                if l[0] >= n_region:
                    ll = ll + L_regions[l[0]]
                else:
                    ll.append(l[0])

                if l[1] >= n_region:
                    ll = ll + L_regions[l[1]]
                else:
                    ll.append(l[1])
                L_regions[r] = ll
        if is_rotate:
            return (R, F, L, L_regions, eraseLabels, angle)
        else:
            return (R, F, L, L_regions, eraseLabels)
Beispiel #10
0
def generate_with_selective_search_3d(l_start, l_end, dataset, gt_set,
                                      im_in_DIR, im_out_DIR, mat_out_DIR, data,
                                      cfg):
    """
        generate the proposals with the selective search method

        Python 2 code (print statements, xrange).

        l_start, l_end: index range of images to process.
        dataset, gt_set: dataset object and split name providing "im_names".
        im_in_DIR / im_out_DIR / mat_out_DIR: input image dir, mask PNG
            output dir, and MAT output dir.
        data: 'MASK' renders a segmentation mask PNG at cfg.SEGM_SS_DEPTH;
            'MAT' converts an existing mask PNG into a .mat file with
            labels, bboxes and a superpixel label image.
        cfg: configuration carrying the SEGM_SS_* settings.

        Side effects only: writes PNG/MAT files, skipping ones that exist.
    """

    assert data in ['MASK','MAT'], \
    '[ERROR] unknow type of data: {}'.format(data)

    im_names = dataset.sets[gt_set]["im_names"]
    timer = Timer()

    for im_i in xrange(l_start, l_end):
        im_nm = im_names[im_i]
        im_out_pn = osp.join(im_out_DIR, im_nm + '.png')
        mat_out_pn = osp.join(mat_out_DIR, im_nm + '.mat')
        _, im_pn = dataset.built_im_path(im_nm, im_in_DIR)

        im_RAW = None
        timer.tic()

        # --- MASK pass: run hierarchical segmentation, render the colored
        # label image at the configured depth, and save it as a PNG.
        if data == 'MASK' and not osp.exists(im_out_pn):
            im_RAW = cv2.imread(im_pn)
            im_RAW = cv2.cvtColor(im_RAW, cv2.COLOR_BGR2RGB)

            mask = features.SimilarityMask('size' in cfg.SEGM_SS_FEATURE,
                                           'color' in cfg.SEGM_SS_FEATURE,
                                           'texture' in cfg.SEGM_SS_FEATURE,
                                           'fill' in cfg.SEGM_SS_FEATURE)

            (R, F, _) = selective_search.hierarchical_segmentation(
                im_RAW, cfg.SEGM_SS_K, mask)
            colors = generate_color_table(R)
            is_mask = False

            for depth, label in enumerate(F):
                # Blend the random-color label image with the input image.
                result = colors[label]
                result = (result * cfg.SEGM_SS_ALPHA + im_RAW *
                          (1. - cfg.SEGM_SS_ALPHA)).astype(np.uint8)

                # extract on targeted depth
                if depth == cfg.SEGM_SS_DEPTH:
                    mask_RAW = result
                    is_mask = True
                    break

            # NOTE(review): the message formats the image name, not the
            # depth, despite its wording.
            assert is_mask == True, "[ERROR] unable to extract segm on depth {}".format(
                im_nm)

            # save raw-img output
            mask_RAW = np.array(mask_RAW, dtype=np.int8)
            #print 'mask_RAW.shape: {}'.format(mask_RAW.shape)
            cv2.imwrite(im_out_pn, mask_RAW)
            timer.toc()
            print '[INFO] %d/%d use time (s) %f' % (im_i + 1, len(im_names),
                                                    timer.average_time)
        """ parse proposals as a MAT for easy handle """
        # --- MAT pass: recover regions from the mask PNG by grouping pixels
        # with identical (R, G, B) values, then save labels/bboxes/superpixels.
        if data == 'MAT' and not osp.exists(mat_out_pn):
            assert osp.exists(im_out_pn), \
            '[ERROR] missing MASK for {}. Please generate MASK first.'.format(im_out_pn)
            #print 'im_out_pn: {}'.format(im_out_pn)

            mask_RAW = cv2.imread(im_out_pn)
            mask_RAW = cv2.cvtColor(mask_RAW, cv2.COLOR_BGR2RGB)

            R, G, B = cv2.split(mask_RAW)
            R_uniq_vals = np.unique(R)
            G_uniq_vals = np.unique(G)
            B_uniq_vals = np.unique(B)
            #print 'len(R_uniq_vals): {}'.format(len(R_uniq_vals))
            #print 'len(G_uniq_vals): {}'.format(len(G_uniq_vals))
            #print 'len(B_uniq_vals): {}'.format(len(B_uniq_vals))

            labels = []
            bboxes = []
            # NOTE(review): np.float is removed in NumPy >= 1.24; this line
            # requires an older NumPy (or changing to plain float).
            superpixels = np.zeros((mask_RAW.shape[0], mask_RAW.shape[1]),
                                   dtype=np.float)
            #num_ids = min(len(R_uniq_vals)*len(G_uniq_vals)*len(B_uniq_vals),255)
            num_ids = len(R_uniq_vals) * len(G_uniq_vals) * len(B_uniq_vals)
            # Random 1-based label ids, one per possible RGB combination.
            labels_ids = np.random.permutation(xrange(1, num_ids + 1))
            #print 'num_ids: {}'.format(num_ids)
            #print 'len(labels_ids): {}'.format(len(labels_ids))
            #print 'labels_ids: {}'.format(labels_ids)
            idx = 0

            for R_val in R_uniq_vals:
                for G_val in G_uniq_vals:
                    for B_val in B_uniq_vals:
                        [row_ids, col_ids] = np.array(np.where((R == R_val) & \
                                                               (G == G_val) & \
                                                               (B == B_val)))

                        # Only emit a region if this exact color occurs.
                        if len(row_ids) > 0 and len(row_ids) == len(col_ids):
                            y1 = np.min(row_ids)
                            x1 = np.min(col_ids)
                            y2 = np.max(row_ids)
                            x2 = np.max(col_ids)
                            box = np.array([x1, y1, x2, y2])

                            assert idx < num_ids, \
                            '[ERROR] idx out labels range: {} vs. {}'.format(idx,num_ids)

                            label = labels_ids[idx]
                            superpixels[row_ids, col_ids] = label

                            labels.append(label)
                            bboxes.append(box)
                            idx += 1

            im_mat = {
                "labels": labels,
                "bboxes": bboxes,
                "superpixels": superpixels
            }

            # save mat-img output
            sio.savemat(mat_out_pn, im_mat, do_compression=True)
            timer.toc()
            print '[INFO] %d/%d use time (s) %f' % (im_i + 1, len(im_names),
                                                    timer.average_time)
 def test_similarity_user_all(self):
     # Re-run fixture setup explicitly for a clean state.
     self.setup_method()
     mask = features.SimilarityMask(1, 1, 1, 1)
     feats = features.Features(self.dummy_image, self.dummy_label, 1, mask)
     # All four similarity components enabled: the total must equal 4.
     assert feats.similarity(0, 1) == 4