Example #1
0
def writer_jsons():

    for idx, img_name in enumerate(os.listdir(dst_imgs_path)):
        print(img_name)

        ImgInfo = {}
        ImgInfo.update({"img_id": img_name})

        img_id = img_name.split('.')[0]
        dst_json_name = os.path.join(dst_json_path, img_id + '.json')
        if os.path.exists(dst_json_name):
            continue
        else:
            imgPath = os.path.join(dst_imgs_path, img_name)
            img = Image.open(imgPath)
            size_map = cv.imread(imgPath.replace('images', 'size_map'),
                                 cv.IMREAD_GRAYSCALE)
            size_map = torch.from_numpy(size_map)
            # smooth the size map: max over 249x249 windows (stride 16,
            # padding 124), then upsample back by the same factor of 16
            size_map = F.max_pool2d(size_map[None, None, :, :].float(),
                                    (249, 249), 16, 124)
            size_map = F.interpolate(size_map, scale_factor=16).squeeze()

            size_map = size_map.numpy()

            w, h = img.size
            h_size_map, w_size_map = size_map.shape  # numpy arrays are (H, W)

            print('resize', w, h, w_size_map, h_size_map)

            # ids are compared as zero-padded strings: <= '1201' falls in the
            # training split, larger ids map to the test split (offset by 1201)
            if img_id <= '1201':
                gt_path = os.path.join(train_path,
                                       'img_' + img_id + '_ann.mat')
                ori_imgPath = os.path.join(train_path,
                                           'img_' + img_id + '.jpg')
            else:
                gt_path = os.path.join(
                    test_path,
                    'img_' + str(int(img_id) - 1201).zfill(4) + '_ann.mat')
                ori_imgPath = os.path.join(
                    test_path,
                    'img_' + str(int(img_id) - 1201).zfill(4) + '.jpg')
            gtInf = scio.loadmat(gt_path)  # format [ w, h ]
            ori_img = Image.open(ori_imgPath)
            ori_w, ori_h = ori_img.size
            print('ori', ori_w, ori_h)

            w_rate, h_rate = w / ori_w, h / ori_h
            annPoints = gtInf['annPoints']

            annPoints[:, 0] = annPoints[:, 0] * w_rate
            annPoints[:, 1] = annPoints[:, 1] * h_rate
            annPoints = annPoints.astype(int)
            # print(annPoints)

            ImgInfo.update({"human_num": len(annPoints)})
            center_w, center_h = [], []
            xy = []
            wide, height = [], []
            for head in annPoints:

                x, y = min(head[0], w - 1), min(head[1], h - 1)
                center_w.append(x)
                center_h.append(y)
                xy.append([int(head[0]), int(head[1])])

                if ImgInfo["human_num"] > 4:
                    dists = euclidean_dist(head[None, :], annPoints)
                    dists = dists.squeeze()
                    id = np.argsort(dists)
                    p1_y, p1_x = min(annPoints[id[1]][1],
                                     h - 1), min(annPoints[id[1]][0], w - 1)
                    p2_y, p2_x = min(annPoints[id[2]][1],
                                     h - 1), min(annPoints[id[2]][0], w - 1)
                    p3_y, p3_x = min(annPoints[id[3]][1],
                                     h - 1), min(annPoints[id[3]][0], w - 1)
                    # print(id)
                    # import pdb
                    scale = average_del_min([
                        size_map[y, x], size_map[p1_y, p1_x],
                        size_map[p2_y, p2_x], size_map[p3_y, p3_x]
                    ])

                    scale = max(scale, 4)
                else:
                    scale = max(size_map[y, x], 4)
                # print(x,y, scale)
                area = np.exp(scale)  # size-map values are treated as log(area)
                length = int(np.sqrt(area))  # side of a square box with that area
                wide.append(length)
                height.append(length)
            ImgInfo.update({"points": xy})
            # new_heads = [[i, j] for [i, j] in zip(new_x, new_y)]

            xywh = []  # corner format: [x_left_top, y_left_top, x_right_bottom, y_right_bottom]
            for x, y, x_len, y_len in zip(center_w, center_h, wide, height):
                # print(x,y,x_len,y_len)

                x_left_top, y_left_top = max(int(x - x_len / 2),
                                             0), max(int(y - y_len / 2), 0)
                x_right_bottom, y_right_bottom = min(
                    int(x + x_len / 2), w - 1), min(int(y + y_len / 2), h - 1)
                xywh.append(
                    [x_left_top, y_left_top, x_right_bottom, y_right_bottom])

            ImgInfo.update({"boxes": xywh})
            # print(ImgInfo)

            # plot(center_w, center_h, 'g*')
            # plt.imshow(img)
            # for (x_, y_, w_, h_) in ImgInfo["boxes"]:
            #     plt.gca().add_patch(plt.Rectangle((x_, y_), w_ - x_, h_ - y_, fill=False, edgecolor='r', linewidth=1))
            # plt.show()

            with open(dst_json_name, 'w') as f:
                json.dump(ImgInfo, f)
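
Example #1 calls two helpers, euclidean_dist and average_del_min, that are defined elsewhere in the script. Below is a minimal sketch of what they plausibly look like, inferred only from how they are called above; the original implementations may differ.

import numpy as np


def euclidean_dist(a, b):
    # Pairwise Euclidean distances between the rows of a (shape (M, 2)) and
    # b (shape (N, 2)), returned as an (M, N) matrix. This matches the call
    # euclidean_dist(head[None, :], annPoints).squeeze() above.
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    diff = a[:, None, :] - b[None, :, :]
    return np.sqrt((diff ** 2).sum(axis=-1))


def average_del_min(values):
    # Judging by the name and the 4-element lists it receives, this drops the
    # smallest value and averages the rest (an assumption, not the original code).
    values = sorted(values)
    if len(values) <= 1:
        return float(values[0])
    return float(sum(values[1:])) / (len(values) - 1)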
Example #2
0
f_cen_lst = []
for idx, face in enumerate( faces ):   
    f_eye_l = face['faceLandmarks']['eyeLeftInner']
    f_eye_r = face['faceLandmarks']['eyeRightInner']
    f_cen = ( (f_eye_l['x'] + f_eye_r['x'])/2.0, (f_eye_l['y'] + f_eye_r['y'])/2.0 )
    f_gen = 1 if face['attributes']['gender'] else 0
    f_age_s = face['attributes']['age']
    f_age = [ int(fn.median(a_c)) for a_c in age_cate if f_age_s in a_c ][0]
    v_label = v_label_dict[ f_age, f_gen ]
    G_q.add_node( idx, label=v_label )
    f_cen_lst.append( f_cen )
    
f_cen_combi = list( itertools.combinations( f_cen_lst, 2 ) )
for a, b in f_cen_combi:
    px_dist = fn.euclidean_dist( a, b )
    a_idx = f_cen_lst.index( a )
    b_idx = f_cen_lst.index( b )
    G_q.add_edge( a_idx, b_idx, weight=px_dist )

# Minimum Spanning Tree
G_q_mst = nx.minimum_spanning_tree( G_q )   
# Set initial number of Order Distance as 0 For MST edges
for v1, v2 in G_q_mst.edges():
    G_q_mst[ v1 ][ v2 ][ 'weight' ] = 0
# Assign Order Distance for All Edges    
G_q_od = dict( nx.all_pairs_shortest_path_length( G_q_mst, cutoff=4 ) )  # dict() keeps this working on NetworkX >= 2.0
# Remove the edges visiting itself
# Decrease Order Distances with -1 
# Copy MST's Order Distance to Edges Attrs. of Graph G
for v1 in G_q_od.keys():
Example #3
0
def generate_masks():
    file_list = glob.glob(os.path.join(dst_imgs_path, '*.jpg'))

    print(len(file_list))
    for idx, img_path in enumerate(file_list):
        if idx < -1:  # debug guard; never triggers with non-negative idx
            break
        img_id = os.path.splitext(os.path.basename(img_path))[0]
        img_ori = Image.open(img_path)
        w, h = img_ori.size

        print(img_id)
        print(w, h)
        mask_map = np.zeros((h, w), dtype='uint8')
        gt_name = os.path.join(dst_json_path, img_id + '.json')

        with open(gt_name) as f:
            ImgInfo = json.load(f)

        centroid_list = []
        wh_list = []
        for id, (w_start, h_start, w_end,
                 h_end) in enumerate(ImgInfo["boxes"], 0):
            centroid_list.append([(w_end + w_start) / 2,
                                  (h_end + h_start) / 2])
            wh_list.append(
                [max((w_end - w_start) / 2, 3),
                 max((h_end - h_start) / 2, 3)])
        # print(len(centroid_list))
        centroids = np.array(centroid_list.copy(), dtype='int')
        wh = np.array(wh_list.copy(), dtype='int')
        wh[wh > 15] = 15
        human_num = ImgInfo["human_num"]
        for point in centroids:
            point = point[None, :]

            dists = euclidean_dist(point, centroids)
            dists = dists.squeeze()
            id = np.argsort(dists)

            for start, first in enumerate(id, 0):
                if start > 0 and start < 5:
                    src_point = point.squeeze()
                    dst_point = centroids[first]

                    src_w, src_h = wh[id[0]][0], wh[id[0]][1]
                    dst_w, dst_h = wh[first][0], wh[first][1]

                    count = 0
                    if (src_w + dst_w
                        ) - np.abs(src_point[0] - dst_point[0]) > 0 and (
                            src_h + dst_h) - np.abs(src_point[1] -
                                                    dst_point[1]) > 0:
                        w_reduce = ((src_w + dst_w) -
                                    np.abs(src_point[0] - dst_point[0])) / 2
                        h_reduce = ((src_h + dst_h) -
                                    np.abs(src_point[1] - dst_point[1])) / 2
                        threshold_w, threshold_h = max(
                            -int(max(src_w - w_reduce, dst_w - w_reduce) / 2.),
                            -60
                        ), max(
                            -int(max(src_h - h_reduce, dst_h - h_reduce) / 2.),
                            -60)

                    else:
                        threshold_w, threshold_h = max(
                            -int(max(src_w, dst_w) / 2.),
                            -60), max(-int(max(src_h, dst_h) / 2.), -60)
                    # threshold_w, threshold_h = -5, -5
                    while (src_w + dst_w) - np.abs(
                            src_point[0] - dst_point[0]) > threshold_w and (
                                src_h + dst_h) - np.abs(
                                    src_point[1] - dst_point[1]) > threshold_h:

                        if (dst_w * dst_h) > (src_w * src_h):
                            wh[first][0] = max(int(wh[first][0] * 0.9), 2)
                            wh[first][1] = max(int(wh[first][1] * 0.9), 2)
                            dst_w, dst_h = wh[first][0], wh[first][1]
                        else:
                            wh[id[0]][0] = max(int(wh[id[0]][0] * 0.9), 2)
                            wh[id[0]][1] = max(int(wh[id[0]][1] * 0.9), 2)
                            src_w, src_h = wh[id[0]][0], wh[id[0]][1]

                        if human_num >= 3 and start + 1 < len(id):
                            dst_point_ = centroids[id[start + 1]]
                            dst_w_, dst_h_ = wh[id[start +
                                                   1]][0], wh[id[start + 1]][1]
                            if (dst_w_ * dst_h_) > (src_w * src_h) and (
                                    dst_w_ * dst_h_) > (dst_w * dst_h):
                                if (src_w + dst_w_) - np.abs(
                                        src_point[0] - dst_point_[0]
                                ) > -3 and (src_h + dst_h_) - np.abs(
                                        src_point[1] - dst_point_[1]) > -3:
                                    wh[id[start + 1]][0] = max(
                                        int(wh[id[start + 1]][0] * 0.9), 2)
                                    wh[id[start + 1]][1] = max(
                                        int(wh[id[start + 1]][1] * 0.9), 2)

                        count += 1
                        if count > 40:
                            break
        for (center_w, center_h), (width, height) in zip(centroids, wh):
            assert (width > 0 and height > 0)

            if (0 < center_w < w) and (0 < center_h < h):
                h_start = (center_h - height)
                h_end = (center_h + height)

                w_start = center_w - width
                w_end = center_w + width
                #
                if h_start < 0:
                    h_start = 0

                if h_end > h:
                    h_end = h

                if w_start < 0:
                    w_start = 0

                if w_end > w:
                    w_end = w

                if cycle:
                    mask = generate_cycle_mask(height, width)
                    mask_map[h_start:h_end, w_start:w_end] = mask

                else:
                    mask_map[h_start:h_end, w_start:w_end] = 1

        mask_map = mask_map * 255

        cv.imwrite(os.path.join(dst_mask_path, img_id + '.png'), mask_map,
                   [cv.IMWRITE_PNG_BILEVEL, 1])
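
generate_masks() also depends on a generate_cycle_mask(height, width) helper, used when the module-level cycle flag is set, that is not shown here. Since its output fills a 2*height by 2*width slice of mask_map, a filled ellipse of that size is a reasonable guess; the sketch below is an assumed reconstruction, not the original helper.

import numpy as np


def generate_cycle_mask(height, width):
    # Filled elliptical mask of shape (2*height, 2*width), i.e. the same size
    # as the box it is pasted into by generate_masks() (assumed reconstruction).
    y, x = np.ogrid[-height:height, -width:width]
    ellipse = ((x + 0.5) / width) ** 2 + ((y + 0.5) / height) ** 2 <= 1.0
    return ellipse.astype('uint8')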
Example #4
0
def generate_masks():
    file_list = glob.glob(os.path.join(dst_imgs_path, '*.jpg'))

    print(len(file_list))
    for idx, img_path in enumerate(file_list):
        if idx < -1:  # debug guard; never triggers with non-negative idx
            break
        img_id = os.path.splitext(os.path.basename(img_path))[0]
        img_ori = Image.open(img_path)
        w, h = img_ori.size

        print(img_id)
        print(w, h)
        mask_map = np.zeros((h, w), dtype='uint8')
        gt_name = os.path.join(dst_json_path, img_id + '.json')

        with open(gt_name) as f:
            ImgInfo = json.load(f)

        centroid_list = []
        wh_list = []
        for id, (w_start, h_start, w_end,
                 h_end) in enumerate(ImgInfo["boxes"], 0):
            centroid_list.append([(w_end + w_start) / 2,
                                  (h_end + h_start) / 2])
            wh_list.append(
                [max((w_end - w_start) / 2, 3),
                 max((h_end - h_start) / 2, 3)])
        # print(len(centroid_list))
        centroids = np.array(centroid_list.copy(), dtype='int')
        wh = np.array(wh_list.copy(), dtype='int')
        wh[wh > 15] = 15
        human_num = ImgInfo["human_num"]
        for point in centroids:
            point = point[None, :]

            dists = euclidean_dist(point, centroids)
            dists = dists.squeeze()
            id = np.argsort(dists)

            for start, first in enumerate(id, 0):
                if start > 0 and start < 5:
                    src_point = point.squeeze()
                    dst_point = centroids[first]

                    src_w, src_h = wh[id[0]][0], wh[id[0]][1]
                    dst_w, dst_h = wh[first][0], wh[first][1]

                    count = 0
                    if (src_w + dst_w
                        ) - np.abs(src_point[0] - dst_point[0]) > 0 and (
                            src_h + dst_h) - np.abs(src_point[1] -
                                                    dst_point[1]) > 0:
                        w_reduce = ((src_w + dst_w) -
                                    np.abs(src_point[0] - dst_point[0])) / 2
                        h_reduce = ((src_h + dst_h) -
                                    np.abs(src_point[1] - dst_point[1])) / 2
                        threshold_w, threshold_h = max(
                            -int(max(src_w - w_reduce, dst_w - w_reduce) / 2.),
                            -60
                        ), max(
                            -int(max(src_h - h_reduce, dst_h - h_reduce) / 2.),
                            -60)

                    else:
                        threshold_w, threshold_h = max(
                            -int(max(src_w, dst_w) / 2.),
                            -60), max(-int(max(src_h, dst_h) / 2.), -60)
                    # threshold_w, threshold_h = -5, -5
                    while (src_w + dst_w) - np.abs(
                            src_point[0] - dst_point[0]) > threshold_w and (
                                src_h + dst_h) - np.abs(
                                    src_point[1] - dst_point[1]) > threshold_h:

                        if (dst_w * dst_h) > (src_w * src_h):
                            wh[first][0] = max(int(wh[first][0] * 0.9), 2)
                            wh[first][1] = max(int(wh[first][1] * 0.9), 2)
                            dst_w, dst_h = wh[first][0], wh[first][1]
                        else:
                            wh[id[0]][0] = max(int(wh[id[0]][0] * 0.9), 2)
                            wh[id[0]][1] = max(int(wh[id[0]][1] * 0.9), 2)
                            src_w, src_h = wh[id[0]][0], wh[id[0]][1]

                        if human_num >= 3 and start + 1 < len(id):
                            dst_point_ = centroids[id[start + 1]]
                            dst_w_, dst_h_ = wh[id[start +
                                                   1]][0], wh[id[start + 1]][1]
                            if (dst_w_ * dst_h_) > (src_w * src_h) and (
                                    dst_w_ * dst_h_) > (dst_w * dst_h):
                                if (src_w + dst_w_) - np.abs(
                                        src_point[0] - dst_point_[0]
                                ) > -3 and (src_h + dst_h_) - np.abs(
                                        src_point[1] - dst_point_[1]) > -3:
                                    wh[id[start + 1]][0] = max(
                                        int(wh[id[start + 1]][0] * 0.9), 2)
                                    wh[id[start + 1]][1] = max(
                                        int(wh[id[start + 1]][1] * 0.9), 2)

                        count += 1
                        if count > 40:
                            break
        for (center_w, center_h), (width, height) in zip(centroids, wh):
            assert (width > 0 and height > 0)

            if (0 < center_w < w) and (0 < center_h < h):
                h_start = (center_h - height)
                h_end = (center_h + height)

                w_start = center_w - width
                w_end = center_w + width
                #
                if h_start < 0:
                    h_start = 0

                if h_end > h:
                    h_end = h

                if w_start < 0:
                    w_start = 0

                if w_end > w:
                    w_end = w

                if cycle:
                    mask = generate_cycle_mask(height, width)
                    mask_map[h_start:h_end, w_start:w_end] = mask

                else:
                    mask_map[h_start:h_end, w_start:w_end] = 1

        mask_map = mask_map * 255

        cv.imwrite(os.path.join(dst_mask_path, img_id + '.png'), mask_map,
                   [cv.IMWRITE_PNG_BILEVEL, 1])

        # plt.imshow(img_ori)

        saveImg = plt.gca()
        plt.imshow(img_ori)
        for a, b in zip(centroid_list, wh_list):

            x_, y_, w_, h_ = a[0], a[1], b[0], b[1]
            saveImg.add_patch(
                plt.Rectangle((x_ - w_, y_ - h_),
                              2 * w_,
                              2 * h_,
                              fill=False,
                              edgecolor='g',
                              linewidth=1))

        saveImg.axes.get_yaxis().set_visible(False)
        saveImg.axes.get_xaxis().set_visible(False)
        saveImg.spines['top'].set_visible(False)
        saveImg.spines['bottom'].set_visible(False)
        saveImg.spines['left'].set_visible(False)
        saveImg.spines['right'].set_visible(False)
        dst_vis_path = os.path.join(dst_Root, 'box_vis')
        if not os.path.exists(dst_vis_path):
            os.makedirs(dst_vis_path)
        plt.savefig(os.path.join(dst_vis_path, img_id + '.jpg'),
                    bbox_inches='tight',
                    pad_inches=0,
                    dpi=300)
        plt.close()
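
Both mask-generation scripts read their configuration from module-level globals (paths and the cycle flag) defined outside the functions shown, together with the euclidean_dist and generate_cycle_mask helpers sketched earlier. A hypothetical setup is sketched below; the variable names come from the code above, but the values and directory layout are assumptions.

import glob
import json
import os

import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

# Hypothetical paths -- adjust to the real dataset layout.
dst_Root = './processed'
dst_imgs_path = os.path.join(dst_Root, 'images')
dst_json_path = os.path.join(dst_Root, 'jsons')
dst_mask_path = os.path.join(dst_Root, 'mask')
cycle = True  # elliptical masks instead of filled rectangles

if __name__ == '__main__':
    if not os.path.exists(dst_mask_path):
        os.makedirs(dst_mask_path)
    generate_masks()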
Example #5
0
f_cen_lst = []
for idx, face in enumerate(faces):
    f_eye_l = face['faceLandmarks']['eyeLeftInner']
    f_eye_r = face['faceLandmarks']['eyeRightInner']
    f_cen = ((f_eye_l['x'] + f_eye_r['x']) / 2.0,
             (f_eye_l['y'] + f_eye_r['y']) / 2.0)
    f_gen = 1 if face['attributes']['gender'] else 0
    f_age_s = face['attributes']['age']
    f_age = [int(fn.median(a_c)) for a_c in age_cate if f_age_s in a_c][0]
    v_label = v_label_dict[f_age, f_gen]
    G_q.add_node(idx, label=v_label)
    f_cen_lst.append(f_cen)

f_cen_combi = list(itertools.combinations(f_cen_lst, 2))
for a, b in f_cen_combi:
    px_dist = fn.euclidean_dist(a, b)
    a_idx = f_cen_lst.index(a)
    b_idx = f_cen_lst.index(b)
    G_q.add_edge(a_idx, b_idx, weight=px_dist)

# Minimum Spanning Tree
G_q_mst = nx.minimum_spanning_tree(G_q)
# Set initial number of Order Distance as 0 For MST edges
for v1, v2 in G_q_mst.edges():
    G_q_mst[v1][v2]['weight'] = 0
# Assign Order Distance for All Edges
G_q_od = dict(nx.all_pairs_shortest_path_length(G_q_mst, cutoff=4))  # dict() for NetworkX >= 2.0
# Remove the edges visiting itself
# Decrease Order Distances with -1
# Copy MST's Order Distance to Edges Attrs. of Graph G
for v1 in G_q_od.keys():
Example #6
0
def build_facegraph(json_obj):
    """
    # Build Face Graph from query image 
    """
    # create Graph of Query & BoFG
    G_q = nx.Graph()
    G_bofg = nx.Graph()
    # from Query
    faces = json_obj
    age_range = [(1, 3), (3, 8), (8, 13), (13, 20), (20, 37), (37, 66), (66, 85)]
    age_cate = [range(s, e) for s, e in age_range]
    v_label_dict = {
        (1, 1): 0,
        (5, 1): 1,
        (10, 1): 2,
        (16, 1): 3,
        (28, 1): 4,
        (51, 1): 5,
        (75, 1): 6,
        (1, 2): 7,
        (5, 2): 8,
        (10, 2): 9,
        (16, 2): 10,
        (28, 2): 11,
        (51, 2): 12,
        (75, 2): 13,
    }

    f_cen_lst = []
    for idx, face in enumerate(faces):
        f_eye_l = face["faceLandmarks"]["eyeLeftInner"]
        f_eye_r = face["faceLandmarks"]["eyeRightInner"]
        f_cen = ((f_eye_l["x"] + f_eye_r["x"]) / 2.0, (f_eye_l["y"] + f_eye_r["y"]) / 2.0)
        f_gen = 1 if face["attributes"]["gender"] else 0
        f_age_s = face["attributes"]["age"]
        f_age = [int(fn.median(a_c)) for a_c in age_cate if f_age_s in a_c][0]
        v_label = v_label_dict[f_age, f_gen]
        G_q.add_node(idx, label=v_label)
        f_cen_lst.append(f_cen)

    f_cen_combi = list(itertools.combinations(f_cen_lst, 2))
    for a, b in f_cen_combi:
        px_dist = fn.euclidean_dist(a, b)
        a_idx = f_cen_lst.index(a)
        b_idx = f_cen_lst.index(b)
        G_q.add_edge(a_idx, b_idx, weight=px_dist)

    # Minimum Spanning Tree
    G_q_mst = nx.minimum_spanning_tree(G_q)
    # Set initial number of Order Distance as 0 For MST edges
    for v1, v2 in G_q_mst.edges():
        G_q_mst[v1][v2]["weight"] = 0
    # Assign Order Distance for All Edges
    G_q_od = dict(nx.all_pairs_shortest_path_length(G_q_mst, cutoff=4))  # dict() for NetworkX >= 2.0, where this returns an iterator
    # Remove the edges visiting itself
    # Decrease Order Distances with -1
    # Copy MST's Order Distance to Edges Attrs. of Graph G
    for v1 in G_q_od.keys():
        for v2 in list(G_q_od[v1].keys()):  # copy keys so deleting the self-entry is safe
            if v1 == v2:  # Remove Nodes self to self
                del G_q_od[v1][v2]
            else:  # Modify Weights of Other Nodes & Assign them to Graph G
                # Edge Label
                order_dist = G_q_od[v1][v2]
                order_dist -= 1
                G_q.add_edge(v1, v2, weight=order_dist)

    return G_q
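
build_facegraph() expects a parsed JSON list of face records (with the faceLandmarks and attributes entries indexed above) plus a helper module fn providing median and euclidean_dist. Below is a hypothetical usage sketch, assuming it sits in the same module as the function so that the fn stand-in is visible to it; the face records are made-up examples of the expected shape.

import math
import statistics
import types

import networkx as nx

# Stand-in for the project's helper module `fn` (assumed interface).
fn = types.SimpleNamespace(
    median=statistics.median,
    euclidean_dist=lambda a, b: math.dist(a, b),
)

# Hypothetical face records in the shape the function indexes into; real input
# would come from a face-detection JSON response.
faces = [
    {
        "faceLandmarks": {
            "eyeLeftInner": {"x": 110.0, "y": 95.0},
            "eyeRightInner": {"x": 130.0, "y": 96.0},
        },
        "attributes": {"gender": "male", "age": 28},
    },
    {
        "faceLandmarks": {
            "eyeLeftInner": {"x": 310.0, "y": 140.0},
            "eyeRightInner": {"x": 332.0, "y": 141.0},
        },
        "attributes": {"gender": "female", "age": 5},
    },
]

G_q = build_facegraph(faces)
print(G_q.nodes(data=True))
print(G_q.edges(data=True))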
Example #7
0
def build_facegraph(json_obj):
    """
    # Build Face Graph from query image 
    """
    # create Graph of Query & BoFG
    G_q = nx.Graph()
    G_bofg = nx.Graph()
    # from Query
    faces = json_obj
    age_range = [(1, 3), (3, 8), (8, 13), (13, 20), (20, 37), (37, 66),
                 (66, 85)]
    age_cate = [range(s, e) for s, e in age_range]
    v_label_dict = {
        (1, 1): 0,
        (5, 1): 1,
        (10, 1): 2,
        (16, 1): 3,
        (28, 1): 4,
        (51, 1): 5,
        (75, 1): 6,
        (1, 2): 7,
        (5, 2): 8,
        (10, 2): 9,
        (16, 2): 10,
        (28, 2): 11,
        (51, 2): 12,
        (75, 2): 13
    }

    f_cen_lst = []
    for idx, face in enumerate(faces):
        f_eye_l = face['faceLandmarks']['eyeLeftInner']
        f_eye_r = face['faceLandmarks']['eyeRightInner']
        f_cen = ((f_eye_l['x'] + f_eye_r['x']) / 2.0,
                 (f_eye_l['y'] + f_eye_r['y']) / 2.0)
        f_gen = 1 if face['attributes']['gender'] else 0
        f_age_s = face['attributes']['age']
        f_age = [int(fn.median(a_c)) for a_c in age_cate if f_age_s in a_c][0]
        v_label = v_label_dict[f_age, f_gen]
        G_q.add_node(idx, label=v_label)
        f_cen_lst.append(f_cen)

    f_cen_combi = list(itertools.combinations(f_cen_lst, 2))
    for a, b in f_cen_combi:
        px_dist = fn.euclidean_dist(a, b)
        a_idx = f_cen_lst.index(a)
        b_idx = f_cen_lst.index(b)
        G_q.add_edge(a_idx, b_idx, weight=px_dist)

    # Minimum Spanning Tree
    G_q_mst = nx.minimum_spanning_tree(G_q)
    # Set initial number of Order Distance as 0 For MST edges
    for v1, v2 in G_q_mst.edges():
        G_q_mst[v1][v2]['weight'] = 0
    # Assign Order Distance for All Edges
    G_q_od = dict(nx.all_pairs_shortest_path_length(G_q_mst, cutoff=4))  # dict() for NetworkX >= 2.0, where this returns an iterator
    # Remove the edges visiting itself
    # Decrease Order Distances with -1
    # Copy MST's Order Distance to Edges Attrs. of Graph G
    for v1 in G_q_od.keys():
        for v2 in list(G_q_od[v1].keys()):  # copy keys so deleting the self-entry is safe
            if v1 == v2:  # Remove Nodes self to self
                del G_q_od[v1][v2]
            else:  # Modify Weights of Other Nodes & Assign them to Graph G
                # Edge Label
                order_dist = G_q_od[v1][v2]
                order_dist -= 1
                G_q.add_edge(v1, v2, weight=order_dist)

    return G_q