Example no. 1
import json
from os import listdir
from os.path import isfile, join

import cv2
import numpy as np

import core_functions  # project-specific helpers (bounding boxes, ROIs, channel encoding)

# depth_folder and out_folder are assumed to be defined at module level.


def generate():
    propcountarr = []
    with open("/Data2TB/correctly_registered/S12/train/output.json") as f:
        jsonprop = json.load(f)
    png_files = [f for f in listdir(depth_folder) if isfile(join(depth_folder, f))]
    for image_name in png_files:
        print(image_name)
        count = 0
        for frame in jsonprop:
            if frame["file"] != image_name:
                continue
            propcount = 0
            for obj in frame["objects"]:
                propcount += 1
                x, y, z = obj["x"], obj["y"], obj["z"]
                xmiw, ymiw, xmaw, ymaw = core_functions.get_bounding_box_WASSIMEA(x, y, z)
                prop_object = [xmiw, ymiw, xmaw, ymaw, y, x]
                headpoint = [prop_object[4], prop_object[5]]  # (y, x) of the head point
                image, headpoint_new = core_functions.get_neg_roi(
                    depth_folder + image_name, prop_object, headpoint)
                width, height = image.shape[1], image.shape[0]
                if width > 30 and height > 30:
                    c1, c2, c3, mod = core_functions.get_channels(image, headpoint_new)
                    cv2.imwrite(
                        out_folder + image_name.replace(".png", "_id_") + str(count) + ".jpg",
                        mod)
                    count += 1
            propcountarr.append(propcount)  # track proposals per matching frame
    return np.mean(propcountarr)  # average number of proposals per image
Example no. 2
def get_proposals(filename, jsonprop):
    """Return [xmin, ymin, xmax, ymax, y, x] proposal boxes for the given image file."""
    proparray = []
    for im_object in jsonprop:
        if im_object["file"] == filename:
            for object_prop in im_object["objects"]:
                x = object_prop["x"]
                y = object_prop["y"]
                z = object_prop["z"]
                xmiw, ymiw, xmaw, ymaw = core_functions.get_bounding_box_WASSIMEA(
                    x, y, z)
                proparray.append([xmiw, ymiw, xmaw, ymaw, y, x])
    return proparray
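
For context, a minimal usage sketch for get_proposals, assuming the proposals JSON is a flat list of {"file": ..., "objects": [...]} records as in Example no. 1; the path and file name below are placeholders, not real project paths:

import json

import core_functions  # project helper providing get_bounding_box_WASSIMEA

with open("proposals.json") as f:  # placeholder path
    jsonprop = json.load(f)

# Placeholder file name; each proposal is [xmin, ymin, xmax, ymax, y, x].
for xmin, ymin, xmax, ymax, y, x in get_proposals("frame_0001.png", jsonprop):
    print(xmin, ymin, xmax, ymax, (y, x))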
Example no. 3
def process_image():
    with open("/home/wassimea/Desktop/wzebb.json") as f:
        jsonprop = json.load(f)
    filename = "/Data2TB/correctly_registered/S12/test/depth/video_fifth_2018-04-23_CAM1_1524500433782.png"
    filename_only = "video_fifth_2018-04-23_CAM1_1524500433782.png"
    png_image = cv2.imread(filename, -1)  # 16-bit depth frame (not used further in this snippet)
    for im_object in jsonprop["proposals"]:
        if im_object["file"] == filename_only:
            count = 0
            for object_prop in im_object["objects"]:
                x = object_prop["x"]
                y = object_prop["y"]
                z = object_prop["z"]
                xmiw, ymiw, xmaw, ymaw = core_functions.get_bounding_box_WASSIMEA(x, y, z)
                prop_object = [xmiw, ymiw, xmaw, ymaw, y, x]
                headpoint = [y, x]
                image, headpoint_new = core_functions.get_neg_roi(filename_only, prop_object, headpoint)
                width, height = image.shape[1], image.shape[0]
                c1, c2, c3, mod = core_functions.get_channels(image, headpoint_new)
                cv2.imwrite("/home/wassimea/Desktop/tst_out/testing_2/" + filename_only.replace(".png", "_id_") + str(count) + ".jpg", mod)
                count += 1
Example no. 4
def generate():
    propcountarr = []
    with open(config.prop_json) as f:
        jsonprop = json.load(f)
    rgb_files = [f for f in listdir(rgb_folder) if isfile(join(rgb_folder, f))]
    for image_name in rgb_files:
        image_name_png = image_name.replace("jpg", "png")  # proposals are keyed by the .png name
        count = 0
        for frame in jsonprop:
            if frame["file"] != image_name_png:
                continue
            propcount = 0
            for obj in frame["objects"]:
                propcount += 1
                x, y, z = obj["x"], obj["y"], obj["z"]
                xmiw, ymiw, xmaw, ymaw = core_functions.get_bounding_box_WASSIMEA(x, y, z)
                prop_object = [xmiw, ymiw, xmaw, ymaw, y, x]
                headpoint = [prop_object[4], prop_object[5]]
                image, headpoint_new = core_functions.get_neg_roi(
                    rgb_folder + image_name, prop_object, headpoint)
                if config.mod:
                    image = mod.get_channels(image, headpoint_new)
                    cv2.normalize(image, image, 0, 255, cv2.NORM_MINMAX)
                width, height = image.shape[1], image.shape[0]
                if width > 30 and height > 30:
                    #c1, c2, c3, mod = core_functions.get_channels(image, headpoint_new)
                    cv2.imwrite(
                        out_folder + image_name.replace(".jpg", "_id_") + str(count) + ".jpg",
                        image)
                    count += 1
            propcountarr.append(propcount)  # track proposals per matching frame
    return np.mean(propcountarr)  # average number of proposals per image
Example no. 5
def annotate():
    # Similar in principle to evaluate(), but draws the detected boxes and the
    # ground-truth boxes instead of computing accuracies.
    with tf.device('/gpu:0'):
        classifier = HeadClassifier()
    depth_folder = "/Data2TB/correctly_registered/S12/test/depth/"
    out_folder = "/home/wassimea/Desktop/SMATS/images/8bit/test_out/"
    with open("/home/wassimea/Desktop/wzebb.json") as f:
        jsonprop = json.load(f)

    images = [f for f in listdir(depth_folder) if isfile(join(depth_folder, f))]
    for image in images:
        rgb_image = cv2.imread(
            "/Data2TB/correctly_registered/S12/test/color/" +
            image.replace(".png", ".jpg"))
        cvimage = cv2.imread(depth_folder + image, -1)
        count = 0
        for frame in jsonprop["proposals"]:
            if frame["file"] == image:
                for obj in frame["objects"]:
                    x = obj["x"]
                    y = obj["y"]
                    z = obj["z"]
                    cv2.circle(rgb_image, (x, y), 3, (0, 0, 255), 2)
                    headpoint = [y, x]
                    xmiw, ymiw, xmaw, ymaw = core_functions.get_bounding_box_WASSIMEA(x, y, z)
                    #cv2.rectangle(rgb_image,(xmiw,ymiw),(xmaw,ymaw),(255,0,0),2)
                    roi = cvimage[ymiw:ymaw, xmiw:xmaw]
                    width, height = roi.shape[1], roi.shape[0]
                    if width > 0 and height > 0:  # and z <= 4500
                        # Shift the head point into ROI coordinates.
                        headpoint[0] = headpoint[0] - ymiw
                        headpoint[1] = headpoint[1] - xmiw
                        c1, c2, c3, mod = core_functions.get_channels(roi, headpoint)
                        #cv2.imwrite("/home/wassimea/Desktop/chl_work/outs/" + str(count)+ ".jpg",mod)
                        # Run the classifier once and unpack its outputs.
                        result = classifier.get_classification(mod)
                        boxes, scores, classes = result[0], result[1], result[2]
                        scores_sorted = sorted(scores, reverse=True)
                        print(str(scores_sorted[0][0]))
                        maximum = max(scores)
                        maxclass = max(classes)
                        #cv2.imshow("ayre",mod)
                        #cv2.waitKey()
                        localcount = 0
                        for i in range(0, len(scores[0])):
                            if scores[0][i] >= 0.003:
                                localcount += 1
                                if classes[0][i] == 1.0:
                                    # Detection boxes are normalized to the ROI:
                                    # offset them by the ROI origin (xmiw, ymiw).
                                    xmin = xmiw + int(boxes[0][i][1] * width)
                                    ymin = ymiw + int(boxes[0][i][0] * height)
                                    xmax = xmiw + int(boxes[0][i][3] * width)
                                    ymax = ymiw + int(boxes[0][i][2] * height)
                                    #cv2.rectangle(rgb_image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 3)
                                    cv2.rectangle(rgb_image,
                                                  (xmiw - 1, ymiw - 1),
                                                  (xmaw + 1, ymaw + 1),
                                                  (0, 0, 255), 2)
                                #else:
                                #cv2.rectangle(mod,(int(boxes[0][i][1] * width), int(boxes[0][i][0] * height)), (int(boxes[0][i][3] * width), int(boxes[0][i][2] * height)),(255,0,0), 3)
                        if localcount > 1:
                            pass  # more than one detection above threshold (debug hook)
                        #cv2.imwrite("/home/wassimea/Desktop/chl_work/outs/" + image,mod)
                        #print("check")
                        #cv2.waitKey()
                    count += 1
                cv2.imwrite(
                    "/home/wassimea/Desktop/chl_work/outs/" +
                    image.replace(".png", "_") + str(count) + ".jpg",
                    rgb_image)

    # folder and jsondata are assumed to be defined at module level.
    for k in range(0, len(images)):
        if "resized-1-rotated-0" in images[k]:
            print(images[k])
            img = np.array(Image.open(folder + images[k]))
            new_img = img
            for i in range(0, len(jsondata['frames'])):
                if jsondata['frames'][i]['file'] == images[k]:
                    for j in range(0, len(jsondata['frames'][i]['annotations'])):
                        annotation = jsondata['frames'][i]['annotations'][j]
                        if annotation['label'] == 'Head':
                            xmin = annotation['x']
                            ymin = annotation['y']
                            xmax = xmin + annotation['width']
                            ymax = ymin + annotation['height']
                            cv2.rectangle(new_img, (xmin, ymin), (xmax, ymax),
                                          (255, 0, 0), 3)

            result = classifier.get_classification(img)
            boxes, scores = result[0], result[1]

            for i in range(0, len(scores[0])):
                if scores[0][i] > 0.15:
                    cv2.rectangle(
                        new_img,
                        (int(boxes[0][i][1] * 640), int(boxes[0][i][0] * 480)),
                        (int(boxes[0][i][3] * 640), int(boxes[0][i][2] * 480)),
                        (0, 0, 255), 3)
            cv2.imwrite(out_folder + images[k], new_img)
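
The box arithmetic in annotate() maps normalized detections back into image coordinates. As a reference, here is a small helper (an assumption, not part of the project) that converts a normalized [ymin, xmin, ymax, xmax] box, the layout used by the TensorFlow Object Detection API and apparently by HeadClassifier above, to absolute pixel coordinates, optionally shifted by an ROI origin such as (xmiw, ymiw):

def to_pixel_box(norm_box, width, height, x_offset=0, y_offset=0):
    """Convert a normalized [ymin, xmin, ymax, xmax] box to pixel coordinates."""
    ymin, xmin, ymax, xmax = norm_box
    return (x_offset + int(xmin * width), y_offset + int(ymin * height),
            x_offset + int(xmax * width), y_offset + int(ymax * height))

# e.g. xmin, ymin, xmax, ymax = to_pixel_box(boxes[0][i], width, height, xmiw, ymiw)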
Example no. 6
def generate():
    gtheads = 0
    propheads = 0
    image_count = 0
    propcountarr = []
    depth_folder = "/Data2TB/correctly_registered/S2/tester/"
    with open("/home/wassimea/Desktop/wzebb.json") as f:
        jsonprop = json.load(f)
    with open("/Data2TB/sample/annotations.json") as f:
        jsongt = json.load(f)
    png_files = [f for f in listdir(depth_folder) if isfile(join(depth_folder, f))]
    for image_name in png_files:
        image_name_jpg = image_name.replace("png", "jpg")
        if image_name_jpg not in jsongt:
            continue
        image_count += 1
        paletted_image = cv2.imread(
            "/Data2TB/correctly_registered/S2/tester/paletted/" + image_name_jpg)
        # Collect ground-truth head boxes for this image.
        gtarray = []
        for annotation in jsongt[image_name_jpg]["annotations"]:
            if annotation["category"] == "Head":
                xmingt = annotation["x"] + 5
                ymingt = annotation["y"]
                width = annotation["width"]
                height = annotation["height"]
                xmaxgt = xmingt + width
                ymaxgt = ymingt + height
                if xmingt >= 0 and ymingt >= 0 and xmingt <= 1000 and ymingt <= 1000:
                    gtheads += 1
                    gtarray.append([xmingt, ymingt, xmaxgt, ymaxgt])
                    cv2.rectangle(paletted_image, (xmingt, ymingt),
                                  (xmaxgt, ymaxgt), (0, 0, 255), 3)
        # Collect proposal boxes for this image.
        proparray = []
        for frame in jsonprop["proposals"]:
            if frame["file"] == image_name:
                propcount = 0
                for obj in frame["objects"]:
                    propcount += 1
                    x, y, z = obj["x"], obj["y"], obj["z"]
                    cv2.circle(paletted_image, (x, y), 2, (255, 0, 0), 3)
                    xmiw, ymiw, xmaw, ymaw = core_functions.get_bounding_box_WASSIMEA(x, y, z)
                    proparray.append([xmiw, ymiw, xmaw, ymaw])
                    cv2.rectangle(paletted_image, (xmiw, ymiw),
                                  (xmaw, ymaw), (0, 255, 0), 3)
                propcountarr.append(propcount)
        cv2.imshow("AYRE", paletted_image)
        cv2.waitKey()
        #cv2.imwrite("/home/wassimea/Desktop/testing/" + image_name.replace("png","jpg"),paletted_image)
        # Count ground-truth heads covered by at least one proposal (IoU > 0.5).
        for gt_box in gtarray:
            contained = False  # check_if_rectangle_contained(gt_box, prop_box) is an alternative test
            for prop_box in proparray:
                iou = core_functions.bb_intersection_over_union(gt_box, prop_box)
                if iou > 0.5:
                    propheads += 1
                    contained = True
                    break
            if not contained:
                print(image_name_jpg)
                #cv2.imshow("kissimmik", paletted_image)
                #cv2.waitKey()
    return np.mean(propcountarr)  # average number of proposals per image
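
core_functions.bb_intersection_over_union is not shown in any of these examples; a standard IoU implementation for [xmin, ymin, xmax, ymax] boxes looks roughly like this (a generic sketch, not necessarily the project's exact helper):

def bb_intersection_over_union(box_a, box_b):
    # Both boxes are [xmin, ymin, xmax, ymax].
    inter_w = max(0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]))
    inter_h = max(0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))
    inter = inter_w * inter_h
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0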
Example no. 7
def process_image(filename, jsongt, jsonprop):
    # png_folder, out_folder and the global `data` dict are assumed to be defined at module level.
    png_image = cv2.imread(png_folder + filename, -1)  # raw depth frame (not used further below)
    gtarray = []
    proparray = []
    print(filename)
    counter = 0
    filename_jpg = filename.replace("png", "jpg")
    if filename_jpg in jsongt and len(jsongt[filename_jpg]["annotations"]) > 0:
        if not os.path.exists(out_folder + filename.replace(".png", "")):
            os.mkdir(out_folder + filename.replace(".png", ""))
        # Collect ground-truth head boxes together with their annotation ids.
        for object_gt in jsongt[filename_jpg]["annotations"]:
            if object_gt["category"] == "Head":
                xmingt = object_gt["x"]  #+ 5
                ymingt = object_gt["y"]
                width = object_gt["width"]
                height = object_gt["height"]
                xmaxgt = xmingt + width
                ymaxgt = ymingt + height
                id = object_gt["id"]
                gtarray.append([xmingt, ymingt, xmaxgt, ymaxgt, id])
        # Collect proposal boxes for this file.
        for im_object in jsonprop["proposals"]:
            if im_object["file"] == filename:
                for object_prop in im_object["objects"]:
                    x = object_prop["x"]
                    y = object_prop["y"]
                    z = object_prop["z"]
                    xmiw, ymiw, xmaw, ymaw = core_functions.get_bounding_box_WASSIMEA(x, y, z)
                    proparray.append([xmiw, ymiw, xmaw, ymaw, y, x])
                #head = [object["x"],object["y"],object["x"] + object["width"],object["y"] + object["height"]]
                #right_shoulder = [-1,-1]
                #left_shoulder = [-1,-1]
                #id = object["id"]
                # NOTE: `id` here is whatever was last assigned in the ground-truth loop above
                # (see the commented-out assignment); the per-proposal id is not re-read here.
                if not os.path.exists(out_folder + filename.replace(".png", "") + "/" + str(id)):
                    os.mkdir(out_folder + filename.replace(".png", "") + "/" + str(id))
                #for object_candidate in jsongt[filename_jpg]["annotations"]:
                #if object_candidate["id"] == id and object_candidate["category"] == "Right Shoulder":
                #right_shoulder = [object_candidate["x"],object_candidate["y"]]
                #elif object_candidate["id"] == id and object_candidate["category"] == "Left Shoulder":
                #left_shoulder = [object_candidate["x"],object_candidate["y"]]
        # Match each ground-truth head against the proposals by IoU.
        for gt_object in gtarray:
            id = gt_object[4]
            for prop_object in proparray:
                iou = bb_intersection_over_union(gt_object, prop_object)
                if iou > 0.4:
                    # Positive match: crop the ROI with its annotations and save it.
                    contained = True
                    right_shoulder = [-1, -1]
                    left_shoulder = [-1, -1]
                    for object_candidate in jsongt[filename_jpg]["annotations"]:
                        if object_candidate["id"] == id and object_candidate["category"] == "Right Shoulder":
                            right_shoulder = [object_candidate["x"], object_candidate["y"]]
                        elif object_candidate["id"] == id and object_candidate["category"] == "Left Shoulder":
                            left_shoulder = [object_candidate["x"], object_candidate["y"]]

                    headpoint = [prop_object[4], prop_object[5]]
                    image, head_new, right_shoulder_new, left_shoulder_new, headpoint_new = get_new_roi_with_annotations(
                        filename, prop_object, gt_object, right_shoulder,
                        left_shoulder, headpoint)
                    num_zeros = (image == 0).sum()
                    width, height = image.shape[1], image.shape[0]
                    # Keep the crop only if it is big enough and not mostly empty depth.
                    if width > 2 and height > 2 and num_zeros < (width * height) / 3:
                        data['frames'].append({
                            'file': filename.replace(".png", "_id_") + str(id) + ".jpg",
                            'width': width,
                            'height': height,
                            'annotations': [{
                                'label': 'Head',
                                'x': head_new[0],
                                'y': head_new[1],
                                'width': head_new[2] - head_new[0],
                                'height': head_new[3] - head_new[1]
                            }, {
                                'label': 'Headpoint',
                                'x': headpoint_new[0],
                                'y': headpoint_new[1]
                            }, {
                                'label': 'Right Shoulder',
                                'x': right_shoulder_new[1],
                                'y': right_shoulder_new[0]
                            }, {
                                'label': 'Left Shoulder',
                                'x': left_shoulder_new[1],
                                'y': left_shoulder_new[0]
                            }]
                        })
                        c1, c2, c3, mod = core_functions.get_channels(image, headpoint_new)
                        #cv2.rectangle(mod,(head_new[0],head_new[1]),(head_new[2] ,head_new[3]),(255,0,0),3)
                        #cv2.circle(mod,(right_shoulder_new[0], right_shoulder_new[1]), 5, (0,255,0), -1)
                        #cv2.circle(mod,(left_shoulder_new[0], left_shoulder_new[1]), 5, (0,255,0), -1)
                        #cv2.imwrite(out_folder + filename.replace(".png","") + "/" + str(id) + "/c1.png",c1)
                        #cv2.imwrite(out_folder + filename.replace(".png","") + "/" + str(id) + "/c2.jpg",c2)
                        #cv2.imwrite(out_folder + filename.replace(".png","") + "/" + str(id) + "/c3.jpg",c3)
                        #cv2.imwrite(out_folder + filename.replace(".png","") + "/" + str(id) + "/mod.jpg",mod)
                        cv2.imwrite(
                            out_folder.replace("testing", "testing_2/") +
                            filename.replace(".png", "_id_") + str(id) + ".jpg", mod)
                    break
                else:
                    # No match: save the proposal as a negative sample with empty annotations.
                    headpoint = [prop_object[4], prop_object[5]]
                    image, headpoint_new = core_functions.get_neg_roi(
                        filename, prop_object, headpoint)
                    width, height = image.shape[1], image.shape[0]
                    data['frames'].append({
                        'file': filename.replace(".png", "_id_") + str(id) + ".jpg",
                        'width': width,
                        'height': height,
                        'annotations': []
                    })
                    if width > 5 and height > 5:
                        c1, c2, c3, mod = core_functions.get_channels(image, headpoint_new)
                        cv2.imwrite(
                            out_folder.replace("testing", "testing_2/") +
                            filename.replace(".png", "_id_") +
                            str(proparray.index(prop_object)) + ".jpg", mod)