Example #1
def getMentors():
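    # match the mentee posted as JSON in the request body against the mentor table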
    db = get_db()
    mentors = getMentorTable(db)
    mentee = request.get_json()
    print(mentee)
    matches = matching(mentors, mentee)
    return jsonify(matches), 201
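
A quick way to exercise this handler, assuming it is registered as a POST route on a local Flask app (the URL, port, and JSON fields below are illustrative assumptions, not from the original):

import requests

# hypothetical mentee payload; field names are placeholders
resp = requests.post("http://localhost:5000/mentors",
                     json={"name": "Ada", "interests": ["python", "ml"]})
print(resp.status_code, resp.json())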
Example #2
 def update(self):
     # keep looping infinitely
     while True:
         # if the thread indicator variable is set, stop the
         # thread
         if self.stopped:
             if self.save_video:
                 self.stream.release()
             return
         # otherwise, ensure the queue is not empty
         if not self.Q.empty():
             (boxes, scores, hm_data, pt1, pt2, orig_img,
              im_name) = self.Q.get()
             orig_img = np.array(orig_img, dtype=np.uint8)
             #if there is not even one person detected
             if boxes is None:
                 if opt.save_img or opt.save_video or opt.vis:
                     img = orig_img
                     if opt.vis:
                         cv2.imshow("AlphaPose Demo", img)
                         cv2.waitKey(30)
                     if opt.save_img:
                         cv2.imwrite(
                             os.path.join(opt.outputpath, 'vis', im_name),
                             img)
                     if opt.save_video:
                         self.stream.write(img)
             else:
                 # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                 if opt.matching:
                     preds = getMultiPeakPrediction(hm_data, pt1.numpy(),
                                                    pt2.numpy(),
                                                    opt.inputResH,
                                                    opt.inputResW,
                                                    opt.outputResH,
                                                    opt.outputResW)
                     result = matching(boxes, scores.numpy(), preds)
                 else:
                     preds_hm, preds_img, preds_scores = getPrediction(
                         hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                         opt.outputResH, opt.outputResW)
                     result = pose_nms(boxes, scores, preds_img,
                                       preds_scores)
                 result = {'imgname': im_name, 'result': result}
                 self.final_result.append(result)
                 if opt.save_img or opt.save_video or opt.vis:
                     img = vis_frame(orig_img, result)
                     print('result \n', result)
                     if opt.vis:
                         cv2.imshow("AlphaPose Demo", img)
                         cv2.waitKey(30)
                     if opt.save_img:
                         cv2.imwrite(
                             os.path.join(opt.outputpath, 'vis', im_name),
                             img)
                     if opt.save_video:
                         self.stream.write(img)
         else:
             time.sleep(0.1)
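
For context, a minimal sketch of the producer side that this consumer loop assumes: some enqueueing method (the name save() is an assumption) puts the same per-frame tuple into self.Q that update() pops above.

    def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
        # hand one frame's detections and pose heatmaps to the writer thread
        self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))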
Example #3
def parse_matchings_file(matchings_file):
    print "Parsing matching file " + matchings_file
    tree = defusedxml.ElementTree.parse(matchings_file)
    root = tree.getroot().find(
        "{http://knowledgeweb.semanticweb.org/heterogeneity/alignment}Alignment"
    )
    # check whether the file really is an alignment in RDF/XML syntax
    if root is not None and root.tag == "{http://knowledgeweb.semanticweb.org/heterogeneity/alignment}Alignment":
        matchings = []
        maps = root.findall(
            "{http://knowledgeweb.semanticweb.org/heterogeneity/alignment}map")
        #print maps
        for map_ in maps:
            # getchildren() was removed in Python 3.9; iterate the element directly
            children = list(map_.find(
                "{http://knowledgeweb.semanticweb.org/heterogeneity/alignment}Cell"
            ))
            #print "Element: " + str(map_)
            #print "-> " + str(children)
            match = matching.matching()
            tmp_elements = []
            tmp_properties = []
            for child in children:
                #print "-> " + str(child)
                if re.match(
                        "[{]http[:][/][/]knowledgeweb[.]semanticweb[.]org[/]heterogeneity[/]alignment[}]entity[0-9]*",
                        child.tag, re.IGNORECASE):
                    tmp_elements.append(child.attrib[
                        "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource"]
                                        )
                elif re.match(
                        "[{]http[:][/][/]knowledgeweb[.]semanticweb[.]org[/]heterogeneity[/]alignment[}]measure",
                        child.tag, re.IGNORECASE):
                    tmp_properties.append([child.tag, child.text])
                elif re.match(
                        "[{]http[:][/][/]knowledgeweb[.]semanticweb[.]org[/]heterogeneity[/]alignment[}]relation",
                        child.tag, re.IGNORECASE):
                    tmp_properties.append([child.tag, child.text])
                #get the label if the entity has one
                label = child.find(
                    "{http://www.w3.org/2000/01/rdf-schema#}label")
                #print label.text
                if label is not None and label.text:
                    tmp_properties.append([
                        "{http://www.w3.org/2000/01/rdf-schema#}label",
                        label.text
                    ])
            match.add_elements(tmp_elements)
            match.add_properties(tmp_properties)
            #print match.tostring()
            matchings.append(match)
        #print matchings
        return matchings
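
The long namespace URIs above could be collected into constants; a small illustrative sketch (not part of the original), with a placeholder file name:

import defusedxml.ElementTree

ALIGN = "{http://knowledgeweb.semanticweb.org/heterogeneity/alignment}"
RDF = "{http://www.w3.org/1999/02/22-rdf-syntax-ns#}"

tree = defusedxml.ElementTree.parse("matchings.rdf")  # placeholder file name
root = tree.getroot().find(ALIGN + "Alignment")

# iterate over every Cell of every map in one pass
for cell in root.findall(ALIGN + "map/" + ALIGN + "Cell"):
    for child in cell:
        print(child.tag, child.attrib.get(RDF + "resource"))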
Example #4
def run():
    variations = [
        #      ('daisy_bow_sigma', ['daisy_bow', 'gauss_window_sigma'], [8]),
        #      ('daisy_bow_clusters', ['features', 'daisy_bow', 'num_clusters'],
        #                             [1100]),
        #      ('daisy_radius', ['features', 'daisy_bow', 'feature_opts', 'radius'],
        #                             [12,13,14,15]),
        #      ('daisy_norm', ['features', 'daisy_bow', 'feature_opts', 'normalization'],
        #                             ['l1','l2']),
        #      ('jet_bow_clusters', ['features', 'jet_bow', 'num_clusters'],
        #                             [900, 1000, 1100]),
        #      ('jet_sigma', ['features', 'jet_bow', 'feature_opts', 'sigma'],
        #                             [4, 5, 6, 7]),
        (
            "experiment",
            ["dataset", "experiment"],
            ["afskaering", "ekstra1", "ekstra2", "mishandling", "normal", "ophaengning"],
        )
    ]

    summary = ""
    for opts, var_name in dicvariations(options.options, variations):
        features_opts = opts["features"]
        dataset_opts = opts["dataset"]
        matching_opts = opts["matching"]
        run_name = var_name
        print "# Running " + run_name
        params = training(features_opts)

        files1, features1, files2, features2 = extract_features(features_opts, dataset_opts, params)

        distances = matching(features1, features2, matching_opts)

        results = output(
            run_name,
            files1,
            features1,
            files2,
            features2,
            distances,
            features_opts,
            dataset_opts,
            matching_opts,
            params,
        )
        print(results)
        summary += run_name + ": " + str(results["Mispredictions"]) + "\n"

    print("# Summary\n" + summary)
Example #5
    def find(self, path, center=False, sensitive=False):
        '''
            Return [Center of Image's Position] or [None]
        '''
        pos = None
        if not os.path.isdir(path):
            #base_pos = list(self.config["location"]['widthxheight'])
            #self.mouse.position = ((base_pos[0]//2)+50,base_pos[1]//2)
            self.mouse.position = self.config["location"]['base']

            if sensitive:
                print(">> Sensitive Mode: Enabled")
                pos = matching.matching(
                    template_img_path=path,
                    source="screenshot",
                    debug=False,
                    score_threshold=self.config["ocr_settings"]
                    ['matching_threshold'],
                    center=center)
            else:
                if not center:
                    pos = pyautogui.locateOnScreen(path)
                else:
                    pos = pyautogui.locateCenterOnScreen(path)

            if pos:
                print(">> found:", path)
            else:
                #print(">> not found:", path)
                pass

            return pos

        else:
            print("> checking dir:", path)
            for index, file in enumerate(glob.glob(f"{path}/*.png")):
                pos = self.find(file, center=center, sensitive=sensitive)
                if pos:
                    self.mouse.position = pos
                    return pos
            return None
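
A usage sketch under the same assumptions as the method above (an object exposing find() and a folder of template PNGs; the variable name bot and the path are placeholders):

pos = bot.find("templates/buttons", center=True, sensitive=True)
if pos:
    print("match at", pos)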
Example #6
    def update(self):
        count = 0
#         filepath = '/home/yurik/Documents/Program/Alphapose_zed_video/testdata/20191014/walkstraightly/walkstraightly.svo'
#         init = sl.InitParameters(svo_input_filename=filepath,svo_real_time_mode=False)
#         init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_QUALITY
#         cam = sl.Camera()
#         runtime = sl.RuntimeParameters()
#         status = cam.open(init)
#         mat = sl.Mat()
#         zeroarr = np.zeros((720,1280,3))
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    if opt.matching:
                        preds = getMultiPeakPrediction(
                            hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:
                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                        result = pose_nms(
                            boxes, scores, preds_img, preds_scores)
                    result = {
                        'imgname': im_name,
                        'result': result
                    }

                    # 3D coordinates computation
                    ppl = result['result']
                    ppl_num = len(ppl)
                    self.coordinates_u, self.coordinates_v, self.truex, self.truey, self.dists = fl.people_3d_coord(ppl, ppl_num,
                                                                             self.video_mode, self.camMtx1, orig_img)

                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
#                         err = cam.grab(runtime)
#                         if err == sl.ERROR_CODE.SUCCESS:
#                             cam.retrieve_image(mat, sl.VIEW.VIEW_DEPTH)
#                             depthmap = mat.get_data()
#                             if img.shape[2] == 3:
#                                 depthmap = cv2.cvtColor(depthmap, cv2.COLOR_RGBA2RGB)
#                             depthmap = cv2.resize(depthmap, (int(img.shape[1]/2), img.shape[0]))
#                             depthmap = cv2.applyColorMap(depthmap, cv2.COLORMAP_JET)
#                             depthmap = np.hstack((depthmap, zeroarr))
#                             depthmap = depthmap.astype(np.uint8)
#                             img = cv2.addWeighted(img, 0.5, depthmap, 0.5, 3)
                        if len(self.coordinates_v) > 0 and len(self.coordinates_u) > 0:
                            for i in range(len(self.coordinates_v)):
#                                 cv2.putText(img, 'z:' + str(round((self.dists[i] / 10), 1)),
#                                             (int(self.coordinates_u[i]), int(self.coordinates_v[i]) - 15),
#                                             cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3, 8)
                                cv2.putText(img, str(round((self.truex[i] / 10), 1)),
                                            (int(self.coordinates_u[i]), int(self.coordinates_v[i]) - 15),
                                            cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3, 8)
                                cv2.putText(img, str(round((self.truey[i] / 10), 1)),
                                            (int(self.coordinates_u[i]) + 200, int(self.coordinates_v[i]) - 15),
                                            cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 3, 8)
                                cv2.putText(img, str(round((self.dists[i] / 10), 1)),
                                            (int(self.coordinates_u[i]) + 400, int(self.coordinates_v[i]) - 15),
                                            cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3, 8)
                                cv2.putText(img, 'frames: ' + str(count), (620, 620), cv2.FONT_HERSHEY_PLAIN, 2, (0,100,90), 3, 8)
                        else:
                            cv2.putText(img, '[N/A]',
                                        (40, 620), cv2.FONT_HERSHEY_PLAIN, 2, (0, 100, 90), 3, 8)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.1)
            count += 1  # counts loop iterations (including empty-queue sleeps), not processed frames
Example #7
def run_on_video(video_path, output_video_name, conf_thresh):
    cap = cv2.VideoCapture(video_path, cv2.CAP_DSHOW)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    if not cap.isOpened():
        raise ValueError("Video open failed.")
    status = True
    idx = 0
    while status:
        start_stamp = time.time()
        status, img_raw = cap.read()
        read_frame_stamp = time.time()
        if status:
            # convert to RGB only after a successful read (img_raw is None once the stream ends)
            img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
            inference(img_raw,
                      conf_thresh,
                      iou_thresh=0.5,
                      target_shape=(260, 260),
                      draw_result=True,
                      show_result=False)
            cv2.imshow('image', img_raw[:, :, ::-1])

            k = cv2.waitKey(10)
            inference_stamp = time.time()
            write_frame_stamp = time.time()
            idx += 1
            global i
            if k == ord('p'):

                db.create()  # create the database

                time_now, the_path = create_file()
                cv2.imwrite(
                    str(the_path) + '/' + 'demo' + str(i) + '.jpg',
                    img_raw[:, :, ::-1])
                #print("现在时间:" + time_now[11:])
                print("成功捕获第{}张图像!".format(i + 1))
                isHelmet = hg.classifier(
                    str(the_path) + '/' + 'demo' + str(i) + '.jpg')
                flag = isHelmet
                db.store(flag, time_now)
                #mysql.store(flag,time_now)
                i += 1
                # log the classification result to record.txt
                with open('record.txt', 'a') as f:
                    if flag == 1:
                        f.write('Time:  ' + time_now + '   Result: no helmet worn' + '\n')
                        print("No helmet worn")
                    if flag == 0:
                        f.write('Time:  ' + time_now + '    Result: helmet worn' + '\n')
                        print("Helmet worn")
                    if flag == -1:
                        f.write('Time:  ' + time_now + '    Result: no person detected' + '\n')
                        print("No person detected")

                matching.matching(i, the_path)
                # run template matching on the captured image
            if k == ord('o'):
                print("over")
                cap.release()
                cv2.destroyAllWindows()
                return
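
In the loop above, pressing 'p' captures the current frame, runs the helmet classifier on it, and logs the result, while 'o' releases the capture and exits. A minimal call sketch (file names and threshold are placeholders):

run_on_video("input.mp4", "output.avi", conf_thresh=0.5)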
Example #8
import argparse
import pyautogui
import OCR
import matching

from pynput import mouse as Mouse


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", required=True,help="image yolu")
    parser.add_argument("-o", "--offset",type=int, nargs='+', required=False,help="image yolu")
    args = parser.parse_args()
    print("args", args)

    pos = matching.matching(args.path, source="screenshot", debug=True,score_threshold=0.005)
    #pos = pyautogui.locateOnScreen(args.path)
    
    if pos:
        print("Position", list(pos))
        if args.offset:
            offset = args.offset
            pos = list(pos)
            pos[0] += offset[0]
            pos[1] += offset[1]
            pos[2] = offset[2]
            pos[3] = offset[3]
            payment_solver_text = OCR.resolve_region(pos, show=True, save='test.png')
            print(payment_solver_text)

        else:
Example #9
    'violet': {2, 3, 4, 5, 6},
    'green': {7, 8},
    'yellow': {9}
}, {
    'orange': {1, 2, 3},
    'violet': {4, 5, 6, 7},
    'green': {8, 9}
}]

correct_temporal_communities = [{(0, 'violet'), (1, 'violet'), (2, 'violet')},
                                {(0, 'orange'), (2, 'orange')},
                                {(2, 'green'), (1, 'green'), (0, 'green')}]

# see figure 6 in paper

temporal_communities = matching(timeseries, 2)

print(temporal_communities)

# test if results are identical
found = set()
for temp_comm in temporal_communities:

    for i, correct_temp_comm in enumerate(correct_temporal_communities):

        if temp_comm == correct_temp_comm:
            found.add(i)
            break
    else:
        raise Exception('wrong community found')
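
As a final completeness check (an addition, not in the original snippet), one could also assert that every expected community was found:

assert found == set(range(len(correct_temporal_communities))), \
    'some expected temporal communities were missing from the output'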
Example #10
    def update(self):
        next_id = 0
        car_next_id = 0
        bbox_dets_list_list = []
        keypoints_list_list = []
        car_dets_list_list = []

        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty

            if not self.Q.empty():
                start_time = getTime()

                (boxes, scores, hm_data, pt1, pt2, orig_img, img_id, CAR) = self.Q.get()

                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is not None:
                    boxes = boxes.astype(np.int32)

                img = orig_img

                # text_filled2(img,(5,200),str(img_id),LIGHT_GREEN,2,2)

                bbox_dets_list = []  # keyframe: start from empty
                keypoints_list = []  # keyframe: start from empty
                # print(boxes)
                if boxes is None:  # No person detection
                    pass
                    # bbox_det_dict = {"img_id": img_id,
                    #                  "det_id": 0,
                    #                  "track_id": None,
                    #                  "bbox": [0, 0, 2, 2]}
                    # bbox_dets_list.append(bbox_det_dict)
                    #
                    # keypoints_dict = {"img_id": img_id,
                    #                   "det_id": 0,
                    #                   "track_id": None,
                    #                   "keypoints": []}
                    # keypoints_list.append(keypoints_dict)


                else:
                    if opt.matching:
                        preds = getMultiPeakPrediction(
                            hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH,
                            opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:

                        preds_hm, preds_img, preds_scores = getPrediction(hm_data, pt1, pt2, opt.inputResH,
                                                                          opt.inputResW, opt.outputResH,
                                                                          opt.outputResW)

                        # print('number of result', preds_hm,  preds_scores )
                        result = pose_nms(boxes, scores, preds_img, preds_scores)  # list type
                        # result = {  'keypoints': ,  'kp_score': , 'proposal_score': ,  'bbox' }

                    if img_id > 0:  # First frame does not have previous frame
                        bbox_list_prev_frame = bbox_dets_list_list[img_id - 1].copy()
                        keypoints_list_prev_frame = keypoints_list_list[img_id - 1].copy()
                    else:
                        bbox_list_prev_frame = []
                        keypoints_list_prev_frame = []

                    # boxes.size(0)
                    num_dets = len(result)

                    for bbox in boxes:
                        x, y, w, h = bbox.astype(np.uint32)
                        cv2.rectangle(orig_img, (x, y), (x + w, y + h), (253, 222, 111), 1)

                    for det_id in range(num_dets):  # IOU tracking for detections in current frame.
                        # detections for current frame
                        # obtain bbox position and track id

                        result_box = result[det_id]
                        kp_score = result_box['kp_score']
                        proposal_score = result_box['proposal_score'].numpy()[0]
                        if proposal_score < 1.3:
                            continue

                        keypoints = result_box['keypoints']  # torch, (17,2)
                        keypoints_pf = np.zeros((15, 2))

                        idx_list = [16, 14, 12, 11, 13, 15, 10, 8, 6, 5, 7, 9, 0, 0, 0]
                        for i, idx in enumerate(idx_list):
                            keypoints_pf[i] = keypoints[idx]
                        keypoints_pf[12] = (keypoints[5] + keypoints[6]) / 2  # neck

                        # COCO-order {0-nose    1-Leye    2-Reye    3-Lear    4Rear    5-Lsho    6-Rsho    7-Lelb    8-Relb    9-Lwri    10-Rwri    11-Lhip    12-Rhip    13-Lkne    14-Rkne    15-Lank    16-Rank} 
                        # PoseFLow order  #{0-Rank    1-Rkne    2-Rhip    3-Lhip    4-Lkne    5-Lank    6-Rwri    7-Relb    8-Rsho    9-Lsho   10-Lelb    11-Lwri    12-neck  13-nose 14-TopHead}

                        bbox_det = bbox_from_keypoints(keypoints)  # xxyy

                        # bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
                        # bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)

                        # Keyframe: use provided bbox
                        # if bbox_invalid(bbox_det):
                        #     track_id = None  # this id means null
                        #     keypoints = []
                        #     bbox_det = [0, 0, 2, 2]
                        #     # update current frame bbox
                        #     bbox_det_dict = {"img_id": img_id,
                        #                      "det_id": det_id,
                        #                      "track_id": track_id,
                        #                      "bbox": bbox_det}
                        #     bbox_dets_list.append(bbox_det_dict)
                        #     # update current frame keypoints
                        #     keypoints_dict = {"img_id": img_id,
                        #                       "det_id": det_id,
                        #                       "track_id": track_id,
                        #                       "keypoints": keypoints}
                        #     keypoints_list.append(keypoints_dict)
                        #     continue

                        # # update current frame bbox

                        if img_id == 0:  # First frame, all ids are assigned automatically
                            track_id = next_id
                            next_id += 1
                        else:
                            track_id, match_index = get_track_id_SpatialConsistency(bbox_det, bbox_list_prev_frame)
                            # print('track' ,track_id, match_index)

                            if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del bbox_list_prev_frame[match_index]
                                del keypoints_list_prev_frame[match_index]

                        # update current frame bbox
                        bbox_det_dict = {"img_id": img_id,
                                         "det_id": det_id,
                                         "track_id": track_id,
                                         "bbox": bbox_det}

                        # update current frame keypoints
                        keypoints_dict = {"img_id": img_id,
                                          "det_id": det_id,
                                          "track_id": track_id,
                                          "keypoints": keypoints,
                                          'kp_poseflow': keypoints_pf,
                                          'kp_score': kp_score,
                                          'bbox': bbox_det,
                                          'proposal_score': proposal_score}

                        bbox_dets_list.append(bbox_det_dict)
                        keypoints_list.append(keypoints_dict)

                    num_dets = len(bbox_dets_list)
                    for det_id in range(num_dets):  # if IOU tracking failed, run pose matching tracking.
                        bbox_det_dict = bbox_dets_list[det_id]
                        keypoints_dict = keypoints_list[det_id]

                        # assert (det_id == bbox_det_dict["det_id"])
                        # assert (det_id == keypoints_dict["det_id"])

                        if bbox_det_dict["track_id"] == -1:  # this id means matching not found yet
                            # track_id = bbox_det_dict["track_id"]
                            track_id, match_index = get_track_id_SGCN(bbox_det_dict["bbox"], bbox_list_prev_frame,
                                                                      keypoints_dict["kp_poseflow"],
                                                                      keypoints_list_prev_frame)

                            if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del bbox_list_prev_frame[match_index]
                                del keypoints_list_prev_frame[match_index]
                                bbox_det_dict["track_id"] = track_id
                                keypoints_dict["track_id"] = track_id

                            # if still can not find a match from previous frame, then assign a new id
                            # if track_id == -1 and not bbox_invalid(bbox_det_dict["bbox"]):
                            if track_id == -1:
                                bbox_det_dict["track_id"] = next_id
                                keypoints_dict["track_id"] = next_id
                                next_id += 1

                    # update frame
                    # print('keypoint list', len(keypoints_list))
                    vis_frame(img, keypoints_list)

                """
                Car
                """

                if CAR is not None:
                    car_np = CAR
                    new_car_bboxs = car_np[:, 0:4].astype(np.uint32)  # b/  x y w h c / cls_conf, cls_idx
                    new_car_score = car_np[:, 4]
                    cls_conf = car_np[:, 4]

                    # print("id: ", img_id , " ------------ " , new_car_bboxs, new_car_score)
                    # cls_conf = car_np[:, 6]
                    car_dest_list = []

                    if img_id > 1:  # First frame does not have previous frame
                        car_bbox_list_prev_frame = car_dets_list_list[img_id - 1].copy()
                    else:
                        car_bbox_list_prev_frame = []

                    # print('car bbox list prev frame ', len(car_bbox_list_prev_frame))
                    for c, score, conf in zip(new_car_bboxs, new_car_score, cls_conf):
                        # car_bbox_det = c
                        # car_bbox_det = x1y1x2y2_to_xywh(c)
                        bbox_det = c
                        # bbox_in_xywh = enlarge_bbox(car_bbox_det, enlarge_scale)
                        # bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)

                        if img_id == 0:  # First frame, all ids are assigned automatically
                            car_track_id = car_next_id
                            car_next_id += 1
                        else:
                            car_track_id, match_index = get_track_id_SpatialConsistency(bbox_det,
                                                                                        car_bbox_list_prev_frame)
                            # print(car_track_id, match_index)
                            if car_track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del car_bbox_list_prev_frame[match_index]

                        bbox_det_dict = {"img_id": img_id,
                                         "track_id": car_track_id,
                                         "bbox": bbox_det,
                                         "score": score,
                                         "conf": conf}
                        car_dest_list.append(bbox_det_dict)

                    for car_bbox_det_dict in car_dest_list:  # detections for current frame
                        if car_bbox_det_dict["track_id"] == -1:  # this id means matching not found yet
                            car_bbox_det_dict["track_id"] = car_next_id
                            car_next_id += 1

                    self.tracking(car_dest_list)
                    car_dets_list_list.append(car_dest_list)

                else:
                    car_dest_list = []
                    bbox_det_dict = {"img_id": img_id,
                                     "det_id": 0,
                                     "track_id": None,
                                     "bbox": [0, 0, 2, 2],
                                     "score": 0,
                                     "conf": 0}
                    car_dest_list.append(bbox_det_dict)
                    car_dets_list_list.append(car_dest_list)

                bbox_dets_list_list.append(bbox_dets_list)
                keypoints_list_list.append(keypoints_list)

                if img_id != 0:
                    self.car_person_detection(car_dest_list, bbox_dets_list, img)
                    self.car_parking_detection(car_dest_list, img, img_id)

                ckpt_time, det_time = getTime(start_time)
                cv2.putText(img, str(1 / det_time), (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1)
                if opt.vis:
                    cv2.imshow("AlphaPose Demo", img)
                    cv2.waitKey(33)
                if opt.save_video:
                    self.stream.write(img)
            else:
                time.sleep(0.1)
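
The COCO-to-PoseFlow keypoint reindexing used in the loop above (and again in Example #12) can be pulled out into a small helper; a sketch that follows the same index list and shoulder-midpoint neck convention as the original code:

import numpy as np

COCO_TO_POSEFLOW = [16, 14, 12, 11, 13, 15, 10, 8, 6, 5, 7, 9, 0, 0, 0]

def coco_to_poseflow(keypoints):
    # keypoints: (17, 2) array-like in COCO order; returns (15, 2) in PoseFlow order
    kp_pf = np.zeros((15, 2))
    for i, idx in enumerate(COCO_TO_POSEFLOW):
        kp_pf[i] = keypoints[idx]
    kp_pf[12] = (keypoints[5] + keypoints[6]) / 2  # neck = midpoint of the shoulders
    return kp_pf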
Example #11
    def update(self):
        # keep looping infinitely
        while True:
            sys.stdout.flush()
            print("generator len : " + str(self.Q.qsize()))

            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.det_processor.Q.empty():

                with torch.no_grad():
                    (inps, orig_img, im_name, boxes, scores, pt1,
                     pt2) = self.det_processor.read()

                    if orig_img is None:
                        print(f'{im_name} image read None: handle_video')
                        break

                    orig_img = np.array(orig_img, dtype=np.uint8)
                    if boxes is None or boxes.nelement() == 0:
                        (boxes, scores, hm_data, pt1, pt2, orig_img,
                         im_name) = (None, None, None, None, None, orig_img,
                                     im_name.split('/')[-1])

                        # if opt.save_img or opt.save_video or opt.vis:
                        #     img = orig_img
                        #     if opt.vis:
                        #         cv2.imshow("AlphaPose Demo", img)
                        #         cv2.waitKey(30)
                        #     if opt.save_img:
                        #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        #     if opt.save_video:
                        #         self.stream.write(img)
                    else:
                        # location prediction (n, kp, 2) | score prediction (n, kp, 1)

                        datalen = inps.size(0)
                        batchSize = 10  #args.posebatch()
                        leftover = 0
                        if datalen % batchSize:
                            leftover = 1
                        num_batches = datalen // batchSize + leftover
                        hm = []

                        sys.stdout.flush()
                        print("hhhh")

                        for j in range(num_batches):
                            inps_j = inps[j * batchSize:min(
                                (j + 1) * batchSize, datalen)]  #.cuda()
                            hm_j = self.pose_model(inps_j)
                            hm.append(hm_j)

                        hm = torch.cat(hm)
                        hm = hm.cpu().data

                        (boxes, scores, hm_data, pt1, pt2, orig_img,
                         im_name) = (boxes, scores, hm, pt1, pt2, orig_img,
                                     im_name.split('/')[-1])

                        if opt.matching:
                            preds = getMultiPeakPrediction(
                                hm_data, pt1.numpy(), pt2.numpy(),
                                opt.inputResH, opt.inputResW, opt.outputResH,
                                opt.outputResW)
                            result = matching(boxes, scores.numpy(), preds)
                        else:
                            preds_hm, preds_img, preds_scores = getPrediction(
                                hm_data, pt1, pt2, opt.inputResH,
                                opt.inputResW, opt.outputResH, opt.outputResW)
                            result = pose_nms(boxes, scores, preds_img,
                                              preds_scores)
                        result = {'imgname': im_name, 'result': result}
                        self.final_result.append(result)

                        kpts = []
                        no_person = []
                        if not result['result']:  # No people
                            self.Q.put(None)  #TODO
                        else:
                            self.Q.put(result)

                        # if opt.save_img or opt.save_video or opt.vis:
                        #     img = vis_frame(orig_img, result)
                        #     if opt.vis:
                        #         cv2.imshow("AlphaPose Demo", img)
                        #         cv2.waitKey(30)
                        #     if opt.save_img:
                        #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        #     if opt.save_video:
                        #         self.stream.write(img)
            else:
                time.sleep(0.1)
Example #12
    def person_tracking(self, boxes, scores, hm_data, pt1, pt2, img_id):

        person_list = []

        if boxes is None:
            self.person_list_list.append([])
            return person_list

        if opt.matching:  # TODO Check the difference,
            preds = getMultiPeakPrediction(hm_data, pt1.numpy(), pt2.numpy(),
                                           opt.inputResH, opt.inputResW,
                                           opt.outputResH, opt.outputResW)
            # result = matching(boxes, scores.numpy(), preds)
            result = matching(boxes, scores, preds)
        else:
            preds_hm, preds_img, preds_scores = getPrediction(
                hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                opt.outputResH, opt.outputResW)
            result = pose_nms(boxes, scores, preds_img,
                              preds_scores)  # list type
            # result = {  'keypoints': ,  'kp_score': , 'proposal_score': ,  'bbox' }

        to_final_result = {'imgname': img_id, 'result': result, 'boxes': boxes}

        self.final_result.append(to_final_result)

        if img_id > 0:  # First frame does not have previous frame
            person_list_prev_frame = self.person_list_list[img_id - 1].copy()
        else:
            person_list_prev_frame = []
        # print(result)

        num_dets = len(result)
        for det_id in range(
                num_dets):  # IOU tracking for detections in current frame.
            # detections for current frame, obtain bbox position and track id

            result_box = result[det_id]
            kp_score = result_box['kp_score']
            if opt.matching:
                proposal_score = result_box['proposal_score']
            else:
                proposal_score = result_box['proposal_score'].numpy()[0]

            if proposal_score < 0.2:  # TODO check person proposal threshold
                continue

            if isnan(proposal_score):
                continue

            keypoints = result_box['keypoints']  # torch, (17,2)
            keypoints_pf = np.zeros((15, 2))

            idx_list = [16, 14, 12, 11, 13, 15, 10, 8, 6, 5, 7, 9, 0, 0, 0]
            for i, idx in enumerate(idx_list):
                keypoints_pf[i] = keypoints[idx]
            keypoints_pf[12] = (keypoints[5] + keypoints[6]) / 2  # neck

            keypoints_norm = keypoints_pf - keypoints_pf[12]

            # COCO-order {0-nose    1-Leye    2-Reye    3-Lear    4Rear    5-Lsho    6-Rsho    7-Lelb    8-Relb    9-Lwri    10-Rwri    11-Lhip    12-Rhip    13-Lkne    14-Rkne    15-Lank    16-Rank}
            # PoseFLow order  #{0-Rank    1-Rkne    2-Rhip    3-Lhip    4-Lkne    5-Lank    6-Rwri    7-Relb    8-Rsho    9-Lsho   10-Lelb    11-Lwri    12-neck  13-nose 14-TopHead}
            bbox_det = bbox_from_keypoints(keypoints)  # xxyy

            # enlarge bbox by 20% with same center position
            bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
            bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)

            # # update current frame bbox
            if img_id == 0:  # First frame, all ids are assigned automatically
                track_id = self.person_next_id
                self.person_next_id += 1
            else:
                track_id, match_index = get_track_id_SpatialConsistency(
                    bbox_det, person_list_prev_frame)
                if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                    del person_list_prev_frame[match_index]

            person_det_dict = {
                "img_id": img_id,
                "det_id": det_id,
                "track_id": track_id,
                "bbox": bbox_det,
                "keypoints": keypoints,
                'kp_norm': keypoints_norm,
                'kp_poseflow': keypoints_pf,
                'kp_score': kp_score,
                'proposal_score': proposal_score
            }

            person_list.append(person_det_dict)

        num_dets = len(person_list)
        for det_id in range(
                num_dets
        ):  # if IOU tracking failed, run pose matching tracking.
            person_dict = person_list[det_id]

            if person_dict[
                    "track_id"] == -1:  # this id means matching not found yet
                # track_id = bbox_det_dict["track_id"]
                track_id, match_index = get_track_id_SGCN(
                    person_dict["bbox"], person_list_prev_frame,
                    person_dict["kp_poseflow"])

                if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                    del person_list_prev_frame[match_index]
                    person_dict["track_id"] = track_id
                else:
                    # if still can not find a match from previous frame, then assign a new id
                    # if track_id == -1 and not bbox_invalid(bbox_det_dict["bbox"]):
                    person_dict["track_id"] = self.person_next_id
                    self.person_next_id += 1

        self.person_list_list.append(person_list)
        return person_list
Example #13
    def update(self):
        # keep looping infinitely

        frame_prev = -1
        frame_cur = 0
        img_id = -1
        next_id = 0
        bbox_dets_list_list = []
        keypoints_list_list = []
        car_dets_list_list = []

        car_next_id = 0

        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty

            if not self.Q.empty():

                (boxes, scores, hm_data, pt1, pt2, orig_img, img_id,
                 CAR) = self.Q.get()
                # print(img_id)
                orig_img = np.array(orig_img, dtype=np.uint8)
                img = orig_img

                bbox_dets_list = []  # keyframe: start from empty
                keypoints_list = []  # keyframe: start from empty

                if boxes is None:  # No person detection
                    bbox_det_dict = {
                        "img_id": img_id,
                        "det_id": 0,
                        "track_id": None,
                        "bbox": [0, 0, 2, 2]
                    }
                    bbox_dets_list.append(bbox_det_dict)

                    keypoints_dict = {
                        "img_id": img_id,
                        "det_id": 0,
                        "track_id": None,
                        "keypoints": []
                    }
                    keypoints_list.append(keypoints_dict)

                    bbox_dets_list_list.append(bbox_dets_list)
                    keypoints_list_list.append(keypoints_list)

                else:
                    if opt.matching:
                        preds = getMultiPeakPrediction(hm_data, pt1.numpy(),
                                                       pt2.numpy(),
                                                       opt.inputResH,
                                                       opt.inputResW,
                                                       opt.outputResH,
                                                       opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:

                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                            opt.outputResH, opt.outputResW)
                        result = pose_nms(boxes, scores, preds_img,
                                          preds_scores)  # list type

                        # 'keypoints':
                        # 'kp_score':
                        # 'proposal_score':
                        # 'bbox'
                    #
                    # print('boexes', boxes.size(), boxes)
                    # for aa in result:
                    #     keys = aa['keypoints']
                    #     bbox2  = aa['bbox']
                    #     print('pose nms keys', keys.size())
                    #     print('pose nms, box', bbox2.size(), bbox2)
                    #
                    # _result = {
                    #     'imgname': img_id,
                    #     'result': result,
                    #     'pt1': pt1,
                    #     'pt2': pt2
                    # }

                    if img_id > 0:  # First frame does not have previous frame
                        bbox_list_prev_frame = bbox_dets_list_list[img_id -
                                                                   1].copy()
                        keypoints_list_prev_frame = keypoints_list_list[
                            img_id - 1].copy()
                    else:
                        bbox_list_prev_frame = []
                        keypoints_list_prev_frame = []

                    # boxes.size(0)
                    num_dets = len(result)
                    for det_id in range(
                            num_dets):  # detections for current frame
                        # obtain bbox position and track id

                        result_box = result[det_id]

                        kp_score = result_box['kp_score']
                        proposal_score = result_box['proposal_score'].numpy(
                        )[0]
                        if proposal_score < 1.3:
                            continue

                        keypoints = result_box['keypoints']
                        bbox_det = bbox_from_keypoints(keypoints)  # xxyy

                        # enlarge bbox by 20% with same center position
                        # bbox_x1y1x2y2 = xywh_to_x1y1x2y2(bbox_det)
                        bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
                        # print('enlared', bbox_in_xywh)
                        bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)
                        # print('converted', bbox_det)

                        # Keyframe: use provided bbox
                        # if bbox_invalid(bbox_det):
                        #     track_id = None  # this id means null
                        #     keypoints = []
                        #     bbox_det = [0, 0, 2, 2]
                        #     # update current frame bbox
                        #     bbox_det_dict = {"img_id": img_id,
                        #                      "det_id": det_id,
                        #                      "track_id": track_id,
                        #                      "bbox": bbox_det}
                        #     bbox_dets_list.append(bbox_det_dict)
                        #     # update current frame keypoints
                        #     keypoints_dict = {"img_id": img_id,
                        #                       "det_id": det_id,
                        #                       "track_id": track_id,
                        #                       "keypoints": keypoints}
                        #     keypoints_list.append(keypoints_dict)
                        #     continue

                        # # update current frame bbox

                        # obtain keypoints for each bbox position in the keyframe

                        # print('img id ', img_id)

                        if img_id == 0:  # First frame, all ids are assigned automatically
                            track_id = next_id
                            next_id += 1

                        else:
                            track_id, match_index = get_track_id_SpatialConsistency(
                                bbox_det, bbox_list_prev_frame)
                            # print('track' ,track_id, match_index)

                            if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del bbox_list_prev_frame[match_index]
                                del keypoints_list_prev_frame[match_index]

                        # update current frame bbox
                        bbox_det_dict = {
                            "img_id": img_id,
                            "det_id": det_id,
                            "track_id": track_id,
                            "bbox": bbox_det
                        }
                        bbox_dets_list.append(bbox_det_dict)

                        # update current frame keypoints
                        keypoints_dict = {
                            "img_id": img_id,
                            "det_id": det_id,
                            "track_id": track_id,
                            "keypoints": keypoints,
                            'kp_score': kp_score,
                            'bbox': bbox_det,
                            'proposal_score': proposal_score
                        }
                        keypoints_list.append(keypoints_dict)

                    num_dets = len(bbox_dets_list)
                    for det_id in range(
                            num_dets):  # detections for current frame
                        bbox_det_dict = bbox_dets_list[det_id]
                        keypoints_dict = keypoints_list[det_id]
                        # assert (det_id == bbox_det_dict["det_id"])
                        # assert (det_id == keypoints_dict["det_id"])

                        if bbox_det_dict[
                                "track_id"] == -1:  # this id means matching not found yet
                            track_id = bbox_det_dict["track_id"]
                            # track_id, match_index = get_track_id_SGCN(bbox_det_dict["bbox"], bbox_list_prev_frame,
                            #                                           keypoints_dict["keypoints"],
                            #                                           keypoints_list_prev_frame)
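                            # NOTE: with the SGCN re-matching above commented out, track_id stays -1,
                            # so the re-assignment block just below never runs; unmatched detections
                            # fall through to the new-id assignment at the bottom of this loop.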

                            if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del bbox_list_prev_frame[match_index]
                                del keypoints_list_prev_frame[match_index]
                                bbox_det_dict["track_id"] = track_id
                                keypoints_dict["track_id"] = track_id

                            # if still can not find a match from previous frame, then assign a new id
                            if track_id == -1 and not bbox_invalid(
                                    bbox_det_dict["bbox"]):
                                bbox_det_dict["track_id"] = next_id
                                keypoints_dict["track_id"] = next_id
                                next_id += 1

                    # update frame

                    bbox_dets_list_list.append(bbox_dets_list)
                    keypoints_list_list.append(keypoints_list)

                    # draw keypoints

                    vis_frame(img, keypoints_list)
                    # _pt1, _pt2 = _result['pt1'].numpy(), _result['pt2'].numpy()
                    # pt1 = _pt1.astype(np.uint32)
                    # pt2 = _pt2.astype(np.uint32)
                    # for p1, p2 in zip(pt1, pt2):
                    #     cv2.rectangle(img, (p1[0], p1[1]), (p2[0], p2[1]), (34, 154, 11), 1)

                if CAR is not None:  # car detections are present in this frame
                    car_track_id = 0
                    car_np = CAR
                    new_car_bboxs = car_np[:, 0:4].astype(np.uint32)
                    new_car_score = car_np[:, 4]
                    car_dest_list = []

                    if img_id > 1:  # First frame does not have previous frame
                        car_bbox_list_prev_frame = car_dets_list_list[
                            img_id - 1].copy()
                    else:
                        car_bbox_list_prev_frame = []

                    # print('car bbox list prev frame ', len(car_bbox_list_prev_frame))
                    for c, score in zip(new_car_bboxs, new_car_score):
                        car_bbox_det = c
                        bbox_in_xywh = enlarge_bbox(car_bbox_det,
                                                    enlarge_scale)
                        bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)

                        # obtain keypoints for each bbox position in the keyframe

                        # print('img id ', img_id)

                        if img_id == 0:  # First frame, all ids are assigned automatically
                            car_track_id = car_next_id
                            car_next_id += 1
                            # print('if img id zero' , car_next_id)

                        else:
                            car_track_id, match_index = get_track_id_SpatialConsistency(
                                bbox_det, car_bbox_list_prev_frame)
                            # print(car_track_id, match_index)
                            if car_track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del car_bbox_list_prev_frame[match_index]

                        bbox_det_dict = {
                            "img_id": img_id,
                            "track_id": car_track_id,
                            "bbox": bbox_det
                        }
                        car_dest_list.append(bbox_det_dict)

                    # print()
                    num_dets = len(car_dest_list)
                    for det_id in range(
                            num_dets):  # detections for current frame
                        car_bbox_det_dict = car_dest_list[det_id]
                        # assert (det_id == bbox_det_dict["det_id"])
                        # assert (det_id == keypoints_dict["det_id"])
                        # print(car_bbox_det_dict["track_id"])
                        if car_bbox_det_dict[
                                "track_id"] == -1:  # this id means matching not found yet
                            car_bbox_det_dict["track_id"] = car_next_id
                            car_next_id += 1
                            # print('car net id ', car_next_id)

                    self.tracking(car_dest_list, img_id)

                    for car in car_dest_list:
                        x, y, w, h = car['bbox']
                        track_id = car['track_id']

                        tracker = self.track_dict[track_id]
                        history = tracker['history']
                        moved = np.sum(history[-10:])
                        last_moved = np.sum(history[-60:])

                        COLOR_MOVING = (0, 255, 0)
                        COLOR_RED = (0, 0, 255)

                        COLOR_INACTIVE = (255, 0, 0)

                        cv2.rectangle(img, (x, y), (x + w, y + h),
                                      COLOR_INACTIVE, 1)
                        text_filled(img, (x, y), f'{track_id} Inactive',
                                    COLOR_INACTIVE)

                        # if moved:
                        #     cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_MOVING, 1)
                        #     text_filled(img, (x, y), f'CAR {track_id} Active', COLOR_MOVING)
                        # else:
                        #
                        #     if last_moved:
                        #         cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_RED, 1)
                        #         text_filled(img, (x, y), f'CAR {track_id} Standstill', COLOR_RED)
                        #
                        #         cropped = img[y:y+h, x:x+w,:]
                        #         filter = np.zeros(cropped.shape,dtype=img.dtype)
                        #         # print(cropped.shape, filter.shape)
                        #         filter[:,:,2] = 255
                        #         # print(overlay.shape)
                        #         # cv2.rectangle(overlay, (0, 0), (w, h), COLOR_RED, -1)
                        #         overlayed = cv2.addWeighted(cropped,0.8,filter,0.2,0)
                        #         img[y:y+h, x:x+w,:] = overlayed[:,:,:]
                        #     else:
                        #         cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_INACTIVE, 1)
                        #         text_filled(img, (x, y), f'{track_id} Inactive', COLOR_INACTIVE)

                    car_dets_list_list.append(car_dest_list)

                else:
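                    # no car detections this frame: append a placeholder entry so
                    # the per-frame lists stay aligned with img_id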
                    car_dest_list = []
                    bbox_det_dict = {
                        "img_id": img_id,
                        "det_id": 0,
                        "track_id": None,
                        "bbox": [0, 0, 2, 2]
                    }
                    car_dest_list.append(bbox_det_dict)
                    car_dets_list_list.append(car_dest_list)

                # if img_id != 0:
                #     for car in car_dets_list_list[-1]:
                #         car_track_id = car['track_id']
                #         if car_track_id is None:
                #             continue
                #
                #         car_bbox = car['bbox']
                #         for human in bbox_dets_list_list[-1]:
                #             human_track_id = human['track_id']
                #             if human_track_id is None:
                #                 continue
                #             hum_bbox = human['bbox']
                #             boxa = xywh_to_x1y1x2y2(hum_bbox)
                #             boxb = xywh_to_x1y1x2y2(car_bbox)
                #             x,y,w,h = x1y1x2y2_to_xywh(boxa)
                #             area = iou(boxa,boxb)
                #
                #             if area > 0.02:
                #                 cropped = img[y:y+h, x:x+w,:]
                #                 filter = np.zeros(cropped.shape,dtype=img.dtype)
                #                 filter[:,:,2] = 255
                #                 overlayed = cv2.addWeighted(cropped,0.9,filter,0.1,0)
                #                 img[y:y+h, x:x+w,:] = overlayed[:,:,:]

                if opt.vis:
                    cv2.imshow("AlphaPose Demo", img)
                    cv2.waitKey(1)
                if opt.save_video:
                    self.stream.write(img)
            else:
                time.sleep(0.1)
Ejemplo n.º 14
0
def detect_main(args, im_names, yolo_model, pose_net):
    # Load input images
    data_loader = ImageLoader(im_names, batchSize=args.detbatch, format='yolo').start()

    # Load detection loader
    det_loader = DetectionLoader(data_loader, model=yolo_model, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()

    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }

    # Init data writer
    # writer = DataWriter(args.save_video).start()

    data_len = data_loader.length()
    fall_res_all = []
    batchSize = args.posebatch
    for i in range(data_len):
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if boxes is None or boxes.nelement() == 0:
                # writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue

            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation
            # print(im_name)
            datalen = inps.size(0)
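            # run the pose network in chunks of batchSize; `leftover` adds one
            # extra batch when datalen is not an exact multiple (ceil division)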
            leftover = 0
            if (datalen) % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = pose_net(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)
            hm = hm.cpu()
            # writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
            fall_res = []
            fall_res.append(im_name.split('/')[-1])
            if boxes is None:
                # effectively unreachable: frames without detections were already
                # skipped above; write out the unannotated frame just in case
                cv2.imwrite(opt.outputpath + '/' + im_name.split('/')[-1], orig_img)
            else:
                if opt.matching:
                    preds = getMultiPeakPrediction(
                        hm, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                    result = matching(boxes, scores.numpy(), preds)
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(hm, pt1, pt2, opt.inputResH, opt.inputResW,
                                                                      opt.outputResH, opt.outputResW)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                # wrap the result for both branches so vis_frame and the fall
                # check below always receive {'imgname': ..., 'result': ...}
                result = {'imgname': im_name, 'result': result}
                img = vis_frame(orig_img, result)
               
                for human in result['result']:
                    keypoint = human['keypoints']
                    keypoint = keypoint.numpy()
                    # cast to int so the cv2 drawing calls below accept the coordinates
                    xmax = int(max(keypoint[:, 0]))
                    xmin = int(min(keypoint[:, 0]))
                    ymax = int(max(keypoint[:, 1]))
                    ymin = int(min(keypoint[:, 1]))
                    w = xmax - xmin
                    h = ymax - ymin
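                    # vertical distance between the ankle midpoint (keypoints 15, 16)
                    # and hip midpoint (keypoints 11, 12), assuming COCO ordering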
                    distance = abs((keypoint[15][1] + keypoint[16][1]) / 2 - (keypoint[11][1] + keypoint[12][1]) / 2)
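                    # if the keypoint bounding box is nearly as wide as it is tall
                    # (or wider), the person is likely horizontal, i.e. fallen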
                    if w / h >= 0.95:
                        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(img, 'Warning!Fall', (int(xmin + 10), int(ymax - 10)), font, 1, (0, 0, 255), 2)
                        fall_res.append([xmin,ymin,xmax,ymax])
                        '''
                        print('1 location:[%f,' % (xmin) + '%f]' % (ymin) + ' [%f,' % (xmax) + '%f]' % (
                            ymin) + ' [%f,' % (
                                  xmin) + '%f]' % (ymax) + ' [%f,' % (xmax) + '%f]' % (ymax))
                        '''
                    else:
                        if distance < 55:
                            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(img, 'Warning!Fall!', (int(xmin + 10), int(ymax - 10)), font, 1, (0, 255, 0), 2)
                            fall_res.append(1)
                            fall_res.append([xmin,ymin,xmax,ymax])
                            '''
                            print('1 location:[%f,' % (xmin) + '%f]' % (ymin) + ' [%f,' % (xmax) + '%f]' % (
                                ymin) + ' [%f,' % (
                                      xmin) + '%f]' % (ymax) + ' [%f,' % (xmax) + '%f]' % (ymax))
                            '''
                        else:
                            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
                #cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                print(fall_res)
                cv2.imwrite(opt.outputpath + '/' + im_name.split('/')[-1], img)
            
            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)
            fall_res_all.append(fall_res)
    return fall_res_all
Ejemplo n.º 15
0
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img,
                 im_name) = self.Q.get()

                orig_img = np.array(orig_img, dtype=np.uint8)

                # print('++++++++++++++++++++we will print original image+++++++++++++++++++++++++')
                # cv2.imwrite(os.path.join(opt.outputpath, 'det_img', im_name), orig_img)

                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    if opt.matching:
                        preds = getMultiPeakPrediction(hm_data, pt1.numpy(),
                                                       pt2.numpy(),
                                                       opt.inputResH,
                                                       opt.inputResW,
                                                       opt.outputResH,
                                                       opt.outputResW)
                        pose_result = matching(boxes, scores.numpy(), preds)  # same name as the else branch so the wrapper below works
                    else:
                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                            opt.outputResH, opt.outputResW)
                        pose_result = pose_nms(boxes, scores, preds_img,
                                               preds_scores)

                    result = {'imgname': im_name, 'result': pose_result}

                    # occlusion evaluation
                    result = occlud_eval(result)
                    # pan edit on 2020-04-06: bbox semantic code
                    result = bbox_code_process(result)

                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.1)
Ejemplo n.º 16
0

# Check the starting protocol
# table = PrettyTable(['ID', 'name', 'club', 'wins', 'defeats', 'hits_got', 'hits_given'])
# for fencer in index:
#    table.add_row([fencer.ID,fencer.name,fencer.club,fencer.wins,fencer.defeats,fencer.hits_got,fencer.hits_given])
# print(table)

# create the database
create_db()

# seed the first fencers
first_update(index)

# this call should be moved to main.py
matching(index)

# record the fencers who have fought
update_table(update)

# pull the current state of the database
for x in ask_table():
    print(x)

# compute the rating
dwarfing(index)

# view the table
table = PrettyTable(
    ['ID', 'name', 'club', 'wins', 'defeats', 'hits_got - hits_given'])
for fencer in index:
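    # (the listing is truncated here in the source; a plausible completion,
    # assuming the same fencer attributes used in the commented-out table above)
    table.add_row([fencer.ID, fencer.name, fencer.club, fencer.wins,
                   fencer.defeats, fencer.hits_got - fencer.hits_given])
print(table)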
Ejemplo n.º 17
0
    def update(self):

        # keep looping infinitely
        while True:
            sys.stdout.flush()
            print("generator len : " + str(self.Q.qsize()))

            # if the thread indicator variable is set, stop the
            # thread
            # if self.stopped:
            #     cv2.destroyAllWindows()
            #     if self.save_video:
            #         self.stream.release()
            #     return
            # otherwise, ensure the queue is not empty
            if not self.det_processor.Q.empty():

                with torch.no_grad():
                    (inps, orig_img, im_name, boxes, scores, pt1,
                     pt2) = self.det_processor.read()

                    if orig_img is None:
                        sys.stdout.flush()
                        print(f'{im_name} image read None: handle_video')
                        break

                    orig_img = np.array(orig_img, dtype=np.uint8)
                    if boxes is None or boxes.nelement() == 0:
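                        # no detections in this frame: hand the consumer a sentinel
                        # result (keypoints = -1) together with the raw frame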
                        (boxes, scores, hm_data, pt1, pt2, orig_img,
                         im_name) = (None, None, None, None, None, orig_img,
                                     im_name.split('/')[-1])

                        res = {'keypoints': -1, 'image': orig_img}
                        self.Q.put(res)  #TODO

                        # cv2.imwrite("/home/hrs/Desktop/dd/now.jpg", orig_img)

                        # img = orig_img
                        # cv2.imshow("AlphaPose Demo", img)
                        # cv2.waitKey(30)
                        ######################################################################################
                        # self.image = self.ax_in.imshow(orig_img, aspect='equal')
                        # self.image.set_data(orig_img)
                        # plt.draw()
                        # plt.pause(0.000000000000000001)
                        ######################################################################################

                        # if opt.save_img or opt.save_video or opt.vis:
                        #     img = orig_img
                        #     if opt.vis:
                        #         cv2.imshow("AlphaPose Demo", img)
                        #         cv2.waitKey(30)
                        #     if opt.save_img:
                        #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        #     if opt.save_video:
                        #         self.stream.write(img)
                    else:
                        # location prediction (n, kp, 2) | score prediction (n, kp, 1)

                        datalen = inps.size(0)
                        batchSize = 20  # hard-coded here; presumably args.posebatch in the full script
                        leftover = 0
                        if datalen % batchSize:
                            leftover = 1
                        num_batches = datalen // batchSize + leftover
                        hm = []

                        # sys.stdout.flush()
                        # print("hhhh")

                        for j in range(num_batches):
                            inps_j = inps[j * batchSize:min(
                                (j + 1) * batchSize, datalen)].cuda()
                            hm_j = self.pose_model(inps_j)
                            hm.append(hm_j)

                        # time1 = time.time()
                        hm = torch.cat(hm)
                        hm = hm.cpu().data

                        (boxes, scores, hm_data, pt1, pt2, orig_img,
                         im_name) = (boxes, scores, hm, pt1, pt2, orig_img,
                                     im_name.split('/')[-1])

                        if opt.matching:
                            preds = getMultiPeakPrediction(
                                hm_data, pt1.numpy(), pt2.numpy(),
                                opt.inputResH, opt.inputResW, opt.outputResH,
                                opt.outputResW)
                            result = matching(boxes, scores.numpy(), preds)
                        else:
                            preds_hm, preds_img, preds_scores = getPrediction(
                                hm_data, pt1, pt2, opt.inputResH,
                                opt.inputResW, opt.outputResH, opt.outputResW)
                            result = pose_nms(boxes, scores, preds_img,
                                              preds_scores)
                        result = {'imgname': im_name, 'result': result}
                        self.final_result.append(result)

                        # time2 = time.time()
                        # print(time2-time1)
                        ######################################################################################
                        # img = vis_frame(orig_img, result)

                        # cv2.imshow("AlphaPose Demo", img)
                        # cv2.imwrite("/home/hrs/Desktop/dd/now.jpg", img)
                        # cv2.waitKey(30)
                        ########################################################################
                        # self.point.set_offsets(keypoints[self.i])

                        # self.image = self.ax_in.imshow(orig_img, aspect='equal')
                        # self.image.set_data(orig_img)
                        # plt.draw()
                        # plt.pause(0.000000000000000001)
                        ##########################################################################
                        if not result['result']:  # No people
                            res = {'keypoints': -1, 'image': orig_img}
                            self.Q.put(res)  #TODO
                        else:
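                            # several people detected: keep the most salient one,
                            # ranking by proposal score weighted by keypoint area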
                            kpt = max(
                                result['result'],
                                key=lambda x: x['proposal_score'].data[0] *
                                calculate_area(x['keypoints']),
                            )['keypoints']

                            res = {'keypoints': kpt, 'image': orig_img}

                            self.Q.put(res)

                            # kpt_np = kpt.numpy()
                            # n = kpt_np.shape[0]
                            # print(kpt_np.shape)
                            # point_list = [(kpt_np[m, 0], kpt_np[m, 1]) for m in range(17)]
                            # for point in point_list:
                            #     cv2.circle(pose_img, point, 1, (0, 43, 32), 4)

                        # cv2.imshow(self.window, pose_img)
                        # cv2.waitKey()

                        # if opt.save_img or opt.save_video or opt.vis:
                        #     img = vis_frame(orig_img, result)
                        #     if opt.vis:
                        #         cv2.imshow("AlphaPose Demo", img)
                        #         cv2.waitKey(30)
                        #     if opt.save_img:
                        #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        #     if opt.save_video:
                        #         self.stream.write(img)
            else:
                time.sleep(0.1)
Ejemplo n.º 18
0
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img,
                 im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    '''
                    with open("IP2.csv",'a',newline='') as t:
                        writer=csv.writer(t)
                        writer.writerow([0])
                    '''
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(0)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)

                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    if opt.matching:
                        preds = getMultiPeakPrediction(hm_data, pt1.numpy(),
                                                       pt2.numpy(),
                                                       opt.inputResH,
                                                       opt.inputResW,
                                                       opt.outputResH,
                                                       opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:
                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                            opt.outputResH, opt.outputResW)
                        result = pose_nms(boxes, scores, preds_img,
                                          preds_scores)
                    result = {'imgname': im_name, 'result': result}
                    '''
                    with open("IP2.csv",'a',newline='') as t:
                        writer=csv.writer(t)
                        writer.writerow([data])
                    '''

                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img, data = vis_frame(orig_img, result)
                        #img = vis_frame(orig_img, result) #draw human pose limbs and keypoints
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.1)
Ejemplo n.º 19
0
def find_eyes(tpl, frame):
    # grayscale template matching: locate the eye template `tpl` in `frame`
    # and return the matched region of interest for the given threshold
    threshold = 0.15
    ROI = m.matching(motion.grayify(tpl), motion.grayify(frame), threshold)
    return ROI