Example #1
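These excerpts omit their module-level imports. A plausible shared preamble is sketched below; it is an assumption, and tform, utils, detector, and helpers such as Worker and prep_frame are project-local modules not reproduced here.

# assumed common imports for the excerpts on this page
import csv
import math
import os
import sys
import time

import cv2
import numpy as np
import scipy.spatial
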
def four_pts(pix_pts, pix_real, real_pix, real_origin, img):
    #convert to gps coords
    real_pts = tform.transform_pt_array(pix_pts, pix_real)
    final = []

    #calculate locations six feet away at given bearings and add to array
    for pt in real_pts:
        degrees = calc_angle(pt, real_origin)
        for angle in degrees:
            location = six_ft(pt, angle)
            final.append(location)

    #convert list of pts to a numpy array and drop the extra axis
    final = np.squeeze(np.array(final))
    # TODO: handle the case where final is empty

    #convert back to pixel coords
    final = tform.transform_pt_array(final, real_pix)

    #optionally draw the six-foot boundary points
    # for pt in final:
    #     pt2 = (int(pt[0]), int(pt[1]))
    #     cv2.circle(img, pt2, 10, (0, 0, 255), -1, 8)

    return final, img
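
tform.transform_pt_array itself is not shown on this page. Assuming pix_real and real_pix are 3x3 homography matrices (which the hard-coded values in Example #5 suggest), a minimal stand-in could look like this sketch:

import numpy as np

def transform_pt_array_sketch(pts, H):
    """Apply a 3x3 homography H to an (N, 2) array of points."""
    pts = np.atleast_2d(np.asarray(pts, dtype=float))
    ones = np.ones((pts.shape[0], 1))
    homog = np.hstack([pts, ones]) @ np.asarray(H).T  # to homogeneous coords
    return homog[:, :2] / homog[:, 2:]                # divide out the scale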
Example #2
def overhead(pts, origin, pix_real):
    #corners and edge midpoints of a 2592x1944 frame
    a = np.array([2592, 1944])
    b = np.array([2592, 972])
    c = np.array([2592, 0])
    d = np.array([1296, 0])
    e = np.array([0, 0])
    f = np.array([0, 972])
    g = np.array([0, 1944])
    h = np.array([1296, 1944])
    corners = np.array([a, b, c, d, e, f, g, h])
    #find real-world locations of the frame extremes and the points of interest
    real_corners = tform.transform_pt_array(corners, pix_real)
    real_pts = tform.transform_pt_array(pts, pix_real)

    #find most negative values in both directions
    mins = np.ndarray.min(real_corners, axis=0)
    mins[mins > 0] = 0
    mins = np.absolute(mins)

    #add to both directions until all values are positive
    shifted = real_corners + mins
    real_pts = real_pts + mins

    #scale frame size (x extent gives width, y extent gives height)
    maxs = np.ndarray.max(shifted, axis=0)
    frame_wdt = int(maxs[0])
    frame_hgt = int(maxs[1])

    #generate blank frame
    img = np.zeros((frame_hgt, frame_wdt, 3), np.uint8)

    #draw circles for all included points
    for pt in real_pts:
        x = int(pt[0])
        y = int(pt[1])
        cv2.circle(img, (x, y), 6, (0, 255, 255), -1)
    #shift and mark the origin
    origin = origin + mins
    x = int(origin[0])
    y = int(origin[1])

    cv2.circle(img, (x, y), 6, (255, 255, 0), -1)
    return img
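
The min-shift in overhead guards against transformed points landing at negative coordinates, which cv2 drawing calls would silently clip. The idea in isolation, with made-up points:

import numpy as np

pts = np.array([[-40.0, 12.0], [25.0, -8.0], [60.0, 30.0]])
mins = pts.min(axis=0)         # most negative value in each direction
mins[mins > 0] = 0             # only shift axes that actually go negative
shifted = pts + np.abs(mins)   # now every coordinate is >= 0
print(shifted.min(axis=0))     # [0. 0.]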
Example #3
def show_overhead(parallel, corners, pix_real, length, wth, roi_only):
    pts = tform.transform_pt_array(parallel, pix_real)

    #scale frame size
    if roi_only:
        frame_hgt = length
        frame_wdt = wth

    #otherwise calculate frame extents from the transformed corners
    else:
        #find locations of extremes
        extremes = tform.transform_pt_array(corners, pix_real)

        #find most negative values in both directions
        mins = np.ndarray.min(extremes, axis=0)
        mins[mins > 0] = 0
        mins = np.absolute(mins)

        #add to both directions until all values are positive
        shifted = extremes + mins

        #scale frame size
        maxs = np.ndarray.max(shifted, axis=0)
        pts = pts + mins
        frame_hgt = int(maxs[1])
        frame_wdt = int(maxs[0])

    #generate blank frame
    img = np.zeros((frame_hgt, frame_wdt, 3), np.uint8)

    #draw circles for all included points
    if not roi_only:
        for pt in shifted:
            x = int(pt[0])
            y = int(pt[1])
            cv2.circle(img, (x, y), 5, (0, 0, 255), -1)

    for pt in pts:
        x = int(pt[0])
        y = int(pt[1])
        cv2.circle(img, (x, y), 5, (0, 255, 255), -1)

    #display image
    try:
        cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
        cv2.imshow("Result", img)
        cv2.waitKey(0)

        cv2.destroyAllWindows()
    except Exception:
        print("Unexpected error:", sys.exc_info())
        cv2.destroyAllWindows()
Example #4
def proc_video(ind, i_lock, frames, times, bbox_q, cameras, gpu):
    classes = utils.read_class_names("./config/coco.names")
   
    worker = Worker(gpu)
    while(True):
        if worker.avail:
            worker.mark_unavail()
            # save the current index so it doesn't change while processing;
            # the lock ensures multiple processes aren't working on the same frame
            try:
                with i_lock:
                    i = ind.value
                    ind.value = ind.value + 1
                    ind.value = ind.value % len(frames)
            except Exception:
                worker.mark_avail()
                continue
            
            #loop through frames, find people, record occupants and infractions 
            camera = cameras[i]
                
            try:
                worker.set_frame(np.asarray(frames[i]))
            except ValueError:
                worker.mark_avail()
                torch.cuda.empty_cache()
                continue
                
            ped_bboxes, veh_bboxes, blur = worker.get_bboxes()

            # denormalize (invert the ImageNet normalization; see the sketch after this example)
            im = F.normalize(worker.gpu_frame[0],mean = [-0.485/0.229, -0.456/0.224, -0.406/0.225],
                                       std = [1/0.229, 1/0.224, 1/0.225])
            im = F.to_pil_image(im.cpu())
            open_cv_image = np.array(im)
            im = open_cv_image.copy()/255.0
            #im = im[:,:,::-1]
            
            # blur all peds regardless of confidence
            for ped in blur:
                im = utils.find_blur_face(ped.int(),im)
            
            
            # parse metrics and write output frame
            filename = camera["output"]
            cam_name = camera["name"]
            pix_real = camera["im-gps"]
            frame_save = camera["save_frames"]
            dt = times[i]
            
            #find ft pts and convert to real_world
            ped_pts = utils.get_ftpts(ped_bboxes)
            realpts = tform.transform_pt_array(ped_pts, pix_real)
            
            # also convert veh points to realworld
            real_veh_pts = tform.transform_pt_array(utils.get_ftpts(veh_bboxes),pix_real)
            
            # verifies there is more than one point in the list (each point has size 2)
            if realpts.size > 2:
                mytree = scipy.spatial.cKDTree(realpts)
                errors = utils.compliance_count(mytree, realpts)
                
                #FIXME can probably do these both in 1 function
                avg_dist = utils.find_dist(mytree, realpts)
                avg_min_dist = utils.find_min_dist(mytree, realpts)
            else:
                errors = 0
                avg_min_dist = None
                avg_dist = None
            occupants = len(ped_bboxes)
            
            #save frames with occupants
            if frame_save and (occupants > 12 or errors > 5):
                result = prep_frame(ped_pts, im, camera, errors, occupants, ped_bboxes,veh_bboxes,classes)
                dt_fixed = str(dt).replace(":","-").split(".")[0]
                frame_name = "{}/{}.jpg".format(camera["frame_dir"],dt_fixed)
                cv2.imwrite(frame_name,result*255)
            
            #combine so bounding boxes remain associated with camera
            output = [realpts,real_veh_pts,dt,errors,occupants,avg_dist,avg_min_dist,cam_name,i,worker.id]
            bbox_q.put(output)            
            
            
            worker.count += 1
            worker.mark_avail()
                

    return
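
The mean=-m/s, std=1/s arguments in the denormalization step invert a standard ImageNet normalization: normalize computes (x - mean) / std per channel, so composing the two is the identity. A self-contained check, assuming torchvision is installed:

import torch
import torchvision.transforms.functional as F

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

x = torch.rand(3, 8, 8)                    # stand-in image tensor in [0, 1]
norm = F.normalize(x, mean=mean, std=std)  # forward normalization
denorm = F.normalize(norm,
                     mean=[-m / s for m, s in zip(mean, std)],
                     std=[1 / s for s in std])
assert torch.allclose(x, denorm, atol=1e-5)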
Example #5
def test():
    # define where video comes from (the last assignment wins)
    # video_path = './data/AOTsample3.mp4'
    # video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/vid_short.mp4'
    video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample1_1.mp4'

    # pt1 = np.array([36.150434, -86.800694])
    # pt2 = np.array([36.150748, -86.800867])
    # pt3 = np.array([36.150617, -86.801132])

    # print(GPS_to_ft(pt1, pt2))
    # print(GPS_to_ft(pt2, pt3))

    # get transfer function from known GPS and pixel locations

    #AOT 1 (superseded by the calibration below)
    # pix_real = [[-1.10159024e-01, -2.79958285e-01, 2.15642254e+02],
    #             [-1.13855523e-02, -1.39473016e+00, 8.15814671e+02],
    #             [-2.11104956e-04, -3.64903525e-03, 1.00000000e+00]]
    # real_pix = [[1.05161691e+01, -3.36930756e+00, 4.81000000e+02],
    #             [-1.06900637e+00, -4.29603818e-01, 5.81000000e+02],
    #             [-1.68082662e-03, -2.27891683e-03, 1.00000000e+00]]

    pix_real = [[-2.07813620e-01, -5.14012432e-01, 4.01979808e+02],
                [-1.45283091e-16, -3.02228294e+00, 1.72572356e+03],
                [4.24715690e-04, -7.70456596e-03, 1.00000000e+00]]
    real_pix = [[1.63574796e+01, -4.11269628e+00, 5.22000000e+02],
                [1.16697172e+00, -6.02703438e-01, 5.71000000e+02],
                [2.04373330e-03, -2.89684039e-03, 1.00000000e+00]]

    #vid_short
    # pix_real = [[ 2.91445619e-01,  4.86714183e-01, -2.14894512e+02],
    #             [ 2.36746272e-03,  1.20740599e+00, -4.15252961e+00],
    #             [ 7.42232523e-04,  5.70630790e-03,  1.00000000e+00]]
    # real_pix = [[ 3.51000287e+00, -4.88385701e+00,  7.34000000e+02],
    #             [-1.55374099e-02,  1.28569924e+00,  2.00000000e+00],
    #             [-2.51657708e-03, -3.71163815e-03,  1.00000000e+00]]

    # load in sample pts
    # a = np.array([36.148342, -86.799332])   #closest lamp
    # b = np.array([36.148139, -86.799375])   #lamp across street, right
    # c = np.array([36.148349, -86.799135])   #closest left corner of furthest crosswalk dash to right
    # d = np.array([36.147740, -86.799218])   #sixth tree down the street

    a = np.array([1296, 1944 / 6 * 5])  #far left street pole
    b = np.array([1296, 1944 / 6 * 4])  #pole by bike sign
    c = np.array([1296, 1944 / 6 * 3])  #corner of sidewalk
    d = np.array([1296, 1944 / 6 * 2])  #right of sidewalk stripe closest to camera

    e = np.array([1296 / 2, 1944 / 6 * 2])
    f = np.array([1296 / 2, 1944 / 6 * 3])
    g = np.array([1296 / 2, 1944 / 6 * 4])
    h = np.array([1296 / 2, 1944 / 6 * 5])

    i = np.array([1296 / 2 * 3, 1944 / 6 * 2])
    j = np.array([1296 / 2 * 3, 1944 / 6 * 3])
    k = np.array([1296 / 2 * 3, 1944 / 6 * 4])
    l = np.array([1296 / 2 * 3, 1944 / 6 * 5])

    m = np.array([1296 / 4 * 3, 1944 / 6 * 2])
    n = np.array([1296 / 4 * 3, 1944 / 6 * 3])
    o = np.array([1296 / 4 * 3, 1944 / 6 * 4])
    p = np.array([1296 / 4 * 3, 1944 / 6 * 5])
    orig = np.array([1296, 1944])
    #orig = np.array([1296*2, 0])

    # a = np.array([1280/2, 720/6*5])   #far left street pole
    # b = np.array([1280/2, 720/6*4])   #pole by bike sign
    # c = np.array([1280/2, 720/6*3])   #corner of sidewalk
    # d = np.array([1280/2, 720/6*2])   #right of sidewalk stripe closest to camera

    # e = np.array([1280/2/2, 720/6*2])
    # f = np.array([1280/2/2, 720/6*3])
    # g = np.array([1280/2/2, 720/6*4])
    # h = np.array([1280/2/2, 720/6*5])

    # i = np.array([1280/2/2*3, 720/6*2])
    # j = np.array([1280/2/2*3, 720/6*3])
    # k = np.array([1280/2/2*3, 720/6*4])
    # l = np.array([1280/2/2*3, 720/6*5])

    # m = np.array([1280/2/4*3, 720/6*2])
    # n = np.array([1280/2/4*3, 720/6*3])
    # o = np.array([1280/2/4*3, 720/6*4])
    # p = np.array([1280/2/4*3, 720/6*5])
    # orig = np.array([1280/2, 720])
    #d = np.array([1296, 1944])
    pix_pts = np.array([a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, orig])

    # start video
    print("Video from: ", video_path)
    vid = cv2.VideoCapture(video_path)
    wdt = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
    hgt = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print(wdt)
    print(hgt)

    origin = orig
    print(origin)
    real_origin = tform.transform_pt_array(origin, pix_real)
    real_origin = np.squeeze(np.asarray(real_origin))
    print(real_origin)

    try:
        while True:
            # skip desired number of frames to speed up processing
            for i in range(10):
                vid.grab()

            # read frame
            return_value, frame = vid.read()
            # if frame doesn't exist, exit
            if not return_value:
                cv2.destroyWindow('result')
                print('Video has ended')
                break
            # draw ellipse
            img, count = draw_radius(frame, pix_pts, real_pix, pix_real,
                                     real_origin)
            pt = (int(orig[0]), int(orig[1]))
            cv2.circle(img, pt, 20, (0, 0, 255), -1, 8)

            img2 = overhead(pix_pts, origin, pix_real)

            cv2.namedWindow("result", cv2.WINDOW_NORMAL)
            cv2.imshow("result", img)
            cv2.namedWindow("Overhead", cv2.WINDOW_NORMAL)
            cv2.imshow("Overhead", img2)
            if cv2.waitKey(1000) & 0xFF == ord('q'): break

        # end video, close viewer, stop writing to file
        vid.release()
        cv2.destroyAllWindows()

    # if interrupted, end video, close viewer, stop writing to file
    except (KeyboardInterrupt, Exception):
        print("Unexpected error:", sys.exc_info())
        vid.release()
        cv2.destroyAllWindows()
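
The hard-coded pix_real / real_pix matrices above are homographies fitted from known pixel/GPS correspondences ("get transfer function from known GPS and pixel locations"). A hedged sketch of how such a pair could be produced with standard OpenCV; the four correspondences here are invented for illustration:

import cv2
import numpy as np

pix = np.array([[481, 581], [522, 571], [734, 2], [100, 900]], dtype=np.float32)
gps = np.array([[36.1503, -86.8007], [36.1507, -86.8009],
                [36.1506, -86.8011], [36.1504, -86.8005]], dtype=np.float32)

pix_real, _ = cv2.findHomography(pix, gps)  # pixel -> world
real_pix = np.linalg.inv(pix_real)          # world -> pixel
real_pix /= real_pix[2, 2]                  # renormalize the corner element to 1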
Example #6
def draw_ellipse(frame, pts, centers, mytree, pix_real):
    #define qualities of the ellipse
    thickness = -1
    line_type = 8

    #set transparency
    alpha = 0.25

    #create separate image for ellipses to be drawn into
    ellipses = frame.copy()

    #iterate through list of ellipse points and centers, drawing each into ellipse image
    i = 0
    count = 0
    gps_centers = tform.transform_pt_array(centers, pix_real)
    while i < pts.shape[0]:
        a = pts[i]
        b = pts[i + 1]
        c = pts[i + 2]
        d = pts[i + 3]
        # this has performed worse on all samples except maybe aot2
        # possible_minor = int((math.sqrt(math.pow((c[0]-a[0]), 2) + math.pow((c[1]-a[1]), 2)))/2)
        possible_minor = int(abs(c[1] - a[1]) / 2)
        possible_major = int((math.sqrt(
            math.pow((d[0] - b[0]), 2) + math.pow((d[1] - b[1]), 2))) / 2)

        minor = min([possible_major, possible_minor])
        major = max([possible_major, possible_minor])

        if centers.size <= 2:
            centers = np.array([centers])
        center = centers[i // 4]

        x = int(center[0])
        y = int(center[1])

        # TODO could probably query all points simultaneously, could be a little more efficient
        if centers.size > 2:
            gps_center = gps_centers[i // 4]
            dist, ind = mytree.query(gps_center, k=2)
            # closest = mytree.data[ind[1]]
            # dist = GPS_to_ft(gps_center, closest)
            if dist[1] < 6:
                cv2.ellipse(ellipses, (x, y), (major, minor), 0, 0, 360,
                            (255, 0, 0), thickness, line_type)
                count = count + 1
            elif dist[1] < 8:
                cv2.ellipse(ellipses, (x, y), (major, minor), 0, 0, 360,
                            (255, 140, 0), thickness, line_type)
            elif dist[1] < 10:
                cv2.ellipse(ellipses, (x, y), (major, minor), 0, 0, 360,
                            (255, 255, 0), thickness, line_type)
            else:
                cv2.ellipse(ellipses, (x, y), (major, minor), 0, 0, 360,
                            (0, 255, 0), thickness, line_type)

        else:
            cv2.ellipse(ellipses, (x, y), (major, minor), 0, 0, 360,
                        (0, 255, 0), thickness, line_type)
        i = i + 4

    #combine original image and ellipse image into one
    all_img = cv2.addWeighted(ellipses, alpha, frame, 1 - alpha, 0)
    return all_img, count
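
The k=2 KD-tree query pattern used above relies on the fact that when the query point is itself in the tree, index 0 is the point itself (distance 0) and index 1 is the nearest other point. A minimal self-contained example:

import numpy as np
import scipy.spatial

pts = np.array([[0.0, 0.0], [3.0, 4.0], [10.0, 0.0]])
tree = scipy.spatial.cKDTree(pts)
dist, ind = tree.query(pts[0], k=2)
print(dist[1], tree.data[ind[1]])  # 5.0 [3. 4.]  -- nearest other point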
Example #7
def load_tree(pix_pts, pix_real):
    real = tform.transform_pt_array(pix_pts, pix_real)
    mytree = scipy.spatial.cKDTree(real)

    return mytree
Example #8
def post_processor(bbox_q, cameras, out_q, frames, times, image_q=None):
    classes = utils.read_class_names("./config/coco.names")

    try:
        while True:
            if not bbox_q.empty():
                box_ind = bbox_q.get()
                ped_bboxes = box_ind[0]
                veh_bboxes = box_ind[1]
                i = box_ind[2]
                frame = box_ind[3]

                camera = cameras[i]
                filename = camera["address"]
                pix_real = camera["im-gps"]
                frame_show = camera["save_frames"]
                dt = times[i]

                #find ft pts and convert to real_world
                ped_pts = utils.get_ftpts(ped_bboxes)
                realpts = tform.transform_pt_array(ped_pts, pix_real)

                # verifies there is more than one point in the list (each point has size 2)
                if realpts.size > 2:
                    mytree = scipy.spatial.cKDTree(realpts)
                    errors = utils.compliance_count(mytree, realpts)

                    #FIXME can probably do these both in 1 function
                    avg_dist = utils.find_dist(mytree, realpts)
                    avg_min_dist = utils.find_min_dist(mytree, realpts)
                else:
                    errors = 0
                    avg_min_dist = None
                    avg_dist = None
                occupants = len(ped_bboxes)
                #output info to csv file
                with open(filename, 'a', newline='') as base_f:
                    writer = csv.writer(base_f)
                    utils.video_write_info(writer, realpts, str(dt), errors,
                                           occupants, avg_dist, avg_min_dist)

                stats = [i, errors, occupants, avg_min_dist]

                #put output data into queue so it is accessible by the analyzer
                if out_q.full():
                    out_q.get()
                out_q.put(stats)

                result = prep_frame(ped_pts, frame, camera, errors, occupants,
                                    ped_bboxes, veh_bboxes, classes)

                cv2.imshow("frame", result)
                cv2.waitKey(0)

                # if frame_save or frame_show:
                #     result = prep_frame(ftpts, frame, vid, errors, occupants, bboxes)

                ### UNCOMMENT AND USE
                # frame_save = True
                # #save frames
                # if frame_save:
                #     outpt_frame(result, vid)

                # if frame_show:
                #     if image_q.full():
                #         image_q.get()
                #     image_q.put(result)

                # # FIXME - just for debugging, show frame on screen
                show_frame(result, i)
                if cv2.waitKey(1) & 0xFF == ord('q'): break
    except KeyboardInterrupt:
        print("Unexpected postprocessing error:", sys.exc_info()[0])
        cv2.destroyAllWindows()
    return
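
The full()/get()/put() dance on out_q implements a drop-oldest bounded buffer, so the analyzer always sees recent stats. A sketch of the pattern; note that full() and empty() are only advisory when several processes share the queue, which these loops tolerate:

import multiprocessing as mp

def put_latest(q, item):
    """Keep a bounded queue fresh by discarding the stalest entry when full."""
    if q.full():              # advisory check; another process may race us
        try:
            q.get_nowait()
        except Exception:
            pass
    q.put(item)

if __name__ == "__main__":
    q = mp.Manager().Queue(2)
    for stats in ([0, 1], [1, 0], [2, 3]):
        put_latest(q, stats)
    print(q.get())            # [1, 0] -- the oldest entry was dropped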
Example #9
def main(errs, ocpts, dists, updated, frames, times, avgs, avg_lock, i_lock,
         ind, out_q, bbox_q, image_q, config, ctx):

    # file containing camera information
    # transform_f = 'C:/Users/Nikki/Documents/work/inputs-outputs/transforms.csv'
    # transform_f = 'C:/Users/Nikki/Documents/work/inputs-outputs/test.csv'
    # transform_f = 'C:/Users/Nikki/Documents/work/inputs-outputs/test_all.csv'
    # transform_f = './config/LAMBDA_TEST.config'

    #create VidObjs to store information about each camera
    cameras = initialize_cams(config)

    num_cams = len(cameras)
    #length of queues, kinda arbitrary - this is the number that will be used for moving avg analytics
    buf_num = 3

    #need to fix these references
    # errs = var_list[0]
    # ocpts = var_list[1]
    # dists = var_list[2]
    # updated = var_list[3]
    # frames = var_list[4]
    # times = var_list[5]
    # avgs = var_list[6]
    # avg_lock = var_list[7]
    # i_lock = var_list[8]
    # ind = var_list[9]
    # bbox_q = var_list[10]
    # ind = var_list[11]

    #uncomment if running from this file

    # manager = mp.Manager()
    # print('MP manager created')
    # #  create manager to handle shared variables across processes
    # updated = manager.Value(c_bool, False)
    # frames = manager.list([None]* num_cams)
    # times = manager.list([None]* num_cams)
    # avgs = manager.list([None] * 5)
    # avg_lock = manager.Lock()
    # i_lock = manager.Lock()
    # out_q = manager.Queue(num_cams*2)
    # bbox_q = manager.Queue()
    # ind = manager.Value(int, 0)
    # image_q = manager.Queue(num_cams*2)

    # # sample = manager.list([None] * 5)

    # errs = manager.list()
    # ocpts = manager.list()
    # dists = manager.list()
    # s_lock = manager.Lock()
    # # for _ in range(num_cams):
    # #     ocpts.append( [None]* buf_num)

    # for i in range(num_cams):
    #     errs.append(manager.list([None]))
    #     ocpts.append(manager.list([None]))
    #     dists.append(manager.list([None]))

    classes = utils.read_class_names("./config/coco.names")

    #stores frame data that has been transfered to GPU
    GPU_LIST = list(range(torch.cuda.device_count()))
    # workers = setup_gpus(vids, gpu_list = GPU_LIST)
    #start model
    # model = detector.start_model()
    streamers = []

    worker = Worker(2)
    frame = cv2.imread("/home/worklab/Desktop/test1.png")
    worker.set_frame(frame)
    ped, veh = worker.get_bboxes()
    cameras[0]["frame_size"] = (worker.gpu_frame[0].shape[:2])

    im = F.normalize(worker.gpu_frame[0],
                     mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
                     std=[1 / 0.229, 1 / 0.224, 1 / 0.225])
    im = F.to_pil_image(im.cpu())
    open_cv_image = np.array(im)
    im = open_cv_image.copy() / 255.0
    im = im[:, :, ::-1]
    #combine so bounding boxes remain associated with camera
    i = 0
    box_ind = (ped, veh, i, im)

    ped_bboxes = box_ind[0]
    veh_bboxes = box_ind[1]
    i = box_ind[2]
    frame = box_ind[3]

    camera = cameras[i]
    filename = camera["address"]
    pix_real = camera["im-gps"]
    frame_show = camera["save_frames"]
    dt = times[i]

    #find ft pts and convert to real_world
    ped_pts = utils.get_ftpts(ped_bboxes)
    realpts = tform.transform_pt_array(ped_pts, pix_real)

    # verifies there is more than one point in the list (each point has size 2)
    if realpts.size > 2:
        mytree = scipy.spatial.cKDTree(realpts)
        errors = utils.compliance_count(mytree, realpts)

        #FIXME can probably do these both in 1 function
        avg_dist = utils.find_dist(mytree, realpts)
        avg_min_dist = utils.find_min_dist(mytree, realpts)
    else:
        errors = 0
        avg_min_dist = None
        avg_dist = None
    occupants = len(ped_bboxes)
    #output info to csv file
    with open(filename, 'a', newline='') as base_f:
        writer = csv.writer(base_f)
        utils.video_write_info(writer, realpts, str(dt), errors, occupants,
                               avg_dist, avg_min_dist)

    stats = [i, errors, occupants, avg_min_dist]

    #put output data into queue so it is accessible by the analyzer
    if out_q.full():
        out_q.get()
    out_q.put(stats)

    result = prep_frame(ped_pts, frame, camera, errors, occupants, ped_bboxes,
                        veh_bboxes, classes)

    cv2.imshow("frame", result)
    cv2.waitKey(0)
Example #10
def post_processor(bbox_q, cameras, out_q, frames, times, image_q=None):
    classes = utils.read_class_names("./config/coco.names")
    
    start_time = time.time()
    start_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(start_time))
    
    f_directory = cameras[0]["output"].split("/")[:-1]
    f_directory.append("{}_output_frames".format(start_time))
    f_directory = "/".join(f_directory)                                                
    os.mkdir(f_directory)     

    frames_processed = np.zeros(len(cameras))
    
    try:
        while True:
            
            if not bbox_q.empty():
                box_ind = bbox_q.get()
                ped_bboxes = box_ind[0]
                veh_bboxes = box_ind[1]
                i = int(box_ind[2])
                frame = box_ind[3]
                
                camera = cameras[i]
                filename = camera["output"].format(start_time)
                cam_name = camera["name"]
                pix_real = camera["im-gps"]
                frame_save = camera["save_frames"]
                dt = times[i]
                
                #find ft pts and convert to real_world
                ped_pts = utils.get_ftpts(ped_bboxes)
                realpts = tform.transform_pt_array(ped_pts, pix_real)
                
                # verifies there is more than one point in the list (each point has size 2)
                if realpts.size > 2:
                    mytree = scipy.spatial.cKDTree(realpts)
                    errors = utils.compliance_count(mytree, realpts)
                    
                    #FIXME can probably do these both in 1 function
                    avg_dist = utils.find_dist(mytree, realpts)
                    avg_min_dist = utils.find_min_dist(mytree, realpts)
                else:
                    errors = 0
                    avg_min_dist = None
                    avg_dist = None
                occupants = len(ped_bboxes)
                #output info to csv file  
                with open(filename, 'a', newline='') as base_f:
                    writer = csv.writer(base_f)
                    utils.video_write_info(writer, realpts, str(dt), errors, occupants, avg_dist, avg_min_dist,cam_name)
                        
                stats = [i, errors, occupants, avg_min_dist]
                
                #put output data into queue so it is accessible by the analyzer
                if out_q.full():
                    out_q.get()
                out_q.put(stats)
                
                #save frames
                if frame_save:
                    result = prep_frame(ped_pts, frame, camera, errors, occupants, ped_bboxes, veh_bboxes, classes)
                    frame_name = "{}/{}_{}_processed.jpg".format(f_directory, cam_name, int(frames_processed[i]))
                    cv2.imwrite(frame_name, result*255)
                        
                # if frame_show:
                #     if image_q.full():
                #         image_q.get()
                #     image_q.put(result)
                    
                # # FIXME - just for debugging, show frame on screen
                # show_frame(result, i)  
                # if cv2.waitKey(1) & 0xFF == ord('q'): break
                  
                frames_processed[i] += 1
                    
    except (KeyboardInterrupt, Exception):
        print("Unexpected postprocessing error:", sys.exc_info()[0])
        cv2.destroyAllWindows()
    return
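
One fragility worth noting: os.mkdir raises FileExistsError if the directory already exists, e.g. when two post-processors start within the same second. A more defensive variant under the same naming scheme (the base path here is illustrative):

import os
import time

start_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
f_directory = "output/{}_output_frames".format(start_time)
os.makedirs(f_directory, exist_ok=True)  # no error if the directory exists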
Example #11
def main():
    #length of queues, kinda arbitrary - this is the number that will be used for moving avg analytics
    buf_num = 3

    #list of workers

    #set to True to verify that GPU is being used
    tf.debugging.set_log_device_placement(False)
    importlib.reload(mp)

    ips = []
    vids = []

    # file containing camera information (the last assignment wins)
    # transform_f = 'C:/Users/Nikki/Documents/work/inputs-outputs/transforms.csv'
    # transform_f = 'C:/Users/Nikki/Documents/work/inputs-outputs/test.csv'
    # transform_f = 'C:/Users/Nikki/Documents/work/inputs-outputs/test_all.csv'
    transform_f = 'C:/Users/Nikki/Documents/work/inputs-outputs/aot_transforms_better.csv'

    manager = mp.Manager()

    #create VidObjs to store information about each camera
    initialize_cams(transform_f, ips, vids)

    num_cams = len(vids)

    #  create manager to handle shared variables across processes
    updated = manager.Value(c_bool, False)
    frames = manager.list([None] * num_cams)
    times = manager.list([None] * num_cams)
    avgs = manager.list([None] * 5)

    errors = manager.list()
    occupants = manager.list()
    avg_dists = manager.list()

    e_lock = manager.Lock()
    o_lock = manager.Lock()
    d_lock = manager.Lock()
    avg_lock = manager.Lock()
    out_q = manager.Queue(num_cams * 2)

    #stores frame data that has been transfered to GPU
    gpu_frames = [None] * num_cams

    #start model
    model = detector.start_model()

    try:
        #grab video frames in separate process
        streamer = mp.Process(target=ip_streamer.stream_all,
                              args=(frames, times, ips, updated))
        streamer.start()
        print('Separate process started')

        # analysis = mp.Process(target=adat.main, args=(all_output_stats, buf_num, avgs, removed))
        analysis = mp.Process(target=adat.main,
                              args=(out_q, buf_num, num_cams, avgs, avg_lock,
                                    errors, occupants, avg_dists, e_lock,
                                    o_lock, d_lock))
        analysis.start()
        print('Separate process started')

        #wait until frames are starting to be read
        while (not updated.value):
            continue

        #find and assign the frame size of each stream
        #TODO may have to resize frames or something for running through the model in batches
        for i, vid in enumerate(vids):
            vid.set_size(frames[i].shape[:2])

        prev_time = time.time()

        #continuously loop until keyboard interrupt
        while (True):

            curr_time = time.time()

            # save outputs every 5 minutes
            if (curr_time - prev_time) > (5 * 60):
                save_files(vids)
                prev_time = curr_time

            # verify that frame is not the same as it was last time it was displayed
            if updated.value:
                # detector.batch_bboxes(model, frames)

                #loop through frames and move to GPU (TODO - enable batching)
                for i, frame in enumerate(frames):
                    gpu_frames[i] = detector.frame_to_gpu(frame)

                #loop through frames, find people, record occupants and infractions
                for i, vid in enumerate(vids):
                    frame = frames[i]
                    gpu_frame = gpu_frames[i]
                    dt = times[i]
                    bboxes = detector.person_bboxes(model, gpu_frame,
                                                    vid.frame_size)

                    #find ft pts and convert to real_world
                    ftpts = utils.get_ftpts(bboxes)
                    realpts = tform.transform_pt_array(ftpts, vid.pix_real)

                    # verifies there is more than one point in the list (each point has size 2)
                    if realpts.size > 2:
                        mytree = scipy.spatial.cKDTree(realpts)
                        errs = detector.compliance_count(mytree, realpts)
                        avg_dist = detector.find_dist(mytree, realpts)
                    else:
                        errs = 0
                        avg_dist = None
                    ocpts = ftpts.size // 2

                    #output info to csv file
                    utils.video_write_info(vid.csvfile, realpts, str(dt), errs,
                                           ocpts, avg_dist)

                    stats = [i, errs, ocpts, avg_dist]

                    #put output data into queue so it is accessible by the analyzer
                    if out_q.full():
                        a = out_q.get()
                        print("Leaving queue: ", a)
                    out_q.put(stats)
                    # if len(avg_dists) > 0:
                    #     print (adat.get_o_avg(occupants, i))
                    #     print (adat.get_e_avg(errors, i))
                    #     print (adat.get_dist_avg(avg_dists, i))
                    #     print()
                    with avg_lock:
                        print("Avgs: ", avgs)
                    #save frames
                    if vid.frame_save:
                        outpt_frame(ftpts, frame, vid, errs, ocpts, bboxes)

                    # FIXME - just for debugging, show frame on screen
                    show_frame(ftpts, frame, vid, errs, ocpts, bboxes, i)
                    if cv2.waitKey(1) & 0xFF == ord('q'): break

    except (KeyboardInterrupt, Exception):
        print("Unexpected error:", sys.exc_info())
        cv2.destroyAllWindows()
        for vid in vids:
            vid.base_f.close()
        streamer.terminate()
        analysis.terminate()
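
The Manager-based sharing used throughout these examples gives each process proxy handles to the same Value/list/Queue objects. A minimal self-contained version of the pattern, with a hypothetical worker standing in for the streamer and analysis processes:

import multiprocessing as mp
from ctypes import c_bool

def worker(updated, frames):
    frames[0] = "frame-bytes"  # visible to the parent via the proxy
    updated.value = True

if __name__ == "__main__":
    manager = mp.Manager()
    updated = manager.Value(c_bool, False)
    frames = manager.list([None])
    p = mp.Process(target=worker, args=(updated, frames))
    p.start()
    p.join()
    print(updated.value, frames[0])  # True frame-bytes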
Example #12
def post_processor(bbox_q, vids, out_q, frames, times, image_q=None):
    try:
        while True:
            if not bbox_q.empty():
                box_ind = bbox_q.get()
                bboxes = box_ind[0]
                i = box_ind[1]
                frame = box_ind[2]

                vid = vids[i]
                filename = vid[0]
                frame_save = vid[1]
                pix_real = vid[2]

                dt = times[i]

                #find ft pts and convert to real_world
                ftpts = utils.get_ftpts(bboxes)
                realpts = tform.transform_pt_array(ftpts, pix_real)
                # verifies there is more than one point in the list (each point has size 2)
                if realpts.size > 2:
                    mytree = scipy.spatial.cKDTree(realpts)
                    errors = detector.compliance_count(mytree, realpts)

                    #FIXME can probably do these both in 1 function
                    avg_dist = detector.find_dist(mytree, realpts)
                    avg_min_dist = detector.find_min_dist(mytree, realpts)
                else:
                    errors = 0
                    avg_min_dist = None
                    avg_dist = None
                occupants = ftpts.size // 2
                #output info to csv file
                with open(filename, 'a', newline='') as base_f:
                    writer = csv.writer(base_f)
                    utils.video_write_info(writer, realpts, str(dt), errors,
                                           occupants, avg_dist, avg_min_dist)

                stats = [i, errors, occupants, avg_min_dist]

                #put output data into queue so it is accessible by the analyzer
                if out_q.full():
                    out_q.get()
                out_q.put(stats)

                #FIXME this should be set somewhere else
                frame_show = True

                result = prep_frame(ftpts, frame, vid, errors, occupants,
                                    bboxes)

                # if frame_save or frame_show:
                #     result = prep_frame(ftpts, frame, vid, errors, occupants, bboxes)

                #save frames
                if frame_save:
                    outpt_frame(result, vid)

                if frame_show:
                    if image_q.full():
                        image_q.get()
                    image_q.put(result)

                # # FIXME - just for debugging, show frame on screen
                # show_frame(result, i)
                # if cv2.waitKey(1) & 0xFF == ord('q'): break
    except (KeyboardInterrupt, Exception):
        print("Unexpected error:", sys.exc_info()[0])
        cv2.destroyAllWindows()
    return