Code Example #1
 def __init__(self, dcname, dmname, config_name, checkpoint_gen,
              checkpoint_id, trainer_pth_name, trainer_config_name, ap_name,
              pwc_name, drate, ishape):
     detector_init(dcname, dmname)
     # REID init
     self.reid = ReID(config_name, checkpoint_gen, checkpoint_id,
                      trainer_pth_name, trainer_config_name)
     # pose init
     self.ap = AlphaPose(ap_name)
     self.drate = drate
     self.pwc_name = pwc_name
     # max_tracker number
     self.mtracker_id = 0
     self.trackers = []
     # optical flow
     if pwc_name is None:
         # Tracking init, optical flow init
         self.tracking = Tracking(self.drate, ishape)
     else:
         nshape = ((ishape[0] // 64 + int(ishape[0] % 64 > 0)) * 64,
                   (ishape[1] // 64 + int(ishape[1] % 64 > 0)) * 64,
                   ishape[2])
         pwc_opticalflow = Pwcnet(self.pwc_name, w=nshape[1], h=nshape[0])
         # Tracking init, optical flow init
         self.tracking = Tracking(self.drate, ishape, pwc_opticalflow,
                                  nshape)
     self.latest_detection_frame = None
     self.last_frame = None
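The nshape arithmetic rounds each spatial dimension up to the next multiple of 64, which PWC-Net-style optical-flow networks require; an equivalent, more readable form:

import math

def round_up_to_64(n: int) -> int:
    # (n // 64 + int(n % 64 > 0)) * 64 is just ceil(n / 64) * 64
    return math.ceil(n / 64) * 64

# e.g. a 1080x1920x3 frame is padded to 1088x1920x3
assert round_up_to_64(1080) == 1088 and round_up_to_64(1920) == 1920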
Code Example #2
File: main.py Project: JoePacman/maze-game
def solve_maze(maze_i: Maze, start_x, start_y):

    coordinates = CoordinateArray(maze_i.width, maze_i.height)
    x = start_x
    y = start_y
    tracking = Tracking()
    exit_found = False

    def multiple_paths_actions():
        routes_new = maze_i.routes_available(coordinates.get_coordinate(x, y))
        tracking.next_direction = tracking.get_next_viable_multiple(coordinates).get_direction_untried(routes_new)
        tracking.add_direction_tried_from_current_multiple(tracking.next_direction)
        tracking.current_path_after_multiple = []

    while not exit_found:
        if tracking.check_coordinates_not_a_previous_multiple(x, y):
            routes_available = maze_i.routes_available(coordinates.get_coordinate(x, y), tracking.previous_direction)

            # dead end  - find last multiple with remaining routes to try
            if len(routes_available) == 0:
                coordinates.update_coordinate(x, y, Status.X)
                # update all coordinates in current path to X (don't include the M)
                for x_y in tracking.current_path_after_multiple:
                    coordinates.update_coordinate(x_y[0], x_y[1], Status.X)
                # reset x, y back to multiples position
                x, y = tracking.get_next_viable_multiple(coordinates).get_location()
                coordinates.update_coordinate(x, y, Status.M)
                multiple_paths_actions()

            # one route available - continue moving
            elif len(routes_available) == 1:
                tracking.next_direction = routes_available[0]
                coordinates.update_coordinate(x, y, Status.C)
                tracking.current_path_after_multiple.append([x, y])

            # more than one route available - (FIRST time) multiple found
            elif len(routes_available) > 1:
                tracking.next_direction = routes_available[0]
                coordinates.update_coordinate(x, y, Status.M)
                tracking.add_multiple(Multiple(x, y, [tracking.previous_direction, tracking.next_direction],
                                               routes_available))
                tracking.current_path_after_multiple = [[x, y]]

        # SUBSEQUENT time we have returned to this multiple
        else:
            multiple_paths_actions()

        x, y = move_one(x, y, tracking.next_direction)
        tracking.previous_direction = find_previous_direction(tracking.next_direction)

        maze_i.print_maze(coordinates)

        # check if outside maze
        if x < 0 or x == maze_i.width or y < 0 or y == maze_i.height:
            coordinates.update_path_found()
            maze_i.print_maze(coordinates)
            print("SUCCESS outside maze at position [%s, %s] (bottom left is [0, 0])" % (x, y))
            exit_found = True
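The snippet relies on move_one and find_previous_direction, which are defined elsewhere in the project. A plausible minimal sketch (the helper names match the calls above, but the direction encoding is an assumption):

# Hypothetical helpers; the real project may encode directions differently.
OFFSETS = {'N': (0, 1), 'S': (0, -1), 'E': (1, 0), 'W': (-1, 0)}
OPPOSITES = {'N': 'S', 'S': 'N', 'E': 'W', 'W': 'E'}

def move_one(x, y, direction):
    # Step one cell in the given compass direction.
    dx, dy = OFFSETS[direction]
    return x + dx, y + dy

def find_previous_direction(next_direction):
    # The direction we came from is the opposite of the one we moved in.
    return OPPOSITES[next_direction]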
Code Example #3
File: testvideo.py Project: zacarkim/fyp
    def __init__(self):
        super().__init__()

        self._controller = VideoController()
        self._shown_frame = self._controller.frame

        self._initial_point = (0, 0)
        self._selecting = False
        self._tracking = Tracking()

        cv2.namedWindow('output', cv2.WINDOW_AUTOSIZE)
        cv2.setMouseCallback('output', self._mouse_callback)
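_mouse_callback is not shown; given the _initial_point/_selecting fields, it is presumably a standard OpenCV ROI-selection handler. A sketch under that assumption (the Tracking.start call is hypothetical):

    def _mouse_callback(self, event, x, y, flags, param):
        # Begin a selection drag on left-button press.
        if event == cv2.EVENT_LBUTTONDOWN:
            self._initial_point = (x, y)
            self._selecting = True
        # On release, hand the selected box to the tracker.
        elif event == cv2.EVENT_LBUTTONUP and self._selecting:
            self._selecting = False
            x0, y0 = self._initial_point
            self._tracking.start((x0, y0, x - x0, y - y0))  # hypothetical API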
Code Example #4
File: argparsing.py Project: loigom/livealertx
 def add() -> None:
     if argc < 3:
         print("missing arg: add (username)")
     else:
         r = username_exists(sys.argv[2])
         if r is None:
             print("Twitch API down, try again later")
         elif r:
             r = Tracking.get()
             r.add(sys.argv[2])
             Tracking.set(r)
         else:
             print(f"{sys.argv[2]} is not a valid Twitch username")
Code Example #5
 def __init__(self, parent=None):
     super(CalcThtead, self).__init__()
     self.vidSource = VideoSource(ap)
     self.Video1 = self.vidSource.getVid1()
     self.Video2 = self.vidSource.getVid2()
     self.stopC = 0
     self.db = DB()
     self.tracker1 = Tracking()
     self.tracker2 = Tracking()
     self.center = None
     self.runIndicator = 0
     self.camInd = None
     self.trapezium = Trapezium()
Code Example #6
File: argparsing.py Project: loigom/livealertx
 def remove() -> None:
     if argc < 3:
         print("missing arg: remove (username)")
     else:
         r = Tracking.get()
         if sys.argv[2] in r:
             r.remove(sys.argv[2])
             Tracking.set(r)
             for f in os.listdir(Paths.PFP):
                 if f.split(".")[0].lower() == sys.argv[2].lower():
                     os.remove(f"{Paths.PFP}/{f}")
                     break
         else:
             print(f"{sys.argv[2]} not being tracked")
Code Example #7
    def process(self, video_file_path: str):
        tracking = Tracking(self.cfg,
                            state_noise=self.cfg.TRACKING.STATE_NOISE,
                            r_scale=self.cfg.TRACKING.R_SCALE,
                            q_var=self.cfg.TRACKING.Q_VAR,
                            iou_threshold=self.cfg.TRACKING.IOU_THRESHOLD,
                            max_misses=self.cfg.TRACKING.MAX_MISSES,
                            min_hits=self.cfg.TRACKING.MIN_HITS)

        self.tracked_frames = list()
        draw_tracks = False
        trajectories, colors = None, None

        video_iterator = self.video_iter(video_file_path)
        fps = int(next(video_iterator))

        # length of a trajectory to predict
        future_len = int(self.cfg.OUTPUT_VIDEO.CYCLE_LEN * fps)

        for i, next_frame in tqdm(enumerate(video_iterator)):
            next_frame_to_visual = np.array(next_frame)

            # draw full trajectory on current frame
            if draw_tracks and i % future_len != 0:
                self.draw_trajectories(next_frame_to_visual,
                                       trajectories=trajectories,
                                       future_len=future_len,
                                       colors=colors)

            # draw new trajectory frame by frame
            if i != 0 and i % future_len == 0:
                draw_tracks = True
                trajectories, colors = tracking.predict_trajectories(
                    future_len,
                    min_age=int(self.cfg.OUTPUT_VIDEO.MIN_AGE_FOR_TRAJECTORY *
                                fps))

                next_frame_to_visual = self.draw_trajectories(
                    next_frame_to_visual,
                    trajectories=trajectories,
                    colors=colors,
                    future_len=future_len,
                    save_intermediate=True)

            # get current tracked objects
            tracked_detections = tracking.track(next_frame)
            self.draw_tracked_detections(next_frame_to_visual,
                                         tracked_detections)
            self.tracked_frames.append(next_frame_to_visual)
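self.cfg is read like a yacs-style CfgNode; a minimal configuration with the keys used above might look as follows (an assumption — the values are illustrative only):

from yacs.config import CfgNode as CN

cfg = CN()
cfg.TRACKING = CN()
cfg.TRACKING.STATE_NOISE = 1.0     # Kalman state/process noise
cfg.TRACKING.R_SCALE = 1.0         # measurement-noise scale
cfg.TRACKING.Q_VAR = 0.1           # process-noise variance
cfg.TRACKING.IOU_THRESHOLD = 0.3   # min IoU to match a detection to a track
cfg.TRACKING.MAX_MISSES = 5        # frames a track may go unmatched
cfg.TRACKING.MIN_HITS = 3          # matches before a track is reported
cfg.OUTPUT_VIDEO = CN()
cfg.OUTPUT_VIDEO.CYCLE_LEN = 2.0   # seconds of trajectory to predict
cfg.OUTPUT_VIDEO.MIN_AGE_FOR_TRAJECTORY = 1.0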
Code Example #8
    def get_tracking(self, email_id) -> Optional[tracking.Tracking]:
        mail = self.get_all_mail_folder()

        result, data = mail.uid("FETCH", email_id, "(RFC822)")
        raw_email = str(data[0][1]).replace("=3D", "=").replace(
            '=\\r\\n', '').replace('\\r\\n', '').replace('&amp;', '&')
        to_email = self.get_to_address(data)
        date = self.get_date_from_msg(data)
        url = self.get_order_url_from_email(raw_email)
        price = self.get_price_from_email(raw_email)
        order_ids = self.get_order_ids_from_email(raw_email)
        tracking_number = self.get_tracking_number_from_email(raw_email)
        if tracking_number is None:
            self.failed_email_ids.append(email_id)
            print(
                "Could not find tracking number from email with order(s) %s" %
                order_ids)
            self.mark_as_unread(email_id)
            return None

        group = self.get_buying_group(raw_email)
        items = self.get_items_from_email(data)
        if group is None:
            self.failed_email_ids.append(email_id)
            print("Could not find buying group for email with order(s) %s" %
                  order_ids)
            self.mark_as_unread(email_id)
            return None

        merchant = self.get_merchant()
        return Tracking(tracking_number, group, order_ids, price, to_email,
                        url, date, 0.0, items, merchant)
Code Example #9
File: pong.py Project: piotut/Pong
    def __init__(self, file1=None, file2=None):
        pygame.mixer.pre_init(44100, -16, 2, 2048)
        pygame.init()
        self.fps = pygame.time.Clock()
        flag = DOUBLEBUF

        self.board = pygame.display.set_mode(screenRect, flag)
        pygame.display.set_caption('[ --- Pong --- ]')

        self.state = 1  # 1 - run, 0 - exit

        self.track = Tracking(file1, file2)

        self.sound = Sound()
        self.p1 = Paddle(self.board, (200, 100, 100), screenRect)
        self.p1.setInitialPostition(0, screenHeight / 2)
        self.p2 = Paddle(self.board, (100, 200, 100), screenRect)
        self.p2.setInitialPostition(screenWidth - self.p2.get()['width'],
                                    screenHeight / 2)
        self.ball = Ball(self.board, (50, 50, 250), screenRect, self.sound)
        self.ball.setInitialPostition(screenWidth / 2, screenHeight / 2)
        self.arena = Arena(self.board, screenRect)
        self.referee = Referee(self.ball, self.p1, self.p2, screenRect,
                               self.sound)

        self.t = Thread(target=self.track.run)
        #self.track.run()
        self.t.start()

        self.p1_pos = 0
        self.p2_pos = 0

        self.loop()
Code Example #10
File: make_pr.py Project: splbio/github2bugzilla
def main():
    Config = ConfigParser.SafeConfigParser()

    cfg_file = "make_pr.conf"
    if os.path.isfile(cfg_file):
        Config.readfp(open(cfg_file))
        for section in Config.sections():
            for option in Config.options(section):
                cfg[option] = Config.get(section, option)

    cfg["git_mirror_dir"] = os.path.expandvars(os.path.expanduser(cfg["gitdir"]))

    tracking = Tracking(dbpath=cfg["db_conn"])
    prev_max_pull_id = tracking.get_max_pull_id()

    print "prev_max_pull_id: %d" % prev_max_pull_id

    repo_obj = GitRepo(repo_path=cfg["git_mirror_dir"])
    all_pull_ids = repo_obj.get_all_pull_ids()

    pull_ids_to_work = [elem for elem in all_pull_ids if elem > prev_max_pull_id]

    print "pull_ids_to_work: %s" % pull_ids_to_work

    newcount = 0
    for pull_id in pull_ids_to_work:
        message = make_gnats_message(pull_id=pull_id,
                cfg=cfg,
                pr_template=pr_template,
                repo_obj=repo_obj)
        fname = "pr%d.txt" % pull_id
        pr_file = open(fname, "w")
        pr_file.write(message)
        pr_file.close()
        print "Wrote out: %s" % fname
        # shell command to send-pr:
        #    yes s |send-pr -f x.out
        subprocess.check_call("yes s |send-pr -f %s" % fname, shell=True)
        tracking.record_pr_sent(pull_id)
        newcount += 1

    if newcount > 0:
        print "Finished successfully, made %d new prs!" % newcount
    else:
        print "Finished successfully, no new prs made"

    return 0
Code Example #11
def calculate_formula(request):
    variables_code = ''

    for key, value in request.variables.items():
        if isinstance(value, list):
            variables_code += key + '=' + '[' + ', '.join(value) + ']' + '\n'
        else:
            variables_code += key + '=' + value + '\n'

    exec_response = {}
    formula_with_variables = variables_code + request.formula
    tracking = Tracking(formula_with_variables)
    tracking.transform()

    exec(tracking.modified_code, globals(), exec_response)

    formula_response = FormulaResponse(request, exec_response, tracking)
    return formula_response
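calculate_formula assumes a request with a variables dict of string values and a formula string; Tracking then rewrites that source (transform/modified_code) before execution. A small illustration of the generated code and the exec pattern, with the Tracking step elided:

import types

# Hypothetical input: string-valued variables plus a formula that uses them.
request = types.SimpleNamespace(
    variables={'price': '9.5', 'quantities': ['1', '2', '3']},
    formula='total = price * sum(quantities)')

# The loop above turns request.variables into assignment statements:
variables_code = 'price=9.5\nquantities=[1, 2, 3]\n'

exec_response = {}
exec(variables_code + request.formula, globals(), exec_response)
assert exec_response['total'] == 57.0  # 9.5 * (1 + 2 + 3)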
Code Example #12
File: MyBot.py Project: Hiro863/ants_RL
    def do_turn(self, ants):
        self.turn += 1
        if not self.tracking:
            self.tracking = Tracking()
        self.tracking.update(ants)

        for ant_loc in ants.my_ants():
            state = self.storage.state(ants, ant_loc)
            direction_onehot = self.think(state)
            direction = self.directions[np.where(direction_onehot == 1)[0][0]]


            if direction != 'r':
                new_loc = ants.destination(ant_loc, direction)
            else:
                new_loc = ant_loc
            log((self.turn, 'Moving ant ', ant_loc, ' to ', new_loc))

            # remember what we have done this turn
            label = self.tracking.loc_to_ants[ant_loc]
            future_food = self.tracking.adjacent_food(new_loc, ants)
            self.append_history(state, direction_onehot, label, future_food)

            if direction != 'r':
                self.tracking.move_ant(ant_loc, direction, ants)

            # TODO: how often are we running out of time?
            if ants.time_remaining() < 10:
                log('timeout')
                break


        # we need to know the outcome before we can calculate the reward,
        # which is why only the previous turn is stored
        offset = 1
        if len(self.history) > offset:
            for prev_state, prev_action, prev_label, food in self.history[self.turn - offset]:
                self.storage.remember(
                    prev_state, prev_action,
                    self.reward(food, self.tracking.is_killed(prev_label)), prev_label,
                    self.turn - offset
                )

        self.dmake.save_epsilon()
Code Example #13
    def __init__(self, path, config):
        QWidget.__init__(self)

        self.path = path
        self.config = config

        self.setWindowTitle('AR4maps')
        self.move(0, 0)
        self.video_size = QSize(VIDEO.WIDTH, VIDEO.HEIGHT)
        self.setup_ui()

        self.markerImg = cv.imread(self.path + self.config['target'])
        # cv.imshow("target", self.markerImg)
        self._cam = Camera().start()
        self._track = Tracking(self.markerImg)
        self._rendering = Rendering(self.markerImg, self.config['coords'])
        self._fps = FPS()

        self.setup_render()
Code Example #14
File: camera.py Project: arbaz52/darts-python-server
 def __init__(self, _id, url, lat, lng):
     self._id = _id
     self.url = url
     self.lat = lat
     self.lng = lng
     self.processedFrame = None
     self.processedFrameTime = 0
     self.tk = Tracking(1)
     #self.track = Track()
     self.setup()
     self.invalidframescount = 0
Code Example #15
File: camera.py Project: arbaz52/darts-python-server
 def loadSnapShot(self, ss):
     self.processedFrame = ss[0]
     self.processedFrameTime = ss[1]
     self.tk = ss[2]
     self.invalidframescount = ss[3]
     if ss[4] != self.url or ss[5] != self.lat or ss[6] != self.lng:
         Logger._log("INFO", "Physical aspects of this camera changed",
                     True)
         Logger._log("INFO", "Tracking restarted for this camera", True)
         self.tk = Tracking(1)
         return True
     return False
Code Example #16
    def __init__(self, id, shared_variables, score, classification, box, *args,
                 **kwargs):
        super(TrackingBox, self).__init__(*args, **kwargs)
        self.classification = classification
        self.shared_variables = shared_variables
        self.counter = 0
        self.x = box[0]
        self.y = box[1]
        self.width = box[2]
        self.height = box[3]
        self.id = id
        self.splash_pix = QPixmap('./images/box2.png')
        self.splash_pix = self.splash_pix.scaled(
            round(self.width * self.shared_variables.DETECTION_SCALE),
            round(self.height * self.shared_variables.DETECTION_SCALE))
        self.setPixmap(self.splash_pix)

        self.setWindowFlag(Qt.WindowStaysOnTopHint)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setAttribute(Qt.WA_NoSystemBackground)

        label = QLabel(self)
        label.setWordWrap(True)
        label.move(30, 30)
        label.setStyleSheet(" color: rgb(0, 100, 200); font-size: 15pt; ")

        label.setText(str(int(100 * score)) + "%" + " " + classification)
        self.move(self.x, self.y)
        self.show()

        self.tracking = Tracking((self.x, self.y, self.width, self.height),
                                 self.shared_variables)

        self.threadpool = QThreadPool()

        #print("New Box Created at ",self.x,self.y, " Size ", self.width, self.height)

        self.start_worker()
Code Example #17
def loop(TICK_RATE_SECONDS: int) -> None:
    previously_live = set()

    while True:
        tracking = Tracking.get()
        if tracking:
            custom_headers = AuthorizationManager.headers()
            if custom_headers is not None:
                query = f"https://api.twitch.tv/helix/streams?{'&'.join((f'user_login={x}' for x in tracking))}"
                response = requests.get(query, headers=custom_headers)
                if response.status_code == 200:
                    j = response.json()
                    if j["data"]:
                        live_now = {
                            dp["user_name"] : {"title": dp["title"]}
                            for dp in j["data"]
                        }
                        for k in set(live_now).difference(previously_live):
                            notify(k, live_now[k]["title"])
                        previously_live = set(live_now)

        time.sleep(TICK_RATE_SECONDS)
Code Example #18
File: argparsing.py Project: loigom/livealertx
 def lst() -> None:
     r = Tracking.get()
     if r:
         print(", ".join(r))
     else:
         print(f"{Paths.TRACKING} is empty")
Code Example #19
import time

from skyfield.api import Star, Topos, load

# RotorController and Tracking come from project-local modules (imports not shown)


def __start():
    while True:
        #check if rotor is moving
        print Rotor.isMoving()
        if Rotor.isMoving():
            print "Rotor moving, sleeping"
            time.sleep(5)
        else:
            print "Rotor stopped, calculating"

            alt, az, dist = Tracker.calcAltAz()
            Rotor.move(alt.degrees, az.degrees)


#setup location
planets = load('de421.bsp')
earth = planets['earth']

location = earth + Topos('36.31205 N', '81.35347 W')

#setup test target
#barnard = Star(ra_hours=(17, 57, 48.49803), dec_degrees=(4, 41, 36.2072))
testTarget = Star(ra_hours=(22, 57, 39.52), dec_degrees=(-29, 37, 24))
Tracker = Tracking(testTarget, location)

Tracker.calcAltAz()

Rotor = RotorController()
Rotor.connect("/dev/ttyACM0")
Rotor.move(0, 0)
__start()
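Tracker.calcAltAz presumably wraps Skyfield's observe/altaz pipeline; a minimal sketch of that computation (hypothetical internals, not the project's code):

from skyfield.api import load

ts = load.timescale()

def calc_alt_az(location, target):
    # Apparent altitude/azimuth of `target` as seen from `location` right now.
    astrometric = location.at(ts.now()).observe(target)
    alt, az, dist = astrometric.apparent().altaz()
    return alt, az, dist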
Code Example #20
    lowerTrackingRegion = map(int, parser.get('PeopleCounting', 'lowerTrackingRegion').split(','))
    inactiveThreshold = parser.getint('PeopleCounting', 'inactiveThreshold')
    singlePersonBlobSize = parser.getint('PeopleCounting', 'singlePersonBlobSize')
    Debug = parser.getboolean('PeopleCounting', 'Debug')
    Visualize = parser.getboolean('PeopleCounting', 'Visualize') or Debug

    """ Initialize MOG2, VideoWriter, and tracking """
    fgbg = cv2.BackgroundSubtractorMOG2(mog2History, mog2VarThrsh, mog2Shadow)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(kernelSize,kernelSize))

    output_width  = int(frame.shape[1] * scale)
    output_height = int(frame.shape[0] * scale)
    CODE_TYPE = cv2.cv.CV_FOURCC('m','p','4','v')
    video = cv2.VideoWriter('output_detection.avi',CODE_TYPE,30,(output_width,output_height*2),1)

    trackingObj = Tracking(countingRegion, upperTrackingRegion, lowerTrackingRegion, peopleBlobSize)
    tracks = []
    totalUp = 0
    totalDown = 0
    frameInd = startOffset

    while cap.isOpened():
        start = time.clock()
        ret, frame = cap.read()
        # frame = getFrame(cap,frameInd)
        if ret == False:
            break

        print 'Frame # %s' % frameInd
        frameInd += 1
Code Example #21
File: MyBot.py Project: Hiro863/ants_RL
class MyBot:
    def __init__(self, storage):
        self.storage = storage
        self.dmake = DecisionMaker()
        self.directions = {
            0: 'n',
            1: 'e',
            2: 's',
            3: 'w',
            4: 'r'
        }
        self.history = {}
        self.history_length = 2
        self.turn = 0
        self.tracking = None

    def do_setup(self, ants):
        pass

    def think(self, sparse_state):
        state_list = []
        for sparse_state_channel in sparse_state:
            state_channel = sparse_state_channel.toarray()
            state_list.append(state_channel)
        state = np.stack(state_list)
        decision = self.dmake.make_decision(state)

        return decision

    def reward(self, food, is_killed):
        # 100 per food item; subtract 200 if the ant was killed
        reward = food * 100 - is_killed * 200
        return reward

    def append_history(self, state, action, label, future_food):
        if self.turn in self.history:
            self.history[self.turn].append((state, action, label, future_food))
        else:
            self.history[self.turn] = [(state, action, label, future_food)]

        expired = (key for key in self.history.keys() if key <= (self.turn - self.history_length))
        for turn in list(expired):
            del self.history[turn]

    @logexcept
    def do_turn(self, ants):
        self.turn += 1
        if not self.tracking:
            self.tracking = Tracking()
        self.tracking.update(ants)

        for ant_loc in ants.my_ants():
            state = self.storage.state(ants, ant_loc)
            direction_onehot = self.think(state)
            direction = self.directions[np.where(direction_onehot == 1)[0][0]]


            if direction != 'r':
                new_loc = ants.destination(ant_loc, direction)
            else:
                new_loc = ant_loc
            log((self.turn, 'Moving ant ', ant_loc, ' to ', new_loc))

            # remember what we have done this turn
            label = self.tracking.loc_to_ants[ant_loc]
            future_food = self.tracking.adjacent_food(new_loc, ants)
            self.append_history(state, direction_onehot, label, future_food)

            if direction != 'r':
                self.tracking.move_ant(ant_loc, direction, ants)

            # TODO: how often are we running out of time?
            if ants.time_remaining() < 10:
                log('timeout')
                break


        # we need to know the outcome before we can calculate the reward,
        # which is why only the previous turn is stored
        offset = 1
        if len(self.history) > offset:
            for prev_state, prev_action, prev_label, food in self.history[self.turn - offset]:
                self.storage.remember(
                    prev_state, prev_action,
                    self.reward(food, self.tracking.is_killed(prev_label)), prev_label,
                    self.turn - offset
                )

        self.dmake.save_epsilon()
Code Example #22
def test_video(fname, config_name, checkpoint_gen, checkpoint_id, trainer_pth_name, trainer_config_name, \
            ap_name, pwc_name=None, drate=5, is_visualize=False, downsample_rate=1):
    cap = cv2.VideoCapture(fname)
    if not cap.isOpened():
        print('read video file failed')

    # frame count
    fid = 0
    # max_tracker number
    mtracker_id = 0
    trackers = []
    # person detection of 1st frame
    res, img = cap.read()
    fid += 1
    if not res:
        print('read over')
        return
    # REID init
    reid = ReID(config_name, checkpoint_gen, checkpoint_id, trainer_pth_name,
                trainer_config_name)
    # pose init
    ap = AlphaPose(ap_name)
    dboxes = detector_inference(img)
    if len(dboxes) != 0:
        npdboxes = np.array(dboxes)
        appts = ap.inference(img, npdboxes[:, 0:4], npdboxes[:, 4:5])
    else:
        appts = []
    for i, dbox in enumerate(dboxes):
        mtracker_id += 1
        nt = Tracker(mtracker_id)
        # reid feature
        # boundary handling not implemented yet!!!!
        roi = img[dbox[1]:dbox[3], dbox[0]:dbox[2]]
        feature = reid.GetFeature(roi)
        appt = appts[i]
        nt.update(dbox, fid, feature, appt)
        trackers.append(nt)

    ishape = img.shape
    if pwc_name is None:
        # Tracking init, optical flow init
        tracking = Tracking(drate, ishape)
        # when detection, update optical flow features
        tracking.good_feature_track_cpu(img, trackers)
    else:
        # gpu, pwcnet init
        nshape = ((ishape[0] // 64 + int(ishape[0] % 64 > 0)) * 64,
                  (ishape[1] // 64 + int(ishape[1] % 64 > 0)) * 64, ishape[2])
        pwc_opticalflow = Pwcnet(pwc_name, w=nshape[1], h=nshape[0])

        # Tracking init, optical flow init
        tracking = Tracking(drate, ishape, pwc_opticalflow, nshape)

    # save latest detection frame
    latest_detection_frame = img.copy()
    last_frame = img.copy()

    basename = os.path.basename(fname)
    extnames = os.path.splitext(basename)
    if is_visualize:
        fourcc = cv2.VideoWriter_fourcc(*'mpeg')
        vname = os.path.join('./', extnames[0] + '.mp4')
        out = cv2.VideoWriter(
            vname, fourcc, 25,
            (img.shape[1] // downsample_rate, img.shape[0] // downsample_rate))
        # Create some random colors
        colors = np.random.randint(0, 255, (500, 3)).tolist()
        visualize(img.copy(), out, trackers, fid, colors, downsample_rate)

    frame_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print('total frame number: %d' % (frame_num))
    while True:
        res, img = cap.read()
        fid += 1
        # if fid > 10:
        #     break
        if not res:
            print('read over')
            break
        print('fid: %d' % (fid))
        # detection
        if fid % drate == 0:
            dboxes = detector_inference(img)
            features = []
            for dbox in dboxes:
                roi = img[dbox[1]:dbox[3], dbox[0]:dbox[2]]
                feature = reid.GetFeature(roi)
                features.append(feature)
            if pwc_name is None:
                # tracking, optical flow
                old_gray = cv2.cvtColor(latest_detection_frame,
                                        cv2.COLOR_BGR2GRAY)
                frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                tracking.optical_flow_cpu(old_gray,
                                          frame_gray,
                                          trackers,
                                          fid,
                                          is_visualize=is_visualize)
            else:
                tracking.optical_flow_gpu(last_frame,
                                          img,
                                          trackers,
                                          fid,
                                          is_visualize=is_visualize)
            print('tracking')
            # if is_visualize:
            #     visualize(img.copy(), out, trackers, fid, colors)

            if len(dboxes) != 0:
                npdboxes = np.array(dboxes)
                appts = ap.inference(img, npdboxes[:, 0:4], npdboxes[:, 4:5])
            else:
                appts = []
            # tracking
            trackers, mtracker_id = tracking.tracking(trackers, dboxes,
                                                      features, appts, fid,
                                                      mtracker_id)
            if pwc_name is None:
                # when detection, update optical flow features
                tracking.good_feature_track_cpu(img, trackers)
            # save latest detection frame
            latest_detection_frame = img.copy()
            print('detection and matching:%d' % len(trackers))
        else:
            if pwc_name is None:
                # tracking, optical flow
                old_gray = cv2.cvtColor(latest_detection_frame,
                                        cv2.COLOR_BGR2GRAY)
                frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                tracking.optical_flow_cpu(old_gray,
                                          frame_gray,
                                          trackers,
                                          fid,
                                          is_visualize=is_visualize)
            else:
                tracking.optical_flow_gpu(last_frame,
                                          img,
                                          trackers,
                                          fid,
                                          is_visualize=is_visualize)
            print('tracking')
        last_frame = img.copy()

        if is_visualize:
            visualize(img.copy(), out, trackers, fid, colors, downsample_rate)

    if is_visualize:
        out.release()
    cap.release()

    # output json
    jname = os.path.join('./', extnames[0] + '.json')
    wf = open(jname, 'w')
    jdict = {}
    for tracker in trackers:
        history_boxes = tracker.history_boxes
        keys = history_boxes.keys()
        values = history_boxes.values()
        nvalues = [list(map(float, v)) for v in values]
        nhb = dict(zip(keys, nvalues))
        jdict[tracker.tracker_id] = {
            "state": tracker.state,
            "latest_fid": tracker.latest_fid,
            "latest_box": list(map(float, tracker.latest_box)),
            "history_cpts": tracker.history_cpts,
            "history_boxes": nhb,
            "mfeature": tracker.mfeature.tolist()[0]
        }
    json.dump(jdict, wf)
    wf.close()
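good_feature_track_cpu/optical_flow_cpu are not shown, but the names point at OpenCV's classic sparse-flow pair, goodFeaturesToTrack + calcOpticalFlowPyrLK. A generic sketch of that pattern (not the project's actual implementation):

import cv2

def detect_corners(gray, max_corners=200):
    # Corners to seed the tracker, refreshed on detection frames.
    return cv2.goodFeaturesToTrack(gray, max_corners,
                                   qualityLevel=0.01, minDistance=7)

def sparse_flow_step(prev_gray, cur_gray, prev_pts):
    # Track previously detected corners from one frame to the next.
    cur_pts, status, _err = cv2.calcOpticalFlowPyrLK(
        prev_gray, cur_gray, prev_pts, None, winSize=(21, 21), maxLevel=3)
    good = status.reshape(-1) == 1
    return prev_pts[good], cur_pts[good]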
Code Example #23
    frame_height=frame_height, frame_width=frame_width, fps=fps)

# IDs of smarticles to be tracked--these correspond to IDs of AprilTags
smart_ids = [1, 12]
ring_ids = [100, 101, 102]
tag_ids = smart_ids + ring_ids
values = []
# make length_dict, relating tag_id to tag dimensions
# (smarticle tags get their dimension from the constant SMARTICLE_TAG_LENGTH_MM)
for id in tag_ids:
    if id in smart_ids:
        values.append(SMARTICLE_TAG_LENGTH_MM)
    else:
        values.append(None)
length_dict = dict(zip(tag_ids, values))  # built once, after the loop
track = Tracking(tag_ids, history_len=None, length_dict=length_dict)

show_timer = True
counter = 0
track.start(cam)
scale = track.get_scale_factor()
side_length = RING_DIAM_MM * roi_safety_factor * scale
step = lambda: step_function(cam, track, smart_ids, ring_ids, side_length)
while True:

    t0 = time.time()
    step()
    if track.q_pressed():
        break
    t_elapsed = time.time() - t0
    # prints true tracking rate; this will be different than the specified fps
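The length_dict loop above is equivalent to a single dict comprehension:

length_dict = {tag: SMARTICLE_TAG_LENGTH_MM if tag in smart_ids else None
               for tag in tag_ids}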
Code Example #24
def eval_tracking_MaximumOverlap(vidcap, test_len, params, opticalFlow=None):

    print("Evaluating Tracking")
    gt = read_annotations(params["gt_path"], grouped=True, use_parked=True)
    det = read_detections(params["det_path"], grouped=True, confidenceThr=0.4)
    frame_id = int(vidcap.get(cv2.CAP_PROP_POS_FRAMES))
    first_frame_id = frame_id
    print(frame_id)

    detections = []
    annotations = {}
    list_positions = {}

    center_seen_last5frames = {}
    id_seen_last5frames = {}

    tracking = Tracking()
    det_bboxes_old = -1

    old_frame = None

    # Create an accumulator that will be updated during each frame
    accumulator = mm.MOTAccumulator(auto_id=True)

    for t in tqdm(range((train_len + test_len) - first_frame_id)):

        _, frame = vidcap.read()
        # cv2.imshow('Frame', frame)
        # keyboard = cv2.waitKey(30)

        flow_aux = np.zeros(shape=(frame.shape[0], frame.shape[1], 2))

        if params['use_optical_flow'] and old_frame is not None:
            for d in det_bboxes_old:
                # print(d)
                flow = None
                # print("Computing optical flow")
                flow = computeOpticalFlow(old_frame,
                                          frame,
                                          d,
                                          option=params['optical_flow_option'])
                d.flow = [flow[0], -flow[1]]

                flow_aux[int(d.ytl):int(d.ybr),
                         int(d.xtl):int(d.xbr), :] = flow

            plot_flow(
                old_frame[:, :, [2, 1, 0]],
                flow_aux,
                step=16,
                fname=
                '/home/oscar/workspace/master/modules/m6/project/mcv-m6-2021-team4/W4/OF_BB/'
                + f"tracking_{str(frame_id)}_IoU.png")

        det_bboxes = det[frame_id]
        det_bboxes = tracking.set_frame_ids(det_bboxes, det_bboxes_old)
        detections += det_bboxes

        id_seen = []
        gt_bboxes = []
        if frame_id in gt:
            gt_bboxes = gt[frame_id]
        annotations[frame_id] = gt_bboxes

        objs = [bbox.center for bbox in gt_bboxes]
        hyps = [bbox.center for bbox in det_bboxes]

        for object_bb in det_bboxes:
            if object_bb.id in list(list_positions.keys()):
                if t < 5:
                    id_seen_last5frames[object_bb.id] = object_bb.id
                    center_seen_last5frames[object_bb.id] = object_bb.center
                list_positions[object_bb.id].append(
                    [int(x) for x in object_bb.center])
            else:
                if (t < 5):
                    id_seen_last5frames[object_bb.id] = object_bb.id
                    center_seen_last5frames[object_bb.id] = object_bb.center

                id_seen.append(object_bb)
                list_positions[object_bb.id] = [[
                    int(x) for x in object_bb.center
                ]]

        # To detect parked cars
        for bbox in id_seen:
            for idx in list(id_seen_last5frames.keys()):
                if idx != bbox.id:
                    center = [center_seen_last5frames[idx]]
                    mse = (np.square(
                        np.subtract(np.array(center),
                                    np.array([int(x)
                                              for x in bbox.center])))).mean()
                    if mse < 300:
                        setattr(bbox, 'id', idx)

        accumulator.update(
            [bbox.id
             for bbox in gt_bboxes],  # Ground truth objects in this frame
            [bbox.id
             for bbox in det_bboxes],  # Detector hypotheses in this frame
            mm.distances.norm2squared_matrix(
                objs, hyps
            )  # Distances from object 1 to hypotheses 1, 2, 3 and Distances from object 2 to hypotheses 1, 2, 3
        )

        if params['show_boxes']:
            drawed_frame_aux = draw_boxes(image=frame,
                                          boxes=det_bboxes,
                                          color='r',
                                          linewidth=3,
                                          det=False,
                                          boxIds=True,
                                          tracker=list_positions)
            drawed_frame = deepcopy(drawed_frame_aux)
            if det_bboxes_old != -1:
                drawed_frame = draw_boxes_old(image=drawed_frame,
                                              boxes=det_bboxes_old,
                                              color='r',
                                              linewidth=3,
                                              det=False,
                                              boxIds=True,
                                              tracker=list_positions)
            cv2.rectangle(drawed_frame, (10, 2), (120, 20), (255, 255, 255),
                          -1)
            cv2.putText(drawed_frame, str(vidcap.get(cv2.CAP_PROP_POS_FRAMES)),
                        (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
            cv2.imshow('Frame', drawed_frame)
            cv2.waitKey(30)
            cv2.imwrite(
                params['results_path'] + f"tracking_{str(frame_id)}_IoU.jpg",
                drawed_frame.astype(int))

            drawed_frame2 = deepcopy(drawed_frame_aux)
            if det_bboxes_old != -1:
                drawed_frame2 = draw_boxes_old(image=drawed_frame2,
                                               boxes=det_bboxes_old,
                                               color='r',
                                               linewidth=3,
                                               det=False,
                                               boxIds=True,
                                               tracker=list_positions,
                                               shifted=True)
            cv2.rectangle(drawed_frame2, (10, 2), (120, 20), (255, 255, 255),
                          -1)
            cv2.putText(drawed_frame2,
                        str(vidcap.get(cv2.CAP_PROP_POS_FRAMES)), (15, 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
            cv2.imshow('Frame', drawed_frame2)
            cv2.waitKey(30)
            cv2.imwrite(
                './W4/OF_shifted_BB/' + f"tracking_{str(frame_id)}_IoU.jpg",
                drawed_frame2.astype(int))

        if params['save_results'] and frame_id >= (355 + 535) and frame_id < (
                410 + 535):  # if frame_id >= 535 and frame_id < 550
            drawed_frame = frame
            drawed_frame = draw_boxes(image=drawed_frame,
                                      boxes=det_bboxes,
                                      color='r',
                                      linewidth=3,
                                      det=False,
                                      boxIds=True,
                                      tracker=list_positions)
            if det_bboxes_old != -1:
                drawed_frame = draw_boxes_old(image=drawed_frame,
                                              boxes=det_bboxes_old,
                                              color='r',
                                              linewidth=3,
                                              det=False,
                                              boxIds=True,
                                              tracker=list_positions)
            cv2.rectangle(drawed_frame, (10, 2), (120, 20), (255, 255, 255),
                          -1)
            cv2.putText(drawed_frame, str(vidcap.get(cv2.CAP_PROP_POS_FRAMES)),
                        (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))

            cv2.imwrite(
                params['results_path'] + f"tracking_{str(frame_id)}_IoU.jpg",
                drawed_frame.astype(int))

        frame_id += 1
        old_frame = frame
        det_bboxes_old = det_bboxes

    mh = mm.metrics.create()
    summary = mh.compute(accumulator,
                         metrics=['precision', 'recall', 'idp', 'idr', 'idf1'],
                         name='acc')
    print(summary)
Code Example #25
class MultiObjectTracking:
    def __init__(self, dcname, dmname, config_name, checkpoint_gen,
                 checkpoint_id, trainer_pth_name, trainer_config_name, ap_name,
                 pwc_name, drate, ishape):
        detector_init(dcname, dmname)
        # REID init
        self.reid = ReID(config_name, checkpoint_gen, checkpoint_id,
                         trainer_pth_name, trainer_config_name)
        # pose init
        self.ap = AlphaPose(ap_name)
        self.drate = drate
        self.pwc_name = pwc_name
        # max_tracker number
        self.mtracker_id = 0
        self.trackers = []
        # optical flow
        if pwc_name is None:
            # Tracking init, optical flow init
            self.tracking = Tracking(self.drate, ishape)
        else:
            nshape = ((ishape[0] // 64 + int(ishape[0] % 64 > 0)) * 64,
                      (ishape[1] // 64 + int(ishape[1] % 64 > 0)) * 64,
                      ishape[2])
            pwc_opticalflow = Pwcnet(self.pwc_name, w=nshape[1], h=nshape[0])
            # Tracking init, optical flow init
            self.tracking = Tracking(self.drate, ishape, pwc_opticalflow,
                                     nshape)
        self.latest_detection_frame = None
        self.last_frame = None

    def FeedFirst(self, img):
        dboxes = detector_inference(img)
        # pose
        if len(dboxes) != 0:
            npdboxes = np.array(dboxes)
            appts = self.ap.inference(img, npdboxes[:, 0:4], npdboxes[:, 4:5])
        else:
            appts = []
        for i, dbox in enumerate(dboxes):
            self.mtracker_id += 1
            nt = Tracker(self.mtracker_id)
            # reid feature
            # boundary handling not implemented yet!!!!
            roi = img[dbox[1]:dbox[3], dbox[0]:dbox[2]]
            feature = self.reid.GetFeature(roi)
            appt = appts[i]
            nt.update(dbox, 1, feature, appt)
            self.trackers.append(nt)
        # optical flow
        if self.pwc_name is None:
            # when detection, update optical flow features
            self.tracking.good_feature_track_cpu(img, self.trackers)

        # save latest detection frame
        self.latest_detection_frame = img.copy()
        self.last_frame = img.copy()
        dict_data = {}
        for tracker in self.trackers:
            if tracker.state == 0:
                continue
            dtracker = {}
            dtracker['bbox_body'] = list(map(float, tracker.latest_box))
            dtracker['pose'] = tracker.pose_pts
            dtracker['fea_body'] = tracker.mfeature.tolist()[0]
            dict_data[tracker.tracker_id] = dtracker
        return dict_data

    def Feed(self, img, fid):
        # detection
        if fid % self.drate == 0:
            dboxes = detector_inference(img)
            features = []
            for dbox in dboxes:
                roi = img[dbox[1]:dbox[3], dbox[0]:dbox[2]]
                feature = self.reid.GetFeature(roi)
                features.append(feature)
            if self.pwc_name is None:
                # tracking, optical flow
                old_gray = cv2.cvtColor(self.latest_detection_frame,
                                        cv2.COLOR_BGR2GRAY)
                frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                self.tracking.optical_flow_cpu(old_gray, frame_gray,
                                               self.trackers, fid)
            else:
                self.tracking.optical_flow_gpu(self.last_frame, img,
                                               self.trackers, fid)
            print('tracking')
            if len(dboxes) != 0:
                npdboxes = np.array(dboxes)
                appts = self.ap.inference(img, npdboxes[:, 0:4], npdboxes[:,
                                                                          4:5])
            else:
                appts = []
            # tracking
            trackers, mtracker_id = self.tracking.tracking(
                self.trackers, dboxes, features, appts, fid, self.mtracker_id)
            self.mtracker_id = mtracker_id
            if self.pwc_name is None:
                # when detection, update optical flow features
                self.tracking.good_feature_track_cpu(img, trackers)
            # save latest detection frame
            self.latest_detection_frame = img.copy()
            print('detection and matching:%d' % len(self.trackers))
        else:
            if self.pwc_name is None:
                # tracking, optical flow
                old_gray = cv2.cvtColor(self.latest_detection_frame,
                                        cv2.COLOR_BGR2GRAY)
                frame_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                self.tracking.optical_flow_cpu(old_gray, frame_gray,
                                               self.trackers, fid)
            else:
                self.tracking.optical_flow_gpu(self.last_frame, img,
                                               self.trackers, fid)
            print('tracking')
        self.last_frame = img.copy()
        dict_data = {}
        for tracker in self.trackers:
            if tracker.state == 0:
                continue
            dtracker = {}
            dtracker['bbox_body'] = list(map(float, tracker.latest_box))
            dtracker['pose'] = tracker.pose_pts
            dtracker['fea_body'] = tracker.mfeature.tolist()[0]
            dict_data[tracker.tracker_id] = dtracker
        return dict_data

    def DisplayRes(self, img, frame_data):
        img_dis = img.copy()  # numpy images are duplicated with .copy(); .clone() is a torch method
        return img_dis
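The class implies a simple driving loop — FeedFirst on the first frame, then Feed with a running frame id. A sketch (all constructor arguments are placeholders):

import cv2

mot = MultiObjectTracking('det_cfg', 'det_model', 'reid_cfg', 'gen.pth',
                          'id.pth', 'trainer.pth', 'trainer_cfg', 'ap_cfg',
                          pwc_name=None, drate=5, ishape=(1080, 1920, 3))

cap = cv2.VideoCapture('input.mp4')
ok, img = cap.read()
tracks = mot.FeedFirst(img)          # detect and init trackers on frame 1
fid = 1
while True:
    ok, img = cap.read()
    if not ok:
        break
    fid += 1
    tracks = mot.Feed(img, fid)      # full detection every drate-th frame
cap.release()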
Code Example #26
import cv2
from person_detection import PersonDetection
from tracking import Tracking
import numpy as np

cap = cv2.VideoCapture("./files/output_0.avi")
mode = 1  # simple: just video
w, h = int(cap.get(3)), int(cap.get(4))
forcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter("./files/output_"+str(mode)+".avi", forcc, 25, (w, h))

pd = PersonDetection()
tk = Tracking()

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break  # end of the file; `continue` here would spin forever
    
    if mode == 1:
        #person detection
        bboxes, conf = pd.detect(frame)
        #tracking
        if len(bboxes) > 0:
            tracks = tk.track(frame, bboxes, conf)
            
    out.write(frame)
    print(".", end="")
    cv2.imshow("frame", frame)
    
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Code Example #27
class TrackingBox(QSplashScreen):
    splash_pix = None
    done = False

    def __init__(self, id, shared_variables, score, classification, box, *args,
                 **kwargs):
        super(TrackingBox, self).__init__(*args, **kwargs)
        self.classification = classification
        self.shared_variables = shared_variables
        self.counter = 0
        self.x = box[0]
        self.y = box[1]
        self.width = box[2]
        self.height = box[3]
        self.id = id
        self.splash_pix = QPixmap('./images/box2.png')
        self.splash_pix = self.splash_pix.scaled(
            round(self.width * self.shared_variables.DETECTION_SCALE),
            round(self.height * self.shared_variables.DETECTION_SCALE))
        self.setPixmap(self.splash_pix)

        self.setWindowFlag(Qt.WindowStaysOnTopHint)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setAttribute(Qt.WA_NoSystemBackground)

        label = QLabel(self)
        label.setWordWrap(True)
        label.move(30, 30)
        label.setStyleSheet(" color: rgb(0, 100, 200); font-size: 15pt; ")

        label.setText(str(int(100 * score)) + "%" + " " + classification)
        self.move(self.x, self.y)
        self.show()

        self.tracking = Tracking((self.x, self.y, self.width, self.height),
                                 self.shared_variables)

        self.threadpool = QThreadPool()

        #print("New Box Created at ",self.x,self.y, " Size ", self.width, self.height)

        self.start_worker()

    def progress_fn(self, n):
        #print("%d%% done" % n)
        pass

    def remove(self):
        self.shared_variables.list.remove(self)
        self.done = True
        self.threadpool.clear()  # drop queued workers; the bare `.cancel` was a no-op

    def execute_this_fn(self, progress_callback):

        if (not self.tracking.running):
            if not self.done:  # Remove ourself from gui list
                self.shared_variables.list.remove(self)
                self.done = True
                self.threadpool.clear()  # was `self.threadpool.cancel`, a no-op attribute access
        else:
            self.tracking.run()

        return "Done."

    def print_output(self, s):
        #print(str(self.id))
        self.hide()
        self.repaint_size(
            round(self.tracking.box[2] *
                  self.shared_variables.DETECTION_SCALE),
            round(self.tracking.box[3] *
                  self.shared_variables.DETECTION_SCALE))
        self.move(
            round(self.tracking.box[0] *
                  self.shared_variables.DETECTION_SCALE),
            round(self.tracking.box[1] *
                  self.shared_variables.DETECTION_SCALE))
        self.show()

    def thread_complete(self):
        #print("THREAD COMPLETE!")
        self.start_worker()

    def start_worker(self):
        # Pass the function to execute
        worker = Worker(
            self.execute_this_fn
        )  # Any other args, kwargs are passed to the run function
        worker.signals.result.connect(self.print_output)
        worker.signals.finished.connect(self.thread_complete)
        worker.signals.progress.connect(self.progress_fn)

        # Execute
        self.threadpool.start(worker)

    def repaint_size(self, width, height):
        #splash_pix = QPixmap('../images/box2.png')
        self.splash_pix = self.splash_pix.scaled(width, height)
        self.setPixmap(self.splash_pix)

    def get_box(self):
        return self.tracking.box
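Worker here matches the common QRunnable-plus-signals recipe (result/finished/progress); a minimal sketch consistent with how it is used above, assuming PyQt5:

from PyQt5.QtCore import QObject, QRunnable, pyqtSignal, pyqtSlot

class WorkerSignals(QObject):
    finished = pyqtSignal()
    result = pyqtSignal(object)
    progress = pyqtSignal(int)

class Worker(QRunnable):
    def __init__(self, fn, *args, **kwargs):
        super().__init__()
        self.fn, self.args, self.kwargs = fn, args, kwargs
        self.signals = WorkerSignals()

    @pyqtSlot()
    def run(self):
        # Run the payload, then report its result and completion.
        result = self.fn(*self.args,
                         progress_callback=self.signals.progress.emit,
                         **self.kwargs)
        self.signals.result.emit(result)
        self.signals.finished.emit()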
Code Example #28
from gridmap import GridMap
from detection import Detection
from tracking import Tracking
import time

# DataSet (used below) is imported elsewhere in the project; the module is not shown here

# Choose scenario
path = '/home/simonappel/KITTI/raw/'
date = '2011_09_26'
drive = '0001'
frame_range = range(0, 100, 1)

# Construct objects
dataset = DataSet(path, date, drive, frame_range)
grid = GridMap()
detector = Detection()
tracker = Tracking()

# Loop through frames
for frame in range(0, 2, 1):
    print "-----Frame " + str(frame) + "-----"
    point_cloud = dataset.get_point_cloud(frame)
    pose = dataset.get_pose(frame)
    timeframe = dataset.get_timeframe(frame)
    print "Pose " + str(pose)
    print "Timeframe " + str(timeframe)
    t0 = time.time()
    grid.fill_point_cloud_in_grid(point_cloud)
    t1 = time.time()
    print "Fill grid in " + str(t1 - t0) + "s"
    #grid.display_grid_map()
Code Example #29
        data = json.dumps(self.countingData)
        try:
            requests.post(url, data=data, headers=headers)
            # clean up the countingData after successful upload
            self.countingData = []

        except Exception, e:
            print "error posting countingData" + str(e)
    


if __name__ == '__main__':
    paramObj = Parameters()
    uploadURLfull = uploadURL + '/' + str(paramObj.wl_dev_cam_id)
    countingObj = PeopleCounting(paramObj)
    trackingObj = Tracking(paramObj.countingRegion, paramObj.upperTrackingRegion, paramObj.lowerTrackingRegion, paramObj.peopleBlobSize, paramObj.useRatioCriteria)
    bkModelObj = bkgModel(paramObj)
    
    frame = countingObj.getFrame()
    output_width = int(frame.shape[1] * paramObj.scale)
    output_height = int(frame.shape[0] * paramObj.scale)
    if paramObj.Visualize:
        visObj = visualize(paramObj,output_width,output_height)

    if useVideo:
        criteria = countingObj.cap.isOpened()
    elif useRTSP:
        criteria = True
    while criteria:
        start = time.clock()
        frame = countingObj.getFrame()
Code Example #30
import cv2

# assumed project-local imports, matching the names used below
from person_detection import PersonDetection
from tracking import Tracking

from face import FaceDAndR
import numpy as np

from Inventory import Suspect, Person, Track

#overlapping of bboxes
from imutils.object_detection import non_max_suppression

import time

cap = cv2.VideoCapture(0)

st = time.time()
print("Loading Models")
pd = PersonDetection()
tk = Tracking(1)
fdr = FaceDAndR()  # re-enabled: fdr.extractFaces is called below
print("Done")
et = time.time()
print("Time: " + str(et - st) + "s")

st = time.time()
print("Loading suspects")
files = ["./tmp/arbaz.png", "./tmp/majid.png"]
suspects = []
for f in files:
    img = cv2.imread(f)
    faces = fdr.extractFaces(img)
    if len(faces) <= 0:
        continue
Code Example #31
    'onRemove': lineStorage.remove,
    'onTypeChoose': lineStorage.onTypeChoose
})
builder.get_object("lineMenu").add(lineSelectionList)
window.show_all()
builder.connect_signals(Handler())


def show_frame(frame):
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    pb = GdkPixbuf.Pixbuf.new_from_data(frame.tostring(),
                                        GdkPixbuf.Colorspace.RGB,
                                        False,
                                        8,
                                        frame.shape[1],
                                        frame.shape[0],
                                        frame.shape[2]*frame.shape[1])
    image.set_from_pixbuf(pb.copy())
    image.show_all()


from tracking import Tracking
# tracking = Tracking(loop,show_frame, lineStorage,"/data/livetraffic/2017-08-27/3/tokyo.mp4",0.5)
tracking = Tracking(loop, show_frame, lineStorage,
                    "/data/livetraffic/2017-07-18/taiwan.mp4", 1,
                    frameStart=80000)
# GLib.idle_add(tracking)
# GLib.idle_add(show_frame)
# Gtk.main()
asyncio.ensure_future(tracking())  # asyncio.async() was removed in Python 3.10

loop.run_forever()
Code Example #32
class CalcThtead(QThread):
    GetNewCoordinatesInt = pyqtSignal(int, int)
    GetNewCoordinatesStr = pyqtSignal(str)

    def __init__(self, parent=None):
        super(CalcThtead, self).__init__()
        self.vidSource = VideoSource(ap)
        self.Video1 = self.vidSource.getVid1()
        self.Video2 = self.vidSource.getVid2()
        self.stopC = 0
        self.db = DB()
        self.tracker1 = Tracking()
        self.tracker2 = Tracking()
        self.center = None
        self.runIndicator = 0
        self.camInd = None
        self.trapezium = Trapezium()

    def run(self):
        self.tracker1.getVideo(self.Video1)
        self.tracker2.getVideo(self.Video2)
        self.db.connect()
        self._id = 0
        self.idChecker = CamId()

        while True:
            if self.stopC == 1:
                break

            center1 = self.tracker1.trackFrame()
            center2 = self.tracker2.trackFrame()

            if center1 == -1 or center2 == -1:
                break

            if center1 is None:
                scaledCenter = Transformation(
                    center2, self.trapezium.getTrapeziumBaseLarge(),
                    self.trapezium.getTrapeziumBaseSmall(),
                    self.trapezium.getTrapeziumHeight(),
                    self.trapezium.getZeroPointPosition())
                self.center = Rotate(scaledCenter)
                delta = None
                self.camInd = "Вторая"
            elif center2 is None:
                scaledCenter = Transformation(
                    center1, self.trapezium.getTrapeziumBaseLarge(),
                    self.trapezium.getTrapeziumBaseSmall(),
                    self.trapezium.getTrapeziumHeight(),
                    self.trapezium.getZeroPointPosition())
                self.center = scaledCenter
                delta = None
                self.camInd = "Первая"
            else:
                scaledCenter1 = Transformation(
                    center1, self.trapezium.getTrapeziumBaseLarge(),
                    self.trapezium.getTrapeziumBaseSmall(),
                    self.trapezium.getTrapeziumHeight(),
                    self.trapezium.getZeroPointPosition())
                scaledCenter2 = Transformation(
                    center2, self.trapezium.getTrapeziumBaseLarge(),
                    self.trapezium.getTrapeziumBaseSmall(),
                    self.trapezium.getTrapeziumHeight(),
                    self.trapezium.getZeroPointPosition())

                delta = Compare(scaledCenter1, scaledCenter2)
                self.center = ([
                    int((scaledCenter1[0] + scaledCenter2[0]) / 2),
                    int((scaledCenter1[1] + scaledCenter2[1]) / 2)
                ])
                delta = [int(delta[0]), int(delta[1])]
                self.camInd = "Обе"

            self.center = [int(self.center[0]), int(self.center[1])]

            if not self.idChecker.isCurrent(self.center):
                self._id = self._id + 1

            self.GetNewCoordinatesInt.emit(self.center[0], self.center[1])
            # Russian UI string: "Position = ...  Camera: ...  Object: ..."
            self.GetNewCoordinatesStr.emit("Позиция = " + str(self.center) +
                                           "  Камера: " + self.camInd +
                                           "  Объект: " + str(self._id))

            self.db.vrite(self.center, delta, self.camInd, self._id)
            if self.runIndicator != 1:
                self.runIndicator = 1

        self.db.commit()
        self.db.disconnect()
        self.tracker1.stop()
        self.tracker2.stop()
        if self.stopC != 0:
            self.stopC = 0
        if self.runIndicator != 0:
            self.runIndicator = 0