コード例 #1
0
ファイル: memtrack.py プロジェクト: songheony/A3T
 def __init__(self):
     """Set up a TensorFlow session, restore the MemTrack model and wrap it in a Tracker."""
     super(MemTrack, self).__init__("MemTrack")
     # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
     tf_config = tf.ConfigProto()
     tf_config.gpu_options.allow_growth = True
     session = tf.Session(config=tf_config)
     checkpoint = tf.train.get_checkpoint_state(path_config.MEMTRACK_MODEL)
     restored_model = Model(session, checkpoint.model_checkpoint_path)
     self.tracker = Tracker(restored_model)
コード例 #2
0
ファイル: memtrack.py プロジェクト: songheony/A3T
class MemTrack(BaseTracker):
    """BaseTracker adapter around the MemTrack TensorFlow model."""

    def __init__(self):
        super(MemTrack, self).__init__("MemTrack")
        # Grow GPU memory on demand rather than reserving it all up front.
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        session = tf.Session(config=tf_config)
        checkpoint = tf.train.get_checkpoint_state(path_config.MEMTRACK_MODEL)
        restored_model = Model(session, checkpoint.model_checkpoint_path)
        self.tracker = Tracker(restored_model)

    def initialize(self, image_file, box):
        """Start a new track from bounding box *box* in frame *image_file*."""
        self.tracker.initialize(image_file, box)

    def track(self, image_file):
        """Track into the next frame and return only the bounding box."""
        bbox, _ = self.tracker.track(image_file)
        return bbox
コード例 #3
0
    def __init__(self, mqtt_master, model, camera_front, camera_down):
        """Wire up the perception/avoidance modules, then enter the policy loop.

        NOTE: this constructor never returns under normal operation —
        _policy() runs the control loop forever.
        """
        # Handles supplied by the caller.
        self.master = mqtt_master
        self.model = model
        self.cam_handler_front = camera_front
        self.cam_handler_down = camera_down

        # Detection and tracking share the same model instance.
        self.detector = Detector(self.model)
        self.tracker = Tracker(self.model)
        self.tracker.reset()

        # Obstacle-avoidance helper plus its timing bookkeeping.
        self.avoider = AvoidObject()
        self.avoid_start_time = None
        self.already_avoided = 0

        self._policy()
コード例 #4
0
def run_tracker():
    """Track the 'Basketball' sequence and display each frame's result.

    Returns:
        res: list of bounding boxes — the initial box followed by one
            box (as a plain list) per tracked frame.
        result_type: result-format identifier, always 'rect'.
        fps: tracking throughput in frames per second.
    """
    config_proto = tf.ConfigProto()
    # Allocate GPU memory on demand instead of reserving everything.
    config_proto.gpu_options.allow_growth = True
    with tf.Graph().as_default(), tf.Session(config=config_proto) as sess:
        os.chdir('../')  # model/sequence paths are relative to the parent dir
        model = Model(sess)
        tracker = Tracker(model)
        init_rect, s_frames = load_seq_config('Basketball')
        bbox = init_rect
        res = [bbox]
        start_time = time.time()
        tracker.initialize(s_frames[0], bbox)

        for idx in range(1, len(s_frames)):
            tracker.idx = idx
            bbox, cur_frame = tracker.track(s_frames[idx])
            display_result(cur_frame, bbox, idx)
            res.append(bbox.tolist())
        end_time = time.time()
        # BUG FIX: the original read the loop variable `idx` after the loop,
        # which raises NameError for a single-frame sequence; derive the
        # tracked-frame count from the sequence length instead.
        num_tracked = len(s_frames) - 1
        # Renamed from `type` to avoid shadowing the builtin.
        result_type = 'rect'
        fps = num_tracked / (end_time - start_time)

    return res, result_type, fps
コード例 #5
0
from inferences.non_optimized import Network
from tracking.tracker import Tracker
import cv2
import os
# Directory containing this script; used to locate the demo video.
BASE_DIR = "/".join(os.path.realpath(__file__).split("/")[:-1])

trk_thresh = 0.4  # tracker association threshold
det_thresh = 0.5  # detector confidence threshold

net = Network()
# BUG FIX: keep the path and the capture object in separate names instead of
# reusing `video` for both.
video_path = BASE_DIR + "/video/Pedestrian_Detect_2_1_1.mp4"
video = cv2.VideoCapture(video_path)
FPS = video.get(cv2.CAP_PROP_FPS)
Trk = Tracker(trk_thresh, FPS)

while True:
    # BUG FIX: the original bound the success flag to `_` (the conventional
    # throwaway name) and then tested it; use an explicit name.
    ok, frame = video.read()
    if not ok:
        break
    detections = net.predict(frame, det_thresh, False, 10)
    image, conteo = Trk.track_dets(detections, frame)
    print("Personas ingresadas:", conteo)
    cv2.imshow("win", image)
    cv2.waitKey(1)

# BUG FIX: release the capture handle, not just the display windows.
video.release()
cv2.destroyAllWindows()
コード例 #6
0
class Policy:
    """Finite-state control policy for a brick-collecting robot.

    The robot scans with its front camera for bricks (gripper empty) or for
    the matching drop-off box (gripper full), steers toward the target,
    approaches and grips/releases, and sidesteps obstacles on the way.
    The state machine runs forever inside __init__ via _policy().
    """

    # Target y (px) beyond which the target is close enough to approach.
    STRAIGHT_THRESH_BRICK = 400
    STRAIGHT_THRESH_BOX = 330

    # Obstacles are ignored when a box target is closer than this y value.
    IGNORE_OBSTACLE_THRESH = 150

    # Parameters of the steering corridor below.
    BASELINE = 30
    FACTOR = 0.3

    # Steering corridor around image centre x=320; it widens for distant
    # targets (small y) — assumes a 640x480 frame (TODO confirm).
    THRES_R = lambda y: 320 + Policy.BASELINE + Policy.FACTOR * (480 - y)
    THRES_L = lambda y: 320 - Policy.BASELINE - Policy.FACTOR * (480 - y)

    # Low-level movement states (used to avoid re-sending motor commands).
    MOVEMENT_STATE_LEFT = "move_left"
    MOVEMENT_STATE_RIGHT = "move_right"
    MOVEMENT_STATE_STRAIGHT = "move_straight"
    MOVEMENT_STATE_APPROACH = "move_approach"

    # High-level policy states.
    POLICY_STATE_SCANNING = "scanning"
    POLICY_STATE_ON_TARGET = "on_target"
    POLICY_STATE_APPROACHING = "approaching"
    POLICY_STATE_AVOID = "avoid"
    POLICY_STATE_PICK_UP_DURING_AVOIDANCE = "pick_up_during_avoidance"

    # Gripper states.
    GRIPPER_STATE_EMPTY = "gripper_empty"
    GRIPPER_STATE_FULL = "gripper_full"

    # Seconds to drive straight past an obstacle after clearing it.
    AVOID_STRAIGHT_FOR = 10

    def __init__(self, mqtt_master, model, camera_front, camera_down):
        """Wire up perception modules and enter the (blocking) policy loop."""
        self.master = mqtt_master

        self.model = model

        self.cam_handler_front = camera_front
        self.cam_handler_down = camera_down

        self.detector = Detector(self.model)

        self.tracker = Tracker(self.model)
        self.tracker.reset()

        # Obstacle-avoidance helper and its timing bookkeeping.
        self.avoider = AvoidObject()
        self.avoid_start_time = None
        self.already_avoided = 0

        # NOTE: never returns — runs the state machine forever.
        self._policy()

    def _policy(self):
        """Main state-machine loop; dispatches on policy and gripper state."""
        policy_state = Policy.POLICY_STATE_SCANNING
        gripper_state = Policy.GRIPPER_STATE_EMPTY
        # Cluster id of the last picked brick; box id is cluster_id + 1.
        cluster_id = -1
        while True:
            if policy_state == Policy.POLICY_STATE_SCANNING:
                print("scanning")
                if gripper_state == Policy.GRIPPER_STATE_EMPTY:
                    policy_state = self._scan_policy(assess_fn=self._assess_brick)

                else:
                    policy_state = self._scan_policy(assess_fn=lambda: self._assess_box(cluster_id + 1))
            elif policy_state == Policy.POLICY_STATE_ON_TARGET:
                if gripper_state == Policy.GRIPPER_STATE_EMPTY:
                    policy_state = self._go_to_target_policy(assess_fn=self._assess_brick,
                                                             straight_thresh=Policy.STRAIGHT_THRESH_BRICK)

                else:
                    policy_state = self._go_to_target_policy(assess_fn=lambda: self._assess_box(cluster_id + 1),
                                                             straight_thresh=Policy.STRAIGHT_THRESH_BOX)

            elif policy_state == Policy.POLICY_STATE_APPROACHING:
                if gripper_state == Policy.GRIPPER_STATE_EMPTY:
                    assert gripper_state == Policy.GRIPPER_STATE_EMPTY, "Wrong state - gripper not empty for pick up"
                    policy_state, cluster_id = self._pick_up_brick()
                    gripper_state = Policy.GRIPPER_STATE_FULL
                else:
                    assert gripper_state == Policy.GRIPPER_STATE_FULL, "Wrong state - gripper not full for release"
                    policy_state = self._put_brick_into_box()
                    gripper_state = Policy.GRIPPER_STATE_EMPTY

            elif policy_state == Policy.POLICY_STATE_AVOID:
                # Picking up during avoidance only makes sense with an empty gripper.
                policy_state = self._avoid_policy(pick_up_during_avoidance=(gripper_state == Policy.GRIPPER_STATE_EMPTY))

            elif policy_state == Policy.POLICY_STATE_PICK_UP_DURING_AVOIDANCE:
                assert gripper_state == Policy.GRIPPER_STATE_EMPTY, "Wrong state - gripper not empty for pick up"
                policy_state, cluster_id = self._pick_up_brick()
                # Resume avoidance after the opportunistic pick-up.
                policy_state = Policy.POLICY_STATE_AVOID
                gripper_state = Policy.GRIPPER_STATE_FULL

            else:
                assert False, "Unknown State"

    def _put_brick_into_box(self):
        """Release the carried brick into the box, back off and turn around."""
        self.master.blocking_call(Master.to_slave, 'stop_motion()')
        self.master.blocking_call(Master.to_slave, 'raise_lift()')
        self.master.blocking_call(Master.to_slave, 'approach_brick()')
        self.master.blocking_call(Master.to_slave, 'open_gripper()')
        self.master.blocking_call(Master.to_slave, 'move_straight(-1000, 1000)')
        self.master.blocking_call(Master.to_slave, 'turn_right(180)')
        self.master.blocking_call(Master.to_slave, 'raise_lift_driving()')
        return Policy.POLICY_STATE_SCANNING

    def _pick_up_brick(self):
        """Lower the lift, creep onto the brick, grip it and raise the lift.

        Returns (next_policy_state, cluster_id_of_picked_brick).
        """
        self.master.blocking_call(Master.to_slave, 'stop_motion()')
        self.master.blocking_call(Master.to_slave, 'lower_lift()')
        cluster_id = self._approach_brick()
        self.master.blocking_call(Master.to_slave, 'close_gripper()')
        self.master.blocking_call(Master.to_slave, 'raise_lift_driving()')
        return Policy.POLICY_STATE_SCANNING, cluster_id

    def _approach_brick(self):
        """Drive forward until the downward camera sees a brick in the gripper."""
        self.master.blocking_call(Master.to_slave, "move_forever()")
        while True:
            cluster_id = self.detector.detect_brick_gripper(self.cam_handler_down.get_next_frame())
            if cluster_id is not None:
                print("Cluster", cluster_id)
                break
        self.master.blocking_call(Master.to_slave, "stop_motion()")
        return cluster_id

    def _assess_brick(self):
        """Assess the front view: obstacle takes priority over a brick target."""
        img = self.cam_handler_front.get_next_frame()
        lego_map, obstacle_map = self.model.extract_hmap(img)
        obstacle = self.avoider.getObstacle(obstacle_map)
        cv2.imshow('Legomap',lego_map)
        print("Obstacle brick: ",obstacle)
        if obstacle is not None:
            return AssessResult(object=AssessResult.OBJECT_OBSTACLE, point=obstacle[0], max_y=obstacle[1])
        else:
            target = self.tracker.track(lego_map)
            if target is not None:
                return AssessResult(object=AssessResult.OBJECT_TARGET_BRICK, point=target)
            else:
                return AssessResult(object=AssessResult.OBJECT_NONE)

    def _assess_box(self, cid):
        """Assess the front view for box *cid*, averaging several detections."""
        assert cid >= 0, "Invalid Cluster ID"

        # Accumulate detections over a few frames to smooth out noise.
        acc_point = np.zeros(2)
        not_none = 0
        iters = 3

        img = self.cam_handler_front.get_next_frame()
        _, obstacle_map = self.model.extract_hmap(img, True)

        obstacle = self.avoider.getObstacle(obstacle_map)
        target = Detector.detect_box(img, cid)
        if target is not None:
            not_none += 1
            acc_point += target

        print("Obstacle box: ",obstacle)

        for _ in range(iters):
            print("Request_frame")
            img = self.cam_handler_front.get_next_frame()
            target = Detector.detect_box(img, cid)
            '''if the chilitag is far away we can avoid obstacles'''
            if target is not None:
                if obstacle is not None and target[1] < Policy.IGNORE_OBSTACLE_THRESH:
                    return AssessResult(object=AssessResult.OBJECT_OBSTACLE, point=obstacle[0], max_y=obstacle[1])
                not_none += 1
                acc_point += target

        if not_none == 0:
            return AssessResult(object=AssessResult.OBJECT_NONE)
        else:
            # Average of all successful detections.
            return AssessResult(object=AssessResult.OBJECT_TARGET_BOX, point=acc_point / not_none)

    def _start_approach(self, x, y, straight_thresh):
        """True when target (x, y) is close enough and inside the corridor."""
        return y > straight_thresh and Policy.THRES_L(y) < x < Policy.THRES_R(y)

    def _avoid_policy(self, pick_up_during_avoidance):
        """Turn left until the obstacle leaves view, then drive straight past it.

        While driving past, optionally watch for bricks and switch to the
        pick-up-during-avoidance state; remaining drive time is remembered
        in self.already_avoided so the pass resumes afterwards.
        """
        self.master.blocking_call(Master.to_slave, 'turn_left_forever()')
        while True:
            print("turning")
            img = self.cam_handler_front.get_next_frame()
            _, obstacle_map = self.model.extract_hmap(img)
            obstacle = self.avoider.getObstacle(obstacle_map)
            if obstacle is None:
                break
        print("Moving past the obstacle")
        self.avoid_start_time = time.time()
        self.master.blocking_call(Master.to_slave, 'move_forever()')

        while time.time() - self.avoid_start_time < Policy.AVOID_STRAIGHT_FOR - self.already_avoided:
           if pick_up_during_avoidance:
                print("approach while avoid")
                self.tracker.reset()
                res = self._assess_brick()
                if res.object == AssessResult.OBJECT_TARGET_BRICK and self._start_approach(res.point[0], res.point[1], Policy.STRAIGHT_THRESH_BRICK):
                    self.already_avoided = time.time() - self.avoid_start_time
                    return Policy.POLICY_STATE_PICK_UP_DURING_AVOIDANCE
        print("Done moving")
        self.already_avoided = 0
        return Policy.POLICY_STATE_SCANNING


    def _go_to_target_policy(self, assess_fn, straight_thresh):
        """Steer toward the assessed target until close enough to approach.

        Motor commands are only re-sent when the movement state changes.
        """
        state = ''
        while True:
            res = assess_fn()
            if res.object == AssessResult.OBJECT_TARGET_BRICK or res.object == AssessResult.OBJECT_TARGET_BOX:
                x = res.point[0]
                y = res.point[1]
                if self._start_approach(x, y, straight_thresh):
                    return Policy.POLICY_STATE_APPROACHING
                elif x > Policy.THRES_R(y):
                    if state != Policy.MOVEMENT_STATE_RIGHT:
                        state = Policy.MOVEMENT_STATE_RIGHT
                        print('changed to right')
                        self.master.blocking_call(Master.to_slave, 'turn_right_forever()')
                elif x < Policy.THRES_L(y):
                    if state != Policy.MOVEMENT_STATE_LEFT:
                        print('changed to left')
                        state = Policy.MOVEMENT_STATE_LEFT
                        self.master.blocking_call(Master.to_slave, 'turn_left_forever()')
                else:
                    if state != Policy.MOVEMENT_STATE_STRAIGHT:
                        print('changed to straight')
                        state = Policy.MOVEMENT_STATE_STRAIGHT
                        self.master.blocking_call(Master.to_slave, 'move_forever()')
            elif res.object == AssessResult.OBJECT_OBSTACLE:
                print("Obstacle detected")
                return Policy.POLICY_STATE_AVOID
            else:
                return Policy.POLICY_STATE_SCANNING

    def _scan_policy(self, assess_fn):
        """Rotate right until a target is found; de-overshoot for bricks.

        For bricks: after first sighting, keep turning ~3 s, re-assess, and
        if the brick got farther (or was lost) turn back left for ~3 s.
        NOTE(review): the busy-wait loops burn CPU for the full 3 s — confirm
        this is intentional on the target hardware.
        """
        turning = False
        while True:
            print("scan policy")
            res = assess_fn()
            if not turning:
                self.master.blocking_call(Master.to_slave, 'turn_right_forever()')
                turning = True
            if res.object == AssessResult.OBJECT_TARGET_BRICK:
                start = time.time()
                run_time = start
                old_point = res.point
                print('Overshooting')
                while run_time-start<3:
                    run_time = time.time()

                self.tracker.reset()
                res = assess_fn()

                print(old_point)
                print(res.point)
                if res.point is None or old_point[1]>res.point[1]:
                    start = time.time()
                    run_time = start
                    self.master.blocking_call(Master.to_slave, 'turn_left_forever()')
                    while run_time-start<3:
                        res = assess_fn()
                        run_time = time.time()
                return Policy.POLICY_STATE_ON_TARGET
            elif res.object == AssessResult.OBJECT_TARGET_BOX:
                return Policy.POLICY_STATE_ON_TARGET
コード例 #7
0
def demo(save=True, use_cluster=True):
    """Run multi-camera multi-object tracking on the 4p lab sequences.

    Embeds per-camera detections with a box2vec network, fuses embeddings
    across cameras (clustering or pairwise-distance fusion), feeds the fused
    embeddings to a Tracker and draws per-trajectory colored boxes.

    Args:
        save: if True, write annotated per-camera videos under result/<timestamp>/.
        use_cluster: choose fusion_cluster() vs the distance-threshold fusion().
    """
    file_names = ['4p-c0', '4p-c1', '4p-c2', '4p-c3']

    video_files = [
        os.path.join(currentUrl, 'data', 'train', 'lab', file_name+'.avi')\
            for file_name in file_names
    ]

    detection_files = [
        os.path.join(currentUrl, 'data', 'train', 'lab', file_name+'.pickle')\
            for file_name in file_names
    ]

    model_file = os.path.join(currentUrl, 'box2vec', 'model', 'model-86000')
    # NOTE(review): only used by the non-cluster branch; the cluster branch
    # hard-codes distance_threshold=1 below — confirm that is intentional.
    cluster_distance_threshold = 0.7

    print('video_files:')
    print(video_files)
    print('detection_files:')
    print(detection_files)
    caps, detections, video_length = get_caps_and_pickles(
        video_files, detection_files)
    if save:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # Timestamped output dir; ':' is not path-safe on all platforms.
        now_str = str(datetime.now()).replace(':', '-')[:-7]
        cwd = os.getcwd()

        save_dir = os.path.join(cwd, 'result', now_str)
        os.mkdir(save_dir)
        video_names = [
            os.path.join(save_dir, 'c_' + str(i) + '.avi')
            for i in range(len(caps))
        ]
        outers = [
            cv2.VideoWriter(video_names[i], fourcc, 30.0, (360, 288))
            for i in range(len(caps))
        ]
    box_to_vect, sess = init_box_to_vect_net(model_file)
    tracker = Tracker(traj_smoth_alpha=0.99,
                      image_boundary=None,
                      lost_times_thresh=30,
                      lost_times_thresh_edge=3,
                      appear_times_thresh=30,
                      assoc_score_thresh=0.7,
                      cost_metric='distance')

    # Main per-frame loop (starts at frame 100).
    for frame_id in range(100, video_length):
        print('-' * 60)
        print('frame_id:', frame_id)
        frames, boxes = get_frames_and_boxes(caps,
                                             detections,
                                             frame_id,
                                             show=False)
        embedding = get_embedding(boxes, frames, box_to_vect, sess)
        if embedding is None:
            continue
        # Debug visualization of the embedding space (blocks ~1 s per frame).
        if embedding.shape[0] > 1:
            embedding_tsne = TSNE().fit_transform(embedding)
            plt.scatter(embedding_tsne[:, 0], embedding_tsne[:, 1])
            plt.pause(1)
            plt.close('all')
        if use_cluster:
            cluster_set, fused_embeddings = fusion_cluster(
                bboxes=embedding,
                method='cluster',
                distance_threshold=1,
                nms_threshold=0.3)

        else:
            # Fuse by thresholded pairwise L2 distance, then average each cluster.
            distance = l2_distance(embedding, embedding)
            print('distance:')
            print(distance)
            cluster_set = fusion(distance,
                                 distance_threshold=cluster_distance_threshold)
            print('cluster_sets:', len(cluster_set.sets))
            fused_embeddings = []
            for embedding_ids in cluster_set.sets:
                clustered_embedding = embedding[embedding_ids, :]
                fused_embeddings.append(np.mean(clustered_embedding, axis=0))
            fused_embeddings = np.concatenate(fused_embeddings, axis=0)
            fused_embeddings = np.reshape(fused_embeddings, [-1, 128])
            print('fused_embeddings:')
            print(fused_embeddings.shape)

        tracker.update_tracker(candidate_bboxes_original=fused_embeddings,
                               time_stamp=frame_id)
        # Map each currently-alive object id to its trajectory serial number.
        obj_to_traj = {}
        for valid_index in tracker.real_alive_index:
            print('valid_index:', valid_index)
            traj = tracker.trajectories[valid_index]
            current_object_id = traj.object_id[-1]
            traj_serial_num = tracker.whole_real_alive_index.index(valid_index)
            obj_to_traj[current_object_id] = traj_serial_num
            #print(traj_serial_num, ':', current_object_id)

        print(obj_to_traj)

        # Propagate the trajectory id of each fused cluster back to every
        # original (per-camera) detection in that cluster; -1 = unassigned.
        global_object_id_to_traj_id = [-1] * embedding.shape[0]
        for obj_id, cluster in enumerate(cluster_set.sets):
            for global_object_id in cluster:
                if obj_id in obj_to_traj:
                    global_object_id_to_traj_id[
                        global_object_id] = obj_to_traj[obj_id]
        # Walk detections in the same camera/box order used to build `embedding`.
        global_object_id = 0
        for camera_id in range(4):
            frame = frames[camera_id]
            local_bbox = boxes[camera_id]
            for box in local_bbox:
                traj_id = global_object_id_to_traj_id[global_object_id]
                # print(traj_id)
                # Seed with the trajectory id so each track keeps a stable color.
                np.random.seed(traj_id + 10)
                color = np.random.randint(256, size=3)
                cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]),
                              (int(color[0]), int(color[1]), int(color[2])), 1)

                global_object_id += 1
                cv2.putText(frame, str(traj_id), (box[0], box[3]),\
                    cv2.FONT_HERSHEY_COMPLEX, 0.8, (int(color[0]), int(color[1]), int(color[2])), thickness = 2, lineType = -1)
            cv2.putText(frame, str(frame_id), (0, 20),\
                    cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), thickness=2, lineType=-1)
            # print(frame.shape)
            if save:
                outers[camera_id].write(frame)
            cv2.imshow('camera' + str(camera_id), frame)
            cv2.waitKey(1)
コード例 #8
0
ファイル: test_tracking.py プロジェクト: bjuncek/detr
def main(args,
         dataset,
         load_results,
         frame_range,
         interpolate,
         write_images,
         oracle=None):
    """Run tracktor-style tracking over a dataset and evaluate against GT.

    Builds the object detector (FRCNN or DETR), the reID network and the
    Tracker, then for each sequence either loads cached results or tracks
    frame by frame, optionally interpolates tracks, writes results/images and
    accumulates MOT metrics.

    Args:
        args: parsed config namespace (model paths, out_dir, flags).
        dataset: dataset name passed to Datasets().
        load_results: reuse previously written per-sequence results if present.
        frame_range: dict with 'start'/'end' fractions of each sequence.
        interpolate: fill gaps in tracks before writing.
        write_images: plot annotated sequences when truthy.
        oracle: unused here — the OracleTracker branch is commented out below.
    """
    output_dir = osp.join(get_output_dir(args.out_dir, args.name))

    if not osp.exists(output_dir):
        os.makedirs(output_dir)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    print("Initializing object detector(s).")

    obj_detects = []
    #FIXME: Bruno fix this to reflect DETRa
    if not args.use_detr:
        obj_detect = FRCNN_FPN(num_classes=2)
        obj_detect.load_state_dict(
            torch.load(args.obj_detect_model, map_location="cpu"))
    else:
        obj_detect = build(args)

    obj_detect.eval()
    if torch.cuda.is_available():
        obj_detect.cuda()
    obj_detects.append(obj_detect)

    # reid
    print("Initializing reID network(s).")
    reid_networks = []
    reid_cfg = os.path.join(os.path.dirname(args.reid_model),
                            'sacred_config.yaml')
    # NOTE(review): file handle from open() is never closed — consider `with`.
    reid_cfg = yaml.safe_load(open(reid_cfg))

    reid_network = ReIDNetwork_resnet50(pretrained=False,
                                        **reid_cfg['model_args'])
    reid_network.load_state_dict(
        torch.load(args.reid_model, map_location="cpu"))
    reid_network.eval()
    if torch.cuda.is_available():
        reid_network.cuda()

    reid_networks.append(reid_network)

    #FIXME: where is tracker coming from
    # tracktor
    # if oracle is not None:
    #     tracker = OracleTracker(
    #         obj_detect, reid_network, tracker, oracle)
    # else:
    tracker = Tracker(obj_detect, reid_network, vars(args))

    if args.profile:
        import torch.autograd.profiler as profiler

    time_total = 0
    num_frames = 0
    mot_accums = []
    dset_file = dataset
    dataset = Datasets(dataset)  # rebinds `dataset` from name to object

    print(dataset)
    # cycle() lets a single detector/reid pair serve every sequence.
    for seq, obj_detect, reid_network in zip(dataset, cycle(obj_detects),
                                             cycle(reid_networks)):
        print("IN da loop once")
        obj_detect.eval()
        tracker.obj_detect = obj_detect
        tracker.reid_network = reid_network
        tracker.reset()

        print(f"Tracking: {seq}")

        # Fractional frame range -> absolute indices for this sequence.
        start_frame = int(frame_range['start'] * len(seq))
        end_frame = int(frame_range['end'] * len(seq))

        seq_loader = DataLoader(
            torch.utils.data.Subset(seq, range(start_frame, end_frame)))
        num_frames += len(seq_loader)

        results = {}
        if load_results:
            print("Reading results")
            results = seq.load_results(output_dir)
        if not results:
            # No cached results — track the sequence from scratch.
            start = time.time()

            with torch.no_grad():
                for frame_data in tqdm(seq_loader):
                    tracker.step(frame_data)

            results = tracker.get_results()

            time_total += time.time() - start

            print(f"Tracks found: {len(results)}")
            print(f"Runtime for {seq}: {time.time() - start :.2f} s.")

            if interpolate:
                results = interpolate_tracks(results)

            print(f"Writing predictions to: {output_dir}")
            seq.write_results(results, output_dir)

        if seq.no_gt:
            print("No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq_loader))

        if write_images:
            plot_sequence(results, seq,
                          osp.join(output_dir, str(dataset), str(seq)),
                          write_images)

    if time_total:
        print(
            f"Tracking runtime for all sequences (without evaluation or image writing): "
            f"{time_total:.2f} s for {num_frames} frames ({num_frames / time_total:.2f} Hz)"
        )
    if mot_accums:
        print("Evaluation:")
        print(mot_accums)
        evaluate_mot_accums(mot_accums,
                            [str(s) for s in dataset if not s.no_gt],
                            generate_overall=True)
コード例 #9
0
ファイル: imm_pda.py プロジェクト: michaelbratsch/kalman
# Measurement-noise covariance (symmetric 2x2).
R = np.array([[s_xx_R, s_xy_R],
              [s_xy_R, s_yy_R]])

# Ground-truth trajectory: a straight leg followed by a left and a right turn.
measurement = DataGenerator(
    segments=[
        Steady(duration=40.0, heading=0.5 * math.pi, abs_speed=1.0),
        Turn(duration=15.0, abs_speed=1.0, turnrate=0.2),
        Turn(duration=15.0, abs_speed=1.0, turnrate=-0.2),
    ]
)

tracker = Tracker(filter_factory=filter_factory)

np.random.seed(42)  # reproducible measurement noise
dt = 1.0

for _ in range(200):
    # One true measurement per step, plus offset clutter at the same time.
    z = measurement.draw(dt=dt, R=R)
    tracker.filter(dt=dt, z=z, R=R)
    for i in range(1, 2):
        clutter = z + np.array([10.0 * i, 10.0 * i])
        tracker.filter(dt=0.0, z=clutter, R=R)

tracker.plot_all(vertical=False, dim=2)
tracker.show()