class dasiamrpn(object):
    """
    Wrapper class for incorporating DaSiamRPN into OpenLabeling
    (https://github.com/foolwood/DaSiamRPN,
    https://github.com/Cartucho/OpenLabeling)
    """
    def __init__(self):
        self.net = SiamRPNvot()
        self.net.load_state_dict(torch.load(
            join(realpath(dirname(__file__)), 'SiamRPNVOT.model')))
        self.net.eval().cuda()

    def init(self, init_frame, initial_bbox):
        """
        Initialize the DaSiamRPN tracker with the initial frame and bounding box.
        """
        target_pos, target_sz = self.bbox_to_pos(initial_bbox)
        self.state = SiamRPN_init(init_frame, target_pos, target_sz, self.net)

    def update(self, next_image):
        """
        Update the bounding box position and size on next_image. Returns True
        because tracking is terminated based on the number of frames predicted
        in OpenLabeling, not based on feedback from the tracking algorithm
        (unlike the OpenCV tracking algorithms).
        """
        self.state = SiamRPN_track(self.state, next_image)
        target_pos = self.state["target_pos"]
        target_sz = self.state["target_sz"]
        bbox = self.pos_to_bbox(target_pos, target_sz)
        return True, bbox

    def bbox_to_pos(self, initial_bbox):
        """
        Convert a bounding box from an (xmin, ymin, width, height) tuple to a
        tuple of two arrays holding the (x, y) coordinates of the box center
        and its (width, height), respectively.
        """
        xmin, ymin, w, h = initial_bbox
        cx = int(xmin + w / 2)
        cy = int(ymin + h / 2)
        target_pos = np.array([cx, cy])
        target_sz = np.array([w, h])
        return target_pos, target_sz

    def pos_to_bbox(self, target_pos, target_sz):
        """
        Invert the conversion done in bbox_to_pos.
        """
        w = target_sz[0]
        h = target_sz[1]
        xmin = int(target_pos[0] - w / 2)
        ymin = int(target_pos[1] - h / 2)
        return xmin, ymin, w, h
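A minimal, self-contained sketch of the coordinate conversion used by the wrapper above: the standalone bbox_to_pos/pos_to_bbox functions mirror the two methods and show that the round trip recovers the original (xmin, ymin, w, h) box up to the integer truncation of the center.

# Sketch: round-trip check for the (xmin, ymin, w, h) <-> (center, size)
# conversion used by the wrapper above. The standalone functions are
# illustrative copies of the methods, not part of OpenLabeling or DaSiamRPN.
import numpy as np


def bbox_to_pos(bbox):
    # (xmin, ymin, w, h) -> integer center and size, as in the wrapper.
    xmin, ymin, w, h = bbox
    return np.array([int(xmin + w / 2), int(ymin + h / 2)]), np.array([w, h])


def pos_to_bbox(target_pos, target_sz):
    # Center and size -> (xmin, ymin, w, h).
    w, h = target_sz[0], target_sz[1]
    return int(target_pos[0] - w / 2), int(target_pos[1] - h / 2), w, h


bbox = (10, 20, 50, 30)
pos, sz = bbox_to_pos(bbox)
print(pos, sz)           # [35 35] [50 30]
xmin, ymin, w, h = pos_to_bbox(pos, sz)
print(xmin, ymin, w, h)  # 10 20 50 30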
def __init__(self, flags):
    # Initialize the siam network.
    self._logger = erdos.utils.setup_logging(
        'multi_object_da_siam_rpn_tracker', flags.log_file_name)
    self._siam_net = SiamRPNvot()
    self._siam_net.load_state_dict(torch.load(
        flags.da_siam_rpn_model_path))
    self._siam_net.eval().cuda()
def __init__(self, flags, logger):
    # Initialize the siam network.
    self._logger = logger
    self._siam_net = SiamRPNvot()
    self._siam_net.load_state_dict(torch.load(
        flags.da_siam_rpn_model_path))
    self._siam_net.eval().cuda()
    self._trackers = []
    self._min_matching_iou = flags.min_matching_iou
    self._max_missed_detections = flags.obstacle_track_max_age
def __init__(self, flags):
    # Initialize the siam network.
    self._logger = erdos.utils.setup_logging(
        'multi_object_da_siam_rpn_tracker', flags.log_file_name)
    self._siam_net = SiamRPNvot()
    self._siam_net.load_state_dict(torch.load(
        flags.da_siam_rpn_model_path))
    self._siam_net.eval().cuda()
    self._trackers = []
    self._min_matching_iou = flags.min_matching_iou
    self._max_missed_detections = flags.obstacle_track_max_age
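The constructors above pull their configuration from a flags object. Below is a hedged sketch of the attributes they rely on (da_siam_rpn_model_path, log_file_name, min_matching_iou, obstacle_track_max_age); the SimpleNamespace stand-in and the concrete values are illustrative, not the project's flag definitions.

# Sketch: a stand-in for the flags object consumed by the constructors above.
# Attribute names mirror the snippets; the values are placeholders.
from types import SimpleNamespace

flags = SimpleNamespace(
    da_siam_rpn_model_path='dependencies/data/SiamRPNVOT.model',
    log_file_name=None,        # or a path to a log file
    min_matching_iou=0.5,      # IoU gate for detection/track matching
    obstacle_track_max_age=3,  # missed detections before a track is dropped
)

# tracker = MultiObjectDaSiamRPNTracker(flags)

min_matching_iou gates the detection-to-track matching and obstacle_track_max_age bounds how many missed detections a track survives, matching how the attributes are used in the trackers below.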
class MultiObjectDaSiamRPNTracker(MultiObjectTracker):
    def __init__(self, flags):
        # Initialize the siam network.
        self._siam_net = SiamRPNvot()
        self._siam_net.load_state_dict(torch.load(
            flags.da_siam_rpn_model_path))
        self._siam_net.eval().cuda()

    def reinitialize(self, frame, bboxes):
        # Create a tracker for each bbox.
        self._trackers = []
        for bbox in bboxes:
            self._trackers.append(
                SingleObjectDaSiamRPNTracker(frame, bbox, self._siam_net))
def __init__(self):
    self.net = SiamRPNvot()
    # Check whether SiamRPNVOT.model has already been downloaded; exit with
    # download instructions otherwise.
    model_path = join(realpath(dirname(__file__)), 'DaSiamRPN', 'code',
                      'SiamRPNVOT.model')
    print(model_path)
    if not exists(model_path):
        print('\nError: model not found. Please download the pre-trained '
              'model and copy it to the directory \'DaSiamRPN/code/\'\n')
        print('\tdownload link: https://drive.google.com/file/d/1-vNVZxfbIplXHrqMHiJJYWXYWsOIvGsf/view')
        exit()
    self.net.load_state_dict(torch.load(model_path))
    self.net.eval().cuda()
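When the model file is missing, the snippet above exits with download instructions. Below is a hedged sketch of fetching the weights automatically instead; MODEL_URL is a placeholder for a direct-download link (the Google Drive URL printed above requires a confirmation step, so it cannot be passed to urlretrieve verbatim), and ensure_model is a hypothetical helper.

# Sketch: download SiamRPNVOT.model when it is missing, instead of exiting.
# MODEL_URL is a placeholder; substitute a direct-download link.
import os
from os.path import dirname, exists, join, realpath
from urllib.request import urlretrieve

MODEL_URL = 'https://example.com/SiamRPNVOT.model'  # placeholder


def ensure_model(model_dir=None):
    # Default to the same DaSiamRPN/code/ directory used by the snippet above.
    model_dir = model_dir or join(realpath(dirname(__file__)),
                                  'DaSiamRPN', 'code')
    model_path = join(model_dir, 'SiamRPNVOT.model')
    if not exists(model_path):
        os.makedirs(model_dir, exist_ok=True)
        print('Downloading SiamRPNVOT.model to {}'.format(model_path))
        urlretrieve(MODEL_URL, model_path)
    return model_path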
def __init__(self):
    self.net = SiamRPNvot()
    # Check whether SiamRPNVOT.model has already been downloaded; exit with
    # download instructions otherwise.
    model_path = join(realpath(dirname(__file__)), 'DaSiamRPN', 'code',
                      'SiamRPNVOT.model')
    print(model_path)
    if not exists(model_path):
        print('\nError: model not found. Please download the pre-trained '
              'model and copy it to the directory \'DaSiamRPN/code/\'\n')
        print('\tdownload link: https://github.com/fogx/DaSiamRPN_noCUDA/blob/master/SiamRPNVOT.model')
        exit()
    # Load onto the GPU when available, otherwise fall back to the CPU.
    # `device` is assumed to be defined at module level.
    if torch.cuda.is_available():
        self.net.load_state_dict(torch.load(model_path))
    else:
        self.net.load_state_dict(torch.load(model_path, map_location='cpu'))
    self.net.eval().to(device)
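The variant above references a device that is defined outside the snippet. Below is a minimal sketch of the usual torch pattern, assuming nothing beyond standard torch APIs: choose the device once and hand it to torch.load as map_location, which covers both the CUDA and CPU cases without the branch.

# Sketch: device selection for the snippet above. `device` is assumed to be a
# module-level torch.device; torch.load accepts it as map_location, so the
# CUDA/CPU branch can be collapsed.
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# net = SiamRPNvot()
# net.load_state_dict(torch.load(model_path, map_location=device))
# net.eval().to(device)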
class MultiObjectDaSiamRPNTracker(MultiObjectTracker):
    def __init__(self, flags):
        # Initialize the siam network.
        self._logger = erdos.utils.setup_logging(
            'multi_object_da_siam_rpn_tracker', flags.log_file_name)
        self._siam_net = SiamRPNvot()
        self._siam_net.load_state_dict(torch.load(
            flags.da_siam_rpn_model_path))
        self._siam_net.eval().cuda()
        self._trackers = []
        self._min_matching_iou = flags.min_matching_iou
        self._max_missed_detections = flags.obstacle_track_max_age

    def initialize(self, frame, obstacles):
        """ Initializes a multiple obstacle tracker.

        Args:
            frame: perception.camera_frame.CameraFrame to initialize with.
            obstacles: List of perception.detection.utils.DetectedObstacle.
        """
        # Create a tracker for each obstacle.
        for obstacle in obstacles:
            self._trackers.append(
                SingleObjectDaSiamRPNTracker(frame, obstacle, self._siam_net))

    def reinitialize(self, frame, obstacles):
        """ Reinitializes a multiple obstacle tracker.

        Args:
            frame: perception.camera_frame.CameraFrame to reinitialize with.
            obstacles: List of perception.detection.utils.DetectedObstacle.
        """
        # If no obstacles are passed in, continue the existing tracks and
        # return.
        if not obstacles:
            self.track(frame)
            return
        if self._trackers == []:
            self.initialize(frame, obstacles)
        else:
            # Update the bounding boxes so that the matching happens between
            # bounding boxes computed on the same frame.
            self.track(frame, False)
            # Create a matrix of similarities between detection and tracker
            # bounding boxes.
            cost_matrix = self._create_hungarian_cost_matrix(obstacles)
            # Run linear assignment (Hungarian algorithm) on the matrix.
            row_ids, col_ids = solve_dense(cost_matrix)
            matched_map = {}
            for row_id, col_id in zip(row_ids, col_ids):
                matched_map[self._trackers[col_id].obstacle.id] = row_id
            matched_obstacle_indices, matched_tracker_indices = \
                set(row_ids), set(col_ids)
            updated_trackers = []
            # Separate matched and unmatched trackers and obstacles.
            unmatched_tracker_indices, matched_trackers, unmatched_trackers = \
                self._separate_matches_from_unmatched(self._trackers,
                                                      matched_tracker_indices)
            unmatched_obstacle_indices, matched_obstacles, unmatched_obstacles = \
                self._separate_matches_from_unmatched(obstacles,
                                                      matched_obstacle_indices)
            # Add successfully matched trackers to updated_trackers.
            for tracker in matched_trackers:
                tracker.missed_det_updates = 0
                # Update the tracker's internal bounding box. If we don't do
                # this, the tracker's bounding box degrades across the frames
                # until it no longer overlaps with the bounding box the
                # detector outputs.
                tracker.reset_bbox(
                    obstacles[matched_map[tracker.obstacle.id]].bounding_box)
                updated_trackers.append(tracker)
            # Add 1 to the age of any unmatched trackers, and filter out old
            # ones.
            for tracker in unmatched_trackers:
                tracker.missed_det_updates += 1
                if tracker.missed_det_updates <= self._max_missed_detections:
                    updated_trackers.append(tracker)
                else:
                    self._logger.debug("Dropping tracker with id {}".format(
                        tracker.obstacle.id))
            # Initialize trackers for unmatched obstacles.
            for obstacle in unmatched_obstacles:
                updated_trackers.append(
                    SingleObjectDaSiamRPNTracker(frame, obstacle,
                                                 self._siam_net))
            # Keep one tracker per obstacle id; prefer trackers with recent
            # detection updates.
            unique_updated_trackers = {}
            for tracker in updated_trackers:
                if tracker.obstacle.id not in unique_updated_trackers:
                    unique_updated_trackers[tracker.obstacle.id] = tracker
                elif (unique_updated_trackers[tracker.obstacle.id].
                      missed_det_updates > tracker.missed_det_updates):
                    unique_updated_trackers[tracker.obstacle.id] = tracker
            self._trackers = list(unique_updated_trackers.values())

    def track(self, frame, missed_detection=True):
        """ Tracks obstacles in a frame.

        Args:
            frame: perception.camera_frame.CameraFrame to track in.
            missed_detection: Whether this update counts against the trackers'
                missed-detection age.
        """
        tracked_obstacles = []
        for tracker in self._trackers:
            tracked_obstacles.append(tracker.track(frame))
            if missed_detection:
                tracker.missed_det_updates += 1
        self._trackers = [
            tracker for tracker in self._trackers
            if tracker.missed_det_updates <= self._max_missed_detections
        ]
        return True, tracked_obstacles

    def _create_hungarian_cost_matrix(self, obstacles):
        # Create a cost matrix with shape (num_obstacles, num_trackers).
        cost_matrix = [[0 for _ in range(len(self._trackers))]
                       for __ in range(len(obstacles))]
        for i, obstacle in enumerate(obstacles):
            for j, tracker in enumerate(self._trackers):
                obstacle_bbox = obstacle.bounding_box
                tracker_bbox = tracker.obstacle.bounding_box
                iou = obstacle_bbox.calculate_iou(tracker_bbox)
                # If the track is too far from the detection, mark the pair
                # impossible with np.nan.
                if iou >= self._min_matching_iou:
                    cost_matrix[i][j] = iou
                else:
                    cost_matrix[i][j] = np.nan
        return np.array(cost_matrix)

    def _separate_matches_from_unmatched(self, obstacles,
                                         matched_obstacle_indices):
        unmatched_obstacle_indices = \
            set(range(len(obstacles))) - matched_obstacle_indices
        matched_obstacles = [obstacles[i] for i in matched_obstacle_indices]
        unmatched_obstacles = [
            obstacles[i] for i in unmatched_obstacle_indices
        ]
        return unmatched_obstacle_indices, matched_obstacles, \
            unmatched_obstacles
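The tail of reinitialize above keeps one tracker per obstacle id, preferring the tracker with the fewest missed detection updates. Below is a self-contained sketch of that selection; the Track namedtuple and the sample ids/ages are stand-ins for the real tracker objects.

# Sketch: keep one tracker per obstacle id, preferring the tracker with the
# fewest missed detection updates, as in reinitialize() above. The Track
# namedtuple and the sample values are stand-ins.
from collections import namedtuple

Track = namedtuple('Track', ['obstacle_id', 'missed_det_updates'])

updated_trackers = [
    Track(obstacle_id=1, missed_det_updates=2),
    Track(obstacle_id=1, missed_det_updates=0),  # fresher duplicate of id 1
    Track(obstacle_id=2, missed_det_updates=1),
]

unique = {}
for tracker in updated_trackers:
    if (tracker.obstacle_id not in unique
            or unique[tracker.obstacle_id].missed_det_updates
            > tracker.missed_det_updates):
        unique[tracker.obstacle_id] = tracker

print(list(unique.values()))
# [Track(obstacle_id=1, missed_det_updates=0),
#  Track(obstacle_id=2, missed_det_updates=1)]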
class dasiamrpn(object):
    """
    Wrapper class for incorporating DaSiamRPN into OpenLabeling
    (https://github.com/foolwood/DaSiamRPN,
    https://github.com/Cartucho/OpenLabeling)
    """
    def __init__(self):
        self.net = SiamRPNvot()
        # Check whether SiamRPNVOT.model has already been downloaded; exit
        # with download instructions otherwise.
        model_path = join(realpath(dirname(__file__)), 'DaSiamRPN', 'code',
                          'SiamRPNVOT.model')
        print(model_path)
        if not exists(model_path):
            print('\nError: model not found. Please download the pre-trained '
                  'model and copy it to the directory \'DaSiamRPN/code/\'\n')
            print('\tdownload link: https://github.com/fogx/DaSiamRPN_noCUDA/blob/master/SiamRPNVOT.model')
            exit()
        # Load onto the GPU when available, otherwise fall back to the CPU.
        # `device` is assumed to be defined at module level.
        if torch.cuda.is_available():
            self.net.load_state_dict(torch.load(model_path))
        else:
            self.net.load_state_dict(torch.load(model_path,
                                                map_location='cpu'))
        self.net.eval().to(device)

    def init(self, init_frame, initial_bbox):
        """
        Initialize the DaSiamRPN tracker with the initial frame and bounding box.
        """
        target_pos, target_sz = self.bbox_to_pos(initial_bbox)
        self.state = SiamRPN_init(init_frame, target_pos, target_sz, self.net)

    def update(self, next_image):
        """
        Update the box position and size on next_image. Returns True because
        tracking is terminated based on the number of frames predicted in
        OpenLabeling, not based on feedback from the tracking algorithm
        (unlike the OpenCV tracking algorithms).
        """
        self.state = SiamRPN_track(self.state, next_image)
        target_pos = self.state["target_pos"]
        target_sz = self.state["target_sz"]
        bbox = self.pos_to_bbox(target_pos, target_sz)
        return True, bbox

    def bbox_to_pos(self, initial_bbox):
        """
        Convert a bounding box from an (xmin, ymin, width, height) tuple to a
        tuple of two arrays holding the (x, y) coordinates of the box center
        and its (width, height), respectively.
        """
        xmin, ymin, w, h = initial_bbox
        cx = int(xmin + w / 2)
        cy = int(ymin + h / 2)
        target_pos = np.array([cx, cy])
        target_sz = np.array([w, h])
        return target_pos, target_sz

    def pos_to_bbox(self, target_pos, target_sz):
        """
        Invert the conversion done in bbox_to_pos.
        """
        w = target_sz[0]
        h = target_sz[1]
        xmin = int(target_pos[0] - w / 2)
        ymin = int(target_pos[1] - h / 2)
        return xmin, ymin, w, h
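A hedged usage sketch of the wrapper above in an OpenLabeling-style loop: init on the first frame with a hand-drawn box, then update on every following frame. It assumes the DaSiamRPN sources and SiamRPNVOT.model are in place; the video path and the initial (xmin, ymin, w, h) box are placeholders.

# Sketch: driving the dasiamrpn wrapper above frame by frame. Assumes the
# DaSiamRPN code and SiamRPNVOT.model are available; the video path and the
# initial box are placeholders.
import cv2

tracker = dasiamrpn()
cap = cv2.VideoCapture('input/video.mp4')  # placeholder path

ok, first_frame = cap.read()
tracker.init(first_frame, (100, 120, 60, 40))  # placeholder initial box

while True:
    ok, frame = cap.read()
    if not ok:
        break
    success, (xmin, ymin, w, h) = tracker.update(frame)
    # target_sz comes back as floats, so cast before drawing.
    cv2.rectangle(frame, (int(xmin), int(ymin)),
                  (int(xmin + w), int(ymin + h)), (0, 255, 0), 2)
    cv2.imshow('DaSiamRPN', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()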
def __init__(self):
    self.net = SiamRPNvot()
    self.net.load_state_dict(torch.load(
        join(realpath(dirname(__file__)), 'SiamRPNVOT.model')))
    self.net.eval().cuda()
class MultiObjectDaSiamRPNTracker(MultiObjectTracker):
    def __init__(self, flags):
        # Initialize the siam network.
        self._logger = erdos.utils.setup_logging(
            'multi_object_da_siam_rpn_tracker', flags.log_file_name)
        self._siam_net = SiamRPNvot()
        self._siam_net.load_state_dict(torch.load(
            flags.da_siam_rpn_model_path))
        self._siam_net.eval().cuda()

    def reinitialize(self, frame, obstacles):
        """ Reinitializes a multiple obstacle tracker.

        Args:
            frame: perception.camera_frame.CameraFrame to reinitialize with.
            obstacles: List of perception.detection.utils.DetectedObstacle.
        """
        # Create a tracker for each obstacle.
        self._trackers = [
            SingleObjectDaSiamRPNTracker(frame, obstacle, self._siam_net)
            for obstacle in obstacles
        ]

    def reinitialize_new(self, frame, obstacles):
        # Create a matrix of similarities between detection and tracker
        # bounding boxes.
        cost_matrix = self._create_hungarian_cost_matrix(
            frame.frame, obstacles)
        # Run sklearn linear assignment (Hungarian algorithm) on the matrix.
        assignments = linear_assignment(cost_matrix)
        updated_trackers = []
        # Add matched trackers to updated_trackers.
        for obstacle_idx, tracker_idx in assignments:
            obstacles[obstacle_idx].id = self._trackers[tracker_idx].obj_id
            updated_trackers.append(
                SingleObjectDaSiamRPNTracker(frame, obstacles[obstacle_idx],
                                             self._siam_net))
        # Add 1 to the age of any unmatched trackers, and filter out old ones.
        if len(self._trackers) > len(obstacles):
            for i, tracker in enumerate(self._trackers):
                if i not in assignments[:, 1]:
                    tracker.missed_det_updates += 1
                    if tracker.missed_det_updates < MAX_TRACKER_AGE:
                        updated_trackers.append(tracker)
        # Create new trackers for new bboxes.
        elif len(obstacles) > len(self._trackers):
            for i, obstacle in enumerate(obstacles):
                if i not in assignments[:, 0]:
                    updated_trackers.append(
                        SingleObjectDaSiamRPNTracker(frame, obstacle,
                                                     self._siam_net))
        self._trackers = updated_trackers

    def _create_hungarian_cost_matrix(self, frame, obstacles):
        # Create a cost matrix with shape (num_bboxes, num_trackers).
        cost_matrix = [[0 for _ in range(len(self._trackers))]
                       for __ in range(len(obstacles))]
        for i, obstacle in enumerate(obstacles):
            for j, tracker in enumerate(self._trackers):
                obstacle_bbox = obstacle.bounding_box
                tracker_bbox = tracker.obstacle.bounding_box
                # Get crops from the frame.
                self._logger.debug('{} {}'.format(obstacle_bbox,
                                                  tracker_bbox))
                bbox_crop = frame[obstacle_bbox.y_min:obstacle_bbox.y_max,
                                  obstacle_bbox.x_min:obstacle_bbox.x_max]
                tracker_bbox_crop = frame[
                    tracker_bbox.y_min:tracker_bbox.y_max,
                    tracker_bbox.x_min:tracker_bbox.x_max]
                # Resize the larger crop to the same shape as the smaller one.
                bbox_area = np.prod(bbox_crop.shape[:2])
                tracker_bbox_area = np.prod(tracker_bbox_crop.shape[:2])
                if bbox_area < tracker_bbox_area:
                    self._logger.debug(tracker_bbox_crop.shape)
                    tracker_bbox_crop = cv2.resize(
                        tracker_bbox_crop,
                        bbox_crop.shape[:2][::-1],  # cv2 needs width, then height
                        interpolation=cv2.INTER_AREA)
                else:
                    self._logger.debug(bbox_crop.shape)
                    bbox_crop = cv2.resize(
                        bbox_crop,
                        tracker_bbox_crop.shape[:2][::-1],  # cv2 needs width, then height
                        interpolation=cv2.INTER_AREA)
                # Use SSIM as the metric for crop similarity and assign it to
                # the matrix.
                self._logger.debug('{} {}'.format(
                    bbox_crop.shape,
                    tracker_bbox_crop.transpose((1, 0, 2)).shape))
                cost_matrix[i][j] = compare_ssim(bbox_crop,
                                                 tracker_bbox_crop,
                                                 multichannel=True)
        return np.array(cost_matrix)
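The cost matrix above scores detection/track pairs by SSIM between image crops resized to a common shape. Below is a self-contained sketch of that comparison on random data; it uses skimage.metrics.structural_similarity, the replacement for compare_ssim in newer scikit-image (older releases take multichannel=True instead of channel_axis).

# Sketch: SSIM between two crops after resizing them to a common shape, as in
# _create_hungarian_cost_matrix above. Random images stand in for real crops.
import cv2
import numpy as np
from skimage.metrics import structural_similarity  # compare_ssim in old skimage

rng = np.random.default_rng(0)
bbox_crop = rng.integers(0, 256, (40, 30, 3), dtype=np.uint8)
tracker_bbox_crop = rng.integers(0, 256, (60, 45, 3), dtype=np.uint8)

# Resize the larger crop down to the smaller one; cv2.resize expects
# (width, height), hence the reversed shape.
if np.prod(bbox_crop.shape[:2]) < np.prod(tracker_bbox_crop.shape[:2]):
    tracker_bbox_crop = cv2.resize(tracker_bbox_crop,
                                   bbox_crop.shape[:2][::-1],
                                   interpolation=cv2.INTER_AREA)
else:
    bbox_crop = cv2.resize(bbox_crop, tracker_bbox_crop.shape[:2][::-1],
                           interpolation=cv2.INTER_AREA)

similarity = structural_similarity(bbox_crop, tracker_bbox_crop,
                                   channel_axis=-1)
print(similarity)  # in [-1, 1]; higher means more similar crops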
def __init__(self, flags):
    # Initialize the siam network.
    self._siam_net = SiamRPNvot()
    self._siam_net.load_state_dict(torch.load(
        flags.da_siam_rpn_model_path))
    self._siam_net.eval().cuda()
def __init__(self, flags):
    # Initialize the siam network.
    self._siam_net = SiamRPNvot()
    self._siam_net.load_state_dict(
        torch.load('dependencies/data/SiamRPNVOT.model'))
    self._siam_net.eval().cuda()
class MultiObjectDaSiamRPNTracker(MultiObjectTracker):
    def __init__(self, flags):
        # Initialize the siam network.
        self._logger = erdos.utils.setup_logging(
            'multi_object_da_siam_rpn_tracker', flags.log_file_name)
        self._siam_net = SiamRPNvot()
        self._siam_net.load_state_dict(torch.load(
            flags.da_siam_rpn_model_path))
        self._siam_net.eval().cuda()
        self._trackers = []

    def initialize(self, frame, obstacles):
        """ Initializes a multiple obstacle tracker.

        Args:
            frame: perception.camera_frame.CameraFrame to initialize with.
            obstacles: List of perception.detection.utils.DetectedObstacle.
        """
        # Create a tracker for each obstacle.
        for obstacle in obstacles:
            self._trackers.append(
                SingleObjectDaSiamRPNTracker(frame, obstacle, self._siam_net))

    def reinitialize(self, frame, obstacles):
        if self._trackers == []:
            self.initialize(frame, obstacles)
        # Create a matrix of similarities between detection and tracker
        # bounding boxes.
        cost_matrix = self._create_hungarian_cost_matrix(
            frame.frame, obstacles)
        # Run linear assignment (Hungarian algorithm) on the matrix.
        row_ids, col_ids = solve_dense(cost_matrix)
        matched_obstacle_indices, matched_tracker_indices = \
            set(row_ids), set(col_ids)
        updated_trackers = []
        # Separate matched and unmatched tracks.
        unmatched_tracker_indices = \
            set(range(len(self._trackers))) - matched_tracker_indices
        matched_trackers = [self._trackers[i]
                            for i in matched_tracker_indices]
        unmatched_trackers = [
            self._trackers[i] for i in unmatched_tracker_indices
        ]
        # Separate matched and unmatched detections.
        unmatched_obstacle_indices = \
            set(range(len(obstacles))) - matched_obstacle_indices
        matched_obstacles = [obstacles[i] for i in matched_obstacle_indices]
        unmatched_obstacles = [
            obstacles[i] for i in unmatched_obstacle_indices
        ]
        # Add successfully matched trackers to updated_trackers.
        for tracker in matched_trackers:
            tracker.missed_det_updates = 0
            updated_trackers.append(tracker)
        # Add 1 to the age of any unmatched trackers, and filter out old ones.
        for tracker in unmatched_trackers:
            tracker.missed_det_updates += 1
            if tracker.missed_det_updates < MAX_MISSED_DETECTIONS:
                updated_trackers.append(tracker)
            else:
                self._logger.debug("Dropping tracker with id {}".format(
                    tracker.obstacle.id))
        for obstacle in unmatched_obstacles:
            updated_trackers.append(
                SingleObjectDaSiamRPNTracker(frame, obstacle, self._siam_net))
        self._trackers = updated_trackers

    def _create_hungarian_cost_matrix(self, frame, obstacles):
        # Create a cost matrix with shape (num_bboxes, num_trackers).
        cost_matrix = [[0 for _ in range(len(self._trackers))]
                       for __ in range(len(obstacles))]
        for i, obstacle in enumerate(obstacles):
            for j, tracker in enumerate(self._trackers):
                obstacle_bbox = obstacle.bounding_box
                tracker_bbox = tracker.obstacle.bounding_box
                iou = obstacle_bbox.calculate_iou(tracker_bbox)
                # If the track is too far from the detection, mark the pair
                # impossible with np.nan.
                if iou > ASSOCIATION_THRESHOLD:
                    cost_matrix[i][j] = iou
                else:
                    cost_matrix[i][j] = np.nan
        return np.array(cost_matrix)
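To see what _create_hungarian_cost_matrix and solve_dense do together, here is a self-contained toy run on axis-aligned (xmin, ymin, xmax, ymax) boxes. It assumes lapsolver's solve_dense, the solver used above, which treats np.nan entries as forbidden pairs; the standalone iou helper stands in for the bounding box calculate_iou method.

# Sketch: toy IoU-gated matching, mirroring the reinitialize() logic above.
# Boxes are (xmin, ymin, xmax, ymax); iou() stands in for the project's
# bounding-box calculate_iou method.
import numpy as np
from lapsolver import solve_dense  # same solver as in the snippet above


def iou(a, b):
    ix = max(0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union if union > 0 else 0.0


detections = [(0, 0, 10, 10), (50, 50, 60, 60)]
tracks = [(52, 51, 62, 61), (1, 1, 11, 11)]
min_matching_iou = 0.25

cost_matrix = np.full((len(detections), len(tracks)), np.nan)
for i, det in enumerate(detections):
    for j, trk in enumerate(tracks):
        overlap = iou(det, trk)
        if overlap >= min_matching_iou:  # gate far-apart pairs, as above
            cost_matrix[i][j] = overlap

row_ids, col_ids = solve_dense(cost_matrix)
for det_idx, trk_idx in zip(row_ids, col_ids):
    print('detection', det_idx, '-> track', trk_idx)
# detection 0 -> track 1
# detection 1 -> track 0

solve_dense returns a minimum-cost assignment over the finite entries, so in the snippets above it is the NaN gate that actually rules out low-overlap detection/track pairs.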