def initial_finetune(self, img, detection_box):
    self.stopwatch.start('initial_finetune')
    t = time.time()

    # generate positive/negative training samples around the detection box
    pos_num, neg_num = ADNetConf.g()['initial_finetune']['pos_num'], ADNetConf.g()['initial_finetune']['neg_num']
    pos_boxes, neg_boxes = detection_box.get_posneg_samples(self.imgwh, pos_num, neg_num, use_whole=True)
    pos_lb_action = BoundingBox.get_action_labels(pos_boxes, detection_box)

    # cache conv features per sample so finetuning only re-runs the FC layers
    feats = self._get_features([commons.extract_region(img, box) for box in pos_boxes])
    for box, feat in zip(pos_boxes, feats):
        box.feat = feat
    feats = self._get_features([commons.extract_region(img, box) for box in neg_boxes])
    for box, feat in zip(neg_boxes, feats):
        box.feat = feat

    # train_fc_finetune_hem
    self._finetune_fc(
        img, pos_boxes, neg_boxes, pos_lb_action,
        ADNetConf.get()['initial_finetune']['learning_rate'],
        ADNetConf.get()['initial_finetune']['iter']
    )

    self.histories.append((pos_boxes, neg_boxes, pos_lb_action, np.copy(img), self.iteration))
    _logger.info('ADNetRunner.initial_finetune t=%.3f' % (time.time() - t))
    self.stopwatch.stop('initial_finetune')
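# `_get_features` is referenced throughout but not part of this excerpt. A
# minimal sketch, assuming the network exposes its conv-feature output as
# `self.adnet.layer_feat` (that attribute name and the batch size of 64 are
# assumptions, not confirmed by this file):
def _get_features_sketch(self, patches):
    feats = []
    for start in range(0, len(patches), 64):
        batch = patches[start:start + 64]
        feat = self.persistent_sess.run(self.adnet.layer_feat, feed_dict={
            self.adnet.input_tensor: batch,
            self.tensor_is_training: False
        })
        feats.extend(feat)
    return feats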
def __init__(self):
    self.tensor_input = tf.placeholder(tf.float32, shape=(None, 112, 112, 3), name='patch')
    self.tensor_action_history = tf.placeholder(tf.float32, shape=(None, 1, 1, 110), name='action_history')
    self.tensor_lb_action = tf.placeholder(tf.int32, shape=(None,), name='lb_action')
    self.tensor_lb_class = tf.placeholder(tf.int32, shape=(None,), name='lb_class')
    self.tensor_is_training = tf.placeholder(tf.bool, name='is_training')
    self.learning_rate_placeholder = tf.placeholder(tf.float32, [], name='learning_rate')

    self.persistent_sess = tf.Session(config=tf.ConfigProto(
        inter_op_parallelism_threads=1,
        intra_op_parallelism_threads=1
    ))

    self.adnet = ADNetwork(self.learning_rate_placeholder)
    self.adnet.create_network(self.tensor_input, self.tensor_lb_action, self.tensor_lb_class,
                              self.tensor_action_history, self.tensor_is_training)
    if 'ADNET_MODEL_PATH' in os.environ.keys():
        self.adnet.read_original_weights(self.persistent_sess, os.environ['ADNET_MODEL_PATH'])
    else:
        self.adnet.read_original_weights(self.persistent_sess)

    self.action_histories = np.array([0] * ADNetConf.get()['action_history'], dtype=np.int8)
    self.action_histories_old = np.array([0] * ADNetConf.get()['action_history'], dtype=np.int8)
    self.histories = []
    self.iteration = 0
    self.imgwh = None

    self.callback_redetection = self.redetection_by_sampling
    self.failed_cnt = 0
    self.latest_score = 0

    self.stopwatch = StopWatchManager()
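# `commons.onehot_flatten` is not shown in this excerpt. A minimal sketch of a
# consistent implementation, assuming the (1, 1, 110) action-history input is
# the last `action_history` (presumably 10) actions one-hot encoded over the 11
# action classes and flattened; out-of-range markers such as the failure code
# 12 used in tracking() then encode as all-zero rows:
def onehot_flatten_sketch(action_histories, num_actions=11):
    onehot = np.zeros((len(action_histories), num_actions), dtype=np.float32)
    for row, action_idx in enumerate(action_histories):
        if 0 <= action_idx < num_actions:
            onehot[row, action_idx] = 1.0
    return onehot.reshape((1, 1, -1))    # (1, 1, 110) for a 10-step history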
def do_action(self, imgwh, action_idx):
    action_ratios = tuple([ADNetConf.get()['action_move'][x] for x in 'xywh'])

    if action_idx < 8:
        # translation actions: shift by a ratio of the current size, at least 1px
        deltas_xy = self.wh * action_ratios[:2]
        deltas_xy.max(1)
        actual_deltas = ADNetwork.ACTIONS[action_idx][:2] * (deltas_xy.x, deltas_xy.y)
        moved_xy = self.xy + actual_deltas
        new_box = BoundingBox(moved_xy.x, moved_xy.y, self.wh.x, self.wh.y)
    elif action_idx == 8:
        # stop action: return a copy of the current box
        new_box = BoundingBox(self.xy.x, self.xy.y, self.wh.x, self.wh.y)
    else:
        # scale actions: grow/shrink around the center, at least 2px
        deltas_wh = self.wh * action_ratios[2:]
        deltas_wh.max(2)
        deltas_wh_scaled = ADNetwork.ACTIONS[action_idx][2:] * (deltas_wh.x, deltas_wh.y)
        moved_xy = self.xy + -1 * deltas_wh_scaled / 2
        moved_wh = self.wh + deltas_wh_scaled
        new_box = BoundingBox(moved_xy.x, moved_xy.y, moved_wh.x, moved_wh.y)

    if imgwh:
        new_box.fit_image(imgwh)
    return new_box
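# A worked sketch of the geometry above, assuming the conventional ADNet action
# table (indices 0-7 translate, 8 stops, 9-10 rescale) and an action_move ratio
# of 0.03 for x/y -- both values are assumptions; check the ACTIONS table and
# conf/*.yaml:
#   box = BoundingBox(50, 50, 100, 100)
#   moved = box.do_action(imgwh=None, action_idx=0)   # e.g. a leftward move
#   # horizontal step = max(100 * 0.03, 1) = 3, so moved.xy == (47, 50)
# Scale actions adjust wh by max(wh * ratio, 2) and shift xy by half of that,
# which keeps the box center fixed.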
def extract_region(img, bbox):
    # zoom out around the box center by the configured ROI factor
    xy_center = bbox.xy + bbox.wh * 0.5
    wh = bbox.wh * ADNetConf.get()['predict']['roi_zoom']
    xy = xy_center - wh * 0.5
    xy.x = max(xy.x, 0)
    xy.y = max(xy.y, 0)

    # crop and resize to the 112x112 network input; slice indices must be ints
    crop = img[int(xy.y):int(xy.y + wh.y), int(xy.x):int(xy.x + wh.x), :]
    resize = cv2.resize(crop, (112, 112))
    return resize
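# Minimal usage sketch for extract_region (the image path is illustrative,
# matching the test data referenced elsewhere in this excerpt):
#   frame = cv2.imread('./data/BlurCar2/img/0001.jpg')
#   patch = extract_region(frame, BoundingBox(50, 40, 80, 60))
#   assert patch.shape == (112, 112, 3)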
def get_action_label(sample, gt_box):
    # simulate every action once and measure the IoU against the ground truth
    ious = []
    for i in range(ADNetwork.NUM_ACTIONS):
        moved_box = sample.do_action(imgwh=None, action_idx=i)
        iou = gt_box.iou(moved_box)
        ious.append(iou)

    # prefer the stop action once the overlap is already good enough
    if ious[ADNetwork.ACTION_IDX_STOP] > ADNetConf.get()['predict']['stop_iou']:
        return ADNetwork.ACTION_IDX_STOP
    if max(ious[:-2]) * 0.99999 <= ious[ADNetwork.ACTION_IDX_STOP]:
        return np.argmax(ious)
    # return random.choice([i for i, x in enumerate(ious) if x >= max(ious)])
    # otherwise pick the best action, excluding the last two (scale) actions
    return np.argmax(ious[:-2])
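# `BoundingBox.get_action_labels` (plural), used by initial_finetune() and
# tracking(), is not shown in this excerpt. A minimal sketch, assuming it just
# maps the single-box labeller above over a batch of sampled boxes:
def get_action_labels_sketch(boxes, gt_box):
    return [get_action_label(box, gt_box) for box in boxes]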
    whole_samples = []

    pos_samples = []
    for _ in range(pos_size):
        pos_samples.append(random.choice(gaussian_samples))

    neg_candidates = uniform_samples + whole_samples
    neg_samples = []
    for _ in range(neg_size):
        neg_samples.append(random.choice(neg_candidates))

    return pos_samples, neg_samples


if __name__ == '__main__':
    ADNetConf.get('./conf/large.yaml')

    # iou test
    box_a = BoundingBox(0, 0, 100, 100)
    box_b = BoundingBox(0, 0, 50, 10)
    assert box_a.iou(box_b) == 0.05

    box_a = BoundingBox(0, 0, 10, 10)
    box_b = BoundingBox(5, 7, 7, 10)
    assert 0.096 < box_a.iou(box_b) < 0.097

    # random generator test
    gt_box = BoundingBox.read_vid_gt('./data/BlurCar2/')[0]
    gt_box.wh.x = gt_box.wh.y = 30
    imgpath = os.path.join('./data/BlurCar2/', 'img', '0001.jpg')
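# gaussian_samples / uniform_samples are produced by the truncated top of
# get_posneg_samples. A minimal sketch of how one such candidate is typically
# drawn (the trans_f/scale_f defaults here are illustrative assumptions, not
# the repo's configured parameters): positives jitter tightly around the
# ground truth; negatives use wider perturbations and are filtered by IoU.
def sample_box_sketch(gt_box, imgwh, trans_f=0.1, scale_f=0.05):
    dx = random.gauss(0, trans_f) * gt_box.wh.x
    dy = random.gauss(0, trans_f) * gt_box.wh.y
    ds = 1.0 + random.gauss(0, scale_f)
    box = BoundingBox(gt_box.xy.x + dx, gt_box.xy.y + dy,
                      gt_box.wh.x * ds, gt_box.wh.y * ds)
    box.fit_image(imgwh)
    return box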
def tracking(self, img, curr_bbox):
    self.iteration += 1
    is_tracked = True
    boxes = []
    self.latest_score = -1

    self.stopwatch.start('tracking.do_action')
    for track_i in range(ADNetConf.get()['predict']['num_action']):
        patch = commons.extract_region(img, curr_bbox)

        # forward with image & action history
        actions, classes = self.persistent_sess.run(
            [self.adnet.layer_actions, self.adnet.layer_scores],
            feed_dict={
                self.adnet.input_tensor: [patch],
                self.adnet.action_history_tensor: [commons.onehot_flatten(self.action_histories)],
                self.tensor_is_training: False
            }
        )

        latest_score = classes[0][1]
        if latest_score < ADNetConf.g()['predict']['thresh_fail']:
            # confidence fell below the failure threshold: mark the track lost
            # and push an out-of-range marker (12) into the action history
            is_tracked = False
            self.action_histories_old = np.copy(self.action_histories)
            self.action_histories = np.insert(self.action_histories, 0, 12)[:-1]
            break
        else:
            self.failed_cnt = 0
        self.latest_score = latest_score

        # move box
        action_idx = np.argmax(actions[0])
        self.action_histories = np.insert(self.action_histories, 0, action_idx)[:-1]
        prev_bbox = curr_bbox
        curr_bbox = curr_bbox.do_action(self.imgwh, action_idx)
        if action_idx != ADNetwork.ACTION_IDX_STOP:
            if prev_bbox == curr_bbox:
                print('action idx', action_idx)
                print(prev_bbox)
                print(curr_bbox)
                raise Exception('box not moved.')

        # oscillation check: revisiting a previous box means we are looping
        if action_idx != ADNetwork.ACTION_IDX_STOP and curr_bbox in boxes:
            action_idx = ADNetwork.ACTION_IDX_STOP

        if action_idx == ADNetwork.ACTION_IDX_STOP:
            break

        boxes.append(curr_bbox)
    self.stopwatch.stop('tracking.do_action')

    # redetection when tracking failed
    new_score = 0.0
    if not is_tracked:
        self.failed_cnt += 1
        # run redetection callback function
        new_box, new_score = self.callback_redetection(curr_bbox, img)
        if new_box is not None:
            curr_bbox = new_box
            patch = commons.extract_region(img, curr_bbox)
        _logger.debug('redetection success=%s' % (str(new_box is not None)))

    # save samples
    if is_tracked or new_score > ADNetConf.g()['predict']['thresh_success']:
        self.stopwatch.start('tracking.save_samples.roi')
        imgwh = Coordinate.get_imgwh(img)
        pos_num, neg_num = ADNetConf.g()['finetune']['pos_num'], ADNetConf.g()['finetune']['neg_num']
        pos_boxes, neg_boxes = curr_bbox.get_posneg_samples(
            imgwh, pos_num, neg_num, use_whole=False,
            pos_thresh=ADNetConf.g()['finetune']['pos_thresh'],
            neg_thresh=ADNetConf.g()['finetune']['neg_thresh'],
            uniform_translation_f=2,
            uniform_scale_f=5
        )
        self.stopwatch.stop('tracking.save_samples.roi')

        self.stopwatch.start('tracking.save_samples.feat')
        feats = self._get_features([commons.extract_region(img, box) for box in pos_boxes])
        for box, feat in zip(pos_boxes, feats):
            box.feat = feat
        feats = self._get_features([commons.extract_region(img, box) for box in neg_boxes])
        for box, feat in zip(neg_boxes, feats):
            box.feat = feat
        pos_lb_action = BoundingBox.get_action_labels(pos_boxes, curr_bbox)
        self.histories.append((pos_boxes, neg_boxes, pos_lb_action, np.copy(img), self.iteration))

        # clear old ones
        self.histories = self.histories[-ADNetConf.g()['finetune']['long_term']:]
        self.stopwatch.stop('tracking.save_samples.feat')

    # online finetune
    if self.iteration % ADNetConf.g()['finetune']['interval'] == 0 or is_tracked is False:
        img_pos, img_neg = [], []
        pos_boxes, neg_boxes, pos_lb_action = [], [], []
        pos_term = 'long_term' if is_tracked else 'short_term'
        for i in range(ADNetConf.g()['finetune'][pos_term]):
            if i >= len(self.histories):
                break
            pos_boxes.extend(self.histories[-(i + 1)][0])
            pos_lb_action.extend(self.histories[-(i + 1)][2])
            img_pos.extend([self.histories[-(i + 1)][3]] * len(self.histories[-(i + 1)][0]))
        for i in range(ADNetConf.g()['finetune']['short_term']):
            if i >= len(self.histories):
                break
            neg_boxes.extend(self.histories[-(i + 1)][1])
            img_neg.extend([self.histories[-(i + 1)][3]] * len(self.histories[-(i + 1)][1]))

        self.stopwatch.start('tracking.online_finetune')
        self._finetune_fc(
            (img_pos, img_neg), pos_boxes, neg_boxes, pos_lb_action,
            ADNetConf.get()['finetune']['learning_rate'],
            ADNetConf.get()['finetune']['iter']
        )
        _logger.debug('finetuned')
        self.stopwatch.stop('tracking.online_finetune')

    visualizer.image('patch', patch)
    return curr_bbox
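# A minimal driver sketch tying the two entry points together (the frame paths
# and the glob-based loop are illustrative assumptions): finetune once on the
# first frame's ground-truth box, then call tracking() once per frame.
#   runner = ADNetRunner()
#   gt_boxes = BoundingBox.read_vid_gt('./data/BlurCar2/')
#   frame = cv2.imread('./data/BlurCar2/img/0001.jpg')
#   runner.imgwh = Coordinate.get_imgwh(frame)
#   runner.initial_finetune(frame, gt_boxes[0])
#   curr_bbox = gt_boxes[0]
#   for path in sorted(glob.glob('./data/BlurCar2/img/*.jpg'))[1:]:
#       curr_bbox = runner.tracking(cv2.imread(path), curr_bbox)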
                [commons.onehot_flatten(self.action_histories_old)] * len(c_batch),
                self.tensor_is_training: False
            })
        scores.extend([x[1] for x in classes])

    # average the five best-scoring candidate boxes into one redetection box
    top5_idx = [i[0] for i in sorted(enumerate(scores), reverse=True, key=lambda x: x[1])][:5]
    mean_score = sum([scores[x] for x in top5_idx]) / 5.0
    if mean_score >= self.latest_score:
        mean_box = candidates[top5_idx[0]]
        for idx in top5_idx[1:]:
            mean_box += candidates[idx]
        return mean_box / 5.0, mean_score
    return None, 0.0

def __del__(self):
    self.persistent_sess.close()


if __name__ == '__main__':
    ADNetConf.get('./conf/repo.yaml')

    random.seed(1258)
    np.random.seed(1258)
    tf.set_random_seed(1258)

    fire.Fire(ADNetRunner)
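# fire.Fire(ADNetRunner) turns the class's public methods into CLI subcommands,
# so a run looks like the following (the script name and model path are
# illustrative; the available subcommands depend on ADNetRunner's full method
# list, which this excerpt does not show):
#   ADNET_MODEL_PATH=./models/adnet.npy python runner.py <method> [--flags]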
def __init__(self, tracker_request, tracker_response):
    # Topic names
    self.tracker_request = tracker_request
    self.tracker_response = tracker_response

    # Tracker initialization
    conf_yaml_path = './conf/repo.yaml'
    ADNetConf.get(conf_yaml_path)

    self.tensor_input = tf.placeholder(tf.float32, shape=(None, 112, 112, 3), name='patch')
    self.tensor_action_history = tf.placeholder(tf.float32, shape=(None, 1, 1, 110), name='action_history')
    self.tensor_lb_action = tf.placeholder(tf.int32, shape=(None,), name='lb_action')
    self.tensor_lb_class = tf.placeholder(tf.int32, shape=(None,), name='lb_class')
    self.tensor_is_training = tf.placeholder(tf.bool, name='is_training')
    self.learning_rate_placeholder = tf.placeholder(tf.float32, [], name='learning_rate')

    self.persistent_sess = tf.Session(config=tf.ConfigProto(
        inter_op_parallelism_threads=1,
        intra_op_parallelism_threads=1))

    self.adnet = ADNetwork(self.learning_rate_placeholder)
    self.adnet.create_network(self.tensor_input, self.tensor_lb_action, self.tensor_lb_class,
                              self.tensor_action_history, self.tensor_is_training)
    if 'ADNET_MODEL_PATH' in os.environ.keys():
        self.adnet.read_original_weights(self.persistent_sess, os.environ['ADNET_MODEL_PATH'])
    else:
        self.adnet.read_original_weights(self.persistent_sess)

    self.action_histories = np.array([0] * ADNetConf.get()['action_history'], dtype=np.int8)
    self.action_histories_old = np.array([0] * ADNetConf.get()['action_history'], dtype=np.int8)
    self.histories = []
    self.iteration = 0
    self.imgwh = None
    self.callback_redetection = self.redetection_by_sampling
    print("Tracker initialization Done!!")

    # Initialize eCAL
    ecal.initialize(sys.argv, "object tracking")

    # Read the JSON file with topic definitions (topics_json_path is defined elsewhere)
    with open(topics_json_path) as data_file:
        self.json_data = json.load(data_file)

    # Define the callbacks for publisher/subscriber
    self.initialize_subscr_topics()
    self.initialize_publsr_topics()

    # The callbacks redirect to the tracker function and publish the predicted ROI
    self.define_subscr_callbacks()