def __init__(self, name, frame_function, num_frames, param_min=0, param_max=1, first_frame=0):
    """Create a named animation segment spanning ``num_frames`` frames.

    ``frame_function`` is called to render each frame; ``param_min`` and
    ``param_max`` bound the parameter swept across the segment.
    The segment starts at ``first_frame`` on the parent timeline.
    """
    FrameContainer.__init__(self)

    # Segment identity and rendering callback.
    self._name = name
    self.frame_function = frame_function

    # Parameter range swept over the segment's duration.
    self._param_min = param_min
    self._param_max = param_max

    # Not attached to a timeline until explicitly added to one.
    self._timeline = None

    # Register the frame span on the FrameContainer base.
    last = first_frame + num_frames - 1
    self.first_frame(first_frame)
    self.last_frame(last)
def __init__(self):
    """Initialize an empty Timeline with default rendering settings."""
    FrameContainer.__init__(self)

    # General settings applied to every Frame rendered from this Timeline.
    # (Assigned item-by-item so only __setitem__ is required of the mapping.)
    for key, value in (
        ('out_dir', sage.misc.misc.tmp_dir()),
        ('image_format', '.png'),
        ('frame_name', 'animation-frame'),
        ('resolution', (544, 306)),
    ):
        self.general_frame_settings[key] = value

    # Attributes of this Timeline; each value in this block can be shown
    # or set with a corresponding non-underscore method.
    self._segment_class = Segment
    self._frame_rate = 30  # frames per second
    self._high_quality = False  # no effect in the default configuration
    self._segments = tuple()
def on_frame(self, frame):
    """Process one frame: detect candidates, classify them, and estimate distances.

    Returns a ``(traffic_lights, auxiliary, distances)`` tuple; ``distances``
    is empty on the very first frame (no previous frame to compare against).
    """
    # Step 1: raw light-source candidates.
    candidates, auxiliary_c = self.__get_lights(frame)

    # Step 2: keep only the candidates classified as traffic lights.
    traffic_lights, auxiliary_t = self.__get_tfl_lights(frame, candidates, auxiliary_c)

    # Step 3: distance estimation needs egomotion between consecutive frames,
    # so it only runs once a previous frame exists.
    curr_frame = FrameContainer(frame)
    curr_frame.traffic_light = traffic_lights
    distances = []
    if self.__prev_frame:
        em_key = f'egomotion_{self.__index - 1}-{self.__index}'
        curr_frame.EM = self.__egomotions[em_key]
        distances = self.__get_distance(curr_frame)

    self.__display(curr_frame, candidates, auxiliary_c,
                   traffic_lights, auxiliary_t, distances)

    # Advance the sliding window for the next call.
    self.__prev_frame = curr_frame
    self.__index += 1
    return traffic_lights, auxiliary_t, distances
def on_frame(self, current_frame, frame_index):
    """Run the three-phase traffic-light pipeline on one frame.

    Phase 1 finds candidate light sources, phase 2 keeps those classified
    as traffic lights, phase 3 estimates distances via egomotion between
    consecutive frames.

    Returns ``(traffic_lights, traffic_auxiliary, distance)``;
    ``distance`` is None for the very first frame.
    """
    # Phase 1: candidate light sources.
    candidates, auxiliary = self.__get_candidates(current_frame)
    assert len(candidates) == len(auxiliary)
    # (Removed `len(...) >= 0` asserts — lengths are never negative.)

    # Phase 2: keep the candidates classified as traffic lights.
    traffic_lights, traffic_auxiliary = self.__get_tfl_coordinates(
        current_frame, candidates, auxiliary)
    if len(traffic_lights) > len(candidates):
        # The classifier should never return more lights than candidates;
        # fall back to the candidate set.  Keep the auxiliary data in sync —
        # the original replaced only `traffic_lights`, which left
        # `traffic_auxiliary` stale and broke the length invariant below.
        traffic_lights, traffic_auxiliary = candidates, auxiliary
    assert len(traffic_lights) == len(traffic_auxiliary)

    # Phase 3: distance estimation (needs a previous frame).
    current_frame = FrameContainer(current_frame)
    current_frame.traffic_light = traffic_lights
    if self.__prev_frame:
        try:
            current_frame.EM = self.__pkl_data[
                'egomotion_' + str(frame_index - 1) + '-' + str(frame_index)]
        except KeyError:
            # Best-effort: no egomotion entry for this frame pair.
            # TODO: decide how this case should be handled.
            pass
        distance = self.__get_dists(self.__prev_frame, current_frame,
                                    self.__prev_frame.traffic_light,
                                    traffic_lights)
        assert len(distance) == len(traffic_lights)
    else:
        distance = None

    self.__prev_frame = current_frame
    visualize(current_frame, candidates, auxiliary,
              traffic_lights, traffic_auxiliary, distance)
    return traffic_lights, traffic_auxiliary, distance
def run(self, frame, EM):
    """Run detection, CNN confirmation, and distance estimation on one frame."""
    self.curr_frame = FrameContainer(frame)
    self.curr_frame.EM = EM

    # Part 1: candidate detection on the raw image.
    # NOTE(review): the misspelled attribute names ("cordinates") appear to
    # be the container's established contract — preserved deliberately.
    coords, colors = find_tfl_lights(self.curr_frame.img)
    self.curr_frame.cordinates = coords
    self.curr_frame.cordinates_colors = colors

    # Part 2: confirm candidates with the CNN model.
    confirm_tfl_by_CNN(self.curr_frame, self.model)

    # Part 3: distance estimation — only possible once a previous frame exists.
    if self.prev_frame:
        SFM.calc_TFL_dist(self.prev_frame, self.curr_frame,
                          self.focal_length, self.principle_point)
def calc_tfl_dist(prev_container: FrameContainer, curr_container: FrameContainer,
                  focal: float, pp: np.ndarray) -> FrameContainer:
    """Estimate 3D locations of traffic lights in ``curr_container``.

    Uses structure-from-motion between the previous and current frames.
    On success, sets ``corresponding_ind``, ``traffic_lights_3d_location``
    and ``valid`` on ``curr_container``; otherwise the container is
    returned unchanged (degenerate motion or missing points).

    Note: the ``focal`` annotation was ``np.float``, an alias removed in
    NumPy 1.24 that crashes at import time on modern NumPy — replaced
    with the builtin ``float``.
    """
    norm_prev_pts, norm_curr_pts, R, foe, t_z = \
        prepare_3d_data(prev_container, curr_container, focal, pp)

    # 10e-6 == 1e-5; presumably intended as a near-zero threshold —
    # left unchanged, but confirm whether 1e-6 was meant.
    if abs(t_z) < 10e-6:
        print('t_z = ', t_z)
    elif norm_prev_pts.size == 0:
        print('no prev points')
    elif norm_curr_pts.size == 0:
        # Bug fix: the original re-tested norm_prev_pts here, making this
        # "no curr points" branch unreachable.
        print('no curr points')
    else:
        (curr_container.corresponding_ind,
         curr_container.traffic_lights_3d_location,
         curr_container.valid) = calc_3d_data(norm_prev_pts, norm_curr_pts,
                                              R, foe, t_z)
    return curr_container
def test_calc_tfl_dist() -> None:
    """Smoke-test SFM.calc_tfl_dist on two consecutive Dusseldorf frames.

    Relies on module-level ``prev_frame_id`` / ``curr_frame_id`` and on the
    fixture files being present in the working directory; ends by
    visualizing the result.
    """
    pkl_path = 'dusseldorf_000049.pkl'
    suffix = '_leftImg8bit.png'
    prev_img_path = 'dusseldorf_000049_0000' + str(prev_frame_id) + suffix
    curr_img_path = 'dusseldorf_000049_0000' + str(curr_frame_id) + suffix

    prev_container = FrameContainer(prev_img_path)
    curr_container = FrameContainer(curr_img_path)

    # Ground-truth data shipped with the frames (trusted local fixture;
    # pickle must never be used on untrusted input).
    with open(pkl_path, 'rb') as pkl_file:
        data = pickle.load(pkl_file, encoding='latin1')
    focal = data['flx']
    pp = data['principle_point']

    prev_container.traffic_light = np.array(data['points_' + str(prev_frame_id)][0])
    curr_container.traffic_light = np.array(data['points_' + str(curr_frame_id)][0])
    curr_container.EM = SFM.calc_EM(data, prev_frame_id, curr_frame_id)

    curr_container = SFM.calc_tfl_dist(prev_container, curr_container, focal, pp)
    visualize(prev_container, curr_container, focal, pp)
def run(self, curr_image_path: str, _id: int) -> Tuple[FrameContainer, List[int]]:
    """Run the three-phase TFL pipeline on a single image.

    Returns the populated frame container and the per-light auxiliary list.
    When the module-level ``DEBUG`` flag is set, each phase also plots its
    intermediate results.

    Idiom fix: ``if DEBUG is True`` replaced with ``if DEBUG`` — PEP 8
    forbids comparing booleans with ``is True``.
    """
    if DEBUG:
        fig, (self.tfl_candidates, self.tfl, self.tfl_distance) = \
            plt.subplots(1, 3, figsize=(12, 5))
    self.curr_container = FrameContainer(curr_image_path)

    # Phase 1: candidate light sources.
    candidates, auxiliary = self.__get_tfl_candidates()
    assert len(candidates) == len(auxiliary)

    # Phase 2: the confirmed traffic lights are a subset of the candidates.
    self.curr_container.traffic_light, tfl_aux = \
        self.__get_tfl_coordinates(candidates, auxiliary)
    assert len(self.curr_container.traffic_light) == len(tfl_aux)
    assert len(self.curr_container.traffic_light) <= len(candidates)

    # Phase 3: one 3D location per confirmed traffic light.
    self.curr_container.traffic_lights_3d_location = self.__get_distance(_id)
    assert len(self.curr_container.traffic_lights_3d_location) == \
        len(self.curr_container.traffic_light)

    if DEBUG:
        plt.show()

    # Keep this frame for the next call's egomotion/distance computation.
    self.__prev_container = self.curr_container
    return self.curr_container, tfl_aux
def run_product(self, image_path, frame_index): current_frame = FrameContainer(image_path)