def queue_step(self):
    """Pop the next agent state off the BFS queue, teleport the agent to it,
    expand its successor states, and record every enabled data modality
    (grid point, metadata, class detections, RGB / segmentation / depth
    frames) for that state.

    Side effects: mutates ``self.queue``, ``self.visited_seen_states``,
    ``self.grid_points``, and the enabled output stores.

    NOTE(review): a later definition of ``queue_step`` in this file shadows
    this copy — confirm which version should be kept.
    """
    search_state = self.queue.popleft()
    event = self.teleport_to_state(search_state)

    # Push the state reached after each possible action onto the queue,
    # then mark the present state as searched.
    self.enqueue_states(search_state)
    self.visited_seen_states.append(search_state)

    # str(search_state) is the key used by every output store; compute once.
    state_key = str(search_state)

    # Save this position as a grid point only if it is not already within
    # distance_threshold of a previously saved point.
    if self.make_grid and not any(
        distance(p, search_state.position()) < self.distance_threshold
        for p in self.grid_points
    ):
        self.grid_points.append(search_state.position())

    if self.make_metadata:
        self.metadata[state_key] = event.metadata

    if self.make_class:  # False by default
        # Stringify detection values in place so they serialize cleanly.
        class_detections = event.class_detections2D
        for k, v in class_detections.items():
            class_detections[k] = str(v)
        self.classdata[state_key] = class_detections

    if self.make_images and state_key not in self.images:
        self.images.create_dataset(state_key, data=event.frame)

    if self.make_seg and state_key not in self.seg:
        self.seg.create_dataset(state_key, data=event.class_segmentation_frame)

    if self.make_depth and state_key not in self.depth:
        self.depth.create_dataset(state_key, data=event.depth_frame)
    elif state_key in self.images:
        # The depth frame was skipped for a state whose image already
        # exists — log it so repeated visits can be diagnosed.
        print(self.scene_name, state_key)
def pos_step(self, pos):
    """Visit every (rotation, horizon) combination at grid position *pos*
    and store each enabled data modality for the resulting agent state.

    Side effects: may append to ``self.grid_points`` and write to the
    enabled output stores.
    """
    for rotation in self.rotations:
        for horizon in self.horizons:
            state = ThorAgentState(
                **pos,
                rotation=rotation,
                horizon=horizon,
                state_decimal=self.state_decimal,
            )
            event = self.teleport_to_state(state)
            key = str(state)

            # Record the position unless a stored grid point is already
            # within the distance threshold.
            if self.make_grid:
                if not any(
                    distance(g, state.position()) < self.distance_threshold
                    for g in self.grid_points
                ):
                    self.grid_points.append(state.position())

            if self.make_metadata:
                self.metadata[key] = event.metadata

            if self.make_class:
                detections = event.class_detections2D
                for name in list(detections):
                    detections[name] = str(detections[name])
                self.classdata[key] = detections

            if self.make_images and key not in self.images:
                self.images.create_dataset(key, data=event.frame)

            if self.make_seg and key not in self.seg:
                self.seg.create_dataset(key, data=event.class_segmentation_frame)

            if self.make_depth and key not in self.depth:
                self.depth.create_dataset(key, data=event.depth_frame)
            elif key in self.images:
                print(self.scene_name, key)
def queue_step(self):
    """Take one state from the search queue, teleport the agent there,
    enqueue its successors, and store whichever data modalities (grid
    point, metadata, detections, image / seg / depth frames) are enabled.

    Side effects: mutates ``self.queue``, ``self.visited_seen_states``,
    ``self.grid_points``, and the enabled output stores.
    """
    state = self.queue.popleft()
    event = self.teleport_to_state(state)

    # Expand this state's successors, then mark it as visited.
    self.enqueue_states(state)
    self.visited_seen_states.append(state)

    key = str(state)

    # Keep the point only if no saved grid point lies within the threshold.
    if self.make_grid:
        nearby = (
            distance(g, state.position()) < self.distance_threshold
            for g in self.grid_points
        )
        if not any(nearby):
            self.grid_points.append(state.position())

    if self.make_metadata:
        self.metadata[key] = event.metadata

    if self.make_class:
        detections = event.class_detections2D
        for name in list(detections):
            detections[name] = str(detections[name])
        self.classdata[key] = detections

    if self.make_images and key not in self.images:
        self.images.create_dataset(key, data=event.frame)

    if self.make_seg and key not in self.seg:
        self.seg.create_dataset(key, data=event.class_segmentation_frame)

    if self.make_depth and key not in self.depth:
        self.depth.create_dataset(key, data=event.depth_frame)
    elif key in self.images:
        print(self.scene_name, key)