def getVideoSource(source, params):
    """Return the camera abstract object to iterate the video source."""
    # Validate the source
    if source not in VALID_SOURCES:
        msg = f'The chosen source {source} is not supported. Please choose one of: {VALID_SOURCES}'
        cprint.fatal(msg, interrupt=True)

    if source == 'Local':
        from Camera.local_camera import Camera as LocalCamera
        # Local camera device selected.
        cam_idx = params['DeviceNo']
        cprint.info(f'Selected device: local camera #{cam_idx}')
        cam = LocalCamera(cam_idx)

    elif source == 'Video':
        from Camera.local_video import Camera as LocalVideo
        # Video file selected.
        video_path = os.path.expanduser(params['Path'])
        if not os.path.isfile(video_path):
            msg = f'The chosen video file {video_path} does not exist. Please check the file name.'
            cprint.fatal(msg, interrupt=True)
        cprint.info(f'Selected video: {video_path}')
        cam = LocalVideo(video_path)

    else:
        # ROS stream selected.
        from Camera.roscam import ROSCam
        # NOTE: assumed construction; ROSCam is expected to be built from the
        # configured image topics (check Camera.roscam for the exact signature).
        cam = ROSCam(params['Topics'])

    return cam
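# Usage sketch for getVideoSource(). The params dict mirrors the per-source
# section of the configuration file; the device index and video path below are
# illustrative values, not project defaults.
def _demo_video_source():
    webcam = getVideoSource('Local', {'DeviceNo': 0})
    clip = getVideoSource('Video', {'Path': '~/test_sequence.mp4'})
    return webcam, clip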
def loadFrozenGraph(model_path, write_nodes):
    '''Load a frozen graph from a .pb file.'''
    model_path = os.path.join(MODELS_DIR, model_path)
    pb_path = os.path.join(model_path, FG_NAME)
    # Check the existence
    if not os.path.isfile(pb_path):
        cprint.fatal(f'Error: the file {pb_path} does not exist.', interrupt=True)

    cprint.info('Loading the frozen graph...')
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(pb_path, 'rb') as fid:
        serialized_graph = fid.read()
        graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(graph_def, name='')
    cprint.ok('Loaded!')

    if write_nodes:
        writeNodes(model_path, graph_def)

    return graph_def
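# Usage sketch for loadFrozenGraph(). 'ssd_mobilenet_v2' stands for a
# hypothetical subdirectory of MODELS_DIR containing the FG_NAME .pb file.
def _demo_load_frozen():
    return loadFrozenGraph('ssd_mobilenet_v2', write_nodes=False)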
def loadCheckpoint(model_path, write_nodes):
    '''Load a graph model from a training checkpoint.'''
    model_path = os.path.join(MODELS_DIR, model_path)
    if not os.path.exists(model_path):
        cprint.fatal(f'Error: the path {model_path} does not exist.', interrupt=True)

    config_file = os.path.join(model_path, 'pipeline.config')
    if not os.path.isfile(config_file):
        cprint.fatal(f'Error: the config file {config_file} does not exist.', interrupt=True)

    checkpoint_file = os.path.join(model_path, 'model.ckpt')
    if not os.path.isfile(checkpoint_file + '.meta'):
        cprint.fatal(f'Error: the checkpoint file {checkpoint_file} does not exist.', interrupt=True)

    graph_def, input_names, output_names = build_detection_graph(config=config_file,
                                                                 checkpoint=checkpoint_file,
                                                                 score_threshold=0.3,
                                                                 batch_size=1,
                                                                 force_nms_cpu=FORCE_NMS_CPU)
    if write_nodes:
        writeNodes(model_path, graph_def)

    return graph_def, input_names, output_names
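# loadCheckpoint() expects a model directory laid out as produced by TF 1.x
# Object Detection API training, e.g. (the directory name is illustrative):
#
#   Optimization/dl_models/<model_name>/
#       pipeline.config
#       model.ckpt.meta
#       model.ckpt.index
#       model.ckpt.data-00000-of-00001
#
# Minimal call sketch, assuming such a directory exists:
def _demo_load_checkpoint():
    return loadCheckpoint('my_ssd_training', write_nodes=False)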
from time import sleep
import yaml

description = '''
Optimize a given model for the TensorRT engine using the TF-TRT converter
interface. Search over a grid of provided parameters, run a benchmark on each
configuration, and dump the result into a YML file alongside the resulting
frozen graph.
'''

parser = argparse.ArgumentParser(description=description)
parser.add_argument('config_file', type=str,
                    help='YML file containing the parameter grid to try.')
args = parser.parse_args()

config_file = args.config_file
if not os.path.isfile(config_file):
    cprint.fatal(f'Error: the file {config_file} does not exist!', interrupt=True)

# Otherwise, parse it!
with open(config_file, 'r') as f:
    cfg = yaml.safe_load(f)

BAGS_DIR = 'bags'
MODELS_DIR = 'Optimization/dl_models'
OPTS_SUBDIR = 'optimizations'
OPTIMIZATION_SCRIPT = 'Optimization/optimize_graph.py'
BENCHMARKING_SCRIPT = 'benchmarkers.py'

model_name = cfg['ModelName']
saved_as = cfg['SavedAs']
input_w, input_h = cfg['InputWidth'], cfg['InputHeight']
arch = cfg['Architecture']
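# Sketch of a minimal parameter-grid YML for this script. Only the keys read
# above are shown; grid-specific keys handled further down are omitted, and the
# values are illustrative rather than recommended defaults.
EXAMPLE_CFG = '''
ModelName: ssd_mobilenet_v2
SavedAs: frozen
InputWidth: 300
InputHeight: 300
Architecture: ssd
'''
# yaml.safe_load(EXAMPLE_CFG) yields a dict with the same shape as cfg above.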
if language == "francais":
    logging.info("Set language to French")
    gettext.bindtextdomain('base', localedir="locales")
    lang_translations = gettext.translation('base', localedir='locales', languages=["fr"])
elif language == "english":
    logging.info("Set language to English")
    gettext.bindtextdomain('base', localedir="locales")
    lang_translations = gettext.translation('base', localedir='locales', languages=["en"])
else:
    logging.fatal("USER DID NOT SPECIFY A LANGUAGE, ABORT!")
    cprint.fatal(
        "You did not specify a supported language. Aborting.\n"
        "Tu n'as pas indiqué de langue supportée. Abandon.",
        interrupt=True)

lang_translations.install()
_ = lang_translations.gettext

logging.info("Attempting to import func.py and basicfunc.py.")
try:
    from func import *
except Exception as e:
    logging.critical("Could not access file func.py (%s)" % e)
    cprint.fatal(_(
        "I can't access the file func.py. This file is necessary for proper function of the Software."
    ), interrupt=True)
logging.info("Successfully imported func.py")
try:
    from basicfunc import *
input_w = args.input_w
input_h = args.input_h
precision = args.precision
mss = args.mss
mce = args.mce
allow_growth = args.allow_growth
arch = args.arch
write_nodes = args.write_nodes
save_in = args.save_in

if model_format == 'frozen':
    # The input and output names have to be provided
    input_names = args.input_names
    output_names = args.output_names
    graph_def = loadFrozenGraph(model_dir, write_nodes)
else:
    graph_def, input_names, output_names = loadCheckpoint(model_dir, write_nodes)
cprint.ok('Graph loaded')

# These nodes can't be optimized
blacklist_nodes = input_names + output_names

# Run the optimization!
trt_graph = optim_graph(graph_def, blacklist_nodes, precision, mss, mce)
if trt_graph is None:
    cprint.fatal('Error: optimization not completed.', interrupt=True)
cprint.ok('Optimization done!')

# And dump the graph into a new .pb file
trt_path = saveTrtGraph(trt_graph, model_dir, save_in)
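# Sketch of what an optim_graph() helper might wrap, assuming the TF 1.x TF-TRT
# converter. This is an assumption about the implementation (the real helper may
# use trt.create_inference_graph or different defaults); mss and mce are mapped
# to minimum_segment_size and maximum_cached_engines respectively.
from tensorflow.python.compiler.tensorrt import trt_convert as trt

def _optim_graph_sketch(graph_def, blacklist_nodes, precision, mss, mce):
    converter = trt.TrtGraphConverter(
        input_graph_def=graph_def,
        nodes_blacklist=blacklist_nodes,   # keep the I/O nodes untouched
        precision_mode=precision,          # 'FP32', 'FP16' or 'INT8'
        minimum_segment_size=mss,
        maximum_cached_engines=mce,
        max_batch_size=1)
    return converter.convert()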
while True:
    try:
        image, _ = cam.getImages()
    except StopIteration:
        cprint.ok('ROSBag completed.')
        break
    # Resize the frame to the network input shape.
    image = np.array(Image.fromarray(image).resize(input_shape[:2]))

    if arch == 'ssd':
        feed_dict = {net.image_tensor: image[None, ...]}
        out, elapsed = net._forward_pass(feed_dict)
        # Save the elapsed time and the number of detections
        n_dets = out[-1]
        total_times.append(elapsed)
        total_dets.append(n_dets)
    else:
        cprint.fatal(f'{arch} not implemented!')
        exit()

# Free resources
net.sess.close()

# Write the results
bm_writer = BenchmarkWriter(path.dirname(write_to), pb_file)

import pickle
with open('cosas.pkl', 'wb') as f:
    pickle.dump([total_times, total_dets], f)

bm_writer.write_log(total_times, total_dets,
                    filename=path.basename(write_to), write_iters=False)
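# Quick inspection of the dumped timings (sketch; 'cosas.pkl' is the hard-coded
# dump path used above).
def _inspect_timings(pkl_path='cosas.pkl'):
    import pickle
    with open(pkl_path, 'rb') as f:
        times, dets = pickle.load(f)
    print(f'Mean inference time: {np.mean(times):.4f} s over {len(times)} frames')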
                    type=str,
                    help='ROSBag to perform the test on')
parser.add_argument('save_in', type=str,
                    help='File in which to write the output result')

# Parse the args
args = parser.parse_args()
pb_file = args.pb_file
rosbag_file = args.rosbag_file

# Check the existence of the files
if not path.isfile(pb_file):
    cprint.fatal(f'Error: the provided frozen graph {pb_file} does not exist', interrupt=True)
if not path.isfile(rosbag_file):
    cprint.fatal(f'Error: the provided ROSBag {rosbag_file} does not exist', interrupt=True)

save_in = args.save_in
arch = args.arch
input_w, input_h = args.input_width, args.input_height

# Create the ROSCam to open the ROSBag
topics = {
    'RGB': '/camera/rgb/image_raw',
    'Depth': '/camera/depth_registered/image_raw',
}
def __init__(self, arch, input_shape,
             frozen_graph=None, graph_def=None,
             dataset='coco', confidence_threshold=0.5,
             path_to_root=None):
    labels_file, max_num_classes = LABELS_DICT[dataset]
    # Append dir if provided (calling from another directory)
    if path_to_root is not None:
        labels_file = path.join(path_to_root, labels_file)
    label_map = label_map_util.load_labelmap(labels_file)  # loads the labels map
    categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=max_num_classes)
    category_index = label_map_util.create_category_index(categories)
    self.classes = {k: str(v['name']) for k, v in category_index.items()}
    # Find person index
    for idx, class_ in self.classes.items():
        if class_ == 'person':
            self.person_class = idx
            break

    # Graph load. We allocate the session attribute
    self.sess = None
    if frozen_graph is not None:
        # Read the graph def from a .pb file
        graph_def = tf.compat.v1.GraphDef()
        cprint.info(f'Loading the graph def from {frozen_graph}')
        with tf.io.gfile.GFile(frozen_graph, 'rb') as f:
            graph_def.ParseFromString(f.read())
        self.load_graphdef(graph_def)
    elif graph_def is not None:
        cprint.info('Loading the provided graph def...')
        self.load_graphdef(graph_def)
    else:
        # No graph def was provided!
        cprint.fatal('The graph definition has not been loaded.', interrupt=True)

    self.input_shape = input_shape
    self.arch = arch

    # Dummy tensor to be used for the first inference.
    dummy_tensor = np.zeros((1, *self.input_shape), dtype=np.int32)

    # Set placeholders, depending on the network architecture
    cprint.warn(f'Network architecture: {self.arch}')
    if self.arch == 'ssd':
        # Inputs
        self.image_tensor = self.sess.graph.get_tensor_by_name('image_tensor:0')
        # Outputs
        self.detection_boxes = self.sess.graph.get_tensor_by_name('detection_boxes:0')
        self.detection_scores = self.sess.graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.sess.graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.sess.graph.get_tensor_by_name('num_detections:0')
        self.boxes = []
        self.scores = []
        self.predictions = []
        self.output_tensors = [self.detection_boxes, self.detection_scores,
                               self.detection_classes, self.num_detections]
        self.dummy_feed = {self.image_tensor: dummy_tensor}

    elif self.arch in ['yolov3', 'yolov3tiny']:
        # Inputs
        self.inputs = self.sess.graph.get_tensor_by_name('inputs:0')
        # Outputs
        self.output_boxes = self.sess.graph.get_tensor_by_name('output_boxes:0')
        self.output_tensors = [self.output_boxes]
        self.dummy_feed = {self.inputs: dummy_tensor}

    elif self.arch == 'face_yolo':
        # Inputs
        self.input = self.sess.graph.get_tensor_by_name('img:0')
        self.training = self.sess.graph.get_tensor_by_name('training:0')
        # Outputs
        self.prob = self.sess.graph.get_tensor_by_name('prob:0')
        self.x_center = self.sess.graph.get_tensor_by_name('x_center:0')
        self.y_center = self.sess.graph.get_tensor_by_name('y_center:0')
        self.w = self.sess.graph.get_tensor_by_name('w:0')
        self.h = self.sess.graph.get_tensor_by_name('h:0')
        self.output_tensors = [self.prob, self.x_center, self.y_center, self.w, self.h]
        self.dummy_feed = {self.input: dummy_tensor, self.training: False}

    elif self.arch == 'face_corrector':
        # Inputs
        self.input = self.sess.graph.get_tensor_by_name('img:0')
        self.training = self.sess.graph.get_tensor_by_name('training:0')
        # Outputs
        self.X = self.sess.graph.get_tensor_by_name('X:0')
        self.Y = self.sess.graph.get_tensor_by_name('Y:0')
        self.W = self.sess.graph.get_tensor_by_name('W:0')
        self.H = self.sess.graph.get_tensor_by_name('H:0')
        self.output_tensors = [self.X, self.Y, self.W, self.H]
        self.dummy_feed = {self.input: dummy_tensor, self.training: False}

    elif self.arch == 'facenet':
        # Inputs
        self.input = self.sess.graph.get_tensor_by_name('input:0')
        self.phase_train = self.sess.graph.get_tensor_by_name('phase_train:0')
        # Outputs
        self.embeddings = self.sess.graph.get_tensor_by_name('embeddings:0')
        self.output_tensors = [self.embeddings]
        self.dummy_feed = {self.input: dummy_tensor, self.phase_train: False}

    else:
        cprint.fatal(f'Architecture {arch} is not supported', interrupt=True)

    # First (slower) inference
    cprint.info("Performing first inference...")
    self._forward_pass(self.dummy_feed)

    self.confidence_threshold = confidence_threshold
    cprint.ok("Detection network ready!")
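# Usage sketch for this detector class (assumed here to be named
# DetectionNetwork; the class name, the .pb path and the 300x300 input shape
# are illustrative assumptions).
def _demo_detector():
    return DetectionNetwork(arch='ssd',
                            input_shape=(300, 300, 3),
                            frozen_graph='Optimization/dl_models/ssd_mobilenet_v2/frozen_inference_graph.pb',
                            dataset='coco',
                            confidence_threshold=0.5)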