def handler_getlog(self, update, context):
    """Pack readable files under /var/log into a tar.xz and send it to the chat.

    Args:
        update: Telegram update carrying the `getlog` command.
        context: Telegram callback context (unused).
    """
    logger.info("Received command `getlog`, chat id: %s" % update.message.chat_id)
    varlogDir = os.path.abspath(os.path.join(os.sep, "var", "log"))
    # Context managers guarantee the archive is finalized and the temp file
    # is removed; the original leaked the re-opened archive handle and left
    # the NamedTemporaryFile for the GC to clean up.
    with tempfile.NamedTemporaryFile(suffix=".tar.xz") as tmp_file:
        with tarfile.open(fileobj=tmp_file, mode="w:xz") as archive:
            # Traverse /var/log
            for root, dirs, files in os.walk(varlogDir):
                for filename in files:
                    fullPath = os.path.join(root, filename)
                    # Skip non-regular files (sockets, fifos, symlink targets)
                    if not os.path.isfile(fullPath):
                        continue
                    # Skip files the process cannot read
                    if not os.access(fullPath, os.R_OK):
                        continue
                    # Pack the file
                    archive.add(name=fullPath, recursive=False)
        # Re-open by path for the upload; the archive must be fully written
        # (tarfile closed) before sending.
        with open(tmp_file.name, 'rb') as archive_fd:
            self.updater.bot.send_document(
                chat_id=update.message.chat_id,
                document=archive_fd,
                filename=time.strftime('berrynet-varlog_%Y%m%d_%H%M%S.tar.xz'))
def update(self, pl):
    """Notify registered Telegram chats with the inference result photo.

    Args:
        pl: MQTT payload bytes containing a serialized inference result
            with `bytes` (stringified JPEG) and `annotations`.
    """
    try:
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        jpg_bytes = payload.destringify_jpg(payload_json["bytes"])
        for u in self.cameraHandlers:
            if self.updater is None:
                continue
            if self.target_label == '':
                if len(payload_json['annotations']) > 0:
                    logger.debug("Send photo to %s" % u)
                    # BUGFIX: build a fresh BytesIO per send. A single
                    # shared descriptor is exhausted after the first
                    # send_photo, so later recipients got an empty photo.
                    self.updater.bot.send_photo(
                        chat_id=u, photo=io.BytesIO(jpg_bytes))
                else:
                    logger.debug("Does not detect any object, no action")
            elif self.match_target_label(self.target_label, payload_json):
                logger.info("Send notification photo with result to %s" % u)
                self.updater.bot.send_photo(
                    chat_id=u, photo=io.BytesIO(jpg_bytes))
            else:
                pass
    except Exception as e:
        logger.info(e)
def handler_stop(self, update, context):
    """Deregister the requesting chat from camera notifications."""
    chat_id = update.message.chat_id
    logger.info("Received command `stop`, chat id: %s" % chat_id)
    # Drop every registration of this chat id (the list may hold duplicates).
    try:
        while True:
            self.cameraHandlers.remove(chat_id)
    except ValueError:
        # No occurrences left.
        pass
    update.message.reply_text('Bye')
def handler_shot(self, update, context):
    """Arm a one-off camera snapshot for the requesting chat."""
    chat_id = update.message.chat_id
    logger.info("Received command `shot`, chat id: %s" % chat_id)
    # Remember who asked, then arm the single-shot flag.
    self.single_shot_chat_id = chat_id
    self.shot = True
    logger.debug('Enable single shot.')
def single_shot(self, pl):
    """Capture an image from camera client and send to the client.

    Args:
        pl: MQTT payload bytes with a serialized image payload
            (an `obj` or, from customized camera clients, an `[obj]`).
    """
    # Guard clause instead of wrapping the whole body in an `if`.
    if not self.shot:
        logger.debug('Single shot is disabled, do nothing.')
        return
    try:
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        # WORKAROUND: Support customized camera client.
        #
        # Original camera client sends an `obj` in payload,
        # Customized camera client sends an `[obj]` in payload.
        #
        # We are unifying the rules. Before that, checking the type
        # as workaround.
        if isinstance(payload_json, list):
            # BUGFIX: log message typo "WORDAROUND" corrected.
            logger.debug('WORKAROUND: receive and unpack [obj]')
            payload_json = payload_json[0]
        jpg_bytes = payload.destringify_jpg(payload_json["bytes"])
        jpg_file_descriptor = io.BytesIO(jpg_bytes)
        logger.info('Send single shot')
        self.updater.bot.send_photo(chat_id=self.single_shot_chat_id,
                                    photo=jpg_file_descriptor)
    except Exception as e:
        logger.info(e)
    # One-off request: disarm regardless of success or failure.
    self.shot = False
def handler_camera(self, update, context):
    """Subscribe the requesting chat to camera notifications."""
    chat_id = update.message.chat_id
    logger.info("Received command `camera`, chat id: %s" % chat_id)
    # Register the chat-id for receiving images; avoid duplicates.
    if chat_id not in self.cameraHandlers:
        self.cameraHandlers.append(chat_id)
    update.message.reply_text('Dear, I am ready to help send notification')
def handler_help(self, update, context):
    """Reply with the list of supported bot commands."""
    logger.info("Received command `help`")
    help_text = ('I support these commands:\n\n'
                 'help - Display help message.\n'
                 'hi - Test Telegram client.\n'
                 'camera - Start camera.\n'
                 'stop - Stop camera.\n'
                 'shot - Take a shot from camera.')
    update.message.reply_text(help_text)
def handleResult(self, pl):
    """Print one inference result, stop the communicator, and exit.

    Note: sys.exit raises SystemExit, which is not an Exception subclass,
    so it propagates past the except clause as intended.
    """
    try:
        result = payload.deserialize_payload(pl.decode('utf-8'))
        print(result)
        self.comm.stop_nb()
        sys.exit(0)
    except Exception as err:
        logger.info(err)
def handleConfig(self, pl):
    """Apply a pushed config: republish it, persist it, restart the pipeline.

    Args:
        pl: MQTT payload bytes with the serialized new configuration.
    """
    payload_json = ""
    try:
        payload_json = payload.deserialize_payload(pl.decode('utf-8'))
        self.comm.send(self.comm_config['publish'],
                       payload.serialize_payload(payload_json))
    except Exception as e:
        logger.info(e)
    # Output config file.
    # NOTE(review): if deserialization failed above, this still overwrites
    # the config file with an empty payload — confirm this is intended.
    # (Removed the redundant close(); `with` already closes the file.)
    with open(self.comm_config['configfile'], 'w') as configfile:
        configfile.write(payload.serialize_payload(payload_json))
    # Restart service so the pipeline picks up the new config.
    subprocess.run(["supervisorctl", "restart", "bnpipeline-bndyda"])
def deploy(self, pl):
    """Deploy newly retrained model for pipeline engine

    New dyda config filepath is in the payload.

    Args:
        pl: MQTT message payload w/ new dyda config filepath.

    Returns:
        N/A
    """
    # Record the new config path, then announce the deployment.
    self.dyda_config_path = pl.decode('utf-8')
    self.comm.send('berrynet/data/deployed', '')
    logger.info(('New model has been deployed, '
                 'dyda config: {}'.format(self.dyda_config_path)))
def handleConfig(self, pl):
    """Load the config file mapped to the received id, publish and persist it.

    Args:
        pl: MQTT payload bytes containing a config id to look up in
            self.idlist.
    """
    payload_json = ""
    try:
        # Renamed from `id`, which shadowed the builtin.
        config_id = pl.decode('utf-8')
        if config_id in self.idlist:
            configFilename = self.idlist[config_id]
            # BUGFIX: the original opened this file and never closed it.
            with open(configFilename) as f:
                payload_json = payload.deserialize_payload(f.read())
            self.sendConfig(payload.serialize_payload(payload_json))
        else:
            logger.warning("ID %s is not in idlist" % (config_id))
            return
    except Exception as e:
        logger.info(e)
    # Output config file (`with` closes it; redundant close() removed).
    with open(self.comm_config['configfile'], 'w') as configfile:
        configfile.write(payload.serialize_payload(payload_json))
    # Restart service so the pipeline picks up the new config.
    subprocess.run(["supervisorctl", "restart", "bnpipeline-bndyda"])
def handler_hi(self, update, context):
    """Greet the requesting user by first name."""
    logger.info("Received command `hi`")
    first_name = update.message.from_user.first_name
    update.message.reply_text('Hi, {}'.format(first_name))
def dl_inference(self, pl):
    """Run the pipeline engine on an incoming MQTT image payload.

    Deserializes the payload (single item or list), decodes the JPEG(s)
    to BGR arrays, runs the engine, and publishes the generalized result
    via self.send_result. Increments self.counter per invocation.

    Args:
        pl: raw MQTT message payload bytes (UTF-8 JSON).
    """
    def empty_inference_result(count):
        # Dummy per-channel result used when the pipeline yields nothing.
        return [{'channel': i, 'annotations': []} for i in range(count)]

    t = datetime.now()
    base_name = None
    logger.debug('counter #{}'.format(self.counter))
    logger.debug('payload size: {}'.format(len(pl)))
    logger.debug('payload type: {}'.format(type(pl)))

    # Unify the type of input payload to a list, so that
    # bnpipeline can process the input in the same way.
    #
    # If the payload is
    #   - a list of items: keep the list
    #   - a single item: convert to a list with an item
    mqtt_payload = payload.deserialize_payload(pl.decode('utf-8'))
    if isinstance(mqtt_payload, list):
        jpg_json = mqtt_payload
    else:
        jpg_json = [mqtt_payload]
        logger.info('Convert input type from {0} to {1}'.format(
            type(mqtt_payload), type(jpg_json)))

    jpg_bytes_list = [
        payload.destringify_jpg(img['bytes']) for img in jpg_json
    ]
    # Per-image metadata; missing 'meta' keys default to an empty dict.
    metas = [img.get('meta', {}) for img in jpg_json]
    logger.debug('destringify_jpg: {} ms'.format(duration(t)))

    t = datetime.now()
    bgr_arrays = [
        payload.jpg2bgr(jpg_bytes) for jpg_bytes in jpg_bytes_list
    ]
    logger.debug('jpg2bgr: {} ms'.format(duration(t)))

    t = datetime.now()
    # FIXME: Galaxy pipeline may or may not use a list as input, so we
    # check the length here and then choose whether to send a list or not.
    # We may drop it when Galaxy Pipline unite their input.
    if len(bgr_arrays) > 1:
        image_data = self.engine.process_input(bgr_arrays)
    else:
        image_data = self.engine.process_input(bgr_arrays[0])

    # FIXME: Galaxy pipeline doesn't support multiple metadata for multiple
    # images at the moment (which will be needed), so we provide the first
    # metadata here. This commit should be revert when Galaxy pipeline
    # support it: https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/120
    meta_data = metas[0]

    try:
        logger.debug(meta_data)
        output = self.engine.inference(image_data,
                                       meta=meta_data,
                                       base_name=base_name)
        model_outputs = self.engine.process_output(output)
    except IndexError as e:
        # FIXME: workaround for pipeline
        # Pipeline throw IndexError when there's no results, see:
        # https://gitlab.com/DT42/galaxy42/dt42-trainer/issues/86
        # So we catch the exception, and produce a dummy result
        # to hook. This workaround should be removed after the issue
        # has been fixed.
        model_outputs = empty_inference_result(len(jpg_json))
        logger.warning(('inference results are empty because '
                        'pipeline raised IndexError'))

    # Engine may also return None on severe internal failure.
    if model_outputs is None:
        model_outputs = empty_inference_result(len(jpg_json))
        logger.warning(('inference results are empty because '
                        'severe error happened in pipeline'))

    logger.debug('Result: {}'.format(model_outputs))
    logger.debug('Classification takes {} ms'.format(duration(t)))

    # self.engine.cache_data('model_output', model_outputs)
    # self.engine.cache_data('model_output_filepath', output_name)
    # self.engine.save_cache()

    self.send_result(self.generalize_result(jpg_json, model_outputs))
    self.counter += 1
def handler_help(self, update, context):
    """Reply with the supported command list."""
    logger.info("Received command `help`")
    reply = 'I support these commands: help, hello, camera'
    update.message.reply_text(reply)
def __init__(self, dyda_config_path='', debug=False):
    """ __init__ of DetectorOpenVINO

    Trainer Variables:
        input_data: a list of image array
        results: defined by lab_tools.output_pred_detection()

    Arguments:
        dyda_config_path -- Trainer config filepath
        debug -- enable DEBUG-level logging when True
    """
    if debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # Setup dyda config
    super(DetectorOpenVINO, self).__init__(
        dyda_config_path=dyda_config_path)
    self.set_param(self.class_name)
    self.check_param_keys()

    # Detection confidence threshold; defaults to 0.3 when not configured.
    if "threshold" in self.param.keys():
        self.threshold = self.param["threshold"]
    else:
        self.threshold = 0.3

    # Setup DL model
    model_xml = self.param['model_description']
    model_bin = self.param['model_file']
    # One label per line in the label file.
    with open(self.param['label_file'], 'r') as f:
        self.labels_map = [x.strip() for x in f]

    # Setup OpenVINO
    #
    # Plugin initialization for specified device and
    # load extensions library if specified
    #
    # Note: MKLDNN CPU-targeted custom layer support is not included
    # because we do not use it yet.
    self.plugin = IEPlugin(
        device=self.param['device'],
        plugin_dirs=self.param['plugin_dirs'])
    if self.param['device'] == 'CPU':
        for ext in self.param['cpu_extensions']:
            logger.info('Add cpu extension: {}'.format(ext))
            self.plugin.add_cpu_extension(ext)
    logger.debug("Computation device: {}".format(self.param['device']))

    # Read IR
    logger.debug("Loading network files:\n\t{}\n\t{}".format(
        model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # On CPU, verify every network layer is supported by the plugin;
    # abort early with guidance when any layer is unsupported.
    if self.plugin.device == "CPU":
        supported_layers = self.plugin.get_supported_layers(net)
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            logger.error(
                ('Following layers are not supported '
                 'by the plugin for specified device {}:\n {}').format(
                     self.plugin.device,
                     ', '.join(not_supported_layers)))
            logger.error("Please try to specify cpu "
                         "extensions library path in demo's "
                         "command line parameters using -l "
                         "or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, (
        'Demo supports only single input topologies')
    assert len(net.outputs) == 1, (
        'Demo supports only single output topologies')

    # input_blob and and out_blob are the layer names in string format.
    logger.debug("Preparing input blobs")
    self.input_blob = next(iter(net.inputs))
    self.out_blob = next(iter(net.outputs))
    # NCHW dimensions of the network's input tensor.
    self.n, self.c, self.h, self.w = net.inputs[self.input_blob].shape

    # Loading model to the plugin
    self.exec_net = self.plugin.load(network=net, num_requests=2)
    del net

    # Initialize engine mode: sync or async
    #
    # FIXME: async mode does not work currently.
    # process_input needs to provide two input tensors for async.
    self.is_async_mode = False
    self.cur_request_id = 0
    self.next_request_id = 1