def crop_and_paste_get_roi(self, image, width, height,
                           crop_and_paste_width, crop_and_paste_height):
    """Crop (almost) the whole input frame and paste it into a new picture.

    The crop ROI covers the full source image (edges rounded down to even
    values as the VPC hardware requires odd right/bottom offsets); the
    paste ROI covers the full output picture.

    :param image: input image (in DVPP memory)
    :param width: input image width
    :param height: input image height
    :param crop_and_paste_width: output picture width
    :param crop_and_paste_height: output picture height
    :return: AclImage holding the result in DVPP memory
    """
    print('[Dvpp] vpc crop and paste stage:')
    input_desc = self._gen_input_pic_desc(image)
    # VPC output buffers are 16-aligned in width, 2-aligned in height
    stride_width = utils.align_up16(crop_and_paste_width)
    stride_height = utils.align_up2(crop_and_paste_height)
    out_buffer_size = utils.yuv420sp_size(stride_width, stride_height)
    out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
    # the original ignored the malloc return code; check it like every
    # other acl call in this file
    utils.check_ret("acl.media.dvpp_malloc", ret)
    output_desc = \
        self._gen_output_pic_desc(crop_and_paste_width,
                                  crop_and_paste_height,
                                  out_buffer, out_buffer_size)
    # crop ROI: whole source image, offsets forced odd via (x >> 1 << 1) - 1
    self._crop_config = acl.media.dvpp_create_roi_config(
        0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1)
    # paste ROI: whole output picture
    self._paste_config = acl.media.dvpp_create_roi_config(
        0, crop_and_paste_width - 1, 0, crop_and_paste_height - 1)
    ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc,
                                                  input_desc,
                                                  output_desc,
                                                  self._crop_config,
                                                  self._paste_config,
                                                  self._stream)
    utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret)
    ret = acl.rt.synchronize_stream(self._stream)
    utils.check_ret("acl.rt.synchronize_stream", ret)
    print('[Dvpp] vpc crop and paste stage success')
    # stride_width / stride_height already hold the aligned sizes computed
    # above; the original redundantly recomputed them here
    return AclImage(out_buffer, stride_width, stride_height,
                    out_buffer_size, constants.MEMORY_DVPP)
def _get_frame_config(self, frame_confg):
    """Read back the EOS and force-I-frame flags of a venc frame config.

    NOTE(review): the getters' return values are validated as status codes
    via check_ret — confirm the binding returns a ret code rather than the
    flag value itself.
    """
    eos_flag = acl.media.venc_get_frame_config_eos(frame_confg)
    utils.check_ret("acl.media.venc_get_frame_config_eos", eos_flag)
    force_i_flag = acl.media.venc_get_frame_config_force_i_frame(frame_confg)
    utils.check_ret("acl.media.venc_get_frame_config_force_i_frame",
                    force_i_flag)
def main(arg):
    """Classify the modulation type of test signals [arg.start_num, arg.end_num).

    Each prediction is appended to ./result_modulation_type.csv as a row of
    (sample index, predicted type, true type).
    """
    # Initialize ACL resources
    acl_resource = AclResource()
    acl_resource.init()
    # Initialize the inference wrapper
    classify = Classify(MODEL_PATH)
    ret = classify.init()
    utils.check_ret("Classify init ", ret)
    # Load the test samples and their class labels
    print(f"================正在加载测试集数据================")
    all_test_data, test_label = load_data(DATA_PATH)
    print(f"================测试集数据加载完毕================")
    # print(list(test_label).count(0))  # number of test samples with label 0
    # Run inference sample by sample, predicting each signal's modulation type
    print(f"=====================开始推理=====================")
    # Open the result file once instead of reopening it for every sample
    # (the original re-opened it inside the loop on each iteration)
    with open("./result_modulation_type.csv", 'a', newline='') as t2:
        writer_train2 = csv.writer(t2)
        for now_num in range(arg.start_num, arg.end_num):
            initial_result = classify.inference(all_test_data[now_num, :, :, :])
            result = initial_result[0]  # per-class scores
            # print(result)
            result = result.flatten()
            # argsort sorts ascending, so the last index is the top class
            pre_index = result.argsort()[-1]
            final_result = modulation_type[pre_index]  # predicted type
            true_label = modulation_type[test_label[now_num]]
            print(
                f"================编号为{now_num}的信号的预测的调制类型为:{final_result},实际的调制类型为:{true_label}================"
            )
            writer_train2.writerow([now_num, final_result, true_label])
def _init_resource(self):
    """Create and configure all venc (video encoder) resources.

    Idempotent: returns immediately when already initialized. The order
    below (channel desc -> callback thread -> channel -> frame config ->
    pic desc) is the acl venc setup sequence used by this sample.
    """
    if self._is_inited:
        return
    self.venc_channel_desc = acl.media.venc_create_channel_desc()
    utils.check_none("acl.media.venc_create_channel_desc",
                     self.venc_channel_desc)
    # Callback thread is started before the channel is created —
    # presumably channel creation binds this thread id; confirm against
    # the acl venc documentation.
    self._cb_thread_id, ret = acl.util.start_thread(
        self._cb_thread_func, [])
    utils.check_ret("acl.util.start_thread", ret)
    log_info("[INFO] start_thread", self._cb_thread_id, ret)
    self._set_channel_desc()
    log_info("[INFO] set venc channel desc")
    ret = acl.media.venc_create_channel(self.venc_channel_desc)
    utils.check_ret("acl.media.venc_create_channel", ret)
    self.frame_config = acl.media.venc_create_frame_config()
    utils.check_none("acl.media.venc_create_frame_config",
                     self.frame_config)
    log_info("[INFO] create_frame_config")
    # set picture description (reused for every frame sent to the encoder)
    self.dvpp_pic_desc = acl.media.dvpp_create_pic_desc()
    utils.check_none("acl.media.dvpp_create_pic_desc", self.dvpp_pic_desc)
    self._set_frame_config(self.frame_config, 0, 0)
    log_info("[INFO] set frame config")
    self._get_frame_config(self.frame_config)
    self._is_inited = True
def crop_and_paste(
        self, image, width, height,
        crop_and_paste_width, crop_and_paste_height):
    """Crop the full input frame and paste it, aspect-ratio preserved, into
    a crop_and_paste_width x crop_and_paste_height picture (letterboxing).

    :param image: input image (in DVPP memory)
    :param width: input image width
    :param height: input image height
    :param crop_and_paste_width: output picture width
    :param crop_and_paste_height: output picture height
    :return: AclImage whose reported size is the pasted region
    """
    print('[Dvpp] vpc crop and paste stage:')
    input_desc = self._gen_input_pic_desc(image)
    # VPC output buffers are 16-aligned in width, 2-aligned in height
    stride_width = utils.align_up16(crop_and_paste_width)
    stride_height = utils.align_up2(crop_and_paste_height)
    out_buffer_size = utils.yuv420sp_size(stride_width, stride_height)
    out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
    # the original ignored the malloc return code; check it like every
    # other acl call in this file
    utils.check_ret("acl.media.dvpp_malloc", ret)
    output_desc = self._gen_output_pic_desc(
        crop_and_paste_width, crop_and_paste_height,
        out_buffer, out_buffer_size)
    # crop ROI: whole source image, offsets forced odd via (x >> 1 << 1) - 1
    self._crop_config = acl.media.dvpp_create_roi_config(
        0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1)
    # Compute the paste area that keeps the source aspect ratio, centered
    # margins dx/dy on the axis that is not fully covered
    ratio_w = float(width) / float(crop_and_paste_width)
    ratio_h = float(height) / float(crop_and_paste_height)
    if ratio_w > ratio_h:
        dx = 0
        scale = ratio_w
        dy = int((crop_and_paste_height - height / scale) / 2)
    else:
        dy = 0
        scale = ratio_h
        dx = int((crop_and_paste_width - width / scale) / 2)
    paste_right_offset = int(crop_and_paste_width - 2 * dx)
    paste_bottom_offset = int(crop_and_paste_height - 2 * dy)
    # right/bottom ROI offsets must be odd
    if (paste_right_offset % 2) == 0:
        paste_right_offset = paste_right_offset - 1
    if (paste_bottom_offset % 2) == 0:
        paste_bottom_offset = paste_bottom_offset - 1
    self._paste_config = acl.media.dvpp_create_roi_config(
        0, paste_right_offset, 0, paste_bottom_offset)
    ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc,
                                                  input_desc,
                                                  output_desc,
                                                  self._crop_config,
                                                  self._paste_config,
                                                  self._stream)
    utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret)
    ret = acl.rt.synchronize_stream(self._stream)
    utils.check_ret("acl.rt.synchronize_stream", ret)
    print('[Dvpp] vpc crop and paste stage success')
    # Report the pasted (content) size, not the aligned buffer size
    stride_width = crop_and_paste_width - 2 * dx
    stride_height = crop_and_paste_height - 2 * dy
    return AclImage(out_buffer, stride_width, stride_height,
                    out_buffer_size, constants.MEMORY_DVPP)
def process(self, image):
    """Encode one frame: bind the image buffer to the pic desc, then send it.

    :param image: input frame in DVPP memory (provides .data() and .size)
    """
    ret = acl.media.dvpp_set_pic_desc_data(self.dvpp_pic_desc, image.data())
    # the original discarded these return codes; check them like every
    # other acl call in this file
    utils.check_ret("acl.media.dvpp_set_pic_desc_data", ret)
    ret = acl.media.dvpp_set_pic_desc_size(self.dvpp_pic_desc, image.size)
    utils.check_ret("acl.media.dvpp_set_pic_desc_size", ret)
    log_info("[INFO] set pic desc size")
    # send frame to the encoder
    ret = acl.media.venc_send_frame(self.venc_channel_desc,
                                    self.dvpp_pic_desc, 0,
                                    self.frame_config, None)
    utils.check_ret("acl.media.venc_send_frame", ret)
def _init_resource(self):
    """Create the dvpp channel plus the resize and jpeg-encode configs."""
    # Create dvpp channel; the original did not null-check the created
    # descriptors/configs, unlike the rest of the file
    self._dvpp_channel_desc = acl.media.dvpp_create_channel_desc()
    utils.check_none("acl.media.dvpp_create_channel_desc",
                     self._dvpp_channel_desc)
    ret = acl.media.dvpp_create_channel(self._dvpp_channel_desc)
    utils.check_ret("acl.media.dvpp_create_channel", ret)
    # Create a resize configuration
    self._resize_config = acl.media.dvpp_create_resize_config()
    utils.check_none("acl.media.dvpp_create_resize_config",
                     self._resize_config)
    # Create yuv to jpeg configuration; level 100 — presumably the best
    # jpeg quality; confirm against the dvpp jpege documentation
    self._jpege_config = acl.media.dvpp_create_jpege_config()
    utils.check_none("acl.media.dvpp_create_jpege_config",
                     self._jpege_config)
    ret = acl.media.dvpp_set_jpege_config_level(self._jpege_config, 100)
    utils.check_ret("acl.media.dvpp_set_jpege_config_level", ret)
def _start(self):
    """Start the preprocess sub thread and wait for it to leave INIT state.

    :return: True unless the thread reported STATUS_PREPROC_ERROR
    """
    thread_id, ret = acl.util.start_thread(self._thread_entry, [])
    utils.check_ret("acl.util.start_thread", ret)
    log_info("Start sub thread ok, wait init...")
    while self._status == STATUS_PREPROC_INIT:
        time.sleep(0.001)
    log_info("Status changed to ", self._status)
    # BUGFIX: the original compared the bound method `self._start` with
    # STATUS_PREPROC_RUNNING (always False), so this wait-for-first-frame
    # loop never ran. Compare the status attribute instead.
    while self._status == STATUS_PREPROC_RUNNING:
        if self._image_queue.qsize() > 0:
            break
        time.sleep(0.001)
    return self._status != STATUS_PREPROC_ERROR
def main():
    """Dehaze every picture found in the directory given on the command line."""
    SRC_PATH = os.path.realpath(__file__).rsplit("/", 1)[0]
    MODEL_PATH = "../model/deploy_vel.om"
    MODEL_WIDTH = 512
    MODEL_HEIGHT = 512
    # Exactly one argument is expected: the picture directory
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    acl_resource = AclResource()
    acl_resource.init()
    single_image_dehaze = SingleImageDehaze(MODEL_PATH, MODEL_WIDTH,
                                            MODEL_HEIGHT)
    ret = single_image_dehaze.init()
    utils.check_ret("single_image_dehaze init ", ret)
    # Collect every picture with a supported extension
    image_dir = sys.argv[1]
    image_files = []
    for entry in os.listdir(image_dir):
        if os.path.splitext(entry)[1] in constants.IMG_EXT:
            image_files.append(os.path.join(image_dir, entry))
    # Create a directory to save inference results
    output_dir = os.path.join(SRC_PATH, "../outputs")
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    # Read, preprocess, infer and post-process each picture in turn
    for image_file in image_files:
        image_name = image_file.split('/')[-1]
        origin_image = Image.open(image_file)
        resized_image = single_image_dehaze.pre_process(origin_image)
        result = single_image_dehaze.inference([resized_image, ])
        single_image_dehaze.post_process(result, image_name)
def _thread_entry(self, args_list):
    """Post-process thread loop: consume queued items until told to exit.

    DetectData items are post-processed; any string item is treated as the
    exit signal; anything else is logged as an error.
    """
    self._context, ret = acl.rt.create_context(0)
    utils.check_ret("acl.rt.create_context", ret)
    keep_running = True
    while keep_running:
        data = self._data_queue.get()
        if isinstance(data, DetectData):
            # _process_detect_data returns falsy to stop the loop
            keep_running = self._process_detect_data(data.detect_results,
                                                     data.frame_data)
        elif isinstance(data, str):
            log_info("Post process thread exit")
            self._exit = True
            keep_running = False
        else:
            # fixed typo in the original message ("unkonow")
            log_error("post process thread receive unknown data")
def _gen_output_dataset(self, ouput_num):
    """Create the model output dataset, one device buffer per output.

    :param ouput_num: number of model outputs to allocate buffers for
    """
    log_info("[Model] create model output dataset:")
    dataset = acl.mdl.create_dataset()
    for i in range(ouput_num):
        # malloc device memory for output i
        size = acl.mdl.get_output_size_by_index(self._model_desc, i)
        buf, ret = acl.rt.malloc(size, const.ACL_MEM_MALLOC_NORMAL_ONLY)
        utils.check_ret("acl.rt.malloc", ret)
        # create output data buffer and attach it to the dataset
        dataset_buffer = acl.create_data_buffer(buf, size)
        _, ret = acl.mdl.add_dataset_buffer(dataset, dataset_buffer)
        log_info("malloc output %d, size %d" % (i, size))
        if ret:
            # attach failed: release the buffer resources, then abort via
            # check_ret. The original labeled this failure
            # "acl.destroy_data_buffer", misreporting the failing call.
            acl.rt.free(buf)
            acl.destroy_data_buffer(dataset_buffer)
            utils.check_ret("acl.mdl.add_dataset_buffer", ret)
    self._output_dataset = dataset
    log_info("Create model output dataset success")
def _set_eos_stream_desc(self):
    """Build a stream descriptor marked end-of-stream, then destroy it.

    NOTE(review): the descriptor is destroyed immediately after being
    configured and is never passed to the encoder here — confirm whether
    it should be sent (e.g. via venc_send_frame) before destruction.
    """
    stream_format = 0
    timestamp = 123456
    ret_code = 1
    eos = 1  # marks this descriptor as the end-of-stream signal
    # create the stream desc
    dvpp_stream_desc = acl.media.dvpp_create_stream_desc()
    utils.check_none("acl.media.dvpp_create_stream_desc", dvpp_stream_desc)
    # populate the stream desc fields
    acl.media.dvpp_set_stream_desc_format(dvpp_stream_desc, stream_format)
    acl.media.dvpp_set_stream_desc_timestamp(dvpp_stream_desc, timestamp)
    acl.media.dvpp_set_stream_desc_ret_code(dvpp_stream_desc, ret_code)
    acl.media.dvpp_set_stream_desc_eos(dvpp_stream_desc, eos)
    ret = acl.media.dvpp_destroy_stream_desc(dvpp_stream_desc)
    utils.check_ret("acl.media.dvpp_destroy_stream_desc", ret)
def __init__(self, model_path):
    """Create a model wrapper and load the model from *model_path*.

    :param model_path: path to the offline model (.om) file
    """
    self._run_mode, ret = acl.rt.get_run_mode()
    utils.check_ret("acl.rt.get_run_mode", ret)
    # In host run mode, inference output must be copied device->host;
    # otherwise it stays in device memory
    self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_DEVICE
    if self._run_mode == const.ACL_HOST:
        self._copy_policy = const.ACL_MEMCPY_DEVICE_TO_HOST
    self._model_path = model_path    # string
    self._model_id = None            # pointer
    self._input_num = 0
    self._input_buffer = []
    self._input_dataset = None
    self._output_dataset = None
    self._model_desc = None          # pointer when using
    self._output_size = 0
    self._init_resource()
    # register so the model is released before acl finalizes at exit
    self._is_destroyed = False
    resource_list.register(self)
def main():
    """Run HPA classification on every color image under ./data."""
    image_dir = os.path.join(currentPath, "data")
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in constants.IMG_EXT
    ]
    acl_resource = AclResource()
    acl_resource.init()
    hpa = Hpa(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
    ret = hpa.init()
    utils.check_ret("hpa init ", ret)
    # Create a directory to save inference results
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    for image_file in images_list:
        # image_file is already os.path.join(image_dir, basename); the
        # original redundantly re-joined the same components here
        image_name = image_file
        print('====' + image_name + '====')
        # read image
        im = Image.open(image_name)
        # skip non-color images (fewer/more than 3 bands)
        if len(im.split()) != 3:
            print('warning: "{}" is not a color image and will be ignored'.
                  format(image_name))
            continue
        # Preprocess the picture
        resized_image = hpa.pre_process(im)
        # Inference
        result = hpa.inference([
            resized_image,
        ])
        # Post-processing
        hpa.post_process(result, image_name)
def __init__(self, acl_resource=None):
    """Create a dvpp wrapper.

    :param acl_resource: optional AclResource; when given, its stream and
        run mode are reused, otherwise a private stream is created here.
    """
    if acl_resource is None:
        self._stream, ret = acl.rt.create_stream()
        utils.check_ret("acl.rt.create_stream", ret)
        self._run_mode, ret = acl.rt.get_run_mode()
        utils.check_ret("acl.rt.get_run_mode", ret)
    else:
        self._stream = acl_resource.stream
        self._run_mode = acl_resource.run_mode
    self._dvpp_channel_desc = None
    self._crop_config = None
    self._paste_config = None
    self._init_resource()
    # Dvpp holds acl resources, which need to be released
    # before acl finalizes when the program exits;
    # register in the resource table to ensure the release timing
    self._is_destroyed = False
    resource_list.register(self)
def _init_resource(self):
    """Load the model file, build its description and its output dataset.

    :return: const.SUCCESS on success, const.FAILED if the model file
        does not exist (note: no acl resources are created in that case)
    """
    log_info("Init model resource start...")
    if not os.path.isfile(self._model_path):
        log_error("model_path failed, please check. model_path=%s" %
                  self._model_path)
        return const.FAILED
    self._model_id, ret = acl.mdl.load_from_file(self._model_path)
    utils.check_ret("acl.mdl.load_from_file", ret)
    self._model_desc = acl.mdl.create_desc()
    ret = acl.mdl.get_desc(self._model_desc, self._model_id)
    utils.check_ret("acl.mdl.get_desc", ret)
    # get the number of model outputs
    self._output_size = acl.mdl.get_num_outputs(self._model_desc)
    # create the output dataset with one buffer per output
    self._gen_output_dataset(self._output_size)
    # record input data addresses; if memory must be malloced it will be
    # reusable across inferences
    self._init_input_buffer()
    log_info("Init model resource success")
    return const.SUCCESS
def init(self):
    """Initialize acl runtime resources: device, context, stream, run mode."""
    print("init resource stage:")
    ret = acl.init()
    # BUGFIX: the original labeled this check "acl.rt.set_device",
    # misreporting which call failed; report acl.init instead
    utils.check_ret("acl.init", ret)
    ret = acl.rt.set_device(self.device_id)
    utils.check_ret("acl.rt.set_device", ret)
    self.context, ret = acl.rt.create_context(self.device_id)
    utils.check_ret("acl.rt.create_context", ret)
    self.stream, ret = acl.rt.create_stream()
    utils.check_ret("acl.rt.create_stream", ret)
    self.run_mode, ret = acl.rt.get_run_mode()
    utils.check_ret("acl.rt.get_run_mode", ret)
    print("Init resource success")
def _thread_entry(self, args_list):
    """Decode thread: read video frames and process every 5th one."""
    self._context, ret = acl.rt.create_context(0)
    utils.check_ret("acl.rt.create_context", ret)
    self._cap = video.AclVideo(self._stream_name)
    self._dvpp = Dvpp()
    self._status = STATUS_PREPROC_RUNNING
    frame_cnt = 0
    while self._status == STATUS_PREPROC_RUNNING:
        ret, image = self._cap.read()
        if ret or (image is None):
            if ret == const.VIDEO_DECODE_FINISH:
                log_info("Video %s decode finish" % (self._stream_name))
                self._status = STATUS_PREPROC_EXIT
            else:
                log_info("Video %s decode failed" % (self._stream_name))
                self._status = STATUS_PREPROC_ERROR
            break
        # BUGFIX: frame_cnt was never incremented in the original, so the
        # %5 sampling condition was always true and every frame was
        # processed; count frames so only every 5th one is handled.
        if (frame_cnt % 5 == 0):
            self._process_frame(image)
        frame_cnt += 1
        time.sleep(0.0)
    self._thread_exit()
def main():
    """Cartoonize every picture in the directory given on the command line."""
    # validate the command line: exactly one picture-directory argument
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    # gather every picture with a supported extension
    image_dir = sys.argv[1]
    picture_paths = []
    for name in os.listdir(image_dir):
        if os.path.splitext(name)[1] in const.IMG_EXT:
            picture_paths.append(os.path.join(image_dir, name))
    acl_resource = AclResource()
    acl_resource.init()
    # build and initialize the Cartoonization object
    cartoonization = Cartoonization(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
    ret = cartoonization.init()
    utils.check_ret("Cartoonization.init ", ret)
    # make sure the result directory exists
    if not os.path.isdir('../outputs'):
        os.mkdir('../outputs')
    for image_file in picture_paths:
        source_image = AclImage(image_file)
        # preprocess
        crop_and_paste_image = cartoonization.pre_process(source_image)
        print("[Sample] pre process end")
        # inference
        result = cartoonization.inference([crop_and_paste_image, ])
        # postprocess
        cartoonization.post_process(result, image_file, source_image)
def main():
    """Run gesture recognition on every picture in the directory from argv."""
    # exactly one argument expected: the picture directory
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    acl_resource = AclResource()
    acl_resource.init()
    gesture = Gesture(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
    ret = gesture.init()
    utils.check_ret("Gesture.init ", ret)
    # collect the input pictures
    picture_dir = sys.argv[1]
    picture_files = [
        os.path.join(picture_dir, name)
        for name in os.listdir(picture_dir)
        if os.path.splitext(name)[1] in const.IMG_EXT
    ]
    # make sure the output directory exists
    output_dir = os.path.join(SRC_PATH, "../outputs")
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for picture_file in picture_files:
        picture = AclImage(picture_file)
        resized = gesture.pre_process(picture)
        print("pre process end")
        result = gesture.inference([resized, ])
        gesture.post_process(result, picture_file)
def main():
    """Classify every picture under ./data and post-process the results."""
    image_dir = os.path.join(currentPath, "data")
    # make sure the output directory exists before inference starts
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    acl_resource = AclResource()
    acl_resource.init()
    classify = Classify(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
    ret = classify.init()
    utils.check_ret("Classify init ", ret)
    # collect every picture with a supported extension
    candidates = []
    for file_name in os.listdir(image_dir):
        if os.path.splitext(file_name)[1] in const.IMG_EXT:
            candidates.append(os.path.join(image_dir, file_name))
    for image_file in candidates:
        print('=== ' + os.path.basename(image_file) + '===')
        # read, preprocess, infer and post-process one picture
        image = AclImage(image_file)
        resized_image = classify.pre_process(image)
        result = classify.inference([resized_image, ])
        classify.post_process(result, image_file)
def _start(self):
    """Launch the worker thread; check_ret aborts if creation failed."""
    _thread_id, ret = acl.util.start_thread(self._thread_entry, [])
    utils.check_ret("acl.util.start_thread", ret)
# NOTE(review): the three statements below reference dvpp_stream_desc/eos at
# this scope and duplicate the tail of _set_eos_stream_desc — they look like
# a stray paste; confirm whether they belong inside that method.
acl.media.dvpp_set_stream_desc_eos(dvpp_stream_desc, eos)
ret = acl.media.dvpp_destroy_stream_desc(dvpp_stream_desc)
utils.check_ret("acl.media.dvpp_destroy_stream_desc", ret)


def _thread_join(self):
    """Ask the callback thread to stop, wait for it, then stop it via acl."""
    self.callback_run_flag = False
    # busy-wait until the callback thread reports it has exited
    while self._is_thread_exit == False:
        time.sleep(0.01)
    ret = acl.util.stop_thread(self._cb_thread_id)
    log_info("[INFO] stop_thread", ret)


if __name__ == '__main__':
    # initialize acl, bind the device and query the run mode
    ret = acl.init("")
    utils.check_ret("acl.init", ret)
    ret = acl.rt.set_device(DEVICE_ID)
    utils.check_ret("acl.rt.set_device", ret)
    run_mode, ret = acl.rt.get_run_mode()
    utils.check_ret("acl.rt.get_run_mode", ret)
    venc_handel = AclVenc()
    venc_cnt = 16
    # encode the same 1280x720 test frame 16 times
    while venc_cnt:
        # load file into dvpp memory and send it to the encoder
        image = AclImage(VENC_FILE_PATH, 1280, 720)
        image = image.copy_to_dvpp()
        venc_handel.process(image)
        venc_cnt -= 1
    log_info("process end")