def msvs_setup_env(env):
    """Set up MSVS environment variables (LIB, LIBPATH, PATH, INCLUDE)
    in env['ENV'] by running the Visual Studio batch file and merging
    its output.

    Silently returns if no matching Visual Studio installation is found.
    """
    # Resolve the VS version/arch from the construction environment first.
    # The original body referenced `msvs`, `version` and `arch` before any
    # of them were bound (it called msvs.get_batch_file() on the line
    # *before* msvs was assigned), which raises NameError. Order now
    # mirrors merge_default_version().
    version = get_default_version(env)
    arch = get_default_arch(env)
    msvs = get_vs_by_version(version)
    if msvs is None:
        return
    batfilename = msvs.get_batch_file()

    # XXX: I think this is broken. This will silently set a bogus tool instead
    # of failing, but there is no other way with the current scons tool
    # framework
    if batfilename is not None:
        vars = ('LIB', 'LIBPATH', 'PATH', 'INCLUDE')
        msvs_list = get_installed_visual_studios()
        vscommonvarnames = [vs.common_tools_var for vs in msvs_list]
        # Run the batch file against a normalized copy of ENV and restore
        # the original ENV afterwards, even if get_output() raises.
        save_ENV = env['ENV']
        nenv = normalize_env(env['ENV'],
                             ['COMSPEC'] + vscommonvarnames,
                             force=True)
        try:
            output = get_output(batfilename, arch, env=nenv)
        finally:
            env['ENV'] = save_ENV
        vars = parse_output(output, vars)
        for k, v in vars.items():
            env.PrependENVPath(k, v, delete_existing=1)
def merge_default_version(env):
    """Merge the default Visual Studio's environment variables
    (LIB, LIBPATH, PATH, INCLUDE) into env['ENV'].

    Silently returns if no matching Visual Studio installation is found.
    """
    version = get_default_version(env)
    arch = get_default_arch(env)
    msvs = get_vs_by_version(version)
    if msvs is None:
        return
    batfilename = msvs.get_batch_file()

    # XXX: I think this is broken. This will silently set a bogus tool instead
    # of failing, but there is no other way with the current scons tool
    # framework
    if batfilename is not None:
        vars = ('LIB', 'LIBPATH', 'PATH', 'INCLUDE')
        msvs_list = get_installed_visual_studios()
        # List comprehension (resolving the old TODO(1.5)): on Python 3 the
        # previous map(lambda ...) returned an iterator, so the
        # `vscommonvarnames + ['COMSPEC']` concatenation below raised
        # TypeError.
        vscommonvarnames = [vs.common_tools_var for vs in msvs_list]
        nenv = normalize_env(env['ENV'], vscommonvarnames + ['COMSPEC'])
        output = get_output(batfilename, arch, env=nenv)
        vars = parse_output(output, vars)
        for k, v in vars.items():
            env.PrependENVPath(k, v, delete_existing=1)
def script_env(script, args=None):
    """Run *script* (a batch file) with optional *args* and return its
    parsed environment output.

    Raises BatchFileExecutionError if the output starts with a known
    error message.
    """
    stdout = common.get_output(script, args)
    # Stupid batch files do not set return code: we take a look at the
    # beginning of the output for an error message instead
    olines = stdout.splitlines()
    # Guard against empty output: olines[0] would raise IndexError.
    if olines and olines[0].startswith(
            "The specified configuration type is missing"):
        raise BatchFileExecutionError("\n".join(olines[:2]))
    return common.parse_output(stdout)
def start_detector(args, interpreter, labels, camera_res):
    """ Detect max_objs objects from camera frames. """
    detected_objects.clear()
    try:
        cap = cv2.VideoCapture(args.camera_idx)
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            cv2_im = frame
            # Undistort using the pre-calibrated camera parameters.
            cv2_im_u = cv2.undistort(cv2_im, common.CAMERA_MATRIX,
                                     common.DIST_COEFFS)
            cv2_im_u_rgb = cv2.cvtColor(cv2_im_u, cv2.COLOR_BGR2RGB)
            pil_im = Image.fromarray(cv2_im_u_rgb)
            common.set_input(interpreter, pil_im)
            interpreter.invoke()
            objs = common.get_output(interpreter,
                                     score_threshold=args.threshold,
                                     labels=labels)
            # Reject images with number of detected objects > max_objs.
            if len(objs) > args.max_objs:
                continue
            # Create proto buffer message and add to stack.
            for obj in objs:
                detected_object = detection_server_pb2.DetectedObject(
                    label=obj.label,
                    score=obj.score,
                    area=obj.area,
                    centroid=detection_server_pb2.DetectedObject.Centroid(
                        x=obj.centroid.x, y=obj.centroid.y),
                    bbox=detection_server_pb2.DetectedObject.BBox(
                        xmin=obj.bbox.xmin, ymin=obj.bbox.ymin,
                        xmax=obj.bbox.xmax, ymax=obj.bbox.ymax))
                detected_objects.appendleft(detected_object)
            if args.display:
                cv2_im_u = common.annotate_image(objs, camera_res, cv2_im_u)
                cv2.imshow('frame', cv2_im_u)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
    except cv2.error as e:
        # Fixed: '{e}'.format(e) passed the exception positionally while the
        # field was named, raising KeyError inside the error handler.
        print('cv2 error: {}'.format(e))
    finally:
        cap.release()
        cv2.destroyAllWindows()
    return
def main():
    """Capture camera frames, run inference and track the detected object,
    printing the measured FPS each iteration.
    """
    # Select the Edge-TPU-compiled model when the edgetpu flag is set.
    if (edgetpu == 1):
        mdl = model_edgetpu
    else:
        mdl = model
    # Fixed: previously passed model_edgetpu unconditionally, making the
    # if/else above dead code; pass the selected mdl instead (matching the
    # other main() variant in this file).
    interpreter, labels = cm.load_model(model_dir, mdl, lbl, edgetpu)

    fps = 1
    while True:
        start_time = time.time()

        # ----------------Capture Camera Frame-----------------
        ret, frame = cap.read()
        if not ret:
            break
        cv2_im = frame
        cv2_im = cv2.flip(cv2_im, 0)
        cv2_im = cv2.flip(cv2_im, 1)
        cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im_rgb)

        # -------------------Inference---------------------------------
        cm.set_input(interpreter, pil_im)
        interpreter.invoke()
        objs = cm.get_output(interpreter, score_threshold=threshold,
                             top_k=top_k)

        # -----------------other------------------------------------
        track_object(objs, labels)  # tracking <<<<<<<

        fps = round(1.0 / (time.time() - start_time), 1)
        print("*********FPS: ", fps, "************")
        # -----------------------------------------------------

    cap.release()
    cv2.destroyAllWindows()
def main():
    # NOTE(review): despite the name, this is a generator (it yields JPEG
    # frames for Flask's multipart streaming) — callers must iterate it.
    # Select the Edge-TPU-compiled model when the edgetpu flag is set.
    if (edgetpu == 1):
        mdl = model_edgetpu
    else:
        mdl = model
    interpreter, labels = cm.load_model(model_dir, mdl, lbl, edgetpu)

    fps = 1
    # Per-stage durations: [capture, inference, other] seconds.
    arr_dur = [0, 0, 0]
    #while cap.isOpened():
    while True:
        start_time = time.time()

        #----------------Capture Camera Frame-----------------
        start_t0 = time.time()
        ret, frame = cap.read()
        if not ret:
            break
        cv2_im = frame
        # Flip both axes (camera presumably mounted inverted — TODO confirm).
        cv2_im = cv2.flip(cv2_im, 0)
        cv2_im = cv2.flip(cv2_im, 1)
        cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
        pil_im = Image.fromarray(cv2_im_rgb)
        arr_dur[0] = time.time() - start_t0
        #cm.time_elapsed(start_t0,"camera capture")
        #----------------------------------------------------

        #-------------------Inference---------------------------------
        start_t1 = time.time()
        cm.set_input(interpreter, pil_im)
        interpreter.invoke()
        objs = cm.get_output(interpreter, score_threshold=threshold,
                             top_k=top_k)
        arr_dur[1] = time.time() - start_t1
        #cm.time_elapsed(start_t1,"inference")
        #----------------------------------------------------

        #-----------------other------------------------------------
        start_t2 = time.time()
        track_object(objs, labels)  #tracking <<<<<<<
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        # Overlay detections, timings and tracking data, then encode for
        # the multipart HTTP stream.
        cv2_im = append_text_img1(cv2_im, objs, labels, arr_dur,
                                  arr_track_data)
        ret, jpeg = cv2.imencode('.jpg', cv2_im)
        pic = jpeg.tobytes()
        #Flask streaming
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + pic + b'\r\n\r\n')
        arr_dur[2] = time.time() - start_t2

        fps = round(1.0 / (time.time() - start_time), 1)
        print("*********FPS: ", fps, "************")

    cap.release()
    cv2.destroyAllWindows()
def script_env(script):
    """Execute *script* and return its output parsed into environment
    variables.
    """
    raw_output = common.get_output(script)
    parsed = common.parse_output(raw_output)
    return parsed
def capture(parse_args, interpreter, labels, camera_res):
    """ Capture images from camera frames and write to disk. """
    sample = 0
    sample_time = 0
    start_time = time.time()
    try:
        cap = cv2.VideoCapture(parse_args.camera_idx)
        while cap.isOpened():
            if sample > parse_args.num_samples:
                break
            elapsed_time = time.time() - sample_time
            ret, frame = cap.read()
            if not ret:
                break
            # Rate-limit sampling to parse_args.frame_rate samples/sec.
            if elapsed_time > 1.0 / parse_args.frame_rate:
                sample_time = time.time()
                cv2_im = frame
                cv2_im_u = cv2.undistort(cv2_im, common.CAMERA_MATRIX,
                                         common.DIST_COEFFS)
                cv2_im_u_rgb = cv2.cvtColor(cv2_im_u, cv2.COLOR_BGR2RGB)
                pil_im = Image.fromarray(cv2_im_u_rgb)
                common.set_input(interpreter, pil_im)
                interpreter.invoke()
                objs = common.get_output(
                    interpreter,
                    score_threshold=parse_args.threshold,
                    labels=labels)
                img_id = uuid.uuid4()
                img_name = ''.join((str(img_id), '.jpg'))
                for obj in objs:
                    if obj.label in parse_args.capture:
                        # Image may appear in more than one named directory
                        # if it contains more than one class of interest.
                        img_path = '{}'.format(
                            os.path.join(parse_args.images, obj.label,
                                         img_name))
                        print('Found "{}" at t+ {:.2f} sec. Writing "{}".'.
                              format(obj.label, sample_time - start_time,
                                     img_name))
                        cv2.imwrite(img_path, cv2_im_u)
                        sample += 1
                if parse_args.display:
                    cv2_im_u = common.annotate_image(objs, camera_res,
                                                     cv2_im_u)
                    cv2.imshow('detections', cv2_im_u)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    except cv2.error as e:
        # Fixed: '{e}'.format(e) passed the exception positionally while the
        # field was named, raising KeyError inside the error handler.
        print('cv2 error: {}'.format(e))
    except Exception as e:
        # Same format-field fix as above.
        print('Unhandled error: {}'.format(e))
    finally:
        cap.release()
        cv2.destroyAllWindows()
    return