def __init__(self, ids, batch_size=4):
    self.lock1 = multiprocessing.Lock()
    self.lock2 = multiprocessing.Lock()
    self.batch_foremost = sharedctypes.RawValue(ctypes.c_int, 0)
    self.batch_rearmost = sharedctypes.RawValue(ctypes.c_int, -1)
    self.condition = multiprocessing.Condition(lock=self.lock2)
    # copy the ids into shared memory and expose them as a numpy view
    data = sharedctypes.RawArray(ctypes.c_int, ids.data)
    self.ids = np.ctypeslib.as_array(data)
    self.batch_size = batch_size
    self.loader = loader  # loader and Y are assumed to be defined at module scope
    self.X = np.empty((self.batch_size, *loader.shape))
    self.Y = np.empty((self.batch_size, Y.shape[1]), dtype=int)  # np.int was removed from NumPy; the builtin int is equivalent
def __init__(self, tids, batch_size=4):
    self.ss = sample_size  # sample_size and loader are assumed to be defined at module scope
    self.curr = 0
    self.lock1 = multiprocessing.Lock()
    self.lock2 = multiprocessing.Lock()
    self.batch_foremost = sharedctypes.RawValue(ctypes.c_int, 0)
    self.batch_rearmost = sharedctypes.RawValue(ctypes.c_int, -1)
    self.condition = multiprocessing.Condition(lock=self.lock2)
    # copy the tids into shared memory and expose them as a numpy view
    data = sharedctypes.RawArray(ctypes.c_int, tids.data)
    self.tids = np.ctypeslib.as_array(data)
    self.batch_size = batch_size
    self.loader = loader
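Both constructors above lean on the same idiom: copy an integer array into a multiprocessing.sharedctypes.RawArray and re-expose it as a NumPy view with np.ctypeslib.as_array, so forked workers operate on the same memory. A minimal, self-contained sketch of that idiom (the _worker function and the toy data are illustrative, not taken from the snippets above):

import ctypes
import multiprocessing
from multiprocessing import sharedctypes

import numpy as np


def _worker(shared_arr, n):
    # the child process sees the same buffer; write through a NumPy view
    view = np.ctypeslib.as_array(shared_arr)
    view[:n] *= 2


if __name__ == '__main__':
    ids = np.arange(10, dtype=np.int32)
    # allocate unlocked shared memory and copy the initial contents into it
    shared = sharedctypes.RawArray(ctypes.c_int, len(ids))
    np.ctypeslib.as_array(shared)[:] = ids
    p = multiprocessing.Process(target=_worker, args=(shared, len(ids)))
    p.start()
    p.join()
    print(np.ctypeslib.as_array(shared))  # values doubled by the child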
def test_global_handle(self):
    """Test ID: Jira-XXXX.

    Test Description: Use a pool handle in another process.

    :avocado: tags=all,daily_regression
    :avocado: tags=tiny
    :avocado: tags=container,global_handle,container_global_handle
    """
    # initialize a python pool object then create the underlying
    # daos storage and connect to it
    self.add_pool(create=True, connect=True)

    # create a pool global handle
    iov_len, buf_len, buf = self.pool.pool.local2global()
    buftype = ctypes.c_byte * buf_len
    c_buf = buftype.from_buffer(buf)
    sct_pool_handle = (
        sharedctypes.RawValue(
            IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len))

    # create a container
    self.add_container(self.pool)
    self.container.open()

    try:
        # create a container global handle
        iov_len, buf_len, buf = self.container.container.local2global()
        buftype = ctypes.c_byte * buf_len
        c_buf = buftype.from_buffer(buf)
        sct_cont_handle = (
            sharedctypes.RawValue(
                IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len))
        sct_pool_uuid = sharedctypes.RawArray(
            ctypes.c_byte, self.pool.pool.uuid)
        # this should work in the future but need on-line server addition
        # arg_list = (
        # p = Process(target=check_handle, args=arg_list)
        # p.start()
        # p.join()

        # for now verifying global handle in the same process which is not
        # the intended use case
        self.check_handle(sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)

    except DaosApiError as error:
        self.log.error(error)
        self.log.error(traceback.format_exc())
        self.fail("Expecting to pass but test has failed.\n")
def __init__(self, source, bufferlen=10, name=None, send_data_to_sink_manager=True, **kwargs):
    '''
    Parameters
    ----------
    source: class compatible with DataSourceSystem
        Class to be instantiated as the "system" with changing data values.
    bufferlen: float
        Number of seconds long to make the ringbuffer. Seconds are converted
        to number of samples based on the 'update_freq' attribute of the source
    name: string, optional, default=None
        Name of the sink, i.e., HDF table. If one is not provided, it will be
        inferred based on the name of the source module
    send_data_to_sink_manager: boolean, optional, default=True
        Flag to indicate whether data should be saved to a sink (e.g., HDF file)
    kwargs: optional keyword arguments
        Passed to the source during object construction if any are specified

    Returns
    -------
    DataSource instance
    '''
    super(DataSource, self).__init__()
    if name is not None:
        self.name = name
    else:
        self.name = source.__module__.split('.')[-1]

    self.filter = None
    self.source = source
    self.source_kwargs = kwargs
    self.bufferlen = bufferlen
    self.max_len = bufferlen * int(self.source.update_freq)
    self.slice_size = self.source.dtype.itemsize

    self.lock = mp.Lock()
    self.idx = shm.RawValue('l', 0)
    self.data = shm.RawArray('c', self.max_len * self.slice_size)
    self.pipe, self._pipe = mp.Pipe()
    self.cmd_event = mp.Event()
    self.status = mp.Value('b', 1)
    self.stream = mp.Event()
    self.last_idx = 0

    # self.methods = set(n for n in dir(source) if inspect.ismethod(getattr(source, n)))
    self.methods = set(
        filter(lambda n: inspect.isfunction(getattr(source, n)), dir(source)))

    # in DataSource.run, there is a call to "self.sinks.send(...)",
    # but if the DataSource was never registered with the sink manager,
    # then this line results in unnecessary IPC
    # so, set send_data_to_sink_manager to False if you want to avoid this
    self.send_data_to_sink_manager = send_data_to_sink_manager
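The idx RawValue plus the byte-typed RawArray form a simple shared ring buffer: each sample occupies slice_size bytes, and the writer advances idx under the lock. Below is a hedged sketch of that write path, not the class's actual run loop; the sample dtype and the write_sample helper are made up for illustration:

import ctypes
import multiprocessing as mp
from multiprocessing import sharedctypes as shm

import numpy as np

# hypothetical fixed-size sample layout; the real dtype comes from the source
dtype = np.dtype([('t', np.float64), ('value', np.float64)])
max_len = 100                      # number of slots in the ring
slice_size = dtype.itemsize

lock = mp.Lock()
idx = shm.RawValue('l', 0)
data = shm.RawArray('c', max_len * slice_size)


def write_sample(sample_bytes):
    # sample_bytes must be exactly slice_size bytes of packed data
    with lock:
        slot = idx.value % max_len
        data[slot * slice_size:(slot + 1) * slice_size] = sample_bytes
        idx.value += 1


sample = np.zeros(1, dtype=dtype)
sample['t'] = 0.123
sample['value'] = 42.0
write_sample(sample.tobytes())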
def main():
    global FRAMEBUFFER, RUNNING
    global NFACES, FACE_BOUNDINGBOXES, FACE_RESULTS, FACE_FRAMEBUFFER
    RUNNING = sharedctypes.RawValue(ctypes.c_ubyte, 1)
    FRAMEBUFFER = sharedctypes.RawValue(ctypes.c_ubyte*3*480*360)
    NFACES = sharedctypes.RawValue(ctypes.c_ushort)
    FACE_BOUNDINGBOXES = sharedctypes.RawValue((ctypes.c_ushort*4)*10)
    FACE_RESULTS = sharedctypes.RawValue(ctypes.c_bool*10)
    FACE_FRAMEBUFFER = sharedctypes.RawValue(ctypes.c_ubyte*3*480*360)

    drone = init_drone()

    if args.record:
        p_recorder = Process(target=recorder)
        p_recorder.start()

    face_event = mp.Event()
    lock = mp.Lock()
    p_pilot = Process(target=pilot, args=(drone.sock.fileno(), lock, face_event))
    p_pilot.start()

    if args.face:
        p_face_recognition = mp.Process(target=face_recognition, args=(face_event,))
        p_face_recognition.start()

    container = av.open(drone.get_video_stream())
    frame_skip = 350
    fps = FPS()
    for frame in container.decode(video=0):
        fps.update()
        if not RUNNING.value:
            break
        if 0 < frame_skip:
            frame_skip = frame_skip - 1
            continue
        start_time = time.time()
        if frame.time_base < 1.0/60:
            time_base = 1.0/60
        else:
            time_base = frame.time_base

        # Convert frame to cv2 image
        frame = frame.to_ndarray(width=480, height=360, format='bgr24')
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        FRAMEBUFFER[:] = np.ctypeslib.as_ctypes(frame)
        cur = time.time()
        prev = cur
        frame_skip = int((time.time() - start_time)/time_base)
        print('VideoReceiver FPS:', fps.get())
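The decode loop only ever copies the latest frame into FRAMEBUFFER with np.ctypeslib.as_ctypes; a consumer such as the recorder or face_recognition worker can map the same block back to an image. A hedged sketch of that read side (read_framebuffer is a hypothetical helper, not part of the snippet above):

def read_framebuffer(framebuffer):
    # np.ctypeslib.as_array maps the nested (c_ubyte*3)*480*360 block to a
    # (360, 480, 3) uint8 view; copy it so the decode loop can keep overwriting
    # the shared buffer without changing the frame we are working on
    return np.ctypeslib.as_array(framebuffer).copy()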
def __init__(self, settings, pipe_buffered_data_after_pause=True, chunk_size=10000):
    """ForceSensorProcess

    pipe_buffered_data_after_pause: if True, the process does not write the
    shared data to the queue continuously; the buffered data are written to
    the queue only after a pause (or stop)

    """
    # DOC explain usage

    # type checks
    if not isinstance(settings, SensorSettings):
        raise RuntimeError(
            "settings has to be force_sensor.Settings object")

    super(SensorProcess, self).__init__()
    self.sensor_settings = settings
    self._pipe_buffer_after_pause = pipe_buffered_data_after_pause
    self._chunk_size = chunk_size

    self._pipe_i, self._pipe_o = Pipe()
    self._event_is_polling = Event()
    self._event_sending_data = Event()
    self._event_new_data = Event()
    self.event_bias_is_available = Event()
    self.event_trigger = Event()

    self._last_Fx = sharedctypes.RawValue(ct.c_float)
    self._last_Fy = sharedctypes.RawValue(ct.c_float)
    self._last_Fz = sharedctypes.RawValue(ct.c_float)
    self._last_Tx = sharedctypes.RawValue(ct.c_float)
    self._last_Ty = sharedctypes.RawValue(ct.c_float)
    self._last_Tz = sharedctypes.RawValue(ct.c_float)
    self._buffer_size = sharedctypes.RawValue(ct.c_uint64)
    self._sample_cnt = sharedctypes.Value(ct.c_uint64)
    self._event_quit_request = Event()
    self._determine_bias_flag = Event()

    self._bias_n_samples = 200
    atexit.register(self.join)
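Note the mix of sharedctypes.RawValue (last force/torque components, buffer size) and sharedctypes.Value (the sample counter): RawValue is a bare ctypes object in shared memory with no lock, while Value wraps the same storage together with a lock that can be used for atomic updates. A small stand-alone illustration of the difference:

import ctypes as ct
from multiprocessing import sharedctypes

# RawValue: unlocked shared storage, suitable when only one side writes it
buffer_size = sharedctypes.RawValue(ct.c_uint64)
buffer_size.value = 128

# Value: same storage plus a lock, suitable for counters updated from several places
sample_cnt = sharedctypes.Value(ct.c_uint64)
with sample_cnt.get_lock():
    sample_cnt.value += 1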
def test_global_handle(self):
    """
    Test ID: DAO

    Test Description: Use a pool handle in another process.

    :avocado: tags=container,conthandle,vm,small,regression
    """
    try:
        # use the uid/gid of the user running the test, these should
        # be perfectly valid
        createuid = os.geteuid()
        creategid = os.getegid()

        # parameters used in pool create that are in yaml
        createmode = self.params.get("mode", '/run/testparams/createmode/')
        createsetid = self.params.get("setname", '/run/testparams/createset/')
        createsize = self.params.get("size", '/run/testparams/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.Context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)
        pool.connect(1 << 1)

        # create a pool global handle
        iov_len, buf_len, buf = pool.local2global()
        buftype = ctypes.c_byte * buf_len
        c_buf = buftype.from_buffer(buf)
        sct_pool_handle = sharedctypes.RawValue(
            IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len)

        # create a container
        container = DaosContainer(self.Context)
        container.create(pool.handle)
        container.open()

        # create a container global handle
        iov_len, buf_len, buf = container.local2global()
        buftype = ctypes.c_byte * buf_len
        c_buf = buftype.from_buffer(buf)
        sct_cont_handle = sharedctypes.RawValue(
            IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len)
        sct_pool_uuid = sharedctypes.RawArray(ctypes.c_byte, pool.uuid)
        # this should work in the future but need on-line server addition
        # arg_list = (
        # p = Process(target=CheckHandle, args=arg_list)
        # p.start()
        # p.join()

        # for now verifying global handle in the same process which is not
        # the intended use case
        CheckHandle(sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)

    except DaosApiError as e:
        print(e)
        print(traceback.format_exc())
        self.fail("Expecting to pass but test has failed.\n")
def __init__(self, slots, obj_size):
    self._slots = slots
    self._obj_size = obj_size
    self._mm = None
    self._head = sharedctypes.RawValue(ctypes.c_uint, 0)
    self._tail = sharedctypes.RawValue(ctypes.c_uint, 0)
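The two c_uint RawValues are the classic head/tail cursors of a shared ring buffer; the backing storage (self._mm) is attached elsewhere. Below is a hedged sketch of the index bookkeeping such a class typically performs, with hypothetical class and method names, assuming a single producer and a single consumer (RawValue provides no locking) and counters that do not wrap around c_uint's range:

import ctypes
from multiprocessing import sharedctypes


class SlotIndex:
    def __init__(self, slots):
        self._slots = slots
        self._head = sharedctypes.RawValue(ctypes.c_uint, 0)
        self._tail = sharedctypes.RawValue(ctypes.c_uint, 0)

    def try_reserve(self):
        """Return the next free slot index, or None if the ring is full."""
        if self._head.value - self._tail.value >= self._slots:
            return None
        slot = self._head.value % self._slots
        self._head.value += 1
        return slot

    def try_release(self):
        """Return the oldest occupied slot index, or None if the ring is empty."""
        if self._tail.value == self._head.value:
            return None
        slot = self._tail.value % self._slots
        self._tail.value += 1
        return slot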
def main():
    global RUNNING, PAUSE
    global NPOSES, FRAMEBUFFER, KP_BUFFER, KP_SCORE_BUFFER, POSESCORE_BUFFER
    global NFACES, FACE_BOUNDINGBOXES, FACE_FRAMEBUFFER, FACE_RESULTS
    FRAMEBUFFER = sharedctypes.RawValue(ctypes.c_ubyte*3*appsink_size[0]*appsink_size[1])
    KP_BUFFER = sharedctypes.RawValue(ctypes.c_int*2*C_NKP*C_MAXPOSE)
    KP_SCORE_BUFFER = sharedctypes.RawValue(ctypes.c_double*C_NKP*C_MAXPOSE)
    POSESCORE_BUFFER = sharedctypes.RawValue(ctypes.c_double*C_MAXPOSE)
    NPOSES = sharedctypes.RawValue(ctypes.c_ushort)
    '''
    NFACES = sharedctypes.RawValue(ctypes.c_ushort)
    FACE_BOUNDINGBOXES = sharedctypes.RawValue((ctypes.c_ushort*4)*10)
    FACE_RESULTS = sharedctypes.RawValue(ctypes.c_bool*10)
    FACE_FRAMEBUFFER = sharedctypes.RawValue(ctypes.c_ubyte*3*appsink_size[0]*appsink_size[1])
    '''
    RUNNING = sharedctypes.RawValue(ctypes.c_ubyte, 1)
    PAUSE = sharedctypes.RawValue(ctypes.c_ubyte, 0)

    lock = mp.Lock()
    mp_event = mp.Event()
    mp_event2 = mp.Event()
    # face_event = mp.Event()
    # p_face_recognition = mp.Process(target=face_recognition, args=(face_event,))
    p_renderer_tracker = mp.Process(target=renderer_tracker, args=(lock, mp_event))
    p_renderer_tracker2 = mp.Process(target=renderer_tracker2, args=(lock, mp_event))
    p_renderer_pose_only = mp.Process(target=renderer_pose_only, args=(lock, mp_event))
    p_renderer_allstar = mp.Process(target=renderer_allstar, args=(lock, mp_event, mp_event2))
    p_face_recognition_evaluation = mp.Process(target=face_recognition_evaluation, args=(lock, mp_event, mp_event2))
    # p_face_recognition_evaluation.start()
    p_renderer_allstar.start()
    # p_face_recognition.start()
    # p_renderer_tracker2.start()
    # p_renderer_tracker.start()
    # p_renderer_pose_only.start()

    if args.file is not None:
        cap = cv2.VideoCapture(args.file)
    elif args.evaluation:
        inputs = []
        for f in os.listdir(args.evaluation):
            if f.endswith('.jpg'):
                inputs.append(f)
        # print(len(inputs))
    else:
        cap = cv2.VideoCapture(args.cam_id)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, src_size[0])
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, src_size[1])
        '''
        default:
        CAP_PROP_AUTO_EXPOSURE 0(OFF)
        CAP_PROP_EXPOSURE 1000
        '''
        cap.set(cv2.CAP_PROP_EXPOSURE, 1000)
        cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0)
        # print(cap.get(cv2.CAP_PROP_EXPOSURE))
        # print(cap.get(cv2.CAP_PROP_AUTO_EXPOSURE))

    frame_count = 0
    start_time = time.time()
    pose_height = np.zeros(C_MAXPOSE, dtype=np.uint32)
    while RUNNING:
        if not PAUSE:
            try:
                # t1 = time.time()
                if args.evaluation:
                    try:
                        input_img = cv2.imread(os.path.join(args.evaluation, inputs[frame_count]))
                    except:
                        break
                else:
                    cap_res, cap_frame = cap.read()
                    input_img = cv2.resize(cap_frame, appsink_size, interpolation=cv2.INTER_NEAREST)
                frame_count += 1
                input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2RGB).astype(np.uint8)
                # pil_frame = Image.fromarray(input_img)
                # input_img = cv2.GaussianBlur(input_img, (5, 5), cv2.BORDER_DEFAULT)
                # opencv outputs frames in (y, x) but we use the (x, y) point format ¯\_(ツ)_/¯
                # print(input_img.shape)
                nposes, pose_scores, kps, kps_score = pose_engine.DetectPosesInImage(input_img)
                # faces = face_engine.detect_with_image(pil_frame, threshold=0.05, keep_aspect_ratio=False, relative_coord=False, top_k=10)
                # print(faces)
                # t2 = time.time()
                # print('PoseNet time:', (t2 - t1)*1000)
                lock.acquire()
                FRAMEBUFFER[:] = np.ctypeslib.as_ctypes(input_img)
                if nposes:
                    NPOSES.value = nposes
                    # I converted PoseNet output to (x, y)
                    KP_BUFFER[:nposes] = np.ctypeslib.as_ctypes(kps.astype(np.int32))
                    KP_SCORE_BUFFER[:nposes] = np.ctypeslib.as_ctypes(kps_score)
                    POSESCORE_BUFFER[:] = np.ctypeslib.as_ctypes(pose_scores)
                lock.release()
                if args.evaluation:
                    mp_event.set()
                    mp_event2.wait()
                    mp_event2.clear()
            except:
                traceback.print_exc()
                RUNNING.value = 0
                break

    RUNNING.value = 0
    end_time = time.time()
    print('Posenet FPS:', frame_count/(end_time - start_time))
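For completeness, a hedged sketch of the consumer side of the mp_event / mp_event2 handshake used in the evaluation path. The real renderer_* and face_recognition_evaluation workers are defined elsewhere; this sketch only illustrates the pattern and reuses the module globals (RUNNING, NPOSES, FRAMEBUFFER, KP_BUFFER) and the imports from the snippet above:

def evaluation_consumer(lock, mp_event, mp_event2):
    while RUNNING.value:
        if not mp_event.wait(timeout=1.0):   # producer signals a new frame
            continue
        mp_event.clear()
        with lock:
            nposes = NPOSES.value
            frame = np.ctypeslib.as_array(FRAMEBUFFER).copy()
            kps = np.ctypeslib.as_array(KP_BUFFER)[:nposes].copy()
        # ... process frame / kps here ...
        mp_event2.set()                      # let the producer move to the next image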