def put_record(self, data, partition_key=None):
    """Add data to the record queue in the proper format.

    Parameters
    ----------
    data : str
        Data to send.
    partition_key : str
        Hash that determines which shard a given data record belongs to.
    """
    # Byte encode the data
    data = encode_data(data)
    # Create a random partition key if not provided
    if not partition_key:
        partition_key = uuid.uuid4().hex
    # Build the record
    record = {'Data': data, 'PartitionKey': partition_key}
    # Flush the queue if it reaches the batch size
    if self.queue.qsize() >= self.batch_size:
        # logging.info("Queue Flush: batch size reached")
        self.pool.submit(self.flush_queue)
    # Append the record
    logging.debug('Putting record "{}"'.format(record['Data'][:100]))
    self.queue.put(record)
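# Minimal usage sketch for put_record() above. The producer class name
# `KinesisProducer` and its constructor arguments are hypothetical here;
# only put_record()'s signature comes from the snippet itself.
producer = KinesisProducer(stream_name='events', batch_size=500)  # hypothetical constructor
producer.put_record('{"event": "click"}')               # random partition key is generated
producer.put_record('{"event": "view"}', 'user-42')     # records with the same key go to the same shard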
def make_element(factory_name, element_name):
    logging.debug(f'Creating element {element_name} of type {factory_name}')
    element = Gst.ElementFactory.make(factory_name, element_name)
    if not element:
        raise Exception(f'Unable to create element {element_name} of type {factory_name}')
    return element
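# Hedged usage sketch: building a trivial GStreamer pipeline with make_element().
# Assumes the standard PyGObject bindings; videotestsrc and autovideosink are
# stock GStreamer elements, not part of this codebase.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
pipeline = Gst.Pipeline.new('demo')
src = make_element('videotestsrc', 'src')
sink = make_element('autovideosink', 'sink')
pipeline.add(src)
pipeline.add(sink)
src.link(sink)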
def put_record(self, data, partition_key=None):
    """Add data to the record queue in the proper format.

    Parameters
    ----------
    data : str
        Data to send.
    partition_key : str
        Hash that determines which shard a given data record belongs to.
    """
    # Byte encode the data
    data = encode_data(data)
    # Create a random partition key if not provided
    partition_key = str(partition_key) if partition_key else uuid.uuid4().hex
    # Build the record
    record = {'Data': data, 'PartitionKey': partition_key}
    # Pin the record to an explicit hash key if one is mapped for this partition key
    explicit_hash_key = self.hash_keys[partition_key]
    if explicit_hash_key is not None:
        record['ExplicitHashKey'] = str(explicit_hash_key)
    # Append the record
    logging.debug('Putting record "{}"'.format(record['Data'][:100]))
    self.queue.put(record)
def __init__(self, labels, delimiter='->'):
    assert 'anything' not in labels, "'anything' is already a reserved label"
    codebook = encode(labels)
    self.cls2id = codebook['cls2id']
    self.id2chr = codebook['id2chr']
    self.id2cls = codebook['id2cls']
    self.delimiter = delimiter
    logging.debug(f"Rule codebook\n{Config({label: self.id2chr[self.cls2id[label]] for label in labels})}")
def launch():
    logging.debug(' '.join(sys.argv))
    assert len(sys.argv) > 1
    cmd = sys.argv[1]
    m = importlib.import_module(f".{cmd}", 'ml.vision.cli')
    argv = sys.argv[1:]
    m.launch(argv)
def on_error(self, bus, message):
    """ ERROR Bus Callback """
    err, debug = message.parse_error()
    logging.debug(debug)
    error = f'Error received from element {message.src.get_name()}: {err}'
    self.put(MESSAGE_TYPE.ERROR, Exception(error))
def on_status_changed(self, bus, message):
    """ STATUS Change Bus Callback """
    state = message.parse_state_changed()
    self.state = STATE_CHANGE(STATE(state.oldstate), STATE(state.newstate), STATE(state.pending))
    logging.debug(f"State | {self.state}")
def compute(self):
    if self._N == 0:
        raise NotComputableError(
            "Loss must have at least one example before it can be computed.")
    TPs = self._TPs / self._N
    # Guard against division by zero: types with zero support get a denominator of 1
    typeTPs = self._typeTPs / (self._typeN + (self._typeN == 0).float())
    logging.debug(self._typeN.tolist())
    return TPs.tolist(), typeTPs.tolist()
def render_data_api_response(response):
    if "records" in response:
        column_names = [col['name'] for col in response.get('columnMetadata', [])]
        logging.debug("%s %s", column_names, response['records'])
        for i, record in enumerate(response["records"]):
            response["records"][i] = {
                column_names[j]: _render_value(v) for j, v in enumerate(record)
            }
    return response
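# Hedged example of the AWS RDS Data API ExecuteStatement response shape this
# function expects; `_render_value` is assumed to unwrap the typed value dicts
# (e.g. {'longValue': 1} -> 1), but its implementation is not shown here.
response = {
    'columnMetadata': [{'name': 'id'}, {'name': 'name'}],
    'records': [
        [{'longValue': 1}, {'stringValue': 'alice'}],
        [{'longValue': 2}, {'stringValue': 'bob'}],
    ],
}
rendered = render_data_api_response(response)
# rendered['records'] becomes a list of dicts keyed by column name, e.g.
# [{'id': 1, 'name': 'alice'}, {'id': 2, 'name': 'bob'}]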
def preprocess(image_path, *shape):
    r'''Preprocessing for TensorRT calibration.

    Args:
        image_path(str): path to the image to load
        shape: target tensor shape hint; unused in the body
    '''
    # NOTE: `trans`, `H` and `W` are expected to be defined at module level
    image = Image.open(image_path)
    logging.debug(f"image.size={image.size}, mode={image.mode}")
    image = image.convert('RGB')
    C = len(image.mode)
    im = trans(image)
    assert im.shape == (C, H, W)
    return im
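# Hedged sketch of the module-level `trans`, `H`, and `W` that preprocess()
# relies on. The actual calibration transform and input size are assumptions;
# a torchvision resize + tensor conversion is typical for TensorRT calibration.
from torchvision import transforms

H, W = 224, 224
trans = transforms.Compose([
    transforms.Resize((H, W)),
    transforms.ToTensor(),
])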
def parse(self):
    while True:
        self.raw_requestline = self.rfile.readline(65537)
        if len(self.raw_requestline) > 65536:
            raise BufferError(HTTPStatus.REQUEST_URI_TOO_LONG)
        if not self.raw_requestline:
            self.close_connection = True
            return
        if self.raw_requestline == b'\r\n':
            continue
        break
    logging.debug(f"request: {self.raw_requestline}")
    return self.parse_one()
def track_frames(frames, start, step):
    frames = th.stack(frames)
    # Track person only
    with th.cuda.amp.autocast(enabled=cfg.det_amp):
        dets, features = detector.detect(frames,
                                         size=cfg.det_scales,
                                         conf_thres=cfg.det_cls_thres,
                                         iou_thres=cfg.det_nms_thres,
                                         batch_preprocess=True)
    persons = dets_select(dets, cfg.trk_cls_person)
    objs = [dets_f[~persons_f].cpu() for dets_f, persons_f in zip(dets, persons)]
    ppls = [dets_f[persons_f].cpu() for dets_f, persons_f in zip(dets, persons)]
    ppl_feats = [feats_f[persons_f].cpu() for feats_f, persons_f in zip(features, persons)]
    for j, (objs_f, ppls_f, ppl_feats_f) in enumerate(zip(objs, ppls, ppl_feats), start):
        logging.info(
            f"[{start + (j - start) * step}] objs: {tuple(objs_f.shape)}, ppls: {tuple(ppls_f.shape)}, feats: {tuple(ppl_feats_f.shape)}")
        assert objs_f.shape[1] == 4 + 1 + 1
        assert ppls_f.shape[1] == 4 + 1 + 1
        assert len(ppls) == len(ppl_feats)
        # assert ppl_feats.shape[1] == 256 + 512 + 1024
        assert ppl_feats_f.shape[1] == 320 + 640 + 1280
        matches = tracker.update(
            ppls_f,
            ppl_feats_f.view(len(ppl_feats_f), np.prod(ppl_feats_f.shape[1:])))
        snapshot = tracker.snapshot()
        tracks = []
        for tid, info in snapshot:
            tracks.append([tid, info])
        logging.debug(f"matches[{start + (j - start) * step}]: {matches}")
        logging.debug(f"snapshot[{start + (j - start) * step}]: {snapshot}")
        # Render both dets and tracks side by side
        frame_det = frames[j - start]
        frame_trk = frame_det.clone()
        C, H, W = frame_det.shape
        idx = f'{start + (j - start) * step:03d}'
        if cfg.render_all:
            dets = th.cat([ppls_f, objs_f])
            frame_det = render_frame(idx, frame_det, dets, False)
        else:
            frame_det = render_frame(idx, frame_det, ppls_f, False)
        if tracks:
            frame_trk = render_frame(idx, frame_trk, tracks, True)
        frame = th.zeros((C, H, 2 * W), dtype=th.uint8)
        frame[:, :, :W] = frame_det
        frame[:, :, W:] = frame_trk
        write_jpeg(frame, str(export_frame / f"frame{idx}.jpg"))
        if media is not None:
            frame = av.VideoFrame.from_ndarray(frame.permute(1, 2, 0).numpy(), format='rgb24')
            packets = stream.encode(frame)
            media.mux(packets)
            logging.debug(f'Encoded: {len(packets)} {packets}, {frame}')
def read_video(self, session, format='BGR'):
    meta = session['video']
    stream = meta['stream']
    codec = meta['codec']
    workaround = meta['workaround']
    while True:
        try:
            pkt = next(stream)
        except StopIteration:
            pkt = None
        now = time.time()
        prev = meta.get('prev', None)
        if prev is None:
            if not pkt.is_keyframe:
                # Some RTSP source may not send key frame to begin with e.g. wisecam
                logging.warning("No key frame to begin with, skip through")
                session['start'] = now
                continue
            meta['keyframe'] = pkt.is_keyframe
            meta['time'] = session['start']
            streams = session['streams']
            sformat = session['format']
            # XXX Stream container package format determines H.264 NALUs in AVCC or Annex B.
            # TODO Streaming NALUs in AVCC
            if 'hls' in sformat or 'rtsp' in sformat or '264' in sformat:
                # XXX In case of out of band CPD: SPS/PPS in AnnexB.
                CPD = []
                if codec.extradata is not None:
                    for (pos, _, _, type), nalu in NALUParser(codec.extradata, workaround=workaround):
                        if hasStartCode(nalu):
                            CPD.append(nalu)
                            logging.info(
                                f"CPD {NALU_t(type).name} at {pos}: {nalu[:8]} ending with {nalu[-1:]}")
                        else:
                            logging.warning(
                                f"Invalid CPD NALU({type}) at {pos}: {nalu[:8]} ending with {nalu[-1:]}")
                            if not CPD:
                                # Skip all
                                break
                NALUs = []
                if workaround:
                    # FIXME workaround before KVS MKVGenerator deals with NALUs ending with a zero byte
                    # https://github.com/awslabs/amazon-kinesis-video-streams-producer-sdk-cpp/issues/491
                    for (pos, _, _, type), nalu in NALUParser(memoryview(pkt), workaround=workaround):
                        assert hasStartCode(nalu), \
                            f"frame[{meta['count']+1}] NALU(type={type}) at {pos} without START CODE: {nalu[:8].tobytes()}"
                        if type in (NALU_t.SPS, NALU_t.PPS):
                            if CPD:
                                # NOTE: some streams could have multiple UNSPECIFIED(0) NALUs within a single packet with SPS/PPS
                                # assert len(CPD) == 2, f"len(CPD) == {len(CPD)}, not 2 for SPS/PPS"
                                ordinal = type - NALU_t.SPS
                                if nalu == CPD[ordinal]:
                                    logging.info(
                                        f"frame[{meta['count']+1}] same {NALU_t(type).name}({nalu[:8].tobytes()}) at {pos} as in CPD({CPD[ordinal][:8]})")
                                else:
                                    # FIXME may expect the CPD to be inserted in the beginning?
                                    logging.warning(
                                        f"frame[{meta['count']+1}] inconsistent {NALU_t(type).name}({nalu[:8].tobytes()}) at {pos} with CPD({CPD[ordinal][:8]})")
                                    print(f"CPD {NALU_t(type).name}:", CPD[ordinal])
                                    print(f"NALU {NALU_t(type).name}:", nalu.tobytes())
                                    # XXX bitstream may present invalid CPD => replacement with bitstream SPS/PPS
                                    CPD[ordinal] = nalu
                            else:
                                NALUs.append(nalu)
                                logging.info(
                                    f"frame[{meta['count']+1}] {NALU_t(type).name} at {pos}: {nalu[:8].tobytes()} ending with {nalu[-1:].tobytes()}")
                        # XXX KVS master is ready to filter out non-VCL NALUs as part of the CPD
                        # elif type in (NALU_t.IDR, NALU_t.NIDR):
                        elif type in (NALU_t.AUD, NALU_t.SEI, NALU_t.IDR, NALU_t.NIDR):
                            NALUs.append(nalu)
                            logging.info(
                                f"frame[{meta['count']+1}] {NALU_t(type).name} at {pos}: {nalu[:8].tobytes()}")
                        else:
                            # FIXME may expect CPD to be inserted in the beginning?
                            logging.warning(
                                f"frame[{meta['count']+1}] skipped unexpected NALU(type={type}) at {pos}: {nalu[:8].tobytes()}")
                    logging.info(
                        f"{pkt.is_keyframe and 'key ' or ''}frame[{meta['count']}] combining CPD({len(CPD)}) and NALUs({len(NALUs)})")
                else:
                    NALUs.append(memoryview(pkt))
                    logging.info(
                        f"{pkt.is_keyframe and 'key ' or ''}frame[{meta['count']}] prepending CPD({len(CPD)})")
                packet = av.Packet(bytearray(b''.join(CPD + NALUs)))
                packet.dts = pkt.dts
                packet.pts = pkt.pts
                packet.time_base = pkt.time_base
                pkt = packet
                if pkt.pts is None:
                    logging.warning(
                        f"Initial packet dts/pts={pkt.dts}/{pkt.pts}, time_base={pkt.time_base}")
                elif pkt.pts > 0:
                    logging.warning(f"Reset dts/pts of 1st frame from {pkt.pts} to 0")
                    pkt.pts = pkt.dts = 0
            elif 'dash' in sformat:
                # TODO In case of out of band CPD: SPS/PPS in AVCC.
                logging.info(f"DASH AVCC extradata: {codec.extradata}")
                logging.info(f"pkt[:16]({pkt.is_keyframe}) {memoryview(pkt)[:16].tobytes()}")
        else:
            keyframe = pkt.is_keyframe
            logging.debug(
                f"packet[{meta['count']}] {keyframe and 'key ' or ''}dts/pts={pkt.dts}/{pkt.pts}, time_base={pkt.time_base}, duration={pkt.duration}")
            if 'hls' in sformat or 'rtsp' in sformat or '264' in sformat:
                NALUs = []
                if workaround:
                    for (pos, _, _, type), nalu in NALUParser(memoryview(pkt), workaround=workaround):
                        # assert hasStartCode(nalu), f"frame[{meta['count']+1}] NALU(type={type}) at {pos} without START CODE: {nalu[:8].tobytes()}"
                        # FIXME KVS master is not ready to take AUD/SEI as part of the CPD
                        # if type in (NALU_t.SPS, NALU_t.PPS, NALU_t.IDR, NALU_t.NIDR):
                        if type in (NALU_t.AUD, NALU_t.SEI, NALU_t.SPS, NALU_t.PPS, NALU_t.IDR, NALU_t.NIDR):
                            NALUs.append(nalu)
                            logging.debug(
                                f"frame[{meta['count']+1}] {NALU_t(type).name} at {pos}: {nalu[:8].tobytes()}")
                        else:
                            # FIXME may expect CPD to be inserted?
                            logging.debug(
                                f"frame[{meta['count']+1}] skipped NALU(type={type}) at {pos}: {nalu[:8].tobytes()} ending with {nalu[-1:].tobytes()}")
                else:
                    NALUs.append(memoryview(pkt))
                # XXX Assume no SPS/PPS change
                packet = av.Packet(bytearray(b''.join(NALUs)))
                packet.dts = pkt.dts
                packet.pts = pkt.pts
                packet.time_base = pkt.time_base
                pkt = packet
            frame = prev
            if session['decoding']:
                try:
                    frames = codec.decode(prev)
                    if not frames:
                        logging.warning("Decoded nothing, continue to read...")
                        meta['prev'] = pkt
                        meta['count'] += 1
                        continue
                except Exception as e:
                    logging.error(f"Failed to decode video packet of size {prev.size}: {e}")
                    raise e
                else:
                    # print(prev, frames)
                    frame = frames[0]
                    meta['width'] = frame.width
                    meta['height'] = frame.height
                    if format == 'BGR':
                        frame = frame.to_rgb().to_ndarray()[:, :, ::-1]
                    elif format == 'RGB':
                        frame = frame.to_rgb().to_ndarray()
            if session['rt']:
                '''
                Live source from network or local camera encoder.
                Bitstream contains no pts but frame duration.
                Adaptive frame duration on drift from wall clock:
                - Faster for long frame buffering
                - Fall behind for being slower than claimed FPS: resync as now
                '''
                if pkt.pts is not None and not meta['drifting']:
                    # Check if drifting
                    if prev.pts is None:
                        prev.dts = prev.pts = 0
                        logging.warning("Reset previous packet dts/pts from None to 0")
                    duration = float((pkt.pts - prev.pts) * pkt.time_base)
                    # assert duration > 0, f"pkt.pts={pkt.pts}, prev.pts={prev.pts}, pkt.time_base={pkt.time_base}, pkt.duration={pkt.duration}, prev.duration={prev.duration}, duration={duration}"
                    if duration <= 0:
                        # FIXME RTSP from Dahua/QB and WiseNet/Ernie
                        pts = prev.pts + (meta['duration'] / pkt.time_base) / 2
                        duration = float((pts - prev.pts) * pkt.time_base)
                        logging.warning(
                            f"Non-increasing pts: pkt.pts={pkt.pts}, prev.pts={prev.pts} => pts={pts}, duration={duration}")
                        pkt.pts = pts
                    timestamp = meta['time'] + duration
                    if meta['adaptive']:
                        # adaptive frame duration only if not KVS
                        diff = abs(timestamp - now)
                        threshold = meta['thresholds']['drifting']
                        if diff > threshold:
                            meta['drifting'] = True
                            logging.warning(
                                f"Drifting video timestamps: abs({timestamp:.3f} - {now:.3f}) = {diff:.3f} > {threshold}s")
                if pkt.pts is None or meta['drifting']:
                    # Real-time against wall clock
                    duration = now - meta['time']
                    duration = min(1.5 / meta['fps'], duration)
                    duration = max(0.5 / meta['fps'], duration)
                    meta['duration'] = duration
                    yield meta, frame
                    meta['time'] += duration
                else:
                    meta['duration'] = duration
                    yield meta, frame
                    meta['time'] = timestamp
            else:
                # TODO: no sleep for being handled by renderer playback
                # Simulating RT
                meta['duration'] = 1.0 / meta['fps']
                slack = (meta['time'] + meta['duration']) - now
                if slack > 0:
                    logging.debug(f"Sleeping for {slack:.3f}s to simulate RT source")
                    time.sleep(slack)
                yield meta, frame
                meta['time'] += meta['duration']
            meta['keyframe'] = keyframe
        if pkt.size == 0:
            logging.warning("EOF/EOS on empty packet")
            return None
        else:
            meta['prev'] = pkt
            meta['count'] += 1
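# Hedged sketch of the Annex B start-code check that read_video() relies on via
# hasStartCode(); the real NALUParser/hasStartCode helpers live elsewhere in this
# codebase and may differ in detail.
def has_annexb_start_code(nalu) -> bool:
    """Return True if the NALU begins with a 3- or 4-byte Annex B start code."""
    head = bytes(nalu[:4])
    return head[:3] == b'\x00\x00\x01' or head == b'\x00\x00\x00\x01'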