Example #1
def test_multiscale_fusion_align():
    # MultiScaleFusionRoIAlign is assumed to be exported by ml.vision.ops
    from ml.vision.ops import MultiScaleFusionRoIAlign
    pooler = MultiScaleFusionRoIAlign(3)
    features = [
        torch.randn(2, 256, 76, 60),
        torch.randn(2, 512, 38, 30),
        torch.randn(2, 1024, 19, 15)
    ]
    metas = [
        dict(
            shape=(1080, 810),
            offset=(0, (608 - 810 / 1080 * 608) % 64),
            ratio=(608 / 1080, ) * 2,
        ),
        dict(
            shape=(540, 405),
            offset=(0, (608 - 405 / 540 * 608) % 64),
            ratio=(608 / 540, ) * 2,
        )
    ]

    boxes = torch.rand(6, 4) * 256
    boxes[:, 2:] += boxes[:, :2]
    rois = [boxes, boxes * 2]
    pooled = pooler(features, rois, metas)
    logging.info(
        f"RoI aligned features: {tuple(feats.shape for feats in pooled)}")
    assert list(pooled[0].shape) == [len(rois[0]), 1024 + 512 + 256, 3, 3]
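
The metas above describe how each source image was letterboxed into the 608-pixel network input: ratio is the uniform scale factor from the longer side, and offset records the padding of the shorter side modulo the 64-pixel stride. A minimal sketch of that bookkeeping, with a hypothetical helper name:

def letterbox_meta(height, width, size=608, align=64):
    # Hypothetical helper mirroring the metas above: scale by the longer
    # side to `size`; the shorter side's padding is recorded modulo `align`.
    ratio = size / max(height, width)
    pad = (size - min(height, width) * ratio) % align
    return dict(shape=(height, width), offset=(0, pad), ratio=(ratio, ratio))

letterbox_meta(1080, 810) reproduces the first entry in metas above.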
Example #2
def test_multi_sessions(credentials, FPS):
    ip = credentials['ip']
    port = credentials['port']
    user = credentials['username']
    passwd = credentials['passwd']
    source = AVSource.create(url(ip, port), user=user, passwd=passwd)
    sessions = source.open('fisheye',
                           'Original',
                           decoding=True,
                           with_audio=False)
    logging.info(f"{[session['cam']['area'] for session in sessions]}")
    assert len(sessions) == 3
    areas = set()
    for session in sessions:
        area = session['cam']['area']
        if 'video' in session:
            video = session['video']
            if video['fps'] is None:
                video['fps'] = FPS.get(area) or 15  # default to 15 when unknown or falsy
        frame = source.read(session)
        areas.add(area)
        source.close(session)
        assert frame is not None, f"Failed to read frame from '{session['cam']['area']}'"
        assert not session

    assert len(areas) == 3, \
        f"Three distinct areas are expected but only got {len(areas)}: {areas}"
Example #3
def parallelize(module, device_ids=None, distributed=False):
    """
    Args:
        device_ids:
            None: all available GPUs
            str: number or comma separated list
            int: number of GPUs to parallelize
            list: int GPU device ids
    """

    available = list(range(th.cuda.device_count()))
    if device_ids is None:
        device_ids = available
    elif isinstance(device_ids, int):
        # number of GPUs to parallelize on
        device_ids = available[-device_ids:]
    elif isinstance(device_ids, str):
        # number or comma-separated list of device ids
        device_ids = (available[-int(device_ids):] if device_ids.isnumeric()
                      else [int(i) for i in device_ids.split(",")])
    assert all(
        i in available for i in device_ids
    ), f"Not all requested GPUs {device_ids} are available in {available}"

    device = th.device(f"cuda:{device_ids[0]}") if device_ids else th.device(
        "cpu")
    module.to(device)
    if distributed:
        # TODO: testing
        return DistributedDataParallel(module), device
    elif len(device_ids) > 1:
        module = nn.DataParallel(module, device_ids)
        logging.info(f"Parallelized module on GPUs: {device_ids}")

    return module, device
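
A quick usage sketch of the accepted device_ids forms; the wrapped module is illustrative and each call is an independent example:

import torch.nn as nn

model = nn.Linear(128, 10)
model, device = parallelize(model)         # all available GPUs
model, device = parallelize(model, 2)      # the last two available GPUs
model, device = parallelize(model, '0,1')  # explicit comma-separated ids
model, device = parallelize(model, [0])    # a single GPU, no DataParallel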
Example #4
 def close(self):
     """Flushes the queue and waits for the executor to finish."""
     logging.info('Closing producer')
     self.flush_queue()
     self.monitor_running.clear()
     self.pool.shutdown()
     logging.info('Producer closed')
Example #5
File: runner.py Project: necla-ml/ML
 def trainer_epoch_completed(engine):
     epoch = engine.state.epoch
     evaluator.run(dataloaders[1], 1)
     metrics = evaluator.state.metrics
     loss = metrics["bce"]
     recall, typeRecall = metrics["recall"]
     recalls = tuple(round(100 * v, 2) for v in recall)
     typeRecalls = tuple(round(100 * v, 2) for v in typeRecall)
     logging.info(
         f"[{epoch}/{cfg.epochs}] "
         f"validation loss={loss:.4f}, recalls={recalls}, per type={typeRecalls}, time={epochTimer.value():.3f}s"
     )
     if vis is not None:
         vis.line(
             X=np.array([epoch]),
             Y=np.array([loss]),
             win=val_loss_win,
             update="append",
         )
         vis.line(
             X=np.array([epoch]),
             Y=np.array([recalls]),
             win=val_recall_win,
             update="append",
         )
         vis.line(
             X=np.array([epoch]),
             Y=np.array([typeRecalls]),
             win=val_type_recall_win,
             update="append",
         )
Example #6
 def close(self):
     logging.info(f"CLOSE {self.name}")
     self.loop.quit()
     state = self.pipeline.set_state(Gst.State.NULL)
     if state != Gst.StateChangeReturn.SUCCESS:
         logging.warning('GST state change to NULL failed')
     self.join(timeout=None)
Example #7
File: driver.py Project: necla-ml/ML-WS
def kvs(args, stop_event):
    from ml.streaming import KVProducer
    producer = None
    try:
        path = args.get('path')
        stream = args.get('stream', 'loopback')
        logging.info(f'Streaming video at {path} to KV stream={stream}')
        producer = KVProducer()
        producer.connect(stream)
        producer.upload(
            path,
            fps=int(args.get('fps', 10)),
            loop=args.get('loop', True),
            start=args.get('start', None),
            end=args.get('end', None),
            transcode=args.get('transcode', False),
            bucket=args.get('bucket', 'eigen-stream-videos'),
            stream_id=args.get('stream_id'),
            env=args.get('env', 'PROD'),
            workaround=args.get('workaround', True),
            stop_event=stop_event
        )
    finally:
        # producer is None if construction or connect failed before assignment
        if producer is not None:
            producer.disconnect()
Example #8
def startStreaming(nvr, cfg, duration=None, debug=False):
    serverId, cam, profile, codec = cfg
    host = f'{nvr.ip}:{nvr.port}'
    url = f'http://{host}'
    show = sys.x_available()
    decoder = av.CodecContext.create(codec.replace('.', '').lower(), "r")
    stream = nvr.startStreaming(cfg, debug=debug)
    logging.info(
        f"##### {cam['area']}: streaming({profile}/{codec}) for {duration} frames #####"
    )
    frames = 0
    for i, (pkt, (media, cc)) in enumerate(stream):
        # Unsupported codecs, e.g.:
        #   - b'audio/x-g711-mulaw'
        if media != 'video' or decoder.name not in cc:
            print(f"[{i}] Skipped unsupported frame: {media}/{cc}")
            continue

        packet = av.Packet(pkt['payload'])
        timestamp = pkt['time']
        decoded = decoder.decode(packet)
        if not decoded:
            # the decoder may buffer and emit no frame for this packet
            continue
        frame = decoded[0].to_rgb().to_ndarray()[:, :, ::-1]  # RGB -> BGR
        if show:
            cv.imshow(f'Live: {url}', frame)
            cv.waitKey(1)
        frames += 1
        logging.info(
            f"[{i}] {media}/{cc} of {packet.size} bytes in {frame.shape} at {timestamp:.3f}s, now={time():.3f}s"
        )
        if duration is not None and frames == duration:
            cv.destroyAllWindows()
            break
Example #9
File: driver.py Project: necla-ml/ML-WS
def nuuo(args, stop_event):
    from ml.streaming import NUUOProducer
    producer = None
    try:
        stream = args.get('stream', 'nuuo-usa')
        area = str(args.get('area'))
        producer = NUUOProducer(
            ip=args.get('ip', None),
            port=args.get('port', None),
            user=args.get('user', None),
            passwd=args.get('passwd', None),
            site_id=args.get('site_id'),
            env=args.get('env', 'PROD')
        )
        producer.connect(stream)
        logging.info(f'Connecting to NUUO area: {area}')
        producer.upload(
            area,
            fps=int(args.get('fps', 10)),
            profile=args.get('profile', 'Original'),
            stream_id=args.get('stream_id', None),
            env=args.get('env', 'PROD'),
            timeout=(15, 30),
            stop_event=stop_event
        )
    finally:
        # NOTE: make sure the thread timeout is enough for the streamer to disconnect properly
        # XXX: could result in doubled streams
        if producer is not None:
            producer.disconnect()
Example #10
File: awscam.py Project: necla-ml/ML-WS
 def close(self):
     if self.stream is not None:
         self.encoder.close()
         self.encoder = self.stream = self.video = self.codec = None
         logging.info(
             f"{self.frames} fresh frames read at {self.frames / (time() - self.start):.2f}FPS since open"
         )
Example #11
File: runner.py Project: necla-ml/ML
def prepare_test(cfg):
    test_loader = dataloader(cfg, cfg.split, bs=cfg.bs, shuffle=False)
    logging.info(f"Loaded {cfg.split} dataset of size {len(test_loader)}")

    model, device = setup_model(cfg)
    logging.info(f"Set up {cfg.model}")
    return test_loader, model, device
Example #12
def test_beer_run(beer_run):
    print()
    labels = [
        'standing',
        'sitting',
        'walking',
        'running',
        'grabbing',
        'inspection',
        'concealing',
        'lying_on_floor',
        'walking items',
        'walking_items',
        'retrieving_items',
        'returning_items',
        'concealing_items',
        'entering',
        'entering123',
        '123entering',
        'exiting',
        'nothing',
    ]
    engine = SequenceRuleEngine(labels, '->')
    rule = engine.compile(beer_run)
    logging.info(f"rule={beer_run}")
    logging.info(f"compiled={rule}")
Example #13
def deploy_yolo5():
    parser = argparse.ArgumentParser(
        'Deploy a trained YOLO5 checkpoint on S3 by stripping out training states'
    )
    parser.add_argument('chkpt',
                        help='Path to a trained checkpoint for deployment')
    parser.add_argument('--url', help='s3://bucket/key/to/chkpt.pt')
    cfg = parser.parse_args()

    from ml import hub
    from ml.hub import github
    from ml.vision.models.detection.yolo5 import GITHUB
    repo = hub.repo(github(**GITHUB), force_reload=False)
    chkpt = cfg.chkpt
    if chkpt is None:
        chkpt = f"{repo}/weights/best.pt"

    sys.add_path(repo)
    from utils.general import strip_optimizer
    before = os.path.getsize(chkpt)
    strip_optimizer(chkpt)
    after = os.path.getsize(chkpt)
    logging.info(
        f"Stripped {chkpt} from {before / 1024**2:.2f}MB to {after / 1024**2:.2f}MB"
    )
    if cfg.url and cfg.url.startswith('s3://'):
        logging.info(f"Uploading to {cfg.url}...")
        parts = cfg.url[len('s3://'):].split('/')
        bucket = parts[0]
        key = '/'.join(parts[1:])
        hub.upload_s3(chkpt, bucket, key)
    else:
        raise ValueError(f"Unsupported URL to upload: {cfg.url}")
Example #14
 def read(self, session, media='video', format='BGR'):
     if session is None or media not in session:
         logging.error(f"{media} not in session to read")
         return None
     try:
         if media == 'video':
             meta = session[media]
             stream = meta['stream']
             codec = meta['codec']
             if 'framer' not in meta:
                 # create the frame generator once; dict.get with a default
                 # would eagerly spawn a fresh generator on every read
                 meta['framer'] = self.read_video(session, format)
             framer = meta['framer']
             meta, frame = next(framer)
         elif media == 'audio':
             meta = session[media]
             stream = meta['stream']
             if 'framer' not in meta:
                 meta['framer'] = self.read_audio(session)
             framer = meta['framer']
             meta, frame = next(framer)
         else:
             logging.error(f"Unsupported media type to read: {media}")
             return None
     except StopIteration:
         # EOS
         return None
     except Exception as e:
         logging.info(f"Failed to read a frame: {e}")
         raise e
     else:
         return media, meta, frame
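
A typical consumer loop over this API, assuming a source and an open session as in the test_multi_sessions example above; process() is a placeholder:

while True:
    result = source.read(session)
    if result is None:
        break  # EOS
    media, meta, frame = result
    if media == 'video':
        process(frame)  # placeholder for application logic
source.close(session)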
Example #15
File: calibrator.py Project: necla-ml/ML
 def load_batches(self):
     if self.files:
         # Populates a persistent buffer with images.
         for index in range(0, len(self.files), self.batch_size):
             for offset in range(self.batch_size):
                 # assumes len(self.files) is a multiple of batch_size;
                 # otherwise the last partial batch raises IndexError
                 image_path = self.files[index + offset]
                 for i, input_shape in enumerate(self.inputs):
                     self.buffers[i][offset] = self.preprocess_func(
                         image_path).contiguous()
             logging.info(
                 f"preprocessed calibration images: {index + self.batch_size}/{len(self.files)}"
             )
             yield
     else:
         for index in range(0, self.max_calib_data, self.batch_size):
             # torch.rand already fills a whole batch per input, so no
             # per-offset loop is needed here
             for i, input_shape in enumerate(self.inputs):
                 rand_batch = torch.rand(
                     (self.batch_size, *input_shape),
                     dtype=torch.float32).contiguous()
                 self.buffers[i].copy_(rand_batch)
             logging.info(
                 f"generated random calibration data batch: {index + self.batch_size}/{self.max_calib_data}"
             )
             yield
Example #16
 def close(self):
     """Flushes the queue and waits for the executor to finish."""
     logging.info(f'[{self._name}] Closing kinesis producer')
     self.flush_queue()
     self.monitor_running.clear()
     self.join()
     logging.info(f'[{self._name}] Kinesis producer closed')
Example #17
File: driver.py Project: necla-ml/ML-WS
    def run(self):
        # loop streaming thread until stop_event is set
        while not self.stop_event.is_set():
            try:
                # get task_type and respective task function
                task_type = self._args.get('stream_type')
                TASKS[task_type](
                    self._args,
                    self.stop_event
                )
            except Exception as e:
                # exception occurred:
                # put a message on the monitor queue to update the database
                logging.error(e)
                if self.exception_count == 0:
                    stream_id = self._args.get('stream_id')
                    msg = dict(
                        timestamp=time.time(),
                        payload=str(e),
                        msg_type=MSG_TYPE.ERROR,
                        stream_id=stream_id
                    )
                    self.monitor.put_msg(msg)

                self.exception_count += 1
                # linearly increasing delay before restarting
                time.sleep(self.exception_count * 10)
                # XXX: cap the count so the delay does not grow unbounded
                if self.exception_count == self.max_exceptions:
                    self.exception_count = 1
                logging.info(f'[{self.name}] Restarting streaming producer on error')
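
If true exponential backoff were wanted instead of the linear delay above, a capped variant might look like this (helper name is hypothetical):

import time

def backoff_delay(attempt, base=10, cap=600):
    # Exponential backoff capped at `cap` seconds; attempt starts at 1.
    return min(base * 2 ** (attempt - 1), cap)

time.sleep(backoff_delay(3))  # 10 * 2**2 = 40 seconds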
Example #18
File: calibrator.py Project: necla-ml/ML
 def read_calibration_cache(self):
     # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
     if os.path.exists(self.cache):
         with open(self.cache, "rb") as f:
             logging.info(
                 f"Using calibration cache to save time: {self.cache}")
             return f.read()
Example #19
File: runner.py Project: necla-ml/ML
 def test_completed(engine):
     metrics = engine.state.metrics
     loss = metrics["bce"]
     recall, typeRecall = metrics["recall"]
     recalls = tuple(round(100 * v, 2) for v in recall)
     typeRecalls = tuple(round(100 * v, 2) for v in typeRecall)
     logging.info(
         f"Test loss={loss:.4f}, recalls={recalls}, per type={typeRecalls}, time={epochTimer.value():.3f}s"
     )
Example #20
def test_connect(credentials):
    ip = credentials['ip']
    port = credentials['port']
    user = credentials['username']
    passwd = credentials['passwd']
    nvr = NVR.create(ip, port, user=user, passwd=passwd)
    assert isinstance(nvr, Titan8040R)
    nvr.connect()
    for device in nvr:
        logging.info(device)
Example #21
File: trt.py Project: necla-ml/ML
def build(path):
    """Build an inference engine from a serialized model.
    Args:
        path: model or path to a saved onnx/trt checkpoint
    """
    logging.info(f"Deserializing the TensorRT engine from {path}")
    with open(path, "rb") as f, \
            trt.Logger() as logger, trt.Runtime(logger) as runtime:
        return runtime.deserialize_cuda_engine(f.read())
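
A usage sketch, assuming a serialized engine file on disk (the path is illustrative):

engine = build('yolov5x.trt')
context = engine.create_execution_context()  # standard TensorRT inference setup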
Example #22
 def monitor(self):
     """Flushes the queue periodically."""
     while self.monitor_running.is_set():
         if time.time() - self.last_flush > self.batch_time:
             if not self.queue.empty():
                 logging.info(
                     "KinesisProducer: batch time elapsed without a flush; flushing queue"
                 )
                 self.flush_queue()
         time.sleep(self.batch_time)
Example #23
def test_io_image(img):
    with tempfile.TemporaryDirectory() as tmp:
        path = f"{tmp}/image.jpg"
        io.save(img, path)
        size = os.path.getsize(path)
        logging.info(f"saved an image to {path} of {size} bytes")
        assert size > 0

        target = io.load(path)
        assert target.shape == img.shape
        assert target.dtype == img.dtype
Example #24
def upload_s3(path, bucket, key):
    '''
    Args:
        path(str): path to the file to upload
        bucket(str): S3 bucket name
        key(str): key to upload to the bucket where the ending '/' matters
    '''
    try:
        import botocore, boto3
        from botocore.exceptions import ClientError
    except ImportError as e:
        logging.warning(
            f'botocore and boto3 are required to upload to S3: {e}')
        return False
    else:
        # XXX Amazon S3 supports buckets and objects, and there is no hierarchy.
        path = Path(path)
        s3 = boto3.resource('s3').meta.client
        if not path.is_file():
            logging.error(f"{path} does not exist or is not a file to upload")
            return False
        total = 0
        start = time()

        def callback(nbytes):
            # avoid shadowing the builtin `bytes`
            nonlocal total
            total += nbytes
            elapse = time() - start
            if total < 1024:
                print(
                    f"\rUploaded {total:4d} bytes at {total / elapse:.2f} bytes/s",
                    end='')
            elif total < 1024**2:
                KB = total / 1024
                print(f"\rUploaded {KB:4.2f}KB at {KB/elapse:4.2f} KB/s",
                      end='')
            else:
                MB = total / 1024**2
                print(f"\rUploaded {MB:8.2f}MB at {MB/elapse:6.2f} MB/s",
                      end='')
            sys.stdout.flush()

        try:
            print(path, bucket, key)
            s3.upload_file(str(path), bucket, key, Callback=callback)
        except ClientError as e:
            print()
            logging.error(
                f"Failed to upload {path} to s3://{bucket}/{key}: {e}")
            return False
        else:
            print()
            logging.info(f"Uploaded {path} to s3://{bucket}/{key}")
            return True
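
Usage is a single call; the bucket and key below are placeholders:

ok = upload_s3('weights/best.pt', 'my-bucket', 'models/best.pt')
assert ok, 'upload failed'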
Example #25
def test_io_video(vid):
    with tempfile.TemporaryDirectory() as tmp:
        # FIXME: fps must be 11+
        path = f"{tmp}/video.mp4"
        io.save(vid, path, fps=12, video_codec='h264')
        size = os.path.getsize(path)
        logging.info(f"saved video to {path} of {size} bytes")
        assert size > 0

        video, audio, meta = io.load(path)
        assert video.shape == vid.shape
        assert video.dtype == vid.dtype
Example #26
File: youtube.py Project: necla-ml/ML-WS
def yt_hls_url(url, *args, file_name='video.h264', **kwargs):
    """
    Get hls url if live stream else download video and transcode
    params: 
        url - youtube url to download video from
        start - start video from this timestamp(00:00:15)
        end - end video after this timestamp(00:00:10)
    Returns:
        video path or url(str)
    """
    res = None
    start = kwargs.pop('start', None)
    # NOTE: enforce 5 min limit on non-live youtube videos
    end = kwargs.pop('end', None)
    if not end:
        end = '00:05:00'
    try:
        # video is live --> get hls url and stream
        res = subprocess.run(['youtube-dl', '-f', '95', '-g', url], stdout=subprocess.PIPE) \
            .stdout.decode('utf-8') \
            .strip()
        if not res:
            logging.warning(
                "video is not live --> transcode to h264 and stream")
            url = subprocess.run(
                ['youtube-dl', '-f', 'best', '-g', url],
                stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
            cmd = f'ffmpeg '
            if start:
                cmd += f'-ss {start} '
            cmd += f'-i {url} '
            if end:
                cmd += f'-t {end} '
            cmd += ('-an -s 1280x720 -g 15 -r 15 -b 2M '
                    '-vcodec h264 -bf 0 -bsf h264_mp4toannexb '
                    f'-y {file_name}')

            cmd = shlex.split(cmd)
            output = subprocess.call(cmd)
            res = os.path.abspath(file_name)
    except Exception as e:
        # TODO: handle transcoding error with proper errno key
        #sys.exit(errno.)
        logging.info(e)

    return res
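
A call sketch; the URL below is a placeholder:

# Live stream: returns an HLS url; otherwise: a local transcoded h264 path.
src = yt_hls_url('https://www.youtube.com/watch?v=XXXXXXXXXXX',
                 start='00:00:15', end='00:01:00')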
Example #27
 def __init__(self, *args, **kwargs):
     src = args[0]
     if src.isnumeric():
         # camera index in the system
         self.src = int(src)
     else:
         # filesystem path
         path = Path(src)
         if path.exists():
             logging.info(f"Local source path {path}")
         else:
             logging.info(f"Assume remote source URL {src}")
         self.src = src
Example #28
File: driver.py Project: necla-ml/ML-WS
 def run(self):
     while not self.stop_event.is_set():
         try:
             msg = self._queue.get(block=True)
             if msg is not None:
                 if self.env == 'TEST':
                     logging.info(f'[{self.name}] Published {msg}')
                 else:
                     send_msg(self.sqs, msg)
                 self._queue.task_done()
         except Exception as e:
             logging.error(f'[{self.name}] Monitor Failed: {e}')
             raise e
Example #29
def download_gdrive(id='1mM8aZJlWTxOg7BZJvNUMrTnA2AbeCVzS',
                    path='/tmp/yolov5x.pt',
                    force=False):
    # https://gist.github.com/tanaikech/f0f2d122e05bf5f971611258c22c110f
    # Downloads a file from Google Drive, accepting presented query
    # from utils.google_utils import *; gdrive_download()
    import time
    t = time.time()

    if os.path.exists(path):
        if force:
            os.remove(path)
            logging.warning(f"Removed existing download: {path}")
        else:
            logging.warning(
                f"Download exists: {path}, specify force=True to remove if necessary"
            )
            return 0

    logging.info(
        f'Downloading https://drive.google.com/uc?export=download&id={id} to {path}...'
    )
    os.remove('cookie') if os.path.exists('cookie') else None

    # Attempt file download
    os.system(
        f"curl -c ./cookie -s -L \'https://drive.google.com/uc?export=download&id={id}\' > /dev/null"
    )
    if os.path.exists('cookie'):  # large file
        # s = "curl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=%s\" -o %s" % (id, path)
        s = f"curl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {{print $NF}}' ./cookie`&id={id}\" -o {path}"
    else:  # small file
        s = f"curl -s -L -o {path} 'https://drive.google.com/uc?export=download&id={id}'"
    r = os.system(s)  # execute, capture return values
    os.remove('cookie') if os.path.exists('cookie') else None

    # Error check
    if r != 0:
        os.remove(path) if os.path.exists(path) else None  # remove partial
        logging.error(f'Failed to download to {path}')  # or raise Exception('Download error')
        return r
    '''
    # Unzip if archive
    if path.endswith('.zip'):
        logging.info('Unzipping... ')
        os.system('unzip -q %s' % path)  # unzip
        os.remove(path)  # remove zip to free space
    '''
    logging.info(f'Done in {time.time() - t:.1f}s')
    return r
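
Called with its defaults it fetches the checkpoint named in the signature; force=True replaces any existing file:

rc = download_gdrive(force=True)  # uses the default id and /tmp/yolov5x.pt
assert rc == 0, 'download failed'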
Example #30
def test_streaming_all(ip, port, user, passwd, duration=5):
    nvr = NVR.create(ip, port, user=user, passwd=passwd)
    assert isinstance(nvr, Crystal)
    nvr.connect()
    logging.info(
        f"##### Probing all streaming profiles for {duration} frames #####")
    for _, setup in nvr:
        for cam in setup['devices']:
            cfgs = nvr.query(area=cam['area'], profile=0)
            assert len(cfgs) == 1
            cfg = cfgs[0]
            assert len(cfg) == 4
            startStreaming(nvr, cfg, duration)
            print()