Example #1
    def read(self, session, media='video', format='BGR'):
        if session is None or media not in session:
            logging.error(f"{media} not in session to read")
            return None
        try:
            meta = session[media]
            # Create the framer generator lazily and cache it in the session metadata
            framer = meta.get('framer')
            if framer is None:
                if media == 'video':
                    framer = self.read_video(session, format)
                elif media == 'audio':
                    framer = self.read_audio(session)
                meta['framer'] = framer
            meta, frame = next(framer)
        except StopIteration:
            # EOS
            return None
        except Exception as e:
            logging.info(f"Failed to read a frame: {e}")
            raise e
        else:
            return media, meta, frame
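# A hedged consumption sketch for read(): 'reader' and 'session' are hypothetical,
# standing in for an instance of this class and a session returned by one of the
# open() methods shown in later examples.
while True:
    res = reader.read(session, media='video', format='BGR')
    if res is None:
        break  # EOS or nothing readable in the session
    media, meta, frame = res  # note the (media, meta, frame) ordering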
Example #2
def transcode_to_h264(pth, cmd=None):
    """
    Transcode video to h264 (Annex B)
    Params:
        pth - path to the video to be transcoded
        cmd - optional ffmpeg command line overriding the default
    Returns:
        path to the transcoded video, or None on failure
    """
    output_pth = None
    try:
        basename = os.path.basename(pth).split('.')[0]
        output_pth = os.path.join(os.path.dirname(pth), f'{basename}.h264')

        if not cmd:
            cmd = f'''ffmpeg -i {pth} \
            -c:v copy \
            -bsf h264_mp4toannexb \
            -y {output_pth}'''

        cmd = shlex.split(cmd)
        ret = subprocess.call(cmd)
        if ret != 0:
            logging.error(f'ffmpeg exited with {ret} transcoding {pth}')
            output_pth = None
    except Exception as e:
        logging.error(f'Error transcoding the video at {pth}: {e}')
        output_pth = None

    return output_pth
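# A minimal usage sketch, assuming ffmpeg is on PATH; the input path is hypothetical.
h264 = transcode_to_h264('/tmp/sample.mp4')
if h264:
    print(f'Annex B bitstream written to {h264}')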
Example #3
def get_nvr_credentials(rds, site_id, private_key) -> dict:
    """Get nvr_credentials given site_id
    Params:
        rds: RDS object to call sql execution
        site_id: site id 
        private_key: private key to decrypt the password
    Returns: 
        credentials(dict)
    """
    from .utils import decrypt
    sql = """SELECT * FROM nvr_credentials WHERE site_id = :site_id"""
    sql_parameters = {'site_id': int(site_id)}
    credentials = {}
    try:
        response = rds.execute_statement(sql, sql_parameters)
        records = response.get('records')
        if records:
            credentials = records[0]
        # decrypt password with private key
            passwd = credentials.get('passwd').encode()
            decrypted_passwd = decrypt(private_key=private_key, value=passwd)
            credentials['passwd'] = decrypted_passwd
    except Exception as e:
        logging.error(f"{e}, assume credentials in the environment")
        return dict(os.environ)
    else:
        return credentials
Example #4
    def __next__(self):
        boundary = self.stream.read(len(self.boundary))
        #print('##### boundary: ', boundary)
        assert boundary == self.boundary, f'Unexpected boundary: {boundary}'
        self.buf += self.stream.read(self.bsize)
        if len(self.buf) < self.bsize:
            logging.error(f'EOS: only {len(self.buf)} bytes in the buffer < {self.bsize}')
            raise StopIteration

        #print(f'peek: {buf[:self.bsize]}')
        part = BodyPart(self.buf, self.encoding)
        if b'content-length' in part.headers:
            length = int(part.headers[b'content-length'].decode(self.encoding)) # excluding newline
            # assert length >= self.bsize, f"{length} < bsize({self.bsize})"
            size = length - len(part.content) # + len(self.newline)
            if size < 0: # over read
                # print(f"[PartIterator] Over read {-size} bytes")
                self.buf = part.content[size:]
                part.content = part.content[:size]
                if len(self.buf) < len(self.newline):
                    self.stream.read(len(self.newline) - len(self.buf))
                    self.buf.clear()
                else:
                    self.buf = self.buf[len(self.newline):]
            else: # under read
                part.content += self.stream.read(size)
                self.stream.read(len(self.newline))
                self.buf.clear()
            # print('[PartIterator]', part.headers, len(part.content))
            return part
        else:
            logging.error(f'No Content-Length in the part headers: {part.headers}')
            raise StopIteration
Example #5
def get_secret(secret_id="KinesisVideoStreamProducerCredentials"):
    """"
    Helper function to get the credentials from AWS secret manager
    Params: 
        secret_id: id to be fetched from secret store
    Returns: 
        secret(json)
    """
    secret = None
    # init Secrets Manager client
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name='us-east-1',
    )
    try:
        get_secret_value_response = client.get_secret_value(SecretId=secret_id)
    except ClientError as e:
        logging.error(f"{e}, assume secrets in the environment")
        return os.environ
    else:
        # secret could be string or binary
        if 'SecretString' in get_secret_value_response:
            secret = get_secret_value_response['SecretString']
        else:
            secret = base64.b64decode(get_secret_value_response['SecretBinary']) 
    
    return json.loads(secret) 
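# A hedged usage sketch: the secret id is the function's default, and the key name
# looked up below is an assumption about the stored JSON, not a documented field.
creds = get_secret()
access_key = creds.get('aws_access_key_id')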
Example #6
def get_s3_video(url,
                 *args,
                 transcode=False,
                 bucket='eigen-stream-videos',
                 pth='assets/',
                 **kwargs):
    """
    Download video from s3 and transcode
    Params:
        url - s3 key name
        transcode - transcode video to h264
        bucket - s3 bucket name
        pth - local directory to download into
    Returns:
        video path or url(str)
    """
    # create path if not exist
    key = url.split('s3://')[1]
    pth = Path(pth)
    pth.mkdir(parents=True, exist_ok=True)
    download_path = Path(f'{pth}/{key}')
    # download from s3
    s3.download_key(key=key, pth=download_path, bucket=bucket, reload=True)
    if download_path.exists():
        if transcode:
            # transcode video to h264
            transcoded = utils.transcode_to_h264(pth=download_path)
            if not transcoded:
                logging.error(f'Error transcoding video at: {download_path}')
                sys.exit(1)
            download_path = transcoded
    else:
        logging.error(f'Failed downloading key: {key} from bucket: {bucket}')
        sys.exit(1)

    return str(download_path)
Example #7
    def run(self):
        # loop streaming thread until stop_event is set
        while not self.stop_event.is_set():
            try:
                # get task_type and respective task function
                task_type = self._args.get('stream_type')
                TASKS[task_type](
                    self._args,
                    self.stop_event
                )
            except Exception as e:
                # exception occurred:
                # put to monitor queue for updating the database
                logging.error(e)
                if self.exception_count == 0:
                    stream_id = self._args.get('stream_id')
                    msg = dict(
                        timestamp=time.time(),
                        payload=str(e),
                        msg_type=MSG_TYPE.ERROR,
                        stream_id=stream_id
                    )
                    self.monitor.put_msg(msg)

                self.exception_count += 1
                # linearly increasing delay before restarting
                time.sleep(self.exception_count * 10)
                # XXX: make sure the delay does not get too big 
                if self.exception_count == self.max_exceptions:
                    self.exception_count = 1
                logging.info(f'[{self.name}] Restarting streaming producer on error')
Example #8
def send_msg(sqs, msg):
    try:
        global ECS_TASK_ID
        global DOCKER_ID
        if not ECS_TASK_ID:
            try:
                out = get_task_id_on_instance()
                ECS_TASK_ID = out.get('task_id', 'INVALID')
                DOCKER_ID = out.get('docker_id', 'INVALID')
            except Exception as e:
                logging.error(e)
                ECS_TASK_ID = uuid4().hex
                DOCKER_ID = uuid4().hex

        msg['ecs_task_id'] = ECS_TASK_ID
        msg['ecs_docker_id'] = DOCKER_ID
        msg['publisher'] = 'STREAMING_TASK'
        stream_id = msg['stream_id']
        # NOTE: Messages that belong to the same message group are always processed one by one, 
        # in a strict order relative to the message group 
        # (however, messages that belong to different message groups might be processed out of order). 
        sqs.send_message(
            message_body=json.dumps(msg),
            message_group_id=f'{stream_id}'
        )
    except Exception as e:
        logging.error(f'SQS send message failed: {e}')
        raise e
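# The NOTE above describes SQS FIFO semantics. A minimal boto3 sketch (queue URL
# hypothetical): messages sharing a MessageGroupId are delivered strictly in order.
import json
import boto3

client = boto3.client('sqs')
queue_url = 'https://sqs.us-east-1.amazonaws.com/123456789012/streams.fifo'
for seq in (1, 2):
    client.send_message(
        QueueUrl=queue_url,
        MessageBody=json.dumps({'seq': seq}),
        MessageGroupId='stream-42',                # same group => strict ordering
        MessageDeduplicationId=f'stream-42-{seq}'  # required without content-based dedup
    )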
Example #9
    def open(self, start=None, end=None, timestamp='PRODUCER', **kwargs):
        """Start streaming from KVS.
        Streaming may be interleaved with media of different formats.
        Kwargs:
            start(float): KVS start timestamp
            end(float): KVS end timestamp
            expires(int): streaming expiry
            timestamp: [ SERVER | PRODUCER ]
                DeepLens allows for either timestamp.
                NUUO works only with PRODUCER timestamp.
        """
        try:
            now = start or time()
            url = kvs_session_url(self.stream, start, end, timestamp, **kwargs)
            session = openAV(url, adaptive=False, **kwargs)
        except Exception as e:
            # TODO Possible causes:
            # - [tcp @ 0x7fde64038000] Connection to tcp://b-604520a7.kinesisvideo.us-east-1.amazonaws.com:443 failed: Connection timed out
            # - [tls @ 0x7f4660073f40] error:00000000:lib(0):func(0):reason(0)
            # - botocore.errorfactory.ResourceNotFoundException:
            #       An error occurred (ResourceNotFoundException) when calling the GetHLSStreamingSessionURL operation:
            #       No fragments found in the stream for the streaming request.
            logging.error(f"Failed to open {self.url}: {e}")
            raise e
        else:
            #logging.info(f"KVS session start: requested={now:.3f}s, actual={session['start']:.3f}s")
            return session
Example #10
def startStreaming(nvr, cfg, duration=None):
    cam, profile, codec = cfg
    host = f'{nvr.ip}:{nvr.port}'
    url = f'http://{host}'
    show = sys.x_available()
    decoder = av.CodecContext.create(codec.replace('.', '').lower(), "r")
    stream = nvr.startStreaming(cfg, debug=False)
    logging.info(
        f"##### {cam['area']}: streaming({profile}/{codec}) for {duration} frames #####"
    )
    frames = 0
    for i, (pkt, (media, cc)) in enumerate(stream):
        try:
            # Unsupported codecs, e.g.:
            #   - b'audio/x-g711-mulaw'
            if media != 'video' or decoder.name not in cc:
                print(f"[{i}] Skipped unsupported frame: {media}/{cc}")
                continue

            packet = av.Packet(pkt['payload'])
            frame = decoder.decode(packet)[0].to_rgb().to_ndarray()[:, :, ::-1]
        except Exception as e:
            logging.error(f"[{frames}] Failed to decode frame: {e}")
            break
        else:
            if show:
                cv.imshow(f'Live: {url}', frame)
                cv.waitKey(1)
            frames += 1
            logging.info(
                f"[{i}] {media}/{cc} of {len(pkt)} bytes in {frame.shape}")
            if duration is not None and frames == duration:
                cv.destroyAllWindows()
                break
Example #11
    def run(self):
        while not self.stop_event.is_set():
            try:
                # blocking operation
                source, headers, message = self.broker.get_msg()
                self.handler(source, headers, message)
                self.broker.task_done()
            except Exception as e:
                logging.error(f'[{self.name}] Driver Failed: {e}')
                raise e
Example #12
    def open(self, *args, **kwargs):
        try:
            if not self.path:
                self.path = yt_hls_url(self.url, *args, **kwargs)
            session = openAV(self.path, *args, **kwargs)
        except Exception as e:
            logging.error(f"Failed to open {self.url}: {e}")
            raise e
        else:
            return session
Example #13
    def __init__(self,
                 resolution=None,
                 fps=None,
                 gop=None,
                 bitrate=None,
                 ch=1):
        if fps:
            ret = os.system(f"{MXUVC_BIN} --ch {ch} framerate {fps}")
            if ret > 0:
                self.fps = None
                logging.error(
                    f"Failed to set FPS to {fps} w/ ret={ret}, typically permission denied"
                )
            else:
                self.fps = fps

        if resolution:
            res = RESOLUTION[resolution]
            ret = os.system(
                f"{MXUVC_BIN} --ch {ch} resolution {res[0]} {res[1]}")
            if ret > 0:
                self.resolution = None
                logging.error(
                    f"Failed to set resolution to {res} w/ ret={ret}, typically permission denied"
                )
            else:
                self.resolution = resolution

        gop = gop or fps
        if gop:
            ret = os.system(f"{MXUVC_BIN} --ch {ch} gop {gop}")
            if ret > 0:
                self.gop = None
                logging.error(
                    f"Failed to set GOP to {gop} w/ ret={ret}, typically permission denied"
                )
            else:
                self.gop = gop

        if bitrate:
            ret = os.system(f"{MXUVC_BIN} --ch {ch} bitrate {bitrate}")
            if ret > 0:
                self.bitrate = None
                logging.error(
                    f"Failed to set bitrate to {bitrate} w/ ret={ret}, typically permission denied"
                )
            else:
                self.bitrate = bitrate

        ret = os.system(f"{MXUVC_BIN} --ch {ch} iframe")
        if ret > 0:
            logging.error(f"Failed to force generating an IFrame")

        self.stream = None
        self.ch = ch
Example #14
    def open(self, *args, **kwargs):
        try:
            api_client = SUPPORTED_APIS[self.api]()
            self.path = api_client.stream_url(self.channel_id,
                                              self.stream_type)
            session = openAV(self.path, **kwargs)
        except Exception as e:
            logging.error(f"Failed to open session: {e}")
            raise e
        else:
            return session
Example #15
def upload_s3(path, bucket, key):
    '''
    Args:
        path(str): path to the file to upload
        bucket(str): S3 bucket name
        key(str): key to upload to the bucket where the ending '/' matters
    '''
    try:
        import botocore, boto3
        from botocore.exceptions import ClientError
    except ImportError as e:
        logging.warning(
            f'botocore and boto3 are required to download from S3: {e}')
        return False
    else:
        # XXX Amazon S3 supports buckets and objects, and there is no hierarchy.
        path = Path(path)
        s3 = boto3.resource('s3').meta.client
        if not path.is_file():
            logging.error(f"{path} not exist or not a file to upload")
            return False
        total = 0
        start = time()

        def callback(nbytes):
            # 'nbytes' avoids shadowing the builtin 'bytes'
            nonlocal total
            total += nbytes
            elapsed = time() - start
            if total < 1024:
                print(
                    f"\rUploaded {total:4d} bytes at {total / elapsed:.2f} bytes/s",
                    end='')
            elif total < 1024**2:
                KB = total / 1024
                print(f"\rUploaded {KB:4.2f}KB at {KB/elapsed:4.2f} KB/s",
                      end='')
            else:
                MB = total / 1024**2
                print(f"\rUploaded {MB:8.2f}MB at {MB/elapsed:6.2f} MB/s",
                      end='')
            sys.stdout.flush()

        try:
            s3.upload_file(str(path), bucket, key, Callback=callback)
        except ClientError as e:
            print()
            logging.error(
                f"Failed to upload {path} to s3://{bucket}/{key}: {e}")
            return False
        else:
            print()
            logging.info(f"Uploaded {path} to s3://{bucket}/{key}")
        return True
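# A usage sketch with hypothetical bucket/key values; the final path component of
# the key becomes the object name in S3.
if not upload_s3('/tmp/clip.h264', 'eigen-stream-videos', 'uploads/clip.h264'):
    print('upload failed or boto3 unavailable')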
Example #16
    def run(self):
        while not self.stop_event.is_set():
            try:
                msg = self._queue.get(block=True)
                if msg is not None:
                    if self.env == 'TEST':
                        logging.info(f'[{self.name}] Published {msg}')
                    else:
                        send_msg(self.sqs, msg)
                # Account for every get() so queue.join() does not hang on None sentinels
                self._queue.task_done()
            except Exception as e:
                logging.error(f'[{self.name}] Monitor Failed: {e}')
                raise e
Example #17
def download_gdrive(id='1mM8aZJlWTxOg7BZJvNUMrTnA2AbeCVzS',
                    path='/tmp/yolov5x.pt',
                    force=False):
    # https://gist.github.com/tanaikech/f0f2d122e05bf5f971611258c22c110f
    # Downloads a file from Google Drive, accepting presented query
    # from utils.google_utils import *; gdrive_download()
    import time
    t = time.time()

    if os.path.exists(path):
        if force:
            os.remove(path)
            logging.warning(f"Removed existing download: {path}")
        else:
            logging.warning(
                f"Download exists: {path}, specify force=True to remove if necessary"
            )
            return 0

    logging.info(
        f'Downloading https://drive.google.com/uc?export=download&id={id} to {path}...'
    )
    os.remove('cookie') if os.path.exists('cookie') else None

    # Attempt file download
    os.system(
        f"curl -c ./cookie -s -L \'https://drive.google.com/uc?export=download&id={id}\' > /dev/null"
    )
    if os.path.exists('cookie'):  # large file
        # s = "curl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {print $NF}' ./cookie`&id=%s\" -o %s" % (id, path)
        s = f"curl -Lb ./cookie \"https://drive.google.com/uc?export=download&confirm=`awk '/download/ {{print $NF}}' ./cookie`&id={id}\" -o {path}"
    else:  # small file
        s = f"curl -s -L -o {path} 'https://drive.google.com/uc?export=download&id={id}'"
    r = os.system(s)  # execute, capture return values
    os.remove('cookie') if os.path.exists('cookie') else None

    # Error check
    if r != 0:
        os.remove(path) if os.path.exists(path) else None  # remove partial
        logging.error(f'Failed to download to {path}')  # raise Exception('Download error')
        return r
    '''
    # Unzip if archive
    if path.endswith('.zip'):
        logging.info('Unzipping... ')
        os.system('unzip -q %s' % path)  # unzip
        os.remove(path)  # remove zip to free space
    '''
    logging.info(f'Done in {time.time() - t:.1f}s')
    return r
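# A hedged usage sketch with the function's default file id; a zero return means
# the file was downloaded (or already present), mirroring the curl exit status.
rc = download_gdrive(path='/tmp/yolov5x.pt')
print('ok' if rc == 0 else f'curl exited with {rc}')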
Example #18
    def open(self, *args, **kwargs):
        try:
            if not self.path:
                transcode = kwargs.pop('transcode', False)
                bucket = kwargs.pop('bucket', 'eigen-stream-videos')
                self.path = get_s3_video(self.url,
                                         transcode=transcode,
                                         bucket=bucket)
            session = openAV(self.path, **kwargs)
        except Exception as e:
            logging.error(f"Failed to open {self.url}: {e}")
            return None
        else:
            return session
Example #19
    def send_records(self, records, attempt=0):
        """Send records to the Kinesis stream.
        Failed records are sent again with an exponential backoff decay.
        Parameters
        ----------
        records : array
            Array of formatted records to send.
        attempt: int
            Number of times the records have been sent without success.
        """

        # If we already tried more times than we wanted, save to a file
        if attempt > self.max_retries:
            logging.warning(
                f'[{self._name}] Writing {len(records)} records to file')
            with open('failed_records.dlq', 'ab') as f:
                for r in records:
                    f.write(r.get('Data'))
            return

        # Sleep before retrying
        if attempt:
            time.sleep(2**attempt * .1)

        try:
            response = self.kinesis_client.put_records(
                StreamName=self.stream_name, Records=records)
        except Exception as e:
            logging.error(f'[{self._name}]: {e}')
            raise e
        else:
            failed_record_count = response['FailedRecordCount']

            # Grab failed records
            if failed_record_count:
                logging.warning(f'[{self._name}] Retrying failed records')
                failed_records = []
                for i, record in enumerate(response['Records']):
                    if record.get('ErrorCode'):
                        failed_records.append(records[i])

                # Recursive call
                attempt += 1
                self.send_records(failed_records, attempt=attempt)
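# For reference, the retry schedule implied by time.sleep(2**attempt * .1) above:
# attempts 1..5 back off 0.2s, 0.4s, 0.8s, 1.6s, 3.2s before each resend.
delays = [2**a * 0.1 for a in range(1, 6)]
assert delays == [0.2, 0.4, 0.8, 1.6, 3.2]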
Example #20
File: math.py Project: necla-ml/ML
def round(n, digits=None):
    r"""Round to the specified number of digits by input types.
    """
    if isinstance(n, float):
        if digits:
            return _round(n, digits)
        else:
            return int(Decimal(n).to_integral_value(rounding=ROUND_HALF_UP))
    else:
        try:
            import torch as th
            if th.is_tensor(n):
                n.apply_(lambda x: round(x, digits))
                return n
        except ImportError as e:
            logging.error(
                f"No pytorch to round potential tensor input of type {type(n)}"
            )
            raise e
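# The ROUND_HALF_UP branch differs from the stdlib's banker's rounding; a quick check:
assert round(2.5) == 3   # this helper rounds half away from zero
import builtins
assert builtins.round(2.5) == 2  # stdlib rounds half to even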
Example #21
def setup_test():
    import subprocess
    import shlex
    import socket
    # XXX: ActiveMQ broker TEST server
    # stomp protocol @port 61613
    try:
        cmd = shlex.split('docker run --rm --name activemq -d -p 61616:61616 -p 8161:8161 -p 61613:61613 rmohr/activemq')
        subprocess.run(cmd)
    except Exception as e:
        logging.error(e)
    ip = socket.gethostbyname(socket.gethostname())
    broker_creds = {
        'server': ip,
        'user': '******',
        'passwd': 'admin',
        'port': 61613
    }
    return broker_creds
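# A hedged smoke test against the broker started above, using the stomp.py client
# (an assumption; any STOMP 1.x client on port 61613 works). The queue name is made
# up, and the user field is redacted in this listing, so substitute real credentials.
import stomp

creds = setup_test()
conn = stomp.Connection([(creds['server'], creds['port'])])
conn.connect(creds['user'], creds['passwd'], wait=True)
conn.send(destination='/queue/test', body='ping')
conn.disconnect()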
Example #22
def encrypt(private_key, value):
    """
    Encrypt value with the private key
    Params: 
        private_key: encoded str or str
        value: (encoded str or dict or str)value to be encrypted by the private key
    Returns: 
        encrypted value(string)
    """
    cipher_key = private_key.encode() if not isinstance(private_key, bytes) else private_key # encode private key
    # init fernet
    cipher = Fernet(cipher_key)
    if isinstance(value, dict):
        value = json.dumps(value)
    value = value.encode() if not isinstance(value, bytes) else value # encode value
    encrypted_value = None
    try:
        encrypted_value = cipher.encrypt(value) # encrypt with cipher
        encrypted_value = encrypted_value.decode()
    except Exception as e:
        logging.error(e)

    return encrypted_value
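# Example #3 imports a matching decrypt helper; a minimal sketch of what it plausibly
# looks like (an assumption, not the project's actual code), inverting encrypt with
# the same Fernet key.
import json
import logging
from cryptography.fernet import Fernet

def decrypt(private_key, value):
    """Assumed inverse of encrypt: returns the decrypted string, or None on failure."""
    cipher_key = private_key.encode() if not isinstance(private_key, bytes) else private_key
    cipher = Fernet(cipher_key)
    value = value.encode() if not isinstance(value, bytes) else value
    try:
        return cipher.decrypt(value).decode()
    except Exception as e:
        logging.error(e)
        return None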
Example #23
def test_pool_map(chunksize):
    from multiprocessing import Pool
    pool = Pool(processes=3)
    done = []
    failed = []
    t = time()
    tasks = list(range(8))
    args = [1,3,5,0,1,3,0,4]
    for tid, res in tqdm(pool.imap(process_division, zip(tasks, args), chunksize), total=len(tasks)):
        if isinstance(res, Exception):
            e = res
            failed.append(tid)
            logging.error(f"task[{tid}] Failed division due to {e}")
        else:
            done.append(tid)

    logging.info(f"Tasks={list(zip(tasks, args))}")    
    logging.info(f"Done tasks={done}")    
    logging.info(f"Failed tasks={failed}")
    assert len(done) == 3
    assert len(failed) == len(tasks) - len(done)
Example #24
def posenet(pretrained=False,
            arch='litehrnet_30_coco_384x288',
            model_dir=None,
            force_reload=False,
            unload_after=False,
            **kwargs):
    """
    Kwargs:
        pretrained(bool, str): True for official checkpoint or path(str) to load a custom checkpoint

        tag(str): git repo tag to explicitly specify a particular commit
        url(str): direct url to download checkpoint
        s3(dict): S3 source containing bucket and key to download a checkpoint from
        threshold(float):
    
    """
    ARCH = dict(litehrnet_18_coco_256x192='1ZewlvpncTvahbqcCFb-95C3NHet30mk5',
                litehrnet_18_coco_384x288='1E3S18YbUfBm7YtxYOV7I9FmrntnlFKCp',
                litehrnet_30_coco_256x192='1KLjNInzFfmZWSbEQwx-zbyaBiLB7SnEj',
                litehrnet_30_coco_384x288='1BcHnLka4FWiXRmPnJgJKmsSuXXqN4dgn',
                litehrnet_18_mpii_256x256='1bcnn5Ic2-FiSNqYOqLd1mOfQchAz_oCf',
                litehrnet_30_mpii_256x256='1JB9LOwkuz5OUtry0IQqXammFuCrGvlEd')

    tag = kwargs.get('tag', GITHUB['tag'])
    modules = sys.modules.copy()
    entry = 'posenet'
    m = None
    try:
        logging.info(f"Creating '{entry}(arch={arch})'")
        m = hub.load(github(tag=tag), entry, arch, force_reload=force_reload)
        m.tag = tag
        if pretrained:
            if isinstance(pretrained, bool):
                # official pretrained
                state_dict = from_pretrained(f"{entry}.pt",
                                             force_reload=force_reload,
                                             gdrive=dict(id=ARCH[arch]))
            else:
                # custom checkpoint
                path = Path(pretrained)
                if not path.exists():
                    path = f"{hub.get_dir()}/{pretrained}"
                state_dict = io.load(path, map_location='cpu')
                state_dict = {
                    k: v
                    for k, v in state_dict.items()
                    if m.state_dict()[k].shape == v.shape
                }
                # load_checkpoint(model, path, map_location='cpu')
            m.load_state_dict(state_dict, strict=True)
        logging.info(f"kwargs={kwargs}")
        if kwargs.get('fp16', False):
            from mmpose.core import wrap_fp16_model
            wrap_fp16_model(m)
            logging.info(f"[posnet] wrapped in fp16")
        if kwargs.get('fuse_conv_bn', True):
            from mmcv.cnn import fuse_conv_bn
            m = fuse_conv_bn(m)
            logging.info(f"[posenet] fused conv and bn")
    except Exception as e:
        logging.error(f"Failed to load '{entry}': {e}")
        raise e
    finally:
        # XXX Remove newly imported modules in case of conflict with next load
        if unload_after:
            for module in sys.modules.keys() - modules.keys():
                del sys.modules[module]
    m.to('cpu')
    return m
Example #25
    def run(self):
        try:
            self.monitor()
        except Exception as e:
            logging.error(e)
            raise e
Example #26
def train_yolo5():
    """Train YOLOv5 over a dataset in YOLO5/COCO format.
    
    Usage:
        train_yolo5 ../../datasets/Retail81 --with-coco --names object --device 1 --batch-size 32
    
    References:
        python train.py --data coco.yaml --cfg yolov5x.yaml --weights yolov5x.pt --device 1 --batch-size 16
    """
    parser = argparse.ArgumentParser(
        'Train YOLO5 over a dataset in YOLO5/COCO format')
    parser.add_argument('path', help='Path to a dataset in YOLO/COCO format')
    parser.add_argument('--arch',
                        choices=['yolov5l', 'yolov5x'],
                        default='yolov5x',
                        help='YOLOv5 model architecture to train')
    parser.add_argument('--tag',
                        choices=['v1.0', 'v2.0', 'v3.0'],
                        default='v3.0',
                        help='YOLOv5 repo version tag')
    parser.add_argument('--batch-size', type=int, default=16)
    parser.add_argument('--device',
                        default='',
                        help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--names', nargs='*', default=[], help='class names')
    parser.add_argument('--with-coco',
                        action='store_true',
                        help='Including COCO80 classes')
    args = parser.parse_args()
    logging.info(
        f"dataset={args.path}, arch={args.arch}, names={args.names}, with_coco={args.with_coco}"
    )

    path = Path(args.path).resolve()
    data = path / 'data'
    data.mkdir(exist_ok=True)
    data /= f"{path.name.lower()}.yaml"
    models = path / "models"
    models.mkdir(exist_ok=True)
    cfg = f"{args.arch}.yaml"
    weights = f"{args.arch}.pt"
    names = args.names
    if args.with_coco:
        from ml.vision.datasets.coco import COCO80_CLASSES
        names = COCO80_CLASSES + names
    nc = len(names)
    assert nc > 0
    with open(data, 'w') as f:
        yaml = f"""
train: { path / 'train.txt' }
val: { path / 'val.txt' }
nc: {nc}
names: {names}
"""
        print(yaml, file=f)
        logging.info(yaml)

    from ml.vision.models.detection.yolo5 import github
    from ml.utils import Config
    from ml import hub
    repo = hub.repo(github(tag=args.tag), force_reload=False)
    config = Config().load(f"{repo}/models/{cfg}")
    config.nc = nc
    config.save(models / cfg)

    os.chdir(repo)
    cmd = f"python train.py --data {data.resolve()} --cfg {models / cfg} --weights {weights} --device {args.device} --batch-size {args.batch_size} --epochs {args.epochs}"
    logging.info(f"wd={os.getcwd()}")
    logging.info(cmd)
    r = os.system(cmd)
    if r:
        logging.error(f"Failed train YOLOv5 over {path} with res={r}")
    else:
        # v1.0
        # logging.info(f"Best trained checkpoiont at {repo}/weights/best.pt")
        # last v1.0 and v2.0+
        logging.info(
            f"Best trained checkpoint at {repo}/runs/expX/weights/best.pt")
Example #27
    def read_video(self, session, format='BGR'):
        meta = session['video']
        stream = meta['stream']
        codec = meta['codec']
        workaround = meta['workaround']
        while True:
            try:
                pkt = next(stream)
            except StopIteration:
                # Demuxer exhausted: end the stream here rather than
                # dereference a None packet below
                logging.warning("EOS on demuxer exhaustion")
                return None
            now = time.time()
            prev = meta.get('prev', None)
            if prev is None:
                if not pkt.is_keyframe:
                    # Some RTSP source may not send key frame to begin with e.g. wisecam
                    logging.warning(
                        f"No key frame to begin with, skip through")
                    session['start'] = now
                    continue
                meta['keyframe'] = pkt.is_keyframe
                meta['time'] = session['start']
                streams = session['streams']
                sformat = session['format']

                # XXX Stream container package format determines H.264 NALUs in AVCC or Annex B.
                # TODO Streaming NALUs in AVCC
                if 'hls' in sformat or 'rtsp' in sformat or '264' in sformat:
                    # XXX In case of out of band CPD: SPS/PPS in AnnexB.
                    CPD = []
                    if codec.extradata is not None:
                        for (pos, _, _,
                             type), nalu in NALUParser(codec.extradata,
                                                       workaround=workaround):
                            if hasStartCode(nalu):
                                CPD.append(nalu)
                                logging.info(
                                    f"CPD {NALU_t(type).name} at {pos}: {nalu[:8]} ending with {nalu[-1:]}"
                                )
                            else:
                                logging.warning(
                                    f"Invalid CPD NALU({type}) at {pos}: {nalu[:8]} ending with {nalu[-1:]}"
                                )
                                if not CPD:
                                    # Skip all
                                    break
                    NALUs = []
                    if workaround:
                        # FIXME workaround before KVS MKVGenerator deals with NALUs ending with a zero byte
                        #   https://github.com/awslabs/amazon-kinesis-video-streams-producer-sdk-cpp/issues/491
                        for (pos, _, _,
                             type), nalu in NALUParser(memoryview(pkt),
                                                       workaround=workaround):
                            assert hasStartCode(
                                nalu
                            ), f"frame[{meta['count']+1}] NALU(type={type}) at {pos} without START CODE: {nalu[:8].tobytes()}"
                            if type in (NALU_t.SPS, NALU_t.PPS):
                                if CPD:
                                    # NOTE: some streams could have multiple UNSPECIFIED(0) NALUs within a single packet with SPS/PPS
                                    #assert len(CPD) == 2, f"len(CPD) == {len(CPD)}, not 2 for SPS/PPS"
                                    ordinal = type - NALU_t.SPS
                                    if nalu == CPD[ordinal]:
                                        logging.info(
                                            f"frame[{meta['count']+1}] same {NALU_t(type).name}({nalu[:8].tobytes()}) at {pos} as in CPD({CPD[ordinal][:8]})"
                                        )
                                    else:
                                        # FIXME may expect the CPD to be inserted in the beginning?
                                        logging.warning(
                                            f"frame[{meta['count']+1}] inconsistent {NALU_t(type).name}({nalu[:8].tobytes()}) at {pos} with CPD({CPD[ordinal][:8]})"
                                        )
                                        print(f"CPD {NALU_t(type).name}:",
                                              CPD[ordinal])
                                        print(f"NALU {NALU_t(type).name}:",
                                              nalu.tobytes())
                                        # XXX bitstream may present invalid CPD => replacement with bitstream SPS/PPS
                                        CPD[ordinal] = nalu
                                else:
                                    NALUs.append(nalu)
                                    logging.info(
                                        f"frame[{meta['count']+1}] {NALU_t(type).name} at {pos}: {nalu[:8].tobytes()} ending with {nalu[-1:].tobytes()}"
                                    )
                            # XXX KVS master is ready to filter out non-VCL NALUs as part of the CPD
                            # elif type in (NALU_t.IDR, NALU_t.NIDR):
                            elif type in (NALU_t.AUD, NALU_t.SEI, NALU_t.IDR,
                                          NALU_t.NIDR):
                                NALUs.append(nalu)
                                logging.info(
                                    f"frame[{meta['count']+1}] {NALU_t(type).name} at {pos}: {nalu[:8].tobytes()}"
                                )
                            else:
                                # FIXME may expect CPD to be inserted in the beginning?
                                logging.warning(
                                    f"frame[{meta['count']+1}] skipped unexpected NALU(type={type}) at {pos}: {nalu[:8].tobytes()}"
                                )
                        logging.info(
                            f"{pkt.is_keyframe and 'key ' or ''}frame[{meta['count']}] combining CPD({len(CPD)}) and NALUs({len(NALUs)})"
                        )
                    else:
                        NALUs.append(memoryview(pkt))
                        logging.info(
                            f"{pkt.is_keyframe and 'key ' or ''}frame[{meta['count']}] prepending CPD({len(CPD)})"
                        )
                    packet = av.Packet(bytearray(b''.join(CPD + NALUs)))
                    packet.dts = pkt.dts
                    packet.pts = pkt.pts
                    packet.time_base = pkt.time_base
                    pkt = packet
                    if pkt.pts is None:
                        logging.warning(
                            f"Initial packet dts/pts={pkt.dts}/{pkt.pts}, time_base={pkt.time_base}"
                        )
                    elif pkt.pts > 0:
                        logging.warning(
                            f"Reset dts/pts of 1st frame from {pkt.pts} to 0")
                        pkt.pts = pkt.dts = 0
                elif 'dash' in sformat:
                    # TODO In case of out of band CPD: SPS/PPS in AVCC.
                    logging.info(f"DASH AVCC extradata: {codec.extradata}")
                    logging.info(
                        f"pkt[:16]({pkt.is_keyframe}) {memoryview(pkt)[:16].tobytes()}"
                    )
            else:
                keyframe = pkt.is_keyframe
                logging.debug(
                    f"packet[{meta['count']}] {keyframe and 'key ' or ''}dts/pts={pkt.dts}/{pkt.pts}, time_base={pkt.time_base}, duration={pkt.duration}"
                )
                if 'hls' in sformat or 'rtsp' in sformat or '264' in sformat:
                    NALUs = []
                    if workaround:
                        for (pos, _, _,
                             type), nalu in NALUParser(memoryview(pkt),
                                                       workaround=workaround):
                            # assert hasStartCode(nalu), f"frame[{meta['count']+1}] NALU(type={type}) at {pos} without START CODE: {nalu[:8].tobytes()}"
                            # FIXME KVS master is not ready to take AUD/SEI as part of the CPD
                            # if type in (NALU_t.SPS, NALU_t.PPS, NALU_t.IDR, NALU_t.NIDR):
                            if type in (NALU_t.AUD, NALU_t.SEI, NALU_t.SPS,
                                        NALU_t.PPS, NALU_t.IDR, NALU_t.NIDR):
                                NALUs.append(nalu)
                                logging.debug(
                                    f"frame[{meta['count']+1}] {NALU_t(type).name} at {pos}: {nalu[:8].tobytes()}"
                                )
                            else:
                                # FIXME may expect CPD to be inserted?
                                logging.debug(
                                    f"frame[{meta['count']+1}] skipped NALU(type={type}) at {pos}: {nalu[:8].tobytes()} ending with {nalu[-1:].tobytes()}"
                                )
                    else:
                        NALUs.append(memoryview(pkt))
                    # XXX Assume no SPS/PPS change
                    packet = av.Packet(bytearray(b''.join(NALUs)))
                    packet.dts = pkt.dts
                    packet.pts = pkt.pts
                    packet.time_base = pkt.time_base
                    pkt = packet
                frame = prev
                if session['decoding']:
                    try:
                        frames = codec.decode(prev)
                        if not frames:
                            logging.warning(
                                f"Decoded nothing, continue to read...")
                            meta['prev'] = pkt
                            meta['count'] += 1
                            continue
                    except Exception as e:
                        logging.error(
                            f"Failed to decode video packet of size {prev.size}: {e}"
                        )
                        raise e
                    else:
                        # print(prev, frames)
                        frame = frames[0]
                        meta['width'] = frame.width
                        meta['height'] = frame.height
                        if format == 'BGR':
                            frame = frame.to_rgb().to_ndarray()[:, :, ::-1]
                        elif format == 'RGB':
                            frame = frame.to_rgb().to_ndarray()
                if session['rt']:
                    '''
                    Live source from network or local camera encoder.
                    Bitstream contains no pts but frame duration.
                    Adaptive frame duration on drift from wall clock:
                        - Faster for long frame buffering
                        - Fall behind for being slower than claimed FPS: resync as now
                    '''
                    if pkt.pts is not None and not meta['drifting']:
                        # Check if drifting
                        if prev.pts is None:
                            prev.dts = prev.pts = 0
                            logging.warning(
                                "Reset previous packet dts/pts from None to 0")
                        duration = float((pkt.pts - prev.pts) * pkt.time_base)
                        # assert duration > 0, f"pkt.pts={pkt.pts}, prev.pts={prev.pts}, pkt.time_base={pkt.time_base}, pkt.duration={pkt.duration}, prev.duration={prev.duration}, duration={duration}"
                        if duration <= 0:
                            # FIXME RTSP from Dahua/QB and WiseNet/Ernie
                            pts = prev.pts + (meta['duration'] /
                                              pkt.time_base) / 2
                            duration = float((pts - prev.pts) * pkt.time_base)
                            logging.warning(
                                f"Non-increasing pts: pkt.pts={pkt.pts}, prev.pts={prev.pts} => pts={pts}, duration={duration}"
                            )
                            pkt.pts = pts

                        timestamp = meta['time'] + duration
                        if meta['adaptive']:
                            # adaptive frame duration only if not KVS
                            diff = abs(timestamp - now)
                            threshold = meta['thresholds']['drifting']
                            if diff > threshold:
                                meta['drifting'] = True
                                logging.warning(
                                    f"Drifting video timestamps: abs({timestamp:.3f} - {now:.3f}) = {diff:.3f} > {threshold}s"
                                )
                    if pkt.pts is None or meta['drifting']:
                        # Real-time against wall clock
                        duration = now - meta['time']
                        duration = min(1.5 / meta['fps'], duration)
                        duration = max(0.5 / meta['fps'], duration)
                        meta['duration'] = duration
                        yield meta, frame
                        meta['time'] += duration
                    else:
                        meta['duration'] = duration
                        yield meta, frame
                        meta['time'] = timestamp
                else:
                    # TODO: no sleep for being handled by renderer playback
                    # Simulating RT
                    meta['duration'] = 1.0 / meta['fps']
                    slack = (meta['time'] + meta['duration']) - now
                    if slack > 0:
                        logging.debug(
                            f"Sleeping for {slack:.3f}s to simulate RT source")
                        time.sleep(slack)
                    yield meta, frame
                    meta['time'] += meta['duration']
                meta['keyframe'] = keyframe
            if pkt.size == 0:
                logging.warning(f"EOF/EOS on empty packet")
                return None
            else:
                meta['prev'] = pkt
                meta['count'] += 1
Example #28
except ImportError as e:
    # XXX In case of torch <=1.4
    from torch.multiprocessing import *
    from multiprocessing import connection
    import platform, sys, signal
    import ctypes, ctypes.util
    from ml import logging
    if platform.system() == "Windows":
        path_libc = ctypes.util.find_library("msvcrt")
    else:
        path_libc = ctypes.util.find_library("c")

    try:
        libc = ctypes.CDLL(path_libc)
    except OSError as e:
        logging.error("Unable to load the system C library: {e}")
        sys.exit(1)

    _supports_context = sys.version_info >= (3, 4)

    def _python_version_check():
        if not _supports_context:
            raise RuntimeError("Requires python 3.4 or higher to use "
                               "ml.multiprocessing.spawn and "
                               "ml.multiprocessing.ProcessContext helper "
                               "to launch multiple processes.")

    def _wrap(fn, i, args, error_queue):
        # prctl(2) is a Linux specific system call.
        # On other systems the following function call has no effect.
        # This is set to ensure that non-daemonic child processes can
Example #29
def batched_nms(predictions,
                conf_thres=0.3,
                iou_thres=0.6,
                agnostic=False,
                merge=True,
                multi_label=False,
                classes=None):
    """Perform NMS on inference results
    Args:
        prediction(B, AG, 4+1+80): center, width and height refinement, plus anchor and class scores 
                                   per anchor and grid combination
        conf_thres(float): anchor confidence threshold
        iou_thres(float): NMS IoU threshold
        agnostic(bool): class agnostic NMS or per class NMS
        merge(bool): weighted merge by IoU for best mAP
        multi_label(bool): whether to select mulitple class labels above the threshold or just the max
        classes(list | tuple): class ids of interest to retain
    Returns:
        output(List[Tensor[B, N, 6]]): list of detections per image in (x1, y1, x2, y2, conf, cls)
    """
    min_wh, max_wh = 2, 4096  # minimum and maximum box width and height
    B, _, nc = predictions.shape
    nc -= 5
    multi_label &= nc > 1  # multiple labels per box if nc > 1 too
    output = [None] * B

    for b, x in enumerate(predictions):  # image index and inference
        x = x[x[:, 4] > conf_thres]  # Threshold anchors by confidence
        x = x[((x[:, 2:4] > min_wh) &
               (x[:, 2:4] < max_wh)).all(1)]  # width/height constraints
        if x.numel() == 0:
            continue

        # Compute resulting class scores = anchor * class
        x[..., 5:] *= x[..., 4:5]

        # xcycwh to xyxy
        boxes = xcycwh2xyxy(x[:, :4].round())

        # Single or multi-label boxes
        if multi_label:
            # Combinations of repeated boxes with different classes: [x1, y1, x2, y2, conf, class]
            keep, cls = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((boxes[keep], x[keep, 5 + cls].unsqueeze(1),
                           cls.float().unsqueeze(1)), 1)
        else:
            # Best class only: [x1, y1, x2, y2, conf, class]
            conf, cls = x[:, 5:].max(1)
            x = torch.cat((boxes, conf.unsqueeze(1), cls.float().unsqueeze(1)),
                          1)[conf > conf_thres]

        # Filter out boxes not in any specified classes
        if classes:
            x = x[(cls.view(-1, 1) == torch.tensor(classes,
                                                   device=cls.device)).any(1)]

        if x.numel() == 0:
            continue

        # Batched NMS
        scores = x[:, 4]
        boxes = x[:, :4]
        cls = x[:, 5] * 0 if agnostic else x[:, 5]  # classes
        boxes = boxes + cls.view(
            -1, 1) * boxes.max()  # boxes (offset by class), scores
        keep = nms(boxes, scores, iou_thres)
        if merge and (1 < x.shape[0] < 3e3):
            # Weighted NMS box merge by IoU * scores
            try:
                iou = box_iou(boxes[keep], boxes) > iou_thres  # Filtered IoU
                weights = iou * scores[None]  # weighted IoU by class scores
                x[keep, :4] = torch.mm(
                    weights, x[:, :4]).float() / weights.sum(1, keepdim=True)
            except Exception as e:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                logging.error(
                    f"Failed to merge NMS boxes by weighted IoU: {e}")
                print(x, x.shape, keep, keep.shape)
        output[b] = x[keep].to(predictions.dtype)

    return output
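# A hedged shape-level sketch: synthetic YOLOv5-style predictions with 85 = 4 box
# coords + 1 objectness + 80 class scores; assumes the nms/box_iou/xcycwh2xyxy
# helpers used above are in scope.
import torch

predictions = torch.rand(2, 1000, 85)
predictions[..., :4] *= 640  # scale synthetic xc, yc, w, h into pixel range
detections = batched_nms(predictions, conf_thres=0.3, iou_thres=0.6)
for dets in detections:
    if dets is not None:
        print(dets.shape)  # (N, 6): x1, y1, x2, y2, conf, cls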
Example #30
    def download_test(self,
                      area,
                      profile='Original',
                      fps=DEFAULT_FPS_VALUE,
                      duration=None):
        '''Start streaming from the source area camera.
        '''
        from time import time
        pFrame = ffi.new('PFrame')
        pFrame.version = ffi.integer_const('FRAME_CURRENT_VERSION')
        pFrame.trackId = ffi.integer_const('DEFAULT_VIDEO_TRACK_ID')
        pFrame.duration = int(HUNDREDS_OF_NANOS_SEC /
                              (fps or DEFAULT_FPS_VALUE))

        frameIndex = 0
        start_time = time()
        retStatus = ffi.integer_const('STATUS_SUCCESS')

        streamStopTime = None
        if duration is not None:
            streamingDuration = duration * HUNDREDS_OF_NANOS_SEC
            streamStopTime = lib.defaultGetTime() + streamingDuration
            print(f"Streaming stops in {streamStopTime / 10000000:.3f}s")
        else:
            print(f"Streaming indefinitely")

        stream_time = lib.defaultGetTime()

        # Python signal delivery depends on the main thread not being blocked on system calls
        import signal
        signaled = False

        def handler(sig, frame):
            nonlocal signaled
            print(f"Interrupted or stopped by {signal.Signals(sig).name}")
            signaled = True

        signal.signal(signal.SIGINT, handler)
        signal.signal(signal.SIGTERM, handler)

        sessions = self.nuuo.open(area,
                                  profile,
                                  decoding=False,
                                  exact=True,
                                  with_audio=False)
        assert len(sessions) == 1, f"Only one streaming session at a time"
        session = sessions.pop()
        while (streamStopTime is None
               or lib.defaultGetTime() < streamStopTime) and not signaled:
            # TODO audio streaming
            res = self.nuuo.read(session, media='video')
            if res is None:
                logging.error(f"Failed to read frame from NUUO NVR")
                break

            # TODO non-H.264 keyframe requires additional parsing
            media, meta, frame = res
            keyframe = meta.get('keyframe', False)
            pFrame.flags = ffi.integer_const(
                'FRAME_FLAG_KEY_FRAME') if keyframe else ffi.integer_const(
                    'FRAME_FLAG_NONE')
            pFrame.frameData = ffi.cast('void*', frame.buffer_ptr)
            pFrame.size = frame.size
            pFrame.index = frameIndex
            logging.info(f'FPS {area}: {frameIndex/(time() - start_time)}')
            frameIndex += 1
        self.nuuo.close(session)