Example #1
    def put_media(self, media, onlyMP2TS):
        """Put an incoming media packet."""
        if self.flushing:
            raise ValueError(to_bytes(FecReceiver.ER_FLUSHING))
        if onlyMP2TS:
            if not media.validMP2T:
                raise ValueError(to_bytes(FecReceiver.ER_VALID_RTP_MP2TS))
        else:
            if not media.valid:
                raise ValueError(to_bytes(FecReceiver.ER_VALID_RTP))

        # Put the media packet into medias buffer
        if media.sequence in self.medias:
            self.media_overwritten += 1
        self.medias[media.sequence] = media
        if len(self.medias) > self.max_media:
            self.max_media = len(self.medias)
        self.media_received += 1

        cross = self.crosses.get(media.sequence)
        if cross:
            # Simulate the recovery of a media packet to update buffers and potentially start
            self.recover_media_packet(media.sequence, cross,
                                      None)  # a recovery cascade !

        self.out()  # FIXME maybe better to call it from another thread
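
A minimal usage sketch of the method above (StringIO and RtpPacket.create are used the same way as in the doctests further down this page; FecReceiver itself is assumed to be imported, and onlyMP2TS=False skips the MPEG2-TS payload check):

    from StringIO import StringIO
    from pytoolbox.network.rtp import RtpPacket

    receiver = FecReceiver(StringIO())
    packet = RtpPacket.create(1, 100, RtpPacket.MP2T_PT, bytearray('hello', 'utf-8'))
    receiver.put_media(packet, onlyMP2TS=False)  # packet is buffered, then out() flushes what it can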
Example #2
 def revoke_publisher_task(self, task, callback_url, terminate=False, remove=False):
     u"""
     This do not delete tasks from tasks database (if remove=False) but set revoked attribute in tasks database and
     broadcast revoke request to publication units with celery.
     If the task is actually running it will be cancelled if terminated = True.
     In any case, the output media asset will be deleted (task running or successfully finished).
     """
     if valid_uuid(task, none_allowed=False):
         task = self.get_publisher_task({u'_id': task})
     task.is_valid(True)
     if task.status in PublisherTask.CANCELED_STATUS:
         raise ValueError(to_bytes(u'Cannot revoke a publication task with status {0}.'.format(task.status)))
     if not self.config.is_mock:
         revoke(task._id, terminate=terminate)
     if task.status == PublisherTask.SUCCESS and not self.config.is_mock:
         # Send revoke task to the worker that published the media
         callback = Callback(self.config.api_url + callback_url, u'node', self.config.node_secret)
         queue = task.get_hostname()
         result = PublisherWorker.revoke_publisher_task.apply_async(
             args=(task.publish_uri, object2json(callback, False)), queue=queue)
         if not result.id:
             raise ValueError(to_bytes(u'Unable to transmit task to queue {0}.'.format(queue)))
         logging.info(u'New revoke publication task {0} -> queue {1}.'.format(result.id, queue))
         self.update_publisher_task_and_media(task, revoke_task_id=result.id, status=PublisherTask.REVOKING)
     else:
         self.update_publisher_task_and_media(task, status=PublisherTask.REVOKED)
     if remove:
         self._db.publisher_tasks.remove({u'_id': task._id})
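
A hedged call sketch (orchestra stands for the object exposing these methods, task_id for the publication task's UUID; the callback URL path is illustrative):

    orchestra.revoke_publisher_task(task_id, callback_url=u'/publisher/revoke/callback',
                                    terminate=True, remove=False)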
Example #3
 def send_email_task(self, task, status, media=None, media_out=None):
     if task.send_email:
         user = self.get_user({u'_id': task.user_id}, {u'mail': 1})
         if not user:
             raise IndexError(to_bytes(u'Unable to find user with id {0}.'.format(task.user_id)))
         if isinstance(task, TransformTask):
             media_in = self.get_media({u'_id': task.media_in_id})
             if not media_in:
                 # FIXME maybe do not raise but put default value or return ?
                 raise IndexError(to_bytes(u'Unable to find input media asset with id {0}.'.format(
                                  task.media_in_id)))
             profile = self.get_transform_profile({u'_id': task.profile_id})
             if not profile:
                 # FIXME maybe do not raise but put default value or return ?
                 raise IndexError(to_bytes(u'Unable to find transformation profile with id {0}.'.format(
                                  task.profile_id)))
             task.load_fields(user, media_in, media_out, profile)
             template, name = self.config.email_ttask_template, u'Transformation'
         elif isinstance(task, PublisherTask):
             task.load_fields(user, media)
             template, name = self.config.email_ptask_template, u'Publication'
         else:
             return  # FIXME oops
         task.append_async_result()
         with open(template, u'r', u'utf-8') as template_file:
             text_plain = Template(template_file.read()).render(object2dict(task, include_properties=True))
             # FIXME YourFormatter().format(template_file.read(), task)
         self.send_email(task.user.mail, u'OSCIED - {0} task {1} {2}'.format(name, task._id, status), text_plain)
Example #4
    def put_media(self, media, onlyMP2TS):
        """Put an incoming media packet."""
        if self.flushing:
            raise ValueError(to_bytes(FecReceiver.ER_FLUSHING))
        if onlyMP2TS:
            if not media.validMP2T:
                raise ValueError(to_bytes(FecReceiver.ER_VALID_RTP_MP2TS))
        else:
            if not media.valid:
                raise ValueError(to_bytes(FecReceiver.ER_VALID_RTP))

        # Put the media packet into medias buffer
        if media.sequence in self.medias:
            self.media_overwritten += 1
        self.medias[media.sequence] = media
        if len(self.medias) > self.max_media:
            self.max_media = len(self.medias)
        self.media_received += 1

        cross = self.crosses.get(media.sequence)
        if cross:
            # Simulate the recovery of a media packet to update buffers and potentially start
            self.recover_media_packet(media.sequence, cross, None)  # a recovery cascade !

        self.out()  # FIXME maybe better to call it from another thread
Example #5
 def add_media(config, media):
     if media.status != Media.PENDING:
         media_src_path = config.storage_medias_path(media, generate=False)
         if media_src_path:
             media_dst_path = config.storage_medias_path(media, generate=True)
             if media_dst_path != media_src_path:
                 # Generate media storage uri and move it to media storage path + set permissions
                 media.uri = config.storage_medias_uri(media)
                 try_makedirs(os.path.dirname(media_dst_path))
                 the_error = None
                 for i in xrange(5):
                     try:
                         os.rename(media_src_path, media_dst_path)
                         # FIXME chown chmod
                         the_error = None
                         break
                     except OSError as error:
                         the_error = error
                         time.sleep(1)
                 if the_error:
                      raise IndexError(to_bytes(u'An error occurred: {0} ({1} -> {2}).'.format(
                                      the_error, media_src_path, media_dst_path)))
             try:
                 size = get_size(os.path.dirname(media_dst_path))
             except OSError:
                 raise ValueError(to_bytes(u'Unable to detect size of media asset {0}.'.format(media_dst_path)))
             duration = get_media_duration(media_dst_path)
             if duration is None:
                 raise ValueError(to_bytes(u'Unable to detect duration of media asset {0}.'.format(media_dst_path)))
             return (size, duration)
         else:
             raise NotImplementedError(to_bytes(u'FIXME Add of external URI not implemented.'))
     return (0, None)
Example #6
def revoke_publisher_task(publish_uri, callback_json):

    def revoke_publish_callback(status, publish_uri):
        data = {u'task_id': request.id, u'status': status}
        if publish_uri:
            data[u'publish_uri'] = publish_uri
        data_json = object2json(data, False)
        if callback is None:
            print(u'{0} [ERROR] Unable to callback orchestrator: {1}'.format(request.id, data_json))
        else:
            r = callback.post(data_json)
            print(u'{0} Code {1} {2} : {3}'.format(request.id, r.status_code, r.reason, r._content))

    # ------------------------------------------------------------------------------------------------------------------

    # Avoid 'referenced before assignment'
    callback = None
    request = current_task.request

    try:
        # Let the task begin!
        print(u'{0} Revoke publication task started'.format(request.id))

        # Read current configuration to translate files URIs to local paths
        local_config = PublisherLocalConfig.read(LOCAL_CONFIG_FILENAME, inspect_constructor=False)
        print(object2json(local_config, True))

        # Load and check task parameters
        callback = Callback.from_json(callback_json, inspect_constructor=True)
        callback.is_valid(True)

        # Update callback socket according to configuration
        if local_config.api_nat_socket and len(local_config.api_nat_socket) > 0:
            callback.replace_netloc(local_config.api_nat_socket)

        publish_root = dirname(local_config.publish_uri_to_path(publish_uri))
        if not publish_root:
            raise ValueError(to_bytes(u'Media asset is not hosted on this publication point.'))

        # Remove publication directory
        start_date, start_time = datetime_now(), time.time()
        shutil.rmtree(publish_root, ignore_errors=True)
        if valid_uri(publish_uri, check_404=True):
            raise IOError(to_bytes(u'Media asset is reachable from publication URI {0}'.format(publish_uri)))
        elapsed_time = time.time() - start_time

        # Here everything seems okay
        print(u'{0} Revoke publication task successful, media asset unpublished from {1}'.format(
              request.id, publish_uri))
        revoke_publish_callback(PublisherTask.SUCCESS, publish_uri)
        return {u'hostname': request.hostname, u'start_date': start_date, u'elapsed_time': elapsed_time, u'eta_time': 0,
                u'percent': 100}

    except Exception as error:

        # Here something went wrong
        print(u'{0} Revoke publication task failed'.format(request.id))
        revoke_publish_callback(unicode(error), None)
        raise
Example #7
 def add(self, *args, **kwargs):
     if not(bool(args) ^ bool(kwargs)):
         raise ValueError(to_bytes(u'You must set args OR kwargs.'))
     if args and len(args) != 1:
         raise ValueError(to_bytes(u'args should contain only 1 value.'))
     value = args[0] if args else kwargs
     response = self.api_client.do_request(post, self.get_url(), data=object2json(value, include_properties=False))
     instance = dict2object(self.cls, response, inspect_constructor=True) if self.cls else response
     # Recover user's secret
     if isinstance(instance, User):
         instance.secret = value.secret if args else kwargs[u'secret']
     return instance
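
A hedged usage sketch (users stands for this collection-like client bound to the User class; the field values, and the assumption that User accepts them as keyword arguments, are illustrative):

    user = users.add(User(first_name=u'John', last_name=u'Doe', mail=u'john.doe@example.com',
                          secret=u'top_secret', admin_platform=False))
    print(user.secret)  # the plain-text secret is copied back onto the returned instance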
Example #8
 def validate_task(media_in, profile, media_out):
     if media_in.status != Media.READY:
         raise NotImplementedError(to_bytes(u"Cannot launch the task, input media asset's status is {0}.".format(
                                   media_in.status)))
     if media_in.is_dash and profile.encoder_name != u'copy':
         raise NotImplementedError(to_bytes(u'Cannot launch the task, input media asset is MPEG-DASH content and enc'
                                   'oder is not copy.'))
     if profile.is_dash and not media_out.is_dash:
         raise ValueError(to_bytes(u'Cannot launch the task, output media asset is not a MPD but task is based on a '
                          'MPEG-DASH encoder called {0}.'.format(profile.encoder_name)))
     if not profile.is_dash and media_out.is_dash:
         raise ValueError(to_bytes(u'Cannot launch the task, output media asset is a MPD but task is not based on a '
                          'MPEG-DASH encoder called {0}.'.format(profile.encoder_name)))
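
A hedged sketch of the third guard in action (media_in, profile and media_out stand for documents like those used in the other examples, TransformTask.validate_task is how Example #9 invokes this method, and the encoder name is illustrative):

    media_in.status, media_in.is_dash = Media.READY, False
    profile.is_dash, profile.encoder_name = True, u'dashcast'
    media_out.is_dash = False
    TransformTask.validate_task(media_in, profile, media_out)  # raises ValueError: output is not a MPD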
Example #9
File: server.py Project: ebu/OSCIED
 def launch_transform_task(
     self, user_id, media_in_id, profile_id, filename, metadata, send_email, queue, callback_url
 ):
     if self.is_standalone:
         user = self.get_user({"_id": user_id}, {"secret": 0})
         if not user:
             raise IndexError(to_bytes("No user with id {0}.".format(user_id)))
     media_in = self.get_media({"_id": media_in_id})
     if not media_in:  # FIXME maybe a media access control here
         raise IndexError(to_bytes("No media asset with id {0}.".format(media_in_id)))
     profile = self.get_transform_profile({"_id": profile_id})
     if not profile:  # FIXME maybe a profile access control here
         raise IndexError(to_bytes("No transformation profile with id {0}.".format(profile_id)))
     if queue not in self.config.transform_queues:
         raise IndexError(to_bytes("No transformation queue with name {0}.".format(queue)))
     media_out = Media(
         user_id=user_id, parent_id=media_in_id, filename=filename, metadata=metadata, status=Media.PENDING
     )
     media_out.uri = self.config.storage_medias_uri(media_out)
     TransformTask.validate_task(media_in, profile, media_out)
     self.save_media(media_out)  # Save pending output media
     # FIXME create a one-time password to avoid fixed secret authentication ...
     callback = Callback(self.config.api_url + callback_url, "node", self.config.node_secret)
     if self.is_mock:
         result_id = unicode(uuid.uuid4())
     else:
         result = TransformWorker.transform_task.apply_async(
             args=(
                 object2json(media_in, False),
                 object2json(media_out, False),
                 object2json(profile, False),
                 object2json(callback, False),
             ),
             queue=queue,
         )
         result_id = result.id
     if not result_id:
         raise ValueError(to_bytes("Unable to transmit task to workers of queue {0}.".format(queue)))
     logging.info("New transformation task {0} -> queue {1}.".format(result_id, queue))
     task = TransformTask(
         user_id=user_id,
         media_in_id=media_in._id,
         media_out_id=media_out._id,
         profile_id=profile._id,
         send_email=send_email,
         _id=result_id,
     )
     task.statistic["add_date"] = datetime_now()
     self._db.transform_tasks.save(task.__dict__, safe=True)
     return task
Example #10
    def ensure_publisher_units(self, environment, num_units, terminate, test=False):
        u"""

        .. warning::

            FIXME implement more robust resource listing and removal; sometimes juju fails during a call
            (e.g. destroy_transform_units with num_units=10) and then some machines are not destroyed.

            * implement a garbage-collector method callable by the user when they want to destroy useless machines?
            * implement a thread to handle removing units asynchronously.
        """
        if not test:
            raise NotImplementedError(u'This method is in development, set test to True to disable this warning.')
        environments, default = self.get_environments()
        if environment == 'default':
            environment = default
        same_environment = (environment == default)
        config = juju.load_unit_config(self.config.publisher_config)
        config[u'rabbit_queues'] = u'publisher_{0}'.format(environment)
        if not same_environment:
            raise NotImplementedError(to_bytes(u'Unable to setup publication units into non-default environment {0} '
                                      '(default is {1}).'.format(environment, default)))
            config[u'mongo_connection'] = self.config.mongo_node_connection
            config[u'rabbit_connection'] = self.config.rabbit_connection
            # FIXME copy storage configuration, first method
            config[u'storage_address'] = self.config.storage_address
            config[u'storage_fstype'] = self.config.storage_fstype
            config[u'storage_mountpoint'] = self.config.storage_mountpoint
            config[u'storage_options'] = self.config.storage_options
        juju.save_unit_config(self.config.charms_config, self.config.publisher_service, config)
        the_environment = self._get_environment(environment)
        the_environment.ensure_num_units(self.config.publisher_service, local=True, num_units=num_units,
                                         terminate=terminate, repository=self.config.charms_repository)
        if same_environment and num_units:
            try:
                try:
                    the_environment.add_relation(self.config.orchestra_service, self.config.publisher_service,
                                                 u'publisher', u'publisher')
                except RuntimeError as e:
                    raise NotImplementedError(to_bytes(u'Orchestra service must be available and running on default '
                                              'environment {0}, reason : {1}'.format(default, e)))
                try:
                    the_environment.add_relation(self.config.storage_service, self.config.publisher_service)
                except RuntimeError as e:
                    raise NotImplementedError(to_bytes(u'Storage service must be available and running on default '
                                              'environment {0}, reason : {1}'.format(default, e)))
            except NotImplementedError:
                the_environment.destroy_service(self.config.publisher_service)
                raise
Example #11
 def delete_media(self, media):
     if valid_uuid(media, none_allowed=False):
         media = self.get_media({u'_id': media})
     media.is_valid(True)
     task = self.get_transform_task({u'media_in_id': media._id}, append_result=True)
     if task and task.status in TransformTask.WORK_IN_PROGRESS_STATUS:
         raise ValueError(to_bytes(u'Cannot delete the media asset, it is actually in use by transformation task wit'
                          'h id {0} and status {1}.'.format(task._id, task.status)))
     task = self.get_publisher_task({u'media_id': media._id}, append_result=True)
     if task and task.status in TransformTask.WORK_IN_PROGRESS_STATUS:
         raise ValueError(to_bytes(u'Cannot delete the media asset, it is actually in use by publication task with i'
                          'd {0} and status {1}.'.format(task._id, task.status)))
     media.status = Media.DELETED
     self.save_media(media)
     #self._db.medias.remove({'_id': media._id})
     Storage.delete_media(self.config, media)
Example #12
File: utils.py Project: zlorb/pytoolbox
def update_widget_attributes(widget, updates):
    """
    Update attributes of a `widget` with content of `updates` handling classes addition [+],
    removal [-] and toggle [^].

    **Example usage**

    >>> from pytoolbox.unittest import asserts
    >>> widget = type(str(''), (), {})
    >>> widget.attrs = {'class': 'mondiale'}
    >>> update_widget_attributes(
    ...     widget, {'class': '+pigeon +pigeon +voyage -mondiale -mondiale, ^voyage ^voyageur'})
    >>> asserts.dict_equal(widget.attrs, {'class': 'pigeon voyageur'})
    >>> update_widget_attributes(widget, {'class': '+le', 'cols': 100})
    >>> asserts.dict_equal(widget.attrs, {'class': 'le pigeon voyageur', 'cols': 100})
    """
    updates = copy(updates)
    if 'class' in updates:
        class_set = set(
            [c for c in widget.attrs.get('class', '').split(' ') if c])
        for cls in set([c for c in updates['class'].split(' ') if c]):
            operation, cls = cls[0], cls[1:]
            if operation == '+' or (operation == '^' and cls not in class_set):
                class_set.add(cls)
            elif operation in ('-', '^'):
                class_set.discard(cls)
            else:
                raise ValueError(
                    to_bytes(
                        'updates must be a valid string with "<op>class <op>..." with op in [+-^].'
                    ))
        widget.attrs['class'] = ' '.join(sorted(class_set))
        del updates['class']
    widget.attrs.update(updates)
Example #13
def api_user_id_delete(id=None, auth_user=None, api_core=None, request=None):
    u"""Delete a user."""
    user = api_core.get_user(spec={u'_id': id})
    if not user:
        raise IndexError(to_bytes(u'No user with id {0}.'.format(id)))
    api_core.delete_user(user)
    return ok_200(u'The user "{0}" has been deleted.'.format(user.name), include_properties=False)
Example #14
def update_widget_attributes(widget, updates):
    """
    Update attributes of a `widget` with content of `updates` handling classes addition [+],
    removal [-] and toggle [^].

    **Example usage**

    >>> from pytoolbox.unittest import asserts
    >>> widget = type(str(''), (), {})
    >>> widget.attrs = {'class': 'mondiale'}
    >>> update_widget_attributes(
    ...     widget, {'class': '+pigeon +pigeon +voyage -mondiale -mondiale, ^voyage ^voyageur'})
    >>> asserts.dict_equal(widget.attrs, {'class': 'pigeon voyageur'})
    >>> update_widget_attributes(widget, {'class': '+le', 'cols': 100})
    >>> asserts.dict_equal(widget.attrs, {'class': 'le pigeon voyageur', 'cols': 100})
    """
    updates = copy(updates)
    if 'class' in updates:
        class_set = set([c for c in widget.attrs.get('class', '').split(' ') if c])
        for cls in set([c for c in updates['class'].split(' ') if c]):
            operation, cls = cls[0], cls[1:]
            if operation == '+' or (operation == '^' and cls not in class_set):
                class_set.add(cls)
            elif operation in ('-', '^'):
                class_set.discard(cls)
            else:
                raise ValueError(to_bytes(
                    'updates must be a valid string with "<op>class <op>..." with op in [+-^].'))
        widget.attrs['class'] = ' '.join(sorted(class_set))
        del updates['class']
    widget.attrs.update(updates)
Example #15
 def subordinate_register(self, mongo=None, rabbit=None):
     local_cfg = self.local_config
     if self.subordinate_config_is_enabled:
         self.info(u'Override subordinate parameters with charm configuration')
         mongo = self.config.mongo_connection
         rabbit = self.config.rabbit_connection
         socket = self.config.api_nat_socket
     elif mongo and rabbit:
         self.info(u'Use subordinate parameters from charm relation')
         socket = u''
     else:
         return
     self.info(u'Register the Orchestrator')
     local_cfg.api_nat_socket = socket
     try:
         infos = pymongo.uri_parser.parse_uri(mongo)
         assert len(infos[u'nodelist']) == 1
         infos.update({
             u'concurrency': self.config.concurrency, u'directory': self.directory,
             u'host': infos[u'nodelist'][0][0], u'port': infos[u'nodelist'][0][1],
             u'group': DAEMON_GROUP, u'name': local_cfg.worker_name, u'queues': self.rabbit_queues,
             u'rabbit': rabbit, u'user': DAEMON_USER
         })
         del infos[u'nodelist']
         self.info(u'{0}'.format(infos))
         for name in (u'concurrency', u'group', u'host', u'name', u'port', u'queues', u'rabbit', u'user'):
             assert infos[name], u'Info {0} is empty'.format(name)
     except:
         raise ValueError(to_bytes(u'Unable to parse MongoDB connection {0}'.format(mongo)))
     self.template2config(local_cfg.celery_init_template_file,    local_cfg.celery_init_file, {})
     self.template2config(local_cfg.celery_default_template_file, local_cfg.celery_default_file, infos)
     self.template2config(local_cfg.celery_config_template_file,  local_cfg.celery_config_file,  infos)
     os.chmod(local_cfg.celery_init_file, 0o755)
     self.cmd(u'update-rc.d {0} defaults'.format(local_cfg.worker_name))
     self.remark(u'Orchestrator successfully registered')
Example #16
def post_install():
    from encodebox import lib
    from pytoolbox.console import confirm
    from pytoolbox.encoding import to_bytes
    from pytoolbox.filesystem import chown, from_template, try_makedirs, try_remove
    from pytoolbox.network.http import download

    if not exists(u'/usr/local/bin/neroAacEnc'):
        try:
            print(u'Download and install Nero AAC encoder')
            download(u'ftp://ftp6.nero.com/tools/NeroDigitalAudio.zip',
                     u'/tmp/nero.zip')
            zipfile.ZipFile(u'/tmp/nero.zip').extract(u'linux/neroAacEnc',
                                                      u'/usr/local/bin')
            os.chmod(
                u'/usr/local/bin/neroAacEnc',
                os.stat(u'/usr/local/bin/neroAacEnc').st_mode | stat.S_IEXEC)
        finally:
            try_remove(u'/tmp/nero.zip')

    filename = lib.SETTINGS_FILENAME
    settings = lib.load_settings(u'etc/config.yaml')
    if not exists(filename) or confirm(
            u'Overwrite existing configuration file "{0}"'.format(filename)):
        print(u'Generate configuration file "{0}"'.format(filename))
        password = lib.generate_password()
        settings[u'rabbit_password'] = password
        lib.save_settings(filename, settings)

    print(u'Configure RabbitMQ Message Broker')
    check_call([u'service', u'rabbitmq-server', u'start'])
    call([u'rabbitmqctl', u'add_vhost', u'/'])
    call([u'rabbitmqctl', u'delete_user', u'guest'])
    call([u'rabbitmqctl', u'delete_user', u'encodebox'])
    call([
        u'rabbitmqctl', u'add_user', u'encodebox', settings[u'rabbit_password']
    ])
    check_call([
        u'rabbitmqctl', u'set_permissions', u'-p', u'/', u'encodebox', u'.*',
        u'.*', u'.*'
    ])
    users, vhosts = lib.rabbit_users(), lib.rabbit_vhosts()
    print(u'RabbitMQ users: {0} vhosts: {1}'.format(users, vhosts))
    if u'guest' in users or u'encodebox' not in users:
        raise RuntimeError(to_bytes(u'Unable to configure RabbitMQ'))

    print(u'Create directory for storing persistent data')
    try_makedirs(lib.LIB_DIRECTORY)
    chown(lib.LIB_DIRECTORY,
          lib.USERNAME,
          pwd.getpwnam(lib.USERNAME).pw_gid,
          recursive=True)
    print(u'Register and start our services as user ' + lib.USERNAME)
    from_template(u'etc/encodebox.conf.template',
                  u'/etc/supervisor/conf.d/encodebox.conf', {
                      u'lib_directory': lib.LIB_DIRECTORY,
                      u'user': lib.USERNAME
                  })
    call([u'service', u'supervisor', u'force-reload'])
Example #17
def api_transform_profile_id_delete(id=None, auth_user=None, api_core=None, request=None):
    u"""Delete a transformation profile."""
    profile = api_core.get_transform_profile(spec={u'_id': id})
    if not profile:
        raise IndexError(to_bytes(u'No transformation profile with id {0}.'.format(id)))
    api_core.delete_transform_profile(profile)
    return ok_200(u'The transformation profile "{0}" has been deleted.'.format(profile.title),
                  include_properties=False)
Example #18
def get_media_bitrate(filename):
    cmd = u'ffprobe "{0}"'.format(filename)
    pipe = Popen(shlex.split(to_bytes(cmd)), stderr=PIPE, close_fds=True)
    match = re.search(ur'bitrate: (?P<bitrate>\d+)', unicode(pipe.stderr.read()))
    if not match:
        return None
    bitrate = match.group(u'bitrate')
    return bitrate
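
A hedged usage sketch (the path and the printed value are illustrative; ffprobe reports the overall bitrate in kb/s and the match is returned as a string):

    bitrate = get_media_bitrate(u'/tmp/movie.mp4')
    print(bitrate)  # e.g. u'1387', or None if ffprobe did not report a bitrate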
Example #19
 def cleanup(self):
     """Remove FEC packets that are stored / waiting but useless."""
     if self.flushing:
         raise ValueError(to_bytes(FecReceiver.ER_FLUSHING))
     if self.startup:
         raise ValueError(to_bytes(FecReceiver.ER_STARTUP))
     if self.delay_units == FecReceiver.PACKETS:
         start, end = self.position, (self.position + self.delay_value) & RtpPacket.S_MASK
         for media_sequence in self.crosses.keys():
             if not self.validity_window(media_sequence, start, end):
                 cross = self.crosses[media_sequence]
                 del self.cols[cross['col_sequence']]
                 del self.rows[cross['row_sequence']]
                 del self.crosses[media_sequence]
     elif self.delay_units == FecReceiver.SECONDS:
         raise NotImplementedError()
     else:
         raise ValueError(to_bytes(FecReceiver.ER_DELAY_UNITS.format(self.delay_units)))
Example #20
 def save_transform_profile(self, profile):
     profile.is_valid(True)
     # FIXME exact matching !
     try:
         self._db.transform_profiles.save(profile.__dict__, safe=True)
     except DuplicateKeyError:
         raise ValueError(to_bytes(u'The title {0} is already used by another transformation profile.'.format(
                          profile.title)))
Example #21
 def launch_publisher_task(self, user_id, media_id, send_email, queue, callback_url):
     if self.config.is_standalone:
         user = self.get_user({u'_id': user_id}, {u'secret': 0})
         if not user:
             raise IndexError(to_bytes(u'No user with id {0}.'.format(user_id)))
     media = self.get_media({u'_id': media_id})
     if not media:  # FIXME maybe a media access control here
         raise IndexError(to_bytes(u'No media asset with id {0}.'.format(media_id)))
     if queue not in self.config.publisher_queues:
         raise IndexError(to_bytes(u'No publication queue with name {0}.'.format(queue)))
     if media.status != Media.READY:
         raise NotImplementedError(to_bytes(u"Cannot launch the task, input media asset's status is {0}.".format(
                                   media.status)))
     if len(media.public_uris) > 0:
         raise NotImplementedError(to_bytes(u'Cannot launch the task, input media asset is already published.'))
     other = self.get_publisher_task({u'media_id': media._id})
     if other and other.status not in PublisherTask.FINAL_STATUS and other.status != PublisherTask.REVOKED:
         raise NotImplementedError(to_bytes(u'Cannot launch the task, input media asset will be published by another'
                                   ' task with id {0}.'.format(other._id)))
     # FIXME create a one-time password to avoid fixed secret authentication ...
     callback = Callback(self.config.api_url + callback_url, u'node', self.config.node_secret)
     if self.config.is_mock:
         result_id = unicode(uuid.uuid4())
     else:
         result = PublisherWorker.publisher_task.apply_async(
             args=(object2json(media, False), object2json(callback, False)), queue=queue)
         result_id = result.id
     if not result_id:
         raise ValueError(to_bytes(u'Unable to transmit task to workers of queue {0}.'.format(queue)))
     logging.info(u'New publication task {0} -> queue {1}.'.format(result_id, queue))
     task = PublisherTask(user_id=user_id, media_id=media._id, send_email=send_email, _id=result_id)
     task.statistic[u'add_date'] = int(time())
     self._db.publisher_tasks.save(task.__dict__, safe=True)
     return task
Example #22
File: server.py Project: ebu/OSCIED
 def transform_callback(self, task_id, status):
     task = self.get_transform_task({"_id": task_id})
     if not task:
         raise IndexError(to_bytes("No transformation task with id {0}.".format(task_id)))
     media_out = self.get_media({"_id": task.media_out_id})
     if not media_out:
         raise IndexError(to_bytes("Unable to find output media asset with id {0}.".format(task.media_out_id)))
     if status == TransformTask.SUCCESS:
         media_out.status = Media.READY
         self.save_media(media_out)
         logging.info("{0} Media {1} is now {2}".format(task_id, media_out.filename, media_out.status))
         # self.send_email_task(task, TransformTask.SUCCESS, media_out=media_out)
     else:
         self.delete_media(media_out)
         task.statistic["error_details"] = status.replace("\n", "\\n")
         self._db.transform_tasks.save(task.__dict__, safe=True)
         logging.info("{0} Error: {1}".format(task_id, status))
         logging.info("{0} Media {1} is now deleted".format(task_id, media_out.filename))
Example #23
 def save_user(self, user, hash_secret):
     self.only_standalone()
     user.is_valid(True)
     if hash_secret:
         user.hash_secret()
     try:
         self._db.users.save(user.__dict__, safe=True)
     except DuplicateKeyError:
         raise ValueError(to_bytes(u'The email address {0} is already used by another user.'.format(user.mail)))
Example #24
    def __init__(self, output):
        """
        Construct a new FecReceiver and register `output`.

        :param output: Where to output payload of the recovered stream.
        :type output: IOBase

        **Example usage**

        Not yet an output:

        >>> FecReceiver(None)
        Traceback (most recent call last):
            ...
        ValueError: output is None
        >>>
        >>> from StringIO import StringIO
        >>> output = StringIO()
        >>> receiver = FecReceiver(output)
        """
        if not output:
            raise ValueError(to_bytes('output is None'))
        # Media packets storage, medias[media seq] = media pkt
        self.medias = {}
        self.startup = True  # Indicate that actual position must be initialized
        self.flushing = False  # Indicate that a flush operation is actually running
        self.position = 0  # Actual position (sequence number) in the medias buffer
        # Link media packets to fec packets able to recover it, crosses[mediaseq] = {colseq, rowseq}
        self.crosses = {}
        # Fec packets + related information storage, col[sequence] = { fec pkt + info }
        self.cols = {}
        self.rows = {}
        self.matrixL = 0  # Detected FEC matrix size (number of columns)
        self.matrixD = 0  # Detected FEC matrix size (number of rows)
        # Output
        self.output = output  # Registered output
        # Settings
        self.delay_value = 100  # RTP buffer delay value
        self.delay_units = FecReceiver.PACKETS  # RTP buffer delay units
        # Statistics about media (buffers and packets)
        self.media_received = 0  # Received media packets counter
        self.media_recovered = 0  # Recovered media packets counter
        self.media_aborted_recovery = 0  # Aborted media packet recovery counter
        self.media_overwritten = 0  # Overwritten media packets counter
        self.media_missing = 0  # Missing media packets counter
        self.max_media = 0  # Largest amount of stored elements in the medias buffer
        # Statistics about fec (buffers and packets)
        self.col_received = 0  # Received column fec packets counter
        self.row_received = 0  # Received row fec packets counter
        self.col_dropped = 0  # Dropped column fec packets counter
        self.row_dropped = 0  # Dropped row fec packets counter
        self.max_cross = 0  # Largest amount of stored elements in the crosses buffer
        self.max_col = 0  # Largest amount of stored elements in the columns buffer
        self.max_row = 0  # Largest amount of stored elements in the rows buffer
        self.lostogram = collections.defaultdict(
            int)  # Statistics about lost medias
        self.lostogram_counter = 0  # Incremented while there are lost media packets
Example #25
def api_media_id_delete(id=None, auth_user=None, api_core=None, request=None):
    u"""Remove a media asset from the shared storage and update informations about it (set status to DELETED)."""
    media = api_core.get_media(spec={u'_id': id})
    if not media:
        raise IndexError(to_bytes(u'No media asset with id {0}.'.format(id)))
    if auth_user._id != media.user_id:
        flask.abort(403, u'You are not allowed to delete media asset with id {0}.'.format(id))
    api_core.delete_media(media)
    return ok_200(u'The media asset "{0}" has been deleted.'.format(media.metadata[u'title']), include_properties=False)
Example #26
 def set_delay(self, value, units):
     """Set desired size for the internal media buffer."""
     if units == FecReceiver.PACKETS:
         self.delay_value = value
         self.delay_units = units
     elif units == FecReceiver.SECONDS:
         raise NotImplementedError()
     else:
         raise ValueError(to_bytes(FecReceiver.ER_DELAY_UNITS.format(units)))
Example #27
 def current_delay(self):
     """Return current delay based on the length of the media buffer."""
     if len(self.medias) == 0:
         return 0
     if self.delay_units == FecReceiver.PACKETS:
         return len(self.medias)
     elif self.delay_units == FecReceiver.SECONDS:
         raise NotImplementedError()
     raise ValueError(to_bytes(FecReceiver.ER_DELAY_UNITS.format(self.delay_units)))
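
A short sketch tying set_delay and current_delay together (StringIO as in the constructor doctest of Example #24; FecReceiver is assumed to be imported):

    from StringIO import StringIO
    receiver = FecReceiver(StringIO())
    receiver.set_delay(1024, FecReceiver.PACKETS)
    print(receiver.current_delay())  # 0 until media packets are buffered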
Example #28
    def __init__(self, output):
        """
        Construct a new FecReceiver and register `output`.

        :param output: Where to output payload of the recovered stream.
        :type output: IOBase

        **Example usage**

        Not yet an output:

        >>> FecReceiver(None)
        Traceback (most recent call last):
            ...
        ValueError: output is None
        >>>
        >>> from StringIO import StringIO
        >>> output = StringIO()
        >>> receiver = FecReceiver(output)
        """
        if not output:
            raise ValueError(to_bytes('output is None'))
        # Media packets storage, medias[media seq] = media pkt
        self.medias = {}
        self.startup = True    # Indicate that actual position must be initialized
        self.flushing = False  # Indicate that a flush operation is actually running
        self.position = 0      # Actual position (sequence number) in the medias buffer
        # Link media packets to fec packets able to recover it, crosses[mediaseq] = {colseq, rowseq}
        self.crosses = {}
        # Fec packets + related information storage, col[sequence] = { fec pkt + info }
        self.cols = {}
        self.rows = {}
        self.matrixL = 0  # Detected FEC matrix size (number of columns)
        self.matrixD = 0  # Detected FEC matrix size (number of rows)
        # Output
        self.output = output  # Registered output
        # Settings
        self.delay_value = 100                  # RTP buffer delay value
        self.delay_units = FecReceiver.PACKETS  # RTP buffer delay units
        # Statistics about media (buffers and packets)
        self.media_received = 0          # Received media packets counter
        self.media_recovered = 0         # Recovered media packets counter
        self.media_aborted_recovery = 0  # Aborted media packet recovery counter
        self.media_overwritten = 0       # Overwritten media packets counter
        self.media_missing = 0           # Missing media packets counter
        self.max_media = 0               # Largest amount of stored elements in the medias buffer
        # Statistics about fec (buffers and packets)
        self.col_received = 0  # Received column fec packets counter
        self.row_received = 0  # Received row fec packets counter
        self.col_dropped = 0   # Dropped column fec packets counter
        self.row_dropped = 0   # Dropped row fec packets counter
        self.max_cross = 0     # Largest amount of stored elements in the crosses buffer
        self.max_col = 0       # Largest amount of stored elements in the columns buffer
        self.max_row = 0       # Largest amount of stored elements in the rows buffer
        self.lostogram = collections.defaultdict(int)  # Statistics about lost medias
        self.lostogram_counter = 0  # Incremented while there are lost media packets
Example #29
 def configure_rabbitmq(self):
     self.info(u'Configure RabbitMQ Message Broker')
     self.cmd(u'rabbitmqctl delete_user guest', fail=False)
     self.cmd(u'rabbitmqctl add_user node "{0}"'.format(self.config.rabbit_password), fail=False)
     self.cmd(u'rabbitmqctl add_vhost celery', fail=False)
     self.cmd(u'rabbitmqctl set_permissions -p celery node ".*" ".*" ".*"', fail=False)
     users, vhosts = self.rabbit_users, self.rabbit_vhosts
     self.debug(u'RabbitMQ users: {0} vhosts: {1}'.format(users, vhosts))
     if u'guest' in users or u'node' not in users or u'celery' not in vhosts:
         raise RuntimeError(to_bytes(u'Unable to configure RabbitMQ'))
Example #30
 def cleanup(self):
     """Remove FEC packets that are stored / waiting but useless."""
     if self.flushing:
         raise ValueError(to_bytes(FecReceiver.ER_FLUSHING))
     if self.startup:
         raise ValueError(to_bytes(FecReceiver.ER_STARTUP))
     if self.delay_units == FecReceiver.PACKETS:
         start, end = self.position, (self.position +
                                      self.delay_value) & RtpPacket.S_MASK
         for media_sequence in self.crosses.keys():
             if not self.validity_window(media_sequence, start, end):
                 cross = self.crosses[media_sequence]
                 del self.cols[cross['col_sequence']]
                 del self.rows[cross['row_sequence']]
                 del self.crosses[media_sequence]
     elif self.delay_units == FecReceiver.SECONDS:
         raise NotImplementedError()
     else:
         raise ValueError(to_bytes(FecReceiver.ER_DELAY_UNITS.format(self.delay_units)))
Example #31
 def transform_callback(self, task_id, status, measures):
     task = self.get_transform_task({u'_id': task_id})
     if not task:
         raise IndexError(to_bytes(u'No transformation task with id {0}.'.format(task_id)))
     media_out = self.get_media({u'_id': task.media_out_id})
     if not media_out:
         raise IndexError(to_bytes(u'Unable to find output media asset with id {0}.'.format(task.media_out_id)))
     if status == TransformTask.SUCCESS:
         media_out.status = Media.READY
         media_out.metadata['measures'] = measures
         self.save_media(media_out)
         logging.info(u'{0} Media {1} is now {2}'.format(task_id, media_out.filename, media_out.status))
         #self.send_email_task(task, TransformTask.SUCCESS, media_out=media_out)
     else:
         self.delete_media(media_out)
         task.statistic[u'error_details'] = status.replace(u'\n', u'\\n')
         self._db.transform_tasks.save(task.__dict__, safe=True)
         logging.info(u'{0} Error: {1}'.format(task_id, status))
         logging.info(u'{0} Media {1} is now deleted'.format(task_id, media_out.filename))
Example #32
 def set_delay(self, value, units):
     """Set desired size for the internal media buffer."""
     if units == FecReceiver.PACKETS:
         self.delay_value = value
         self.delay_units = units
     elif units == FecReceiver.SECONDS:
         raise NotImplementedError()
     else:
         raise ValueError(to_bytes(
             FecReceiver.ER_DELAY_UNITS.format(units)))
Example #33
 def current_delay(self):
     """Return current delay based on the length of the media buffer."""
     if len(self.medias) == 0:
         return 0
     if self.delay_units == FecReceiver.PACKETS:
         return len(self.medias)
     elif self.delay_units == FecReceiver.SECONDS:
         raise NotImplementedError()
     raise ValueError(
         to_bytes(FecReceiver.ER_DELAY_UNITS.format(self.delay_units)))
Example #34
def api_publisher_task_id_head(id=None, auth_user=None, api_core=None, request=None):
    u"""
    Return a publication task serialized to JSON.

    The publication task attributes are appended with the Celery ``async result`` of the task.
    """
    task = api_core.get_publisher_task(spec={u'_id': id})
    if not task:
        raise IndexError(to_bytes(u'No publication task with id {0}.'.format(id)))
    return ok_200(task, include_properties=True)
Example #35
def api_media_id_get(id=None, auth_user=None, api_core=None, request=None):
    u"""
    Return the information about a media asset serialized to JSON.

    All ``thing_id`` fields are replaced by corresponding ``thing``.
    For example ``user_id`` is replaced by ``user``'s data.
    """
    media = api_core.get_media(spec={'_id': id}, load_fields=True)
    if not media:
        raise IndexError(to_bytes(u'No media asset with id {0}.'.format(id)))
    return ok_200(media, include_properties=True)
Example #36
 def save_media(self, media):
     media.is_valid(True)
     if not media.get_metadata(u'title'):
         raise ValueError(to_bytes(u"Title key is required in media asset's metadata."))
     if media.status != Media.DELETED:
         if self.config.is_mock:
             size = randint(10*1024*1024, 10*1024*1024*1024)
             duration = u'%02d:%02d:%02d' % (randint(0, 2), randint(0, 59), randint(0, 59))
         else:
             size, duration = Storage.add_media(self.config, media)
     else:
         size, duration = (0, 0)
     media.add_metadata(u'size', size, True)
     if duration:
         media.add_metadata(u'duration', duration, True)
     media.add_metadata(u'add_date', int(time()), True)
     try:
         self._db.medias.save(media.__dict__, safe=True)
     except DuplicateKeyError:
         raise ValueError(to_bytes(u'The media URI {0} is already used by another media asset.'.format(media.uri)))
Example #37
 def do_request(self, verb, resource, auth=None, data=None):
     u"""Execute a method of the API."""
     headers = {u'Content-type': u'application/json', u'Accept': u'application/json'}
     auth = auth or self.auth
     auth = auth.credentials if isinstance(auth, User) else auth
     url = u'http://{0}'.format(resource)
     response = verb(url, auth=auth, data=data, headers=headers, timeout=self.timeout)
     try:
         response_json = response.json()
     except:
         raise ValueError(to_bytes(u'Response does not contain valid JSON data:\n' + unicode(response.text)))
     return map_exceptions(response_json)
Example #38
def load_settings(filename=None, create_directories=False):
    default = os.environ.get(u'ENCODEBOX_SETTINGS_FILENAME', SETTINGS_FILENAME)
    filename = filename or default
    if not exists(filename):
        raise IOError(to_bytes(u'Unable to find settings file "{0}".'.format(filename)))
    with open(filename, u'r', u'utf-8') as f:
        settings = yaml.load(f)
    for key, value in settings.iteritems():
        if u'directory' in key and not u'remote' in key:
            settings[key] = abspath(expanduser(value))
            if create_directories:
                try_makedirs(settings[key])
    return settings
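
A hedged usage sketch (the configuration path mirrors Example #16 and the key mirrors Example #41; the printed value is illustrative):

    settings = load_settings(u'etc/config.yaml', create_directories=True)
    print(settings[u'local_directory'])  # e.g. /home/encodebox/local, expanded to an absolute path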
Example #39
def passes_from_template(template_passes, **kwargs):
    u"""
    Return a list of (transcoding) passes with {variables} replaced by the values in kwargs.

    **Example usage**

    >>> import os
    >>> templated_passes = [
    ...     [u'ffmpeg', [u'{video}', u'{audio}'], u'{tmp}/a.wav', u'-analyzeduration 2147480000 -ar 48000 -ac 2'],
    ...     u'neroAacEnc -cbr 128000 -lc -if "{tmp}/été.wav" -of "{tmp}/été.mp4"',
    ...     [u'x264', u'{tmp}/vidéo.y4m', None, u'--pass 1 --fps 25 --bitrate 2000 --no-scenecut']
    ... ]
    >>> passes = passes_from_template(templated_passes, video=u'tabby.h264', audio=u'miaow.aac', tmp=u'/tmp')
    >>> for p in passes:
    ...     print(p)
    [u'ffmpeg', [u'tabby.h264', u'miaow.aac'], u'/tmp/a.wav', u'-analyzeduration 2147480000 -ar 48000 -ac 2']
    [u'neroAacEnc', u'-cbr', u'128000', u'-lc', u'-if', u'/tmp/\\xe9t\\xe9.wav', u'-of', u'/tmp/\\xe9t\\xe9.mp4']
    [u'x264', u'/tmp/vid\\xe9o.y4m', None, u'--pass 1 --fps 25 --bitrate 2000 --no-scenecut']

    Verify that Unicode filenames are handled correctly:

    >>> open(u'/tmp/vidéo.y4m', u'a').close()
    >>> os.remove(passes[2][1])
    """
    passes = []
    for template_pass in template_passes:
        if isinstance(template_pass, string_types):
            values = shlex.split(to_bytes(template_pass.format(**kwargs)))
            passes.append([unicode(v, u'utf-8') for v in values])
        else:
            the_pass = []
            for value in template_pass:
                if value is None:
                    the_pass.append(value)
                elif isinstance(value, string_types):
                    the_pass.append(value.format(**kwargs))
                else:
                    the_pass.append([x.format(**kwargs) for x in value])
            passes.append(the_pass)
    return passes
Example #40
    def compute(sequence, algorithm, direction, L, D, packets):
        """
        This method generates the FEC packet's fields by applying the FEC algorithm to the input packets.
        In case of error (e.g. a bad version number) the method aborts filling the fields and any
        un-updated fields are left at their corresponding default value.

        :param sequence: Sequence number of computed FEC packet
        :type sequence: int
        :param algorithm: Name of algorithm used to compute payload recovery from packets payload
        :type algorithm: str
        :param direction: Direction (column or row) of computed FEC packet (see RFC to understand)
        :type direction: str
        :param L: Horizontal size of the FEC matrix (columns)
        :type L: int
        :param D: Vertical size of the FEC matrix (rows)
        :type D: int
        :param packets: Array containing RTP packets to protect
        :type packets: array(RtpPacket)

        **Example usage**

        Testing invalid input collection of packets:

        >>> from pytoolbox.network.rtp import RtpPacket
        >>> packets = [RtpPacket.create(10, 10, RtpPacket.MP2T_PT, 'a'),
        ...            RtpPacket.create(22, 22, RtpPacket.MP2T_PT, 'b')]
        >>> fec = FecPacket.compute(1, FecPacket.XOR, FecPacket.COL, 2, 2, packets)
        Traceback (most recent call last):
            ...
        ValueError: One of the packets doesn't verify : sequence = snbase + i * offset, 0<i<na

        Testing valid input collection of packets:

        >>> packets = [RtpPacket.create(10, 10, RtpPacket.MP2T_PT, bytearray('gaga', 'utf-8')),
        ...            RtpPacket.create(14, 14, RtpPacket.MP2T_PT, bytearray('salut', 'utf-8')),
        ...            RtpPacket.create(18, 18, RtpPacket.MP2T_PT, bytearray('12345', 'utf-8')),
        ...            RtpPacket.create(22, 22, RtpPacket.MP2T_PT, bytearray('robot', 'utf-8'))]
        >>> fec = FecPacket.compute(2, FecPacket.XOR, FecPacket.COL, 4, 4, packets)
        >>> print(fec)
        errors                = []
        sequence              = 2
        algorithm             = XOR
        direction             = COL
        snbase                = 10
        offset                = 4
        na                    = 4
        L x D                 = 4 x 4
        payload type recovery = 0
        timestamp recovery    = 0
        length recovery       = 1
        payload recovery size = 5
        missing               = []
        >>> print(''.join('%02x:' % x for x in fec.payload_recovery))
        57:5d:5a:4f:35:

        Testing fec packet generation (based on source RTP packets):

        >>> from os import urandom
        >>> from random import randint
        >>> from pytoolbox.network.rtp import RtpPacket
        >>> L = 4
        >>> D = 5
        >>> OFF = 2
        >>> # Generate a [D][L] matrix of randomly generated RTP packets
        >>> matrix = [[RtpPacket.create(L * j + i, (L * j + i) * 100 + randint(0, 50),
        ...           RtpPacket.MP2T_PT, bytearray(urandom(randint(50, 100))))
        ...           for i in xrange(L)] for j in xrange(D)]
        >>> assert(len(matrix) == D and len(matrix[0]) == L)
        >>> # Retrieve the OFF'th column of the matrix
        >>> expected_payload_type_recovery = 0
        >>> expected_timestamp_recovery = 0
        >>> expected_length_recovery = 0
        >>> expected_payload_recovery = bytearray(100)
        >>> packets = []
        >>> for i in xrange(D):
        ...     packet = matrix[i][OFF]
        ...     packets.append(packet)
        ...     # Compute expected recovery fields values
        ...     expected_payload_type_recovery ^= packet.payload_type
        ...     expected_timestamp_recovery ^= packet.timestamp
        ...     expected_length_recovery ^= packet.payload_size
        ...     for j in xrange(packet.payload_size):
        ...         expected_payload_recovery[j] ^= packet.payload[j]
        >>> fec = FecPacket.compute(15, FecPacket.XOR, FecPacket.COL, L, D, packets)
        >>> assert(fec.valid)
        >>> assert(fec.snbase == matrix[0][OFF].sequence == 2)
        >>> assert(fec.na == D and fec.offset == L)
        >>> assert(fec.payload_type_recovery == expected_payload_type_recovery)
        >>> assert(fec.timestamp_recovery == expected_timestamp_recovery)
        >>> assert(fec.length_recovery == expected_length_recovery)
        >>> for i in xrange(fec.payload_size):
        ...     if fec.payload_recovery[i] != expected_payload_recovery[i]:
        ...         print('Payload recovery test failed with i = ' + str(i))
        """
        # Fields default values
        fec = FecPacket()
        fec.sequence = sequence
        if algorithm not in FecPacket.ALGORITHM_RANGE:
            raise ValueError(
                to_bytes('algorithm is not a valid FEC algorithm'))
        if direction not in FecPacket.DIRECTION_RANGE:
            raise ValueError(
                to_bytes('direction is not a valid FEC direction'))
        fec.algorithm = algorithm
        fec.direction = direction
        if fec.direction == FecPacket.COL:
            fec.na = D
            fec.offset = L
        else:
            fec.na = L
            fec.offset = 1
        if fec.algorithm != FecPacket.XOR:
            raise NotImplementedError(to_bytes(FecPacket.ER_ALGORITHM))
        if len(packets) != fec.na:
            raise ValueError(
                to_bytes('packets must contain exactly {0} packets'.format(
                    fec.na)))
        fec.snbase = packets[0].sequence
        # Detect maximum length of packets payload and check packets validity
        size = 0
        i = 0
        for packet in packets:
            if not packet.validMP2T:
                raise ValueError(to_bytes(FecPacket.ER_VALID_MP2T))
            if packet.sequence != (fec.snbase +
                                   i * fec.offset) & RtpPacket.S_MASK:
                raise ValueError(to_bytes(FecPacket.ER_SEQUENCE))
            size = max(size, packet.payload_size)
            i += 1
        # Create payload recovery field according to size/length
        fec.payload_recovery = bytearray(size)
        # Compute FEC packet's fields based on input packets
        for packet in packets:
            # Update (...) recovery fields by xor'ing corresponding fields of all packets
            fec.payload_type_recovery ^= packet.payload_type
            fec.timestamp_recovery ^= packet.timestamp
            fec.length_recovery ^= packet.payload_size
            # Update payload recovery by xor'ing all packets payload
            payload = packet.payload
            if len(packet.payload) < size:
                payload = payload + bytearray(size - len(packet.payload))
            fast_xor_inplace(fec.payload_recovery, payload)
            # NUMPY fec.payload_recovery = bytearray(
            #     numpy.bitwise_xor(fec.payload_recovery, payload))
            # XOR LOOP for i in xrange(min(size, len(packet.payload))):
            # XOR LOOP     fec.payload_recovery[i] ^= packet.payload[i]
        return fec
Example #41
def transcode(in_relpath_json):
    u"""Convert an input media file to 3 (SD) or 5 (HD) output files."""

    logger = get_task_logger(u'encodebox.tasks.transcode')
    report = None
    in_abspath = None
    failed_abspath = None
    temporary_directory = None
    outputs_directory = None
    final_state = states.FAILURE
    final_url = None
    try:
        settings = load_settings()
        in_relpath = json.loads(in_relpath_json)
        in_abspath = join(settings[u'local_directory'], in_relpath)
        try:
            in_directories = in_relpath.split(os.sep)
            assert (len(in_directories) == 4)
            publisher_id = in_directories[0]
            product_id = in_directories[1]
            assert (in_directories[2] == u'uploaded')
            filename = in_directories[3]
            name, extension = splitext(filename)
        except:
            raise ValueError(
                to_bytes(
                    u'Input file path does not respect template publisher_id/product_id/filename'
                ))

        # Generate an unguessable filename using a seed and the original filename
        name = generate_unguessable_filename(settings[u'filenames_seed'],
                                             filename)

        completed_abspath = join(settings[u'local_directory'], publisher_id,
                                 product_id, u'completed', filename)
        failed_abspath = join(settings[u'local_directory'], publisher_id,
                              product_id, u'failed', filename)
        temporary_directory = join(settings[u'local_directory'], publisher_id,
                                   product_id, u'temporary', filename)
        outputs_directory = join(settings[u'local_directory'], publisher_id,
                                 product_id, u'outputs', filename)
        remote_directory = join(settings[u'remote_directory'], publisher_id,
                                product_id)
        remote_url = settings[u'remote_url'].format(publisher_id=publisher_id,
                                                    product_id=product_id,
                                                    name=name)

        report = TranscodeProgressReport(settings[u'api_servers'],
                                         publisher_id, product_id, filename,
                                         getsize(in_abspath), logger)
        report.send_report(states.STARTED, counter=0)

        logger.info(u'Create output directories')

        for path in (completed_abspath, failed_abspath, temporary_directory,
                     outputs_directory):
            shutil.rmtree(path, ignore_errors=True)
        try_makedirs(temporary_directory)
        try_makedirs(outputs_directory)

        resolution = get_media_resolution(in_abspath)
        if not resolution:
            raise IOError(
                to_bytes(u'Unable to detect resolution of video "{0}"'.format(
                    in_relpath)))

        quality = u'hd' if resolution[HEIGHT] >= HD_HEIGHT else u'sd'
        template_transcode_passes = settings[quality + u'_transcode_passes']
        template_smil_filename = settings[quality + u'_smil_template']

        logger.info(u'Media {0} {1}p {2}'.format(quality.upper(),
                                                 resolution[HEIGHT],
                                                 in_relpath))

        logger.info(u'Generate SMIL file from template SMIL file')
        from_template(template_smil_filename,
                      join(outputs_directory, name + u'.smil'),
                      {u'name': name})

        logger.info(
            u'Generate transcoding passes from templated transcoding passes')
        transcode_passes = passes_from_template(template_transcode_passes,
                                                input=in_abspath,
                                                name=name,
                                                out=outputs_directory,
                                                tmp=temporary_directory)
        report.transcode_passes = transcode_passes

        logger.info(u'Execute transcoding passes')
        for counter, transcode_pass in enumerate(transcode_passes, 1):
            if transcode_pass[0] in (u'ffmpeg', u'x264'):
                encoder_module = globals()[transcode_pass[0]]
                for statistics in encoder_module.encode(
                        transcode_pass[1], transcode_pass[2],
                        transcode_pass[3]):
                    status = statistics.pop(u'status').upper()
                    if status == u'PROGRESS':
                        for info in (u'output', u'returncode', u'sanity'):
                            statistics.pop(info, None)
                        report.send_report(states.ENCODING,
                                           counter=counter,
                                           statistics=statistics)
                    elif status == u'ERROR':
                        raise RuntimeError(statistics)
            else:
                try:
                    check_call(transcode_pass)
                except OSError:
                    raise OSError(
                        to_bytes(u'Missing encoder ' + transcode_pass[0]))

        logger.info(
            u'Move the input file to the completed directory and send outputs to the remote host'
        )
        move(in_abspath, completed_abspath)
        try:
            report.send_report(states.TRANSFERRING)
            is_remote = u':' in remote_directory
            if is_remote:
                # Create directory in remote host
                username_host, directory = remote_directory.split(u':')
                username, host = username_host.split(u'@')
                ssh_client = paramiko.SSHClient()
                ssh_client.load_system_host_keys()
                ssh_client.set_missing_host_key_policy(
                    paramiko.AutoAddPolicy())  # FIXME man-in-the-middle attack
                ssh_client.connect(host, username=username)
                ssh_client.exec_command(u'mkdir -p "{0}"'.format(directory))
            else:
                # Create directory in local host
                try_makedirs(remote_directory)
            rsync(source=outputs_directory,
                  destination=remote_directory,
                  source_is_dir=True,
                  destination_is_dir=True,
                  archive=True,
                  progress=True,
                  recursive=True,
                  extra=u'ssh' if is_remote else None)
            final_state, final_url = states.SUCCESS, remote_url
        except Exception as e:
            logger.exception(u'Transfer of outputs to remote host failed')
            final_state = states.TRANSFER_ERROR
            with open(join(outputs_directory, u'transfer-error.log'), u'w',
                      u'utf-8') as log:
                log.write(repr(e))
    except Exception as e:
        logger.exception(u'Transcoding task failed')
        try:
            logger.info(u'Report the error by e-mail')
            send_error_email(exception=e,
                             filename=in_abspath,
                             settings=settings)
        except:
            logger.exception(u'Unable to report the error by e-mail')
        logger.info(
            u'Move the input file to the failed directory and remove the outputs'
        )
        if in_abspath and failed_abspath:
            move(in_abspath, failed_abspath)
        if outputs_directory and exists(outputs_directory):
            shutil.rmtree(outputs_directory)
        raise
    finally:
        if report:
            report.send_report(final_state, url=final_url)
        logger.info(u'Remove the temporary files')
        if temporary_directory and exists(temporary_directory):
            shutil.rmtree(temporary_directory)
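
The task derives the published name from a secret seed so that output URLs cannot be guessed from the original upload name. A hypothetical sketch of what generate_unguessable_filename could look like (the real encodebox helper may differ; the seed value below is made up):

import hashlib

def generate_unguessable_filename_sketch(seed, filename):
    # Hash the secret seed together with the original filename; the digest is the new name.
    return hashlib.sha256((seed + filename).encode('utf-8')).hexdigest()[:32]

print(generate_unguessable_filename_sketch('my-secret-seed', 'movie.mp4'))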
示例#42
0
File: http.py  Project: zlorb/pytoolbox
def get_request_data(request,
                     accepted_keys=None,
                     required_keys=None,
                     sources=('query', 'form', 'json'),
                     qs_only_first_value=False,
                     optional=False):
    """
    Return a python dictionary containing the values retrieved from various attributes (sources) of
    the request.

    This function is specifically implemented to retrieve data from an instance of
    `werkzeug.wrappers.Request` or `django.http.request.HttpRequest` by only using `getattr` to
    respect the duck typing philosophy.

    FIXME: Add an example that has JSON content.

    **Example usage**

    >>> from cStringIO import StringIO
    >>> from pytoolbox.unittest import asserts
    >>> from werkzeug.wrappers import Request

    >>> d = 'key1=this+is+encoded+form+data&key2=another'
    >>> q = 'foo=bar&blah=blafasel'
    >>> c = 'application/x-www-form-urlencoded'
    >>> r = Request.from_values(
    ...         query_string=q, content_length=len(d), input_stream=StringIO(d), content_type=c)

    >>> asserts.dict_equal(get_request_data(r), {
    ...     'blah': ['blafasel'],
    ...     'foo': ['bar'],
    ...     'key1': ['this is encoded form data'],
    ...     'key2': ['another']
    ... })

    Restrict valid keys:

    >>> get_request_data(r, accepted_keys=['foo']) # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    ValueError: Invalid key "..." from the request, valid: [...'foo'].

    Requires specific keys:

    >>> get_request_data(r, required_keys=['foo', 'THE_key']) # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    ValueError: Missing key "THE_key" from the request, required: [...'foo', ...'THE_key'].

    Retrieve data with or without a fallback to an empty string (JSON content):

    >>> get_request_data(r, sources=['json'], optional=True)
    {}
    >>> get_request_data(r, sources=['json'])
    Traceback (most recent call last):
        ...
    ValueError: Unable to retrieve any data from the request.

    The order of the sources is important:

    >>> d = 'foo=bar+form+data'
    >>> q = 'foo=bar+query+string&it=works'
    >>> r = Request.from_values(
    ...         query_string=q, content_length=len(d), input_stream=StringIO(d), content_type=c)
    >>> asserts.dict_equal(get_request_data(r, sources=['query', 'form']), {
    ...     'it': ['works'], 'foo': ['bar form data']
    ... })
    >>> asserts.dict_equal(get_request_data(r, sources=['form', 'query']), {
    ...     'it': ['works'], 'foo': ['bar query string']
    ... })

    Retrieve only the first value of the keys (Query string):

    >>> r = Request.from_values(query_string='foo=bar+1&foo=bar+2&foo=bar+3', content_type=c)
    >>> asserts.dict_equal(get_request_data(r, sources=['query']), {
    ...     'foo': ['bar 1', 'bar 2', 'bar 3']
    ... })
    >>> asserts.dict_equal(get_request_data(r, sources=['query'], qs_only_first_value=True), {
    ...     'foo': 'bar 1'
    ... })

    """
    data = {}
    for source in sources:
        if source == 'form':
            data.update(getattr(request, 'form', {}))  # werkzeug
        elif source == 'json':
            data.update(getattr(request, 'get_json', lambda: {})()
                        or {})  # werkzeug
        elif source == 'query':
            query_dict = getattr(
                request,
                'args',  # werkzeug
                urlparse.parse_qs(
                    getattr(request, 'META', {}).get('QUERY_STRING',
                                                     '')))  # django
            if qs_only_first_value:
                for key, value in query_dict.iteritems():
                    data[key] = value[0] if isinstance(value, list) else value
            else:
                data.update(query_dict)

    if required_keys is not None:
        for key in required_keys:
            if key not in data:
                raise ValueError(to_bytes(
                    'Missing key "{0}" from the request, required: {1}.'.format(
                        key, required_keys)))
    if accepted_keys is not None:
        for key in data:
            if key not in accepted_keys:
                raise ValueError(
                    to_bytes('Invalid key "{0}" from the request, valid: {1}.'.
                             format(key, accepted_keys)))
    if not data and not optional:
        raise ValueError(
            to_bytes('Unable to retrieve any data from the request.'))
    return data or {}
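
The docstring's FIXME asks for a JSON example; a minimal sketch, assuming a werkzeug release whose Request exposes get_json() (e.g. werkzeug 2.x), might look like this:

# Assumes Request.get_json() is available; on older werkzeug releases the getattr
# fallback in get_request_data returns an empty dict instead.
from werkzeug.wrappers import Request

request = Request.from_values(
    data='{"title": "a title", "tags": ["a", "b"]}',
    content_type='application/json')
assert get_request_data(request, sources=['json']) == {
    'title': 'a title', 'tags': ['a', 'b']}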
示例#43
0
    def put_fec(self, fec):
        """
        Put an incoming FEC packet, the algorithm will do the following according to these
        scenarios:

        1. The fec packet is useless if none of the protected media packets is missing
        2. Only one media packet is missing, the fec packet is able to recover it now!
        3. More than one media packet is missing, the fec packet is stored for future recovery
        """
        if self.flushing:
            raise ValueError(to_bytes(FecReceiver.ER_FLUSHING))
        if not fec.valid:
            raise ValueError(to_bytes('Invalid FEC packet'))

        if fec.direction == FecPacket.COL:
            self.col_received += 1
        elif fec.direction == FecPacket.ROW:
            self.row_received += 1
        else:
            raise ValueError(FecReceiver.ER_DIRECTION.format(fec.direction))
        cross = None
        media_lost = 0
        media_max = (fec.snbase + fec.na * fec.offset) & RtpPacket.S_MASK
        media_test = fec.snbase
        while media_test != media_max:
            # If media packet is not in the medias buffer (is missing)
            if media_test not in self.medias:
                media_lost = media_test
                # TODO
                cross = self.crosses.get(media_test)
                if not cross:
                    cross = {'col_sequence': None, 'row_sequence': None}
                    self.crosses[media_test] = cross
                    if len(self.crosses) > self.max_cross:
                        self.max_cross = len(self.crosses)
                # Register the fec packet able to recover the missing media packet
                if fec.direction == FecPacket.COL:
                    if cross['col_sequence']:
                        raise ValueError(
                            to_bytes(
                                FecReceiver.ER_COL_OVERWRITE.format(
                                    media_lost)))
                    cross['col_sequence'] = fec.sequence
                elif fec.direction == FecPacket.ROW:
                    if cross['row_sequence']:
                        raise ValueError(
                            to_bytes(
                                FecReceiver.ER_ROW_OVERWRITE.format(
                                    media_lost)))
                    cross['row_sequence'] = fec.sequence
                else:
                    raise ValueError(
                        to_bytes(
                            FecReceiver.ER_FEC_DIRECTION.format(
                                fec.direction)))
                fec.set_missing(media_test)
            media_test = (media_test + fec.offset) & RtpPacket.S_MASK
        if fec.L:
            self.matrixL = fec.L
        if fec.D:
            self.matrixD = fec.D
        # [1] The fec packet is useless if none of the protected media packets is missing
        if len(fec.missing) == 0:
            return
        # FIXME check if 10*delay_value is a good way to avoid removing early fec packets !
        # The fec packet is useless if it needs a media packet that was already output to do the recovery
        drop = not FecReceiver.validity_window(
            fec.snbase, self.position,
            (self.position + 10 * self.delay_value) & RtpPacket.S_MASK)
        if fec.direction == FecPacket.COL:
            if drop:
                self.col_dropped += 1
                return
            self.cols[fec.sequence] = fec
            if len(self.cols) > self.max_col:
                self.max_col = len(self.cols)
        elif fec.direction == FecPacket.ROW:
            if drop:
                self.row_dropped += 1
                return
            self.rows[fec.sequence] = fec
            if len(self.rows) > self.max_row:
                self.max_row = len(self.rows)
        # [2] Only one media packet is missing, the fec packet is able to recover it now!
        if len(fec.missing) == 1:
            self.recover_media_packet(media_lost, cross, fec)
            self.out()  # FIXME maybe better to call it from another thread
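
The drop test above needs a wrap-aware notion of "inside the window" because RTP sequence numbers live modulo 2^16; a small illustrative check (not necessarily how FecReceiver.validity_window is implemented) could be:

def in_window(sequence, start, end):
    # True if sequence lies in the inclusive window [start, end], taking the 16-bit wrap into account.
    if start <= end:
        return start <= sequence <= end
    return sequence >= start or sequence <= end

start = 65530
end = (start + 200) & 0xFFFF  # a window of 200 sequence numbers wrapping past 0xFFFF
assert in_window(65535, start, end)
assert in_window(100, start, end)
assert not in_window(30000, start, end)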
示例#44
0
    def recover_media_packet(self, media_sequence, cross, fec):
        """
        Recover a missing media packet helped by a FEC packet, this method is also called to
        register an incoming media packet if it is registered as missing.
        """

        recovered_by_fec = fec is not None

        # Read the cross and remove it from the buffer
        col_sequence = cross['col_sequence']
        row_sequence = cross['row_sequence']
        del self.crosses[media_sequence]

        # Recover the missing media packet and remove any useless linked fec packet
        if recovered_by_fec:
            if len(fec.missing) != 1:
                raise NotImplementedError(
                    FecReceiver.ER_MISSING_COUNT.format(len(fec.missing)))
            if fec.direction == FecPacket.COL and fec.sequence != col_sequence:
                raise NotImplementedError(
                    FecReceiver.ER_COL_MISMATCH.format(fec.sequence,
                                                       col_sequence))
            if fec.direction == FecPacket.ROW and fec.sequence != row_sequence:
                raise NotImplementedError(
                    FecReceiver.ER_ROW_MISMATCH.format(fec.sequence,
                                                       row_sequence))

            # Media packet recovery
            # > Copy fec packet fields into the media packet
            media = RtpPacket.create(media_sequence, fec.timestamp_recovery,
                                     fec.payload_type_recovery,
                                     fec.payload_recovery)
            payload_size = fec.length_recovery

            # > recovered payload ^= all media packets linked to the fec packet
            aborted = False
            media_max = (fec.snbase + fec.na * fec.offset) & RtpPacket.S_MASK
            media_test = fec.snbase
            while media_test != media_max:
                if media_test == media_sequence:
                    media_test = (media_test + fec.offset) & RtpPacket.S_MASK
                    continue
                friend = self.medias.get(media_test)
                # Unable to recover the media packet if any of the friend media packets is missing
                if not friend:
                    self.media_aborted_recovery += 1
                    aborted = True
                    break
                media.payload_type ^= friend.payload_type
                media.timestamp ^= friend.timestamp
                payload_size ^= friend.payload_size
                # FIXME optimization: replace this Python loop with a faster xor (e.g. fast_xor_inplace)
                for no in xrange(min(len(media.payload),
                                     len(friend.payload))):  # noqa
                    media.payload[no] ^= friend.payload[no]
                media_test = (media_test + fec.offset) & RtpPacket.S_MASK

            # If the media packet is successfully recovered
            if not aborted:
                media.payload = media.payload[0:payload_size]
                self.media_recovered += 1
                if media.sequence in self.medias:
                    self.media_overwritten += 1
                self.medias[media.sequence] = media
                if len(self.medias) > self.max_media:
                    self.max_media = len(self.medias)
                if fec.direction == FecPacket.COL:
                    del self.cols[fec.sequence]
                else:
                    del self.rows[fec.sequence]

        # Check if a cascade effect happens ...
        fec_col = self.cols.get(col_sequence) if col_sequence else None
        fec_row = self.rows.get(row_sequence) if row_sequence else None

        if fec_col:
            fec_col.set_recovered(media_sequence)
        if fec_row:
            fec_row.set_recovered(media_sequence)

        if fec_col:
            if len(fec_col.missing) == 1:
                # Cascade !
                cascade_media_sequence = fec_col.missing[0]
                if cascade_media_sequence:
                    cascade_cross = self.crosses.get(cascade_media_sequence)
                    if cascade_cross:
                        self.recover_media_packet(cascade_media_sequence,
                                                  cascade_cross, fec_col)
                    else:
                        raise NotImplementedError(
                            to_bytes(
                                'recover_media_packet({1}, {2}, {3}):{0}{4}{0}media sequence : {5}{0}{6}{0}'
                                .format(os.linesep, media_sequence, cross, fec,
                                        FecReceiver.ER_NULL_COL_CASCADE,
                                        cascade_media_sequence, fec_col)))
                else:
                    raise NotImplementedError(
                        to_bytes(
                            FecReceiver.ER_GET_COL_CASCADE.format(
                                os.linesep, fec_col)))

        if fec_row:
            if len(fec_row.missing) == 1:
                # Cascade !
                cascade_media_sequence = fec_row.missing[0]
                if cascade_media_sequence:
                    cascade_cross = self.crosses.get(cascade_media_sequence)
                    if cascade_cross:
                        self.recover_media_packet(cascade_media_sequence,
                                                  cascade_cross, fec_row)
                    else:
                        raise NotImplementedError(
                            to_bytes(
                                '{1}{0}recover_media_packet({2}, {3}, {4}):{0}media sequence : {5}{0}{6}{0}'
                                .format(os.linesep,
                                        FecReceiver.ER_NULL_ROW_CASCADE,
                                        media_sequence, cross, fec,
                                        cascade_media_sequence, fec_row)))
                else:
                    raise NotImplementedError(
                        to_bytes(
                            FecReceiver.ER_GET_ROW_CASCADE.format(
                                os.linesep, fec_row)))
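
A toy illustration of the cascade this method exploits, with integers standing in for packet payloads: losing two packets of the same row defeats the row FEC, but a column FEC that protects only one of them recovers it, which turns the row into a single-loss case and recovers the second packet as well.

packets = {(0, 0): 11, (0, 1): 22, (1, 0): 33, (1, 1): 44}
col_fec = {col: packets[(0, col)] ^ packets[(1, col)] for col in (0, 1)}
row_fec = {row: packets[(row, 0)] ^ packets[(row, 1)] for row in (0, 1)}

# Lose both packets of row 0: the row FEC alone protects two missing packets and is stuck.
received = {key: value for key, value in packets.items() if key not in {(0, 0), (0, 1)}}

# Column 0 protects a single missing packet, so it recovers (0, 0) ...
received[(0, 0)] = col_fec[0] ^ received[(1, 0)]
# ... and row 0 is now a single-loss case: the cascade recovers (0, 1).
received[(0, 1)] = row_fec[0] ^ received[(0, 0)]
assert received == packets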