Code example #1
0
 def _get_missing_subformats(subformats, ar, w, h):
     """Return the subformat qualities that are missing and transcodable.

     :param subformats: existing subformat objects (each carrying a
         ``tags['preset_quality']`` entry).
     :param ar: aspect ratio of the master video.
     :param w: width of the master video.
     :param h: height of the master video.
     :returns: list of preset qualities not yet present that can be
         transcoded for the given aspect ratio and dimensions.
     """
     existing = {sf['tags']['preset_quality'] for sf in subformats}
     candidates = set(get_all_distinct_qualities()) - existing
     return [q for q in candidates if can_be_transcoded(q, ar, w, h)]
Code example #2
0
def _validate(id_type=None, quality=None):
    """Check that ``id_type`` and ``quality`` hold allowed values.

    :param id_type: must be one of the module-level ``id_types``.
    :param quality: if given, must be one of the known preset qualities.
    :raises Exception: when either parameter is out of range.
    """
    if id_type not in id_types:
        raise Exception('`id_type` param must be one of {0}'.format(id_types))

    valid_qualities = get_all_distinct_qualities()
    if quality and quality not in valid_qualities:
        raise Exception('`quality` param must be one of {0}'.format(
            valid_qualities))
Code example #3
0
File: receivers.py  Project: kprzerwa/cds-videos
 def _second_step(self, event):
     """Build the second workflow step: frame extraction plus transcoding.

     Publishes the list of preset qualities on the event response and
     returns a celery ``group`` running frame extraction in parallel with
     one transcoding task per preset quality.
     """
     qualities = get_all_distinct_qualities()
     event.response['presets'] = qualities
     frames_task = self.run_task(
         event=event, task_name='file_video_extract_frames')
     transcode_tasks = []
     for quality in qualities:
         transcode_tasks.append(self.run_task(
             event=event, task_name='file_transcode',
             preset_quality=quality))
     return group(frames_task, *transcode_tasks)
Code example #4
0
 def clean(self, event):
     """Delete tasks and everything created by them.

     :param event: the webhook event whose task artifacts are removed.
     """
     self.clean_task(event=event, task_name='file_video_extract_frames')
     # Remove every transcoded subformat, one cleanup per preset quality.
     for preset_quality in get_all_distinct_qualities():
         self.clean_task(event=event,
                         task_name='file_transcode',
                         preset_quality=preset_quality)
     self.clean_task(event=event,
                     task_name='file_video_metadata_extraction')
     # A download task only ran when the payload did not reference an
     # already-existing file version, so only clean it in that case.
     if 'version_id' not in event.payload:
         self.clean_task(event=event, task_name='file_download')
     super(AVCWorkflow, self).clean(event)
Code example #5
0
File: cli.py  Project: kprzerwa/cds-videos
def quality(recid, depid, quality):
    """Recreate subformat for the given quality.

    Exactly one of ``recid``/``depid`` must be given; ``quality`` must be a
    known preset quality.

    :param recid: record id of the video (mutually exclusive with depid).
    :param depid: deposit id of the video (mutually exclusive with recid).
    :param quality: preset quality of the subformat to recreate.
    :raises ClickException: on missing id or unknown quality.
    """
    if not recid and not depid:
        raise ClickException('Missing option "--recid" or "--depid"')

    id_value = recid or depid
    # Renamed from `type` to avoid shadowing the builtin.
    id_type = 'recid' if recid else 'depid'

    qualities = get_all_distinct_qualities()
    if quality not in qualities:
        raise ClickException(
            "Input quality must be one of {0}".format(qualities))

    output = create_subformat(id_type=id_type, id_value=id_value,
                              quality=quality)
    if output:
        click.echo("Creating the following subformat: {0}".format(output))
    else:
        click.echo("This subformat cannot be transcoded.")
Code example #6
0
def create_all_subformats(id_type, id_value):
    """Recreate every transcodable subformat for a video.

    :param id_type: type of the identifier ('recid' or 'depid').
    :param id_value: value of the identifier.
    :returns: the list of preset qualities scheduled for transcoding.
    """
    _validate(id_type=id_type)

    video_deposit, dep_uuid = _resolve_deposit(id_type, id_value)
    master, ar, w, h = _get_master_video(video_deposit)

    transcodables = [
        q for q in get_all_distinct_qualities()
        if can_be_transcoded(q, ar, w, h)
    ]

    # sequential (and immutable) transcoding to avoid MergeConflicts on bucket
    if transcodables:
        tasks = [
            MaintenanceTranscodeVideoTask().si(
                version_id=master['version_id'],
                preset_quality=q,
                deposit_id=dep_uuid)
            for q in transcodables
        ]
        chain(tasks).apply_async()

    return transcodables
Code example #7
0
def create_all_missing_subformats(id_type, id_value):
    """Create every subformat that is missing and can be transcoded.

    :param id_type: type of the identifier ('recid' or 'depid').
    :param id_value: value of the identifier.
    :returns: the list of preset qualities scheduled for transcoding.
    """
    _validate(id_type=id_type)

    video_deposit, dep_uuid = _resolve_deposit(id_type, id_value)
    master, ar, w, h = _get_master_video(video_deposit)
    subformats = CDSVideosFilesIterator.get_video_subformats(master)

    # Qualities that already exist on the master video.
    existing = {sf['tags']['preset_quality'] for sf in subformats}
    missing = set(get_all_distinct_qualities()) - existing
    transcodables = [q for q in missing if can_be_transcoded(q, ar, w, h)]

    # sequential (and immutable) transcoding to avoid MergeConflicts on bucket
    if transcodables:
        tasks = [
            MaintenanceTranscodeVideoTask().si(
                version_id=master['version_id'],
                preset_quality=q,
                deposit_id=dep_uuid)
            for q in transcodables
        ]
        chain(tasks).apply_async()

    return transcodables
Code example #8
0
def test_avc_workflow_receiver_local_file_pass(api_app, db, api_project,
                                               access_token, json_headers,
                                               mock_sorenson, online_video,
                                               webhooks, local_file):
    """Test AVCWorkflow receiver."""
    project, video_1, video_2 = api_project
    video_1_depid = video_1['_deposit']['id']
    video_1_id = str(video_1.id)
    project_id = str(project.id)

    bucket_id = ObjectVersion.query.filter_by(
        version_id=local_file).one().bucket_id
    video_size = 5510872
    master_key = 'test.mp4'
    slave_keys = [
        '{0}.mp4'.format(quality) for quality in get_presets_applied().keys()
        if quality != '1024p'
    ]
    with api_app.test_request_context():
        url = url_for('invenio_webhooks.event_list',
                      receiver_id='avc',
                      access_token=access_token)

    with api_app.test_client() as client, \
            mock.patch('invenio_sse.ext._SSEState.publish') as mock_sse, \
            mock.patch('invenio_indexer.api.RecordIndexer.bulk_index') \
            as mock_indexer:
        sse_channel = 'mychannel'
        payload = dict(
            uri=online_video,
            deposit_id=video_1_depid,
            key=master_key,
            sse_channel=sse_channel,
            sleep_time=0,
            version_id=str(local_file),
        )
        # [[ RUN WORKFLOW ]]
        resp = client.post(url, headers=json_headers, data=json.dumps(payload))

        assert resp.status_code == 201
        data = json.loads(resp.data.decode('utf-8'))

        assert '_tasks' in data
        assert data['key'] == master_key
        assert 'version_id' in data
        assert data.get('presets') == get_all_distinct_qualities()
        assert 'links' in data  # TODO decide with links are needed

        assert ObjectVersion.query.count() == get_object_count()

        # Master file
        master = ObjectVersion.get(bucket_id, master_key)
        tags = master.get_tags()
        assert tags['_event_id'] == data['tags']['_event_id']
        assert master.key == master_key
        assert str(master.version_id) == data['version_id']
        assert master.file
        assert master.file.size == video_size

        # Check metadata tags
        metadata_keys = [
            'duration', 'bit_rate', 'size', 'avg_frame_rate', 'codec_name',
            'codec_long_name', 'width', 'height', 'nb_frames',
            'display_aspect_ratio', 'color_range'
        ]
        assert all([key in tags for key in metadata_keys])
        assert ObjectVersion.query.count() == get_object_count()
        assert ObjectVersionTag.query.count() == get_tag_count(is_local=True)

        # Check metadata patch
        recid = PersistentIdentifier.get('depid', video_1_depid).object_uuid
        record = Record.get_record(recid)
        assert 'extracted_metadata' in record['_cds']
        assert all([
            key in str(record['_cds']['extracted_metadata'])
            for key in metadata_keys
        ])

        # Check slaves
        for slave_key in slave_keys:
            slave = ObjectVersion.get(bucket_id, slave_key)
            tags = slave.get_tags()
            assert slave.key == slave_key
            assert '_sorenson_job_id' in tags
            assert tags['_sorenson_job_id'] == '1234'
            assert 'master' in tags
            assert tags['master'] == str(master.version_id)
            assert master.file
            assert master.file.size == video_size

        video = deposit_video_resolver(video_1_depid)
        events = get_deposit_events(video['_deposit']['id'])

        # check deposit tasks status
        tasks_status = get_tasks_status_by_task(events)
        assert len(tasks_status) == 3
        assert 'file_transcode' in tasks_status
        assert 'file_video_extract_frames' in tasks_status
        assert 'file_video_metadata_extraction' in tasks_status

        # check single status
        collector = CollectInfoTasks()
        iterate_events_results(events=events, fun=collector)
        info = list(collector)
        assert len(info) == 11
        assert info[0][0] == 'file_video_metadata_extraction'
        assert info[0][1].status == states.SUCCESS
        assert info[1][0] == 'file_video_extract_frames'
        assert info[1][1].status == states.SUCCESS
        transocode_tasks = info[2:]
        statuses = [task[1].status for task in info[2:]]
        assert len(transocode_tasks) == len(statuses)
        assert [
            states.SUCCESS, states.REVOKED, states.REVOKED, states.REVOKED,
            states.SUCCESS, states.REVOKED, states.REVOKED, states.REVOKED,
            states.REVOKED
        ] == statuses

        # check tags (exclude 'uri-origin')
        assert ObjectVersionTag.query.count() == (get_tag_count() - 1)

        # check sse is called
        assert mock_sse.called

        messages = [
            (sse_channel, states.SUCCESS, 'file_video_metadata_extraction'),
            (sse_channel, states.STARTED, 'file_transcode'),
            (sse_channel, states.SUCCESS, 'file_transcode'),
            (sse_channel, states.REVOKED, 'file_transcode'),  # ResolutionError
            (sse_channel, states.STARTED, 'file_video_extract_frames'),
            (sse_channel, states.SUCCESS, 'file_video_extract_frames'),
            (sse_channel, states.SUCCESS, 'update_deposit'),
        ]

        call_args = []
        for (_, kwargs) in mock_sse.call_args_list:
            type_ = kwargs['type_']
            state = kwargs['data']['state']
            channel = kwargs['channel']
            tuple_ = (channel, state, type_)
            if tuple_ not in call_args:
                call_args.append(tuple_)

        assert len(call_args) == len(messages)
        for message in messages:
            assert message in call_args

        deposit = deposit_video_resolver(video_1_depid)

        def filter_events(call_args):
            _, x = call_args
            return x['type_'] == 'update_deposit'

        list_kwargs = list(filter(filter_events, mock_sse.call_args_list))
        assert len(list_kwargs) == 10
        _, kwargs = list_kwargs[8]
        assert kwargs['type_'] == 'update_deposit'
        assert kwargs['channel'] == 'mychannel'
        assert kwargs['data']['state'] == states.SUCCESS
        assert kwargs['data']['meta']['payload'] == {
            'deposit_id': deposit['_deposit']['id'],
            'event_id': data['tags']['_event_id'],
            'deposit': deposit,
        }

        # check ElasticSearch is called
        ids = set(get_indexed_records_from_mock(mock_indexer))
        assert video_1_id in ids
        assert project_id in ids
        assert deposit['_cds']['state'] == {
            'file_video_metadata_extraction': states.SUCCESS,
            'file_video_extract_frames': states.SUCCESS,
            'file_transcode': states.SUCCESS,
        }

    # Test cleaning!
    url = '{0}?access_token={1}'.format(data['links']['cancel'], access_token)

    with mock.patch('invenio_sse.ext._SSEState.publish') as mock_sse, \
            mock.patch('invenio_indexer.api.RecordIndexer.bulk_index') \
            as mock_indexer, \
            api_app.test_client() as client:
        # [[ DELETE WORKFLOW ]]
        resp = client.delete(url, headers=json_headers)

        assert resp.status_code == 201

        # check that object versions and tags are deleted
        # (Create + Delete) * Num Objs - 1 (because the file is local and will
        # be not touched)
        assert ObjectVersion.query.count() == 2 * get_object_count() - 1
        # Tags associated with the old version
        assert ObjectVersionTag.query.count() == get_tag_count(is_local=True)
        bucket = Bucket.query.first()
        # and bucket is empty
        assert bucket.size == 0

        record = RecordMetadata.query.filter_by(id=video_1_id).one()

        # check metadata patch are deleted
        assert 'extracted_metadata' not in record.json['_cds']

        # check the corresponding Event persisted after cleaning
        assert len(get_deposit_events(record.json['_deposit']['id'])) == 0
        assert len(
            get_deposit_events(record.json['_deposit']['id'],
                               _deleted=True)) == 1

        # check no SSE message and reindexing is fired
        assert mock_sse.called is False
        assert mock_indexer.called is False
Code example #9
0
def test_avc_workflow_receiver_pass(api_app, db, api_project, access_token,
                                    json_headers, mock_sorenson, online_video,
                                    webhooks, users):
    """Test the full AVCWorkflow receiver, including the download step.

    Unlike the local-file variant, no ``version_id`` is in the payload, so
    the workflow also runs ``file_download``.  Additionally verifies the
    feedback endpoint permissions for different users and the cleaning
    (DELETE) endpoint.
    """
    project, video_1, video_2 = api_project
    video_1_depid = video_1['_deposit']['id']
    video_1_id = str(video_1.id)
    project_id = str(project.id)

    bucket_id = video_1['_buckets']['deposit']
    video_size = 5510872
    master_key = 'test.mp4'
    # Expected transcoded output keys, one per applied preset.
    # NOTE(review): '1024p' is excluded — presumably this source cannot be
    # transcoded to that quality (cf. the REVOKED SSE message below) — confirm.
    slave_keys = [
        '{0}.mp4'.format(quality) for quality in get_presets_applied()
        if quality != '1024p'
    ]
    with api_app.test_request_context():
        url = url_for('invenio_webhooks.event_list',
                      receiver_id='avc',
                      access_token=access_token)

    with api_app.test_client() as client, \
            mock.patch('invenio_sse.ext._SSEState.publish') as mock_sse, \
            mock.patch('invenio_indexer.api.RecordIndexer.bulk_index') \
            as mock_indexer:
        sse_channel = 'mychannel'
        payload = dict(
            uri=online_video,
            deposit_id=video_1_depid,
            key=master_key,
            sse_channel=sse_channel,
            sleep_time=0,
        )
        # [[ RUN WORKFLOW ]]
        resp = client.post(url, headers=json_headers, data=json.dumps(payload))

        assert resp.status_code == 201
        data = json.loads(resp.data.decode('utf-8'))

        assert '_tasks' in data
        assert data['tags']['uri_origin'] == online_video
        assert data['key'] == master_key
        assert 'version_id' in data
        assert data.get('presets') == get_all_distinct_qualities()
        assert 'links' in data  # TODO decide which links are needed

        assert ObjectVersion.query.count() == get_object_count()

        # Master file
        master = ObjectVersion.get(bucket_id, master_key)
        tags = master.get_tags()
        assert tags['_event_id'] == data['tags']['_event_id']
        assert master.key == master_key
        assert str(master.version_id) == data['version_id']
        assert master.file
        assert master.file.size == video_size

        # Check metadata tags
        metadata_keys = [
            'duration', 'bit_rate', 'size', 'avg_frame_rate', 'codec_name',
            'codec_long_name', 'width', 'height', 'nb_frames',
            'display_aspect_ratio', 'color_range'
        ]
        assert all([key in tags for key in metadata_keys])

        # Check metadata patch
        recid = PersistentIdentifier.get('depid', video_1_depid).object_uuid
        record = Record.get_record(recid)
        assert 'extracted_metadata' in record['_cds']
        assert all([
            key in str(record['_cds']['extracted_metadata'])
            for key in metadata_keys
        ])

        # Check slaves
        for slave_key in slave_keys:
            slave = ObjectVersion.get(bucket_id, slave_key)
            tags = slave.get_tags()
            assert slave.key == slave_key
            assert '_sorenson_job_id' in tags
            assert tags['_sorenson_job_id'] == '1234'
            assert 'master' in tags
            assert tags['master'] == str(master.version_id)
            assert master.file
            assert master.file.size == video_size

        video = deposit_video_resolver(video_1_depid)
        events = get_deposit_events(video['_deposit']['id'])

        # check deposit tasks status
        # 'file_download' is present here: no local version_id was supplied.
        tasks_status = get_tasks_status_by_task(events)
        assert len(tasks_status) == 4
        assert 'file_download' in tasks_status
        assert 'file_transcode' in tasks_status
        assert 'file_video_extract_frames' in tasks_status
        assert 'file_video_metadata_extraction' in tasks_status

        # check single status
        collector = CollectInfoTasks()
        iterate_events_results(events=events, fun=collector)
        info = list(collector)
        presets = get_presets_applied().keys()
        assert info[0][0] == 'file_download'
        assert info[0][1].status == states.SUCCESS
        assert info[1][0] == 'file_video_metadata_extraction'
        assert info[1][1].status == states.SUCCESS
        assert info[2][0] == 'file_video_extract_frames'
        assert info[2][1].status == states.SUCCESS
        # All remaining entries are transcode tasks; successful ones must
        # have used one of the applied presets.
        for i in info[3:]:
            assert i[0] == 'file_transcode'
            if i[1].status == states.SUCCESS:
                assert i[1].result['payload']['preset_quality'] in presets

        # check tags
        assert ObjectVersionTag.query.count() == get_tag_count()

        # check sse is called
        assert mock_sse.called

        messages = [
            (sse_channel, states.STARTED, 'file_download'),
            (sse_channel, states.SUCCESS, 'file_download'),
            (sse_channel, states.SUCCESS, 'file_video_metadata_extraction'),
            (sse_channel, states.STARTED, 'file_transcode'),
            (sse_channel, states.SUCCESS, 'file_transcode'),
            (sse_channel, states.REVOKED, 'file_transcode'),  # ResolutionError
            (sse_channel, states.STARTED, 'file_video_extract_frames'),
            (sse_channel, states.SUCCESS, 'file_video_extract_frames'),
            (sse_channel, states.SUCCESS, 'update_deposit'),
        ]

        # Deduplicate published SSE messages into (channel, state, type).
        call_args = []
        for (_, kwargs) in mock_sse.call_args_list:
            type_ = kwargs['type_']
            state = kwargs['data']['state']
            channel = kwargs['channel']
            tuple_ = (channel, state, type_)
            if tuple_ not in call_args:
                call_args.append(tuple_)

        assert len(call_args) == len(messages)
        for message in messages:
            assert message in call_args

        deposit = deposit_video_resolver(video_1_depid)

        def filter_events(call_args):
            # Keep only the 'update_deposit' SSE publications.
            _, x = call_args
            return x['type_'] == 'update_deposit'

        list_kwargs = list(filter(filter_events, mock_sse.call_args_list))
        assert len(list_kwargs) == 12
        _, kwargs = list_kwargs[10]
        assert kwargs['type_'] == 'update_deposit'
        assert kwargs['channel'] == 'mychannel'
        assert kwargs['data']['state'] == states.SUCCESS
        assert kwargs['data']['meta']['payload'] == {
            'deposit_id': deposit['_deposit']['id'],
            'event_id': data['tags']['_event_id'],
            'deposit': deposit,
        }

        # check ElasticSearch is called
        ids = set(get_indexed_records_from_mock(mock_indexer))
        assert video_1_id in ids
        assert project_id in ids
        assert deposit['_cds']['state'] == {
            'file_download': states.SUCCESS,
            'file_video_metadata_extraction': states.SUCCESS,
            'file_video_extract_frames': states.SUCCESS,
            'file_transcode': states.SUCCESS,
        }

    # check feedback from anonymous user
    event_id = data['tags']['_event_id']
    with api_app.test_request_context():
        url = url_for('invenio_webhooks.event_feedback_item',
                      event_id=event_id,
                      receiver_id='avc')
    with api_app.test_client() as client:
        resp = client.get(url, headers=json_headers)
        assert resp.status_code == 401
    # check feedback from owner
    with api_app.test_request_context():
        url = url_for('invenio_webhooks.event_feedback_item',
                      event_id=event_id,
                      receiver_id='avc')
    with api_app.test_client() as client:
        login_user_via_session(client, email=User.query.get(users[0]).email)
        resp = client.get(url, headers=json_headers)
        assert resp.status_code == 200
    # check feedback from another user without access
    with api_app.test_request_context():
        url = url_for('invenio_webhooks.event_feedback_item',
                      event_id=event_id,
                      receiver_id='avc')
    with api_app.test_client() as client:
        login_user_via_session(client, email=User.query.get(users[1]).email)
        resp = client.get(url, headers=json_headers)
        assert resp.status_code == 403
    # check feedback from another user with access
    user_2 = User.query.get(users[1])
    user_2_id = str(user_2.id)
    user_2_email = user_2.email
    project = deposit_project_resolver(project['_deposit']['id'])
    # Grant update access on the project to user_2 by email.
    project['_access'] = {'update': [user_2_email]}
    project = project.commit()
    with api_app.test_request_context():
        url = url_for('invenio_webhooks.event_feedback_item',
                      event_id=event_id,
                      receiver_id='avc')
    with api_app.test_client() as client:

        # Inject a UserNeed carrying the email so the '_access' check above
        # matches when user_2's identity is loaded.
        @identity_loaded.connect
        def load_email(sender, identity):
            if current_user.get_id() == user_2_id:
                identity.provides.update([UserNeed(user_2_email)])

        login_user_via_session(client, email=user_2_email)
        resp = client.get(url, headers=json_headers)
        assert resp.status_code == 200

    # Test cleaning!
    url = '{0}?access_token={1}'.format(data['links']['cancel'], access_token)

    with mock.patch('invenio_sse.ext._SSEState.publish') as mock_sse, \
            mock.patch('invenio_indexer.api.RecordIndexer.bulk_index') \
            as mock_indexer, \
            api_app.test_client() as client:
        # [[ DELETE WORKFLOW ]]
        resp = client.delete(url, headers=json_headers)

        assert resp.status_code == 201

        # check that object versions and tags are deleted
        # (Create + Delete) * Num Objs
        assert ObjectVersion.query.count() == 2 * get_object_count()
        # Tags connected with the old version
        assert ObjectVersionTag.query.count() == get_tag_count()
        bucket = Bucket.query.first()
        # and bucket is empty
        assert bucket.size == 0

        record = RecordMetadata.query.filter_by(id=video_1_id).one()

        # check metadata patch are deleted
        assert 'extracted_metadata' not in record.json['_cds']

        # check the corresponding Event persisted after cleaning
        assert len(get_deposit_events(record.json['_deposit']['id'])) == 0
        assert len(
            get_deposit_events(record.json['_deposit']['id'],
                               _deleted=True)) == 1

        # check no SSE message and reindexing is fired
        assert mock_sse.called is False
        assert mock_indexer.called is False
Code example #10
0
def test_available_preset_qualities(app):
    """Test `get_all_distinct_qualities` function."""
    expected = [
        '240p', '360p', '480p', '720p', '1024p', '1080p', '1080ph265',
        '2160p', '2160ph265'
    ]
    # Compare as sorted lists: only the set of qualities matters, not order.
    assert sorted(get_all_distinct_qualities()) == sorted(expected)