Example #1
def test_deposit_versioning_draft_child_unlinking_bug(app, db, communities,
                                                      deposit, deposit_file):
    """
    Bug with draft_child_deposit unlinking.

    Bug where a draft_child_deposit was unlinked from a new version draft,
    when another version of a record was edited and published.
    """
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit.fetch_published()
    recid_v1_value = recid_v1.pid_value

    # Initiate a new version draft
    deposit_v1.newversion()
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    pv = PIDVersioning(child=recid_v1)
    assert pv.draft_child_deposit
    assert pv.draft_child

    deposit_v1.edit()
    deposit_v1 = deposit_v1.edit()
    deposit_v1 = publish_and_expunge(db, deposit_v1)

    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    pv = PIDVersioning(child=recid_v1)
    # Make sure the draft child deposit was not unlinked due to publishing of
    # the edited draft
    assert pv.draft_child_deposit
    assert pv.draft_child
Example #2
def test_deposit_versioning_draft_child_unlinking_bug(
        app, db, communities, deposit, deposit_file):
    """
    Bug with draft_child_deposit unlinking.

    Bug where a draft_child_deposit was unlinked from a new version draft,
    when another version of a record was edited and published.
    """
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit.fetch_published()
    recid_v1_value = recid_v1.pid_value

    # Initiate a new version draft
    deposit_v1.newversion()
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    pv = PIDVersioning(child=recid_v1)
    assert pv.draft_child_deposit
    assert pv.draft_child

    deposit_v1.edit()
    deposit_v1 = deposit_v1.edit()
    deposit_v1 = publish_and_expunge(db, deposit_v1)

    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    pv = PIDVersioning(child=recid_v1)
    # Make sure the draft child deposit was not unlinked due to publishing of
    # the edited draft
    assert pv.draft_child_deposit
    assert pv.draft_child
Example #3
def test_basic_api(app, db, communities, deposit, deposit_file):
    """Test basic workflow using Deposit and Communities API."""
    deposit_v1 = publish_and_expunge(db, deposit)
    depid_v1_value = deposit_v1['_deposit']['id']

    recid_v1, record_v1 = deposit_v1.fetch_published()
    recid_v1_value = recid_v1.pid_value

    deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    deposit_v2 = deposit_v2.edit()
    # 1. Request for 'c1' and 'c2' through deposit v2
    deposit_v2['communities'] = [
        'c1',
        'c2',
    ]
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()
    recid_v2_value = recid_v2.pid_value
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    assert record_v1.get('communities', []) == []
    assert record_v2.get('communities', []) == []

    c1_api = ZenodoCommunity('c1')
    c2_api = ZenodoCommunity('c2')

    # Inclusion requests should be visible for both records
    assert c1_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert c1_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1

    assert c2_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert c2_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1

    # Accept to 'c1' through record_v2 (as originally requested),
    # and 'c2' through record_v1 (version)
    c1_api.accept_record(record_v2, pid=recid_v2)
    c2_api.accept_record(record_v1, pid=recid_v1)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    recid_v2, record_v2 = record_resolver.resolve(recid_v2_value)
    # Accepting an individual record into a community should propagate the
    # changes to all versions
    assert record_v1['communities'] == record_v2['communities'] == \
        ['c1', 'c2', ]

    # Removing 'c1' from deposit_v1 should remove it from two published records
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    deposit_v1 = deposit_v1.edit()
    deposit_v1['communities'] = []
    deposit_v1 = publish_and_expunge(db, deposit_v1)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    recid_v2, record_v2 = record_resolver.resolve(recid_v2_value)
    assert record_v1.get('communities', []) == []
    assert record_v2.get('communities', []) == []
Example #4
def test_basic_api(app, db, communities, deposit, deposit_file):
    """Test basic workflow using Deposit and Communities API."""
    deposit_v1 = publish_and_expunge(db, deposit)
    depid_v1_value = deposit_v1['_deposit']['id']

    recid_v1, record_v1 = deposit_v1.fetch_published()
    recid_v1_value = recid_v1.pid_value

    deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    deposit_v2 = deposit_v2.edit()
    # 1. Request for 'c1' and 'c2' through deposit v2
    deposit_v2['communities'] = ['c1', 'c2', ]
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()
    recid_v2_value = recid_v2.pid_value
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    assert record_v1.get('communities', []) == []
    assert record_v2.get('communities', []) == []

    c1_api = ZenodoCommunity('c1')
    c2_api = ZenodoCommunity('c2')

    # Inclusion requests should be visible for both records
    assert c1_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert c1_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1

    assert c2_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert c2_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1

    # Accept to 'c1' through record_v2 (as originally requested),
    # and 'c2' through record_v1 (version)
    c1_api.accept_record(record_v2, pid=recid_v2)
    c2_api.accept_record(record_v1, pid=recid_v1)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    recid_v2, record_v2 = record_resolver.resolve(recid_v2_value)
    # Accepting an individual record into a community should propagate the
    # changes to all versions
    assert record_v1['communities'] == record_v2['communities'] == \
        ['c1', 'c2', ]

    # Removing 'c1' from deposit_v1 should remove it from two published records
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    deposit_v1 = deposit_v1.edit()
    deposit_v1['communities'] = []
    deposit_v1 = publish_and_expunge(db, deposit_v1)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    recid_v2, record_v2 = record_resolver.resolve(recid_v2_value)
    assert record_v1.get('communities', []) == []
    assert record_v2.get('communities', []) == []
Example #5
def datecite_register(recid, eager):
    """Send a record to DataCite for registration."""
    pid, record = record_resolver.resolve(recid)
    if eager:
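        # apply(throw=True) runs the Celery task in the current process and
        # re-raises any failure; apply_async() dispatches it to a worker queue.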
        datacite_register.s(pid.pid_value, str(record.id)).apply(throw=True)
    else:
        datacite_register.s(pid.pid_value, str(record.id)).apply_async()
Example #6
def test_propagation_with_newversion_open(
        app, db, users, communities, deposit, deposit_file):
    """Adding old versions to a community should propagate to all drafts."""
    # deposit['communities'] = ['c1', 'c2']
    deposit_v1 = publish_and_expunge(db, deposit)
    deposit_v1 = deposit_v1.edit()

    recid_v1, record_v1 = deposit_v1.fetch_published()
    recid_v1_value = recid_v1.pid_value

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value

    # New version in 'deposit_v2' has not been published yet
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())

    # depid_v1_value = deposit_v1['_deposit']['id']
    # depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    deposit_v1['communities'] = ['c1', 'c2', ]
    deposit_v1 = publish_and_expunge(db, deposit_v1)

    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    c1_api = ZenodoCommunity('c1')
    c1_api.accept_record(record_v1, pid=recid_v1)

    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    assert deposit_v2['communities'] == ['c1', 'c2']
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    assert record_v2['communities'] == ['c1', ]
Example #7
def datecite_register(recid, eager):
    """Send a record to DataCite for registration."""
    pid, record = record_resolver.resolve(recid)
    if eager:
        datacite_register.s(pid.pid_value, str(record.id)).apply(throw=True)
    else:
        datacite_register.s(pid.pid_value, str(record.id)).apply_async()
Example #8
def test_autoadd_explicit_newversion(
        app, db, users, communities, deposit, deposit_file,
        communities_autoadd_enabled):
    """Explicitly set the autoadded communities in a new version."""
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    depid_v1_value = deposit_v1['_deposit']['id']
    recid_v1_value = recid_v1.pid_value

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value

    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())

    deposit_v2['communities'] = ['ecfunded', 'grants_comm', 'zenodo']
    deposit_v2['grants'] = [{'title': 'SomeGrant'}, ]
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    assert record_v1.get('communities', []) == ['grants_comm', ]
    assert deposit_v1.get('communities', []) == ['ecfunded', 'grants_comm',
                                                 'zenodo']
    assert record_v2.get('communities', []) == ['grants_comm', ]
    assert deposit_v2.get('communities', []) == ['ecfunded', 'grants_comm',
                                                 'zenodo']
Example #9
def rename_file(recid, key, new_key):
    """Rename a file of a published record."""
    pid, record = record_resolver.resolve(recid)
    bucket = record.files.bucket

    obj = ObjectVersion.get(bucket, key)
    if obj is None:
        click.echo(click.style(u'File with key "{key}" not found.'.format(
            key=key), fg='red'))
        return

    new_obj = ObjectVersion.get(bucket, new_key)
    if new_obj is not None:
        click.echo(click.style(u'File with key "{key}" already exists.'.format(
            key=new_key), fg='red'))
        return

    if click.confirm(u'Rename "{key}" to "{new_key}" on bucket {bucket}.'
                     u' Continue?'.format(
                        key=obj.key, new_key=new_key, bucket=bucket.id)):
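        # Unlock the published record's bucket, re-point the same FileInstance
        # under the new key, then lock the bucket again.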
        record.files.bucket.locked = False

        file_id = obj.file.id
        ObjectVersion.delete(bucket, obj.key)
        ObjectVersion.create(bucket, new_key, _file_id=file_id)
        record.files.bucket.locked = True
        record.files.flush()
        record.commit()
        db.session.commit()
        click.echo(click.style(u'File renamed successfully.', fg='green'))
    else:
        click.echo(click.style(u'Aborted file rename.', fg='green'))
Example #10
def add_file(recid, fp, replace_existing):
    """Add a new file to a published record."""
    pid, record = record_resolver.resolve(recid)
    bucket = record.files.bucket
    key = os.path.basename(fp.name)

    obj = ObjectVersion.get(bucket, key)
    if obj is not None and not replace_existing:
        click.echo(
            click.style(
                u'File with key "{key}" already exists.'
                u' Use `--replace-existing/-f` to overwrite it.'.format(
                    key=key, recid=recid),
                fg='red'))
        return

    fp.seek(SEEK_SET, SEEK_END)
    size = fp.tell()
    fp.seek(SEEK_SET)

    click.echo(u'Will add the following file:\n')
    click.echo(
        click.style(u'  key: "{key}"\n'
                    u'  bucket: {bucket}\n'
                    u'  size: {size}\n'
                    u''.format(key=key, bucket=bucket.id, size=size),
                    fg='green'))
    click.echo(u'to record:\n')
    click.echo(
        click.style(u'  Title: "{title}"\n'
                    u'  RECID: {recid}\n'
                    u'  UUID: {uuid}\n'
                    u''.format(recid=record['recid'],
                               title=record['title'],
                               uuid=record.id),
                    fg='green'))
    if replace_existing and obj is not None:
        click.echo(u'and remove the file:\n')
        click.echo(
            click.style(u'  key: "{key}"\n'
                        u'  bucket: {bucket}\n'
                        u'  size: {size}\n'
                        u''.format(key=obj.key,
                                   bucket=obj.bucket,
                                   size=obj.file.size),
                        fg='green'))

    if click.confirm(u'Continue?'):
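        # The record bucket is locked; unlock it while adding (and optionally
        # replacing) the file, then lock it again.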
        bucket.locked = False
        if obj is not None and replace_existing:
            ObjectVersion.delete(bucket, obj.key)
        ObjectVersion.create(bucket, key, stream=fp, size=size)
        bucket.locked = True

        record.files.flush()
        record.commit()
        db.session.commit()
        click.echo(click.style(u'File added successfully.', fg='green'))
    else:
        click.echo(click.style(u'File addition aborted.', fg='green'))
Example #11
def test_propagation_with_newversion_open(
        app, db, users, communities, deposit, deposit_file):
    """Adding old versions to a community should propagate to all drafts."""
    # deposit['communities'] = ['c1', 'c2']
    deposit_v1 = publish_and_expunge(db, deposit)
    deposit_v1 = deposit_v1.edit()

    recid_v1, record_v1 = deposit_v1.fetch_published()
    recid_v1_value = recid_v1.pid_value

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value

    # New version in 'deposit_v2' has not been published yet
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())

    # depid_v1_value = deposit_v1['_deposit']['id']
    # depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    deposit_v1['communities'] = ['c1', 'c2', ]
    deposit_v1 = publish_and_expunge(db, deposit_v1)

    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    c1_api = ZenodoCommunity('c1')
    c1_api.accept_record(record_v1, pid=recid_v1)

    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    assert deposit_v2['communities'] == ['c1', 'c2']
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    assert record_v2['communities'] == ['c1', ]
Example #12
def migrate_concept_recid_sips(recid, overwrite=False):
    """Create Bagit metadata for SIPs."""
    pid = PersistentIdentifier.get('recid', recid)
    pv = PIDVersioning(parent=pid)
    all_sips = []
    for child in pv.children:
        pid, rec = record_resolver.resolve(child.pid_value)
        rsips = RecordSIP.query.filter_by(pid_id=pid.id).order_by(
            RecordSIP.created)
        all_sips.append([rs.sip.id for rs in rsips])
    base_sip_id = None
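    # Walk the SIPs of every version in creation order; each archive is built
    # as a patch of the previously processed SIP (base_sip), and its BagIt
    # metadata is only (re)written when missing or when overwriting.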

    for sipv in all_sips:
        for idx, sip_id in enumerate(sipv):
            sip = SIP.query.get(sip_id)
            base_sip = SIP.query.get(base_sip_id) if base_sip_id else None
            bia = BagItArchiver(SIPApi(sip),
                                patch_of=base_sip,
                                include_all_previous=(idx > 0))

            bmeta = BagItArchiver.get_bagit_metadata(sip)

            if (not bmeta) or overwrite:
                bia.save_bagit_metadata(overwrite=True)
            base_sip_id = sip_id
            db.session.commit()
Example #13
def test_autoadd_explicit_newversion(
        app, db, users, communities, deposit, deposit_file,
        communities_autoadd_enabled):
    """Explicitly set the autoadded communities in a new version."""
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    depid_v1_value = deposit_v1['_deposit']['id']
    recid_v1_value = recid_v1.pid_value

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value

    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())

    deposit_v2['communities'] = ['ecfunded', 'grants_comm', 'zenodo']
    deposit_v2['grants'] = [{'title': 'SomeGrant'}, ]
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    assert record_v1.get('communities', []) == ['grants_comm', ]
    assert deposit_v1.get('communities', []) == ['ecfunded', 'grants_comm',
                                                 'zenodo']
    assert record_v2.get('communities', []) == ['grants_comm', ]
    assert deposit_v2.get('communities', []) == ['ecfunded', 'grants_comm',
                                                 'zenodo']
Example #14
def curate(community):
    """Curate a record for a community.

    :param community: The community to curate the record for.
    """
    action = request.json.get('action')
    recid = request.json.get('recid')
    if not recid:
        abort(400)
    if action not in ['accept', 'reject', 'remove']:
        abort(400)

    # Resolve recid to a Record
    pid, record = record_resolver.resolve(recid)

    # Perform actions
    pv = PIDVersioning(child=pid)
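    # Versioned records are curated through the ZenodoCommunity API so the
    # action can be applied across the whole version chain; unversioned
    # records fall back to the plain community API.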
    if pv.exists:
        api = ZenodoCommunity(community)
    else:
        api = community
    if action == "accept":
        api.accept_record(record, pid=pid)
    elif action == "reject":
        api.reject_record(record, pid=pid)
    elif action == "remove":
        api.remove_record(record, pid=pid)

    db.session.commit()
    RecordIndexer().index_by_id(record.id)
    return jsonify({'status': 'success'})
Example #15
def test_record_delete_v2(mocker, app, db, users, deposit, deposit_file):
    """Delete a record (only last version) with multiple versions."""
    dc_mock = mocker.patch(
        'invenio_pidstore.providers.datacite.DataCiteMDSClient')
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit.fetch_published()
    recid_v1_value = recid_v1.pid_value
    deposit_v1.newversion()
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)

    # Stash a copy of v1 for later
    rec1 = deepcopy(record_v1)

    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    # Stash a copy of v2 for later
    rec2 = deepcopy(record_v2)
    rec2_id = str(record_v2.id)

    assert dc_mock().metadata_delete.call_count == 0

    # Remove the last version (v2)
    delete_record(rec2_id, 'spam', users[0]['id'])

    # Make sure all PIDs are deleted
    assert PID.get('doi', rec2['doi']).status == PIDStatus.DELETED
    assert PID.get('recid', rec2['recid']).status == PIDStatus.DELETED
    assert PID.get('depid', rec2['_deposit']['id']).status == PIDStatus.DELETED

    # Concept DOI should be left registered
    assert PID.get('doi', rec2['conceptdoi']).status == PIDStatus.REGISTERED

    # Make sure conceptrecid is redirecting to v1
    crecid = PID.get('recid', rec2['conceptrecid'])
    assert crecid.status == PIDStatus.REDIRECTED
    assert crecid.get_redirect() == PID.get('recid', rec1['recid'])

    # Make sure the v1 PIDs are kept intact
    assert PID.get('oai', rec1['_oai']['id']).status == PIDStatus.REGISTERED
    assert PID.get('doi', rec1['doi']).status == PIDStatus.REGISTERED
    assert PID.get('recid', rec1['recid']).status == PIDStatus.REGISTERED
    assert PID.get('depid', rec1['_deposit']['id']).status == \
        PIDStatus.REGISTERED

    # Only the v2 DOI should be deleted
    assert dc_mock().doi_post.call_count == 2
    assert dc_mock().doi_post.has_any_call('10.5072/zenodo.2')
    assert dc_mock().doi_post.has_any_call('10.5072/zenodo.1')
    assert dc_mock().metadata_delete.call_count == 1
    dc_mock().metadata_delete.assert_any_call('10.5072/zenodo.3')
    record = Record.get_record(rec2_id)
    assert record['removed_by'] == users[0]['id']
    assert record['removal_reason'] == 'Spam record, removed by Zenodo staff.'
Example #16
def versioning_link(recids):
    """Link several records into a versioning scheme.

    Support cases with some records being already versioned, as long
    as they are all within a single versioning scheme.

    For example, given the following records:
    - 123, 234, 345 (record with 3 versions)
    - 543, 432 (record with 2 versions)
    - 111 (single non-versioned record)
    - 222 (single, non-versioned record)

    The following cases are supported (Good) or not supported (Error):
    versioning_link 111 123 234 345 (Good - will add 111 as first version)
    versioning_link 111 222 (Good, will create new versioning scheme)
    versioning_link 345 123 234 (Good - no new records linked, but will reorder
                                 the records in the versioning list)
    versioning_link 123 234 543 (Error - trying to link two versioned records)
    versioning_link 123 234 (Error - must specify all children)
    """
    int_recids = [int(recid) for recid in recids]
    if sorted(int_recids) != int_recids and not click.confirm(
            u'Requested RECIDS are not in the order of creation. Continue?'):
        click.echo(click.style(u'Record linking aborted.', fg='green'))
        return

    recids_records = [
        record_resolver.resolve(recid_val) for recid_val in recids
    ]

    upgraded = [(recid, rec) for recid, rec in recids_records
                if 'conceptdoi' in rec]

    if len(upgraded) == 1 and not click.confirm(
            u'Recid {0} already migrated. Its Concept recid: {1} will be used '
            u'as the base for the Concept DOI in the versioning linking. '
            u'Continue?'.format(upgraded[0][0].pid_value,
                                upgraded[0][1].get('conceptrecid'))):
        return
    elif len(upgraded) > 1:
        i_recids = [int(recid) for recid in recids]
        child_recids = [
            int(recid.pid_value)
            for recid in PIDVersioning(child=upgraded[0][0]).children.all()
        ]
        if not all(cr in i_recids for cr in child_recids):
            click.echo(u'All children recids ({0}) of the upgraded record need'
                       u' to be specified. Aborting.'.format(
                           [recid for recid in child_recids]))
            return
        i_upgraded = [int(recid.pid_value) for recid, rec in upgraded]
        if set(child_recids) != set(i_upgraded):
            click.echo(u'Found multiple upgraded records {0}, which do not '
                       u'belong to a single versioning scheme. Aborting.'
                       u''.format(i_upgraded,
                                  [recid for recid in child_recids]))
            return
    versioning_link_records(recids)
Example #17
def retry_indexing_failed_openaire_records(eager):
    """Retries indexing of records that failed to get indexed in OpenAIRE."""
    for key in _iter_openaire_direct_index_keys():
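        # Keys have the form 'openaire_direct_index:<recid>'; strip the prefix
        # to recover the record identifier.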
        recid = key.split('openaire_direct_index:')[1]
        recid, record = record_resolver.resolve(recid)
        click.echo("Indexing record with id {}".format(recid))
        if eager:
            openaire_direct_index(str(recid.object_uuid), retry=False)
        else:
            openaire_direct_index.delay(
                str(recid.object_uuid), retry=False)
Example #18
def list_files(recid):
    """List files for the record."""
    pid, record = record_resolver.resolve(recid)
    click.echo(u'Files for record {recid} (UUID:{uuid}) ({cnt} file(s)):\n'
               u''.format(recid=recid, uuid=record.id, cnt=len(record.files)))
    for idx, key in enumerate(record.files.keys):
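        # Each key resolves to an ObjectVersion whose FileInstance carries the
        # checksum and size reported below.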
        f = record.files[key].obj.file
        click.echo(click.style(
            u'{idx:3}: "{key}", {checksum}, size:{size}'
            u''.format(idx=idx, key=key, checksum=f.checksum, size=f.size),
            fg='green'))
Example #19
def test_oai_set_result_count(mocker, audit_records, db, es, communities,
                              oai_sources, issues):
    db_records, es_records, oai2d_records = oai_sources

    for recid in db_records:
        _, record = record_resolver.resolve(recid)
        record['_oai']['sets'] = ['user-c1']
        record.commit()
    db.session.commit()

    indexer = RecordIndexer()
    for recid in es_records:
        _, record = record_resolver.resolve(recid)
        record['_oai']['sets'] = ['user-c1']
        indexer.index(record)
    current_search.flush_and_refresh(index='records')

    # '/oai2d' needs straight-forward cheating... There's no way to be sure
    # why the endpoint sometimes fails to report the correct results. It could
    # be a Resumption Token issue, or even an indexing issue on Elasticsearch.
    # Either way, we have to be able to replicate this behavior when running
    # on production and report it as an issue.
    oai2d_ids_mock = MagicMock()
    oai2d_ids_mock.return_value = set(oai2d_records)
    oai2d_ids_mock = mocker.patch(
        'zenodo.modules.auditor.oai.OAISetResultCheck'
        '._oai2d_endpoint_identifiers',
        new=oai2d_ids_mock)

    audit = OAIAudit('testAudit', logging.getLogger('auditorTesting'), [])
    check = OAISetResultCheck(audit, Community.get('c1'))
    check.perform()
    audit.clear_db_oai_set_cache()

    result_issues = check.issues.get('missing_ids', {})
    db_issues, es_issues, api_issues = issues
    assert set(result_issues.get('db', [])) == set(db_issues)
    assert set(result_issues.get('es', [])) == set(es_issues)
    assert set(result_issues.get('oai2d', [])) == set(api_issues)
Example #20
def test_oai_set_result_count(mocker, audit_records, db, es, communities,
                              oai_sources, issues):
    db_records, es_records, oai2d_records = oai_sources

    for recid in db_records:
        _, record = record_resolver.resolve(recid)
        record['_oai']['sets'] = ['user-c1']
        record.commit()
    db.session.commit()

    indexer = RecordIndexer()
    for recid in es_records:
        _, record = record_resolver.resolve(recid)
        record['_oai']['sets'] = ['user-c1']
        indexer.index(record)
    current_search.flush_and_refresh(index='records')

    # '/oai2d' needs straight-forward cheating... There's no way to be sure
    # why the endpoint sometimes fails to report the correct results. It could
    # be a Resumption Token issue, or even an indexing issue on Elasticsearch.
    # Either way, we have to be able to replicate this behavior when running
    # on production and report it as an issue.
    oai2d_ids_mock = MagicMock()
    oai2d_ids_mock.return_value = set(oai2d_records)
    oai2d_ids_mock = mocker.patch(
        'zenodo.modules.auditor.oai.OAISetResultCheck'
        '._oai2d_endpoint_identifiers', new=oai2d_ids_mock)

    audit = OAIAudit('testAudit', logging.getLogger('auditorTesting'), [])
    check = OAISetResultCheck(audit, Community.get('c1'))
    check.perform()
    audit.clear_db_oai_set_cache()

    result_issues = check.issues.get('missing_ids', {})
    db_issues, es_issues, api_issues = issues
    assert set(result_issues.get('db', [])) == set(db_issues)
    assert set(result_issues.get('es', [])) == set(es_issues)
    assert set(result_issues.get('oai2d', [])) == set(api_issues)
Example #21
def test_communities_newversion_while_ir_pending_bug(app, db, users,
                                                     communities, deposit,
                                                     deposit_file):
    """Make sure that pending IRs remain after a new version (bug)."""
    deposit['communities'] = ['c1', 'c2']
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    depid_v1_value = deposit_v1['_deposit']['id']
    recid_v1_value = recid_v1.pid_value

    # Two inclusion requests are pending
    assert InclusionRequest.query.count() == 2

    # Accept one community
    c1_api = ZenodoCommunity('c1')
    c1_api.accept_record(record_v1, pid=recid_v1)

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value

    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())

    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    # Make sure there is still IR to community 'c2' after newversion
    assert InclusionRequest.query.count() == 1
    assert InclusionRequest.query.one().id_community == 'c2'
    assert record_v1.get('communities', []) == [
        'c1',
    ]
    assert deposit_v1.get('communities', []) == [
        'c1',
        'c2',
    ]
    assert record_v2.get('communities', []) == [
        'c1',
    ]
    assert deposit_v2.get('communities', []) == [
        'c1',
        'c2',
    ]
Example #22
def test_communities_newversion_addition(app, db, users, communities, deposit,
                                         deposit_file):
    """Make sure that new version of record synchronizes the communities."""
    deposit['communities'] = ['c1', 'c2']
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    depid_v1_value = deposit_v1['_deposit']['id']
    recid_v1_value = recid_v1.pid_value

    c1_api = ZenodoCommunity('c1')
    c2_api = ZenodoCommunity('c2')

    c1_api.accept_record(record_v1, pid=recid_v1)
    c2_api.accept_record(record_v1, pid=recid_v1)

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value

    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())

    # Remove 'c2' and request for 'c5'. Make sure that communities from
    # previous record version are preserved/removed properly
    deposit_v2['communities'] = ['c1', 'c5']
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    assert record_v1.get('communities', []) == [
        'c1',
    ]
    assert deposit_v1.get('communities', []) == [
        'c1',
        'c5',
    ]
    assert record_v2.get('communities', []) == [
        'c1',
    ]
    assert deposit_v2.get('communities', []) == [
        'c1',
        'c5',
    ]
Example #23
def remove_file(recid, key=None, index=None):
    """Remove a file from a published record."""
    pid, record = record_resolver.resolve(recid)
    bucket = record.files.bucket
    obj = ObjectVersion.get(bucket, key)
    if obj is None:
        click.echo(click.style(u'File with key "{key}" not found.'.format(
            key=key, recid=recid), fg='red'))
        return

    click.echo(u'Will remove the following file:\n')
    click.echo(click.style(
        u'  key: "{key}"\n'
        u'  {checksum}\n'
        u'  bucket: {bucket}\n'
        u''.format(
            key=key.decode('utf-8'),
            checksum=obj.file.checksum,
            bucket=bucket.id),
        fg='green'))
    click.echo('from record:\n')
    click.echo(click.style(
        u'  Title: "{title}"\n'
        u'  RECID: {recid}\n'
        u'  UUID: {uuid}\n'
        u''.format(
            recid=record['recid'],
            title=record['title'],
            uuid=record.id),
        fg='green'))

    if click.confirm(u'Continue?'):
        bucket.locked = False
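        # Manually subtract the removed file's size from the bucket total
        # before deleting the object version.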
        bucket.size -= obj.file.size
        ObjectVersion.delete(bucket, obj.key)
        bucket.locked = True
        record.files.flush()
        record.commit()
        db.session.commit()
        click.echo(click.style(u'File removed successfully.', fg='green'))
    else:
        click.echo(click.style(u'Aborted file removal.', fg='green'))
Example #24
def remove_file(recid, key=None, index=None):
    """Remove a file from a published record."""
    pid, record = record_resolver.resolve(recid)
    bucket = record.files.bucket
    obj = ObjectVersion.get(bucket, key)
    if obj is None:
        click.echo(click.style(u'File with key "{key}" not found.'.format(
            key=key, recid=recid), fg='red'))
        return

    click.echo(u'Will remove the following file:\n')
    click.echo(click.style(
        u'  key: "{key}"\n'
        u'  {checksum}\n'
        u'  bucket: {bucket}\n'
        u''.format(
            key=key,
            checksum=obj.file.checksum,
            bucket=bucket.id),
        fg='green'))
    click.echo('from record:\n')
    click.echo(click.style(
        u'  Title: "{title}"\n'
        u'  RECID: {recid}\n'
        u'  UUID: {uuid}\n'
        u''.format(
            recid=record['recid'],
            title=record['title'],
            uuid=record.id),
        fg='green'))

    if click.confirm(u'Continue?'):
        bucket.locked = False
        ObjectVersion.delete(bucket, obj.key)
        bucket.locked = True
        record.files.flush()
        record.commit()
        db.session.commit()
        click.echo(click.style(u'File removed successfully.', fg='green'))
    else:
        click.echo(click.style(u'Aborted file removal.', fg='green'))
Example #25
def curate(community):
    """Curate a record for a community.

    :param community: The community to curate the record for.
    """
    action = request.json.get('action')
    recid = request.json.get('recid')
    if not recid:
        abort(400)
    if action not in ['accept', 'reject', 'remove']:
        abort(400)

    # Resolve recid to a Record
    pid, record = record_resolver.resolve(recid)

    # Perform actions
    pv = PIDVersioning(child=pid)
    if pv.exists:
        api = ZenodoCommunity(community)
    else:
        api = community
    if action == "accept":
        api.accept_record(record, pid=pid)
    elif action == "reject":
        api.reject_record(record, pid=pid)
    elif action == "remove":
        api.remove_record(record, pid=pid)
    record_id = record.id
    db.session.commit()
    RecordIndexer().index_by_id(record_id)

    if current_app.config['OPENAIRE_DIRECT_INDEXING_ENABLED']:
        if action == 'accept':
            openaire_direct_index.delay(record_uuid=str(record_id))
        elif action in ('reject', 'remove'):
            openaire_delete.delay(record_uuid=str(record_id))
    if current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
        datacite_register.delay(recid, str(record_id))

    return jsonify({'status': 'success'})
Example #26
def curate(community):
    """Curate a record for a community.

    :param community: The community to curate the record for.
    """
    action = request.json.get('action')
    recid = request.json.get('recid')
    if not recid:
        abort(400)
    if action not in ['accept', 'reject', 'remove']:
        abort(400)

    # Resolve recid to a Record
    pid, record = record_resolver.resolve(recid)

    # Perform actions
    pv = PIDVersioning(child=pid)
    if pv.exists:
        api = ZenodoCommunity(community)
    else:
        api = community
    if action == "accept":
        api.accept_record(record, pid=pid)
    elif action == "reject":
        api.reject_record(record, pid=pid)
    elif action == "remove":
        api.remove_record(record, pid=pid)
    record_id = record.id
    db.session.commit()
    RecordIndexer().index_by_id(record_id)

    if current_app.config['OPENAIRE_DIRECT_INDEXING_ENABLED']:
        if action == 'accept':
            openaire_direct_index.delay(record_uuid=str(record_id))
        elif action in ('reject', 'remove'):
            openaire_delete.delay(record_uuid=str(record_id))
    if current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
        datacite_register.delay(recid, str(record_id))

    return jsonify({'status': 'success'})
Example #27
def test_communities_newversion_while_ir_pending_bug(
        app, db, users, communities, deposit, deposit_file):
    """Make sure that pending IRs remain after a new version (bug)."""
    deposit['communities'] = ['c1', 'c2']
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    depid_v1_value = deposit_v1['_deposit']['id']
    recid_v1_value = recid_v1.pid_value

    # Two inclusion requests are pending
    assert InclusionRequest.query.count() == 2

    # Accept one community
    c1_api = ZenodoCommunity('c1')
    c1_api.accept_record(record_v1, pid=recid_v1)

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value

    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())

    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    # Make sure there is still IR to community 'c2' after newversion
    assert InclusionRequest.query.count() == 1
    assert InclusionRequest.query.one().id_community == 'c2'
    assert record_v1.get('communities', []) == ['c1', ]
    assert deposit_v1.get('communities', []) == ['c1', 'c2', ]
    assert record_v2.get('communities', []) == ['c1', ]
    assert deposit_v2.get('communities', []) == ['c1', 'c2', ]
Example #28
def test_communities_newversion_addition(
        app, db, users, communities, deposit, deposit_file):
    """Make sure that new version of record synchronizes the communities."""
    deposit['communities'] = ['c1', 'c2']
    deposit_v1 = publish_and_expunge(db, deposit)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    depid_v1_value = deposit_v1['_deposit']['id']
    recid_v1_value = recid_v1.pid_value

    c1_api = ZenodoCommunity('c1')
    c2_api = ZenodoCommunity('c2')

    c1_api.accept_record(record_v1, pid=recid_v1)
    c2_api.accept_record(record_v1, pid=recid_v1)

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value

    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())

    # Remove 'c2' and request for 'c5'. Make sure that communities from
    # previous record version are preserved/removed properly
    deposit_v2['communities'] = ['c1', 'c5']
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()

    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    assert record_v1.get('communities', []) == ['c1', ]
    assert deposit_v1.get('communities', []) == ['c1', 'c5', ]
    assert record_v2.get('communities', []) == ['c1', ]
    assert deposit_v2.get('communities', []) == ['c1', 'c5', ]
Example #29
def test_versioning_indexing(db, es, deposit, deposit_file):
    """Test the indexing of 'version' relations."""
    deposit_index_name = 'deposits-records-record-v1.0.0'
    records_index_name = 'records-record-v1.0.0'

    deposit_v1 = publish_and_expunge(db, deposit)
    depid_v1_value = deposit_v1['_deposit']['id']
    recid_v1, record_v1 = deposit_v1.fetch_published()
    recid_v1_value = recid_v1.pid_value

    RecordIndexer().index_by_id(str(record_v1.id))
    RecordIndexer().process_bulk_queue()
    current_search.flush_and_refresh(index=deposit_index_name)
    current_search.flush_and_refresh(index=records_index_name)
    s_dep = current_search.client.search(
        index=deposit_index_name)['hits']['hits']
    s_rec = current_search.client.search(
        index=records_index_name)['hits']['hits']
    assert len(s_dep) == 1
    assert len(s_rec) == 1
    assert 'relations' in s_dep[0]['_source']
    assert 'relations' in s_rec[0]['_source']

    expected = {
        "version": [
            {
                "draft_child_deposit": None,
                "index": 0,
                "is_last": True,
                "last_child": {
                    "pid_type": "recid",
                    "pid_value": "2"
                },
                "count": 1,
                "parent": {
                    "pid_type": "recid",
                    "pid_value": "1"
                },
            }
        ]
    }
    assert s_dep[0]['_source']['relations'] == expected
    assert s_rec[0]['_source']['relations'] == expected

    deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.object_uuid)
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)

    RecordIndexer().process_bulk_queue()
    current_search.flush_and_refresh(index=deposit_index_name)
    current_search.flush_and_refresh(index=records_index_name)
    s_dep = current_search.client.search(
        index=deposit_index_name)['hits']['hits']
    s_rec = current_search.client.search(
        index=records_index_name)['hits']['hits']

    assert len(s_dep) == 2  # Two deposits should be indexed
    assert len(s_rec) == 1  # One, since record does not exist yet

    s_dep1 = current_search.client.get(
        index=deposit_index_name, id=deposit_v1.id)
    s_dep2 = current_search.client.get(
        index=deposit_index_name, id=deposit_v2.id)

    expected_d1 = {
        "version": [
            {
                "draft_child_deposit": {
                    "pid_type": "depid",
                    "pid_value": "3"
                },
                "index": 0,
                "is_last": False,
                "last_child": {
                    "pid_type": "recid",
                    "pid_value": "2"
                },
                "parent": {
                    "pid_type": "recid",
                    "pid_value": "1"
                },
                "count": 2  # For deposit, draft children are also counted
            }
        ]
    }
    expected_d2 = {
        "version": [
            {
                "draft_child_deposit": {
                    "pid_type": "depid",
                    "pid_value": "3"
                },
                "index": 1,
                "is_last": True,
                "last_child": {
                    "pid_type": "recid",
                    "pid_value": "2"
                },
                "count": 2,  # For deposit, draft children are also counted
                "parent": {
                    "pid_type": "recid",
                    "pid_value": "1"
                },
            }
        ]
    }

    assert s_dep1['_source']['relations'] == expected_d1
    assert s_dep2['_source']['relations'] == expected_d2

    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)

    RecordIndexer().index_by_id(str(record_v2.id))
    RecordIndexer().process_bulk_queue()
    current_search.flush_and_refresh(index=deposit_index_name)
    current_search.flush_and_refresh(index=records_index_name)

    s_dep = current_search.client.search(
        index=deposit_index_name)['hits']['hits']
    s_rec = current_search.client.search(
        index=records_index_name)['hits']['hits']
    assert len(s_dep) == 2
    assert len(s_rec) == 2

    s_dep1 = current_search.client.get(
        index=deposit_index_name, id=deposit_v1.id)
    s_dep2 = current_search.client.get(
        index=deposit_index_name, id=deposit_v2.id)

    s_rec1 = current_search.client.get(
        index=records_index_name, id=record_v1.id)
    s_rec2 = current_search.client.get(
        index=records_index_name, id=record_v2.id)

    expected_d1 = {
        "version": [
            {
                "draft_child_deposit": None,
                "index": 0,
                "is_last": False,
                "last_child": {
                    "pid_type": "recid",
                    "pid_value": "3"
                },
                "parent": {
                    "pid_type": "recid",
                    "pid_value": "1"
                },
                "count": 2
            }
        ]
    }
    expected_d2 = {
        "version": [
            {
                "draft_child_deposit": None,
                "index": 1,
                "is_last": True,
                "last_child": {
                    "pid_type": "recid",
                    "pid_value": "3"
                },
                "count": 2,
                "parent": {
                    "pid_type": "recid",
                    "pid_value": "1"
                },
            }
        ]
    }
    assert s_dep1['_source']['relations'] == expected_d1
    assert s_dep2['_source']['relations'] == expected_d2

    expected_r1 = {
        "version": [
            {
                "draft_child_deposit": None,
                "index": 0,
                "is_last": False,
                "last_child": {
                    "pid_type": "recid",
                    "pid_value": "3"
                },
                "parent": {
                    "pid_type": "recid",
                    "pid_value": "1"
                },
                "count": 2
            }
        ]
    }
    expected_r2 = {
        "version": [
            {
                "draft_child_deposit": None,
                "index": 1,
                "is_last": True,
                "last_child": {
                    "pid_type": "recid",
                    "pid_value": "3"
                },
                "count": 2,
                "parent": {
                    "pid_type": "recid",
                    "pid_value": "1"
                },
            }
        ]
    }
    assert s_rec1['_source']['relations'] == expected_r1
    assert s_rec2['_source']['relations'] == expected_r2
Example #30
def test_versioning_indexing(db, es, deposit, deposit_file):
    """Test the indexing of 'version' relations."""
    deposit_index_name = 'deposits-records-record-v1.0.0'
    records_index_name = 'records-record-v1.0.0'

    deposit_v1 = publish_and_expunge(db, deposit)
    depid_v1_value = deposit_v1['_deposit']['id']
    recid_v1, record_v1 = deposit_v1.fetch_published()
    recid_v1_value = recid_v1.pid_value

    RecordIndexer().index_by_id(str(record_v1.id))
    RecordIndexer().process_bulk_queue()
    current_search.flush_and_refresh(index=deposit_index_name)
    current_search.flush_and_refresh(index=records_index_name)
    s_dep = current_search.client.search(
        index=deposit_index_name)['hits']['hits']
    s_rec = current_search.client.search(
        index=records_index_name)['hits']['hits']
    assert len(s_dep) == 1
    assert len(s_rec) == 1
    assert 'relations' in s_dep[0]['_source']
    assert 'relations' in s_rec[0]['_source']

    expected = {
        "version": [{
            "draft_child_deposit": None,
            "index": 0,
            "is_last": True,
            "last_child": {
                "pid_type": "recid",
                "pid_value": "2"
            },
            "count": 1,
            "parent": {
                "pid_type": "recid",
                "pid_value": "1"
            },
        }]
    }
    assert s_dep[0]['_source']['relations'] == expected
    assert s_rec[0]['_source']['relations'] == expected

    deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.object_uuid)
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)

    RecordIndexer().process_bulk_queue()
    current_search.flush_and_refresh(index=deposit_index_name)
    current_search.flush_and_refresh(index=records_index_name)
    s_dep = current_search.client.search(
        index=deposit_index_name)['hits']['hits']
    s_rec = current_search.client.search(
        index=records_index_name)['hits']['hits']

    assert len(s_dep) == 2  # Two deposits should be indexed
    assert len(s_rec) == 1  # One, since record does not exist yet

    s_dep1 = current_search.client.get(index=deposit_index_name,
                                       id=deposit_v1.id)
    s_dep2 = current_search.client.get(index=deposit_index_name,
                                       id=deposit_v2.id)

    expected_d1 = {
        "version": [{
            "draft_child_deposit": {
                "pid_type": "depid",
                "pid_value": "3"
            },
            "index": 0,
            "is_last": False,
            "last_child": {
                "pid_type": "recid",
                "pid_value": "2"
            },
            "parent": {
                "pid_type": "recid",
                "pid_value": "1"
            },
            "count": 2  # For deposit, draft children are also counted
        }]
    }
    expected_d2 = {
        "version": [{
            "draft_child_deposit": {
                "pid_type": "depid",
                "pid_value": "3"
            },
            "index": 1,
            "is_last": True,
            "last_child": {
                "pid_type": "recid",
                "pid_value": "2"
            },
            "count": 2,  # For deposit, draft children are also counted
            "parent": {
                "pid_type": "recid",
                "pid_value": "1"
            },
        }]
    }

    assert s_dep1['_source']['relations'] == expected_d1
    assert s_dep2['_source']['relations'] == expected_d2

    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)

    RecordIndexer().index_by_id(str(record_v2.id))
    RecordIndexer().process_bulk_queue()
    current_search.flush_and_refresh(index=deposit_index_name)
    current_search.flush_and_refresh(index=records_index_name)

    s_dep = current_search.client.search(
        index=deposit_index_name)['hits']['hits']
    s_rec = current_search.client.search(
        index=records_index_name)['hits']['hits']
    assert len(s_dep) == 2
    assert len(s_rec) == 2

    s_dep1 = current_search.client.get(index=deposit_index_name,
                                       id=deposit_v1.id)
    s_dep2 = current_search.client.get(index=deposit_index_name,
                                       id=deposit_v2.id)

    s_rec1 = current_search.client.get(index=records_index_name,
                                       id=record_v1.id)
    s_rec2 = current_search.client.get(index=records_index_name,
                                       id=record_v2.id)

    expected_d1 = {
        "version": [{
            "draft_child_deposit": None,
            "index": 0,
            "is_last": False,
            "last_child": {
                "pid_type": "recid",
                "pid_value": "3"
            },
            "parent": {
                "pid_type": "recid",
                "pid_value": "1"
            },
            "count": 2
        }]
    }
    expected_d2 = {
        "version": [{
            "draft_child_deposit": None,
            "index": 1,
            "is_last": True,
            "last_child": {
                "pid_type": "recid",
                "pid_value": "3"
            },
            "count": 2,
            "parent": {
                "pid_type": "recid",
                "pid_value": "1"
            },
        }]
    }
    assert s_dep1['_source']['relations'] == expected_d1
    assert s_dep2['_source']['relations'] == expected_d2

    expected_r1 = {
        "version": [{
            "draft_child_deposit": None,
            "index": 0,
            "is_last": False,
            "last_child": {
                "pid_type": "recid",
                "pid_value": "3"
            },
            "parent": {
                "pid_type": "recid",
                "pid_value": "1"
            },
            "count": 2
        }]
    }
    expected_r2 = {
        "version": [{
            "draft_child_deposit": None,
            "index": 1,
            "is_last": True,
            "last_child": {
                "pid_type": "recid",
                "pid_value": "3"
            },
            "count": 2,
            "parent": {
                "pid_type": "recid",
                "pid_value": "1"
            },
        }]
    }
    assert s_rec1['_source']['relations'] == expected_r1
    assert s_rec2['_source']['relations'] == expected_r2
Example #31
def fetch_record(recid):
    """Cached record fetch."""
    return record_resolver.resolve(recid)
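The decorator that provides the caching is not shown in this listing. Below is a minimal sketch of how such a helper could be memoized, assuming Flask-Caching and a module-level cache object (the cache setup, the timeout and the import path are assumptions, not taken from the listing):

from flask_caching import Cache

from zenodo.modules.records.resolvers import record_resolver

cache = Cache()  # assumed to be initialised against the Flask app elsewhere


@cache.memoize(timeout=300)  # hypothetical timeout
def fetch_record(recid):
    """Cached record fetch."""
    return record_resolver.resolve(recid)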
Example #32
def add_file(recid, fp, replace_existing):
    """Add a new file to a published record."""
    pid, record = record_resolver.resolve(recid)
    bucket = record.files.bucket
    key = os.path.basename(fp.name)

    obj = ObjectVersion.get(bucket, key)
    if obj is not None and not replace_existing:
        click.echo(click.style(u'File with key "{key}" already exists.'
                   u' Use `--replace-existing/-f` to overwrite it.'.format(
                        key=key, recid=recid), fg='red'))
        return

    fp.seek(SEEK_SET, SEEK_END)
    size = fp.tell()
    fp.seek(SEEK_SET)

    click.echo(u'Will add the following file:\n')
    click.echo(click.style(
        u'  key: "{key}"\n'
        u'  bucket: {bucket}\n'
        u'  size: {size}\n'
        u''.format(
            key=key,
            bucket=bucket.id,
            size=size),
        fg='green'))
    click.echo(u'to record:\n')
    click.echo(click.style(
        u'  Title: "{title}"\n'
        u'  RECID: {recid}\n'
        u'  UUID: {uuid}\n'
        u''.format(
            recid=record['recid'],
            title=record['title'],
            uuid=record.id),
        fg='green'))
    if replace_existing and obj is not None:
        click.echo(u'and remove the file:\n')
        click.echo(click.style(
            u'  key: "{key}"\n'
            u'  bucket: {bucket}\n'
            u'  size: {size}\n'
            u''.format(
                key=obj.key,
                bucket=obj.bucket,
                size=obj.file.size),
            fg='green'))

    if click.confirm(u'Continue?'):
        bucket.locked = False
        if obj is not None and replace_existing:
            ObjectVersion.delete(bucket, obj.key)
        ObjectVersion.create(bucket, key, stream=fp, size=size)
        bucket.locked = True

        record.files.flush()
        record.commit()
        db.session.commit()
        click.echo(click.style(u'File added successfully.', fg='green'))
    else:
        click.echo(click.style(u'File addition aborted.', fg='green'))
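
Example #32 reads like the body of a click CLI command: it prints coloured output, asks for confirmation, and its error message already refers to a --replace-existing/-f option. A minimal sketch of how it might be wired up as a command (the command name, the argument types and the thin wrapper are assumptions, not part of the listing):

import click

@click.command('add-file')  # hypothetical command name
@click.argument('recid')
@click.argument('fp', type=click.File('rb'))
@click.option('--replace-existing', '-f', is_flag=True, default=False,
              help='Overwrite an existing file with the same key.')
def add_file_command(recid, fp, replace_existing):
    """Add a new file to a published record."""
    add_file(recid, fp, replace_existing)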
Example #34
0
def test_extra_formats_buckets(api, api_client, db, es, locations,
                               json_extra_auth_headers, deposit_url, get_json,
                               extra_auth_headers, json_headers,
                               license_record, communities, resolver,
                               minimal_deposit):
    """Test simple flow using REST API."""
    headers = json_extra_auth_headers
    client = api_client
    test_data = minimal_deposit

    # Create deposit
    response = client.post(deposit_url, json=test_data, headers=headers)
    data = get_json(response, code=201)
    # Get identifier and links
    depid = data['record_id']
    links = data['links']

    # Upload one file
    response = client.put(
        links['bucket'] + '/test1.txt',
        data='ctx',
        headers=extra_auth_headers,
    )
    assert response.status_code == 200

    # Check for extra_formats bucket
    response = api_client.options(extra_formats_urls['deposit'].format(depid),
                                  headers=headers)
    data = get_json(response, code=200)

    # Check that no extra_formats bucket is present
    buckets = Bucket.query.all()
    assert len(buckets) == 1

    # There are no extra_formats files
    assert data == []

    use_extra_formats_functions(extra_auth_headers,
                                api_client,
                                get_json,
                                depid=depid)

    buckets = Bucket.query.all()
    assert len(buckets) == 2

    deposit = deposit_resolver.resolve(depid)[1]
    assert deposit['_buckets']['extra_formats'] == \
        str(deposit.extra_formats.bucket.id)
    # Publish deposition
    response = client.post(links['publish'], headers=extra_auth_headers)
    data = get_json(response, code=202)
    first_version_recid = data['record_id']

    # Get the list of extra_formats files attached to the published record
    response = api_client.options(
        extra_formats_urls['record'].format(first_version_recid))
    data = get_json(response, code=200)

    assert data[0]['key'] == 'application/foo+xml'
    assert len(data) == 1

    # Test actions and clear extra_formats bucket
    use_extra_formats_functions(extra_auth_headers,
                                api_client,
                                get_json,
                                depid=depid,
                                recid=first_version_recid)

    # Get newversion url
    data = get_json(client.get(links['self'], headers=extra_auth_headers),
                    code=200)
    new_version_url = data['links']['newversion']

    # New Version
    data = get_json(client.post(new_version_url, headers=extra_auth_headers),
                    code=201)
    links = data['links']

    # Get the list of the extra_formats files attached to the new deposit
    # Should be the same as in the previous version
    response = api_client.options(extra_formats_urls['deposit'].format(depid),
                                  headers=extra_auth_headers)
    data = get_json(response, code=200)

    assert data[0]['key'] == 'application/foo+xml'
    assert len(data) == 1

    # Get latest version
    data = get_json(client.get(links['latest_draft'],
                               headers=extra_auth_headers),
                    code=200)
    links = data['links']
    depid = data['record_id']

    # Add a file to the new deposit
    get_json(client.put(
        links['bucket'] + '/newfile.txt',
        data='newfile',
        headers=extra_auth_headers,
    ),
             code=200)

    # Publish the new record
    response = client.post(links['publish'], headers=extra_auth_headers)
    data = get_json(response, code=202)
    links = data['links']
    recid = data['record_id']

    # Get the list of the extra_formats files attached to the new record
    response = api_client.options(extra_formats_urls['record'].format(recid))
    data = get_json(response, code=200)

    assert data[0]['key'] == 'application/foo+xml'
    assert len(data) == 1

    # Add file to extra_formats bucket
    response = api_client.put(extra_formats_urls['deposit'].format(recid),
                              data='bar file content',
                              headers=extra_formats_headers['bar'] +
                              extra_auth_headers)
    data = get_json(response, code=200)
    assert data['message'] == 'Extra format "application/bar+xml" updated.'

    # Get the list of the extra_formats files attached to the new record
    response = api_client.options(extra_formats_urls['record'].format(recid))
    data = get_json(response, code=200)

    assert {f['key'] for f in data} == \
        {'application/foo+xml', 'application/bar+xml'}

    # Get the list of the extra_formats files attached to the previous record
    # Make sure that the snapshots are independent
    response = api_client.options(
        extra_formats_urls['record'].format(first_version_recid))
    data = get_json(response, code=200)

    first_record = record_resolver.resolve(first_version_recid)[1]
    new_record = record_resolver.resolve(recid)[1]
    assert first_record.extra_formats.bucket.id != \
        new_record.extra_formats.bucket.id

    assert data[0]['key'] == 'application/foo+xml'
    assert len(data) == 1

    # Test actions and clear extra_formats bucket of deposit
    use_extra_formats_functions(extra_auth_headers,
                                api_client,
                                get_json,
                                depid=depid,
                                recid=recid)
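
Example #34 repeats the same list-then-upload round trip several times; reduced to a sketch, it uses only names visible in the test (extra_formats_urls and the header mappings are fixtures defined elsewhere in the test module):

def list_extra_formats(api_client, get_json, recid):
    # OPTIONS on the record's extra-formats URL lists the format files.
    resp = api_client.options(extra_formats_urls['record'].format(recid))
    return get_json(resp, code=200)


def put_extra_format(api_client, get_json, pid_value, payload, headers):
    # PUT on the deposit's extra-formats URL creates or replaces one format,
    # mirroring how the test uploads 'application/bar+xml'.
    resp = api_client.put(extra_formats_urls['deposit'].format(pid_value),
                          data=payload, headers=headers)
    return get_json(resp, code=200)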
Example #35
0
def test_extra_formats_buckets(
        api, api_client, db, es, locations, json_extra_auth_headers,
        deposit_url, get_json, extra_auth_headers, json_headers,
        license_record, communities, resolver, minimal_deposit):
    """Test simple flow using REST API."""
    headers = json_extra_auth_headers
    client = api_client
    test_data = minimal_deposit

    # Create deposit
    response = client.post(
        deposit_url, json=test_data, headers=headers)
    data = get_json(response, code=201)
    # Get identifier and links
    depid = data['record_id']
    links = data['links']

    # Upload one file
    response = client.put(
        links['bucket'] + '/test1.txt',
        data='ctx',
        headers=extra_auth_headers,
    )
    assert response.status_code == 200

    # Check for extra_formats bucket
    response = api_client.options(
        extra_formats_urls['deposit'].format(depid), headers=headers)
    data = get_json(response, code=200)

    # Check that no extra_formats bucket is present
    buckets = Bucket.query.all()
    assert len(buckets) == 1

    # There are no extra_formats files
    assert data == []

    use_extra_formats_functions(
        extra_auth_headers, api_client, get_json, depid=depid)

    buckets = Bucket.query.all()
    assert len(buckets) == 2

    deposit = deposit_resolver.resolve(depid)[1]
    assert deposit['_buckets']['extra_formats'] == \
        str(deposit.extra_formats.bucket.id)
    # Publish deposition
    response = client.post(links['publish'], headers=extra_auth_headers)
    data = get_json(response, code=202)
    first_version_recid = data['record_id']

    # Get the list of extra_formats files attached to the published record
    response = api_client.options(
        extra_formats_urls['record'].format(first_version_recid))
    data = get_json(response, code=200)

    assert data[0]['key'] == 'application/foo+xml'
    assert len(data) == 1

    # Test actions and clear extra_formats bucket
    use_extra_formats_functions(extra_auth_headers, api_client, get_json,
                                depid=depid, recid=first_version_recid)

    # Get newversion url
    data = get_json(
        client.get(links['self'], headers=extra_auth_headers), code=200
        )
    new_version_url = data['links']['newversion']

    # New Version
    data = get_json(
        client.post(new_version_url, headers=extra_auth_headers), code=201)
    links = data['links']

    # Get the list of the extra_formats files attached to the new deposit
    # Should be the same as in the previous version
    response = api_client.options(
        extra_formats_urls['deposit'].format(depid),
        headers=extra_auth_headers
        )
    data = get_json(response, code=200)

    assert data[0]['key'] == 'application/foo+xml'
    assert len(data) == 1

    # Get latest version
    data = get_json(
        client.get(links['latest_draft'], headers=extra_auth_headers),
        code=200)
    links = data['links']
    depid = data['record_id']

    # Add a file to the new deposit
    get_json(client.put(
        links['bucket'] + '/newfile.txt',
        data='newfile',
        headers=extra_auth_headers,
    ), code=200)

    # Publish the new record
    response = client.post(links['publish'], headers=extra_auth_headers)
    data = get_json(response, code=202)
    links = data['links']
    recid = data['record_id']

    # Get the list of the extra_formats files attached to the new record
    response = api_client.options(extra_formats_urls['record'].format(recid))
    data = get_json(response, code=200)

    assert data[0]['key'] == 'application/foo+xml'
    assert len(data) == 1

    # Add file to extra_formats bucket
    response = api_client.put(
        extra_formats_urls['deposit'].format(recid),
        data='bar file content',
        headers=extra_formats_headers['bar'] + extra_auth_headers
    )
    data = get_json(response, code=200)
    assert data['message'] == 'Extra format "application/bar+xml" updated.'

    # Get the list of the extra_formats files attached to the new record
    response = api_client.options(extra_formats_urls['record'].format(recid))
    data = get_json(response, code=200)

    assert {f['key'] for f in data} == \
        {'application/foo+xml', 'application/bar+xml'}

    # Get the list of the extra_formats files attached to the previous record
    # Make sure that the snapshots are independent
    response = api_client.options(
        extra_formats_urls['record'].format(first_version_recid))
    data = get_json(response, code=200)

    first_record = record_resolver.resolve(first_version_recid)[1]
    new_record = record_resolver.resolve(recid)[1]
    assert first_record.extra_formats.bucket.id != \
        new_record.extra_formats.bucket.id

    assert data[0]['key'] == 'application/foo+xml'
    assert len(data) == 1

    # Test actions and clear extra_formats bucket of deposit
    use_extra_formats_functions(
        extra_auth_headers, api_client, get_json, depid=depid, recid=recid)
Example #36
0
def versioning_link_records(recids):
    """Link several non-versioned records into one versioning scheme.

    The records are linked in the order in which they appear in the list,
    with the first record serving as the base for minting the conceptdoi.
    If one of the records has already been upgraded, it is taken as the
    base for the conceptdoi instead, while preserving the requested order.

    :param recids: list of recid values (strings) to link,
                   e.g.: ['1234','55125','51269']
    :type recids: list of str
    """
    recids_records = [
        record_resolver.resolve(recid_val) for recid_val in recids
    ]
    depids_deposits = [
        deposit_resolver.resolve(record['_deposit']['id'])
        for _, record in recids_records
    ]

    rec_comms = sorted(
        set(sum([rec.get('communities', []) for _, rec in recids_records],
                [])))

    dep_comms = sorted(
        set(sum([dep.get('communities', []) for _, dep in depids_deposits],
                [])))

    upgraded = [(recid, rec) for recid, rec in recids_records
                if 'conceptdoi' in rec]

    # Determine the base record for versioning
    if len(upgraded) == 0:
        recid_v, record_v = recids_records[0]
    elif len(upgraded) == 1:
        recid_v, record_v = upgraded[0]
    elif len(upgraded) > 1:
        recid_v, record_v = upgraded[0]
        child_recids = [
            int(recid.pid_value)
            for recid in PIDVersioning(child=recid_v).children.all()
        ]

        i_upgraded = [int(recid.pid_value) for recid, rec in upgraded]
        if set(child_recids) != set(i_upgraded):
            raise Exception('Multiple upgraded records, which belong '
                            'to different versioning schemes.')

    # Mint the concept DOI for the base record
    conceptdoi = zenodo_concept_doi_minter(record_v.id, record_v)

    conceptrecid_v = PersistentIdentifier.get('recid',
                                              record_v['conceptrecid'])
    conceptrecid_v_val = conceptrecid_v.pid_value

    pv_r1 = PIDVersioning(parent=conceptrecid_v)
    children_recids = [c.pid_value for c in pv_r1.children.all()]
    if not all(cr in recids for cr in children_recids):
        raise Exception('Children of the already upgraded record: {0} are '
                        'not specified in the ordering: {1}'
                        ''.format(children_recids, recids))

    for (recid, record), (depid, deposit) in \
            zip(recids_records, depids_deposits):

        # Remove old versioning schemes for non-base recids
        # Note: This will remove the child of the base-conceptrecid as well
        # but that's OK, since it will be added again afterwards in the
        # correct order.
        conceptrecid = PersistentIdentifier.get('recid',
                                                record['conceptrecid'])
        pv = PIDVersioning(parent=conceptrecid)
        pv.remove_child(recid)
        if conceptrecid.pid_value != conceptrecid_v_val:
            conceptrecid.delete()

        # Update the 'conceptrecid' and 'conceptdoi' in records and deposits
        record['conceptdoi'] = conceptdoi.pid_value
        record['conceptrecid'] = conceptrecid_v.pid_value
        record['communities'] = rec_comms
        record.commit()
        deposit['conceptdoi'] = conceptdoi.pid_value
        deposit['conceptrecid'] = conceptrecid_v.pid_value
        deposit['communities'] = dep_comms
        deposit.commit()

        # Add the child to the new versioning scheme
        pv_r1.insert_child(recid)

    pv_r1.update_redirect()
    db.session.commit()

    conceptrecid_v = PersistentIdentifier.get('recid', conceptrecid_v_val)
    pv = PIDVersioning(parent=conceptrecid_v)
    if current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
        datacite_register.delay(pv.last_child.pid_value,
                                str(pv.last_child.object_uuid))

    index_siblings(pv.last_child, with_deposits=True, eager=True)
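
A usage sketch for the helper above, taken from its own docstring example (the recid values are placeholders, and the call has to run inside the application and database context the function expects):

# Link three standalone records; '1234' (or the already-upgraded record,
# if there is one) becomes the base for the concept DOI.
versioning_link_records(['1234', '55125', '51269'])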
Example #37
0
def test_autoadd(app, db, users, communities, deposit, deposit_file,
                 communities_autoadd_enabled):
    """Test basic workflow using Deposit and Communities API."""
    deposit_v1 = publish_and_expunge(db, deposit)
    depid_v1_value = deposit_v1['_deposit']['id']

    recid_v1, record_v1 = deposit_v1.fetch_published()
    recid_v1_value = recid_v1.pid_value

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    deposit_v2 = deposit_v2.edit()
    # 1. Request 'c1', 'c2' and 'c3' ('c3' is owned by the user) via deposit v2
    deposit_v2['communities'] = ['c1', 'c2', 'c3', ]
    deposit_v2['grants'] = [{'title': 'SomeGrant'}, ]
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()
    assert record_v2['grants'] == [{'title': 'SomeGrant'}, ]
    recid_v2_value = recid_v2.pid_value
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    assert record_v1.get('communities', []) == ['c3', 'grants_comm']
    assert record_v2.get('communities', []) == ['c3', 'grants_comm']
    assert deposit_v1.get('communities', []) == ['c1', 'c2', 'c3', 'ecfunded',
                                                 'grants_comm', 'zenodo']
    assert deposit_v2.get('communities', []) == ['c1', 'c2', 'c3', 'ecfunded',
                                                 'grants_comm', 'zenodo']

    c1_api = ZenodoCommunity('c1')
    c2_api = ZenodoCommunity('c2')
    c3_api = ZenodoCommunity('c3')
    grants_comm_api = ZenodoCommunity('grants_comm')
    ecfunded_api = ZenodoCommunity('ecfunded')
    zenodo_api = ZenodoCommunity('zenodo')

    # Inclusion requests should be visible for both records
    assert c1_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert c1_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1
    assert c2_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert c2_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1
    assert c3_api.get_comm_irs(record_v1, pid=recid_v1).count() == 0
    assert c3_api.get_comm_irs(record_v2, pid=recid_v2).count() == 0
    assert grants_comm_api.get_comm_irs(
        record_v1, pid=recid_v1).count() == 0
    assert grants_comm_api.get_comm_irs(
        record_v2, pid=recid_v2).count() == 0
    assert ecfunded_api.get_comm_irs(
        record_v1, pid=recid_v1).count() == 1
    assert ecfunded_api.get_comm_irs(
        record_v2, pid=recid_v2).count() == 1
    assert zenodo_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert zenodo_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1

    # Accept to 'c1' through record_v2 (as originally requested),
    # and 'c2' through record_v1 (resolved through version)
    c1_api.accept_record(record_v2, pid=recid_v2)
    c2_api.accept_record(record_v1, pid=recid_v1)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    recid_v2, record_v2 = record_resolver.resolve(recid_v2_value)
    # Accepting individual record to a community should propagate the changes
    # to all versions
    assert record_v1.get('communities', []) == ['c1', 'c2', 'c3',
                                                'grants_comm']
    assert record_v2.get('communities', []) == ['c1', 'c2', 'c3',
                                                'grants_comm']
    assert deposit_v1.get('communities', []) == ['c1', 'c2', 'c3', 'ecfunded',
                                                 'grants_comm', 'zenodo']
    assert deposit_v2.get('communities', []) == ['c1', 'c2', 'c3', 'ecfunded',
                                                 'grants_comm', 'zenodo']

    # Removing 'c1'-'c3' from deposit_v1 should remove it from two published
    # records and other deposits as well
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    deposit_v1 = deposit_v1.edit()
    deposit_v1['communities'] = []
    deposit_v1 = publish_and_expunge(db, deposit_v1)
    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    recid_v2, record_v2 = record_resolver.resolve(recid_v2_value)
    assert record_v1.get('communities', []) == ['grants_comm', ]
    assert record_v2.get('communities', []) == ['grants_comm', ]
    assert deposit_v1.get('communities', []) == ['ecfunded', 'grants_comm',
                                                 'zenodo']
    assert deposit_v2.get('communities', []) == ['ecfunded', 'grants_comm',
                                                 'zenodo']
Example #38
0
def test_autoadd(app, db, users, communities, deposit, deposit_file,
                 communities_autoadd_enabled):
    """Test basic workflow using Deposit and Communities API."""
    deposit_v1 = publish_and_expunge(db, deposit)
    depid_v1_value = deposit_v1['_deposit']['id']

    recid_v1, record_v1 = deposit_v1.fetch_published()
    recid_v1_value = recid_v1.pid_value

    deposit_v1 = deposit_v1.newversion()
    pv = PIDVersioning(child=recid_v1)
    depid_v2 = pv.draft_child_deposit
    depid_v2_value = depid_v2.pid_value
    deposit_v2 = ZenodoDeposit.get_record(depid_v2.get_assigned_object())
    deposit_v2.files['file.txt'] = BytesIO(b('file1'))
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    deposit_v2 = deposit_v2.edit()
    # 1. Request 'c1', 'c2' and 'c3' ('c3' is owned by the user) via deposit v2
    deposit_v2['communities'] = [
        'c1',
        'c2',
        'c3',
    ]
    deposit_v2['grants'] = [
        {
            'title': 'SomeGrant'
        },
    ]
    deposit_v2 = publish_and_expunge(db, deposit_v2)
    recid_v2, record_v2 = deposit_v2.fetch_published()
    assert record_v2['grants'] == [
        {
            'title': 'SomeGrant'
        },
    ]
    recid_v2_value = recid_v2.pid_value
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    recid_v1, record_v1 = deposit_v1.fetch_published()
    assert record_v1.get('communities', []) == ['c3', 'grants_comm']
    assert record_v2.get('communities', []) == ['c3', 'grants_comm']
    assert deposit_v1.get('communities', []) == [
        'c1', 'c2', 'c3', 'ecfunded', 'grants_comm', 'zenodo'
    ]
    assert deposit_v2.get('communities', []) == [
        'c1', 'c2', 'c3', 'ecfunded', 'grants_comm', 'zenodo'
    ]

    c1_api = ZenodoCommunity('c1')
    c2_api = ZenodoCommunity('c2')
    c3_api = ZenodoCommunity('c3')
    grants_comm_api = ZenodoCommunity('grants_comm')
    ecfunded_api = ZenodoCommunity('ecfunded')
    zenodo_api = ZenodoCommunity('zenodo')

    # Inclusion requests should be visible for both records
    assert c1_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert c1_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1
    assert c2_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert c2_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1
    assert c3_api.get_comm_irs(record_v1, pid=recid_v1).count() == 0
    assert c3_api.get_comm_irs(record_v2, pid=recid_v2).count() == 0
    assert grants_comm_api.get_comm_irs(record_v1, pid=recid_v1).count() == 0
    assert grants_comm_api.get_comm_irs(record_v2, pid=recid_v2).count() == 0
    assert ecfunded_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert ecfunded_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1
    assert zenodo_api.get_comm_irs(record_v1, pid=recid_v1).count() == 1
    assert zenodo_api.get_comm_irs(record_v2, pid=recid_v2).count() == 1

    # Accept to 'c1' through record_v2 (as originally requested),
    # and 'c2' through record_v1 (resolved through version)
    c1_api.accept_record(record_v2, pid=recid_v2)
    c2_api.accept_record(record_v1, pid=recid_v1)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    recid_v2, record_v2 = record_resolver.resolve(recid_v2_value)
    # Accepting individual record to a community should propagate the changes
    # to all versions
    assert record_v1.get('communities',
                         []) == ['c1', 'c2', 'c3', 'grants_comm']
    assert record_v2.get('communities',
                         []) == ['c1', 'c2', 'c3', 'grants_comm']
    assert deposit_v1.get('communities', []) == [
        'c1', 'c2', 'c3', 'ecfunded', 'grants_comm', 'zenodo'
    ]
    assert deposit_v2.get('communities', []) == [
        'c1', 'c2', 'c3', 'ecfunded', 'grants_comm', 'zenodo'
    ]

    # Removing 'c1'-'c3' from deposit_v1 should remove it from two published
    # records and other deposits as well
    depid_v1, deposit_v1 = deposit_resolver.resolve(depid_v1_value)
    deposit_v1 = deposit_v1.edit()
    deposit_v1['communities'] = []
    deposit_v1 = publish_and_expunge(db, deposit_v1)
    depid_v2, deposit_v2 = deposit_resolver.resolve(depid_v2_value)
    recid_v1, record_v1 = record_resolver.resolve(recid_v1_value)
    recid_v2, record_v2 = record_resolver.resolve(recid_v2_value)
    assert record_v1.get('communities', []) == [
        'grants_comm',
    ]
    assert record_v2.get('communities', []) == [
        'grants_comm',
    ]
    assert deposit_v1.get('communities',
                          []) == ['ecfunded', 'grants_comm', 'zenodo']
    assert deposit_v2.get('communities',
                          []) == ['ecfunded', 'grants_comm', 'zenodo']
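
The two test_autoadd variants above exercise the same community workflow; reduced to a sketch, it uses only calls that appear in the tests and assumes deposit is an already-published deposit fixture (the community id 'c1' is a placeholder):

# Sketch: request inclusion via the deposit, then accept it on any version.
comm = ZenodoCommunity('c1')
deposit = deposit.edit()
deposit['communities'] = ['c1']
deposit = publish_and_expunge(db, deposit)
recid, record = deposit.fetch_published()
assert comm.get_comm_irs(record, pid=recid).count() == 1
comm.accept_record(record, pid=recid)  # acceptance propagates to all versions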