def _prepare_edit(self, record):
    """Prepare deposit for editing.

    Extend the deposit's communities metadata by the pending
    inclusion requests.
    """
    data = super(ZenodoDeposit, self)._prepare_edit(record)

    # Merge the communities from any pending inclusion requests into
    # the deposit's communities, de-duplicated and sorted.
    pending = (c.id_community
               for c in InclusionRequest.get_by_record(record.id))
    communities = set(data.get('communities', []))
    communities.update(pending)
    data['communities'] = sorted(communities)

    # Remove the OpenAIRE subtype if the record is no longer pending,
    # nor in the relevant community.
    subtype = data['resource_type'].get('openaire_subtype')
    if subtype and not is_valid_openaire_type(
            data['resource_type'], data['communities']):
        data['resource_type'].pop('openaire_subtype')

    # An empty communities list is dropped entirely rather than stored.
    if not data['communities']:
        data.pop('communities')

    # If non-Zenodo DOI, unlock the bucket to allow file-editing.
    if not is_doi_locally_managed(data['doi']):
        self.files.bucket.locked = False
    return data
def registerconceptdoi(self, pid=None):
    """Register the conceptdoi for the deposit and record.

    :param pid: Unused; kept for action-signature compatibility.
    :returns: ``self`` (the deposit).
    :raises PIDInvalidAction: if the deposit is not yet published or its
        DOI is not locally managed — a concept DOI can only be minted
        for published records with Zenodo-managed DOIs.
    """
    # BUGFIX: the guard previously read
    # `not self.is_published() and is_doi_locally_managed(...)`, which
    # only rejected unpublished *locally-managed* deposits. That let
    # unpublished external-DOI deposits fall through to
    # `fetch_published()` and allowed concept-DOI minting on
    # externally-managed DOIs. Both conditions must hold to proceed.
    if not self.is_published() or \
            not is_doi_locally_managed(self['doi']):
        raise PIDInvalidAction()

    pid, record = self.fetch_published()
    zenodo_concept_doi_minter(record.id, record)
    record.commit()

    # Propagate the freshly minted concept DOI back to the deposit.
    self['conceptdoi'] = record['conceptdoi']
    self.commit()
    # Register the DOI with DataCite asynchronously when enabled.
    if current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
        from zenodo.modules.deposit.tasks import datacite_register
        datacite_register.delay(pid.pid_value, str(record.id))
    return self
def _publish_edited(self):
    """Publish the edited deposit with communities merging.

    Builds the edited record via the superclass, restores fields that
    must survive re-publication, optionally syncs files for externally
    managed DOIs, refreshes the DOI, then merges community memberships
    and emits signals for any newly added communities.

    :returns: the edited, published record.
    """
    # Snapshot community membership of both the deposit and the
    # currently published record before publishing, so changes can be
    # merged afterwards.
    dep_comms = set(self.get('communities', []))
    pid, record = self.fetch_published()
    rec_comms = set(record.get('communities', []))

    edited_record = super(ZenodoDeposit, self)._publish_edited()

    # Preserve some of the previously published record fields
    preserve_record_fields = ['_files', '_oai', '_buckets', '_internal']
    for k in preserve_record_fields:
        if k in record:
            edited_record[k] = record[k]

    # If non-Zenodo DOIs also sync files
    if not is_doi_locally_managed(self['doi']):
        record_bucket = edited_record.files.bucket
        # Unlock the record's bucket
        record_bucket.locked = False
        # Mirror the deposit bucket into the record bucket, deleting
        # any record files that no longer exist in the deposit.
        sync_buckets(
            src_bucket=self.files.bucket,
            dest_bucket=record_bucket,
            delete_extras=True,
        )

        # Update the record's metadata
        edited_record['_files'] = self.files.dumps(bucket=record_bucket.id)

        # Lock both record and deposit buckets
        record_bucket.locked = True
        self.files.bucket.locked = True

    zenodo_doi_updater(edited_record.id, edited_record)

    # Merge community changes made on the deposit into the record.
    edited_record = self._sync_communities(dep_comms, rec_comms,
                                           edited_record)

    # Notify only for communities that are new relative to the
    # previously published record.
    new_comms = set(edited_record.get('communities', [])) - \
        (rec_comms or set())
    self._send_community_signals(edited_record, new_comms)
    return edited_record
def doi_locally_managed(pid):
    """Return whether the given DOI PID is managed by this instance.

    Thin wrapper delegating to :func:`is_doi_locally_managed`.
    """
    managed = is_doi_locally_managed(pid)
    return managed
def newversion(self, pid=None):
    """Create a new version deposit.

    Snapshots the latest published record (metadata, communities and
    file buckets — including the extra-formats bucket when present),
    creates a new draft deposit from it, links the new versioning PIDs
    and pre-fills the Zenodo DOI.

    NOTE(review): ``newversion`` is defined twice in this file; this
    earlier definition is shadowed by the later re-definition (which
    lacks the extra-formats handling). Confirm which one is intended.

    :param pid: Unused; kept for action-signature compatibility.
    :returns: ``self`` (the current deposit, not the new version).
    :raises PIDInvalidAction: if the deposit is not yet published.
    """
    if not self.is_published():
        raise PIDInvalidAction()

    # Check that there is not a newer draft version for this record;
    # new versions are only created for locally managed DOIs.
    pid, record = self.fetch_published()
    pv = PIDVersioning(child=pid)
    if (not pv.draft_child and
            is_doi_locally_managed(record['doi'])):
        with db.session.begin_nested():
            # Get copy of the latest record
            latest_record = ZenodoRecord.get_record(
                pv.last_child.object_uuid)
            data = latest_record.dumps()

            # Get the communities from the last deposit
            # and push those to the new version
            latest_depid = PersistentIdentifier.get(
                'depid', data['_deposit']['id'])
            latest_deposit = ZenodoDeposit.get_record(
                latest_depid.object_uuid)
            last_communities = latest_deposit.get('communities', [])

            owners = data['_deposit']['owners']

            # TODO: Check other data that may need to be removed
            # Strip version-specific bookkeeping so the new draft gets
            # fresh values minted for it.
            keys_to_remove = (
                '_deposit', 'doi', '_oai', '_files', '_buckets',
                '$schema')
            for k in keys_to_remove:
                data.pop(k, None)

            # NOTE: We call the superclass `create()` method, because we
            # don't want a new empty bucket, but an unlocked snapshot of
            # the old record's bucket.
            deposit = (super(ZenodoDeposit, self).create(data))
            # Injecting owners is required in case of creating new
            # version this outside of request context
            deposit['_deposit']['owners'] = owners
            if last_communities:
                deposit['communities'] = last_communities

            # Register the new draft in the versioning chain and link
            # the record PID to its deposit PID.
            conceptrecid = PersistentIdentifier.get(
                'recid', data['conceptrecid'])
            recid = PersistentIdentifier.get(
                'recid', str(data['recid']))
            depid = PersistentIdentifier.get(
                'depid', str(data['_deposit']['id']))
            PIDVersioning(parent=conceptrecid).insert_draft_child(
                child=recid)
            RecordDraft.link(recid, depid)

            # Pre-fill the Zenodo DOI to prevent the user from changing
            # it to a custom DOI.
            deposit['doi'] = doi_generator(recid.pid_value)

            # Re-read the versioning state and reindex all siblings so
            # version listings stay consistent.
            pv = PIDVersioning(child=pid)
            index_siblings(pv.draft_child, neighbors_eager=True,
                           with_deposits=True)

        with db.session.begin_nested():
            # Create snapshot from the record's bucket and update data
            snapshot = latest_record.files.bucket.snapshot(lock=False)
            snapshot.locked = False
            if 'extra_formats' in latest_record['_buckets']:
                extra_formats_snapshot = \
                    latest_record.extra_formats.bucket.snapshot(
                        lock=False)
            deposit['_buckets'] = {'deposit': str(snapshot.id)}
            RecordsBuckets.create(record=deposit.model, bucket=snapshot)
            if 'extra_formats' in latest_record['_buckets']:
                deposit['_buckets']['extra_formats'] = \
                    str(extra_formats_snapshot.id)
                RecordsBuckets.create(
                    record=deposit.model, bucket=extra_formats_snapshot)
            deposit.commit()
    return self
def newversion(self, pid=None):
    """Create a new version deposit.

    NOTE(review): this is a duplicate re-definition — it shadows the
    earlier ``newversion`` above, which additionally snapshots the
    extra-formats bucket. One of the two should be removed; confirm
    which behavior is intended before editing.

    :param pid: Unused; kept for action-signature compatibility.
    :returns: ``self`` (the current deposit, not the new version).
    :raises PIDInvalidAction: if the deposit is not yet published.
    """
    if not self.is_published():
        raise PIDInvalidAction()

    # Check that there is not a newer draft version for this record;
    # new versions are only created for locally managed DOIs.
    pid, record = self.fetch_published()
    pv = PIDVersioning(child=pid)
    if (not pv.draft_child and
            is_doi_locally_managed(record['doi'])):
        with db.session.begin_nested():
            # Get copy of the latest record
            latest_record = ZenodoRecord.get_record(
                pv.last_child.object_uuid)
            data = latest_record.dumps()

            # Get the communities from the last deposit
            # and push those to the new version
            latest_depid = PersistentIdentifier.get(
                'depid', data['_deposit']['id'])
            latest_deposit = ZenodoDeposit.get_record(
                latest_depid.object_uuid)
            last_communities = latest_deposit.get('communities', [])

            owners = data['_deposit']['owners']

            # TODO: Check other data that may need to be removed
            # Strip version-specific bookkeeping so the new draft gets
            # fresh values minted for it.
            keys_to_remove = (
                '_deposit', 'doi', '_oai', '_files', '_buckets',
                '$schema')
            for k in keys_to_remove:
                data.pop(k, None)

            # NOTE: We call the superclass `create()` method, because we
            # don't want a new empty bucket, but an unlocked snapshot of
            # the old record's bucket.
            deposit = (super(ZenodoDeposit, self).create(data))
            # Injecting owners is required in case of creating new
            # version this outside of request context
            deposit['_deposit']['owners'] = owners
            if last_communities:
                deposit['communities'] = last_communities

            # Register the new draft in the versioning chain and link
            # the record PID to its deposit PID.
            conceptrecid = PersistentIdentifier.get(
                'recid', data['conceptrecid'])
            recid = PersistentIdentifier.get(
                'recid', str(data['recid']))
            depid = PersistentIdentifier.get(
                'depid', str(data['_deposit']['id']))
            PIDVersioning(parent=conceptrecid).insert_draft_child(
                child=recid)
            RecordDraft.link(recid, depid)

            # Pre-fill the Zenodo DOI to prevent the user from changing
            # it to a custom DOI.
            deposit['doi'] = doi_generator(recid.pid_value)

            # Re-read the versioning state and reindex all siblings so
            # version listings stay consistent.
            pv = PIDVersioning(child=pid)
            index_siblings(pv.draft_child, neighbors_eager=True,
                           with_deposits=True)

        with db.session.begin_nested():
            # Create snapshot from the record's bucket and update data
            snapshot = latest_record.files.bucket.snapshot(lock=False)
            snapshot.locked = False
            # FIXME: `snapshot.id` might not be present because we need
            # to commit first to the DB.
            # db.session.commit()
            deposit['_buckets'] = {'deposit': str(snapshot.id)}
            RecordsBuckets.create(record=deposit.model, bucket=snapshot)
            deposit.commit()
    return self