def publish(self, pid=None, id_=None):
    """Publish a deposit.

    On first publication a record PID is minted (using the minter named by
    the ``DEPOSIT_PID_MINTER`` config key) and a new ``Record`` is created
    from a dump of the deposit. On subsequent publications the already
    published record is rebuilt from the current deposit data and committed.

    :param pid: Deposit PID to publish; defaults to ``self.pid``.
    :param id_: Forced UUID for the new record; defaults to ``uuid4()``.
    :raises PIDInvalidAction: If the deposit PID is not registered.
    :returns: The deposit itself, after ``self.commit()``.
    """
    pid = pid or self.pid

    if not pid.is_registered():
        raise PIDInvalidAction()

    # Flip the status before dumping so the published record carries it too.
    self['_deposit']['status'] = 'published'

    if self['_deposit'].get('pid') is None:  # First publishing
        minter = current_pidstore.minters[
            current_app.config['DEPOSIT_PID_MINTER']]
        id_ = id_ or uuid.uuid4()
        record_pid = minter(id_, self)

        # Remember which record PID this deposit was published as.
        self['_deposit']['pid'] = {
            'type': record_pid.pid_type,
            'value': record_pid.pid_value
        }

        data = dict(self.dumps())
        data['$schema'] = self.record_schema
        record = Record.create(data, id_=id_)
    else:  # Update after edit
        record_pid, record = self.fetch_published()
        # TODO add support for patching
        # Guard against publishing over a record revision other than the
        # one this draft was edited from.
        assert record.revision_id == self['_deposit']['pid']['revision_id']

        data = dict(self.dumps())
        data['$schema'] = self.record_schema
        record = record.__class__(data, model=record.model)
        record.commit()

    self.commit()
    return self
def delete(self, *args, **kwargs):
    """Delete the deposit.

    Refuses to delete a deposit that has already been published (i.e. that
    carries a record PID). Removes the reserved ``recid``, the
    record-bucket link, any in-progress multipart uploads and the bucket
    itself, then delegates to the parent class.

    :raises PIDInvalidAction: If the deposit is already published.
    """
    if self['_deposit'].get('pid'):
        raise PIDInvalidAction()
    # Delete reserved recid.
    pid_recid = PersistentIdentifier.get(
        pid_type='recid', pid_value=self['recid'])
    if pid_recid.status == PIDStatus.RESERVED:
        db.session.delete(pid_recid)
    # Completely remove bucket
    q = RecordsBuckets.query.filter_by(record_id=self.id)
    bucket = q.one().bucket
    with db.session.begin_nested():
        # Remove Record-Bucket link
        q.delete()
        mp_q = MultipartObject.query_by_bucket(bucket)
        # Remove multipart objects: delete the parts first, then their
        # parent multipart uploads; 'fetch' keeps the session in sync
        # with the bulk deletes.
        Part.query.filter(
            Part.upload_id.in_(
                mp_q.with_entities(
                    MultipartObject.upload_id).subquery())).delete(
                        synchronize_session='fetch')
        mp_q.delete(synchronize_session='fetch')
    bucket.remove()
    return super(ZenodoDeposit, self).delete(*args, **kwargs)
def edit(self, pid=None):
    """Edit deposit.

    Turns a published deposit back into an editable draft: the deposit JSON
    is replaced by a dump of the published record, with the record's
    current revision id recorded so the subsequent publish can merge
    against it.

    :param pid: Deposit PID; defaults to ``self.pid``.
    :raises PIDInvalidAction: If the deposit status is not ``'published'``.
    :returns: A new instance of this class wrapping the updated model.
    """
    pid = pid or self.pid

    if 'published' != self['_deposit']['status']:
        raise PIDInvalidAction()

    def _edit(record):
        """Update selected keys."""
        data = record.dumps()
        # Keep current record revision for merging.
        data['_deposit']['pid']['revision_id'] = record.revision_id
        data['_deposit']['status'] = 'draft'
        data['$schema'] = self.build_deposit_schema(record)
        return data

    with db.session.begin_nested():
        before_record_update.send(self)

        record_pid, record = self.fetch_published()
        assert PIDStatus.REGISTERED == record_pid.status
        assert record['_deposit'] == self['_deposit']

        # Overwrite the deposit JSON, then make sure SQLAlchemy notices
        # the in-place mutation of the JSON column.
        self.model.json = _edit(record)
        flag_modified(self.model, 'json')
        db.session.merge(self.model)

    after_record_update.send(self)
    return self.__class__(self.model.json, model=self.model)
def delete(self, delete_published=False, *args, **kwargs):
    """Delete the deposit.

    Cleans up versioning links, reserved PIDs, the record-bucket link,
    multipart uploads and the bucket itself, deletes the deposit PID and
    finally delegates to ``invenio_records.api.Record.delete``.

    :param delete_published: If True, even deposit of a published record
        will be deleted (usually used by admin operations).
    :type delete_published: bool
    """
    is_published = self['_deposit'].get('pid')
    if is_published and not delete_published:
        raise PIDInvalidAction()

    # Delete the recid
    recid = PersistentIdentifier.get(
        pid_type='recid', pid_value=self['recid'])

    versioning = PIDVersioning(child=recid)
    if versioning.exists:
        if versioning.draft_child and \
                self.pid == versioning.draft_child_deposit:
            versioning.remove_draft_child()
        if versioning.last_child:
            # Re-index the remaining siblings after the draft has been
            # detached from the versioning chain.
            index_siblings(versioning.last_child,
                           children=versioning.children.all(),
                           include_pid=True,
                           neighbors_eager=True,
                           with_deposits=True)

    if recid.status == PIDStatus.RESERVED:
        db.session.delete(recid)

    if 'conceptrecid' in self:
        concept_recid = PersistentIdentifier.get(
            pid_type='recid', pid_value=self['conceptrecid'])
        if concept_recid.status == PIDStatus.RESERVED:
            db.session.delete(concept_recid)

    # Completely remove bucket
    bucket = self.files.bucket
    with db.session.begin_nested():
        # Remove Record-Bucket link
        RecordsBuckets.query.filter_by(record_id=self.id).delete()
        mp_q = MultipartObject.query_by_bucket(bucket)
        # Remove multipart objects (parts first, then the uploads).
        Part.query.filter(
            Part.upload_id.in_(
                mp_q.with_entities(
                    MultipartObject.upload_id).subquery())).delete(
                        synchronize_session='fetch')
        mp_q.delete(synchronize_session='fetch')
    # The bucket has to be unlocked before it can be removed.
    bucket.locked = False
    bucket.remove()

    depid = kwargs.get('pid', self.pid)
    if depid:
        depid.delete()

    # NOTE: We call the parent of Deposit, invenio_records.api.Record since
    # we need to completely override everything that the Deposit.delete
    # method does.
    return super(Deposit, self).delete(*args, **kwargs)
def delete(self, force=True, pid=None):
    """Delete a project.

    Refuses to delete while any child video is already published;
    otherwise removes every video deposit first and then the project
    itself via the parent class.
    """
    project_videos = video_resolver(self.video_ids)
    # A single published video blocks deletion of the whole project.
    published = [v for v in project_videos if v['_deposit'].get('pid')]
    if published:
        raise PIDInvalidAction()
    for child in project_videos:
        child.delete(force=force)
    return super(Project, self).delete(force=force, pid=pid)
def delete(self, force=True, pid=None):
    """Delete deposit.

    A deposit that already points at a published record (it has a record
    PID stored under ``_deposit.pid``) cannot be deleted.
    """
    target_pid = pid if pid else self.pid
    # Published deposits are immutable from the deposit side.
    if self['_deposit'].get('pid'):
        raise PIDInvalidAction()
    if target_pid:
        target_pid.delete()
    return super(Deposit, self).delete(force=force)
def publish(self, pid=None, id_=None):
    """Publish a deposit.

    If it's the first time:

    * it sets the following meta information inside the deposit:

    .. code-block:: python

        deposit['_deposit']['pid'] = {
            'type': pid_type,
            'value': pid_value,
            'revision_id': 0,
        }

    * A dump of all information inside the deposit is done.

    * A snapshot of the files is done.

    Otherwise, it publishes the new edited version. In this case, if
    meanwhile someone already published a new version, it'll try to merge
    the changes with the latest version.

    .. note:: no need for indexing as it calls `self.commit()`.

    Status required: ``'draft'``.

    :param pid: Force the new pid value. (Default: ``None``)
    :param id_: Force the new uuid value as deposit id. (Default: ``None``)
    :returns: Returns itself.
    """
    # FIXED: the docstring previously claimed the whole ``_deposit`` dict
    # is replaced and that a minter is called here; this variant only
    # records the (already registered) PID under ``_deposit.pid``.
    # Also removed a stale commented-out ``self._publish_new`` call.
    pid = pid or self.pid

    if not pid.is_registered():
        raise PIDInvalidAction()

    self['_deposit']['status'] = 'published'

    if self['_deposit'].get('pid') is None:  # First publishing
        self['_deposit']['pid'] = {
            'type': pid.pid_type,
            'value': pid.pid_value,
            'revision_id': 0,
        }
    else:  # Update after edit
        record = self._publish_edited()
        record.commit()

    self.commit()
    return self
def delete(self, force=True, pid=None):
    """Delete a project and all of its video deposits.

    :param force: Forwarded to each video's and the project's ``delete``.
    :param pid: Project PID forwarded to the parent ``delete``.
    :raises PIDInvalidAction: If any video is already published.
    """
    videos = deposit_videos_resolver(self.video_ids)
    # check if I can delete all videos
    if any(video['_deposit'].get('pid') for video in videos):
        raise PIDInvalidAction()
    # delete all videos
    for video in videos:
        video.delete(force=force)
        # mark video PIDs as DELETED
        # BUG FIX: use a distinct local name here. The original code
        # rebound the ``pid`` parameter inside this loop, so the *last
        # video's* PID (instead of the project PID passed by the caller)
        # was forwarded to ``super().delete`` below.
        video_pid = get_video_pid(pid_value=video['_deposit']['id'])
        if not video_pid.is_deleted():
            video_pid.delete()
    return super(Project, self).delete(force=force, pid=pid)
def registerconceptdoi(self, pid=None):
    """Register the conceptdoi for the deposit and record.

    :param pid: Ignored; the published PID is fetched internally.
    :raises PIDInvalidAction: If the deposit is not published or its DOI
        is not locally managed.
    :returns: The deposit itself.
    """
    # BUG FIX: the guard previously read
    # ``not self.is_published() and is_doi_locally_managed(self['doi'])``,
    # which let an *unpublished* deposit with an external DOI fall through
    # (``fetch_published()`` below would then fail). A concept DOI
    # requires BOTH a published record AND a locally managed DOI.
    if not self.is_published() or not is_doi_locally_managed(self['doi']):
        raise PIDInvalidAction()

    pid, record = self.fetch_published()
    zenodo_concept_doi_minter(record.id, record)
    record.commit()

    # Mirror the freshly minted conceptdoi on the deposit.
    self['conceptdoi'] = record['conceptdoi']
    self.commit()

    if current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
        from zenodo.modules.deposit.tasks import datacite_register
        datacite_register.delay(pid.pid_value, str(record.id))
    return self
def publish(self, pid=None, id_=None):
    """Publish a deposit.

    Dispatches to ``_publish_new`` on first publication, otherwise merges
    the edited deposit into the already published record via
    ``_publish_edited``.
    """
    target_pid = pid if pid else self.pid
    if not target_pid.is_registered():
        raise PIDInvalidAction()

    self['_deposit']['status'] = 'published'

    first_publication = self['_deposit'].get('pid') is None
    if first_publication:
        self._publish_new(id_=id_)
    else:
        # Update after edit: commit the merged record.
        self._publish_edited().commit()
    self.commit()
    return self
def delete(self, force=True, pid=None):
    """Delete deposit.

    Status required: ``'draft'``.

    :param force: Force deposit delete. (Default: ``True``)
    :param pid: Force pid object. (Default: ``None``)
    :returns: A new Deposit object.
    """
    target_pid = pid if pid else self.pid
    # A deposit tied to a published record must not be deleted here.
    if self['_deposit'].get('pid'):
        raise PIDInvalidAction()
    if target_pid:
        target_pid.delete()
    return super(Deposit, self).delete(force=force)
def _create_new_redirection(cls, old_pid, new_pid):
    """Persist a redirection from ``old_pid`` to ``new_pid``.

    Inside a nested transaction: creates the redirection row, expires both
    PID objects so their state is reloaded from the database, then marks
    the old PID as ``REDIRECTED``.

    :param old_pid: The PID being redirected (its status is updated).
    :param new_pid: The PID the old one should point to.
    :raises PIDInvalidAction: On a database integrity error.
    :raises SQLAlchemyError: Re-raised (after logging) on any other
        database failure.
    :returns: The newly created redirection object.
    """
    try:
        with db.session.begin_nested():
            redirection = cls(original_pid=old_pid, new_pid=new_pid)
            db.session.add(redirection)
            # Expire before mutating so the status change is applied on
            # fresh, database-backed state.
            db.session.expire(old_pid)
            db.session.expire(new_pid)
            old_pid.status = PIDStatus.REDIRECTED
            db.session.add(old_pid)
    except IntegrityError as e:
        raise PIDInvalidAction(e)
    except SQLAlchemyError:
        LOGGER.exception("Failed to redirect record",
                         old_pid=old_pid, new_pid=new_pid)
        raise
    return redirection
def validate_item_request(self, **kwargs):
    """Validate the item's first pending request.

    Sets the circulation status to ``AT_DESK`` when the pickup location
    belongs to the item's owning member, otherwise to ``IN_TRANSIT``, and
    records a circulation transaction.

    :raises PIDInvalidAction: If the item has no pending requests.
    """
    # Guard clause (was a nested if/else): nothing to validate without a
    # pending request.
    if not self.number_of_item_requests():
        raise PIDInvalidAction()
    # FIXED: renamed from ``id``, which shadowed the builtin.
    transaction_id = str(uuid4())
    first_request = self.get_first_request()
    pickup_member_pid = first_request['pickup_member_pid']
    location_pid = self.get('location_pid')
    location = Location.get_record_by_pid(location_pid)
    member = MemberWithLocations.get_member_by_locationid(location.id)
    member_pid = member.pid
    # The item stays at the desk when pickup happens at the owning
    # member's location, otherwise it has to travel.
    if member_pid == pickup_member_pid:
        self['_circulation']['status'] = ItemStatus.AT_DESK
    else:
        self['_circulation']['status'] = ItemStatus.IN_TRANSIT
    data = self.build_data(0, 'validate_item_request')
    CircTransaction.create(data, id=transaction_id)
def sort_by(*args, **kwargs):
    """Proxy to the wrapped ``sort_by_`` — permitted only in draft state."""
    in_draft = self.status == 'draft'
    if not in_draft:
        raise PIDInvalidAction()
    return sort_by_(*args, **kwargs)
def sort_by(*args, **kwargs):
    """Proxy to the wrapped ``sort_by_`` — permitted only for drafts."""
    deposit_status = self['_deposit']['status']
    if deposit_status != 'draft':
        raise PIDInvalidAction()
    return sort_by_(*args, **kwargs)
def newversion(self, pid=None):
    """Create a new version deposit.

    Only proceeds when this deposit is published, ``pid`` is the latest
    child in its PID-versioning chain and no newer draft exists. The new
    draft is created from a dump of the latest record (minus PID/file
    bookkeeping keys), receives an unlocked snapshot of the record's
    bucket (and of the extra-formats bucket, if any), and is linked as
    draft child of the versioning parent.

    :param pid: PID (recid) of the record to create a new version of.
    :raises PIDInvalidAction: If the deposit is not published.
    :returns: The new draft deposit, or ``None`` when no new version was
        created or a database error occurred.
    """
    deposit = None
    try:
        if not self.is_published():
            raise PIDInvalidAction()
        # Check that there is not a newer draft version for this record
        # and this is the latest version
        pv = PIDVersioning(child=pid)
        if pv.exists and not pv.draft_child and pid == pv.last_child:
            last_pid = pv.last_child
            # Get copy of the latest record
            latest_record = WekoDeposit.get_record(last_pid.object_uuid)
            if latest_record is not None:
                data = latest_record.dumps()
                owners = data['_deposit']['owners']
                # Strip deposit/PID/file bookkeeping so fresh values are
                # generated for the new version.
                keys_to_remove = ('_deposit', 'doi', '_oai',
                                  '_files', '_buckets', '$schema')
                for k in keys_to_remove:
                    data.pop(k, None)
                # NOTE: We call the superclass `create()` method, because
                # we don't want a new empty bucket, but an unlocked
                # snapshot of the old record's bucket.
                deposit = super(WekoDeposit, self).create(data)
                # Injecting owners is required in case of creating a new
                # version outside of a request context.
                deposit['_deposit']['owners'] = owners
                recid = PersistentIdentifier.get(
                    'recid', str(data['_deposit']['id']))
                depid = PersistentIdentifier.get(
                    'depid', str(data['_deposit']['id']))
                # Link the new draft into the versioning chain.
                PIDVersioning(parent=pv.parent).insert_draft_child(
                    child=recid)
                RecordDraft.link(recid, depid)
                # Create snapshot from the record's bucket and update data
                snapshot = latest_record.files.bucket.snapshot(lock=False)
                snapshot.locked = False
                deposit['_buckets'] = {'deposit': str(snapshot.id)}
                RecordsBuckets.create(record=deposit.model,
                                      bucket=snapshot)
                if 'extra_formats' in latest_record['_buckets']:
                    extra_formats_snapshot = \
                        latest_record.extra_formats.bucket.snapshot(
                            lock=False)
                    deposit['_buckets']['extra_formats'] = \
                        str(extra_formats_snapshot.id)
                    RecordsBuckets.create(record=deposit.model,
                                          bucket=extra_formats_snapshot)
                index = {'index': self.get('path', []),
                         'actions': 'private' if self.get(
                             'publish_status', '1') == '1' else 'publish'}
                # Clear any cached activity info from the session.
                if 'activity_info' in session:
                    del session['activity_info']
                item_metadata = ItemsMetadata.get_record(
                    last_pid.object_uuid).dumps()
                args = [index, item_metadata]
                deposit.update(*args)
                deposit.commit()
        return deposit
    except SQLAlchemyError as ex:
        current_app.logger.debug(ex)
        db.session.rollback()
        return None
def wrapper(self, *args, **kwargs):
    """Run the wrapped ``method`` only in the required deposit status."""
    current = self.status
    # Reject the action outright in any other state.
    if current != status:
        raise PIDInvalidAction()
    return method(self, *args, **kwargs)
def newversion(self, pid=None):
    """Create a new version deposit.

    Creates a draft deposit for the next version of a published record:
    copies the latest record's data (minus PID/file bookkeeping keys) and
    the last deposit's communities, links the draft into the concept's
    versioning chain, pre-fills the Zenodo DOI and attaches an unlocked
    snapshot of the latest record's bucket.

    :param pid: Ignored — immediately overwritten by the published PID.
    :raises PIDInvalidAction: If the deposit is not published.
    :returns: The deposit itself.
    """
    if not self.is_published():
        raise PIDInvalidAction()
    # Check that there is not a newer draft version for this record
    pid, record = self.fetch_published()
    pv = PIDVersioning(child=pid)
    if (not pv.draft_child and
            is_doi_locally_managed(record['doi'])):
        with db.session.begin_nested():
            # Get copy of the latest record
            latest_record = ZenodoRecord.get_record(
                pv.last_child.object_uuid)
            data = latest_record.dumps()

            # Get the communities from the last deposit
            # and push those to the new version
            latest_depid = PersistentIdentifier.get(
                'depid', data['_deposit']['id'])
            latest_deposit = ZenodoDeposit.get_record(
                latest_depid.object_uuid)
            last_communities = latest_deposit.get('communities', [])

            owners = data['_deposit']['owners']

            # TODO: Check other data that may need to be removed
            keys_to_remove = ('_deposit', 'doi', '_oai',
                              '_files', '_buckets', '$schema')
            for k in keys_to_remove:
                data.pop(k, None)

            # NOTE: We call the superclass `create()` method, because we
            # don't want a new empty bucket, but an unlocked snapshot of
            # the old record's bucket.
            deposit = (super(ZenodoDeposit, self).create(data))
            # Injecting owners is required in case of creating a new
            # version outside of a request context.
            deposit['_deposit']['owners'] = owners
            if last_communities:
                deposit['communities'] = last_communities

            # Link the new draft into the concept's versioning chain.
            conceptrecid = PersistentIdentifier.get(
                'recid', data['conceptrecid'])
            recid = PersistentIdentifier.get(
                'recid', str(data['recid']))
            depid = PersistentIdentifier.get(
                'depid', str(data['_deposit']['id']))
            PIDVersioning(parent=conceptrecid).insert_draft_child(
                child=recid)
            RecordDraft.link(recid, depid)

            # Pre-fill the Zenodo DOI to prevent the user from changing it
            # to a custom DOI.
            deposit['doi'] = doi_generator(recid.pid_value)

            pv = PIDVersioning(child=pid)
            index_siblings(pv.draft_child, neighbors_eager=True,
                           with_deposits=True)

        with db.session.begin_nested():
            # Create snapshot from the record's bucket and update data
            snapshot = latest_record.files.bucket.snapshot(lock=False)
            snapshot.locked = False
            # FIXME: `snapshot.id` might not be present because we need to
            # commit first to the DB.
            # db.session.commit()
            deposit['_buckets'] = {'deposit': str(snapshot.id)}
            RecordsBuckets.create(record=deposit.model, bucket=snapshot)
        deposit.commit()
    return self
def wrapper(self, *args, **kwargs):
    """Run ``method`` only when the circulation status is allowed."""
    current_status = self['_circulation']['status']
    allowed = current_status in statuses
    if not allowed:
        raise PIDInvalidAction()
    return method(self, *args, **kwargs)
def publish(self, pid=None, id_=None):
    """Publish a deposit.

    Overrides parent's `publish`. This is a needed wholesale port with
    tweaks because the tweaks have to be at specific locations.

    If it's the first time the deposit is published:

    * it calls the minter and set the following meta information inside
      the deposit:

    .. code-block:: python

        deposit['_deposit']['pid'] = {
            'type': pid_type,
            'value': pid_value,
            'revision_id': 0,
        }

    * A dump of all information inside the deposit is done.

    * A snapshot of the files is done.

    Otherwise, it publishes the new edited version. In this case, if
    meanwhile someone already published a new version, it'll try to merge
    the changes with the latest version.

    .. note:: no need for indexing as it calls `self.commit()`.

    Status required: ``'draft'``.

    :param pid: Force the new pid value. (Default: ``None``)
    :param id_: Force the new uuid value as deposit id. (Default: ``None``)
    :returns: Returns itself because this is what Invenio Deposit expects.
    """
    pid = pid or self.pid
    if not pid.is_registered():
        raise PIDInvalidAction()

    self['_deposit']['status'] = 'published'

    if self['_deposit'].get('pid') is None:
        published_record = self._publish_new(id_=id_)
    else:
        # Publish after edit
        published_record = self._publish_edited()

    # Mark the record's type as published.
    published_record['type'] = RecordType.published.value
    published_record.commit()

    self.commit()
    db.session.commit()  # Above only flushes, this persists to the DB

    # TODO: Remove? because invenio-deposit takes care of it via signal
    try:
        self.indexer.index(published_record)
    except RequestError:
        current_app.logger.exception(
            'Could not index {0}.'.format(published_record))

    # We DONT rely on invenio-deposit's signal because we want
    # this method to be enough to publish a Record and perform all
    # associated work
    menrva_record_published.send(published_record['id'])
    return self