def header(self):
    """Return the OAI-PMH ``common.Header`` for this record.

    The datestamp comes from ``self.data["timestamp"]`` and the set
    specs from ``self._sets_specs()``.
    """
    stamp = self.data["timestamp"]
    # TODO: store the `deleted` field instead of hard-coding False?
    return common.Header(
        element=None,
        identifier=self._identifier(),
        datestamp=stamp,
        setspec=self._sets_specs(),
        deleted=False,
    )
def record_for_book(self, book, headers_only=False):
    """Build an OAI-PMH record (or bare header) for *book*.

    Parameters
    ----------
    book : Book or Deleted
        A live book, or a tombstone for a deleted one.
    headers_only : bool
        When True, return only the header (no metadata/about parts).

    Returns
    -------
    ``common.Header`` when *headers_only*, otherwise a
    ``(header, metadata, about)`` tuple.

    Raises
    ------
    TypeError
        If *book* is neither a ``Book`` nor a ``Deleted`` instance.
        (Previously this case crashed later with an opaque
        ``UnboundLocalError`` on ``header``.)
    """
    meta = None
    about = None  # hoisted so it is always bound before the final return
    identifier = self.slug_to_identifier(book.slug)
    if isinstance(book, Book):
        # setSpec = map(self.tag_to_setspec, book.tags.filter(category__in=self.TAG_CATEGORIES))
        header = common.Header(identifier, book.changed_at, [], False)
        if not headers_only:
            meta = common.Metadata(self.metadata(book))
    elif isinstance(book, Deleted):
        # Deleted records carry an empty metadata payload and deleted=True.
        header = common.Header(identifier, book.deleted_at, [], True)
        if not headers_only:
            meta = common.Metadata({})
    else:
        # Fail loudly instead of letting `header` be referenced unbound.
        raise TypeError(
            "expected Book or Deleted, got %r" % type(book).__name__)
    if headers_only:
        return header
    return header, meta, about
def _record_for_dataset_datacite(self, dataset, set_spec):
    """Return an OAI-PMH ``(header, metadata, about)`` tuple for *dataset*
    rendered with DataCite-style field names.

    Parameters
    ----------
    dataset
        A CKAN dataset object; ``dataset.id`` is resolved through
        ``package_show`` and ``dataset.metadata_created`` becomes the
        header datestamp.
    set_spec
        The setSpec list to embed in the header.
    """
    package = get_action('package_show')({}, {'id': dataset.id})

    # The extras table arrives as [{'key': ..., 'value': ...}, ...]
    # (e.g. key='Language', value='English'); flatten it to
    # {key: [values]}, splitting multi-valued fields on ';'.
    # The old code iterated item.iteritems() (Python 2 only — crashes
    # on Python 3) just to re-assign the same key/value repeatedly.
    extras = {}
    for item in package['extras']:
        extras[item['key']] = item['value'].split(";")

    # Combine begin/end of temporal coverage into one "begin/end" range.
    temporal_begin = extras.get('TemporalCoverage:BeginDate')
    temporal_end = extras.get('TemporalCoverage:EndDate')
    dates = []
    if temporal_begin or temporal_end:
        begin = temporal_begin[0] if temporal_begin else ''
        end = temporal_end[0] if temporal_end else ''
        dates.append("%s/%s" % (begin, end))

    # identifiers = self._set_id(package, extras)

    # Subjects: tag display names, extended with the Discipline extras.
    subj = ([tag.get('display_name') for tag in package['tags']]
            if package.get('tags', None) else None)
    if subj is not None and 'Discipline' in extras:
        subj.extend(extras['Discipline'])

    author = package.get('author')
    authors = author.split(";") if author else None

    meta = {
        'DOI': extras.get('DOI'),
        'PID': extras.get('PID'),
        'version': extras.get('Version'),
        'source': package.get('url', None),
        'relatedIdentifier': extras.get('RelatedIdentifier'),
        'creator': authors,
        'publisher': extras.get('Publisher'),
        'publicationYear': extras.get('PublicationYear'),
        'publicationTimestamp': extras.get('PublicationTimestamp'),
        'resourceType': extras.get('ResourceType'),
        'language': extras.get('Language'),
        'titles': package.get('title', None) or package.get('name'),
        'contributor': extras.get('Contributor'),
        'descriptions': (self._get_json_content(package.get('notes'))
                         if package.get('notes', None) else None),
        'subjects': subj,
        'rights': extras.get('Rights'),
        'openAccess': extras.get('OpenAccess'),
        'size': extras.get('Size'),
        'format': extras.get('Format'),
        'fundingReference': extras.get('FundingReference'),
        'dates': dates if dates else None,
        'geoLocation': extras.get('SpatialCoverage'),
    }

    # Fixes the bug of a large scalar value being scrambled into
    # individual letters by the serializer: wrap non-list truthy
    # values in a single-element list.
    metadata = {}
    for key, value in meta.items():
        if value and not isinstance(value, list):
            metadata[str(key)] = [value]
        else:
            metadata[str(key)] = value

    return (common.Header('', dataset.id, dataset.metadata_created,
                          set_spec, False),
            common.Metadata('', metadata),
            None)