def get_item_id(item_type_id):
    """Collect item ids from the jpcoar mapping of an item type.

    Builds a dict keyed by jpcoar property name. A name seen once maps to
    its mapping value (tagged with ``model_id`` when it is a dict); a name
    seen again is folded into a list of ``{name: value, 'model_id': key}``
    entries.

    :param item_type_id: The item type id.
    :return: dictionary of jpcoar name -> mapping value(s); on failure a
        dict containing an ``error`` message.
    """
    results = {}
    mapping = Mapping.get_record(item_type_id)
    try:
        for prop_key, prop_val in mapping.items():
            jpcoar = prop_val.get("jpcoar_mapping")
            if not isinstance(jpcoar, dict):
                continue
            is_first = True
            for name, value in jpcoar.items():
                existing = results.get(name)
                if existing is not None:
                    # Name already recorded: merge into (or start) a list.
                    if isinstance(existing, list):
                        merged = existing
                    else:
                        merged = [{name: existing}]
                    merged.append({name: value, "model_id": prop_key})
                    results[name] = merged
                else:
                    results[name] = value
                    # Tag only the first dict-valued entry with its model id.
                    if is_first and isinstance(value, dict):
                        results[name]['model_id'] = prop_key
                        is_first = False
    except Exception as e:
        # Best-effort API: surface the failure inside the result payload.
        results['error'] = str(e)
    return results
def combine_record_file_urls(record, object_uuid, meta_prefix):
    """Add file urls to record metadata.

    Get file property information by item_mapping and put to metadata.

    :param record: metadata dict that is mutated in place.
    :param object_uuid: uuid used to resolve the item type.
    :param meta_prefix: OAI metadata prefix selecting the serializer.
    :return: the (possibly mutated) record.
    """
    from weko_records.api import ItemsMetadata, Mapping
    from weko_records.serializers.utils import get_mapping
    from weko_schema_ui.schema import get_oai_metadata_formats

    metadata_formats = get_oai_metadata_formats(current_app)
    item_type = ItemsMetadata.get_by_object_id(object_uuid)
    item_type_id = item_type.item_type_id
    type_mapping = Mapping.get_record(item_type_id)
    # Schema type (e.g. 'jpcoar') chosen by the requested metadata prefix.
    mapping_type = metadata_formats[meta_prefix]['serializer'][1][
        'schema_type']
    item_map = get_mapping(type_mapping, "{}_mapping".format(mapping_type))
    if item_map:
        file_props = current_app.config["OAISERVER_FILE_PROPS_MAPPING"]
        if mapping_type in file_props:
            file_keys = item_map.get(file_props[mapping_type])
        else:
            file_keys = None
        if not file_keys:
            # No file property configured for this schema -> nothing to add.
            return record
        else:
            file_keys = file_keys.split('.')
        # Expected shape: <item key>.<sub key>.<url key>; anything else is
        # left untouched.
        if len(file_keys) == 3 and record.get(file_keys[0]):
            attr_mlt = record[file_keys[0]]["attribute_value_mlt"]
            if isinstance(attr_mlt, list):
                for attr in attr_mlt:
                    # Only entries that actually carry a file name get a URL.
                    if attr.get('filename'):
                        if not attr.get(file_keys[1]):
                            attr[file_keys[1]] = {}
                        attr[file_keys[1]][file_keys[2]] = \
                            create_files_url(
                                request.url_root,
                                record.get('recid'),
                                attr.get('filename'))
            elif isinstance(attr_mlt, dict) and \
                    attr_mlt.get('filename'):
                if not attr_mlt.get(file_keys[1]):
                    attr_mlt[file_keys[1]] = {}
                attr_mlt[file_keys[1]][file_keys[2]] = \
                    create_files_url(
                        request.url_root,
                        record.get('recid'),
                        attr_mlt.get('filename'))
    return record
def get_mapping():
    """Merge the item type's schema mapping into ``self._record`` values."""
    # NOTE(review): the signature has no ``self`` parameter although the
    # body reads ``self._record`` / ``self._schema_name`` -- this looks
    # like a method whose ``self`` was lost; confirm against the class.
    if isinstance(self._record, dict):
        # ``id`` shadows the builtin; kept as-is (doc-only change).
        id = self._record.pop("item_type_id")
        self._record.pop("_buckets", {})
        self._record.pop("_deposit", {})
        mjson = Mapping.get_record(id)
        # NOTE(review): ``dumps()`` runs before the ``if mjson`` guard, so
        # a missing mapping record would raise here instead of being
        # skipped -- confirm whether get_record can return None.
        mp = mjson.dumps()
        if mjson:
            for k, v in self._record.items():
                # Only dict-valued properties get the schema sub-mapping;
                # the OAI bookkeeping entry is skipped.
                if isinstance(v, dict) and mp.get(k) and k != "_oai":
                    v.update({self._schema_name: mp.get(
                        k).get(self._schema_name)})
def get_mapping(item_type_id):
    """Get keys of metadata record by mapping.

    Build a dict mapping each default key (and its ``__lang`` companion)
    to the path segments that locate the value inside
    ``metadata.<item>.attribute_value_mlt[0]``.

    :param item_type_id: item type whose mapping record is consulted.
    :return: dict of mapping key -> path list (or the defaults when the
        item type has no mapping).
    """
    from copy import deepcopy

    # BUG FIX: the original bound the shared module-level
    # RECORDS_REST_DEFAULT_MAPPING_DICT directly and then assigned into it,
    # so every call permanently mutated the default for all later callers.
    # Work on a private copy instead.
    mapping_dict = deepcopy(RECORDS_REST_DEFAULT_MAPPING_DICT)
    # Get mapping of this record.
    mapping = Mapping.get_record(item_type_id)
    if not mapping:
        return mapping_dict
    # Update default mapping key and lang by mapping of this record.
    identifier = 'system_identifier'
    for k, v in mapping.items():
        jpcoar = v.get('jpcoar_mapping')
        if not isinstance(jpcoar, dict):
            continue
        for k1, v1 in jpcoar.items():
            for k2, v2 in mapping_dict.items():
                # Default keys are namespaced ('dc:title'); match on the
                # local part only, and only for dict-valued mappings.
                if k1 != k2.split(':')[1] or not isinstance(v1, dict):
                    continue
                key = identifier if identifier in k else k
                key_arr = ['metadata', key, 'attribute_value_mlt', 0]
                lang_arr = key_arr.copy()
                if k1 == 'creator':
                    name = v1.get('creatorName')
                    # Set all key for __lang
                    attr = name.get('@attributes', {})
                    xml_lang = attr.get('xml:lang', '').split('.')
                    lang_arr.extend(get_keys(xml_lang))
                    # Set all key for key
                    name_arr = name.get('@value').split('.')
                    key_arr.extend(get_keys(name_arr))
                elif '.' in v1.get('@value', ''):
                    # Set key for __lang
                    attr = v1.get('@attributes', {})
                    xml_lang = attr.get('xml:lang', '').split('.')
                    lang_arr.extend(get_keys(xml_lang))
                    # Set all key for key
                    name_arr = v1.get('@value').split('.')
                    key_arr.extend(get_keys(name_arr))
                else:
                    # Single-segment path: append the raw values directly.
                    attr = v1.get('@attributes', {})
                    lang_arr.append(attr.get('xml:lang'))
                    key_arr.append(v1.get('@value'))
                mapping_dict[k2] = key_arr
                mapping_dict['{}__lang'.format(k2)] = lang_arr
    return mapping_dict
def mapping_index(ItemTypeID=0):
    """Render the item type mapping view.

    :param ItemTypeID: Item type ID. (Default: 0)
    :return: The rendered template, or a redirect to the first item type
        when the requested one does not exist.
    """
    all_item_types = ItemTypes.get_all()
    if all_item_types is None or not len(all_item_types):
        # No item types registered at all -> error page.
        return render_template(
            current_app.config['WEKO_ITEMTYPES_UI_ERROR_TEMPLATE'])
    if ItemTypes.get_by_id(ItemTypeID) is None:
        # Unknown id -> fall back to the first registered item type.
        return redirect(
            url_for('.mapping_index', ItemTypeID=all_item_types[0].id))
    mapping_json = json.dumps(
        Mapping.get_record(ItemTypeID), indent=4, ensure_ascii=False)
    current_app.logger.debug(mapping_json)
    return render_template(
        current_app.config['WEKO_ITEMTYPES_UI_MAPPING_TEMPLATE'],
        lists=all_item_types, mapping=mapping_json, id=ItemTypeID)
def check_correct_system_props_mapping(object_uuid, system_mapping_config):
    """Validate and return if selection mapping is correct.

    Correct mapping mean item map have the 2 field same with config
    """
    from weko_records.api import ItemsMetadata, Mapping
    from weko_records.serializers.utils import get_mapping

    item_type = ItemsMetadata.get_by_object_id(object_uuid)
    type_mapping = Mapping.get_record(item_type.item_type_id)
    item_map = get_mapping(type_mapping, "jpcoar_mapping")
    if not system_mapping_config:
        # Nothing configured -> the mapping cannot be validated.
        return False
    for key in system_mapping_config:
        # Each configured key must exist in the item map and its
        # configured value must occur within the mapped path.
        if key not in item_map:
            return False
        if system_mapping_config[key] not in item_map[key]:
            return False
    return True
def get_title_pubdate_path(item_type_id):
    """Get title and pubdate path.

    Walks the jpcoar mapping of the item type and records, for the first
    matching ``title`` / ``date`` entries, the item key plus the ``@value``
    (and for title the ``xml:lang``) path segments.

    :param item_type_id: item type whose mapping is inspected.
    :return: result json: ``{'title': [...], 'pubDate': [...]}``.
    """
    result = {'title': '', 'pubDate': ''}
    item_type_mapping = Mapping.get_record(item_type_id)
    title = []
    pub_date = []
    for k, v in item_type_mapping.items():
        jpcoar = v.get("jpcoar_mapping")
        if not isinstance(jpcoar, dict):
            continue
        if 'title' in jpcoar:
            try:
                # FIX: the original used `str(k).index('item') is not None`,
                # relying on ValueError for the "not found" case -- a plain
                # containment test expresses the same condition.
                if 'item' in str(k):
                    title.append(k)
                    title_value = jpcoar['title']
                    if '@value' in title_value:
                        title.append(title_value['@value'])
                    if '@attributes' in title_value:
                        title_lang = title_value['@attributes']
                        if 'xml:lang' in title_lang:
                            title.append(title_lang['xml:lang'])
            except Exception:
                # Malformed mapping entries are skipped silently, as before.
                pass
        elif 'date' in jpcoar:
            try:
                if 'item' in str(k):
                    pub_date.append(k)
                    date_value = jpcoar['date']
                    if '@value' in date_value:
                        pub_date.append(date_value['@value'])
            except Exception:
                pass
    result['title'] = title
    result['pubDate'] = pub_date
    return result
def mapping_index(ItemTypeID=0):
    """Renders an item type mapping view.

    :param ItemTypeID: Item type ID. (Default: 0)
    :return: The rendered template.
    """
    try:
        lists = ItemTypes.get_latest()  # ItemTypes.get_all()
        if lists is None or len(lists) == 0:
            return render_template(
                current_app.config['WEKO_ITEMTYPES_UI_ERROR_TEMPLATE']
            )
        item_type = ItemTypes.get_by_id(ItemTypeID)
        if item_type is None:
            # Unknown id -> redirect to the first known item type.
            return redirect(url_for('.mapping_index', ItemTypeID=lists[0].id))
        itemtype_list = []
        itemtype_prop = item_type.schema.get('properties')
        # Resolve a display label for every schema property, preferring the
        # current UI language's title_i18n over the plain title.
        for key, prop in itemtype_prop.items():
            cur_lang = current_i18n.language
            schema_form = item_type.form
            elemStr = ''
            if 'default' != cur_lang:
                for elem in schema_form:
                    if 'items' in elem:
                        for sub_elem in elem['items']:
                            if 'key' in sub_elem and sub_elem['key'] == key:
                                if 'title_i18n' in sub_elem:
                                    if cur_lang in sub_elem['title_i18n']:
                                        if len(sub_elem['title_i18n'][
                                                cur_lang]) > 0:
                                            elemStr = sub_elem['title_i18n'][
                                                cur_lang]
                                else:
                                    elemStr = sub_elem['title']
                                break
                    else:
                        if elem['key'] == key:
                            if 'title_i18n' in elem:
                                if cur_lang in elem['title_i18n']:
                                    if len(elem['title_i18n'][cur_lang]) > 0:
                                        elemStr = elem['title_i18n'][
                                            cur_lang]
                            else:
                                elemStr = elem['title']
                    if elemStr != '':
                        break
            if elemStr == '':
                # Fall back to the schema-level title.
                elemStr = prop.get('title')
            itemtype_list.append((key, elemStr))
            # itemtype_list.append((key, prop.get('title')))
        # jpcoar_list = []
        mapping_name = request.args.get('mapping_type', 'jpcoar_mapping')
        jpcoar_xsd = WekoSchema.get_all()
        jpcoar_lists = {}
        for item in jpcoar_xsd:
            jpcoar_lists[item.schema_name] = json.loads(item.xsd)
        # jpcoar_prop = json.loads(jpcoar_xsd.model.xsd)
        # for key in jpcoar_prop.keys():
        #     jpcoar_list.append((key, key))
        item_type_mapping = Mapping.get_record(ItemTypeID)
        # mapping = json.dumps(item_type_mapping, indent=4, ensure_ascii=False)
        return render_template(
            current_app.config['WEKO_ITEMTYPES_UI_MAPPING_TEMPLATE'],
            lists=lists,
            hide_mapping_prop=item_type_mapping,
            mapping_name=mapping_name,
            hide_itemtype_prop=itemtype_prop,
            jpcoar_prop_lists=remove_xsd_prefix(jpcoar_lists),
            itemtype_list=itemtype_list,
            id=ItemTypeID
        )
    # NOTE(review): bare except hides all failures behind a 400; also
    # logger.error is given a positional arg without a %s placeholder.
    except:
        current_app.logger.error('Unexpected error: ', sys.exc_info()[0])
        return abort(400)
def itemtype_mapping(ItemTypeID=0):
    """Return the stored mapping of an item type as a JSON response.

    :param ItemTypeID: Item type ID. (Default: 0)
    :return: JSON response carrying the mapping record.
    """
    return jsonify(Mapping.get_record(ItemTypeID))
def serialize_search(self, pid_fetcher, search_result, links=None,
                     item_links_factory=None, **kwargs):
    """Serialize a search result.

    :param pid_fetcher: Persistent identifier fetcher.
    :param search_result: Elasticsearch search result.
    :param links: Dictionary of links to add to response.
    :return: the search result rendered as an Atom/OpenSearch feed string.
    """
    fg = WekoFeedGenerator()
    # Add extentions
    fg.register_extension('dc', DcWekoBaseExtension, DcWekoEntryExtension)
    fg.register_extension('opensearch',
                          extension_class_feed=OpensearchExtension,
                          extension_class_entry=OpensearchEntryExtension)
    fg.register_extension('prism',
                          extension_class_feed=PrismExtension,
                          extension_class_entry=PrismEntryExtension)
    # Set title
    index_meta = {}
    _keywords = request.args.get('q', '')
    _indexId = request.args.get('index_id')
    if _indexId:
        index = Index.query.filter_by(id=_indexId).one_or_none()
        _indexName = index.index_name
        index_meta[_indexId] = _indexName if _indexName else \
            'Nonexistent Index'
        fg.title('WEKO OpenSearch: ' + str(index_meta[_indexId]))
    else:
        fg.title('WEKO OpenSearch: ' + str(_keywords))
    # Set link
    fg.link(href=request.url)
    # Set id
    fg.id(request.url)
    # Set updated
    fg.updated(datetime.now(pytz.utc))
    # Set totalResults
    _totalResults = search_result['hits']['total']
    fg.opensearch.totalResults(str(_totalResults))
    # Set startIndex
    _startIndex = request.args.get('page')
    fg.opensearch.startIndex(str(_startIndex))
    # Set itemPerPage
    _itemPerPage = request.args.get('size')
    fg.opensearch.itemsPerPage(str(_itemPerPage))
    # No query and no index -> emit the (empty) feed header only.
    if not _keywords and not _indexId:
        return fg.atom_str(pretty=True)
    # Cache item maps per item type so each type is mapped only once.
    jpcoar_map = {}
    for hit in search_result['hits']['hits']:
        item_metadata = hit['_source']['_item_metadata']
        item_type_id = item_metadata['item_type_id']
        # NOTE(review): the mapping record is fetched on every hit even
        # when the item map for this type is already cached below.
        type_mapping = Mapping.get_record(item_type_id)
        if item_type_id in jpcoar_map:
            item_map = jpcoar_map[item_type_id]
        else:
            item_map = self.get_mapping(type_mapping, 'jpcoar_mapping')
            jpcoar_map[item_type_id] = item_map
        fe = fg.add_entry()
        # Set title
        # NOTE(review): _enTitle/_jaTitle/_lang are only assigned when the
        # corresponding key exists -- a hit missing them uses the previous
        # iteration's value (or raises NameError on the first hit).
        if 'title_en' in item_metadata:
            _enTitle = item_metadata['title_en']['attribute_value']
        if 'title_ja' in item_metadata:
            _jaTitle = item_metadata['title_ja']['attribute_value']
        if 'lang' in item_metadata:
            _lang = item_metadata['lang']['attribute_value']
        if _lang:
            # Prefer the title matching the record language.
            if (_lang == 'en'):
                if _enTitle:
                    fe.title(_enTitle)
                elif _jaTitle:
                    fe.title(_jaTitle)
            else:
                if _jaTitle:
                    fe.title(_jaTitle)
                elif _enTitle:
                    fe.title(_enTitle)
        else:
            if _enTitle:
                fe.title(_enTitle)
            if _jaTitle:
                fe.title(_jaTitle)
        # Set link
        _pid = item_metadata['control_number']
        item_url = request.host_url + 'records/' + _pid
        fe.link(href=item_url, rel='alternate', type='text/xml')
        # Set oai
        _oai = item_metadata['_oai']['id']
        item_url = request.host_url + \
            'oai2d?verb=GetRecord&metadataPrefix=jpcoar&identifier=' + _oai
        fe.link(href=item_url, rel='alternate', type='text/xml')
        # Set id
        fe.id(item_url)
        # Set weko id
        fe.dc.dc_identifier(_pid)
        # Set aggregationType
        _aggregationType = 'type.@value'
        if _aggregationType in item_map:
            item_id = item_map[_aggregationType].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                type_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                aggregationTypes = type_metadata[
                    item_map[_aggregationType]]
                if aggregationTypes:
                    if isinstance(aggregationTypes, list):
                        for aggregationType in aggregationTypes:
                            fe.prism.aggregationType(aggregationType)
                    else:
                        fe.prism.aggregationType(aggregationTypes)
        # Set item type
        fe.dc.dc_type(hit['_source']['itemtype'])
        # Set mimeType
        _mimeType = 'file.mimeType.@value'
        if _mimeType in item_map:
            item_id = item_map[_mimeType].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                file_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                mime_types = file_metadata[item_map[_mimeType]]
                if mime_types:
                    if isinstance(mime_types, list):
                        for mime_type in mime_types:
                            fe.dc.dc_format(mime_type)
                    else:
                        fe.dc.dc_format(mime_types)
        # Set file uri
        _uri = 'file.URI.@value'
        if _uri in item_map:
            item_id = item_map[_uri].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                uri_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                uri_list = uri_metadata[item_map[_uri]]
                if uri_list:
                    if isinstance(uri_list, list):
                        for uri in uri_list:
                            fe.dc.dc_identifier(uri, False)
                    else:
                        fe.dc.dc_identifier(uri_list, False)
        # Set author info
        request_lang = request.args.get('lang')
        _creatorName_attr_lang = '[email protected]:lang'
        _creatorName_value = 'creator.creatorName.@value'
        if _creatorName_value in item_map:
            item_id = item_map[_creatorName_value].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                creator_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                creator_names = creator_metadata[
                    item_map[_creatorName_value]]
                creator_name_langs = creator_metadata[
                    item_map[_creatorName_attr_lang]]
                if creator_name_langs:
                    if isinstance(creator_name_langs, list):
                        # Names/langs are parallel lists; filter by the
                        # requested language when one was given.
                        for i in range(len(creator_name_langs)):
                            creator_name_lang = creator_name_langs[i]
                            if request_lang:
                                if creator_name_lang == request_lang:
                                    fe.author({
                                        'name': creator_names[i],
                                        'lang': creator_name_lang})
                            else:
                                fe.author({
                                    'name': creator_names[i],
                                    'lang': creator_name_lang})
                    else:
                        if request_lang:
                            if creator_name_langs == request_lang:
                                fe.author({
                                    'name': creator_names,
                                    'lang': creator_name_langs})
                        else:
                            fe.author({
                                'name': creator_names,
                                'lang': creator_name_langs})
        # Set publisher
        _publisher_attr_lang = '[email protected]:lang'
        _publisher_value = 'publisher.@value'
        if _publisher_value in item_map:
            item_id = item_map[_publisher_value].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                publisher_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                publisher_names = publisher_metadata[
                    item_map[_publisher_value]]
                publisher_name_langs = publisher_metadata[
                    item_map[_publisher_attr_lang]]
                if publisher_name_langs:
                    if isinstance(publisher_name_langs, list):
                        for i in range(len(publisher_name_langs)):
                            publisher_name_lang = publisher_name_langs[i]
                            if request_lang:
                                if publisher_name_lang == request_lang:
                                    fe.dc.dc_publisher(
                                        publisher_names[i],
                                        publisher_name_lang)
                            else:
                                fe.dc.dc_publisher(publisher_names[i],
                                                   publisher_name_lang)
                    else:
                        if request_lang:
                            if publisher_name_langs == request_lang:
                                fe.dc.dc_publisher(publisher_names,
                                                   publisher_name_langs)
                        else:
                            fe.dc.dc_publisher(publisher_names,
                                               publisher_name_langs)
        # Set subject
        if _indexId:
            fe.dc.dc_subject(index_meta[_indexId])
        else:
            # Resolve the deepest index in the record path, caching names.
            indexes = item_metadata['path'][0].split('/')
            indexId = indexes[len(indexes) - 1]
            if indexId in index_meta:
                indexName = index_meta[indexId]
            else:
                index = Index.query.filter_by(id=indexId).one_or_none()
                indexName = index.index_name
                index_meta[indexId] = indexName
            fe.dc.dc_subject(indexName)
        # Set publicationName
        _sourceTitle_value = 'sourceTitle.@value'
        if _sourceTitle_value in item_map:
            item_id = item_map[_sourceTitle_value].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                sourceTitle_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                source_titles = sourceTitle_metadata[
                    item_map[_sourceTitle_value]]
                if source_titles:
                    if isinstance(source_titles, list):
                        for source_title in source_titles:
                            fe.prism.publicationName(source_title)
                    else:
                        fe.prism.publicationName(source_titles)
        # Set sourceIdentifier
        _sourceIdentifier_value = 'sourceIdentifier.@value'
        if _sourceIdentifier_value in item_map:
            item_id = item_map[_sourceIdentifier_value].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                sourceIdentifier_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                source_identifiers = sourceIdentifier_metadata[
                    item_map[_sourceIdentifier_value]]
                if source_identifiers:
                    if isinstance(source_identifiers, list):
                        for source_identifier in source_identifiers:
                            fe.prism.issn(source_identifier)
                    else:
                        fe.prism.issn(source_identifiers)
        # Set volume
        _volume = 'volume'
        if _volume in item_map:
            item_id = item_map[_volume].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                volume_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                volumes = volume_metadata[item_map[_volume]]
                if volumes:
                    if isinstance(volumes, list):
                        for volume in volumes:
                            fe.prism.volume(volume)
                    else:
                        fe.prism.volume(volumes)
        # Set number
        _issue = 'issue'
        if _issue in item_map:
            item_id = item_map[_issue].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                issue_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                issues = issue_metadata[item_map[_issue]]
                if issues:
                    if isinstance(issues, list):
                        for issue in issues:
                            fe.prism.number(issue)
                    else:
                        fe.prism.number(issues)
        # Set startingPage
        _pageStart = 'pageStart'
        if _pageStart in item_map:
            item_id = item_map[_pageStart].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                pageStart_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                pageStarts = pageStart_metadata[item_map[_pageStart]]
                if pageStarts:
                    if isinstance(pageStarts, list):
                        for pageStart in pageStarts:
                            fe.prism.startingPage(pageStart)
                    else:
                        fe.prism.startingPage(pageStarts)
        # Set endingPage
        _pageEnd = 'pageEnd'
        if _pageEnd in item_map:
            item_id = item_map[_pageEnd].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                pageEnd_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                pageEnds = pageEnd_metadata[item_map[_pageEnd]]
                if pageEnds:
                    if isinstance(pageEnds, list):
                        for pageEnd in pageEnds:
                            fe.prism.endingPage(pageEnd)
                    else:
                        fe.prism.endingPage(pageEnds)
        # Set publicationDate
        _date = 'date.@value'
        if _date in item_map:
            item_id = item_map[_date].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                date_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                dates = date_metadata[item_map[_date]]
                if dates:
                    if isinstance(dates, list):
                        for date in dates:
                            fe.prism.publicationDate(date)
                    else:
                        fe.prism.publicationDate(dates)
        # Set content
        _description_attr_lang = '[email protected]:lang'
        _description_value = 'description.@value'
        if _description_value in item_map:
            item_id = item_map[_description_value].split('.')[0]
            # Get item data
            if item_id in item_metadata:
                description_metadata = self.get_metadata_from_map(
                    item_metadata[item_id], item_id)
                descriptions = description_metadata[
                    item_map[_description_value]]
                description_langs = description_metadata[
                    item_map[_description_attr_lang]]
                if description_langs:
                    if isinstance(description_langs, list):
                        for i in range(len(description_langs)):
                            description_lang = description_langs[i]
                            if request_lang:
                                if description_lang == request_lang:
                                    fe.content(descriptions[i],
                                               description_lang)
                            else:
                                fe.content(descriptions[i],
                                           description_lang)
                    else:
                        if request_lang:
                            if description_langs == request_lang:
                                fe.content(descriptions,
                                           description_langs)
                        else:
                            fe.content(descriptions, description_langs)
        # Set updated
        _updated = hit['_source']['_updated']
        if _updated:
            fe.updated(_updated)
        # Set creationDate
        _creationDate = hit['_source']['_created']
        if _creationDate:
            fe.prism.creationDate(_creationDate)
        # Set modificationDate
        _modificationDate = hit['_source']['_updated']
        if _modificationDate:
            fe.prism.modificationDate(_modificationDate)
    return fg.atom_str(pretty=True)
def index(self, ItemTypeID=0):
    """Renders an item type mapping view.

    :param ItemTypeID: Item type ID. (Default: 0)
    :return: The rendered template.
    """
    try:
        lists = ItemTypes.get_latest()  # ItemTypes.get_all()
        if lists is None or len(lists) == 0:
            return self.render(
                current_app.
                config['WEKO_ITEMTYPES_UI_ADMIN_ERROR_TEMPLATE'])
        item_type = ItemTypes.get_by_id(ItemTypeID)
        if item_type is None:
            current_app.logger.info(lists[0].item_type[0])
            # Unknown id -> redirect to the first known item type.
            return redirect(
                url_for('itemtypesmapping.index',
                        ItemTypeID=lists[0].item_type[0].id))
        itemtype_list = []
        itemtype_prop = item_type.schema.get('properties')
        # Only rows shown in the mapping table: pubdate plus configured rows.
        table_rows = ['pubdate']
        render_table_row = item_type.render.get('table_row')
        if isinstance(render_table_row, list):
            table_rows.extend(render_table_row)
        # Resolve a display label for every table row, preferring the
        # current UI language's title_i18n over the plain title.
        for key in table_rows:
            prop = itemtype_prop.get(key)
            cur_lang = current_i18n.language
            schema_form = item_type.form
            elemStr = ''
            if 'default' != cur_lang:
                for elem in schema_form:
                    if 'items' in elem:
                        for sub_elem in elem['items']:
                            if 'key' in sub_elem and sub_elem['key'] == key:
                                if 'title_i18n' in sub_elem:
                                    if cur_lang in sub_elem['title_i18n']:
                                        if len(sub_elem['title_i18n']
                                               [cur_lang]) > 0:
                                            elemStr = sub_elem[
                                                'title_i18n'][cur_lang]
                                else:
                                    elemStr = sub_elem['title']
                                break
                    else:
                        if elem['key'] == key:
                            if 'title_i18n' in elem:
                                if cur_lang in elem['title_i18n']:
                                    if len(elem['title_i18n']
                                           [cur_lang]) > 0:
                                        elemStr = elem['title_i18n'][
                                            cur_lang]
                            else:
                                elemStr = elem['title']
                    if elemStr != '':
                        break
            if elemStr == '':
                # Fall back to the schema-level title.
                elemStr = prop.get('title')
            itemtype_list.append((key, elemStr))
        mapping_name = request.args.get('mapping_type', 'jpcoar_mapping')
        jpcoar_xsd = WekoSchema.get_all()
        jpcoar_lists = {}
        for item in jpcoar_xsd:
            jpcoar_lists[item.schema_name] = json.loads(item.xsd)
        item_type_mapping = Mapping.get_record(ItemTypeID)
        return self.render(
            current_app.config['WEKO_ITEMTYPES_UI_ADMIN_MAPPING_TEMPLATE'],
            lists=lists,
            hide_mapping_prop=item_type_mapping,
            mapping_name=mapping_name,
            hide_itemtype_prop=itemtype_prop,
            jpcoar_prop_lists=remove_xsd_prefix(jpcoar_lists),
            itemtype_list=itemtype_list,
            id=ItemTypeID,
            lang_code=session.get('selected_language', 'en')  # Set default
        )
    # NOTE(review): BaseException also catches KeyboardInterrupt/SystemExit;
    # logger.error is passed a positional arg without a %s placeholder.
    except BaseException as e:
        current_app.logger.error('Unexpected error: ', e)
        return abort(400)
def index(self, ItemTypeID=0):
    """Renders an item type mapping view.

    :param ItemTypeID: Item type ID. (Default: 0)
    :return: The rendered template.
    """
    try:
        lists = ItemTypes.get_latest()  # ItemTypes.get_all()
        if lists is None or len(lists) == 0:
            return self.render(
                current_app.config['WEKO_ITEMTYPE'
                                   'S_UI_ADMIN_ERROR_TEMPLATE']
            )
        item_type = ItemTypes.get_by_id(ItemTypeID)
        if item_type is None:
            current_app.logger.info(lists[0].item_type[0])
            # Unknown id -> redirect to the first known item type.
            return redirect(url_for('itemtypesmapping.index',
                                    ItemTypeID=lists[0].item_type[0].id))
        itemtype_list = []
        itemtype_prop = item_type.schema.get('properties')
        # Determine whether the current user holds the system-admin role.
        sys_admin = current_app.config['WEKO_ADMIN_PERMISSION_ROLE_SYSTEM']
        is_admin = False
        with db.session.no_autoflush:
            for role in list(current_user.roles or []):
                if role.name == sys_admin:
                    is_admin = True
                    break
        cur_lang = current_i18n.language
        meta_system = item_type.render.get('meta_system')
        table_rows = ['pubdate']
        render_table_row = item_type.render.get('table_row')
        meta_system_items = ['system_identifier_doi',
                             'system_identifier_hdl',
                             'system_identifier_uri',
                             'system_file',
                             'updated_date',
                             'created_date',
                             'persistent_identifier_doi',
                             'persistent_identifier_h',
                             'ranking_page_url',
                             'belonging_index_info']
        # Localize the display title of each known system property,
        # falling back to English and then to an empty string.
        for key in meta_system_items:
            if isinstance(meta_system, dict) and meta_system.get(key) \
                    and isinstance(meta_system[key], dict):
                if meta_system[key]['title_i18n'] and cur_lang in \
                        meta_system[key]['title_i18n'] and \
                        meta_system[key]['title_i18n'][cur_lang] and \
                        meta_system[key]['title_i18n'][cur_lang].strip():
                    meta_system[key]['title'] = \
                        meta_system[key]['title_i18n'][cur_lang]
                else:
                    meta_system[key]['title'] = \
                        meta_system[key]['title_i18n']['en'] if \
                        meta_system[key]['title_i18n'] and \
                        meta_system[key]['title_i18n']['en'] else ''
        if isinstance(render_table_row, list):
            table_rows.extend(render_table_row)
        # Resolve a display label for every table row, preferring the
        # current UI language's title_i18n over the plain title.
        for key in table_rows:
            prop = itemtype_prop.get(key)
            schema_form = item_type.form
            elem_str = ''
            if 'default' != cur_lang:
                for elem in schema_form:
                    if 'items' in elem:
                        # Check the group element itself first...
                        if elem['key'] == key:
                            if 'title_i18n' in elem:
                                if cur_lang in elem['title_i18n']:
                                    if len(elem['title_i18n']
                                           [cur_lang]) > 0:
                                        elem_str = elem['title_i18n'][
                                            cur_lang]
                            else:
                                elem_str = elem['title']
                        # ...then its sub elements.
                        for sub_elem in elem['items']:
                            if 'key' in sub_elem and \
                                    sub_elem['key'] == key:
                                if 'title_i18n' in sub_elem:
                                    if cur_lang in sub_elem['title_i18n']:
                                        if len(
                                            sub_elem['title_i18n'][
                                                cur_lang]) > 0:
                                            elem_str = \
                                                sub_elem['title_i18n'][
                                                    cur_lang]
                                else:
                                    elem_str = sub_elem['title']
                                break
                    else:
                        if elem['key'] == key:
                            if 'title_i18n' in elem:
                                if cur_lang in elem['title_i18n']:
                                    if len(elem['title_i18n']
                                           [cur_lang]) > 0:
                                        elem_str = elem['title_i18n'][
                                            cur_lang]
                            else:
                                elem_str = elem['title']
                    if elem_str != '':
                        break
            if elem_str == '':
                # Fall back to the schema-level title.
                elem_str = prop.get('title')
            itemtype_list.append((key, elem_str))
        mapping_name = request.args.get('mapping_type', 'jpcoar_mapping')
        jpcoar_xsd = WekoSchema.get_all()
        jpcoar_lists = {}
        for item in jpcoar_xsd:
            jpcoar_lists[item.schema_name] = json.loads(item.xsd)
        item_type_mapping = Mapping.get_record(ItemTypeID)
        return self.render(
            current_app.config['WEKO_ITEMTYPES_UI_ADMIN_MAPPING_TEMPLATE'],
            lists=lists,
            hide_mapping_prop=item_type_mapping,
            mapping_name=mapping_name,
            hide_itemtype_prop=itemtype_prop,
            jpcoar_prop_lists=remove_xsd_prefix(jpcoar_lists),
            meta_system=meta_system,
            itemtype_list=itemtype_list,
            id=ItemTypeID,
            is_system_admin=is_admin,
            lang_code=session.get('selected_language', 'en')  # Set default
        )
    # NOTE(review): BaseException also catches KeyboardInterrupt/SystemExit;
    # logger.error is passed a positional arg without a %s placeholder.
    except BaseException:
        current_app.logger.error('Unexpected error: ', sys.exc_info()[0])
        return abort(400)
def make_combined_pdf(pid, obj_file_uri, fileobj, obj, lang_user):
    """Make the cover-page-combined PDF file.

    :param pid: PID object
    :param obj_file_uri: URI of the file object
    :param fileobj: file info dict (used for its checksum on fallback)
    :param obj: file object instance whose bucket is served on fallback
    :param lang_user: LANGUAGE of access user
    :return: cover-page-combined PDF file object
    """
    # Coverpage label translations for the user's language.
    lang_filepath = current_app.config['PDF_COVERPAGE_LANG_FILEPATH']\
        + lang_user + current_app.config['PDF_COVERPAGE_LANG_FILENAME']
    pidObject = PersistentIdentifier.get('recid', pid.pid_value)
    item_metadata_json = ItemsMetadata.get_record(pidObject.object_uuid)
    item_type = ItemsMetadata.get_by_object_id(pidObject.object_uuid)
    item_type_id = item_type.item_type_id
    type_mapping = Mapping.get_record(item_type_id)
    item_map = get_mapping(type_mapping, "jpcoar_mapping")
    with open(lang_filepath) as json_datafile:
        lang_data = json.loads(json_datafile.read())
    # Initialize Instance
    pdf = FPDF('P', 'mm', 'A4')
    pdf.add_page()
    pdf.set_margins(20.0, 20.0)
    pdf.set_fill_color(100, 149, 237)
    pdf.add_font(
        'IPAexg', '', current_app.config["JPAEXG_TTF_FILEPATH"], uni=True)
    pdf.add_font(
        'IPAexm', '', current_app.config["JPAEXM_TTF_FILEPATH"], uni=True)
    # Parameters such as width and height of rows/columns
    w1 = 40  # width of the left column
    w2 = 130  # width of the right column
    footer_w = 90  # width of the footer cell
    # url_oapolicy_h = 7 # height of the URL & OA-policy
    # height of the URL & OA-policy
    url_oapolicy_h = current_app.config['URL_OA_POLICY_HEIGHT']
    # title_h = 8 # height of the title
    title_h = current_app.config['TITLE_HEIGHT']  # height of the title
    # header_h = 20 # height of the header cell
    header_h = current_app.config['HEADER_HEIGHT']  # height of the header cell
    # footer_h = 4 # height of the footer cell
    footer_h = current_app.config['FOOTER_HEIGHT']  # height of the footer cell
    # meta_h = 9 # height of the metadata cell
    # height of the metadata cell
    meta_h = current_app.config['METADATA_HEIGHT']
    # number of maximum letters that can be contained in the right column
    max_letters_num = 51
    cc_logo_xposition = 160  # x-position of Creative Commons logos
    # Get the header settings
    record = PDFCoverPageSettings.find(1)
    header_display_type = record.header_display_type
    header_output_string = record.header_output_string
    header_output_image = record.header_output_image
    header_display_position = record.header_display_position
    # Set the header position
    positions = {}
    if header_display_position == 'left':
        positions['str_position'] = 'L'
        positions['img_position'] = 20
    elif header_display_position == 'center' \
            or header_display_position is None:
        positions['str_position'] = 'C'
        positions['img_position'] = 85
    elif header_display_position == 'right':
        positions['str_position'] = 'R'
        positions['img_position'] = 150
    # Show header(string or image)
    if header_display_type == 'string':
        pdf.set_font('IPAexm', '', 22)
        pdf.multi_cell(
            w1 + w2,
            header_h,
            header_output_string,
            0,
            positions['str_position'],
            False)
    else:
        pdf.image(
            header_output_image,
            x=positions['img_position'],
            y=None,
            w=0,
            h=30,
            type='')
        pdf.set_y(55)
    # Title settings
    title = item_metadata_json['title']
    pdf.set_font('IPAexm', '', 20)
    pdf.multi_cell(w1 + w2, title_h, title, 0, 'L', False)
    pdf.ln(h='15')
    # Metadata
    fg = WekoFeedGenerator()
    fe = fg.add_entry()
    # Resolve the item keys that hold file / creator info via the mapping.
    _file = 'file.URI.@value'
    _file_item_id = None
    if _file in item_map:
        _file_item_id = item_map[_file].split('.')[0]
        _file_item_id = _file_item_id.replace('fileinfo', 'files')
    _creator = 'creator.creatorName.@value'
    _creator_item_id = None
    if _creator in item_map:
        _creator_item_id = item_map[_creator].split('.')[0]
    publisher_attr_lang = '[email protected]:lang'
    publisher_value = 'publisher.@value'
    publisher_item_id = None
    publisher_lang_id = None
    publisher_text_id = None
    keyword_attr_lang = '[email protected]:lang'
    keyword_attr_value = 'subject.@value'
    keyword_base = None
    keyword_lang = None
    pdf.set_font('Arial', '', 14)
    pdf.set_font('IPAexg', '', 14)
    # Normalize the display language name.
    if item_metadata_json['lang'] == 'en':
        item_metadata_json['lang'] = 'English'
    elif item_metadata_json['lang'] == 'ja':
        item_metadata_json['lang'] = 'Japanese'
    try:
        lang_field = item_map['language.@value'].split('.')
        if item_metadata_json[lang_field[0]][lang_field[1]] == 'eng':
            item_metadata_json['lang'] = 'English'
        elif item_metadata_json[lang_field[0]][lang_field[1]] == 'jpn':
            item_metadata_json['lang'] = 'Japanese'
    except BaseException:
        pass
    try:
        lang = item_metadata_json.get('lang')
    except (KeyError, IndexError):
        lang = None
    try:
        # Pick the publisher in the user's language, defaulting to English.
        publisher_item_id = item_map[publisher_attr_lang].split('.')[0]
        publisher_lang_ids = item_map[publisher_attr_lang].split('.')[1:]
        publisher_text_ids = item_map[publisher_value].split('.')[1:]
        publisher = None
        default_publisher = None
        publishers = item_metadata_json[publisher_item_id]
        pair_name_language_publisher = get_pair_value(publisher_text_ids,
                                                      publisher_lang_ids,
                                                      publishers)
        for publisher_name, publisher_lang in pair_name_language_publisher:
            if publisher_lang == lang_user:
                publisher = publisher_name
            if publisher_lang == 'en':
                default_publisher = publisher_name
        if publisher is None:
            publisher = default_publisher
    except (KeyError, IndexError):
        publisher = None
    try:
        pubdate = item_metadata_json.get('pubdate')
    except (KeyError, IndexError):
        pubdate = None
    try:
        keyword_item_id = item_map[keyword_attr_lang].split('.')[0]
        keyword_item_langs = item_map[keyword_attr_lang].split('.')[1:]
        keyword_item_values = item_map[keyword_attr_value].split('.')[1:]
        keyword_base = item_metadata_json.get(keyword_item_id)
        keywords_ja = None
        keywords_en = None
        pair_name_language_keyword = get_pair_value(keyword_item_values,
                                                    keyword_item_langs,
                                                    keyword_base)
        # NOTE(review): the loop variable ``lang`` rebinds the language
        # value computed above, which is later printed in metadata_dict.
        for name, lang in pair_name_language_keyword:
            keyword_lang = lang
            if keyword_lang == 'ja':
                keywords_ja = name
            elif keyword_lang == 'en':
                keywords_en = name
    except (KeyError, IndexError):
        pass
    creator_items = item_metadata_json.get(_creator_item_id)
    if type(creator_items) is dict:
        creator_items = [creator_items]
    creator_mail_list = []
    creator_name_list = []
    creator_affiliation_list = []
    for creator_item in creator_items:
        # Get creator mail
        if creator_item.get('creatorMails'):
            for creator_mail in creator_item.get('creatorMails'):
                if creator_mail.get('creatorMail'):
                    creator_mail_list.append(creator_mail.get('creatorMail'))
        # Get creator name
        default_creator_name_list = []
        if creator_item.get('creatorNames'):
            for creator_name in creator_item.get('creatorNames'):
                if creator_name.get('creatorNameLang') == lang_user:
                    creator_name_list.append(creator_name.get('creatorName'))
                if creator_name.get('creatorNameLang') == 'en':
                    default_creator_name_list.append(creator_name.get(
                        'creatorName'))
        if not creator_name_list and default_creator_name_list:
            creator_name_list = default_creator_name_list
        # Get creator affiliation
        default_creator_affiliation_list = []
        if creator_item.get('affiliation'):
            for creator_affiliation in creator_item.get('affiliation'):
                if creator_affiliation.get('affiliationNameLang') \
                        == lang_user:
                    creator_affiliation_list.append(creator_affiliation.get(
                        'affiliationName'))
                if creator_affiliation.get('affiliationNameLang') == 'en':
                    default_creator_affiliation_list.\
                        append(creator_affiliation.get('affiliationName'))
        if not creator_affiliation_list \
                and default_creator_affiliation_list:
            creator_affiliation_list = default_creator_affiliation_list
    seperator = ', '
    metadata_dict = {
        "lang": lang,
        "publisher": publisher,
        "pubdate": pubdate,
        "keywords_ja": keywords_ja,
        "keywords_en": keywords_en,
        "creator_mail": seperator.join(creator_mail_list),
        "creator_name": seperator.join(creator_name_list),
        "affiliation": seperator.join(creator_affiliation_list)
    }
    # Change the values from None to '' for printing
    for key in metadata_dict:
        if metadata_dict[key] is None:
            metadata_dict[key] = ''
    metadata_list = [
        "{}: {}".format(lang_data["Metadata"]["LANG"],
                        metadata_dict["lang"]),
        "{}: {}".format(
            lang_data["Metadata"]["PUBLISHER"],
            metadata_dict["publisher"]),
        "{}: {}".format(
            lang_data["Metadata"]["PUBLICDATE"],
            metadata_dict["pubdate"]),
        "{} (Ja): {}".format(
            lang_data["Metadata"]["KEY"],
            metadata_dict["keywords_ja"]),
        "{} (En): {}".format(
            lang_data["Metadata"]["KEY"],
            metadata_dict["keywords_en"]),
        "{}: {}".format(
            lang_data["Metadata"]["AUTHOR"],
            metadata_dict["creator_name"]),
        "{}: {}".format(
            lang_data["Metadata"]["EMAIL"],
            metadata_dict["creator_mail"]),
        "{}: {}".format(
            lang_data["Metadata"]["AFFILIATED"],
            metadata_dict["affiliation"])
    ]
    metadata = '\n'.join(metadata_list)
    # Estimate extra line feeds caused by wrapping wide (CJK) text.
    metadata_lfnum = int(metadata.count('\n'))
    for item in metadata_list:
        metadata_lfnum += int(get_east_asian_width_count(item)
                              ) // max_letters_num
    url = ''  # will be modified later
    url_lfnum = int(get_east_asian_width_count(url)) // max_letters_num
    oa_policy = ''  # will be modified later
    oa_policy_lfnum = int(
        get_east_asian_width_count(oa_policy)) // max_letters_num
    # Save top coordinate
    top = pdf.y
    # Calculate x position of next cell
    offset = pdf.x + w1
    pdf.multi_cell(w1,
                   meta_h,
                   lang_data["Title"]["METADATA"]
                   + '\n' * (metadata_lfnum + 1),
                   1, 'C', True)
    # Reset y coordinate
    pdf.y = top
    # Move to computed offset
    pdf.x = offset
    pdf.multi_cell(w2, meta_h, metadata, 1, 'L', False)
    top = pdf.y
    pdf.multi_cell(w1,
                   url_oapolicy_h,
                   lang_data["Title"]["URL"] + '\n' * (url_lfnum + 1),
                   1, 'C', True)
    pdf.y = top
    pdf.x = offset
    pdf.multi_cell(w2, url_oapolicy_h, url, 1, 'L', False)
    top = pdf.y
    pdf.multi_cell(w1,
                   url_oapolicy_h,
                   lang_data["Title"]["OAPOLICY"]
                   + '\n' * (oa_policy_lfnum + 1),
                   1, 'C', True)
    pdf.y = top
    pdf.x = offset
    pdf.multi_cell(w2, url_oapolicy_h, oa_policy, 1, 'L', False)
    pdf.ln(h=1)
    # Footer
    pdf.set_font('Courier', '', 10)
    pdf.set_x(108)
    try:
        # NOTE: ``license`` shadows the builtin; kept as-is (doc-only edit).
        license = item_metadata_json[_file_item_id][0].get('licensetype')
    except (KeyError, IndexError, TypeError):
        license = None
    list_license_dict = current_app.config['WEKO_RECORDS_UI_LICENSE_DICT']
    for item in list_license_dict:
        if item['value'] == license:
            get_license_pdf(license, item_metadata_json, pdf, _file_item_id,
                            footer_w, footer_h, cc_logo_xposition, item)
            break
    else:
        # No matching license: render an empty footer cell.
        pdf.multi_cell(footer_w, footer_h, '', 0, 'L', False)
    """ Convert PDF cover page data as bytecode """
    output = pdf.output(dest='S').encode('latin-1')
    b_output = io.BytesIO(output)
    # Combine cover page and existing pages
    cover_page = PdfFileReader(b_output)
    # NOTE(review): this handle is never closed (leaks until GC).
    f = open(obj_file_uri, "rb")
    existing_pages = PdfFileReader(f)
    # In the case the PDF file is encrypted by the password, ''(i.e. not
    # encrypted intentionally)
    if existing_pages.isEncrypted:
        try:
            existing_pages.decrypt('')
        except BaseException:  # Errors such as NotImplementedError
            return ObjectResource.send_object(
                obj.bucket, obj,
                expected_chksum=fileobj.get('checksum'),
                logger_data={
                    'bucket_id': obj.bucket_id,
                    'pid_type': pid.pid_type,
                    'pid_value': pid.pid_value,
                },
                as_attachment=False)
    # In the case the PDF file is encrypted by the password except ''
    if existing_pages.isEncrypted:
        return ObjectResource.send_object(
            obj.bucket, obj,
            expected_chksum=fileobj.get('checksum'),
            logger_data={
                'bucket_id': obj.bucket_id,
                'pid_type': pid.pid_type,
                'pid_value': pid.pid_value,
            },
            as_attachment=False)
    combined_pages = PdfFileWriter()
    combined_pages.addPage(cover_page.getPage(0))
    for page_num in range(existing_pages.numPages):
        existing_page = existing_pages.getPage(page_num)
        combined_pages.addPage(existing_page)
    # Download the newly generated combined PDF file
    try:
        combined_filename = 'CV_' + datetime.now().strftime('%Y%m%d') \
            + '_' + item_metadata_json[_file_item_id][0].get("filename")
    except (KeyError, IndexError):
        combined_filename = 'CV_' + title + '.pdf'
    combined_filepath = "/code/invenio/{}.pdf".format(combined_filename)
    combined_file = open(combined_filepath, "wb")
    combined_pages.write(combined_file)
    combined_file.close()
    return send_file(
        combined_filepath,
        as_attachment=True,
        attachment_filename=combined_filename,
        mimetype='application/pdf',
        cache_timeout=-1)
def find_rss_value(data, keyword):
    """Extract a single RSS field from an Elasticsearch hit.

    Arguments:
        data {dictionary} -- Elasticsearch hit; must carry a ``_source`` key.
        keyword {string} -- Name of the RSS field to extract.

    Returns:
        The field value, ``''`` when the field is absent or unknown, or
        ``None`` when *data* has no usable ``_source`` (and, mirroring the
        historical behaviour, when a ``description`` exists but contains no
        ``Abstract`` entry).
    """
    if not data or not data.get('_source'):
        return None
    source = data.get('_source')
    meta_data = source.get('_item_metadata')
    if keyword == 'title':
        return meta_data.get('item_title')
    elif keyword == 'link':
        root_url = request.url_root
        root_url = str(root_url).replace('/api/', '/')
        record_number = get_rss_data_source(meta_data, 'control_number')
        return '' if record_number == '' else \
            root_url + 'records/' + record_number
    elif keyword == 'seeAlso':
        return config.WEKO_RDF_SCHEMA
    elif keyword == 'creator':
        creator = source.get('creator')
        if (not creator or not creator.get('familyName')
                or not creator.get('givenName')):
            return ''
        # Pair the name parts positionally.  zip() also protects against
        # lists of different lengths (the old index loop over
        # len(family_name) raised IndexError when givenName was shorter).
        list_creator = list()
        for family_name, given_name in zip(creator.get('familyName'),
                                           creator.get('givenName')):
            if not family_name:
                continue
            if given_name:
                list_creator.append(family_name + '.' + given_name)
            else:
                list_creator.append(family_name)
        return list_creator
    elif keyword == 'publisher':
        return get_rss_data_source(source, 'publisher')
    elif keyword == 'sourceTitle':
        return get_rss_data_source(source, 'sourceTitle')
    elif keyword == 'issn':
        result = ''
        # Empty list / falsy first element short-circuits safely.
        if source.get('sourceIdentifier') and source.get(
                'sourceIdentifier')[0]:
            source_identifier = source.get('sourceIdentifier')[0]
            result = get_rss_data_source(source_identifier, 'value')
        return result
    elif keyword in ('volume', 'issue', 'pageStart', 'pageEnd'):
        # These four fields are plain pass-through lookups.
        return get_rss_data_source(source, keyword)
    elif keyword == 'date':
        result = ''
        dates = source.get('date')
        # Only dates explicitly typed 'Issued' are published in the feed.
        if dates and dates[0] and \
                get_rss_data_source(dates[0], 'dateType') == 'Issued':
            result = get_rss_data_source(dates[0], 'value')
        return result
    elif keyword == 'description':
        if source.get('description') and source.get('description')[0]:
            item_type_mapping = Mapping.get_record(
                source.get('_item_metadata').get('item_type_id'))
            item_map = get_mapping(item_type_mapping, "jpcoar_mapping")
            # NOTE(review): this mapping key was corrupted in the file
            # ('*****@*****.**', an email-redaction artifact).  Reconstructed
            # from the jpcoar key convention used elsewhere in this module
            # ('description.@value', 'file.mimeType.@value') — confirm
            # against the item-type mapping definitions.
            desc_typ = item_map.get('description.@attributes.descriptionType')
            desc_val = item_map.get('description.@value')
            if not desc_typ or not desc_val:
                # Mapping does not describe descriptions; the original
                # crashed here with AttributeError on desc_typ.split().
                return ''
            desc_dat = source.get('_item_metadata').get(
                desc_typ.split('.')[0])
            if desc_dat and desc_dat.get('attribute_value_mlt'):
                list_des_data = get_pair_value(
                    desc_val.split('.')[1:],
                    desc_typ.split('.')[1:],
                    desc_dat.get('attribute_value_mlt'))
                for des_text, des_type in list_des_data:
                    # Only the abstract is exposed as the RSS description.
                    if des_type == 'Abstract':
                        return des_text
        else:
            return ''
    elif keyword == '_updated':
        return get_rss_data_source(source, '_updated')
    else:
        return ''
def output_open_search_detail_data(self):
    """Build the OpenSearch detail feed (Atom or RSS) for the current search.

    Reads the active request's query parameters (``q``, ``index_id``,
    ``page_no``, ``list_view_num``, ``lang``) and renders
    ``self.search_result`` (an Elasticsearch response dict) into a feed.

    :return: the serialized feed string — Atom when ``self.output_type``
        equals ``self.OUTPUT_ATOM``, RSS otherwise.
    """

    def _mapped_values(item_map, item_metadata, map_key):
        """Resolve *map_key* through the jpcoar item map and return the
        matching metadata values as a list ([] when absent).

        Replaces eight copy-pasted lookup blocks; one of them guarded with
        a broken one-argument ``isinstance(issue_metadata)`` call (TypeError
        at runtime) — this helper applies the intended
        ``isinstance(..., dict)`` check everywhere.
        """
        if map_key not in item_map:
            return []
        meta_key = item_map[map_key]
        item_id = meta_key.split('.')[0]
        if item_id not in item_metadata:
            return []
        metadata = get_metadata_from_map(item_metadata[item_id], item_id)
        values = metadata.get(meta_key) if isinstance(metadata, dict) else None
        if not values:
            return []
        return values if isinstance(values, list) else [values]

    fg = WekoFeedGenerator()
    # Add extensions
    fg.register_extension('dc', DcWekoBaseExtension, DcWekoEntryExtension)
    fg.register_extension('opensearch',
                          extension_class_feed=OpensearchExtension,
                          extension_class_entry=OpensearchEntryExtension)
    fg.register_extension('prism',
                          extension_class_feed=PrismExtension,
                          extension_class_entry=PrismEntryExtension)
    # Set title
    index_meta = {}
    _keywords = request.args.get('q', '')
    _index_id = request.args.get('index_id', type=str)
    if _index_id:
        index = None
        if _index_id.isnumeric():
            index = Index.query.filter_by(id=int(_index_id)).one_or_none()
        _index_name = 'Nonexistent Index' \
            if index is None else index.index_name
        index_meta[_index_id] = 'Unnamed Index' \
            if _index_name is None else _index_name
        fg.title('WEKO OpenSearch: ' + str(index_meta[_index_id]))
    else:
        fg.title('WEKO OpenSearch: ' + str(_keywords))
    # Set link
    fg.link(href=request.url)
    # Set totalResults
    _total_results = self.search_result['hits']['total']
    fg.opensearch.totalResults(str(_total_results))
    if self.output_type == self.OUTPUT_ATOM:
        # Set id
        fg.id(request.url)
        # Set updated
        fg.updated(datetime.now(pytz.utc))
    else:
        # Set date
        fg.dc.dc_date(str(datetime.now(pytz.utc)))
    # Set Request URL
    if int(_total_results) != 0:
        fg.requestUrl(request.url)
    # Non-numeric or missing paging parameters fall back to defaults.
    start_page = request.args.get('page_no', type=str)
    start_page = 1 if start_page is None or not start_page.isnumeric() \
        else int(start_page)
    size = request.args.get('list_view_num', type=str)
    size = 20 if size is None or not size.isnumeric() else int(size)
    # Set startIndex
    _start_index = (start_page - 1) * size + 1
    fg.opensearch.startIndex(str(_start_index))
    # Set itemPerPage
    _item_per_page = len(self.search_result['hits']['hits'])
    fg.opensearch.itemsPerPage(str(_item_per_page))
    # Set language
    request_lang = request.args.get('lang')
    fg.language(request_lang if request_lang else 'en')
    rss_items = []
    jpcoar_map = {}
    for hit in self.search_result['hits']['hits']:
        item_metadata = hit['_source']['_item_metadata']
        item_type_id = item_metadata['item_type_id']
        # Fetch the mapping record only on a cache miss.  The original
        # called Mapping.get_record() unconditionally, defeating the
        # jpcoar_map cache and issuing one DB fetch per hit.
        if item_type_id in jpcoar_map:
            item_map = jpcoar_map[item_type_id]
        else:
            type_mapping = Mapping.get_record(item_type_id)
            item_map = get_mapping(type_mapping, 'jpcoar_mapping')
            jpcoar_map[item_type_id] = item_map
        fe = fg.add_entry()
        # Set title
        fe.title(item_metadata.get('item_title', ''))
        # Set link
        _pid = item_metadata['control_number']
        item_url = request.host_url + 'records/' + _pid
        fe.link(href=item_url, rel='alternate', type='text/xml')
        oai_param = 'oai2d?verb=GetRecord&metadataPrefix=jpcoar&identifier='
        _oai = hit['_source']['_oai']['id']
        if self.output_type == self.OUTPUT_ATOM:
            # Set oai link and id
            item_url = request.host_url + oai_param + _oai
            fe.link(href=item_url, rel='alternate', type='text/xml')
            fe.id(item_url)
        else:
            # Set oai
            oai_url = request.host_url + oai_param + _oai
            fe.seeAlso(oai_url)
            # Set item url
            fe.itemUrl(item_url)
            # Add to channel item list
            rss_items.append(item_url)
        # Set weko id
        fe.dc.dc_identifier(_pid)
        # Set aggregationType
        for aggregation_type in _mapped_values(
                item_map, item_metadata, 'type.@value'):
            fe.prism.aggregationType(aggregation_type)
        # Set item type
        fe.dc.dc_type(hit['_source']['itemtype'])
        # Set mimeType
        for mime_type in _mapped_values(
                item_map, item_metadata, 'file.mimeType.@value'):
            fe.dc.dc_format(mime_type)
        # Set file uri
        for uri in _mapped_values(
                item_map, item_metadata, 'file.URI.@value'):
            fe.dc.dc_identifier(uri, False)
        # Set author info
        self._set_author_info(fe, item_map, item_metadata, request_lang)
        # Set publisher
        self._set_publisher(fe, item_map, item_metadata, request_lang)
        # Set subject
        if _index_id:
            fe.dc.dc_subject(index_meta[_index_id])
        else:
            indexes = item_metadata['path'][0].split('/')
            index_id = indexes[-1]
            if index_id in index_meta:
                index_name = index_meta[index_id]
            else:
                # NOTE(review): as in the original, a missing index record
                # raises AttributeError on index.index_name — confirm items
                # cannot outlive their index before hardening this.
                index = Index.query.filter_by(id=index_id).one_or_none()
                index_name = index.index_name
                index_meta[index_id] = index_name
            fe.dc.dc_subject(index_name)
        # Set publicationName
        for source_title in _mapped_values(
                item_map, item_metadata, 'sourceTitle.@value'):
            fe.prism.publicationName(source_title)
        # Set sourceIdentifier
        self._set_source_identifier(fe, item_map, item_metadata)
        # Set volume
        for volume in _mapped_values(item_map, item_metadata, 'volume'):
            fe.prism.volume(volume)
        # Set number (this was the path with the broken isinstance call)
        for issue in _mapped_values(item_map, item_metadata, 'issue'):
            fe.prism.number(issue)
        # Set startingPage
        for page_start in _mapped_values(
                item_map, item_metadata, 'pageStart'):
            fe.prism.startingPage(page_start)
        # Set endingPage
        for page_end in _mapped_values(item_map, item_metadata, 'pageEnd'):
            fe.prism.endingPage(page_end)
        # Set publicationDate
        self._set_publication_date(fe, item_map, item_metadata)
        # Set content
        self._set_description(fe, item_map, item_metadata, request_lang)
        if self.output_type == self.OUTPUT_ATOM:
            # Set updated
            _updated = hit['_source']['_updated']
            if _updated:
                fe.updated(_updated)
            else:
                publish_date = item_metadata['pubdate']['attribute_value']
                if publish_date:
                    fe.dc.dc_date(str(datetime.now(pytz.utc)))
        # Set file preview url
        fe.prism.url(item_url)
        # Set creationDate
        _creation_date = hit['_source']['_created']
        if _creation_date:
            fe.prism.creationDate(_creation_date)
        # Set modificationDate
        _modification_date = hit['_source']['_updated']
        if _modification_date:
            fe.prism.modificationDate(_modification_date)
    if self.output_type == self.OUTPUT_ATOM:
        return fg.atom_str(pretty=True)
    # Set channel items
    fg.items(rss_items)
    return fg.rss_str(pretty=True)