Example #1
    def _fetch_data(self, config, provider):
        url = config['url']
        api_key = config['api_key']

        last_update = provider.get(
            'last_updated', utcfromtimestamp(0)).strftime('%Y-%m-%dT%H:%M:%S')

        # Results are paginated so we'll read this many at a time
        offset_jump = 10

        params = {'start': last_update, 'limit': offset_jump}
        headers = {'apikey': api_key}

        items = []

        offset = 0
        while True:
            params['offset'] = offset

            try:
                response = requests.get(url,
                                        params=params,
                                        headers=headers,
                                        timeout=30)
            except requests.exceptions.ConnectionError as err:
                raise IngestApiError.apiConnectionError(exception=err)

            if response.ok:
                # The total number of results is given to us in the JSON body;
                # read it via a regex so we don't have to decode the whole
                # response pointlessly
                match = re.search(r'"total": *([0-9]+)', response.text)

                if match is None:
                    raise IngestApiError.apiGeneralError(
                        Exception(response.text), provider)

                num_results = int(match.group(1))

                if num_results > 0:
                    items.append(response.text)

                if offset >= num_results:
                    return items

                offset += offset_jump
            else:
                if re.match('Error: No API Key provided', response.text):
                    raise IngestApiError.apiAuthError(Exception(response.text),
                                                      provider)
                elif response.status_code == 404:
                    raise IngestApiError.apiNotFoundError(
                        Exception(response.reason), provider)
                else:
                    raise IngestApiError.apiGeneralError(
                        Exception(response.reason), provider)

        return items
Example #2
    def _fetch_data(self, config, provider):
        """Fetch the latest feed data.

        :param dict config: RSS resource configuration
        :param provider: data provider instance, needed as an argument when
            raising ingest errors
        :return: fetched RSS data
        :rtype: str

        :raises IngestApiError: if fetching data fails for any reason
            (e.g. authentication error, resource not found, etc.)
        """
        url = config["url"]

        if config.get("auth_required", False):
            auth = (config.get("username"), config.get("password"))
        else:
            auth = None

        response = requests.get(url, auth=auth)

        if response.ok:
            return response.content
        else:
            if response.status_code in (401, 403):
                raise IngestApiError.apiAuthError(Exception(response.reason), provider)
            elif response.status_code == 404:
                raise IngestApiError.apiNotFoundError(Exception(response.reason), provider)
            else:
                raise IngestApiError.apiGeneralError(Exception(response.reason), provider)
Example #3
    def _fetch_data(self, config, provider):
        """Fetch the latest feed data.

        :param dict config: RSS resource configuration
        :param provider: data provider instance, needed as an argument when
            raising ingest errors
        :return: fetched RSS data
        :rtype: str

        :raises IngestApiError: if fetching data fails for any reason
            (e.g. authentication error, resource not found, etc.)
        """
        url = config['url']

        if config.get('auth_required', False):
            auth = (config.get('username'), config.get('password'))
        else:
            auth = None

        response = requests.get(url, auth=auth)

        if response.ok:
            return response.content
        else:
            if response.status_code in (401, 403):
                raise IngestApiError.apiAuthError(
                    Exception(response.reason), provider)
            elif response.status_code == 404:
                raise IngestApiError.apiNotFoundError(
                    Exception(response.reason), provider)
            else:
                raise IngestApiError.apiGeneralError(
                    Exception(response.reason), provider)
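
Examples #2 and #3 repeat the same status-code dispatch verbatim. A minimal sketch of a shared helper that factors it out; raise_for_ingest_error is hypothetical, not part of superdesk, and assumes the same IngestApiError import as the examples:

from superdesk.errors import IngestApiError


def raise_for_ingest_error(response, provider):
    """Map an unsuccessful HTTP response to the matching IngestApiError."""
    if response.ok:
        return
    reason = Exception(response.reason)
    if response.status_code in (401, 403):
        raise IngestApiError.apiAuthError(reason, provider)
    if response.status_code == 404:
        raise IngestApiError.apiNotFoundError(reason, provider)
    raise IngestApiError.apiGeneralError(reason, provider)

With such a helper, _fetch_data would reduce to a requests.get call followed by raise_for_ingest_error(response, provider).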
Example #4
    def _test(self, provider):
        config = provider.get('config', {})
        url = config['url']
        api_key = config['api_key']

        # limit the data to a single article and filter out all article fields
        # to save bandwidth
        params = {'limit': 1, 'fields': 'id'}
        headers = {'apikey': api_key}

        try:
            response = requests.get(url,
                                    params=params,
                                    headers=headers,
                                    timeout=30)
        except requests.exceptions.ConnectionError as err:
            raise IngestApiError.apiConnectionError(exception=err)

        if not response.ok:
            if response.status_code == 404:
                raise IngestApiError.apiNotFoundError(
                    Exception(response.reason), provider)
            else:
                raise IngestApiError.apiGeneralError(
                    Exception(response.reason), provider)
Example #5
    def _get_worksheet(self, provider):
        """Get worksheet from google spreadsheet

        :return: worksheet
        :rtype: object
        """
        scope = [
            'https://spreadsheets.google.com/feeds',
            'https://www.googleapis.com/auth/drive',
        ]
        config = provider.get('config', {})
        url = config.get('url', '')
        service_account = config.get('service_account', '')
        title = config.get('worksheet_title', '')

        try:
            service_account = json.loads(service_account)
            credentials = ServiceAccountCredentials.from_json_keyfile_dict(
                service_account, scope)
            gc = gspread.authorize(credentials)
            spreadsheet = gc.open_by_url(url)
            permission = spreadsheet.list_permissions()[0]
            if permission['role'] != 'writer':
                raise IngestSpreadsheetError.SpreadsheetPermissionError()
            worksheet = spreadsheet.worksheet(title)
            return worksheet
        except (json.decoder.JSONDecodeError, AttributeError, ValueError) as e:
            # both permission and credential problems raise ValueError
            if e.args[0] == 15100:
                raise IngestSpreadsheetError.SpreadsheetPermissionError()
            raise IngestSpreadsheetError.SpreadsheetCredentialsError()
        except gspread.exceptions.NoValidUrlKeyFound:
            raise IngestApiError.apiNotFoundError()
        except gspread.exceptions.WorksheetNotFound:
            raise IngestSpreadsheetError.WorksheetNotFoundError()
        except gspread.exceptions.APIError as e:
            error = e.response.json()['error']
            response_code = error['code']
            logger.error('Provider %s: %s', provider.get('name'),
                         error['message'])
            if response_code == 403:
                raise IngestSpreadsheetError.SpreadsheetPermissionError()
            elif response_code == 429:
                raise IngestSpreadsheetError.SpreadsheetQuotaLimitError()
            else:
                raise IngestApiError.apiNotFoundError()
Example #6
    def _fetch_data(self, config, provider):
        url = config['url']
        api_key = config['api_key']

        last_update = provider.get('last_updated', utcfromtimestamp(0)).strftime('%Y-%m-%dT%H:%M:%S')

        # Results are paginated so we'll read this many at a time
        offset_jump = 10

        params = {'start': last_update, 'limit': offset_jump}
        headers = {'apikey': api_key}

        items = []

        offset = 0
        while True:
            params['offset'] = offset

            try:
                response = requests.get(url, params=params, headers=headers, timeout=30)
            except requests.exceptions.ConnectionError as err:
                raise IngestApiError.apiConnectionError(exception=err)

            if response.ok:
                # The total number of results is given to us in the JSON body;
                # read it via a regex so we don't have to decode the whole
                # response pointlessly
                match = re.search(r'"total": *([0-9]+)', response.text)

                if match is None:
                    raise IngestApiError.apiGeneralError(
                        Exception(response.text), provider)

                num_results = int(match.group(1))

                if num_results > 0:
                    items.append(response.text)

                if offset >= num_results:
                    return items

                offset += offset_jump
            else:
                if re.match('Error: No API Key provided', response.text):
                    raise IngestApiError.apiAuthError(
                        Exception(response.text), provider)
                elif response.status_code == 404:
                    raise IngestApiError.apiNotFoundError(
                        Exception(response.reason), provider)
                else:
                    raise IngestApiError.apiGeneralError(
                        Exception(response.reason), provider)

        return items
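
Regex-scraping the "total" field avoids decoding the whole body, but a failed match is easy to mishandle. A hedged alternative sketch that decodes the JSON once with requests' response.json(); the 'total' key comes from the examples above, while _total_results itself is a hypothetical helper:

from superdesk.errors import IngestApiError


def _total_results(response, provider):
    """Read the 'total' field by decoding the JSON body once."""
    try:
        return int(response.json().get('total', 0))
    except (TypeError, ValueError) as ex:  # not JSON, or 'total' not numeric
        raise IngestApiError.apiGeneralError(ex, provider)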
Example #7
 def test_raise_apiNotFoundError(self):
     with assert_raises(IngestApiError) as error_context:
         ex = Exception("Testing apiNotFoundError")
         raise IngestApiError.apiNotFoundError(ex, self.provider)
     exception = error_context.exception
     self.assertTrue(exception.code == 4006)
     self.assertTrue(exception.message == "API service not found(404) error")
     self.assertIsNotNone(exception.system_exception)
     self.assertEqual(exception.system_exception.args[0], "Testing apiNotFoundError")
     self.assertEqual(len(self.mock_logger_handler.messages['error']), 1)
     self.assertEqual(self.mock_logger_handler.messages['error'][0],
                      "IngestApiError Error 4006 - API service not found(404) error: "
                      "Testing apiNotFoundError on channel TestProvider")
Example #8
 def test_raise_apiNotFoundError(self):
     with assert_raises(IngestApiError) as error_context:
         ex = Exception("Testing apiNotFoundError")
         raise IngestApiError.apiNotFoundError(ex, self.provider)
     exception = error_context.exception
     self.assertTrue(exception.code == 4006)
     self.assertTrue(
         exception.message == "API service not found(404) error")
     self.assertIsNotNone(exception.system_exception)
      self.assertEqual(exception.system_exception.args[0],
                       "Testing apiNotFoundError")
     self.assertEqual(len(self.mock_logger_handler.messages['error']), 1)
     self.assertEqual(
         self.mock_logger_handler.messages['error'][0],
         "IngestApiError Error 4006 - API service not found(404) error: "
         "Testing apiNotFoundError on channel TestProvider")
Example #9
    def _fetch_data(self, config, provider):
        """Fetch the latest feed data.

        :param dict config: RSS resource configuration
        :param provider: data provider instance, needed as an argument when
            raising ingest errors
        :return: fetched RSS data
        :rtype: str

        :raises IngestApiError: if fetching data fails for any reason
            (e.g. authentication error, resource not found, etc.)
        """
        url = config['url']

        if config.get('auth_required', False):
            auth = (config.get('username'), config.get('password'))
            self.auth_info = {
                'username': config.get('username', ''),
                'password': config.get('password', '')
            }
        else:
            auth = None

        try:
            response = requests.get(url, auth=auth, timeout=30)
        except requests.exceptions.ConnectionError as err:
            raise IngestApiError.apiConnectionError(exception=err, provider=provider)
        except requests.exceptions.RequestException as err:
            raise IngestApiError.apiURLError(exception=err, provider=provider)

        if response.ok:
            return response.content
        else:
            if response.status_code in (401, 403):
                raise IngestApiError.apiAuthError(
                    Exception(response.reason), provider)
            elif response.status_code == 404:
                raise IngestApiError.apiNotFoundError(
                    Exception(response.reason), provider)
            else:
                raise IngestApiError.apiGeneralError(
                    Exception(response.reason), provider)
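
Example #9 stores auth_info so that prepare_href (see Example #13 below) can embed the credentials into media URLs. The embedding itself needs only stdlib urllib.parse; a standalone sketch (add_basic_auth is a hypothetical name):

from urllib.parse import quote as urlquote, urlsplit, urlunsplit


def add_basic_auth(url, username, password):
    """Return url with a percent-encoded user:password@ userinfo part."""
    scheme, netloc, path, query, fragment = urlsplit(url)
    netloc = '{}:{}@{}'.format(urlquote(username), urlquote(password), netloc)
    return urlunsplit((scheme, netloc, path, query, fragment))


# add_basic_auth('https://example.com/img.png', 'user', 'p@ss')
# returns 'https://user:p%40ss@example.com/img.png'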
Example #10
    def _test(self, provider):
        config = provider.get('config', {})
        url = config['url']
        api_key = config['api_key']

        # limit the data to a single article and filter out all article fields
        # to save bandwidth
        params = {'limit': 1, 'fields': 'id'}
        headers = {'apikey': api_key}

        try:
            response = requests.get(url, params=params, headers=headers, timeout=30)
        except requests.exceptions.ConnectionError as err:
            raise IngestApiError.apiConnectionError(exception=err)

        if not response.ok:
            if response.status_code == 404:
                raise IngestApiError.apiNotFoundError(
                    Exception(response.reason), provider)
            else:
                raise IngestApiError.apiGeneralError(
                    Exception(response.reason), provider)
Example #11
    def get_tree(self, endpoint, payload=None):
        """Get xml response for given API endpoint and payload."""
        if payload is None:
            payload = {}
        payload['token'] = self.get_token()
        url = self.get_url(endpoint)

        try:
            response = requests.get(url, params=payload, timeout=21.0)
        except requests.exceptions.Timeout as ex:
            # Maybe set up for a retry, or continue in a retry loop
            raise IngestApiError.apiTimeoutError(ex, self.provider)
        except requests.exceptions.TooManyRedirects as ex:
            # Tell the user their URL was bad and try a different one
            raise IngestApiError.apiRedirectError(ex, self.provider)
        except requests.exceptions.RequestException as ex:
            # catastrophic error. bail.
            raise IngestApiError.apiRequestError(ex, self.provider)
        except Exception as error:
            traceback.print_exc()
            raise IngestApiError(error, self.provider)

        if response.status_code == 404:
            raise IngestApiError.apiNotFoundError(
                LookupError('Not found %s' % payload), self.provider)

        try:
            # workaround for httmock lib
            # return etree.fromstring(response.text.encode('utf-8'))
            return etree.fromstring(response.content)
        except UnicodeEncodeError as error:
            traceback.print_exc()
            raise IngestApiError.apiUnicodeError(error, self.provider)
        except ParseError as error:
            traceback.print_exc()
            raise IngestApiError.apiParseError(error, self.provider)
        except Exception as error:
            traceback.print_exc()
            raise IngestApiError(error, self.provider)
Example #12
    def get_tree(self, endpoint, payload=None):
        """Get xml response for given API endpoint and payload."""
        if payload is None:
            payload = {}
        payload['token'] = self.get_token()
        url = self.get_url(endpoint)

        try:
            response = requests.get(url, params=payload, timeout=21.0)
        except requests.exceptions.Timeout as ex:
            # Maybe set up for a retry, or continue in a retry loop
            raise IngestApiError.apiTimeoutError(ex, self.provider)
        except requests.exceptions.TooManyRedirects as ex:
            # Tell the user their URL was bad and try a different one
            raise IngestApiError.apiRedirectError(ex, self.provider)
        except requests.exceptions.RequestException as ex:
            # catastrophic error. bail.
            raise IngestApiError.apiRequestError(ex, self.provider)
        except Exception as error:
            traceback.print_exc()
            raise IngestApiError(error, self.provider)

        if response.status_code == 404:
            raise IngestApiError.apiNotFoundError(LookupError('Not found %s' % payload), self.provider)

        try:
            # workaround for httmock lib
            # return etree.fromstring(response.text.encode('utf-8'))
            return etree.fromstring(response.content)
        except UnicodeEncodeError as error:
            traceback.print_exc()
            raise IngestApiError.apiUnicodeError(error, self.provider)
        except ParseError as error:
            traceback.print_exc()
            raise IngestApiError.apiParseError(error, self.provider)
        except Exception as error:
            traceback.print_exc()
            raise IngestApiError(error, self.provider)
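
The Timeout branch in get_tree only hints at a retry ("Maybe set up for a retry"). A hypothetical wrapper sketch; the timeout error code 4001 is assumed from the field errors mapping in Example #19 ("4001: Connection timed out."), so verify it against the IngestApiError codes actually in use:

import time

from superdesk.errors import IngestApiError


def get_tree_with_retry(service, endpoint, payload=None, attempts=3, delay=2.0):
    """Call service.get_tree(), retrying timed-out requests with a back-off."""
    for attempt in range(attempts):
        try:
            return service.get_tree(endpoint, payload)
        except IngestApiError as error:
            # 4001 is assumed to be the timeout code (see Example #19)
            if getattr(error, 'code', None) != 4001 or attempt == attempts - 1:
                raise
            time.sleep(delay * (attempt + 1))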
Example #13
class RSSFeedingService(FeedingService):
    """
    Feeding service for providing feeds received in RSS 2.0 format.

    (NOTE: it should work with other syndication feed formats as well, since
    the underlying parser supports them, but for our needs RSS 2.0 is assumed)
    """

    NAME = 'rss'
    ERRORS = [IngestApiError.apiAuthError().get_error_description(),
              IngestApiError.apiNotFoundError().get_error_description(),
              IngestApiError.apiGeneralError().get_error_description(),
              ParserError.parseMessageError().get_error_description()]

    ItemField = namedtuple('ItemField', ['name', 'name_in_data', 'type'])

    item_fields = [
        ItemField('guid', 'guid', str),
        ItemField('uri', 'guid', str),
        ItemField('firstcreated', 'published_parsed', datetime),
        ItemField('versioncreated', 'updated_parsed', datetime),
        ItemField('headline', 'title', str),
        ItemField('abstract', 'summary', str),
        ItemField('body_html', 'body_text', str),
        ItemField('timescalled', 'timescalled', int),
        ItemField('test', 'test', str),
        ItemField('testing', 'testing', str),
        ItemField('mobilecircle', 'mobilecircle', str),
        ItemField('audiofile', 'audiofile', str),
        ItemField('timesrecorded', 'timesrecorded', int),
        ItemField('timespublished', 'timespublished', int),
    ]
    """A list of fields that items created from the ingest data should contain.

    Each list item is a named tuple with the following three attributes:

    * name - the name of the field (attribute) in the resulting ingest item
    * name_in_data - the expected name of the data field in the retrieved
        ingest data (this can be overridden by providing a field name alias)
    * type - field's data type
    """

    IMG_MIME_TYPES = (
        'image/gif',
        'image/jpeg',
        'image/png',
        'image/tiff',
    )
    """
    Supported MIME types for ingesting external images referenced by the
    RSS entries.
    """

    IMG_FILE_SUFFIXES = ('.gif', '.jpeg', '.jpg', '.png', '.tif', '.tiff')
    """
    Supported image filename extensions for ingesting (used for the
    <media:thumbnail> tags - they lack the "type" attribute).
    """

    def __init__(self):
        super().__init__()
        self.auth_info = None

    def prepare_href(self, url, mimetype=None):
        """
        Prepare a link to an external resource (e.g. an image file) so
        that it can be directly used by the ingest machinery for fetching it.

        If provider requires authentication, basic HTTP authentication info is
        added to the given url, otherwise it is returned unmodified.

        :param str url: the original URL as extracted from an RSS entry

        :return: prepared URL
        :rtype: str
        """
        if self.auth_info:
            userinfo_part = '{}:{}@'.format(
                urlquote(self.auth_info['username']),
                urlquote(self.auth_info['password'])
            )
            scheme, netloc, path, query, fragment = urlsplit(url)
            netloc = userinfo_part + netloc
            url = urlunsplit((scheme, netloc, path, query, fragment))

        return url

    def _update(self, provider):
        """
        Check the data provider for updates and return new items (if any).

        :param provider: data provider instance
        :return: a list containing a list of new content items
        :rtype: list

        :raises IngestApiError: if data retrieval error occurs
        :raises ParserError: if retrieved RSS data cannot be parsed
        """
        config = provider.get('config', {})

        if config.get('auth_required'):
            self.auth_info = {
                'username': config.get('username', ''),
                'password': config.get('password', '')
            }

        try:
            xml_data = self._fetch_data(config, provider)
            data = feedparser.parse(xml_data)
        except IngestApiError:
            raise
        except Exception as ex:
            raise ParserError.parseMessageError(ex, provider)

        # If provider last updated time is not available, set it to 1.1.1970
        # so that it will be recognized as "not up to date".
        # Also convert it to a naive datetime object (removing tzinfo is fine,
        # because it is in UTC anyway)
        t_provider_updated = provider.get('last_updated', utcfromtimestamp(0))
        t_provider_updated = t_provider_updated.replace(tzinfo=None)

        new_items = []
        field_aliases = config.get('field_aliases')

        for entry in data.entries:
            t_entry_updated = utcfromtimestamp(timegm(entry.updated_parsed))

            if t_entry_updated <= t_provider_updated:
                continue

            item = self._create_item(entry, field_aliases, provider.get('source', None))
            self.add_timestamps(item)

            # If the RSS entry references any images, create picture items from
            # them and create a package referencing them and the entry itself.
            # If there are no image references, treat entry as a simple text
            # item, even if it might reference other media types, e.g. videos.
            image_urls = self._extract_image_links(entry)
            if image_urls:
                image_items = self._create_image_items(image_urls, item)
                new_items.extend(image_items)
                new_items.append(item)
                item = self._create_package(item, image_items)

            new_items.append(item)

        return [new_items]

    def _fetch_data(self, config, provider):
        """Fetch the latest feed data.

        :param dict config: RSS resource configuration
        :param provider: data provider instance, needed as an argument when
            raising ingest errors
        :return: fetched RSS data
        :rtype: str

        :raises IngestApiError: if fetching data fails for any reason
            (e.g. authentication error, resource not found, etc.)
        """
        url = config['url']

        if config.get('auth_required', False):
            auth = (config.get('username'), config.get('password'))
        else:
            auth = None

        response = requests.get(url, auth=auth)

        if response.ok:
            return response.content
        else:
            if response.status_code in (401, 403):
                raise IngestApiError.apiAuthError(
                    Exception(response.reason), provider)
            elif response.status_code == 404:
                raise IngestApiError.apiNotFoundError(
                    Exception(response.reason), provider)
            else:
                raise IngestApiError.apiGeneralError(
                    Exception(response.reason), provider)

    def _extract_image_links(self, rss_entry):
        """Extract URLs of all images referenced by the given RSS entry.

        Images can be referenced via `<enclosure>`, `<media:thumbnail>` or
        `<media:content>` RSS tag and must be listed among the allowed image
        types. All other links to external media are ignored.

        Duplicate URLs are omitted from the result.

        :param rss_entry: parsed RSS item (entry)
        :type rss_entry: :py:class:`feedparser.FeedParserDict`

        :return: a list of all unique image URLs found (as strings)
        """
        img_links = set()

        for link in getattr(rss_entry, 'links', []):
            if link.get('type') in self.IMG_MIME_TYPES:
                img_links.add(link['href'])

        for item in getattr(rss_entry, 'media_thumbnail', []):
            url = item.get('url', '')
            if url.endswith(self.IMG_FILE_SUFFIXES):
                img_links.add(url)

        for item in getattr(rss_entry, 'media_content', []):
            if item.get('type') in self.IMG_MIME_TYPES:
                img_links.add(item['url'])

        return list(img_links)

    def _create_item(self, data, field_aliases=None, source=None):
        """Create a new content item from RSS feed data.

        :param dict data: parsed data of a single feed entry
        :param field_aliases: (optional) field name aliases. Used for content
             fields that are named differently in retrieved data.
        :type field_aliases: list of {field_name: alias} dictionaries or None
        :param str source: the source of provider

        :return: created content item
        :rtype: dict
        """
        if field_aliases is None:
            field_aliases = {}
        else:
            field_aliases = merge_dicts(field_aliases)
        aliased_fields = set(field_aliases.values())

        item = dict(type=CONTENT_TYPE.TEXT)

        # Only consider fields that are not used as an alias (i.e. used to
        # populate another field) - unless those fields have their own
        # aliases, too.
        # The idea is that if e.g. the main text field is aliased to use the
        # parsed data's summary field, that summary should not be used to
        # populate the field it was originally meant for.
        fields_to_consider = (
            f for f in self.item_fields
            if (f.name_in_data not in aliased_fields) or
               (f.name_in_data in aliased_fields and
                f.name_in_data in field_aliases)
        )

        for field in fields_to_consider:
            data_field_name = field_aliases.get(
                field.name_in_data, field.name_in_data
            )
            field_value = data.get(data_field_name)

            if (field.type is datetime) and field_value:
                field_value = utcfromtimestamp(timegm(field_value))

            item[field.name] = field_value

            # Some feeds use <content:encoded> tag for storing the main content,
            # and that tag is parsed differently. If the body_html has not been
            # found in its default data field and is not aliased, try to
            # populate it using the aforementioned content field as a fallback.
            if (
                field.name == 'body_html' and
                not field_value and
                field.name_in_data not in field_aliases
            ):
                try:
                    item['body_html'] = data.content[0].value
                except Exception:
                    pass  # content either non-existent or parsed differently

        if item.get('uri', None):
            if not item.get('body_html', None):
                item['body_html'] = ''
            source = source or 'source'
            item['body_html'] = '<p><a href="%s" target="_blank">%s</a></p>' % (item['uri'], source) + item['body_html']
        return item

    def _create_image_items(self, image_links, text_item):
        """Create a list of picture items that represent the external images
        located on given URLs.

        Each created item's `firstcreated` and `versioncreated` fields are set
        to the same value as the values of these fields in `text_item`.

        :param iterable image_links: list of image URLs
        :param dict text_item: the "main" text item the images are related to

        :return: list of created image items (as dicts)
        """
        image_items = []

        for image_url in image_links:
            img_item = {
                'guid': generate_guid(type=GUID_TAG),
                ITEM_TYPE: CONTENT_TYPE.PICTURE,
                'firstcreated': text_item.get('firstcreated'),
                'versioncreated': text_item.get('versioncreated'),
                'renditions': {
                    'baseImage': {
                        'href': image_url
                    }
                }
            }
            image_items.append(img_item)

        return image_items

    def _create_package(self, text_item, image_items):
        """Create a new content package from given content items.

        The package's `main` group contains only the references to given items,
        not the items themselves. In the list of references, the reference to
        the text item precedes the references to image items.

        Package's `firstcreated` and `versioncreated` fields are set to values
        of these fields in `text_item`, and the `headline` is copied as well.

        :param dict text_item: item representing the text content
        :param list image_items: list of items (dicts) representing the images
            related to the text content
        :return: the created content package
        :rtype: dict
        """
        package = {
            ITEM_TYPE: CONTENT_TYPE.COMPOSITE,
            'guid': generate_guid(type=GUID_TAG),
            'firstcreated': text_item['firstcreated'],
            'versioncreated': text_item['versioncreated'],
            'headline': text_item.get('headline', ''),
            'groups': [
                {
                    'id': 'root',
                    'role': 'grpRole:NEP',
                    'refs': [{'idRef': 'main'}],
                }, {
                    'id': 'main',
                    'role': 'main',
                    'refs': [],
                }
            ]
        }

        item_references = package['groups'][1]['refs']
        item_references.append({'residRef': text_item['guid']})

        for image in image_items:
            item_references.append({'residRef': image['guid']})

        return package
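
The field_aliases structure consumed by _create_item above is a list of single-entry dicts; merge_dicts (from superdesk.utils) is assumed here to fold that list into one lookup table. A worked illustration with made-up field names:

field_aliases = [{'body_text': 'description'}, {'title': 'headline_field'}]

merged = {}
for alias in field_aliases:
    merged.update(alias)
# merged == {'body_text': 'description', 'title': 'headline_field'}
# i.e. body_html (name_in_data 'body_text') is now read from the feed's
# 'description' field instead of 'body_text'.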
Example #14
class SpreadsheetFeedingService(FeedingService):
    NAME = 'spreadsheet'
    service = 'events'
    ERRORS = [
        IngestApiError.apiNotFoundError().get_error_description(),
        ParserError.parseFileError().get_error_description(),
        IngestSpreadsheetError.SpreadsheetPermissionError(
        ).get_error_description(),
        IngestSpreadsheetError.SpreadsheetQuotaLimitError(
        ).get_error_description(),
        IngestSpreadsheetError.SpreadsheetCredentialsError().
        get_error_description(),
        IngestSpreadsheetError.WorksheetNotFoundError().get_error_description(
        ),
    ]

    label = 'Events from Google Documents Spreadsheet'

    fields = [
        {
            'id': 'service_account',
            'type': 'text',
            'label': 'Service account',
            'required': True,
            'errors': {
                15300: 'Invalid service account key'
            },
        },
        {
            'id': 'url',
            'type': 'text',
            'label': 'Source',
            'placeholder': 'Google Spreadsheet URL',
            'required': True,
            'errors': {
                1001: 'Can\'t parse spreadsheet.',
                1002: 'Can\'t parse spreadsheet.',
                4006: 'URL not found.',
                15100: 'Missing write permission while processing file',
                15200: 'Server reaches read quota limits.'
            }
        },
        {
            'id': 'worksheet_title',
            'type': 'text',
            'label': 'Sheet title',
            'placeholder': 'Title / Name of sheet',
            'required': True,
            'errors': {
                15400: 'Sheet not found'
            }
        },
    ]

    def _test(self, provider):
        worksheet = self._get_worksheet(provider)
        data = worksheet.get_all_values()
        BelgaSpreadsheetParser().parse_titles(data[0])

    def _update(self, provider, update):
        """Load items from google spreadsheet and insert (update) to events database

        If STATUS field is empty, create new item
        If STATUS field is UPDATED, update item
        """
        worksheet = self._get_worksheet(provider)

        # Get all values to avoid reaching read limit
        data = worksheet.get_all_values()
        titles = [s.lower().strip() for s in data[0]]

        # avoid exceeding the maximum column limit
        total_col = worksheet.col_count
        if total_col < len(titles) + 3:
            worksheet.add_cols(len(titles) + 3 - total_col)

        for field in ('_STATUS', '_ERR_MESSAGE', '_GUID'):
            if field.lower() not in titles:
                titles.append(field)
                worksheet.update_cell(1, len(titles), field)
        data[0] = titles  # the parser uses this row to look up column indexes

        parser = BelgaSpreadsheetParser()
        items, cells_list = parser.parse(data, provider)
        items = self._process_event_items(items, provider)
        # add ingest item
        yield items
        # Update status for google sheet
        if cells_list:
            worksheet.update_cells(cells_list)

    def _get_worksheet(self, provider):
        """Get worksheet from google spreadsheet

        :return: worksheet
        :rtype: object
        """
        scope = [
            'https://spreadsheets.google.com/feeds',
            'https://www.googleapis.com/auth/drive',
        ]
        config = provider.get('config', {})
        url = config.get('url', '')
        service_account = config.get('service_account', '')
        title = config.get('worksheet_title', '')

        try:
            service_account = json.loads(service_account)
            credentials = ServiceAccountCredentials.from_json_keyfile_dict(
                service_account, scope)
            gc = gspread.authorize(credentials)
            spreadsheet = gc.open_by_url(url)
            permission = spreadsheet.list_permissions()[0]
            if permission['role'] != 'writer':
                raise IngestSpreadsheetError.SpreadsheetPermissionError()
            worksheet = spreadsheet.worksheet(title)
            return worksheet
        except (json.decoder.JSONDecodeError, AttributeError, ValueError) as e:
            # both permission and credential problems raise ValueError
            if e.args[0] == 15100:
                raise IngestSpreadsheetError.SpreadsheetPermissionError()
            raise IngestSpreadsheetError.SpreadsheetCredentialsError()
        except gspread.exceptions.NoValidUrlKeyFound:
            raise IngestApiError.apiNotFoundError()
        except gspread.exceptions.WorksheetNotFound:
            raise IngestSpreadsheetError.WorksheetNotFoundError()
        except gspread.exceptions.APIError as e:
            error = e.response.json()['error']
            response_code = error['code']
            logger.error('Provider %s: %s', provider.get('name'),
                         error['message'])
            if response_code == 403:
                raise IngestSpreadsheetError.SpreadsheetPermissionError()
            elif response_code == 429:
                raise IngestSpreadsheetError.SpreadsheetQuotaLimitError()
            else:
                raise IngestApiError.apiNotFoundError()

    def _process_event_items(self, items, provider):
        events_service = superdesk.get_resource_service('events')
        list_items = []
        for item in items:
            status = item.pop('status')
            location = item.get('location')
            if item.get('contact'):
                contact = item.pop('contact')
                contact_service = superdesk.get_resource_service('contacts')
                _contact = contact_service.find_one(
                    req=None,
                    **{
                        'first_name':
                        contact['first_name'],
                        'last_name':
                        contact['last_name'],
                        'organisation':
                        contact['organisation'],
                        'contact_email':
                        contact['contact_email'][0],
                        'contact_phone.number':
                        contact['contact_phone'][0]['number'],
                    })
                if _contact and status == 'UPDATED':
                    item.setdefault('event_contact_info',
                                    [_contact[superdesk.config.ID_FIELD]])
                    contact_service.patch(_contact[superdesk.config.ID_FIELD],
                                          contact)
                else:
                    item.setdefault('event_contact_info',
                                    list(contact_service.post([contact])))

            if location:
                location_service = superdesk.get_resource_service('locations')
                saved_location = list(
                    location_service.find({
                        'name':
                        location[0]['name'],
                        'address.line':
                        location[0]['address']['line'],
                        'address.country':
                        location[0]['address']['country'],
                    }))
                if saved_location and status == 'UPDATED':
                    location_service.patch(
                        saved_location[0][superdesk.config.ID_FIELD],
                        location[0])
                elif not saved_location:
                    _location = deepcopy(location)
                    location_service.post(_location)
                    item['location'][0]['qcode'] = _location[0]['guid']

            old_item = events_service.find_one(guid=item[GUID_FIELD], req=None)
            if not old_item:
                if not status:
                    item.setdefault('firstcreated', datetime.now())
                    item.setdefault('versioncreated', datetime.now())
                    list_items.append(item)
            else:
                old_item.update(item)
                list_items.append(old_item)
        return list_items
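
Stripped of the error mapping, the gspread handshake inside _get_worksheet boils down to a few calls. A minimal sketch, assuming ServiceAccountCredentials comes from the usual oauth2client package:

import json

import gspread
from oauth2client.service_account import ServiceAccountCredentials

SCOPE = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive',
]


def open_worksheet(service_account_json, url, title):
    """Authorize with a service account key and open one worksheet by title."""
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(
        json.loads(service_account_json), SCOPE)
    client = gspread.authorize(credentials)
    return client.open_by_url(url).worksheet(title)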
Example #15
from calendar import timegm
from collections import namedtuple
from datetime import datetime

from superdesk.errors import IngestApiError, ParserError
from superdesk.io import register_provider
from superdesk.io.ingest_service import IngestService
from superdesk.utils import merge_dicts


PROVIDER = 'rss'

utcfromtimestamp = datetime.utcfromtimestamp

errors = [IngestApiError.apiAuthError().get_error_description(),
          IngestApiError.apiNotFoundError().get_error_description(),
          IngestApiError.apiGeneralError().get_error_description(),
          ParserError.parseMessageError().get_error_description()]


class RssIngestService(IngestService):
    """Ingest service for providing feeds received in RSS 2.0 format.

    (NOTE: it should work with other syndication feed formats as well, since
    the underlying parser supports them, but for our needs RSS 2.0 is assumed)
    """

    ItemField = namedtuple('ItemField', ['name', 'name_in_data', 'type'])

    item_fields = [
        ItemField('guid', 'guid', str),
Example #16
from superdesk.errors import IngestApiError, ParserError
from superdesk.io import register_provider
from superdesk.io.ingest_service import IngestService
from superdesk.utils import merge_dicts

from urllib.parse import quote as urlquote, urlsplit, urlunsplit


PROVIDER = "rss"

utcfromtimestamp = datetime.utcfromtimestamp

errors = [
    IngestApiError.apiAuthError().get_error_description(),
    IngestApiError.apiNotFoundError().get_error_description(),
    IngestApiError.apiGeneralError().get_error_description(),
    ParserError.parseMessageError().get_error_description(),
]


class RssIngestService(IngestService):
    """Ingest service for providing feeds received in RSS 2.0 format.

    (NOTE: it should work with other syndication feed formats as well, since
    the underlying parser supports them, but for our needs RSS 2.0 is assumed)
    """

    ItemField = namedtuple("ItemField", ["name", "name_in_data", "type"])

    item_fields = [
Example #17
class BBCLDRSFeedingService(FeedingService):
    """
    Feeding Service class for reading BBC's Local Democracy Reporting Service
    """

    # Following the api spec at https://docs.ldrs.org.uk/

    NAME = 'bbc_ldrs'
    ERRORS = [
        IngestApiError.apiAuthError().get_error_description(),
        IngestApiError.apiNotFoundError().get_error_description(),
        IngestApiError.apiGeneralError().get_error_description(),
        ParserError.parseMessageError().get_error_description()
    ]

    label = 'BBC Local Democracy Reporter Service'

    fields = [{
        'id': 'url',
        'type': 'text',
        'label': 'LDRS URL',
        'placeholder': 'LDRS URL',
        'required': True,
        'default': 'https://api.ldrs.org.uk/v1/item'
    }, {
        'id': 'api_key',
        'type': 'text',
        'label': 'API Key',
        'placeholder': 'API Key',
        'required': True,
        'default': ''
    }]

    def __init__(self):
        super().__init__()

    def _test(self, provider):
        config = provider.get('config', {})
        url = config['url']
        api_key = config['api_key']

        # limit the data to a single article and filter out all article fields
        # to save bandwidth
        params = {'limit': 1, 'fields': 'id'}
        headers = {'apikey': api_key}

        try:
            response = requests.get(url,
                                    params=params,
                                    headers=headers,
                                    timeout=30)
        except requests.exceptions.ConnectionError as err:
            raise IngestApiError.apiConnectionError(exception=err)

        if not response.ok:
            if response.status_code == 404:
                raise IngestApiError.apiNotFoundError(
                    Exception(response.reason), provider)
            else:
                raise IngestApiError.apiGeneralError(
                    Exception(response.reason), provider)

    def _update(self, provider, update):
        config = provider.get('config', {})
        json_items = self._fetch_data(config, provider)
        parsed_items = []

        for item in json_items:
            try:
                parser = self.get_feed_parser(provider, item)
                parsed_items.append(parser.parse(item))
            except Exception as ex:
                raise ParserError.parseMessageError(ex, provider, data=item)

        return parsed_items

    def _fetch_data(self, config, provider):
        url = config['url']
        api_key = config['api_key']

        last_update = provider.get(
            'last_updated', utcfromtimestamp(0)).strftime('%Y-%m-%dT%H:%M:%S')

        # Results are paginated so we'll read this many at a time
        offset_jump = 10

        params = {'start': last_update, 'limit': offset_jump}
        headers = {'apikey': api_key}

        items = []

        offset = 0
        while True:
            params['offset'] = offset

            try:
                response = requests.get(url,
                                        params=params,
                                        headers=headers,
                                        timeout=30)
            except requests.exceptions.ConnectionError as err:
                raise IngestApiError.apiConnectionError(exception=err)

            if response.ok:
                # The total number of results is given to us in the JSON body;
                # read it via a regex so we don't have to decode the whole
                # response pointlessly
                match = re.search(r'"total": *([0-9]+)', response.text)

                if match is None:
                    raise IngestApiError.apiGeneralError(
                        Exception(response.text), provider)

                num_results = int(match.group(1))

                if num_results > 0:
                    items.append(response.text)

                if offset >= num_results:
                    return items

                offset += offset_jump
            else:
                if re.match('Error: No API Key provided', response.text):
                    raise IngestApiError.apiAuthError(Exception(response.text),
                                                      provider)
                elif response.status_code == 404:
                    raise IngestApiError.apiNotFoundError(
                        Exception(response.reason), provider)
                else:
                    raise IngestApiError.apiGeneralError(
                        Exception(response.reason), provider)

        return items
Example #18
    def get_url(self, url=None, **kwargs):
        """Do an HTTP Get on URL

        :param string url: url to use (None to use self.HTTP_URL)
        :param **kwargs: extra parameter for requests
        :return requests.Response: response
        """
        if not url:
            url = self.HTTP_URL
        config = self.config
        user = config.get('username')
        password = config.get('password')
        if user:
            user = user.strip()
        if password:
            password = password.strip()

        auth_required = config.get('auth_required', self.HTTP_AUTH)
        if auth_required is None:
            # auth_required may not be used in the feeding service;
            # in this case we use authentication only if both user
            # and password are set.
            auth_required = bool(user and password)

        if auth_required:
            if not user:
                raise SuperdeskIngestError.notConfiguredError(
                    "user is not configured")
            if not password:
                raise SuperdeskIngestError.notConfiguredError(
                    "password is not configured")
            kwargs.setdefault('auth', (user, password))

        params = kwargs.pop("params", {})
        if params or self.HTTP_DEFAULT_PARAMETERS:
            # if we have default parameters, we want them to be overridden
            # by conflicting params given in arguments
            if self.HTTP_DEFAULT_PARAMETERS:
                merged = dict(self.HTTP_DEFAULT_PARAMETERS)
                merged.update(params)
                params = merged
            kwargs["params"] = params

        try:
            response = requests.get(url, timeout=self.HTTP_TIMEOUT, **kwargs)
        except requests.exceptions.Timeout as exception:
            raise IngestApiError.apiTimeoutError(exception, self.provider)
        except requests.exceptions.ConnectionError as exception:
            raise IngestApiError.apiConnectionError(exception, self.provider)
        except requests.exceptions.RequestException as exception:
            raise IngestApiError.apiRequestError(exception, self.provider)
        except Exception as exception:
            traceback.print_exc()
            raise IngestApiError.apiGeneralError(exception, self.provider)

        if not response.ok:
            exception = Exception(response.reason)
            if response.status_code in (401, 403):
                raise IngestApiError.apiAuthError(exception, self.provider)
            elif response.status_code == 404:
                raise IngestApiError.apiNotFoundError(exception, self.provider)
            else:
                raise IngestApiError.apiGeneralError(exception, self.provider)

        return response
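
get_url merges HTTP_DEFAULT_PARAMETERS with the caller-supplied params so that the caller's values win over conflicting defaults, as the comment states. A worked illustration with made-up values:

HTTP_DEFAULT_PARAMETERS = {'format': 'xml', 'limit': 10}
caller_params = {'limit': 50}

merged = dict(HTTP_DEFAULT_PARAMETERS)
merged.update(caller_params)
# merged == {'format': 'xml', 'limit': 50}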
Example #19
class RSSFeedingService(HTTPFeedingServiceBase):
    """
    Feeding service for providing feeds received in RSS 2.0 format.

    (NOTE: it should work with other syndication feed formats as well, since
    the underlying parser supports them, but for our needs RSS 2.0 is assumed)
    """

    NAME = "rss"

    ERRORS = [
        IngestApiError.apiAuthError().get_error_description(),
        IngestApiError.apiNotFoundError().get_error_description(),
        IngestApiError.apiGeneralError().get_error_description(),
        ParserError.parseMessageError().get_error_description(),
    ]

    label = "RSS/Atom"

    fields = ([{
        "id": "url",
        "type": "text",
        "label": "Host",
        "placeholder": "RSS Feed URL",
        "required": True,
        "errors": {
            4001: "Connection timed out.",
            4006: "URL not found.",
            4009: "Can't connect to host.",
            1001: "Can't parse the RSS.",
        },
    }] + HTTPFeedingServiceBase.AUTH_REQ_FIELDS + [{
        "id": "field_aliases",
        "type": "mapping",
        "label": "Content Field Aliases",
        "add_mapping_label": "Add alias",
        "remove_mapping_label": "Remove",
        "empty_label": "No field aliases defined.",
        "first_field_options": {
            "label":
            "Content Field Name",
            "values": [
                "body_text", "guid", "published_parsed", "summary", "title",
                "updated_parsed"
            ],
        },
        "second_field_options": {
            "label": "Field Alias",
            "placeholder": "Enter field alias"
        },
    }])

    HTTP_AUTH = None

    field_groups = {
        "auth_data": {
            "label": "Authentication Info",
            "fields": ["username", "password"]
        }
    }

    ItemField = namedtuple("ItemField", ["name", "name_in_data", "type"])

    item_fields = [
        ItemField("guid", "guid", str),
        ItemField("uri", "guid", str),
        ItemField("firstcreated", "published_parsed", datetime),
        ItemField("versioncreated", "updated_parsed", datetime),
        ItemField("headline", "title", str),
        ItemField("abstract", "summary", str),
        ItemField("body_html", "body_text", str),
        ItemField("byline", "author", str),
    ]
    """A list of fields that items created from the ingest data should contain.

    Each list item is a named tuple with the following three attributes:

    * name - the name of the field (attribute) in the resulting ingest item
    * name_in_data - the expected name of the data field in the retrieved
        ingest data (this can be overridden by providing a field name alias)
    * type - field's data type
    """

    IMG_MIME_TYPES = (
        "image/gif",
        "image/jpeg",
        "image/png",
        "image/tiff",
    )
    """
    Supported MIME types for ingesting external images referenced by the
    RSS entries.
    """

    IMG_FILE_SUFFIXES = (".gif", ".jpeg", ".jpg", ".png", ".tif", ".tiff")
    """
    Supported image filename extensions for ingesting (used for the
    <media:thumbnail> tags - they lack the "type" attribute).
    """
    def prepare_href(self, url, mimetype=None):
        """Prepare a link to an external resource (e.g. an image file).

        It can be directly used by the ingest machinery for fetching it.

        If provider requires authentication, basic HTTP authentication info is
        added to the given url, otherwise it is returned unmodified.

        :param str url: the original URL as extracted from an RSS entry

        :return: prepared URL
        :rtype: str
        """
        if self.auth_info:
            userinfo_part = "{}:{}@".format(
                urlquote(self.auth_info["username"]),
                urlquote(self.auth_info["password"]))
            scheme, netloc, path, query, fragment = urlsplit(url)
            netloc = userinfo_part + netloc
            url = urlunsplit((scheme, netloc, path, query, fragment))

        return url

    def _test(self, provider):
        """Test connection."""
        self.provider = provider
        xml = self._fetch_data()
        data = feedparser.parse(xml)
        if data.bozo:
            raise ParserError.parseMessageError(data.bozo_exception, provider)

    def _update(self, provider, update):
        """
        Check the data provider for updates and return new items (if any).

        :param provider: data provider instance
        :return: a list containing a list of new content items
        :rtype: list

        :raises IngestApiError: if data retrieval error occurs
        :raises ParserError: if retrieved RSS data cannot be parsed
        """
        xml_data = self._fetch_data()

        try:
            data = feedparser.parse(xml_data)
        except Exception as ex:
            raise ParserError.parseMessageError(ex, provider, data=xml_data)

        # If provider last updated time is not available, set it to 1.1.1970
        # so that it will be recognized as "not up to date".
        # Also convert it to a naive datetime object (removing tzinfo is fine,
        # because it is in UTC anyway)
        t_provider_updated = provider.get(LAST_ITEM_UPDATE,
                                          utcfromtimestamp(0))
        t_provider_updated = t_provider_updated.replace(tzinfo=None)

        new_items = []
        field_aliases = self.config.get("field_aliases")

        for entry in data.entries:
            try:
                t_entry_updated = utcfromtimestamp(timegm(
                    entry.updated_parsed))
                if t_entry_updated <= t_provider_updated:
                    continue
            except (AttributeError, TypeError):
                # missing updated info, so better ingest it
                pass

            item = self._create_item(entry, field_aliases,
                                     provider.get("source", None))
            self.localize_timestamps(item)

            # If the RSS entry references any images, create picture items from
            # them and create a package referencing them and the entry itself.
            # If there are no image references, treat entry as a simple text
            # item, even if it might reference other media types, e.g. videos.
            image_urls = self._extract_image_links(entry)
            if image_urls:
                image_items = self._create_image_items(image_urls, item)
                new_items.extend(image_items)
                new_items.append(item)
                item = self._create_package(item, image_items)

            new_items.append(item)

        return [new_items]

    def _fetch_data(self):
        """Fetch the latest feed data.

        :return: fetched RSS data
        :rtype: str

        :raises IngestApiError: if fetching data fails for any reason
            (e.g. authentication error, resource not found, etc.)
        """
        url = self.config["url"]

        response = self.get_url(url)

        return response.content

    def _extract_image_links(self, rss_entry):
        """Extract URLs of all images referenced by the given RSS entry.

        Images can be referenced via `<enclosure>`, `<media:thumbnail>` or
        `<media:content>` RSS tag and must be listed among the allowed image
        types. All other links to external media are ignored.

        Duplicate URLs are omitted from the result.

        :param rss_entry: parsed RSS item (entry)
        :type rss_entry: :py:class:`feedparser.FeedParserDict`

        :return: a list of all unique image URLs found (as strings)
        """
        img_links = set()

        for link in getattr(rss_entry, "links", []):
            if link.get("type") in self.IMG_MIME_TYPES:
                img_links.add(link["href"])

        for item in getattr(rss_entry, "media_thumbnail", []):
            url = item.get("url", "")
            if url.endswith(self.IMG_FILE_SUFFIXES):
                img_links.add(url)

        for item in getattr(rss_entry, "media_content", []):
            if item.get("type") in self.IMG_MIME_TYPES:
                img_links.add(item["url"])

        return list(img_links)

    def _create_item(self, data, field_aliases=None, source="source"):
        """Create a new content item from RSS feed data.

        :param dict data: parsed data of a single feed entry
        :param field_aliases: (optional) field name aliases. Used for content
             fields that are named differently in retrieved data.
        :type field_aliases: list of {field_name: alias} dictionaries or None
        :param str source: the source of provider

        :return: created content item
        :rtype: dict
        """
        if field_aliases is None:
            field_aliases = {}
        else:
            field_aliases = merge_dicts(field_aliases)
        aliased_fields = set(field_aliases.values())

        item = dict(type=CONTENT_TYPE.TEXT)

        # Only consider fields that are not used as an alias (i.e. used to
        # populate another field) - unless those fields have their own
        # aliases, too.
        # The idea is that if e.g. the main text field is aliased to use the
        # parsed data's summary field, that summary should not be used to
        # populate the field it was originally meant for.
        fields_to_consider = (f for f in self.item_fields
                              if (f.name_in_data not in aliased_fields) or (
                                  f.name_in_data in aliased_fields
                                  and f.name_in_data in field_aliases))

        utc_now = datetime.utcnow()
        for field in fields_to_consider:
            data_field_name = field_aliases.get(field.name_in_data,
                                                field.name_in_data)
            field_value = data.get(data_field_name)

            if (field.type is datetime) and field_value:
                field_value = utcfromtimestamp(timegm(field_value))
                field_value = utc_now if field_value > utc_now else field_value

            item[field.name] = field_value

            # Some feeds use <content:encoded> tag for storing the main content,
            # and that tag is parsed differently. If the body_html has not been
            # found in its default data field and is not aliased, try to
            # populate it using the aforementioned content field as a fallback.
            if field.name == "body_html" and not field_value and field.name_in_data not in field_aliases:
                try:
                    item["body_html"] = data.content[0].value
                except Exception:
                    pass  # content either non-existent or parsed differently

        if not data.get("guidislink") and data.get("link"):
            item["uri"] = data["link"]
            scheme, netloc, path, query, fragment = urlsplit(item["uri"])
            if data.get("guid"):
                item["guid"] = generate_tag(domain=netloc, id=data.get("guid"))
            else:
                item["guid"] = generate_tag_from_url(data["link"])

        if item.get("uri", None):
            if not item.get("body_html", None):
                item["body_html"] = ""
            item["body_html"] = (
                '<p><a href="%s" target="_blank">%s</a></p>'
                % (item["uri"], source) + item["body_html"])

        item["dateline"] = {
            "source": source,
            "date": item.get("firstcreated", item.get("versioncreated"))
        }

        if not item.get("versioncreated") and item.get("firstcreated"):
            item["versioncreated"] = item["firstcreated"]

        return item

    def _create_image_items(self, image_links, text_item):
        """Create a list of picture items that represent the external images located on given URLs.

        Each created item's `firstcreated` and `versioncreated` fields are set
        to the same value as the values of these fields in `text_item`.

        :param iterable image_links: list of image URLs
        :param dict text_item: the "main" text item the images are related to

        :return: list of created image items (as dicts)
        """
        image_items = []

        for image_url in image_links:
            img_item = {
                "guid": generate_tag_from_url(image_url),
                ITEM_TYPE: CONTENT_TYPE.PICTURE,
                "firstcreated": text_item.get("firstcreated"),
                "versioncreated": text_item.get("versioncreated"),
                "renditions": {
                    "baseImage": {
                        "href": image_url
                    }
                },
            }
            image_items.append(img_item)

        return image_items

    def _create_package(self, text_item, image_items):
        """Create a new content package from given content items.

        The package's `main` group contains only the references to given items,
        not the items themselves. In the list of references, the reference to
        the text item precedes the references to image items.

        Package's `firstcreated` and `versioncreated` fields are set to values
        of these fields in `text_item`, and the `headline` is copied as well.

        :param dict text_item: item representing the text content
        :param list image_items: list of items (dicts) representing the images
            related to the text content
        :return: the created content package
        :rtype: dict
        """
        package = {
            ITEM_TYPE:
            CONTENT_TYPE.COMPOSITE,
            "guid":
            "{}:pkg".format(text_item["guid"]),
            "firstcreated":
            text_item["firstcreated"],
            "versioncreated":
            text_item["versioncreated"],
            "headline":
            text_item.get("headline", ""),
            "groups": [
                {
                    "id": "root",
                    "role": "grpRole:NEP",
                    "refs": [{
                        "idRef": "main"
                    }],
                },
                {
                    "id": "main",
                    "role": "main",
                    "refs": [],
                },
            ],
        }

        item_references = package["groups"][1]["refs"]
        item_references.append({"residRef": text_item["guid"]})

        for image in image_items:
            item_references.append({"residRef": image["guid"]})

        return package
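
The 'main' group that _create_package builds keeps the text item's reference first, followed by the image references. A small self-contained check with placeholder guids (not real superdesk tags):

text_item = {'guid': 'text-1'}
image_items = [{'guid': 'img-1'}, {'guid': 'img-2'}]

refs = [{'residRef': text_item['guid']}]
refs.extend({'residRef': image['guid']} for image in image_items)
assert refs[0] == {'residRef': 'text-1'}  # the text reference precedes images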