Code example #1
def handler_dataset_landing_page_fallback(scraper, this_dataset_page, tree):
    """
    At time of writing there's an issue with the latest version of datasets 404'ing on the
    versions page.
    
    this function will create what the latest version should be, using the information on the
    base dataset landing page.
    """

    logging.warning(
        "Using fallback logic to scrape the latest distribution from the dataset landing "
        "page (rather than the versions page). This scrape will only have a single xls "
        "distribution."
    )

    this_distribution = Distribution(scraper)

    release_date = this_dataset_page["description"]["releaseDate"]
    this_distribution.issued = parse(release_date.strip()).date()

    # we have to go via the html for this one ...
    download_url = tree.xpath("//a[text()='xls']/@href")[0]
    this_distribution.downloadURL = download_url

    media_type = Excel
    this_distribution.mediaType = media_type

    this_distribution.title = scraper.dataset.title
    this_distribution.description = scraper.dataset.description
    this_distribution.contactPoint = scraper.dataset.contactPoint

    logging.debug(
        "Created distribution for download '{}'.".format(download_url))
    scraper.distributions.append(this_distribution)
Code example #2
File: lcc.py Project: GSS-Cogs/gss-utils
def scrape(scraper, tree):
    """
    Scraper for https://www.lowcarboncontracts.uk/data-portal/dataset/*

    Example: https://www.lowcarboncontracts.uk/data-portal/dataset/actual-ilr-income
    """

    article = assert_get_one(tree.xpath('//article'), "article element")

    title_element = assert_get_one(article.xpath('./div/h1'), 'title element')
    scraper.dataset.title = title_element.text.strip()

    description_elements = article.xpath('./div/div/p')
    scraper.dataset.description = "\n\n".join(
        [x.text.strip() for x in description_elements])

    issued_element = assert_get_one(
        article.xpath('./div/section/table/tbody/tr[1]/td/span'),
        "issued element")
    scraper.dataset.issued = parse(issued_element.text.split("(")[0].strip())

    scraper.dataset.license = "http://reference.data.gov.uk/id/open-government-licence"

    for resource in assert_get_one(article.xpath('./div/section[1]/ul[1]'),
                                   "resource list").xpath('./li/a'):

        distro = Distribution(scraper)

        url = f'https://www.lowcarboncontracts.uk/{resource.get("href")}'
        resp = scraper.session.get(url)
        if resp.status_code != 200:
            raise Exception(f'Failed to get url resource {url}')

        distro_tree = html.fromstring(resp.text)
        section = assert_get_one(
            distro_tree.xpath(
                '/html[1]/body[1]/div[3]/div[1]/div[3]/section[1]'),
            "section of distro")

        distro_title_element = assert_get_one(section.xpath('./div/h1'),
                                              "distro title")
        distro.title = distro_title_element.text

        distro_description_element = assert_get_one(
            section.xpath('./div/div/blockquote[1]'), "distro description")
        distro.description = distro_description_element.text

        distro_download_url_element = assert_get_one(
            section.xpath('./div/p/a'), "download url")
        distro.downloadURL = distro_download_url_element.text

        # Note: issued is the one thing not in the "section" element, so we xpath the whole distro tree
        distro_issued_element = assert_get_one(
            distro_tree.xpath('//table[1]/tbody[1]/tr[2]/td[1]'), "issued")
        distro.issued = parse(distro_issued_element.text)

        media_type, _ = mimetypes.guess_type(distro.downloadURL)
        distro.mediaType = media_type if media_type is not None else CSV  # the default/not-specified offering is csv

        scraper.distributions.append(distro)
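
The assert_get_one helper used throughout this example isn't defined on this page. A minimal sketch, assuming it simply checks that an xpath result contains exactly one node and returns it (the real gss-utils helper may differ):

def assert_get_one(elements, label):
    # Assumed behaviour: return the single element from an xpath result,
    # failing loudly with the given label if zero or several matched.
    assert len(elements) == 1, \
        "Expected exactly one {}, got {}".format(label, len(elements))
    return elements[0]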
Code example #3
File: main.py Project: GSS-Cogs/family-covid-19
def opendata_nhs(scraper, tree):

    # TODO - this feels more like a catalogue than a list of distributions, investigate

    # Populate the dataset
    details = tree.xpath('//tr/td[@class="dataset-details"]')

    dates = tree.xpath('//span[@class="automatic-local-datetime"]/text()')
    date_updated = parse(" ".join([
        x.replace("\n", "").replace("(BST)", "").strip()
        for x in dates[0].split(" ")
    ]))
    date_created = parse(" ".join([
        x.replace("\n", "").replace("(BST)", "").strip()
        for x in dates[1].split(" ")
    ]))

    # Populate distributions
    distro_resources = tree.xpath('//li[@class="resource-item"]')
    for dr in distro_resources:

        download = dr.xpath(
            'div/ul/li/a[contains(@class, "resource-url-analytics")]/@href')[0]

        # Need to go to the preview page for full description and title as they've helpfully truncated both...
        preview_url = "https://www.opendata.nhs.scot" + dr.xpath(
            'div/ul[@class="dropdown-menu"]/li/a/@href')[0]
        r = scraper.session.get(preview_url)
        if r.status_code != 200:
            raise Exception(
                "Unable to follow url to get full description, url: '{}', status code '{}'."
                .format(preview_url, r.status_code))

        preview_tree = html.fromstring(r.text)
        description1 = preview_tree.xpath(
            '//div[contains(@class, "prose notes")]/p/text()')[0]
        # Some (but not all) descriptions have some additional italic information
        try:
            description2 = preview_tree.xpath(
                '//div[contains(@class, "prose notes")]/p/em/text()')[0]
        except IndexError:
            description2 = ""

        description = (description1 + "\n\n" + description2).strip("\n")

        title = preview_tree.xpath('//title/text()')[0]
        this_distribution = Distribution(scraper)

        this_distribution.issued = date_updated
        this_distribution.downloadURL = download
        this_distribution.mediaType = CSV

        this_distribution.title = title.strip()
        this_distribution.description = description

        scraper.distributions.append(this_distribution)
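
The whitespace handling for the "automatic-local-datetime" spans above is easiest to see on a hypothetical raw value (the real page text may differ):

from dateutil.parser import parse

# Hypothetical raw span text, with layout newlines and a timezone suffix:
raw = "\n  June 30, 2020, 14:09\n  (BST)\n"
cleaned = " ".join(
    x.replace("\n", "").replace("(BST)", "").strip()
    for x in raw.split(" ")
)
print(parse(cleaned))  # 2020-06-30 14:09:00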
Code example #4
def handler_static_adhoc(scraper, landing_page, tree):

    # A static adhoc is a one-off unscheduled release
    # These pages should be simpler and should lack the historical distributions

    for download in landing_page["downloads"]:

        title = download["title"]
        file = download["file"]

        # Create an empty Distribution object to represent this distribution;
        # from here we're just looking to fill in its fields
        this_distribution = Distribution(scraper)

        # if we can't get the release date, continue but throw a warning.
        try:
            this_distribution.issued = parse(
                landing_page["description"]["releaseDate"]).date()
        except KeyError:
            logging.warning("Unable to acquire or parse release date")

        download_url = ONS_DOWNLOAD_PREFIX + landing_page["uri"] + "/" + file
        this_distribution.downloadURL = download_url

        # TODO - we're doing this in two places, pull it out (see the sketch after this example)
        # we've had some issues with type-guessing, so we're getting the media type
        # by checking the download url ending
        if download_url.endswith(".csdb"):
            media_type = CSDB
        elif download_url.endswith(".csv"):
            media_type = CSV
        elif download_url.endswith(".xlsx"):
            media_type = Excel
        elif download_url.endswith(".ods"):
            media_type = ODS
        else:
            media_type, _ = mimetypes.guess_type(download_url)
        this_distribution.mediaType = media_type

        this_distribution.title = title

        # inherit metadata from the dataset where it hasn't explicitly been changed
        this_distribution.description = scraper.dataset.description

        logging.debug(
            "Created distribution for download '{}'.".format(download_url))
        scraper.distributions.append(this_distribution)
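
The extension check above is repeated verbatim in example #5 below, which is what the TODO refers to. A minimal sketch of the pulled-out helper, assuming the CSDB/CSV/Excel/ODS constants from the surrounding module hold the relevant media types:

def media_type_from_url(download_url):
    # Map the known ONS file extensions to media-type constants, falling
    # back to mimetypes guessing for anything unrecognised.
    extension_map = {
        ".csdb": CSDB,
        ".csv": CSV,
        ".xlsx": Excel,
        ".ods": ODS,
    }
    for extension, media_type in extension_map.items():
        if download_url.endswith(extension):
            return media_type
    guessed, _ = mimetypes.guess_type(download_url)
    return guessed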
Code example #5
def handler_dataset_landing_page(scraper, landing_page, tree):

    # A dataset landing page has URIs to one or more datasets via its "datasets" field.
    # We need to look at each in turn. This is an example one as json:
    # https://www.ons.gov.uk//businessindustryandtrade/internationaltrade/datasets/uktradeingoodsbyclassificationofproductbyactivity/current/data
    for dataset_page_url in landing_page["datasets"]:

        # Get the page as json. Throw an informative error if we fail for whatever reason
        dataset_page_json_url = ONS_PREFIX + dataset_page_url["uri"] + "/data"
        r = scraper.session.get(dataset_page_json_url)
        if r.status_code != 200:
            raise ValueError("Scrape of url '{}' failed with status code {}." \
                             .format(dataset_page_json_url, r.status_code))

        # get the response json into a python dict
        this_dataset_page = r.json()

        # start a list of dataset versions (to hold current + all previous)
        # we'll start with just the current/latest version
        versions_list = [ONS_PREFIX + this_dataset_page["uri"] + "/data"]

        # if there are older versions of this dataset available,
        # iterate and add their URIs to the versions list
        try:
            for version_as_dict in this_dataset_page["versions"]:
                versions_list.append(ONS_PREFIX + version_as_dict["uri"] +
                                     "/data")
        except KeyError:
            logging.debug(
                "No older versions found for {}.".format(dataset_page_url))

        # NOTE - we've had an issue with the very latest dataset not being updated on the previous versions
        # page (the page we're getting the distributions from) so we're taking the details for it from
        # the landing page to use as a fallback in that scenario.

        # iterate through the lot, we're aiming to create at least one distribution object for each
        for i, version_url in enumerate(versions_list):
            logging.debug(
                "Identified distribution url, building distribution object for: "
                + version_url)

            r = scraper.session.get(version_url)
            if r.status_code != 200:

                # If we've got a 404 on the latest, fallback on using the details from the
                # landing page instead
                if r.status_code == 404 and i == len(versions_list) - 1:
                    handler_dataset_landing_page_fallback(
                        scraper, this_dataset_page, tree)
                    continue
                else:
                    raise Exception("Scraper unable to acquire the page: {} with http code {}." \
                                .format(version_url, r.status_code))

            # get the response json into a python dict
            this_page = r.json()

            # Get the download urls; if there's more than one format of this
            # version of the dataset, each forms a separate distribution
            distribution_formats = this_page["downloads"]
            for dl in distribution_formats:

                # Create an empty Distribution object to represent this distribution
                # from here we're just looking to fill in its fields
                this_distribution = Distribution(scraper)

                # Every distribution SHOULD have a release date, but it seems they're not
                # always included. If one is missing, continue but throw a warning.
                try:
                    release_date = this_page["description"]["releaseDate"]
                    this_distribution.issued = parse_as_local_date(
                        release_date.strip())
                except KeyError:
                    logging.warning(
                        "Download {} of dataset version {} of dataset {} does "
                        "not have a release date".format(
                            distribution_formats, version_url,
                            dataset_page_url))

                # I don't trust dicts with one constant field (they don't make sense), so just in case...
                try:
                    download_url = ONS_DOWNLOAD_PREFIX + this_page[
                        "uri"] + "/" + dl["file"].strip()
                    this_distribution.downloadURL = download_url
                except Exception:
                    # raise up this time. If we don't have a downloadURL it's not much use
                    raise ValueError("Unable to create complete download url for {} on page {}" \
                                     .format(dl, version_url))

                # we've had some issues with type-guessing so we're getting the media type
                # by checking the download url ending
                if download_url.endswith(".csdb"):
                    media_type = CSDB
                elif download_url.endswith(".csv"):
                    media_type = CSV
                elif download_url.endswith(".xlsx"):
                    media_type = Excel
                elif download_url.endswith(".ods"):
                    media_type = ODS
                else:
                    media_type, _ = mimetypes.guess_type(download_url)

                this_distribution.mediaType = media_type

                # inherit metadata from the dataset where it hasn't explicitly been changed
                this_distribution.title = scraper.dataset.title
                this_distribution.description = scraper.dataset.description

                logging.debug("Created distribution for download '{}'.".format(
                    download_url))
                scraper.distributions.append(this_distribution)
Code example #6
def scrape(scraper, tree):

    # A quick safety in case people are using this scraper incorrectly
    if "?search=" not in scraper.uri:
        raise Exception(
            """Aborting. This scraper is intended to run off the DCNI seach page.
        Please modify your url to use the site search.

        If in doubt, work from this page, change the quoted search text and capture the url
        https://www.communities-ni.gov.uk/publications/topic/8182?search=%22Northern+Ireland+Housing+Bulletin%22&Search-exposed-form=Go&sort_by=field_published_date
        """)

    scraper.dataset.publisher = GOV[
        'department-for-communities-northern-ireland']
    scraper.dataset.license = 'http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/'

    # We're taking each search result as a distribution
    search_result_urls = []
    for linkObj in tree.xpath("//h3/a"):

        # lxml elements expose attributes via .get(), which gives us the url
        href = linkObj.get("href")

        # Add to distributions url list, get the root from the original url
        search_result_urls.append(
            scraper.uri.split("/publications/topic")[0] + href)

    # keep track of dates issued so we can find the latest
    last_issued = None

    for url in search_result_urls:

        # Get the distribution page
        page = scraper.session.get(url)
        distro_tree = html.fromstring(page.text)

        # Get any spreadsheets (ods or excel) linked on the page
        spreadsheet_files = [
            x for x in distro_tree.xpath('//a/@href')
            if x.lower().endswith(".ods") or x.lower().endswith(".xlsx")
        ]

        # Now map them together, so we have the supporting info for each relevant download
        # TODO - make better, kinda nasty
        title_download_map = {}
        for spreadsheet_file in spreadsheet_files:

            # Create our new distribution object
            this_distribution = Distribution(scraper)

            # Identify the correct title
            this_distribution.title = distro_tree.xpath(
                "//a[@href='{}']/text()".format(spreadsheet_file))[0]
            this_distribution.downloadURL = spreadsheet_file

            if this_distribution.downloadURL.lower().endswith(".xlsx"):
                media_type = Excel
            elif this_distribution.downloadURL.lower().endswith(".ods"):
                media_type = ODS
            else:
                raise Exception(
                    "Aborting. Unexpected media type for url: '{}'".format(
                        this_distribution.downloadURL))
            this_distribution.mediaType = media_type

            # Published and modified time
            this_distribution.issued = isoparse(
                distro_tree.xpath(
                    "//*[@property='article:published_time']/@content")
                [0]).date()
            this_distribution.modified = isoparse(
                distro_tree.xpath(
                    "//*[@property='article:modified_time']/@content")
                [0]).date()
            this_distribution.description = distro_tree.xpath(
                "//*[@class='field-summary']/p/text()")[0]

            if last_issued is None:
                last_issued = this_distribution.issued
            elif this_distribution.issued > last_issued:
                last_issued = this_distribution.issued

            scraper.distributions.append(this_distribution)

    # Whatever date the latest distribution was issued, is the last issued date for this "dataset"
    scraper.dataset.issued = last_issued
Code example #7
File: ons.py Project: GSS-Cogs/gss-utils
def handler_dataset_landing_page(scraper, landing_page, tree):
    # A dataset landing page has URIs to one or more datasets via its "datasets" field.
    # We need to look at each in turn. This is an example one as json:
    # https://www.ons.gov.uk//businessindustryandtrade/internationaltrade/datasets/uktradeingoodsbyclassificationofproductbyactivity/current/data
    for dataset_page_url in landing_page["datasets"]:

        this_dataset_page = get_dict_from_json_url(ONS_PREFIX + dataset_page_url["uri"] + "/data", scraper)

        # create a list, with each entry a dict of a versions url and update date
        versions_dict_list = []

        # Where the dataset is versioned, use the versions as the distributions
        try:
            all_versions = this_dataset_page["versions"]
        except KeyError:
            all_versions = []

        # Release dates:
        # --------------
        # ONS does this odd thing where each version on the /data api
        # has an updateDate field which is actually the date THE DATA
        # WAS SUPERSEDED (i.e. the release date of the NEXT version of the data).
        # ......this takes a bit of unpicking.

        # If no initial release date for the dataset has been provided,
        # we're just going to ignore v1; we don't have a use for it,
        # and with no provided release date ... not a lot to be done
        initial_release = this_dataset_page["description"].get("releaseDate", None)

        next_release = None
        # Where there's multiple versions, iterate all and populate a list
        if len(all_versions) != 0:
            try:
                for version_as_dict in all_versions:
                    if next_release is None:
                        release_date = initial_release
                    else:
                        release_date = next_release

                    if release_date is not None:
                        versions_dict_list.append({
                            "url": ONS_PREFIX + version_as_dict["uri"] + "/data",
                            "issued": release_date
                        })
                    next_release = version_as_dict["updateDate"]
            except KeyError:
                logging.debug("No older versions found for {}.".format(dataset_page_url))

        # Add the current release
        versions_dict_list.append({
            "url": ONS_PREFIX + this_dataset_page["uri"] + "/data",
            "issued": initial_release if next_release is None else next_release
        })

        # NOTE - we've had an issue with the very latest dataset not being updated on the previous versions
        # page (the page we're getting the distributions from) so we're taking the details for it from
        # the landing page to use as a fallback in that scenario.

        # iterate through the lot, we're aiming to create at least one distribution object for each
        for i, version_dict in enumerate(versions_dict_list):

            version_url = version_dict["url"]
            issued = version_dict["issued"]

            logging.debug("Identified distribution url, building distribution object for: " + version_url)

            # get the response json into a python dict
            this_page = get_dict_from_json_url(version_url, scraper)

            # Get the download urls; if there's more than one format of this
            # version of the dataset, each forms a separate distribution
            distribution_formats = this_page["downloads"]
            for dl in distribution_formats:

                # Create an empty Distribution object to represent this distribution
                # from here we're just looking to fill in its fields
                this_distribution = Distribution(scraper)
                this_distribution.issued = parse_as_local_date(issued)

                # I don't trust dicts with one constant field (they don't make sense), so just in case...
                try:
                    download_url = ONS_DOWNLOAD_PREFIX + this_page["uri"] + "/" + dl["file"].strip()
                    this_distribution.downloadURL = download_url
                except Exception:
                    # Throw a warning and abandon this distribution; if we don't have a downloadURL it's not much use
                    logging.warning("Unable to create complete download url for {} on page {}"
                                    .format(dl, version_url))
                    continue

                # we've had some issues with type-guessing so we're getting the media type
                # by checking the download url ending
                if download_url.endswith(".csdb"):
                    media_type = CSDB
                else:
                    media_type, _ = mimetypes.guess_type(download_url)

                this_distribution.mediaType = media_type
                
                # inherit metadata from the dataset where it hasn't explicitly been changed
                this_distribution.title = scraper.dataset.title
                this_distribution.description = scraper.dataset.description

                logging.debug("Created distribution for download '{}'.".format(download_url))
                scraper.distributions.append(this_distribution)
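
get_dict_from_json_url isn't defined on this page either; judging from the inline equivalent in example #5, a plausible sketch is:

def get_dict_from_json_url(url, scraper):
    # Assumed behaviour: fetch a json url via the scraper's session and
    # return the decoded payload, raising on any non-200 response.
    r = scraper.session.get(url)
    if r.status_code != 200:
        raise ValueError("Scrape of url '{}' failed with status code {}."
                         .format(url, r.status_code))
    return r.json()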
Code example #8
File: onscmd.py Project: jwestw/gss-utils
def scrape(scraper, tree):
    """
    This is a scraper intended to use the ONS cmd (customise my data) functionality.

    :param scraper:         the Scraper object
    :param tree:            lxml tree
    :return:
    """

    dataset_document = request_json_data(scraper, scraper.uri)

    scraper.dataset.title = dataset_document["id"]
    scraper.dataset.description = dataset_document["description"]

    # Need to get issued from the associated publication
    publication_document = request_json_data(
        scraper, dataset_document["publications"][0]["href"] + "/data")
    scraper.dataset.issued = parse(
        publication_document["description"]["releaseDate"])

    # Only take next_release if it's a date
    try:
        next_release = parse(dataset_document["next_release"])
        scraper.dataset.updateDueOn = next_release
    except (KeyError, ValueError):
        pass  # it's fine, "unknown" etc

    # Theoretically you can have more than one contact, but I'm just taking the first
    scraper.dataset.contactPoint = "mailto:" + dataset_document["contacts"][0][
        "email"].strip()

    scraper.dataset.publisher = 'https://www.gov.uk/government/organisations/office-for-national-statistics'
    scraper.dataset.license = "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/"

    edition_documents = request_json_data(scraper, scraper.uri + "/editions")

    for edition_document in edition_documents["items"]:

        edition_name = edition_document["edition"]

        version_documents = request_json_data(
            scraper, edition_document["links"]["versions"]["href"])

        for version_document in version_documents["items"]:

            version_name = str(version_document["version"])

            this_distribution = Distribution(scraper)

            this_distribution.issued = version_document["release_date"]
            this_distribution.downloadURL = version_document["downloads"][
                "csv"]["href"]
            this_distribution.mediaType = CSV

            this_distribution.title = scraper.dataset.title + ", {}, version {}".format(
                edition_name, version_name)
            this_distribution.description = scraper.dataset.description
            this_distribution.contactPoint = scraper.dataset.contactPoint

            logging.debug("Created distribution for download '{}'.".format(
                this_distribution.downloadURL))
            scraper.distributions.append(this_distribution)