Example #1
def _item_to_table(iterator, resource):
    """Convert a JSON table to the native object.

    :type iterator: :class:`~google.api.core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type resource: dict
    :param resource: An item to be converted to a table.

    :rtype: :class:`~google.cloud.bigquery.table.Table`
    :returns: The next table in the page.
    """
    return Table.from_api_repr(resource, iterator.dataset)
Example #2
def _item_to_table(iterator, resource):
    """Convert a JSON table to the native object.

    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type resource: dict
    :param resource: An item to be converted to a table.

    :rtype: :class:`~google.cloud.bigquery.table.Table`
    :returns: The next table in the page.
    """
    return Table.from_api_repr(resource, iterator.dataset)
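Both versions of the helper follow the same callback pattern: a page iterator fetches raw JSON pages and delegates per-item conversion to a function like _item_to_table. Below is a minimal sketch of how such a converter might be wired up, assuming the HTTPIterator from the page_iterator module (named google.api.core or google.api_core depending on the library version); the client, project, dataset and dataset_name names are placeholders, and constructor arguments vary between versions.

from google.api_core import page_iterator

iterator = page_iterator.HTTPIterator(
    client=client,
    api_request=client._connection.api_request,
    path='/projects/%s/datasets/%s/tables' % (project, dataset_name),
    item_to_value=_item_to_table,  # converts each raw dict into a Table
    items_key='tables',            # key under which each response page lists its items
)
iterator.dataset = dataset  # _item_to_table reads this attribute off the iterator
for table in iterator:
    process(table)  # hypothetical consumer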
Example #3
    def list_tables(self, max_results=None, page_token=None):
        """List tables for the project associated with this client.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/tables/list

        :type max_results: int
        :param max_results: maximum number of tables to return. If not
                            passed, defaults to a value set by the API.

        :type page_token: string
        :param page_token: opaque marker for the next "page" of tables. If
                           not passed, the API will return the first page of
                           tables.

        :rtype: tuple, (list, str)
        :returns: list of :class:`google.cloud.bigquery.table.Table`, plus a
                  "next page token" string:  if not ``None``, indicates that
                  more tables can be retrieved with another call (pass that
                  value as ``page_token``).
        """
        params = {}

        if max_results is not None:
            params['maxResults'] = max_results

        if page_token is not None:
            params['pageToken'] = page_token

        path = '/projects/%s/datasets/%s/tables' % (self.project, self.name)
        connection = self._client.connection
        resp = connection.api_request(method='GET',
                                      path=path,
                                      query_params=params)
        tables = [
            Table.from_api_repr(resource, self)
            for resource in resp.get('tables', ())
        ]
        return tables, resp.get('nextPageToken')
Example #4
    def list_tables(self, max_results=None, page_token=None):
        """List tables for the project associated with this client.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/tables/list

        :type max_results: int
        :param max_results: maximum number of tables to return. If not
                            passed, defaults to a value set by the API.

        :type page_token: str
        :param page_token: opaque marker for the next "page" of tables. If
                           not passed, the API will return the first page of
                           tables.

        :rtype: tuple, (list, str)
        :returns: list of :class:`google.cloud.bigquery.table.Table`, plus a
                  "next page token" string:  if not ``None``, indicates that
                  more tables can be retrieved with another call (pass that
                  value as ``page_token``).
        """
        params = {}

        if max_results is not None:
            params['maxResults'] = max_results

        if page_token is not None:
            params['pageToken'] = page_token

        path = '/projects/%s/datasets/%s/tables' % (self.project, self.name)
        connection = self._client.connection
        resp = connection.api_request(method='GET', path=path,
                                      query_params=params)
        tables = [Table.from_api_repr(resource, self)
                  for resource in resp.get('tables', ())]
        return tables, resp.get('nextPageToken')
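Because list_tables returns both the current page of Table objects and the "next page token" string, walking every table is a short loop. A hypothetical usage sketch, assuming a dataset object that exposes this method:

all_tables = []
page_token = None
while True:
    tables, page_token = dataset.list_tables(max_results=100, page_token=page_token)
    all_tables.extend(tables)
    if page_token is None:  # no nextPageToken in the response, so this was the last page
        break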
Example #5
# Imports assumed for this standalone snippet; the names follow the older
# google-cloud-python API used in the function body. TEMP_SOURCE_NAME and
# TEMP_DATASET are module-level constants defined elsewhere.
from google.cloud import bigquery
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.table import Table
from google.cloud.exceptions import NotFound


def create_temporary_data_source(source_uri):
    """Create a temporary data source so BigQuery can query the CSV in
    Google Cloud Storage.

    Nothing like this is currently implemented in the
    google-cloud-python library.

    Returns a table reference suitable for use in a BigQuery SQL
    query (legacy format).

    """
    schema = [
        {
            "name": "Regional_Office_Name",
            "type": "string"
        },
        {
            "name": "Regional_Office_Code",
            "type": "string"
        },
        {
            "name": "Area_Team_Name",
            "type": "string"
        },
        {
            "name": "Area_Team_Code",
            "type": "string",
            "mode": "required"
        },
        {
            "name": "PCO_Name",
            "type": "string"
        },
        {
            "name": "PCO_Code",
            "type": "string"
        },
        {
            "name": "Practice_Name",
            "type": "string"
        },
        {
            "name": "Practice_Code",
            "type": "string",
            "mode": "required"
        },
        {
            "name": "BNF_Code",
            "type": "string",
            "mode": "required"
        },
        {
            "name": "BNF_Description",
            "type": "string",
            "mode": "required"
        },
        {
            "name": "Items",
            "type": "integer",
            "mode": "required"
        },
        {
            "name": "Quantity",
            "type": "integer",
            "mode": "required"
        },
        {
            "name": "ADQ_Usage",
            "type": "float"
        },
        {
            "name": "NIC",
            "type": "float",
            "mode": "required"
        },
        {
            "name": "Actual_Cost",
            "type": "float",
            "mode": "required"
        },
    ]
    resource = {
        "tableReference": {
            "tableId": TEMP_SOURCE_NAME
        },
        "externalDataConfiguration": {
            "csvOptions": {
                "skipLeadingRows": "1"
            },
            "sourceFormat": "CSV",
            "sourceUris": [source_uri],
            "schema": {
                "fields": schema
            }
        }
    }
    client = bigquery.client.Client(project='ebmdatalab')
    # delete the table if it exists
    dataset = Dataset("tmp_eu", client)
    table = Table.from_api_repr(resource, dataset)
    try:
        table.delete()
    except NotFound:
        pass
    # Now create it
    path = "/projects/ebmdatalab/datasets/%s/tables" % TEMP_DATASET
    client._connection.api_request(method='POST', path=path, data=resource)
    return "[ebmdatalab:%s.%s]" % (TEMP_DATASET, TEMP_SOURCE_NAME)