def execute_async(self, table_name=None, table_mode='create', use_cache=True,
                  priority='interactive', allow_large_results=False):
  """Initiate the query and return a QueryJob.

  Args:
    table_name: the result table name as a string or TableName; if None (the
        default), then a temporary table will be used.
    table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the
        default), the request will fail if the table exists.
    use_cache: whether to use past query results or ignore cache. Has no
        effect if destination is specified (default True).
    priority: one of 'batch' or 'interactive' (default). 'interactive' jobs
        should be scheduled to run quickly but are subject to rate limits;
        'batch' jobs could be delayed by as much as three hours but are not
        rate-limited.
    allow_large_results: whether to allow large results; i.e. compressed data
        over 100MB. This is slower and requires a table_name to be specified
        (default False).
  Returns:
    A QueryJob.
  Raises:
    Exception if query could not be executed.
  """
  # FIX: the docstring contract is 'batch'/'interactive', but the original
  # code tested priority == 'low', so batch priority could never be enabled.
  batch = priority == 'batch'
  append = table_mode == 'append'
  overwrite = table_mode == 'overwrite'
  if table_name is not None:
    table_name = _utils.parse_table_name(table_name, self._api.project_id)
  # Removed a no-op `except Exception as e: raise e` wrapper; any insert
  # failure simply propagates unchanged, as before.
  query_result = self._api.jobs_insert_query(
      self._sql, self._code, self._imports, table_name=table_name,
      append=append, overwrite=overwrite, use_cache=use_cache, batch=batch,
      allow_large_results=allow_large_results,
      table_definitions=self._external_tables)
  if 'jobReference' not in query_result:
    raise Exception('Unexpected query response.')
  job_id = query_result['jobReference']['jobId']
  if not table_name:
    try:
      # For temporary tables, take the server-assigned destination table.
      destination = query_result['configuration']['query']['destinationTable']
      table_name = (destination['projectId'], destination['datasetId'],
                    destination['tableId'])
    except KeyError:
      # The query was in error; surface the server-reported errors.
      raise Exception(
          _utils.format_query_errors(query_result['status']['errors']))
  return _query_job.QueryJob(job_id, table_name, self._sql,
                             context=self._context)
def execute_async(self, table_name=None, table_mode='create', use_cache=True,
                  priority='interactive', allow_large_results=False):
  """Initiate the query and return a QueryJob.

  Args:
    table_name: the result table name as a string or TableName; if None (the
        default), then a temporary table will be used.
    table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the
        default), the request will fail if the table exists.
    use_cache: whether to use past query results or ignore cache. Has no
        effect if destination is specified (default True).
    priority: one of 'batch' or 'interactive' (default). 'interactive' jobs
        should be scheduled to run quickly but are subject to rate limits;
        'batch' jobs could be delayed by as much as three hours but are not
        rate-limited.
    allow_large_results: whether to allow large results; i.e. compressed data
        over 100MB. This is slower and requires a table_name to be specified
        (default False).
  Returns:
    A QueryJob.
  Raises:
    Exception if query could not be executed.
  """
  # FIX: the docstring contract is 'batch'/'interactive', but the original
  # code tested priority == 'low', so batch priority could never be enabled.
  batch = priority == 'batch'
  append = table_mode == 'append'
  overwrite = table_mode == 'overwrite'
  if table_name is not None:
    table_name = _utils.parse_table_name(table_name, self._api.project_id)
  # Removed a no-op `except Exception as e: raise e` wrapper; any insert
  # failure simply propagates unchanged, as before.
  query_result = self._api.jobs_insert_query(
      self._sql, self._code, self._imports, table_name=table_name,
      append=append, overwrite=overwrite, use_cache=use_cache, batch=batch,
      allow_large_results=allow_large_results,
      table_definitions=self._external_tables)
  if 'jobReference' not in query_result:
    raise Exception('Unexpected response from server')
  job_id = query_result['jobReference']['jobId']
  if not table_name:
    try:
      # For temporary tables, take the server-assigned destination table.
      destination = query_result['configuration']['query']['destinationTable']
      table_name = (destination['projectId'], destination['datasetId'],
                    destination['tableId'])
    except KeyError:
      # The query was in error; surface the server-reported errors.
      raise Exception(
          _utils.format_query_errors(query_result['status']['errors']))
  return _query_job.QueryJob(job_id, table_name, self._sql,
                             context=self._context)
def __init__(self, name, context=None):
  """Initializes an instance of a Table object.

  The Table need not exist yet.

  Args:
    name: the name of the table either as a string or a 3-part tuple
        (projectid, datasetid, name). If a string, it must have the form
        '<project>:<dataset>.<table>' or '<dataset>.<table>'.
    context: an optional Context object providing project_id and credentials.
        If a specific project id or credentials are unspecified, the default
        ones configured at the global level are used.
  Raises:
    Exception if the name is invalid.
  """
  # Fall back to the globally configured context when none was supplied.
  self._context = gcp.Context.default() if context is None else context
  self._api = _api.Api(self._context)
  self._name_parts = _utils.parse_table_name(name, self._api.project_id)
  self._full_name = "%s:%s.%s%s" % self._name_parts
  # Lazily populated metadata and result-page cache.
  self._info = None
  self._cached_page = None
  self._cached_page_index = 0
def __init__(self, name, context=None):
  """Initializes an instance of a Table object.

  The Table need not exist yet.

  Args:
    name: the name of the table either as a string or a 3-part tuple
        (projectid, datasetid, name). If a string, it must have the form
        '<project>:<dataset>.<table>' or '<dataset>.<table>'.
    context: an optional Context object providing project_id and credentials.
        If a specific project id or credentials are unspecified, the default
        ones configured at the global level are used.
  Raises:
    Exception if the name is invalid.
  """
  if context is None:
    # No explicit context given: use the global default.
    context = gcp.Context.default()
  self._context = context
  self._api = _api.Api(context)
  parts = _utils.parse_table_name(name, self._api.project_id)
  self._name_parts = parts
  self._full_name = '%s:%s.%s%s' % parts
  # Lazily populated metadata and result-page cache.
  self._info = None
  self._cached_page = None
  self._cached_page_index = 0