import json

import httplib2
from oauth2client.client import GoogleCredentials


def main():
    # This is a Python 2 sample (raw_input); it authenticates with
    # Application Default Credentials and loads a local data file into a
    # BigQuery table via a media-upload POST.
    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())

    projectId = raw_input('Enter the project ID: ')
    datasetId = raw_input('Enter a dataset ID: ')
    tableId = raw_input('Enter a table name to load the data to: ')
    schema_path = raw_input(
        'Enter the path to the schema file for the table: ')

    with open(schema_path, 'r') as schema_file:
        schema = schema_file.read()

    data_path = raw_input('Enter the path to the data file: ')

    with open(data_path, 'r') as data_file:
        data = data_file.read()

    resp, content = make_post(http, schema, data, projectId, datasetId,
                              tableId)

    if resp.status == 200:
        job_resource = json.loads(content)
        service = get_service(credentials)
        # Block until the load job completes before reporting success.
        poll_job(service, **job_resource['jobReference'])
        print('Success!')
    else:
        print('Http error code: {}'.format(resp.status))
def run(cloud_storage_path, projectId, datasetId, tableId, num_retries,
        interval):
    """Export a BigQuery table to Cloud Storage and wait for the job."""
    bigquery = get_service()
    resource = export_table(bigquery, cloud_storage_path, projectId, datasetId,
                            tableId, num_retries)
    poll_job(bigquery, resource['jobReference']['projectId'],
             resource['jobReference']['jobId'], interval, num_retries)
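# export_table is not shown in this excerpt. A hedged sketch of what it
# plausibly does: insert an extract job whose configuration names the source
# table and the gs:// destination URI. The body fields below follow the
# documented BigQuery v2 extract-job configuration.
def export_table(service, cloud_storage_path, projectId, datasetId, tableId,
                 num_retries):
    job_data = {
        'configuration': {
            'extract': {
                'sourceTable': {
                    'projectId': projectId,
                    'datasetId': datasetId,
                    'tableId': tableId,
                },
                'destinationUris': [cloud_storage_path],
            }
        }
    }
    return service.jobs().insert(
        projectId=projectId, body=job_data).execute(num_retries=num_retries)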
def run(source_schema, source_csv, projectId, datasetId, tableId, interval,
        num_retries):
    """Load a CSV file into a BigQuery table and wait for the job."""
    service = get_service()

    job = load_table(service, source_schema, source_csv, projectId, datasetId,
                     tableId, num_retries)

    poll_job(service, job['jobReference']['projectId'],
             job['jobReference']['jobId'], interval, num_retries)
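# load_table is defined elsewhere in the sample. A minimal sketch, under the
# assumption that source_csv is a gs:// URI and source_schema is a parsed
# list of field definitions; it inserts a load job using the documented
# BigQuery v2 load-job configuration.
def load_table(service, source_schema, source_csv, projectId, datasetId,
               tableId, num_retries):
    job_data = {
        'configuration': {
            'load': {
                'sourceUris': [source_csv],
                'schema': {'fields': source_schema},
                'destinationTable': {
                    'projectId': projectId,
                    'datasetId': datasetId,
                    'tableId': tableId,
                },
            }
        }
    }
    return service.jobs().insert(
        projectId=projectId, body=job_data).execute(num_retries=num_retries)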
def run(project_id, dataset_id, table_id, rows, num_retries):
    """Stream rows into a BigQuery table, yielding each API response."""
    service = get_service()
    for row in rows:
        response = stream_row_to_bigquery(service, project_id, dataset_id,
                                          table_id, row, num_retries)
        yield json.dumps(response)
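# stream_row_to_bigquery is not included above. A sketch, assuming the
# tabledata().insertAll streaming API; the insertId (a random UUID here, an
# assumption of this sketch) lets BigQuery deduplicate retried rows.
import uuid


def stream_row_to_bigquery(service, project_id, dataset_id, table_id, row,
                           num_retries):
    insert_all_data = {
        'rows': [{
            'json': row,
            'insertId': str(uuid.uuid4()),
        }]
    }
    return service.tabledata().insertAll(
        projectId=project_id,
        datasetId=dataset_id,
        tableId=table_id,
        body=insert_all_data).execute(num_retries=num_retries)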
def run(project_id, query, timeout, num_retries):
    """Run a synchronous query and yield each page of result rows."""
    service = get_service()
    response = sync_query(service,
                          project_id,
                          query,
                          timeout,
                          num_retries)

    # Page through the results of the completed query job; paging is a
    # helper defined elsewhere in the sample.
    for page in paging(service,
                       service.jobs().getQueryResults,
                       num_retries=num_retries,
                       **response['jobReference']):
        yield json.dumps(page['rows'])
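# sync_query and paging are defined elsewhere. Hedged sketches: sync_query
# wraps the documented jobs().query call (timeout is assumed to be in
# milliseconds, matching the API's timeoutMs field); paging follows
# pageToken until the service stops returning one.
def sync_query(service, project_id, query, timeout=10000, num_retries=5):
    query_data = {
        'query': query,
        'timeoutMs': timeout,
    }
    return service.jobs().query(
        projectId=project_id,
        body=query_data).execute(num_retries=num_retries)


def paging(service, request_func, num_retries=5, **kwargs):
    has_next = True
    while has_next:
        response = request_func(**kwargs).execute(num_retries=num_retries)
        if 'pageToken' in response:
            kwargs['pageToken'] = response['pageToken']
        else:
            has_next = False
        yield response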
def run(project_id, query_string, batch, num_retries, interval):
    """Run an asynchronous (optionally batch-priority) query, wait for it
    to finish, then yield each page of result rows."""
    service = get_service()

    query_job = async_query(service,
                            project_id,
                            query_string,
                            batch,
                            num_retries)

    poll_job(service,
             query_job['jobReference']['projectId'],
             query_job['jobReference']['jobId'],
             interval,
             num_retries)

    for page in paging(service,
                       service.jobs().getQueryResults,
                       num_retries=num_retries,
                       **query_job['jobReference']):
        yield json.dumps(page['rows'])
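# async_query is not shown here. A sketch, assuming it inserts a query job
# whose priority is BATCH or INTERACTIVE depending on the batch flag; both
# values come from the documented v2 query-job configuration.
def async_query(service, project_id, query_string, batch=False,
                num_retries=5):
    job_data = {
        'configuration': {
            'query': {
                'query': query_string,
                'priority': 'BATCH' if batch else 'INTERACTIVE',
            }
        }
    }
    return service.jobs().insert(
        projectId=project_id, body=job_data).execute(num_retries=num_retries)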