Example #1
def get_http():
    http = httplib2.Http()
    GoogleCredentials.get_application_default().create_scoped([
        'https://www.googleapis.com/auth/firebase.database',
        'https://www.googleapis.com/auth/userinfo.email'
    ]).authorize(http)
    return http
def get_storage_resource():
    try:
        credentials = GoogleCredentials.get_application_default()
    except:
        # todo: replace filename with variable
        credentials = GoogleCredentials.from_stream("privatekey.json").create_scoped(STORAGE_SCOPES)
    return discovery.build('storage', 'v1', credentials=credentials)
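A brief usage sketch for the helper above (not part of the original example): the bucket name is a placeholder, and objects().list is the standard Cloud Storage JSON API v1 call.

service = get_storage_resource()
# 'my-example-bucket' is a placeholder; substitute a bucket the credentials can read.
resp = service.objects().list(bucket='my-example-bucket').execute()
for obj in resp.get('items', []):
    print(obj['name'])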
Example #3
def get_service():
    from googleapiclient.discovery import build
    from oauth2client.client import GoogleCredentials
    credentials = GoogleCredentials.get_application_default()
    if credentials.create_scoped_required():
        credentials = credentials.create_scoped('https://www.googleapis.com/auth/bigquery')
    return build('bigquery', 'v2', credentials=credentials)
def hello():

    credentials = AppAssertionCredentials([])
    client = datastore.Client(project='mq-cloud-prototyping-3', credentials=credentials)
    sys.stdout.write(credentials.to_json())

    query = client.query(kind='Person')
    res = query.fetch()
    results = list(res)
    sys.stdout.write(str(results))

    return credentials.to_json()

    # NOTE: everything below this return is unreachable; it is kept from the
    # original example for reference.
    try:
        token = ''

        # OAuth
        O_AUTH_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'

        credentials = GoogleCredentials.get_application_default()
        if credentials.create_scoped_required():
            credentials = credentials.create_scoped(PUBSUB_SCOPES)
        http = httplib2.Http()
        credentials.authorize(http)

        cl = discovery.build('pubsub', 'v1', http=http)
        return credentials.to_json()

        credentials = GoogleCredentials.get_application_default()
        credentials = credentials.create_scoped([O_AUTH_EMAIL_SCOPE])
        http = httplib2.Http()
        credentials.authorize(http)
        return credentials.to_json()
        #if not http:
        #    http = httplib2.Http()
        #credentials.authorize(http)

        # temporary hard-coded token
        #token = 'ya29.CjjlAlrvqUwXrujCnJuqa08HTtmNilyP7K1GGrHQ40Gt489H6NGT9WQAxEL92OSQ6anGYeFPRcvI4g'

        tokenBearer = 'Bearer %s' % token
        url = 'https://admin-dot-mq-vouchers.appspot.com/api/communities/mtv1/campaigns?page=0&size=1000&sorting=campaignName,ASC'
        req = urllib2.Request(url, headers={'Content-Type': 'application/json', 'Authorization': tokenBearer})
        f = urllib2.urlopen(req)
        response = f.read()
        sys.stdout.write(str(response))
        respjson = json.loads(response)

        f.close()
        #respjson = '3333'
        #sys.stdout.write(str(all))
        return str(response)
    except urllib2.HTTPError as error:
        return 'get failed %s' % error
Example #5
File: google.py Project: Ormod/pghoard
 def __init__(self, project_id, bucket_name, credential_file=None):
     BaseTransfer.__init__(self)
     self.project_id = project_id
     if credential_file:
         creds = GoogleCredentials.from_stream(credential_file)
     else:
         creds = GoogleCredentials.get_application_default()
     gs = build("storage", "v1", credentials=creds)
     self.gs_buckets = gs.buckets()  # pylint: disable=no-member
     self.gs_objects = gs.objects()  # pylint: disable=no-member
     self.bucket_name = self.get_or_create_bucket(bucket_name)
     self.log.debug("GoogleTransfer initialized")
 def test_get_application_default_environment_not_set_up(self):
   # It is normal for this test to fail if run inside
   # a Google Compute Engine VM or after 'gcloud auth login' command
    # has been executed on a non-Windows machine.
   os.environ['SERVER_SOFTWARE'] = ''
   os.environ[GOOGLE_APPLICATION_CREDENTIALS] = ''
   os.environ['APPDATA'] = ''
   # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
   try:
     GoogleCredentials.get_application_default()
     self.fail('An exception was expected!')
   except ApplicationDefaultCredentialsError as error:
     self.assertEqual(ADC_HELP_MSG, str(error))
 def test_get_default_from_environment_variable_malformed_file(self):
   os.environ['SERVER_SOFTWARE'] = ''
   environment_variable_file = datafile(
       os.path.join('gcloud', 'credentials_default_malformed_3.json'))
   os.environ[GOOGLE_CREDENTIALS_DEFAULT] = environment_variable_file
   # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
   try:
     GoogleCredentials.get_default()
     self.fail('An exception was expected!')
   except DefaultCredentialsError as error:
     self.assertTrue(str(error).startswith(
         'An error was encountered while reading json file: ' +
         environment_variable_file + ' (pointed to by ' +
         GOOGLE_CREDENTIALS_DEFAULT + ' environment variable):'))
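The two tests above show how oauth2client resolves Application Default Credentials from environment variables. As a minimal sketch (the key-file path is a placeholder), pointing GOOGLE_APPLICATION_CREDENTIALS at a service-account JSON file is the usual way to make get_application_default() succeed outside GCE or App Engine:

import os
from oauth2client.client import GoogleCredentials

# Placeholder path to a downloaded service-account key file.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/path/to/service-account.json'
credentials = GoogleCredentials.get_application_default()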
Example #8
def main(photo_file):
  '''Run a label request on a single image'''

  API_DISCOVERY_FILE = 'https://vision.googleapis.com/$discovery/rest?version=v1'
  http = httplib2.Http()

  credentials = GoogleCredentials.get_application_default().create_scoped(
      ['https://www.googleapis.com/auth/cloud-platform'])
  credentials.authorize(http)

  service = build('vision', 'v1', http, discoveryServiceUrl=API_DISCOVERY_FILE)

  with open(photo_file, 'rb') as image:
    image_content = base64.b64encode(image.read())
    service_request = service.images().annotate(
      body={
        'requests': [{
          'image': {
            'content': image_content.decode('UTF-8')  # decode for JSON serialization
           },
          'features': [{
            'type': 'LABEL_DETECTION',
            'maxResults': 1,
           }]
         }]
      })
    response = service_request.execute()
    label = response['responses'][0]['labelAnnotations'][0]['description']
    print('Found label: %s for %s' % (label, photo_file))
    return 0
def run(project, zone, instance_name):
    credentials = GoogleCredentials.get_application_default()
    compute = build('compute', 'v1', credentials=credentials)

    print('Creating instance.')

    operation = create_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])

    instances = list_instances(compute, project, zone)

    print('Instances in project %s and zone %s:' % (project, zone))
    for instance in instances:
        print(' - ' + instance['name'])

    print("""
Instance created.
It will take a minute or two for the instance to complete work.
Check this URL: http://storage.googleapis.com/%s/output.png
Once the image is uploaded press enter to delete the instance.
""" % project)

    input()

    print('Deleting instance.')

    operation = delete_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])
Example #10
def get_service():
    credentials = GoogleCredentials.get_application_default()
    scoped_credentials = credentials.create_scoped(
        ['https://www.googleapis.com/auth/cloud-platform'])
    http = httplib2.Http()
    scoped_credentials.authorize(http)
    return discovery.build('language', 'v1beta1', http=http)
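A hedged usage sketch for the service handle returned above; the request body mirrors the documents().annotateText call used in a later example on this page, and the sample text is a placeholder.

service = get_service()
response = service.documents().annotateText(body={
    'document': {'type': 'PLAIN_TEXT', 'content': 'The quick brown fox.'},  # placeholder text
    'features': {'extractDocumentSentiment': True},
    'encodingType': 'UTF8',
}).execute()
print(response.get('documentSentiment'))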
Example #11
def main(bucket, sources):
    # Get the application default credentials. When running locally, these are
    # available after running `gcloud init`. When running on compute
    # engine, these are available from the environment.
    credentials = GoogleCredentials.get_application_default()

    # Construct the service object for the interacting with the Cloud Storage
    # API.
    service = discovery.build('storage', 'v1', credentials=credentials)

    print "\n-----------\n"
    print "sources===", sources
    print "\n-----------\n"

    # Upload the source files.
    for filename in sources:
        req = service.objects().insert(
            media_body=filename,
            name=filename,
            bucket=bucket)
        resp = req.execute()
        print('> Uploaded source file {}'.format(filename))
        print(json.dumps(resp, indent=2))

    """
Example #12
def get_service():
    """Build a client to the Google Cloud Natural Language API."""

    credentials = GoogleCredentials.get_application_default()

    return discovery.build('language', 'v1beta1',
                           credentials=credentials)
Example #13
def run(project, zone, instance_name, package):
    credentials = GoogleCredentials.get_application_default()
    compute = build('compute', 'v1', credentials=credentials)

    print 'Creating instance.'

    #creating instance
    operation = create_instance(compute, project, zone, instance_name, package)

    #execute operations
    wait_for_operation(compute, project, zone, operation['name'])

    #listing instances
    instances = list_instances(compute, project, zone)

    print 'Instances in project %s and zone %s:' % (project, zone)
    for instance in instances:
        print ' - ' + instance['name']

    print """ Instance created """

    raw_input()

    print 'Deleting instance'

    operation = delete_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])
Example #14
File: auth.py Project: JavierRoger/beam
def get_service_credentials():
  """For internal use only; no backwards-compatibility guarantees.

  Get credentials to access Google services."""
  user_agent = 'beam-python-sdk/1.0'
  if is_running_in_gce:
    # We are currently running as a GCE taskrunner worker.
    #
    # TODO(ccy): It's not entirely clear if these credentials are thread-safe.
    # If so, we can cache these credentials to save the overhead of creating
    # them again.
    return _GCEMetadataCredentials(user_agent=user_agent)
  else:
    client_scopes = [
        'https://www.googleapis.com/auth/bigquery',
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/devstorage.full_control',
        'https://www.googleapis.com/auth/userinfo.email',
        'https://www.googleapis.com/auth/datastore'
    ]

    try:
      credentials = GoogleCredentials.get_application_default()
      credentials = credentials.create_scoped(client_scopes)
      logging.debug('Connecting using Google Application Default '
                    'Credentials.')
      return credentials
    except Exception as e:
      logging.warning(
          'Unable to find default credentials to use: %s\n'
          'Connecting anonymously.', e)
      return None
Example #15
  def _tpuService(self):
    """Creates a new Cloud TPU API object.

    This works around an issue where the underlying HTTP connection sometimes
    times out when the script has been running for too long. Other methods in
    this object call this method to get a new API object whenever they need
    to communicate with the Cloud API.

    Returns:
      A Google Cloud TPU API object.
    """
    if self._service:
      return self._service

    credentials = self._credentials
    if credentials is None or credentials == 'default':
      credentials = GoogleCredentials.get_application_default()

    if self._discovery_url:
      return discovery.build(
          'tpu', 'v1alpha1',
          credentials=credentials,
          discoveryServiceUrl=self._discovery_url)
    else:
      return discovery.build(
          'tpu', 'v1alpha1',
          credentials=credentials)
Example #16
 def _connect_google_monitoring():
     c = getattr(GoogleMonitoringV3.threadlocal, 'cm_conn', None)
     if c is None:
         creds = GoogleCredentials.get_application_default()
         GoogleMonitoringV3.threadlocal.cm_conn = c = build('monitoring', 'v3',
                                                            credentials=creds).projects()
     return c
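A short sketch of using the cached Monitoring v3 projects() resource above (assuming _connect_google_monitoring is callable as shown); the project resource name is a placeholder, and metricDescriptors().list is the standard v3 list call.

conn = _connect_google_monitoring()
# 'projects/my-project' is a placeholder resource name.
resp = conn.metricDescriptors().list(name='projects/my-project').execute()
for descriptor in resp.get('metricDescriptors', []):
    print(descriptor['type'])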
Example #17
def main():
	conn = None

	device_opt = ["port", "no_password", "zone", "project"]

	atexit.register(atexit_handler)

	define_new_opts()

	all_opt["power_timeout"]["default"] = "60"

	options = check_input(device_opt, process_input(device_opt))

	docs = {}
	docs["shortdesc"] = "Fence agent for GCE (Google Cloud Engine)"
	docs["longdesc"] = "fence_gce is an I/O Fencing agent for GCE (Google Cloud " \
			   "Engine). It uses the googleapiclient library to connect to GCE.\n" \
			   "googleapiclient can be configured with Google SDK CLI or by " \
			   "executing 'gcloud auth application-default login'.\n" \
			   "For instructions see: https://cloud.google.com/compute/docs/tutorials/python-guide"
	docs["vendorurl"] = "http://cloud.google.com"
	show_docs(options, docs)

	run_delay(options)

	try:
		credentials = GoogleCredentials.get_application_default()
		conn = discovery.build('compute', 'v1', credentials=credentials)
	except:
		fail_usage("Failed: Unable to connect to GCE. Check your configuration.")

	# Operate the fencing device
	result = fence_action(conn, options, set_power_status, get_power_status, get_nodes_list)
	sys.exit(result)
def main():
    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())
    projectId = raw_input('Enter the project ID: ')
    datasetId = raw_input('Enter a dataset ID: ')
    tableId = raw_input('Enter a table name to load the data to: ')
    schema_path = raw_input(
        'Enter the path to the schema file for the table: ')

    with open(schema_path, 'r') as schema_file:
        schema = schema_file.read()

    data_path = raw_input('Enter the path to the data file: ')

    with open(data_path, 'r') as data_file:
        data = data_file.read()

    resp, content = make_post(http,
                              schema,
                              data,
                              projectId,
                              datasetId,
                              tableId)

    if resp.status == 200:
        job_resource = json.loads(content)
        service = get_service(credentials)
        poll_job(service, **job_resource['jobReference'])
        print("Success!")
    else:
        print("Http error code: {}".format(resp.status))
Example #19
def main(
    description, project_id, day, month, year, hours, minutes, source_bucket, access_key, secret_access_key, sink_bucket
):
    """Create a one-off transfer from Amazon S3 to Google Cloud Storage."""
    credentials = GoogleCredentials.get_application_default()
    storagetransfer = discovery.build("storagetransfer", "v1", credentials=credentials)

    # Edit this template with desired parameters.
    # Specify times below using US Pacific Time Zone.
    transfer_job = {
        "description": description,
        "status": "ENABLED",
        "projectId": project_id,
        "schedule": {
            "scheduleStartDate": {"day": day, "month": month, "year": year},
            "scheduleEndDate": {"day": day, "month": month, "year": year},
            "startTimeOfDay": {"hours": hours, "minutes": minutes},
        },
        "transferSpec": {
            "awsS3DataSource": {
                "bucketName": source_bucket,
                "awsAccessKey": {"accessKeyId": access_key, "secretAccessKey": secret_access_key},
            },
            "gcsDataSink": {"bucketName": sink_bucket},
        },
    }

    result = storagetransfer.transferJobs().create(body=transfer_job).execute()
    print("Returned transferJob: {}".format(json.dumps(result, indent=4)))
Example #20
    def handle(self, *args, **options):
        if len(args) < 2:
            raise CommandError("Not enough arguments")
        org = args[0]
        client_secret_path = args[1]

        # Fail early if the university does not exist
        university = University.objects.get(code=org)

        # is the file a client-secret file or a credentials file?
        with open(client_secret_path) as f:
            json_data = f.read()
            file_data = json.loads(json_data)
            if "scopes" in file_data:
                self.stdout.write("Importing access token...")
                credentials = GoogleCredentials.from_json(json_data)
            else:
                self.stdout.write("Start flow from client secrets...")
                credentials = flow_from_client_secrets(client_secret_path)

        # Save credentials to database
        YoutubeAuth.objects.create(
            university=university,
            client_id=credentials.client_id,
            client_secret=credentials.client_secret,
            access_token=credentials.access_token,
            refresh_token=credentials.refresh_token,
            token_expiry=credentials.token_expiry,
        )
        self.stdout.write("Youtube auth token created")
Example #21
def main(photo_file):
    """Run a label request on a single image"""

    # [START authenticate]
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)
    # [END authenticate]

    # [START construct_request]
    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        service_request = service.images().annotate(body={
            'requests': [{
                'image': {
                    'content': image_content.decode('UTF-8')
                },
                'features': [{
                    'type': 'LABEL_DETECTION',
                    'maxResults': 1
                }]
            }]
        })
        # [END construct_request]
        # [START parse_response]
        response = service_request.execute()
        label = response['responses'][0]['labelAnnotations'][0]['description']
        print('Found label: %s for %s' % (label, photo_file))
        return 0
Example #22
 def __init__(self, project, session_id, logger):
     super(GCEService, self).__init__(project, session_id, logger)
     self.credentials = GoogleCredentials.get_application_default()
     self.compute = discovery.build('compute', 'v1',
             credentials=self.credentials)
     self.storage = discovery.build('storage', 'v1',
             credentials=self.credentials)
Example #23
  def __init__(self, config, logger):
    """See README.md for the config format."""
    self._project_name = config['project_name']
    self._taskqueue_tag = config['taskqueue_tag']
    self._src_path = config['src_path']
    self._credentials = GoogleCredentials.get_application_default()
    self._logger = logger

    # Separate the cloud storage path into the bucket and the base path under
    # the bucket.
    storage_path_components = config['cloud_storage_path'].split('/')
    self._bucket_name = storage_path_components[0]
    self._base_path_in_bucket = ''
    if len(storage_path_components) > 1:
      self._base_path_in_bucket = '/'.join(storage_path_components[1:])
      if not self._base_path_in_bucket.endswith('/'):
        self._base_path_in_bucket += '/'

    self._google_storage_accessor = GoogleStorageAccessor(
        credentials=self._credentials, project_name=self._project_name,
        bucket_name=self._bucket_name)

    self._traces_dir = os.path.join(self._base_path_in_bucket, 'traces')
    self._trace_database_path = os.path.join(
        self._traces_dir,
        config.get('trace_database_filename', 'trace_database.json'))

    # Recover any existing trace database in case the worker died.
    self._DownloadTraceDatabase()

    # Initialize the global options that will be used during trace generation.
    options.OPTIONS.ParseArgs([])
    options.OPTIONS.local_binary = config['chrome_path']
def analyze_content(text):
  '''Run a sentiment analysis request on text.
  This function is a modified version of the tutorial at this link on 7/24/16
  https://cloud.google.com/natural-language/docs/sentiment-tutorial
  '''

  credentials = GoogleCredentials.get_application_default().create_scoped(
      ['https://www.googleapis.com/auth/cloud-platform'])
  http = httplib2.Http()
  credentials.authorize(http)

  service = discovery.build('language', 'v1beta1',
                            http=http, discoveryServiceUrl=DISCOVERY_URL)

  service_request = service.documents().annotateText(
    body={
            "document":{
              "type":"PLAIN_TEXT",
              "content": text
            },
            "features":{
              "extractDocumentSentiment":True
            },
            "encodingType":"UTF8"
          })

  try:
    response = service_request.execute()
    return response
  except:
    # Normally you don't want to catch all errors but what is a side project
    # without tech debt.
    return {"error_msg": str(sys.exc_info()[1])}
Example #25
    def __init__(self, config=None):
        # Initialize Handler
        Handler.__init__(self, config)

        if discovery is None:
            logging.error("Failed to load apiclient.discovery")
            return
        elif GoogleCredentials is None:
            logging.error("Failed to load "
                          "oauth2client.client.GoogleCredentials")
            return

        # Initialize options
        self.topic = self.config['topic']
        self.scopes = self.config['scopes']
        self.retries = int(self.config['retries'])
        self.batch = self.config['batch']
        self.batch_size = int(self.config['batch_size'])
        self.metrics = []
        tags_items = self.config['tags']
        self.tags = {}
        for item in tags_items:
            k, v = item.split(':')
            self.tags[k] = v

        # Initialize client
        credentials = GoogleCredentials.get_application_default()
        if credentials.create_scoped_required():
            credentials = credentials.create_scoped(self.scopes)
        self.client = discovery.build('pubsub', 'v1', credentials=credentials)
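The handler above builds a Pub/Sub v1 client but does not show the publish path. As a minimal, self-contained sketch (project, topic name, and payload are placeholders), a publish call against that client could look like this; Pub/Sub expects the message data to be base64-encoded.

import base64
import json

from googleapiclient import discovery
from oauth2client.client import GoogleCredentials

credentials = GoogleCredentials.get_application_default()
client = discovery.build('pubsub', 'v1', credentials=credentials)

topic = 'projects/my-project/topics/my-topic'  # placeholder full resource name
payload = json.dumps({'metric': 'example'}).encode('utf-8')
# Pub/Sub v1 requires base64-encoded message data.
body = {'messages': [{'data': base64.b64encode(payload).decode('ascii')}]}
client.projects().topics().publish(topic=topic, body=body).execute()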
def main(project_id, dataset_id, table_name, schema_path, data_path):
    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())
    bigquery = discovery.build('bigquery', 'v2', credentials=credentials)

    with open(schema_path, 'r') as schema_file:
        schema = schema_file.read()

    with open(data_path, 'r') as data_file:
        data = data_file.read()

    resp, content = make_post(
        http,
        schema,
        data,
        project_id,
        dataset_id,
        table_name)

    if resp.status == 200:
        job = json.loads(content)
        poll_job(bigquery, job)
        print("Success!")
    else:
        print("Http error code: {}".format(resp.status))
def main(project_id):
    credentials = GoogleCredentials.get_application_default()
    # Construct the service object for interacting with the BigQuery API.
    bigquery = discovery.build('bigquery', 'v2', credentials=credentials)

    list_datasets(bigquery, project_id)
    list_projects(bigquery)
Example #28
def main(bucket):
    # Get the application default credentials. When running locally, these are
    # available after running `gcloud init`. When running on compute
    # engine, these are available from the environment.
    credentials = GoogleCredentials.get_application_default()

    # Construct the service object for interacting with the Cloud Storage API -
    # the 'storage' service, at version 'v1'.
    # You can browse other available api services and versions here:
    #     https://developers.google.com/api-client-library/python/apis/
    service = discovery.build('storage', 'v1', credentials=credentials)

    # Make a request to buckets.get to retrieve a list of objects in the
    # specified bucket.
    req = service.buckets().get(bucket=bucket)
    resp = req.execute()
    print(json.dumps(resp, indent=2))

    # Create a request to objects.list to retrieve a list of objects.
    fields_to_return = \
        'nextPageToken,items(name,size,contentType,metadata(my-key))'
    req = service.objects().list(bucket=bucket, fields=fields_to_return)

    # If you have too many items to list in one request, list_next() will
    # automatically handle paging with the pageToken.
    while req:
        resp = req.execute()
        print(json.dumps(resp, indent=2))
        req = service.objects().list_next(req, resp)
Example #29
def main(project_id):
    # [START build_service]
    # Grab the application's default credentials from the environment.
    credentials = GoogleCredentials.get_application_default()
    # Construct the service object for interacting with the BigQuery API.
    bigquery_service = build('bigquery', 'v2', credentials=credentials)
    # [END build_service]

    try:
        # [START run_query]
        query_request = bigquery_service.jobs()
        query_data = {
            'query': (
                'SELECT TOP(corpus, 10) as title, '
                'COUNT(*) as unique_words '
                'FROM [publicdata:samples.shakespeare];')
        }

        query_response = query_request.query(
            projectId=project_id,
            body=query_data).execute()
        # [END run_query]

        # [START print_results]
        print('Query Results:')
        for row in query_response['rows']:
            print('\t'.join(field['v'] for field in row['f']))
        # [END print_results]

    except HttpError as err:
        print('Error: {}'.format(err.content))
        raise err
Example #30
def OCR(photo_file):
    """Run a label request on a single image"""
    #print photo_file
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        service_request = service.images().annotate(body={
            'requests': [{
                'image': {
                    'content': image_content.decode('UTF-8')
                },
                'features': [{
                    'type': 'TEXT_DETECTION',
                    'maxResults': 1
                }]
            }]
        })
        response = service_request.execute()
        line = response['responses'][0]['textAnnotations'][0]['description']
        string =str(line)
        proc = string.splitlines()
        #print proc
        card = BusinessCard(proc)
        #print response
        #label = response['responses'][0]['labelAnnotations'][0]['description']
        #print('Found label: %s for %s' % (label, photo_file))
        return card.cleanup()
def clean_up_resource(args, deployments):
    """Clean up deployment / app config from previous test

  Args:
    args: The args from ArgParse.
    deployments set(string): which contains all deployment names in current test round.
  Returns:
    bool: True if cleanup is done
  """
    logging.info("Clean up project resource (backend service and deployment)")

    # The source repo is reused for continuous tests: within 7 days of deleting a
    # repo, a source repo with the same name cannot be recreated.

    # Delete deployment
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('deploymentmanager',
                              'v2',
                              credentials=credentials)
    delete_done = False
    for deployment in deployments:
        try:
            request = service.deployments().delete(project=args.project,
                                                   deployment=deployment)
            request.execute()
        except Exception as e:
            logging.info("Deployment doesn't exist, continue")
    # wait up to 10 minutes till delete finish.
    end_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
    while datetime.datetime.now() < end_time:
        sleep(10)
        try:
            request = service.deployments().list(project=args.project)
            response = request.execute()
            if ('deployments' not in response) or (len(deployments & set(
                    d['name'] for d in response['deployments'])) == 0):
                delete_done = True
                break
        except Exception:
            logging.info(
                "Failed listing current deployments, retry in 10 seconds")

    # Delete forwarding-rules
    delete_gcloud_resource(args, 'forwarding-rules', dlt_params=['--global'])
    # Delete target-http-proxies
    delete_gcloud_resource(args, 'target-http-proxies')
    # Delete target-https-proxies
    delete_gcloud_resource(args, 'target-https-proxies')
    # Delete url-maps
    delete_gcloud_resource(args, 'url-maps')
    # Delete backend-services
    delete_gcloud_resource(args, 'backend-services', dlt_params=['--global'])
    # Delete instance-groups
    for zone in LOADTEST_ZONE:
        delete_gcloud_resource(args,
                               'instance-groups unmanaged',
                               filter=' --filter=INSTANCES:0',
                               dlt_params=['--zone=' + zone])
    # Delete ssl-certificates
    delete_gcloud_resource(args, 'ssl-certificates')
    # Delete health-checks
    delete_gcloud_resource(args, 'health-checks')

    if not delete_done:
        logging.error(
            "failed to clean up resources for project %s deployments %s",
            args.project, deployments)
    return delete_done
Example #32
def main ( sqlFilename, billingProjID, tmpDestTable, delTableFlag, 
           maxCost, gcsPath, localDir, outFile, verboseFlag ):

    ## let's read in the SQL query first
    ( queryString, BQtableList ) = getBQueryFromFile ( sqlFilename, verboseFlag )

    ## first we need to get credentials and build the service
    if ( verboseFlag ):
        print " "
        print " getting credentials to access BigQuery ... "
    credentials = GoogleCredentials.get_application_default()
    BQsvc = discovery.build('bigquery', 'v2', credentials=credentials)

    ( destProjID, destDatasetID, destTableID ) = parseTableName ( tmpDestTable, billingProjID )

    if ( verboseFlag ):
        print " "
        print " "

    ## submit the SQL query as a dry-run to validate and verify cost
    estCost = submitDryRunQueryJob ( BQsvc, queryString, billingProjID, \
                      destProjID, destDatasetID, destTableID )
    if ( estCost > maxCost ):
        print " "
        print " The estimated cost exceeds the maximum specified cost. "
        print " If you want this to run, rerun with a higher maxCost parameter. "
        print " "
        sys.exit(-1)

    ## now submit the SQL query for real
    jobInfo = submitRealQueryJob ( BQsvc, queryString, billingProjID, \
                    destProjID, destDatasetID, destTableID )

    ## and poll for job completion
    pollJob ( BQsvc, jobInfo )

    ## the 'destination' table from the previous query is now the 'source' table
    srcProjID = destProjID
    srcDatasetID = destDatasetID
    srcTableID   = destTableID

    ## once we've saved the results of the query, the next step is to
    ## export the contents of the table to GCS -- depending on the
    ## size of the table, there may be multiple output files ("shards")
    jobInfo = exportBQtable2GCS ( BQsvc, gcsPath, billingProjID, 
                        srcProjID, srcDatasetID, srcTableID )

    ## and poll this job too ...
    pollJob ( BQsvc, jobInfo )

    ## at this point we can delete that table if this has been requested
    if ( delTableFlag ):
        deleteBQtable ( BQsvc, srcProjID, srcDatasetID, srcTableID )

    ## now starts the second part of this ... we need to download the files/shards
    ## that have been written to GCS by the 'export' request, and then parse them
    ## and 'un-melt' the input 'tidy-formatted' data ...

    readShards_write2dTSV ( gcsPath, localDir, outFile, verboseFlag )

    print " "
    print " DONE! "
    print " "
Example #33
 def __init__(self, api_discovery_file='vision_api.json'):
     self.credentials = GoogleCredentials.get_application_default()
     self.service = discovery.build('vision',
                                    'v1',
                                    credentials=self.credentials,
                                    discoveryServiceUrl=DISCOVERY_URL)
Example #34
 def connect(self):
     credentials = GoogleCredentials.get_application_default()
     self.conn = discovery.build('storage', 'v1', credentials=credentials)
Example #35
def get_job_id(pipeline_options: Dict[str, str]) -> str:
    """Captures the job_id of the pipeline job specified by the given options.

    For local jobs, generates a job_id using the given job_timestamp. For jobs
    running on Dataflow, finds the currently running job with the same job name
    as the current pipeline. Note: this works because there can only be one job
    with the same job name running on Dataflow at a time.

    Args:
        pipeline_options: Dictionary containing details about the pipeline.

    Returns:
        The job_id string of the current pipeline job.

    """
    runner = pipeline_options.get('runner')

    if runner == 'DataflowRunner':
        # Job is running on Dataflow. Get job_id.
        project = pipeline_options.get('project')
        region = pipeline_options.get('region')
        job_name = pipeline_options.get('job_name')

        if not project:
            raise ValueError("No project provided in pipeline options: "
                             f"{pipeline_options}")
        if not region:
            raise ValueError("No region provided in pipeline options: "
                             f"{pipeline_options}")
        if not job_name:
            raise ValueError("No job_name provided in pipeline options: "
                             f"{pipeline_options}")

        try:
            logging.info("Looking for job_id on Dataflow.")

            service_name = 'dataflow'
            dataflow_api_version = 'v1b3'
            credentials = GoogleCredentials.get_application_default()

            dataflow = build(serviceName=service_name,
                             version=dataflow_api_version,
                             credentials=credentials)

            result = dataflow.projects().locations().jobs().list(
                projectId=project,
                location=region,
            ).execute()

            pipeline_job_id = 'none'

            for job in result['jobs']:
                if job['name'] == job_name:
                    if job['currentState'] == 'JOB_STATE_RUNNING':
                        pipeline_job_id = job['id']
                    break

            if pipeline_job_id == 'none':
                msg = "Could not find currently running job with the " \
                    f"name: {job_name}."
                logging.error(msg)
                raise LookupError(msg)

        except Exception as e:
            logging.error("Error retrieving Job ID")
            raise LookupError(e)

    else:
        # Job is running locally. Generate id from the timestamp.
        pipeline_job_id = '_local_job'
        job_timestamp = pipeline_options.get('job_timestamp')

        if not job_timestamp:
            raise ValueError("Must provide a job_timestamp for local jobs.")

        pipeline_job_id = job_timestamp + pipeline_job_id

    return pipeline_job_id
Example #36
def teardown_minikube(args):
    """Delete the VM used for minikube."""

    credentials = GoogleCredentials.get_application_default()
    gce = discovery.build("compute", "v1", credentials=credentials)
    instances = gce.instances()

    request = instances.delete(project=args.project,
                               zone=args.zone,
                               instance=args.vm_name)

    response = request.execute()

    op_id = response.get("name")
    final_op = vm_util.wait_for_operation(gce, args.project, args.zone, op_id)

    logging.info("Final result for delete operation: %s", final_op)
    if final_op.get("status") != "DONE":
        raise ValueError("Delete operation has status %s",
                         final_op.get("status"))

    if final_op.get("error"):
        message = "Delete operation resulted in error %s".format(
            final_op.get("error"))
        logging.error(message)
        raise ValueError(message)

    # Ensure the disk is deleted. The disk should be auto-deleted with
    # the VM but just in case we issue a delete request anyway.
    disks = gce.disks()
    request = disks.delete(project=args.project,
                           zone=args.zone,
                           disk=args.vm_name)

    response = None
    try:
        response = request.execute()
    except errors.HttpError as e:
        if not e.content:
            raise
        content = json.loads(e.content)
        if content.get("error", {}).get("code") == requests.codes.NOT_FOUND:
            logging.info("Disk %s in zone %s in project %s already deleted.",
                         args.vm_name, args.zone, args.project)
        else:
            raise

    if response:
        logging.info("Waiting for disk to be deleted.")
        op_id = response.get("name")
        final_op = vm_util.wait_for_operation(gce, args.project, args.zone,
                                              op_id)

        logging.info("Final result for disk delete operation: %s", final_op)
        if final_op.get("status") != "DONE":
            raise ValueError("Disk delete operation has status %s",
                             final_op.get("status"))

        if final_op.get("error"):
            message = "Delete disk operation resulted in error %s".format(
                final_op.get("error"))
            logging.error(message)
            raise ValueError(message)
Example #37
def settings(request):
    account = request.POST.get('account', '')
    if account == '' and request.GET.get('code', False) == False:
        profile = Profile.objects.filter(user_id=request.user.id).first()
        # GET REFERRAL OBJECT OF THIS USER
        url = '%s://%s/i/%s' % (
            request.scheme, request.META['HTTP_HOST'],
            profile.profilelink.short
        ) if profile and profile.profilelink and profile.profilelink.short else ''
        profilerank = profile.profilerank if profile else None
        return render(request, 'settings/settings.html', {
            'profile': profile,
            'profilerank': profilerank,
            'url': url
        })

    elif account == '':
        if request.GET.get('code', False):
            flow = client.flow_from_clientsecrets(
                'client_secret_sb_webapp_GA.json',
                scope='https://www.googleapis.com/auth/analytics',
                redirect_uri="%s://%s%s" %
                (request.scheme, request.META['HTTP_HOST'], '/settings/'),
                prompt='select_account')

            google_authcode = request.GET['code']
            credentials = flow.step2_exchange(google_authcode)
            access_token1 = credentials.access_token
            refresh_token1 = credentials.refresh_token
            client_id = credentials.client_id
            client_secret = credentials.client_secret
            token_expiry = credentials.token_expiry
            token_uri = credentials.token_uri
            user_agent = credentials.user_agent
            revoke_uri = credentials.revoke_uri

            Profile.objects.filter(user_id=request.user.id).update(
                google_authcode=google_authcode,
                access_token=access_token1,
                refresh_token=refresh_token1,
                client_id=client_id,
                client_secret=client_secret,
                token_expiry=token_expiry,
                token_uri=token_uri,
                user_agent=user_agent,
                revoke_uri=revoke_uri)

            http_auth = credentials.authorize(httplib2.Http())

            service = build('analytics',
                            'v3',
                            http_auth,
                            cache_discovery=False)
            accounts = service.management().accounts().list().execute()

            if accounts.get('items'):
                account1 = []
                account2 = []
                for account in accounts.get('items'):
                    account1 = account1 + [account.get('name')]
                    account2 = account2 + [account.get('id')]
                zipped_data = zip(account1, account2)
                return render(request, 'settings/settings.html',
                              {'accounts': zipped_data})
            else:
                profile = Profile.objects.filter(
                    user_id=request.user.id).first()
                # GET REFERRAL OBJECT OF THIS USER
                url = '%s://%s/i/%s' % (
                    request.scheme, request.META['HTTP_HOST'],
                    profile.profilelink.short
                ) if profile and profile.profilelink and profile.profilelink.short else ''
                profilerank = profile.profilerank if profile else None
                message = "You don't have analytics account "
                return render(
                    request, 'settings/settings.html', {
                        'profile': profile,
                        'profilerank': profilerank,
                        'url': url,
                        'message': message
                    })

    elif account:
        profile = Profile.objects.filter(user_id=request.user.id).first()

        access_token = profile.access_token
        refresh_token = profile.refresh_token
        client_id = profile.client_id
        client_secret = profile.client_secret
        token_expiry = profile.token_expiry
        token_uri = profile.token_uri
        user_agent = profile.user_agent
        revoke_uri = profile.revoke_uri

        credentials = GoogleCredentials(access_token, client_id, client_secret,
                                        refresh_token, token_expiry, token_uri,
                                        'my-user-agent/1.0', revoke_uri)
        http_auth = credentials.authorize(httplib2.Http())
        service = build('analytics', 'v3', http_auth, cache_discovery=False)
        # Get a list of all the properties for the first account.

        profile1 = None
        properties = service.management().webproperties().list(
            accountId=account).execute()
        if properties.get('items'):
            # Get the first property id.
            property = properties.get('items')[0].get('id')
            # Get a list of all views (profiles) for the first property.
            profiles = service.management().profiles().list(
                accountId=account, webPropertyId=property).execute()
            if profiles.get('items'):
                # return the first view (profile) id.
                profile1 = profiles.get('items')[0].get('id')

        if profile1 == None:
            profile = Profile.objects.filter(user_id=request.user.id).first()
            # GET REFERRAL OBJECT OF THIS USER
            url = '%s://%s/i/%s' % (
                request.scheme, request.META['HTTP_HOST'],
                profile.profilelink.short
            ) if profile and profile.profilelink and profile.profilelink.short else ''
            profilerank = profile.profilerank if profile else None
            message = "Your account  don't have sufficient permission "
            return render(
                request, 'settings/settings.html', {
                    'profile': profile,
                    'profilerank': profilerank,
                    'url': url,
                    'message': message
                })

        service = build('analytics', 'v4', http_auth, cache_discovery=False)
        message = 'Missing permission for:'
        try:
            conversion_rate = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '100daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:goalConversionRateAll'
                        }]
                    }]
                }).execute()

            for ccr in conversion_rate['reports']:
                for rate in ccr['data']['totals']:
                    conversion_vals = rate['values']
                    # Profile._meta.get_field('current_conversion_rate').default = float(vals[0])
                    # Profile._meta.get_field('optimal_conversion_rate').default = float(vals[0])
        except:
            # raise Exception('Permission Denied')
            conversion_vals = [0]
            message = message + ' Conversion Rate '
            pass
            # return sb_traceback(request)
        try:
            bounce_rate = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '100daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:bounceRate'
                        }],
                    }]
                }).execute()

            for cbr in bounce_rate['reports']:
                for rate in cbr['data']['totals']:
                    bounce_vals = rate['values']
                    # Profile._meta.get_field('current_bounce_rate').default = float(vals[0])
                    # Profile._meta.get_field('optimal_bounce_rate').default = float(vals[0])
        except:
            # print traceback.print_exc()
            # print e
            bounce_vals = [0]
            message = message + ' Bounce Rate '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        try:
            metric_expression = {
                'expression': 'ga:transactionRevenue/ga:transactions',
                'formattingType': 'FLOAT'
            }

            avg_order_value = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '100daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [metric_expression],
                    }]
                }).execute()

            for caov in avg_order_value['reports']:
                for order in caov['data']['totals']:
                    order_value = order['values']
                    # Profile._meta.get_field('current_average_order_value').default = float(value[0])
                    # Profile._meta.get_field('optimal_average_order_value').default = float(value[0])
        except:
            order_value = [0]
            message = message + ' Average Order Value '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        try:
            cart_abandonment_rate = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '100daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:goalAbandonRateAll'
                        }],
                    }]
                }).execute()

            for ccar in cart_abandonment_rate['reports']:
                for cart_rate in ccar['data']['totals']:
                    cart_vals = cart_rate['values']
                    # Profile._meta.get_field('current_shopping_cart_abandonment_rate').default = float(cart_vals[0])
                    # Profile._meta.get_field('optimal_shopping_cart_abandonment_rate').default = float(cart_vals[0])
        except:
            cart_vals = [0]
            message = message + ' Cart Abandonment Rate '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        try:
            revenue_per_user = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '100daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:revenuePerUser'
                        }],
                        # "dimensions":[{"name":"ga:transactionId"}],
                    }]
                }).execute()

            for crpu in revenue_per_user['reports']:
                for revenue in crpu['data']['totals']:
                    revenue_vals = revenue['values']
                    # Profile._meta.get_field('current_revenue_per_user').default = float(vals[0])
                    # Profile._meta.get_field('optimal_revenue_per_user').default = float(vals[0])
        except:
            revenue_vals = [0]
            message = message + ' Revenue Per User '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        try:
            traffic = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '100daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:organicSearches'
                        }],
                        "dimensions": [{
                            "name": "ga:month"
                        }],
                    }]
                }).execute()

            for current_month_traffic in traffic['reports']:
                for dimension in current_month_traffic['data']['rows']:
                    pass

                a = max(int(d) for d in dimension['dimensions'])
                for traffic in current_month_traffic['data']['totals']:
                    traffic_vals = traffic['values']
                    # Profile._meta.get_field('optimal_traffic').default = float(traffic_vals[0])

            # Analytics metrics of Traffic This Month
            traffic_this_month = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '100daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:organicSearches'
                        }],
                        "dimensions": [{
                            "name": "ga:month"
                        }],
                        "dimensionFilterClauses": [{
                            "filters": [{
                                "dimensionName": "ga:month",
                                "operator": "EXACT",
                                "expressions": ["0%s" % a]
                            }]
                        }]
                    }]
                }).execute()

            for cttm in traffic_this_month['reports']:
                for traffic in cttm['data']['totals']:
                    traffic_this_vals = traffic['values']
                    # Profile._meta.get_field('traffic_this_month').default = float(vals[0])
        except:
            traffic_this_vals = [0]
            message = message + ' Traffic This Month '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        try:
            # Analytics metrics of Traffic Last Month
            traffic_last_month = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '30daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:organicSearches'
                        }],
                        # "dimensions":[{"name":"ga:month"}],
                    }]
                }).execute()

            for ctlm in traffic_last_month['reports']:
                for traffic_last in ctlm['data']['totals']:
                    traffic_last_vals = traffic_last['values']
                    # Profile._meta.get_field('traffic_last_month').default = float(last_vals[0])
        except:
            traffic_last_vals = [0]
            message = message + ' Traffic Last Month '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        try:
            # Analytics metrics of Revenue This Month
            revenue_this_month = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '30daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:transactionRevenue'
                        }],
                        "dimensions": [{
                            "name": "ga:month"
                        }],
                    }]
                }).execute()

            for crtm in revenue_this_month['reports']:
                for revenue in crtm['data']['totals']:
                    revenue_this_vals = revenue['values']
                    # Profile._meta.get_field('revenue_this_month').default = float(vals[0])
        except:
            revenue_this_vals = [0]
            message = message + ' Revenue This Month '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        try:
            # Analytics metrics of Revenue Last Month
            revenue_last_month = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '60daysAgo',
                            'endDate': '30daysAgo'
                        }],
                        'metrics': [{
                            'expression': 'ga:transactionRevenue'
                        }],
                        # "dimensions":[{"name":"ga:month"}],
                    }]
                }).execute()

            for crlm in revenue_last_month['reports']:
                for revenue in crlm['data']['totals']:
                    revenue_last_vals = revenue['values']
                    # Profile._meta.get_field('revenue_last_month').default = float(vals[0])
        except:
            revenue_last_vals = [0]
            message = message + ' Revenue Last Month '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        try:
            optimal_revenue = service.reports().batchGet(
                body={
                    'reportRequests': [{
                        'viewId':
                        profile1,
                        'dateRanges': [{
                            'startDate': '30daysAgo',
                            'endDate': 'today'
                        }],
                        'metrics': [{
                            'expression': 'ga:transactionRevenue'
                        }],
                        # "dimensions":[{"name":"ga:month"}],
                    }]
                }).execute()

            for optrev in optimal_revenue['reports']:
                for revenue in optrev['data']['totals']:
                    optimal_revenue_vals = revenue['values']
                    # Profile._meta.get_field('optimal_revenue').default = float(vals[0])
        except:
            optimal_revenue_vals = [0]
            message = message + ' Optimal Revenue '
            pass
            # return sb_traceback(request)
            # raise Exception('Permission Denied')

        profile = Profile.objects.filter(user_id=request.user.id).first()
        # if not UserTaskHistory:
        #   guide = Guide.objects.get(pk=8)
        #   steps = Step.objects.filter(guide=guide.id).first()
        #   tasks = Task.objects.filter(step=steps).first()
        # else:
        #   pass
        user = User.objects.get(id=request.user.id)
        if not profile:
            pr = Profile.objects.create(
                current_conversion_rate=float(conversion_vals[0]),
                optimal_conversion_rate=float(conversion_vals[0]),
                current_bounce_rate=float(bounce_vals[0]),
                optimal_bounce_rate=float(bounce_vals[0]),
                current_average_order_value=float(order_value[0]),
                optimal_average_order_value=float(order_value[0]),
                current_shopping_cart_abandonment_rate=float(cart_vals[0]),
                optimal_shopping_cart_abandonment_rate=float(cart_vals[0]),
                traffic_last_month=float(traffic_last_vals[0]),
                traffic_this_month=float(traffic_this_vals[0]),
                optimal_traffic=float(traffic_this_vals[0]),
                revenue_last_month=float(revenue_last_vals[0]),
                revenue_this_month=float(revenue_this_vals[0]),
                optimal_revenue=float(optimal_revenue_vals[0]),
                current_revenue_per_user=float(revenue_vals[0]),
                optimal_revenue_per_user=float(revenue_vals[0]),
                current_user_name=request.user.username,
                user_id=user,
                connected=True,
                accountid=account,
            )
            pr.save()
        else:
            Profile.objects.filter(user_id=request.user.id).update(
                current_conversion_rate=float(conversion_vals[0]),
                optimal_conversion_rate=float(conversion_vals[0]),
                current_bounce_rate=float(bounce_vals[0]),
                optimal_bounce_rate=float(bounce_vals[0]),
                current_average_order_value=float(order_value[0]),
                optimal_average_order_value=float(order_value[0]),
                current_shopping_cart_abandonment_rate=float(cart_vals[0]),
                optimal_shopping_cart_abandonment_rate=float(cart_vals[0]),
                traffic_last_month=float(traffic_last_vals[0]),
                traffic_this_month=float(traffic_this_vals[0]),
                optimal_traffic=float(traffic_this_vals[0]),
                revenue_last_month=float(revenue_last_vals[0]),
                revenue_this_month=float(revenue_this_vals[0]),
                optimal_revenue=float(optimal_revenue_vals[0]),
                current_revenue_per_user=float(revenue_vals[0]),
                optimal_revenue_per_user=float(revenue_vals[0]),
                current_user_name=request.user.username,
                connected=True,
                accountid=account,
            )

        profile = Profile.objects.filter(user_id=request.user.id).first()

        # GET REFERRAL OBJECT OF THIS USER
        url = '%s://%s/i/%s' % (
            request.scheme, request.META['HTTP_HOST'],
            profile.profilelink.short
        ) if profile and profile.profilelink and profile.profilelink.short else ''

        profilerank = profile.profilerank if profile else None

        message = '' if message == 'Not have permission of' else message

        return render(
            request, 'settings/settings.html', {
                'profile': profile,
                'profilerank': profilerank,
                'url': url,
                'message': message
            })
Example #38
0
   https://cloud.google.com/sdk and run
   `gcloud beta auth application-default login`.
   For more information, see
   https://developers.google.com/identity/protocols/application-default-credentials
3. Install the Python client library for Google APIs by running
   `pip install --upgrade google-api-python-client`
"""

from pprint import pprint

from googleapiclient import discovery
from oauth2client.client import GoogleCredentials

from time import sleep

credentials = GoogleCredentials.get_application_default()

service = discovery.build('sqladmin', 'v1beta4', credentials=credentials)

# Project ID of the project that contains the instance.
project = 'able-cogency-234306'  # TODO: Update placeholder value.

# Cloud SQL instance ID. This does not include the project ID.
instance = 'testddd'  # TODO: Update placeholder value.

instances_import_request_body = {
    "importContext": {
        "database": "recommendation_spark",
        "fileType": "CSV",
        "uri": "gs://able-cogency-234306/tmp/accommodation.csv",
        "csvImportOptions": {
Example #39
0
def provide_gce_cluster(nodes_num, algorithm):
    # 1. spin machines [configure daemons will be run by the startup script on every node]
    credentials = GoogleCredentials.get_application_default()
    gce = discovery.build("compute", "v1", credentials=credentials)
    config_dir = "config/"
    config_file_template = "provision_node_{}_config.json"
    zone_operations, vm_ids, cluster = [], [], []
    for i in range(1, nodes_num + 1):
        vm_ids.append("vm-node-{}-{}".format(i, get_random_string(6)))
        metadata = {"bucket": GCS_BUCKET,
                    "clusterConfig": config_dir + config_file_template.format(vm_ids[-1]),
                    "myid": vm_ids[-1]}
        print("Starting a new virtual machine with name {}".format(vm_ids[-1]))
        zone_operations.append(create_instance(gce, vm_ids[-1], metadata, "gce-startup-script.sh"))
        # vm creation is run asynchronously: check that the operation is really completed

    for (i, zone) in [(i, zone_op["name"]) for (i, zone_op) in enumerate(zone_operations)]:
        print("Checking if virtual machine {} is ready...".format(vm_ids[i]))
        while True:
            result = gce.zoneOperations().get(project=GCP_PROJECT_ID, zone=GCE_ZONE_ID, operation=zone).execute()
            if result["status"] == "DONE":
                print("Virtual machine {} is ready.".format(vm_ids[i]))
                result = gce.instances().get(project=GCP_PROJECT_ID, zone=GCE_ZONE_ID, instance=vm_ids[i]).execute()
                new_node = {
                    "id": i + 1,
                    "address": result["networkInterfaces"][0]["accessConfigs"][0]["natIP"],  # ephemeral external IP
                    "port": CONSENSUS_ALGORITHM_PORT,
                    "rpcPort": NETWORK_MANAGER_PORT,
                    "interface": "eth0",
                    "vmID": vm_ids[i],
                    "internalIP": result["networkInterfaces"][0]["networkIP"]  # internal IP
                }
                print("Adding node to the configuration: {}".format(new_node))
                cluster.append(new_node)
                break
            sleep(2)
    # ✓ 1. spin machines

    # 1.1 allow network traffic on VMs if needed
    try:
        gce.firewalls().get(project=GCP_PROJECT_ID, firewall=GCE_FIREWALL_RULE_NAME).execute()
        print("Firewall rule to allow traffic already exists.")
    except:  # rule does not exist: create it
        print("Creating firewall rule to allow traffic...")
        firewall_rule = {
            "description": "Allow traffic on every TCP/UDP port",
            "allowed": [{"IPProtocol": "tcp", "ports": ["1-65535"]},
                        {"IPProtocol": "udp", "ports": ["1-65535"]},
                        {"IPProtocol": "icmp"}],
            "name": GCE_FIREWALL_RULE_NAME,
        }
        gce.firewalls().insert(project=GCP_PROJECT_ID, body=firewall_rule).execute()
        print("Firewall rule to allow traffic created: {}.".format(firewall_rule))
    # ✓ 1.1 allow network traffic on VMs

    # 1.2 wait for startup scripts
    print("Waiting for startup scripts to be completed on VMs...")
    gcs = discovery.build('storage', 'v1', credentials=credentials)
    for vm_id_file in vm_ids:
        while not gcs_file_exists(gcs, vm_id_file):  # acknowledge file created by startup script at the end
            sleep(2)
        gcs.objects().delete(bucket=GCS_BUCKET, object=vm_id_file).execute()  # clean up
    # ✓ 1.2 wait for startup scripts

    # 1.3 provide node-specific configuration files [via configure daemons]
    configure_daemons = [rpcClient('http://{}:{}'.format(node["address"], CONFIGURE_DAEMON_PORT)) for node in cluster]
    for node in cluster:
        node_config_file = "/tmp/" + config_file_template.format(node["vmID"])
        with open(node_config_file, "w") as out_f:
            if algorithm == "rethinkdb":
                node_conf = get_node_config(cluster, node, "gce", additional_ports=[GCE_RETHINKDB_PORTS["cluster_port"]])
            else:
                node_conf = get_node_config(cluster, node, "gce")
            json.dump(node_conf, out_f, indent=4)
        upload_object(gcs, node_config_file, config_dir + config_file_template.format(node["vmID"]))  # send file to VM
    for configure_daemon in configure_daemons:
        configure_daemon.download_node_config()
    # ✓ 1.3 provide node-specific configuration files

    # 2. run algorithm [via configure daemons]
    test_daemon_endpoint = cluster[0]["address"] + ":" + str(TEST_DAEMON_PORT)  # arbitrarily run the test daemon on first node
    print("Going to run algorithm {} on cluster...".format(algorithm))
    if algorithm == "pso":
        configure_pso_gce(cluster, configure_daemons, test_daemon_endpoint)
    elif algorithm == "paxos":
        configure_paxos_gce(cluster, configure_daemons, test_daemon_endpoint)
    elif algorithm == "rethinkdb":
        configure_rethinkdb_gce(cluster, configure_daemons)
    elif algorithm == "datastore":
        pass
    # ✓ 2. run algorithm [via configure daemons]

    # 3. run network managers [via configure daemons]
    print("Running network manager on every node...")
    for node in cluster:
        configure_daemons[node["id"] - 1].run_network_manager()
        # this rpc will download the node-specific configuration file from gs and run the network manager
        # each node can discover its configuration file by querying its own metadata
        print("Network manager active on {}:{}".format(node["address"], str(node["rpcPort"])))
    # ✓ 3. run network managers

    # 4. run test daemon [via configure daemon]
    print("Running the test daemon on {}...".format(test_daemon_endpoint))
    configure_daemons[0].run_test_daemon(algorithm, cluster[0]["port"])
    print("Test daemon active on {}".format(test_daemon_endpoint))
    # ✓ 4. run test daemon

    print("Provisioning completed: cluster is ready to execute tests on {} at {}".format(algorithm, test_daemon_endpoint))
    cluster_config_file = "masterConfig.json"  # this file will be used to tear down the cluster
    with open(cluster_config_file, "w") as out_f:
        json.dump({
            "mode": "gce",
            "algorithm": algorithm,
            "testDaemon": test_daemon_endpoint,
            "nodes": cluster
        }, out_f, indent=4)
    print("Cluster configuration saved in file {}.\n".format(cluster_config_file))
    print("To perform a test run: test_executor.py {} test_file_name".format(cluster_config_file))
    print("To stop the cluster run: tear_down.py {}".format(cluster_config_file))
Example #40
0
def deploy_minikube(args):
  """Create a VM and setup minikube."""

  credentials = GoogleCredentials.get_application_default()
  gce = discovery.build(
      "compute", "v1", credentials=credentials, cache_discovery=False)
  instances = gce.instances()
  body = {
      "name":
      args.vm_name,
      "machineType":
      "zones/{0}/machineTypes/n1-standard-16".format(args.zone),
      "disks": [
          {
              "boot": True,
              "initializeParams": {
                  "sourceImage":
                  "projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts",
                  "diskSizeGb": 100,
                  "autoDelete": True,
              },
          },
      ],
      "networkInterfaces": [
          {
              "accessConfigs": [
                  {
                      "name": "external-nat",
                      "type": "ONE_TO_ONE_NAT",
                  },
              ],
              "network":
              "global/networks/default",
          },
      ],
  }
  request = instances.insert(project=args.project, zone=args.zone, body=body)
  response = None
  try:
    response = request.execute()
    print("done")
  except errors.HttpError as e:
    if not e.content:
      raise
    content = json.loads(e.content)
    if content.get("error", {}).get("code") == requests.codes.CONFLICT:
      # We don't want to keep going so we reraise the error after logging
      # a helpful error message.
      logging.error(
          "Either the VM or the disk %s already exists in zone "
          "%s in project %s ", args.vm_name, args.zone, args.project)
      raise
    else:
      raise

  op_id = response.get("name")
  final_op = vm_util.wait_for_operation(gce, args.project, args.zone, op_id)

  logging.info("Final result for insert operation: %s", final_op)
  if final_op.get("status") != "DONE":
    raise ValueError("Insert operation has status %s", final_op.get("status"))

  if final_op.get("error"):
    message = "Insert operation resulted in error %s".format(
        final_op.get("error"))
    logging.error(message)
    raise ValueError(message)

  # Locate the install minikube script.
  install_script = os.path.join(
      os.path.dirname(__file__), "install_minikube.sh")

  if not os.path.exists(install_script):
    logging.error("Could not find minikube install script: %s", install_script)

  vm_util.wait_for_vm(args.project, args.zone, args.vm_name)
  vm_util.execute_script(args.project, args.zone, args.vm_name, install_script)

  # Copy the .kube and .minikube files to test_dir
  target = "~/.kube"
  full_target = "{0}:{1}".format(args.vm_name, target)
  logging.info("Copying %s to %s", target, args.test_dir)
  util.run([
      "gcloud", "compute", "--project=" + args.project, "scp", "--recurse",
      full_target, args.test_dir, "--zone=" + args.zone
  ])

  # The .minikube directory contains some really large ISO and other files that we don't need; so we
  # only copy the files we need.
  minikube_dir = os.path.join(args.test_dir, ".minikube")
  try:
    os.makedirs(minikube_dir)
  except OSError as exc:  # Python >2.5
    if exc.errno == errno.EEXIST and os.path.isdir(minikube_dir):
      pass
    else:
      raise

  for target in ["~/.minikube/*.crt", "~/.minikube/client.key"]:
    full_target = "{0}:{1}".format(args.vm_name, target)
    logging.info("Copying %s to %s", target, minikube_dir)
    util.run([
        "gcloud", "compute", "--project=" + args.project, "scp", "--recurse",
        full_target, minikube_dir, "--zone=" + args.zone
    ])

  config_path = os.path.join(args.test_dir, ".kube", "config")
  modify_minikube_config(config_path, minikube_dir)
Example #41
0
def getSpreadsheetAuth():
    auth.authenticate_user()
    spreadsheetAuth = gspread.authorize(
        GoogleCredentials.get_application_default())
    return spreadsheetAuth
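A possible follow-up use of the returned gspread client (the spreadsheet title is a hypothetical placeholder):

# Hypothetical usage: open a spreadsheet by title and read every row.
gc = getSpreadsheetAuth()
worksheet = gc.open("my-spreadsheet").sheet1  # placeholder spreadsheet title
rows = worksheet.get_all_values()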
Example #42
0
def get_credentials():
    """Get the Google credentials needed to access our services."""
    credentials = GoogleCredentials.get_application_default()
    if credentials.create_scoped_required():
        credentials = credentials.create_scoped(SCOPES)
    return credentials
Example #43
0
def google_compute():
    credentials = GoogleCredentials.get_application_default()
    return discovery.build('compute', 'v1', credentials=credentials)
Example #44
0
def get_client():
    """Creates Pub/Sub client and returns it."""
    if not hasattr(client_store, 'client'):
        client_store.client = get_client_from_credentials(
                GoogleCredentials.get_application_default())
    return client_store.client
Example #45
0
def deploy():
    # See: https://cloud.google.com/ai-platform/prediction/docs/regional-endpoints#python
    json_data = request.get_json()

    region = 'us-central1'
    runtime_version = '2.2'
    python_version = '3.7'
    deployment_uri = None
    model_name = 'babyweight'
    version_name = None

    for key in json_data.keys():
        if key == 'region':
            region = json_data[key]
        elif key == 'runtimeVersion':
            runtime_version = json_data[key]
        elif key == 'pythonVersion':
            python_version = json_data[key]
        elif key == 'deploymentUri':
            deployment_uri = json_data[key]
        elif key == 'modelName':
            model_name = json_data[key]
        elif key == 'versionName':
            version_name = json_data[key]

    if deployment_uri is None or version_name is None:
        resp = {
            'message': 'Option deploymentUri or versionName is not specified.'
        }
        return resp, 500

    if region == 'us-central1':
        endpoint = 'https://ml.googleapis.com'
    else:
        endpoint = 'https://{}-ml.googleapis.com'.format(region)

    client_options = ClientOptions(api_endpoint=endpoint)
    credentials = GoogleCredentials.get_application_default()
    api = discovery.build('ml',
                          'v1',
                          credentials=credentials,
                          cache_discovery=False,
                          client_options=client_options)

    api_request = api.projects().models().get(
        name='projects/{}/models/{}'.format(PROJECT_ID, model_name))
    try:
        resp = api_request.execute()
    except googleapiclient.errors.HttpError as err:
        # Create model
        request_body = {'name': model_name}
        api_request = api.projects().models().create(
            parent='projects/{}'.format(PROJECT_ID), body=request_body)
        api_request.execute()

    request_body = {
        'name': version_name,
        'deploymentUri': deployment_uri,
        'runtimeVersion': runtime_version,
        'pythonVersion': python_version
    }

    api_request = api.projects().models().versions().create(
        parent='projects/{}/models/{}'.format(PROJECT_ID, model_name),
        body=request_body)

    try:
        resp = api_request.execute()
    except googleapiclient.errors.HttpError as err:
        resp = {'message': err._get_reason()}
        return resp, 500

    return resp, 200
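For illustration only, a client might call this deploy handler with a JSON body like the following; the URL, bucket path and version name are placeholders, not taken from the source:

import requests

# Hypothetical client-side call to the deploy handler above.
payload = {
    "region": "us-central1",
    "runtimeVersion": "2.2",
    "pythonVersion": "3.7",
    "deploymentUri": "gs://my-bucket/babyweight/export",  # placeholder
    "modelName": "babyweight",
    "versionName": "v1",  # placeholder
}
resp = requests.post("https://example.com/deploy", json=payload)  # placeholder URL
print(resp.status_code, resp.json())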
Example #46
0
def train():
    json_data = request.get_json()

    scale_tier = 'BASIC_GPU'
    region = 'us-central1'
    runtime_version = '2.2'
    python_version = '3.7'
    data_dir = None
    job_dir = None
    num_train_examples = '60000000'
    num_eval_examples = '50000'
    num_evals = '100'
    learning_rate = '0.0001'

    for key in json_data.keys():
        if key == 'scaleTier':
            scale_tier = json_data[key]
        elif key == 'region':
            region = json_data[key]
        elif key == 'runtimeVersion':
            runtime_version = json_data[key]
        elif key == 'pythonVersion':
            python_version = json_data[key]
        elif key == 'dataDir':
            data_dir = json_data[key]
        elif key == 'jobDir':
            job_dir = json_data[key]
        elif key == 'numTrainExamples':
            num_train_examples = str(json_data[key])
        elif key == 'numEvalExamples':
            num_eval_examples = str(json_data[key])
        elif key == 'numEvals':
            num_evals = str(json_data[key])
        elif key == 'learningRate':
            learning_rate = str(json_data[key])

    if data_dir is None or job_dir is None:
        resp = {'message': 'Option dataDir or jobDir is not specified.'}
        return resp, 500

    with tempfile.TemporaryDirectory() as tmpdir:
        id_string = str(uuid.uuid4())
        job_id = 'train-babyweight-{}'.format(id_string).replace('-', '_')
        job_dir = os.path.join(job_dir, id_string)

        clone_cmd = 'cd {}; git init; git config core.sparsecheckout true;'\
                    'git remote add origin {};'\
                    'echo {} > .git/info/sparse-checkout;'\
                    'git pull origin {}'
        clone_cmd = clone_cmd.format(tmpdir, GIT_REPO, MODEL_PATH, BRANCH)
        subprocess.run(clone_cmd,
                       shell=True,
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL)
        train_dir = os.path.join(tmpdir, MODEL_PATH)
        subprocess.run('cd {};python3 setup.py sdist'.format(train_dir),
                       shell=True,
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL)
        package_file = os.path.join(train_dir, 'dist', 'trainer-0.0.0.tar.gz')
        package = '{}/trainer-0.0.0.tar.gz'.format(job_dir)
        subprocess.run('gsutil cp {} {}'.format(package_file, package),
                       shell=True,
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL)

        training_inputs = {
            'scaleTier':
            scale_tier,
            'packageUris': [package],
            'pythonModule':
            'trainer.task',
            'region':
            region,
            'jobDir':
            job_dir,
            'runtimeVersion':
            runtime_version,
            'pythonVersion':
            python_version,
            'args': [
                '--data-dir', data_dir, '--num-train-examples',
                num_train_examples, '--num-eval-examples', num_eval_examples,
                '--num-evals', num_evals, '--learning-rate', learning_rate
            ]
        }
        job_spec = {'jobId': job_id, 'trainingInput': training_inputs}

        credentials = GoogleCredentials.get_application_default()
        api = discovery.build('ml',
                              'v1',
                              credentials=credentials,
                              cache_discovery=False)
        api_request = api.projects().jobs().create(
            body=job_spec, parent='projects/{}'.format(PROJECT_ID))
        resp = None
        try:
            resp = api_request.execute()
        except googleapiclient.errors.HttpError as err:
            resp = {'message': err._get_reason()}
            return resp, 500

        return resp, 200
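Similarly, the train handler expects a JSON body whose keys mirror the loop above; a minimal example might look like this (the bucket paths are placeholders):

# Hypothetical training request body; dataDir and jobDir are required,
# everything else falls back to the defaults shown in the code.
train_request = {
    "scaleTier": "BASIC_GPU",
    "dataDir": "gs://my-bucket/babyweight/data",  # placeholder bucket path
    "jobDir": "gs://my-bucket/babyweight/jobs",   # placeholder bucket path
    "numTrainExamples": 60000000,
    "learningRate": 0.0001,
}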
Example #47
0
        def task_manager_scope(session):
            credentials = GoogleCredentials.get_application_default()
            ml = discovery.build('ml', 'v1', credentials=credentials)
            projectID = 'projects/{}'.format(settings.GOOGLE_PROJECT_NAME)

            project = get_current_project(session=session)
            version = get_current_version(session=session)
            machine_learning_settings = get_ml_settings(session=session,
                                                        version=version)

            Images_db = session.query(Image).filter_by(version_id=version.id,
                                                       is_test_image=True)

            REGION = "us-central1"
            RUNTIME_VERSION = "1.2"

            modelName = "a_" + str(project.id)
            versionName = "a_" + str(version.id) + "_" + str(
                machine_learning_settings.ml_compute_engine_id)
            versionName += "_" + str(machine_learning_settings.re_train_id)
            modelVersionName = '{}/models/{}/versions/{}'.format(
                projectID, modelName, versionName)

            gcs = storage.Client()
            bucket = gcs.get_bucket(settings.CLOUD_STORAGE_BUCKET)
            filenames = []

            root_dir = str(project.id) + "/" + str(version.id) + "/"
            for image in Images_db:
                #print(image.is_test_image, file=sys.stderr)
                if image.soft_delete != True:
                    filenames.append(root_dir + "images/" + str(image.id))
                    break

            Rows = []
            Images = []
            print("len(filenames):", len(filenames), file=sys.stderr)

            for file in filenames:
                blob = bucket.blob(file)
                image = blob.download_as_string()

                # Resize
                image = scipy.misc.imread(BytesIO(image))
                if image is None:
                    raise IOError("Could not open")

                # TODO BETTER WAY
                #image = scipy.misc.imresize(image, (640, 960))
                temp = tempfile.mkdtemp()
                new_temp_filename = temp + "/resized.jpg"
                scipy.misc.imsave(new_temp_filename, image)

                # Otherwise have strange byte issues
                blob = bucket.blob(file + "_test_resized")
                blob.upload_from_filename(new_temp_filename,
                                          content_type="image/jpg")
                image = blob.download_as_string()

                encoded_contents = base64.b64encode(image).decode('UTF-8')
                row = {'b64': encoded_contents}
                Rows.append(row)
                Images.append(image)

            output = {'instances': Rows}

            ml_request = ml.projects().predict(name=modelVersionName,
                                               body=output)

            PATH_TO_LABELS = root_dir + "ml/" + str(
                machine_learning_settings.ml_compute_engine_id
            ) + "/label_map.pbtext"

            label_map_blob = bucket.blob(PATH_TO_LABELS)
            label_map_data = label_map_blob.download_as_string()

            category_index = categoryMap(session=session)

            try:
                time0 = time.time()
                response = ml_request.execute()
                time1 = time.time()
                print("Time in seconds", (time1 - time0), file=sys.stderr)

                print(response, file=sys.stderr)

                for i in range(len(Images)):
                    # Use a separate name so the full response is not
                    # overwritten while iterating over multiple predictions.
                    prediction = response['predictions'][i]

                    boxes = prediction['detection_boxes']
                    scores = prediction['detection_scores']
                    classes = prediction['detection_classes']

                    boxes = np.array(boxes)
                    scores = np.array(scores)
                    classes = np.array(classes, dtype=int)
                    print(classes, file=sys.stderr)

                    image_np = scipy.misc.imread(BytesIO(Images[i]))

                    # Handle gray scale
                    if len(image_np.shape) == 2:
                        image_np = np.stack((image_np, ) * 3, axis=2)

                    print(image_np.shape)

                    visualization_utils.visualize_boxes_and_labels_on_image_array(
                        image_np,
                        boxes,
                        classes,
                        scores,
                        category_index,
                        use_normalized_coordinates=True,
                        min_score_thresh=.3,
                        line_thickness=2)

                    blob = bucket.blob(root_dir + "test_inference_out/" +
                                       str(i) + "_.jpg")

                    temp = tempfile.mkdtemp()
                    new_temp_filename = temp + "/inference_" + str(i) + "_.jpg"
                    scipy.misc.imsave(new_temp_filename, image_np)
                    blob.upload_from_filename(new_temp_filename,
                                              content_type="image/jpg")

                min_score_thresh = .05
                for i in range(len(boxes)):
                    if scores[i] > min_score_thresh:

                        class_name = category_index[classes[i]]['name']
                        print(class_name, scores[i], file=sys.stderr)

                # TODO add pub sub messaging
                out = 'success'

            except errors.HttpError as err:
                print('There was an error. Check the details:',
                      file=sys.stderr)
                print(err._get_reason(), file=sys.stderr)
                out = 'failed'

            t.cancel()
Example #48
0
# importing required libraries
import numpy as np
import pandas as pd
from tabula import read_pdf

"""I had shown here that how we can link the google spreadsheet and make changes without even going to sheet.
For further details on how we can use various functions refer the following link 'https://gspread.readthedocs.io/en/latest/'.
"""

# This is the authenticatication code 
# We don't have to pay much attention on this code
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials
gc = gspread.authorize(GoogleCredentials.get_application_default())

# Import the Google Sheet using its URL.
# This is an example where I have created a Google Sheet and put in some data.
# The sheet is stored in Google Drive (practice_spreadsheet).

wb = gc.open_by_url('https://docs.google.com/spreadsheets/d/1SVM2TySNDLT2FOdE_ifmT4HBnvd6q6pflTeFmGpnz8k/edit#gid=0')

# Select the worksheet to work on; for now we will work with Sheet1.
sheet = wb.worksheet('Sheet1')

# Fetch all the data from the sheet.
data = sheet.get_all_values()

# Convert our data into a DataFrame.
df = pd.DataFrame(data)
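Since the docstring above promises making changes without opening the sheet, a small write-back sketch (the cell coordinates and value are arbitrary) could be:

# Write a value back without opening the sheet: cell A1 (row 1, column 1).
sheet.update_cell(1, 1, "updated from Colab")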
Example #49
0
def get_vision_service():
    credentials = GoogleCredentials.get_application_default()
    return discovery.build('vision', 'v1', credentials=credentials)
Example #50
0
    def __init__(self,
                 project,
                 zone,
                 instance_group,
                 port,
                 task_type='worker',
                 task_id=0,
                 rpc_layer='grpc',
                 credentials='default',
                 service=None):
        """Creates a new GCEClusterResolver object.

    This takes in a few parameters and creates a GCEClusterResolver project. It
    will then use these parameters to query the GCE API for the IP addresses of
    each instance in the instance group.

    Args:
      project: Name of the GCE project.
      zone: Zone of the GCE instance group.
      instance_group: Name of the GCE instance group.
      port: Port of the listening TensorFlow server (default: 8470)
      task_type: Name of the TensorFlow job this GCE instance group of VM
        instances belong to.
      task_id: The task index for this particular VM, within the GCE
        instance group. In particular, every single instance should be assigned
        a unique ordinal index within an instance group manually so that they
        can be distinguished from each other.
      rpc_layer: The RPC layer TensorFlow should use to communicate across
        instances.
      credentials: GCE Credentials. If nothing is specified, this defaults to
        GoogleCredentials.get_application_default().
      service: The GCE API object returned by the googleapiclient.discovery
        function. (Default: discovery.build('compute', 'v1')). If you specify a
        custom service object, then the credentials parameter will be ignored.

    Raises:
      ImportError: If the googleapiclient is not installed.
    """
        self._project = project
        self._zone = zone
        self._instance_group = instance_group
        self._task_type = task_type
        self._task_id = task_id
        self._rpc_layer = rpc_layer
        self._port = port
        self._credentials = credentials

        if credentials == 'default':
            if _GOOGLE_API_CLIENT_INSTALLED:
                self._credentials = GoogleCredentials.get_application_default()

        if service is None:
            if not _GOOGLE_API_CLIENT_INSTALLED:
                raise ImportError(
                    'googleapiclient must be installed before using the '
                    'GCE cluster resolver')
            self._service = discovery.build('compute',
                                            'v1',
                                            credentials=self._credentials)
        else:
            self._service = service
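A hypothetical instantiation of this resolver (the project, zone and instance group names are placeholders); calling cluster_spec() then queries the GCE API for the worker addresses:

# Hypothetical usage of the resolver defined above.
resolver = GCEClusterResolver(
    project="my-project",          # placeholder GCE project
    zone="us-central1-a",          # placeholder zone
    instance_group="tf-workers",   # placeholder managed instance group
    port=8470)
cluster_spec = resolver.cluster_spec()  # resolves worker IPs via the GCE API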
Example #51
0
File: dsub_util.py Project: gavinbee/dsub
def _get_storage_service(credentials):
  """Get a storage client using the provided credentials or defaults."""
  if credentials is None:
    credentials = GoogleCredentials.get_application_default()
  return discovery.build('storage', 'v1', credentials=credentials)
Example #52
0
def data_processor(job_type):
    status = 200
    message = 'Process Complete '
    startTime = datetime.datetime.now()
    lock_file = True
    log.info('BUCKET_NAME')
    log.info(BUCKET_NAME)
    log.info('ARCHIVE_BUCKET_NAME')
    log.info(ARCHIVE_BUCKET_NAME)

    try:
        bucket = BUCKET_NAME
        archive_bucket = ARCHIVE_BUCKET_NAME
        random_number = binascii.hexlify(os.urandom(32)).decode()
        log.info(' RANDOM NUMBER --- {0}'.format(random_number))

        # Get the application default credentials. When running locally, these are
        # available after running `gcloud init`. When running on compute
        # engine, these are available from the environment.
        credentials = GoogleCredentials.get_application_default()

        # Construct the service object for interacting with the Cloud Storage API -
        # the 'storage' service, at version 'v1'.
        # You can browse other available api services and versions here:
        # https://developers.google.com/api-client-library/python/apis/
        service = discovery.build('storage', 'v1', credentials=credentials)

        # Make a request to buckets.get to retrieve a list of objects in the
        # specified bucket.
        req = service.buckets().get(bucket=bucket)
        resp = req.execute()

        # print(json.dumps(resp, indent=2))

        # Create a request to objects.list to retrieve a list of objects.
        fields_to_return = \
            'nextPageToken,items(name,size,contentType,metadata(my-key))'
        req = service.objects().list(bucket=bucket, fields=fields_to_return)
        file_count = 0
        log.info('Process {0} Start time --- {1}'.format(bucket, startTime))
        # If you have too many items to list in one request, list_next() will
        # automatically handle paging with the pageToken.
        while req:
            resp = req.execute()
            # print(json.dumps(resp, indent=2))
            if len(resp) == 0:
                log.info(
                    '############################################################################################'
                )
                log.info('--------- THE BUCKET LIST IS EMPTY --------------')
                log.info('--------- NO FILES TO PROCESS  --------------')
                log.info(resp)
                log.info(
                    '############################################################################################'
                )

            else:
                get_filenames(resp, service, random_number)

            req = service.objects().list_next(req, resp)

    except Exception as e:
        # Exceptions are not indexable in Python 3, so format the exception itself.
        log.error(' Error in getting Bucket Details - {0}'.format(e))
        message = str(e)
        status = 500

    endTime = datetime.datetime.now()
    log.info('Process End time --- {0}'.format(endTime))

    elapsedTime = endTime - startTime

    time = 'Total Time to Process all the files -- {0}'.format(
        divmod(elapsedTime.total_seconds(), 60))
    log.info(time)

    log.info(' ARGS PASSED --- {0}'.format(job_type))
    if job_type == 'now':
        set_scheduler(os.environ.get('SCHEDULER_HOUR'),
                      os.environ.get('SCHEDULER_MIN'))

    response = dict(data=json.dumps(message), status=status, time=time)
    return response
Example #53
0
 def get_a_google_credentials_object(self):
   return GoogleCredentials(None, None, None, None, None, None, None, None)
 def mock_run_flow(flow, storage, args):
     return GoogleCredentials.get_application_default()
Example #55
0
 def __connect(self):
     credentials = GoogleCredentials.get_application_default()
     self.service = discovery.build('compute',
                                    'v1',
                                    credentials=credentials)
Example #56
0
def transcribe_file(speech_file, sample_rate, parser):
    # authenticate with google using credentials in JSON file
    credentials = GoogleCredentials.get_application_default()

    client = speech.SpeechClient()

    # open audio file
    with io.open(speech_file, 'rb') as audio_file:
        content = audio_file.read()

    # send audio file to recognizer
    audio = types.RecognitionAudio(content=content)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=sample_rate,
        language_code='en-US')

    utterances = []
    marker_coordinates = []

    # send audio to recognizer
    response = client.recognize(config, audio)

    # Each result is for a consecutive portion of the audio.
    # Iterate through them to get the transcripts for the entire audio file.
    for result in response.results:
        # The first alternative is the most likely one for this portion.
        application.logger.info('Transcript: {}'.format(
            result.alternatives[0].transcript))

        # send transcribed text to geo parse services
        lat, long, address, geo_json = geo_parse(
            result.alternatives[0].transcript, parser)

        # if we get back any geo parse results store as markers for map and append to utterance text
        if bool(address) and lat is not None:
            my_point = Point((float(long), float(lat)))
            my_feature = Feature(geometry=my_point,
                                 properties={
                                     'title':
                                     'Geo Location: {}'.format(address),
                                     'description':
                                     'Transcript: {}'.format(
                                         result.alternatives[0].transcript),
                                     'marker-size':
                                     'large',
                                     'marker-color':
                                     '#FF0000',
                                     'marker-symbol':
                                     'police'
                                 })

            # Insert record into DB
            row = [
                str(uuid.uuid4()),
                float(long),
                float(lat), 'Geo Location: {}'.format(address),
                'Transcript: {}'.format(result.alternatives[0].transcript),
                str(datetime.datetime.now())
            ]
            db.InsertRow(tablename='security_events', row=row)
            #use this line to temp export the table for debugging
            #db.ExportCSV(tablename='security_events')

            # store lat long as marker coordinates for map
            marker_coordinates.append(my_feature)
            # store utterance with geo parsed address appended for display
            utterances.append(
                'Transcript: {}'.format(result.alternatives[0].transcript) +
                ' ( ' + '<em style="color:LightGray;">' +
                'Geo Location: {}'.format(address) + '</em>' + ' )')

        elif bool(address) and lat is None:
            utterances.append(
                'Transcript: {}'.format(result.alternatives[0].transcript) +
                ' ( ' + '<em style="color:LightGray;">' +
                'Geo Location: {}'.format(address) + '</em>' + ' )')
        # if there are no geo parsed results just added text without address
        else:
            utterances.append('Transcript: {}'.format(
                result.alternatives[0].transcript))

    return utterances, marker_coordinates
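A call to this helper might look like the following; the file name and sample rate are placeholders, and `parser` is assumed to be the geo-parser object used elsewhere in the application:

# Hypothetical invocation: transcribe a local 16 kHz WAV file.
utterances, marker_coordinates = transcribe_file("recording.wav", 16000, parser)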
Example #57
0
 def __init__(self):
   # Connect to Google Cloud Compute Engine API using the environment's service
   # account.
   credentials = GoogleCredentials.get_application_default()
   self.compute = discovery.build('compute', 'v1', credentials=credentials)
Example #58
0
    def get(self):
        """ 
      Uses the check_header parameters to retrieve the email of the already
      authenticated user.
      For JupyterHub + Inverting Proxy agent, use X-Inverting-Proxy-User-Id.
      For JupyterHub + Cloud IAP, X-Goog-IAP-JWT-Assertion. """

        if self.authenticator.check_header == "X-Inverting-Proxy-User-Id":

            b64_email = self.request.headers.get(
                self.authenticator.check_header, "")

            # Provides a dummy email to try the code locally.
            if b64_email == "" and self.authenticator.dummy_email:
                self.log.info(
                    f'Using a dummy email {self.authenticator.dummy_email}')
                b64_email = b64encode(
                    self.authenticator.dummy_email.encode('ascii'))

            if b64_email == "":
                raise web.HTTPError(
                    401, f'Missing header {self.authenticator.check_header}')

            user_email = b64decode(b64_email).decode("ascii")
            self.log.info(f'user_email is {user_email}')

        elif self.authenticator.check_header == "X-Goog-IAP-JWT-Assertion":

            from googleapiclient import discovery
            from oauth2client.client import GoogleCredentials

            credentials = GoogleCredentials.get_application_default()
            compute = discovery.build('compute',
                                      'v1',
                                      credentials=credentials,
                                      cache_discovery=False)

            request = compute.backendServices().get(
                project=self.authenticator.project_id,
                backendService=self.authenticator.backend_service_name)
            backend_service = request.execute()
            backend_service_id = backend_service['id']

            self.log.info(f'''self.authenticator.check_header name is 
            {self.authenticator.check_header}''')
            self.log.info(f'''self.authenticator.check_header value is 
            {self.request.headers.get(self.authenticator.check_header, "")}''')
            self.log.info(f'''self.authenticator.backend_service_id is 
            {backend_service_id}''')

            _, user_email, _ = validate_iap_jwt_from_compute_engine(
                self.request.headers.get(self.authenticator.check_header, ""),
                self.authenticator.project_number, backend_service_id)

            if not user_email:
                raise web.HTTPError(401,
                                    'Can not verify the IAP authentication.')

        else:
            raise web.HTTPError(400,
                                'Mismatch Authentication method and Header.')

        username, _ = user_email.split("@")
        user = self.user_from_username(username)

        # JupyterHub doesn't set the value for that key for some reason.
        if not hasattr(user, 'json_escaped_name'):
            setattr(user, 'json_escaped_name', json.dumps(user.name)[1:-1])

        self.log.info(f'username is {username}')
        self.log.info(f'user.name is {user.name}')

        self.set_login_cookie(user)

        self.write(
            self.render_template(
                self.authenticator.template_to_render,
                sync=True,
                user=user,
                next_url=self.get_next_url(user),
            ))
Example #59
0
def main(event, context):
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('compute', 'v1', credentials=credentials)

    with open("config.yaml", 'r') as yamlfile:
        cfg = yaml.full_load(yamlfile)

    project_configs = cfg['project']
    project = project_configs.get('project-id')
    region = project_configs.get('region')
    zone = project_configs.get('zone')

    bucket_configs = cfg['bucket']
    bucket = bucket_configs.get('bucket-name')

    instance_configs = cfg['instance']
    instance_prefix_name = instance_configs.get('instance-prefix-name')
    instance_name = instance_prefix_name + str(randint(0, 100))
    machine_type = instance_configs.get('machine-type')

    startup_script_path = instance_configs.get('startup-script')
    with open(startup_script_path, 'r') as startup_script:
        sst = startup_script.read()

    disk_configs = cfg['disks']
    snapShot_name = disk_configs.get('snapshot-name')

    config_vm = {
        "kind":
        "compute#instance",
        "name":
        instance_name,
        "zone":
        "projects/" + project + "/zones/" + zone,
        "machineType":
        "projects/" + project + "/zones/" + zone + "/machineTypes/" +
        machine_type,
        "displayDevice": {
            "enableDisplay": False
        },
        "disks": [{
            "kind":
            "compute#attachedDisk",
            "type":
            "PERSISTENT",
            "boot":
            True,
            "mode":
            "READ_WRITE",
            "autoDelete":
            True,
            "deviceName":
            instance_name,
            "source":
            "projects/" + project + "/zones/" + zone + "/disks/" +
            instance_name
        }],
        "metadata": {
            "kind":
            "compute#metadata",
            "items": [{
                "key": "startup-script",
                "value": sst
            }, {
                "key": "bucket",
                "value": bucket
            }, {
                "key": "zone",
                "value": zone
            }, {
                "key": "instance_name",
                "value": instance_name
            }, {
                "key": "file_name",
                "value": event['name']
            }]
        },
        "canIpForward":
        False,
        "networkInterfaces": [{
            "kind":
            "compute#networkInterface",
            "subnetwork":
            "projects/" + project + "/regions/" + region +
            "/subnetworks/default",
            "accessConfigs": [{
                "kind": "compute#accessConfig",
                "name": "External NAT",
                "type": "ONE_TO_ONE_NAT",
                "networkTier": "PREMIUM"
            }],
            "aliasIpRanges": []
        }],
        "description":
        "",
        "labels": {},
        "scheduling": {
            "preemptible": False,
            "onHostMaintenance": "MIGRATE",
            "automaticRestart": True,
            "nodeAffinities": []
        },
        "deletionProtection":
        False,
        "reservationAffinity": {
            "consumeReservationType": "ANY_RESERVATION"
        },
        "serviceAccounts": [{
            "email":
            "*****@*****.**",
            "scopes": [
                "https://www.googleapis.com/auth/servicecontrol",
                "https://www.googleapis.com/auth/service.management.readonly",
                "https://www.googleapis.com/auth/logging.write",
                "https://www.googleapis.com/auth/monitoring.write",
                "https://www.googleapis.com/auth/trace.append",
                "https://www.googleapis.com/auth/devstorage.full_control"
            ]
        }]
    }

    create_disk(service, project, zone, instance_name, snapShot_name)
    request = service.instances().insert(project=project,
                                         zone=zone,
                                         body=config_vm)
    return request.execute()
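The handler above reads its settings from config.yaml; a configuration carrying the keys it looks up might be shaped like this (shown as the equivalent Python structure, with every value a placeholder):

# Python equivalent of a config.yaml this handler could consume;
# every value below is a placeholder.
cfg_example = {
    "project": {"project-id": "my-project", "region": "us-central1", "zone": "us-central1-a"},
    "bucket": {"bucket-name": "my-output-bucket"},
    "instance": {
        "instance-prefix-name": "worker-",
        "machine-type": "n1-standard-1",
        "startup-script": "startup.sh",
    },
    "disks": {"snapshot-name": "base-snapshot"},
}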
Example #60
0
from base64 import b64encode

import googleapiclient.discovery
from oauth2client.client import GoogleCredentials

# Settings
IMAGE_FILE = "text.png"
CREDENTIALS_FILE = "credentials-googleapi.json"

# Connect to Google cloud service
credentials = GoogleCredentials.from_stream(CREDENTIALS_FILE)
service = googleapiclient.discovery.build('vision',
                                          'v1',
                                          credentials=credentials)

# read the file and convert it to base64 encoding
with open(IMAGE_FILE, "rb") as f:
    image_data = f.read()
    encoded_image_data = b64encode(image_data).decode('UTF-8')

# Create the request object for Google Vision API
batch_request = [{
    'image': {
        'content': encoded_image_data
    },
    'features': [{
        'type': 'TEXT_DETECTION'
    }]  # (asking for extracted text from the image back)
}]
request = service.images().annotate(body={'requests': batch_request})
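The snippet ends before the request is executed; as a sketch of the remaining step (assuming the standard Vision v1 TEXT_DETECTION response shape), it would typically be:

# Sketch of the remaining step: execute the request and print any detected text.
response = request.execute()
annotations = response["responses"][0].get("textAnnotations", [])
if annotations:
    print(annotations[0]["description"])  # full extracted text block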