Example #1
def get_http():
    http = httplib2.Http()
    GoogleCredentials.get_application_default().create_scoped([
        'https://www.googleapis.com/auth/firebase.database',
        'https://www.googleapis.com/auth/userinfo.email'
    ]).authorize(http)
    return http
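A minimal usage sketch (not part of the original snippet): the authorized http object returned by get_http() can issue REST calls against the Firebase Realtime Database. FIREBASE_DB_URL and the '/users.json' path below are hypothetical placeholders.

# Usage sketch (assumption): FIREBASE_DB_URL is a placeholder for your database URL.
FIREBASE_DB_URL = 'https://<your-project>.firebaseio.com'

http = get_http()
resp, content = http.request(FIREBASE_DB_URL + '/users.json', 'GET')
print(resp.status, content)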
Example #2
def get_service():
    from googleapiclient.discovery import build
    from oauth2client.client import GoogleCredentials
    credentials = GoogleCredentials.get_application_default()
    if credentials.create_scoped_required():
        credentials = credentials.create_scoped('https://www.googleapis.com/auth/bigquery')
    return build('bigquery', 'v2', credentials=credentials)
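A possible usage sketch for the client returned above (not part of the original snippet); 'my-project' is a placeholder project ID.

# Usage sketch (assumption): list datasets visible to the credentials.
bigquery = get_service()
datasets = bigquery.datasets().list(projectId='my-project').execute()
for dataset in datasets.get('datasets', []):
    print(dataset['datasetReference']['datasetId'])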
def hello():

    credentials = AppAssertionCredentials([])
    client = datastore.Client(project='mq-cloud-prototyping-3', credentials=credentials)
    sys.stdout.write(credentials.to_json())

    query = client.query(kind='Person')
    res = query.fetch()
    all = dict(res)
    sys.stdout.write(str(all))

    return credentials.to_json()

    # NOTE: everything below is unreachable (it follows the return above) and is
    # kept only because it was part of the original handler.
    try:
        token = ''

        # oauth
        O_AUTH_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'

        credentials = GoogleCredentials.get_application_default()
        if credentials.create_scoped_required():
            credentials = credentials.create_scoped(PUBSUB_SCOPES)
        http = httplib2.Http()
        credentials.authorize(http)

        cl = discovery.build('pubsub', 'v1', http=http)
        return credentials.to_json()

        credentials = GoogleCredentials.get_application_default()
        credentials = credentials.create_scoped([O_AUTH_EMAIL_SCOPE])
        http = httplib2.Http()
        credentials.authorize(http)
        return credentials.to_json()
        # if not http:
        #     http = httplib2.Http()
        # credentials.authorize(http)

        # temp hardcoded token
        # token = 'ya29.CjjlAlrvqUwXrujCnJuqa08HTtmNilyP7K1GGrHQ40Gt489H6NGT9WQAxEL92OSQ6anGYeFPRcvI4g'

        tokenBearer = 'Bearer %s' % token
        url = 'https://admin-dot-mq-vouchers.appspot.com/api/communities/mtv1/campaigns?page=0&size=1000&sorting=campaignName,ASC'
        req = urllib2.Request(url, headers={'Content-Type': 'application/json', 'Authorization': tokenBearer})
        f = urllib2.urlopen(req)
        response = f.read()
        sys.stdout.write(str(response))
        respjson = json.loads(response)

        f.close()
        # respjson = '3333'
        # sys.stdout.write(str(all))
        return str(response)
    except urllib2.HTTPError as error:
        return 'get failed %s' % error
 def test_get_application_default_environment_not_set_up(self):
   # It is normal for this test to fail if run inside
   # a Google Compute Engine VM or after 'gcloud auth login' command
   # has been executed on a non Windows machine.
   os.environ['SERVER_SOFTWARE'] = ''
   os.environ[GOOGLE_APPLICATION_CREDENTIALS] = ''
   os.environ['APPDATA'] = ''
   # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
   try:
     GoogleCredentials.get_application_default()
     self.fail('An exception was expected!')
   except ApplicationDefaultCredentialsError as error:
     self.assertEqual(ADC_HELP_MSG, str(error))
Example #5
def main(photo_file):
  '''Run a label request on a single image'''

  API_DISCOVERY_FILE = 'https://vision.googleapis.com/$discovery/rest?version=v1'
  http = httplib2.Http()

  credentials = GoogleCredentials.get_application_default().create_scoped(
      ['https://www.googleapis.com/auth/cloud-platform'])
  credentials.authorize(http)

  service = build('vision', 'v1', http, discoveryServiceUrl=API_DISCOVERY_FILE)

  with open(photo_file, 'rb') as image:
    image_content = base64.b64encode(image.read())
    service_request = service.images().annotate(
      body={
        'requests': [{
          'image': {
            'content': image_content
           },
          'features': [{
            'type': 'LABEL_DETECTION',
            'maxResults': 1,
           }]
         }]
      })
    response = service_request.execute()
    label = response['responses'][0]['labelAnnotations'][0]['description']
    print('Found label: %s for %s' % (label, photo_file))
    return 0
def main(photo_file):
    '''Run a label request on a single image'''

    credentials = GoogleCredentials.get_application_default()
    with open(API_DISCOVERY_FILE, 'r') as f:
        doc = f.read()
    service = discovery.build_from_document(
        doc, credentials=credentials, http=httplib2.Http())

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        service_request = service.images().annotate(
            body={
                'requests': [{
                    'image': {
                        'content': image_content
                    },
                    'features': [{
                        'type': 'LABEL_DETECTION',
                        'maxResults': 5,
                    }]
                }]
            })
        response = service_request.execute()
        # print(response)
        labels = map(lambda x: x['description'], response[
                     'responses'][0]['labelAnnotations'])
        sys.stdout.write("There are " + ", ".join(labels))
Example #7
def get_service():
    credentials = GoogleCredentials.get_application_default()
    scoped_credentials = credentials.create_scoped(
        ['https://www.googleapis.com/auth/cloud-platform'])
    http = httplib2.Http()
    scoped_credentials.authorize(http)
    return discovery.build('language', 'v1beta1', http=http)
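A hedged usage sketch for the v1beta1 client built above (not part of the original snippet); the sample text is illustrative.

# Usage sketch (assumption): analyze sentiment of a short text snippet.
service = get_service()
response = service.documents().analyzeSentiment(body={
    'document': {'type': 'PLAIN_TEXT', 'content': 'The weather is lovely today.'}
}).execute()
print(response['documentSentiment'])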
Example #8
def OCR(photo_file):
    """Run a label request on a single image"""
    #print photo_file
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        service_request = service.images().annotate(body={
            'requests': [{
                'image': {
                    'content': image_content.decode('UTF-8')
                },
                'features': [{
                    'type': 'TEXT_DETECTION',
                    'maxResults': 1
                }]
            }]
        })
        response = service_request.execute()
        line = response['responses'][0]['textAnnotations'][0]['description']
        string = str(line)
        proc = string.splitlines()
        #print proc
        card = BusinessCard(proc)
        #print response
        #label = response['responses'][0]['labelAnnotations'][0]['description']
        #print('Found label: %s for %s' % (label, photo_file))
        return card.cleanup()
def run(project, zone, instance_name):
    credentials = GoogleCredentials.get_application_default()
    compute = build('compute', 'v1', credentials=credentials)

    print('Creating instance.')

    operation = create_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])

    instances = list_instances(compute, project, zone)

    print('Instances in project %s and zone %s:' % (project, zone))
    for instance in instances:
        print(' - ' + instance['name'])

    print("""
Instance created.
It will take a minute or two for the instance to complete work.
Check this URL: http://storage.googleapis.com/%s/output.png
Once the image is uploaded press enter to delete the instance.
""" % project)

    input()

    print('Deleting instance.')

    operation = delete_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])
Example #10
def main(bucket, sources):
    # Get the application default credentials. When running locally, these are
    # available after running `gcloud init`. When running on compute
    # engine, these are available from the environment.
    credentials = GoogleCredentials.get_application_default()

    # Construct the service object for the interacting with the Cloud Storage
    # API.
    service = discovery.build('storage', 'v1', credentials=credentials)

    print "\n-----------\n"
    print "sources===", sources
    print "\n-----------\n"

    # Upload the source files.
    for filename in sources:
        req = service.objects().insert(
            media_body=filename,
            name=filename,
            bucket=bucket)
        resp = req.execute()
        print('> Uploaded source file {}'.format(filename))
        print(json.dumps(resp, indent=2))

    """
Example #11
def get_service():
    """Build a client to the Google Cloud Natural Language API."""

    credentials = GoogleCredentials.get_application_default()

    return discovery.build('language', 'v1beta1',
                           credentials=credentials)
Example #12
def run(project, zone, instance_name, package):
    credentials = GoogleCredentials.get_application_default()
    compute = build('compute', 'v1', credentials=credentials)

    print 'Creating instance.'

    #creating instance
    operation = create_instance(compute, project, zone, instance_name, package)

    #execute operations
    wait_for_operation(compute, project, zone, operation['name'])

    #listing instances
    instances = list_instances(compute, project, zone)

    print 'Instances in project %s and zone %s:' % (project, zone)
    for instance in instances:
        print ' - ' + instance['name']

    print """ Instance created """

    raw_input()

    print 'Deleting instance'

    operation = delete_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])
Example #13
File: auth.py  Project: JavierRoger/beam
def get_service_credentials():
  """For internal use only; no backwards-compatibility guarantees.

  Get credentials to access Google services."""
  user_agent = 'beam-python-sdk/1.0'
  if is_running_in_gce:
    # We are currently running as a GCE taskrunner worker.
    #
    # TODO(ccy): It's not entirely clear if these credentials are thread-safe.
    # If so, we can cache these credentials to save the overhead of creating
    # them again.
    return _GCEMetadataCredentials(user_agent=user_agent)
  else:
    client_scopes = [
        'https://www.googleapis.com/auth/bigquery',
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/devstorage.full_control',
        'https://www.googleapis.com/auth/userinfo.email',
        'https://www.googleapis.com/auth/datastore'
    ]

    try:
      credentials = GoogleCredentials.get_application_default()
      credentials = credentials.create_scoped(client_scopes)
      logging.debug('Connecting using Google Application Default '
                    'Credentials.')
      return credentials
    except Exception as e:
      logging.warning(
          'Unable to find default credentials to use: %s\n'
          'Connecting anonymously.', e)
      return None
Example #14
  def _tpuService(self):
    """Creates a new Cloud TPU API object.

    This works around an issue where the underlying HTTP connection sometimes
    times out when the script has been running for too long. Other methods in
    this object call this method to get a new API object whenever they need
    to communicate with the Cloud API.

    Returns:
      A Google Cloud TPU API object.
    """
    if self._service:
      return self._service

    credentials = self._credentials
    if credentials is None or credentials == 'default':
      credentials = GoogleCredentials.get_application_default()

    if self._discovery_url:
      return discovery.build(
          'tpu', 'v1alpha1',
          credentials=credentials,
          discoveryServiceUrl=self._discovery_url)
    else:
      return discovery.build(
          'tpu', 'v1alpha1',
          credentials=credentials)
Example #15
 def _connect_google_monitoring():
     c = getattr(GoogleMonitoringV3.threadlocal, 'cm_conn', None)
     if c is None:
         creds = GoogleCredentials.get_application_default()
         GoogleMonitoringV3.threadlocal.cm_conn = c = build('monitoring', 'v3',
                                                            credentials=creds).projects()
     return c
Example #16
def main():
	conn = None

	device_opt = ["port", "no_password", "zone", "project"]

	atexit.register(atexit_handler)

	define_new_opts()

	all_opt["power_timeout"]["default"] = "60"

	options = check_input(device_opt, process_input(device_opt))

	docs = {}
	docs["shortdesc"] = "Fence agent for GCE (Google Cloud Engine)"
	docs["longdesc"] = "fence_gce is an I/O Fencing agent for GCE (Google Cloud " \
			   "Engine). It uses the googleapiclient library to connect to GCE.\n" \
			   "googleapiclient can be configured with Google SDK CLI or by " \
			   "executing 'gcloud auth application-default login'.\n" \
			   "For instructions see: https://cloud.google.com/compute/docs/tutorials/python-guide"
	docs["vendorurl"] = "http://cloud.google.com"
	show_docs(options, docs)

	run_delay(options)

	try:
		credentials = GoogleCredentials.get_application_default()
		conn = discovery.build('compute', 'v1', credentials=credentials)
	except:
		fail_usage("Failed: Unable to connect to GCE. Check your configuration.")

	# Operate the fencing device
	result = fence_action(conn, options, set_power_status, get_power_status, get_nodes_list)
	sys.exit(result)
def main():
    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())
    projectId = raw_input('Enter the project ID: ')
    datasetId = raw_input('Enter a dataset ID: ')
    tableId = raw_input('Enter a table name to load the data to: ')
    schema_path = raw_input(
        'Enter the path to the schema file for the table: ')

    with open(schema_path, 'r') as schema_file:
        schema = schema_file.read()

    data_path = raw_input('Enter the path to the data file: ')

    with open(data_path, 'r') as data_file:
        data = data_file.read()

    resp, content = make_post(http,
                              schema,
                              data,
                              projectId,
                              datasetId,
                              tableId)

    if resp.status == 200:
        job_resource = json.loads(content)
        service = get_service(credentials)
        poll_job(service, **job_resource['jobReference'])
        print("Success!")
    else:
        print("Http error code: {}".format(resp.status))
Example #18
def main(
    description, project_id, day, month, year, hours, minutes, source_bucket, access_key, secret_access_key, sink_bucket
):
    """Create a one-off transfer from Amazon S3 to Google Cloud Storage."""
    credentials = GoogleCredentials.get_application_default()
    storagetransfer = discovery.build("storagetransfer", "v1", credentials=credentials)

    # Edit this template with desired parameters.
    # Specify times below using US Pacific Time Zone.
    transfer_job = {
        "description": description,
        "status": "ENABLED",
        "projectId": project_id,
        "schedule": {
            "scheduleStartDate": {"day": day, "month": month, "year": year},
            "scheduleEndDate": {"day": day, "month": month, "year": year},
            "startTimeOfDay": {"hours": hours, "minutes": minutes},
        },
        "transferSpec": {
            "awsS3DataSource": {
                "bucketName": source_bucket,
                "awsAccessKey": {"accessKeyId": access_key, "secretAccessKey": secret_access_key},
            },
            "gcsDataSink": {"bucketName": sink_bucket},
        },
    }

    result = storagetransfer.transferJobs().create(body=transfer_job).execute()
    print("Returned transferJob: {}".format(json.dumps(result, indent=4)))
Example #19
def main(photo_file):
    """Run a label request on a single image"""

    # [START authenticate]
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)
    # [END authenticate]

    # [START construct_request]
    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        service_request = service.images().annotate(body={
            'requests': [{
                'image': {
                    'content': image_content.decode('UTF-8')
                },
                'features': [{
                    'type': 'LABEL_DETECTION',
                    'maxResults': 1
                }]
            }]
        })
        # [END construct_request]
        # [START parse_response]
        response = service_request.execute()
        label = response['responses'][0]['labelAnnotations'][0]['description']
        print('Found label: %s for %s' % (label, photo_file))
        return 0
 def test_get_application_default_from_environment_variable_malformed_file(
     self):
   os.environ['SERVER_SOFTWARE'] = ''
   environment_variable_file = datafile(
       os.path.join('gcloud',
                    'application_default_credentials_malformed_3.json'))
   os.environ[GOOGLE_APPLICATION_CREDENTIALS] = environment_variable_file
   # we can't use self.assertRaisesRegexp() because it is only in Python 2.7+
   try:
     GoogleCredentials.get_application_default()
     self.fail('An exception was expected!')
   except ApplicationDefaultCredentialsError as error:
     self.assertTrue(str(error).startswith(
         'An error was encountered while reading json file: ' +
         environment_variable_file + ' (pointed to by ' +
         GOOGLE_APPLICATION_CREDENTIALS + ' environment variable):'))
Example #21
 def __init__(self, project, session_id, logger):
     super(GCEService, self).__init__(project, session_id, logger)
     self.credentials = GoogleCredentials.get_application_default()
     self.compute = discovery.build('compute', 'v1',
             credentials=self.credentials)
     self.storage = discovery.build('storage', 'v1',
             credentials=self.credentials)
Example #22
def main(bucket):
    # Get the application default credentials. When running locally, these are
    # available after running `gcloud init`. When running on compute
    # engine, these are available from the environment.
    credentials = GoogleCredentials.get_application_default()

    # Construct the service object for interacting with the Cloud Storage API -
    # the 'storage' service, at version 'v1'.
    # You can browse other available api services and versions here:
    #     https://developers.google.com/api-client-library/python/apis/
    service = discovery.build('storage', 'v1', credentials=credentials)

    # Make a request to buckets.get to retrieve a list of objects in the
    # specified bucket.
    req = service.buckets().get(bucket=bucket)
    resp = req.execute()
    print(json.dumps(resp, indent=2))

    # Create a request to objects.list to retrieve a list of objects.
    fields_to_return = \
        'nextPageToken,items(name,size,contentType,metadata(my-key))'
    req = service.objects().list(bucket=bucket, fields=fields_to_return)

    # If you have too many items to list in one request, list_next() will
    # automatically handle paging with the pageToken.
    while req:
        resp = req.execute()
        print(json.dumps(resp, indent=2))
        req = service.objects().list_next(req, resp)
def anaylze_content(text):
  '''Run a sentiment analysis request on text.
  This function is a modified version of the tutorial at this link on 7/24/16
  https://cloud.google.com/natural-language/docs/sentiment-tutorial
  '''

  http = httplib2.Http()

  credentials = GoogleCredentials.get_application_default().create_scoped(
      ['https://www.googleapis.com/auth/cloud-platform'])
  credentials.authorize(http)

  service = discovery.build('language', 'v1beta1',
                            http=http, discoveryServiceUrl=DISCOVERY_URL)

  service_request = service.documents().annotateText(
    body={
            "document":{
              "type":"PLAIN_TEXT",
              "content": text
            },
            "features":{
              "extractDocumentSentiment":True
            },
            "encodingType":"UTF8"
          })

  try:
    response = service_request.execute()
    return response
  except:
    # Normally you don't want to catch all errors but what is a side project
    # without tech debt.
    return {"error_msg": str(sys.exc_info()[1])}
def main(project_id):
    credentials = GoogleCredentials.get_application_default()
    # Construct the service object for interacting with the BigQuery API.
    bigquery = discovery.build('bigquery', 'v2', credentials=credentials)

    list_datasets(bigquery, project_id)
    list_projects(bigquery)
Example #25
    def __init__(self, config=None):
        # Initialize Handler
        Handler.__init__(self, config)

        if discovery is None:
            logging.error("Failed to load apiclient.discovery")
            return
        elif GoogleCredentials is None:
            logging.error("Failed to load "
                          "oauth2client.client.GoogleCredentials")
            return

        # Initialize options
        self.topic = self.config['topic']
        self.scopes = self.config['scopes']
        self.retries = int(self.config['retries'])
        self.batch = self.config['batch']
        self.batch_size = int(self.config['batch_size'])
        self.metrics = []
        tags_items = self.config['tags']
        self.tags = {}
        for item in tags_items:
            k, v = item.split(':')
            self.tags[k] = v

        # Initialize client
        credentials = GoogleCredentials.get_application_default()
        if credentials.create_scoped_required():
            credentials = credentials.create_scoped(self.scopes)
        self.client = discovery.build('pubsub', 'v1', credentials=credentials)
def main(photo_file):
    """Run a label request on a single image"""

    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        service_request = service.images().annotate(body={
            'requests': [{
                'image': {
                    'content': image_content.decode('UTF-8')
                },
                'features': [{
                    'type': 'LABEL_DETECTION',
                    'maxResults': 5
                }]
            }]
        })
        response = service_request.execute()
        label = response['responses'][0]['labelAnnotations'][0]['description']
        label_jp = translate(label)
        score = response['responses'][0]['labelAnnotations'][0]['score']
        print('Found label: %s(%s), score: %f for %s' % (label_jp, label, score, photo_file))
        jtalk("これは" + label_jp.encode('utf-8') + "です")
        for label_num in range(1, 5):
            label = response['responses'][0]['labelAnnotations'][label_num]['description']
            label_jp = translate(label)
            score = response['responses'][0]['labelAnnotations'][label_num]['score']
            print('Found label: %s(%s), score: %f for %s' % (label_jp, label, score, photo_file))

        return 0
def main(project_id, dataset_id, table_name, schema_path, data_path):
    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())
    bigquery = discovery.build('bigquery', 'v2', credentials=credentials)

    with open(schema_path, 'r') as schema_file:
        schema = schema_file.read()

    with open(data_path, 'r') as data_file:
        data = data_file.read()

    resp, content = make_post(
        http,
        schema,
        data,
        project_id,
        dataset_id,
        table_name)

    if resp.status == 200:
        job = json.loads(content)
        poll_job(bigquery, job)
        print("Success!")
    else:
        print("Http error code: {}".format(resp.status))
Example #28
  def __init__(self, config, logger):
    """See README.md for the config format."""
    self._project_name = config['project_name']
    self._taskqueue_tag = config['taskqueue_tag']
    self._src_path = config['src_path']
    self._credentials = GoogleCredentials.get_application_default()
    self._logger = logger

    # Separate the cloud storage path into the bucket and the base path under
    # the bucket.
    storage_path_components = config['cloud_storage_path'].split('/')
    self._bucket_name = storage_path_components[0]
    self._base_path_in_bucket = ''
    if len(storage_path_components) > 1:
      self._base_path_in_bucket = '/'.join(storage_path_components[1:])
      if not self._base_path_in_bucket.endswith('/'):
        self._base_path_in_bucket += '/'

    self._google_storage_accessor = GoogleStorageAccessor(
        credentials=self._credentials, project_name=self._project_name,
        bucket_name=self._bucket_name)

    self._traces_dir = os.path.join(self._base_path_in_bucket, 'traces')
    self._trace_database_path = os.path.join(
        self._traces_dir,
        config.get('trace_database_filename', 'trace_database.json'))

    # Recover any existing trace database in case the worker died.
    self._DownloadTraceDatabase()

    # Initialize the global options that will be used during trace generation.
    options.OPTIONS.ParseArgs([])
    options.OPTIONS.local_binary = config['chrome_path']
Example #29
def main(project_id):
    # [START build_service]
    # Grab the application's default credentials from the environment.
    credentials = GoogleCredentials.get_application_default()
    # Construct the service object for interacting with the BigQuery API.
    bigquery_service = build('bigquery', 'v2', credentials=credentials)
    # [END build_service]

    try:
        # [START run_query]
        query_request = bigquery_service.jobs()
        query_data = {
            'query': (
                'SELECT TOP(corpus, 10) as title, '
                'COUNT(*) as unique_words '
                'FROM [publicdata:samples.shakespeare];')
        }

        query_response = query_request.query(
            projectId=project_id,
            body=query_data).execute()
        # [END run_query]

        # [START print_results]
        print('Query Results:')
        for row in query_response['rows']:
            print('\t'.join(field['v'] for field in row['f']))
        # [END print_results]

    except HttpError as err:
        print('Error: {}'.format(err.content))
        raise err
Example #30
    def __init__(self, credentials=None, project_id=None,
                 read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT,
                 timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
        if read_only and admin:
            raise ValueError('A read-only client cannot also perform '
                             'administrative actions.')

        if credentials is None:
            credentials = GoogleCredentials.get_application_default()

        scopes = []
        if read_only:
            scopes.append(READ_ONLY_SCOPE)
        else:
            scopes.append(DATA_SCOPE)

        if admin:
            scopes.append(ADMIN_SCOPE)

        self._admin = bool(admin)
        self._credentials = credentials.create_scoped(scopes)
        self._project_id = _determine_project_id(project_id)
        self.user_agent = user_agent
        self.timeout_seconds = timeout_seconds

        # These will be set in start().
        self._data_stub = None
        self._cluster_stub = None
        self._operations_stub = None
        self._table_stub = None
Example #31
def dqn_learing(
    env,
    q_func,
    optimizer_spec,
    exploration,
    stopping_criterion=None,
    replay_buffer_size=1000000,
    batch_size=32,
    gamma=0.99,
    learning_starts=50000,
    learning_freq=4,
    frame_history_len=4,
    target_update_freq=10000
    ):

    """Run Deep Q-learning algorithm.

    You can specify your own convnet using q_func.

    All schedules are w.r.t. total number of steps taken in the environment.

    Parameters
    ----------
    env: gym.Env
        gym environment to train on.
    q_func: function
        Model to use for computing the q function. It should accept the
        following named arguments:
            input_channel: int
                number of channel of input.
            num_actions: int
                number of actions
    optimizer_spec: OptimizerSpec
        Specifying the constructor and kwargs, as well as learning rate schedule
        for the optimizer
    exploration: Schedule (defined in utils.schedule)
        schedule for the probability of choosing a random action.
    stopping_criterion: (env) -> bool
        should return true when it's ok for the RL algorithm to stop.
        takes in env and the number of steps executed so far.
    replay_buffer_size: int
        How many memories to store in the replay buffer.
    batch_size: int
        How many transitions to sample each time experience is replayed.
    gamma: float
        Discount Factor
    learning_starts: int
        After how many environment steps to start replaying experiences
    learning_freq: int
        How many steps of environment to take between every experience replay
    frame_history_len: int
        How many past frames to include as input to the model.
    target_update_freq: int
        How many experience replay rounds (not steps!) to perform between
        each update to the target Q network
    """
    assert type(env.observation_space) == gym.spaces.Box
    assert type(env.action_space)      == gym.spaces.Discrete
    Statistic['parameters'] = {'replay_buffer_size':replay_buffer_size,
                               'batch_size':batch_size,
                               'gamma':gamma,
                               'frame_history_len':frame_history_len,
                               'learning_starts':learning_starts,
                               'learning_freq':learning_freq,
                               'target_update_freq':target_update_freq,
                               'name':env.env.unwrapped.spec.id}
    ###############
    # BUILD MODEL #
    ###############

    if len(env.observation_space.shape) == 1:
        # This means we are running on low-dimensional observations (e.g. RAM)
        input_arg = env.observation_space.shape[0]
    else:
        img_h, img_w, img_c = env.observation_space.shape
        input_arg = frame_history_len * img_c
    num_actions = env.action_space.n

    # Construct an epsilon-greedy policy with the given exploration schedule
    def select_epilson_greedy_action(model, obs, t):
        sample = random.random()
        eps_threshold = exploration.value(t)
        if sample > eps_threshold:
            obs = torch.from_numpy(obs).type(dtype).unsqueeze(0) / 255.0
            # Use volatile = True if variable is only used in inference mode, i.e. don’t save the history
            with torch.no_grad():
                return model(Variable(obs)).data.max(1)[1].cpu()
        else:
            return torch.IntTensor([[random.randrange(num_actions)]])

    # Initialize target q function and q function, i.e. build the model.
    ######

    Q = q_func(input_arg, num_actions).type(dtype)
    target_Q = q_func(input_arg, num_actions).type(dtype)

    if USE_CUDA:
        Q = Q.cuda()
        target_Q = target_Q.cuda()

    ######


    # Construct Q network optimizer function
    optimizer = optimizer_spec.constructor(Q.parameters(), **optimizer_spec.kwargs)

    # Construct the replay buffer
    replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)

    ###############
    # RUN ENV     #
    ###############
    num_param_updates = 0
    mean_episode_reward = -float('nan')
    best_mean_episode_reward = -float('inf')
    last_obs = env.reset()
    LOG_EVERY_N_STEPS = 10000
    filename = 'statistics.pkl'

    # Google Drive
    # try:
    #     import google.colab
    #     IN_COLAB = True
    # except:
    IN_COLAB = False

    if IN_COLAB:
        run_in_colab_message()
        try:
            from google.colab import auth
            import logging
            from pydrive.auth import GoogleAuth
            from pydrive.drive import GoogleDrive
            from oauth2client.client import GoogleCredentials
            logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
            auth.authenticate_user()
            gauth = GoogleAuth()
            gauth.credentials = GoogleCredentials.get_application_default()
            drive = GoogleDrive(gauth)
        except:
            pass

    iter_time = time()

    for t in count():
        ### 1. Check stopping criterion
        if stopping_criterion is not None and stopping_criterion(env):
            break

        ### 2. Step the env and store the transition
        # At this point, "last_obs" contains the latest observation that was
        # recorded from the simulator. Here, your code needs to store this
        # observation and its outcome (reward, next observation, etc.) into
        # the replay buffer while stepping the simulator forward one step.
        # At the end of this block of code, the simulator should have been
        # advanced one step, and the replay buffer should contain one more
        # transition.
        # Specifically, last_obs must point to the new latest observation.
        # Useful functions you'll need to call:
        # obs, reward, done, info = env.step(action)
        # this steps the environment forward one step
        # obs = env.reset()
        # this resets the environment if you reached an episode boundary.
        # Don't forget to call env.reset() to get a new observation if done
        # is true!!
        # Note that you cannot use "last_obs" directly as input
        # into your network, since it needs to be processed to include context
        # from previous frames. You should check out the replay buffer
        # implementation in dqn_utils.py to see what functionality the replay
        # buffer exposes. The replay buffer has a function called
        # encode_recent_observation that will take the latest observation
        # that you pushed into the buffer and compute the corresponding
        # input that should be given to a Q network by appending some
        # previous frames.
        # Don't forget to include epsilon greedy exploration!
        # And remember that the first time you enter this loop, the model
        # may not yet have been initialized (but of course, the first step
        # might as well be random, since you haven't trained your net...)
        #####

        idx = replay_buffer.store_frame(last_obs)
        enc_obs = replay_buffer.encode_recent_observation()

        if t > learning_starts:
            action = select_epilson_greedy_action(Q, enc_obs, t)
        else:
            action = torch.IntTensor([[random.randrange(num_actions)]])


        obs, reward, done, info = env.step(action)
        if done:
            obs = env.reset()

        replay_buffer.store_effect(idx, action, reward, done)

        last_obs = obs

        #####

        # at this point, the environment should have been advanced one step (and
        # reset if done was true), and last_obs should point to the new latest
        # observation

        ### 3. Perform experience replay and train the network.
        # Note that this is only done if the replay buffer contains enough samples
        # for us to learn something useful -- until then, the model will not be
        # initialized and random actions should be taken
        if (t > learning_starts and
                t % learning_freq == 0 and
                replay_buffer.can_sample(batch_size)):
            # Here, you should perform training. Training consists of four steps:
            # 3.a: use the replay buffer to sample a batch of transitions (see the
            # replay buffer code for function definition, each batch that you sample
            # should consist of current observations, current actions, rewards,
            # next observations, and done indicator).
            # Note: Move the variables to the GPU if available
            # 3.b: fill in your own code to compute the Bellman error. This requires
            # evaluating the current and next Q-values and constructing the corresponding error.
            # Note: don't forget to clip the error between [-1,1], multiply it by -1 (since pytorch minimizes) and
            #       mask out post-terminal-state Q-values (see ReplayBuffer code).
            # 3.c: train the model. To do this, use the Bellman error you calculated previously.
            # Pytorch will differentiate this error for you, to backward the error use the following API:
            #       current.backward(d_error.data.unsqueeze(1))
            # Where "current" is the variable holding current Q Values and d_error is the clipped bellman error.
            # Your code should produce one scalar-valued tensor.
            # Note: don't forget to call optimizer.zero_grad() before the backward call and
            #       optimizer.step() after the backward call.
            # 3.d: periodically update the target network by loading the current Q network weights into the
            #      target_Q network. see state_dict() and load_state_dict() methods.
            #      you should update every target_update_freq steps, and you may find the
            #      variable num_param_updates useful for this (it was initialized to 0)
            #####

            #3.a
            obs_batch, act_batch, rew_batch, next_obs_batch, done_mask = replay_buffer.sample(batch_size)
            obs_batch = Variable(torch.from_numpy(obs_batch).type(dtype) / 255., requires_grad=True)
            act_batch = Variable(torch.from_numpy(act_batch).type(torch.int64))
            rew_batch = Variable(torch.from_numpy(rew_batch).type(dtype), requires_grad=True)
            next_obs_batch = Variable(torch.from_numpy(next_obs_batch).type(dtype) / 255., requires_grad=True)
            done_mask = Variable(torch.from_numpy(done_mask).type(torch.int64))

            if USE_CUDA:
                obs_batch = obs_batch.cuda()
                act_batch = act_batch.cuda()
                rew_batch = rew_batch.cuda()
                next_obs_batch = next_obs_batch.cuda()
                done_mask = done_mask.cuda()

            # Q network
            val = Q(obs_batch).gather(dim=1, index=act_batch.unsqueeze(1))

            # Q target network
            with torch.no_grad():
                tar_val_t = target_Q(next_obs_batch).max(1)[0]
            tar_val = torch.addcmul(rew_batch, gamma, 1-done_mask.type(dtype), tar_val_t)

            # 3.b error calculate
            d_error = (tar_val - val.squeeze()).clamp_(-1, 1) * -1.
            # d_error = torch.pow((tar_val - val.squeeze()).clamp_(-1, 1), 2) * -1.

            # 3.c train Q network
            optimizer.zero_grad()
            val.backward(d_error.data.unsqueeze(1))
            optimizer.step()

            # 3.d update target network
            num_param_updates += 1
            if num_param_updates % target_update_freq == 0:
                target_Q.load_state_dict(Q.state_dict())
            #####

        ### 4. Log progress and keep track of statistics
        episode_rewards = get_wrapper_by_name(env, "Monitor").get_episode_rewards()
        if len(episode_rewards) > 0:
            mean_episode_reward = np.mean(episode_rewards[-100:])
        if len(episode_rewards) > 100:
            best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)

        Statistic["mean_episode_rewards"].append(mean_episode_reward)
        Statistic["best_mean_episode_rewards"].append(best_mean_episode_reward)

        if t % LOG_EVERY_N_STEPS == 0 and t > learning_starts:
            print("Timestep %d" % (t,))
            print(f"Iteration time:{time()-iter_time:.2f}")
            iter_time = time()
            print("mean reward (100 episodes) %f" % mean_episode_reward)
            print("best mean reward %f" % best_mean_episode_reward)
            print("episodes %d" % len(episode_rewards))
            print("exploration %f" % exploration.value(t))
            sys.stdout.flush()

            # Dump statistics to pickle
            filename = f"{t}" + 'statistics.pkl' if IN_COLAB else 'statistics.pkl'
            with open(filename, 'wb') as f:
                pickle.dump(Statistic, f)
                print("Saved to %s" % filename)
            if IN_COLAB and t % (LOG_EVERY_N_STEPS * 10) == 0:
                try:
                    stat_pkl = drive.CreateFile()
                    stat_pkl.SetContentFile(filename)
                    stat_pkl.Upload()
                    print("Uploaded to drive")
                except Exception:
                    print("Exception during upload to drive")


        if t % 500000 == 0 and t > learning_starts:
            filename = f"{t}" + 'statistics.pkl'
            with open(filename, 'wb') as f:
                pickle.dump(Statistic, f)
                print("Saved to %s" % filename)
Example #32
 def create_credentials(cls):
     cls.credentials = GoogleCredentials.get_application_default()
     scope = ['https://www.googleapis.com/auth/drive']
     return cls.credentials.create_scoped(scope)
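One way these Drive-scoped credentials might be used (a sketch, not part of the original class), assuming scoped_credentials holds the value returned by create_credentials():

# Usage sketch (assumption): build a Drive v3 client and print a few file names.
from googleapiclient import discovery

drive_service = discovery.build('drive', 'v3', credentials=scoped_credentials)
files = drive_service.files().list(pageSize=10, fields='files(id, name)').execute()
for f in files.get('files', []):
    print(f['name'])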
Example #33
def get_client():
    """Builds an http client authenticated with the service account credentials."""
    credentials = GoogleCredentials.get_application_default()
    api_client = discovery.build('monitoring', 'v3', credentials=credentials)
    return api_client
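A small usage sketch for the Monitoring v3 client returned by get_client() (not part of the original snippet); 'my-project' is a placeholder project ID.

# Usage sketch (assumption): list monitored resource descriptors for a project.
client = get_client()
descriptors = client.projects().monitoredResourceDescriptors().list(
    name='projects/my-project').execute()
for descriptor in descriptors.get('resourceDescriptors', []):
    print(descriptor['type'])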
Example #34
def teardown_kubeflow_gcp(_):
    """Teardown Kubeflow deployment."""
    args = parse_args()
    project = args.project
    deployment_name = args.name
    credentials = GoogleCredentials.get_application_default()
    deploy = discovery.build("deploymentmanager",
                             "v2",
                             credentials=credentials)

    deployments = deploy.deployments()

    response = None
    try:
        logging.info("Deleting deployment %s in project %s", deployment_name,
                     project)
        response = deployments.delete(project=project,
                                      deployment=deployment_name).execute()
    except errors.HttpError as e:
        logging.error("Got exception %s", e)
        if not e.content:
            raise

        try:
            content = json.loads(e.content)
        except ValueError:
            logging.error("Could not parse content %s as json", e.content)

        code = content.get("error", {}).get("code")
        if code == requests.codes.not_found:
            logging.info("Deployment %s does not exist", deployment_name)
            return
        elif code == requests.codes.conflict:
            logging.warning(
                "Deployment %s return error 409 when trying to delete. "
                "One possible cause is deletion is already in progress",
                deployment_name)
        else:
            raise

    if not response:
        # An operation was most likely already in progress. Lets get that operation.
        d = deployments.get(project=project,
                            deployment=deployment_name).execute()
        op_id = d.get("operation", {}).get("name")
        if not op_id:
            raise ValueError("Could not get operation name.")
    else:
        op_id = response["name"]

    logging.info("Wait for deployment; operation %s", op_id)
    final_status = deploy_utils.wait_for_operation(deploy, project, op_id)

    op_errors = final_status.get("error", {}).get("errors", [])

    if op_errors:
        logging.error(
            "Deployment operation had errors\n%s:",
            json.dumps(final_status,
                       sort_keys=True,
                       indent=2,
                       separators=(',', ': ')))

        raise RuntimeError("Deployment operation had errors.")

    if final_status.get("status") != "DONE":
        logging.error("Deployment operation isn't done.")
        raise RuntimeError("Deployment operation isn't done.")

    if final_status.get("operationType", "").lower() != "delete":
        # Its possible that if an operation was already in progress then the
        # operation we just waited for was not a delete operation.
        # We wanted to wait for that operation to finish and then raise an error
        # so that the delete will be retried.
        message = ("Operation {0} is type {1} which is not a delete "
                   "operation.").format(op_id,
                                        final_status.get("operationType"))
        logging.error(message)
        raise ValueError(message)
Example #35
def data_processor(job_type):
    status = 200
    message = 'Process Complete'
    startTime = datetime.datetime.now()
    lock_file = True
    log.info('BUCKET_NAME')
    log.info(BUCKET_NAME)
    log.info('ARCHIVE_BUCKET_NAME')
    log.info(ARCHIVE_BUCKET_NAME)

    try:
        bucket = BUCKET_NAME
        archive_bucket = ARCHIVE_BUCKET_NAME
        random_number = binascii.hexlify(os.urandom(32)).decode()
        log.info(' RANDOM NUMBER --- {0}'.format(random_number))

        # Get the application default credentials. When running locally, these are
        # available after running `gcloud init`. When running on compute
        # engine, these are available from the environment.
        credentials = GoogleCredentials.get_application_default()

        # Construct the service object for interacting with the Cloud Storage API -
        # the 'storage' service, at version 'v1'.
        # You can browse other available api services and versions here:
        # https://developers.google.com/api-client-library/python/apis/
        service = discovery.build('storage', 'v1', credentials=credentials)

        # Make a request to buckets.get to retrieve a list of objects in the
        # specified bucket.
        req = service.buckets().get(bucket=bucket)
        resp = req.execute()

        # print(json.dumps(resp, indent=2))

        # Create a request to objects.list to retrieve a list of objects.
        fields_to_return = \
            'nextPageToken,items(name,size,contentType,metadata(my-key))'
        req = service.objects().list(bucket=bucket, fields=fields_to_return)
        file_count = 0
        log.info('Process {0} Start time --- {1}'.format(bucket, startTime))
        # If you have too many items to list in one request, list_next() will
        # automatically handle paging with the pageToken.
        while req:
            resp = req.execute()
            # print(json.dumps(resp, indent=2))
            if len(resp) == 0:
                log.info(
                    '############################################################################################'
                )
                log.info('--------- THE BUCKET LIST IS EMPTY --------------')
                log.info('--------- NO FILES TO PROCESS  --------------')
                log.info(resp)
                log.info(
                    '############################################################################################'
                )

            else:
                get_filenames(resp, service, random_number)

            req = service.objects().list_next(req, resp)

    except Exception as e:
        log.error(' Error in getting Bucket Details - {0}'.format(e[0]))
        message = e[0]
        status = 500

    endTime = datetime.datetime.now()
    log.info('Process End time --- {0}'.format(endTime))

    elapsedTime = endTime - startTime

    time = 'Total Time to Process all the files -- {0}'.format(
        divmod(elapsedTime.total_seconds(), 60))
    log.info(time)

    log.info(' ARGS PASSED --- {0}'.format(job_type))
    if job_type == 'now':
        set_scheduler(os.environ.get('SCHEDULER_HOUR'),
                      os.environ.get('SCHEDULER_MIN'))

    response = dict(data=json.dumps(message), status=status, time=time)
    return response
Example #36
Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1gzu5RUAPJS9_Y1QryaqvE926WaXRInYC
"""

# Code to read csv file into colaboratory:
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials

auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

downloaded = drive.CreateFile({'id':'1RbG76av_2kFXsKv_RstsC3PTmSXkcPp9'}) # replace the id with id of file you want to access
downloaded.GetContentFile('SampleData.xlsx')

import xlrd
path = "SampleData.xlsx"
inputWorkbook = xlrd.open_workbook(path)
inputWorksheet = inputWorkbook.sheet_by_index(0)
a = inputWorksheet.nrows
b = inputWorksheet.ncols
rep = list()
item = list()
c = 2
d = 2
Example #37
def main(detect="", photo_file="", trans_lang=""):
    pixels.wakeup()
    if photo_file == "":
        photo_file = camera()
    pixels.off()

    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision',
                              'v1',
                              credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        if detect == "":  #No parameter
            DETECT = default_detect
        else:  # Parameter specified
            DETECT = [detect.upper()]

        result = ""
        bounds = []
        tlocale = ""
        for DET in DETECT:
            pixels.listen()
            service_request = service.images().annotate(
                body={
                    'requests': [{
                        'image': {
                            'content': image_content.decode('UTF-8')
                        },
                        'features': [{
                            'type': DET + '_DETECTION',
                            'maxResults': default_max
                        }]
                    }]
                })
            response = service_request.execute()
            annotation = DET.lower() + 'Annotations'
            try:
                results = response['responses'][0][annotation]
                for res in results:
                    if DET in ["LABEL", "LOGO"]:
                        if res["score"] > 0.7:
                            result += res["description"] + ", "

                    elif DET in ["TEXT"]:
                        tlocale = res["locale"]
                        result += res["description"] + ", "
                        bounds += res["boundingPoly"]["vertices"]

                    elif DET in ["FACE"]:
                        if res["joyLikelihood"] == "VERY_LIKELY" or res[
                                "joyLikelihood"] == "LIKELY":
                            result += "Smile "
                        if res["angerLikelihood"] == "VERY_LIKELY" or res[
                                "angerLikelihood"] == "LIKELY":
                            result += "Angry "
                        if res["headwearLikelihood"] == "VERY_LIKELY" or res[
                                "headwearLikelihood"] == "LIKELY":
                            rsult += "Capped "

                    result += DET + ", "
            except:
                result += "No " + DET + ", "
            pixels.off()

        print('Result: ' + result)
        pixels.listen()
        if trans_lang:
            trans_text = translate_text(result, trans_lang)
            trans_text = trans_text.replace("'", "")
            print('Trans: ' + trans_text)
            if trans_lang in aiy_lang:
                aiy.audio.say(trans_text, trans_lang)
            elif trans_lang == "ja-JP":
                os.system(aquest_dir +
                          ' -g {} {} | aplay -D plughw:{},{}'.format(
                              VOLUME, trans_text, CARD, DEVICE))
            else:
                aiy.audio.say('Nothing to trans!', 'en-US')

        else:  #trans_lang = null then default en-US
            aiy.audio.say(result, 'en-US')
        pixels.off()
Example #38
def _create_tasks_client():
    credentials = GoogleCredentials.get_application_default()
    with open(os.path.join(os.path.dirname(__file__), 'cloudtasks.json'),
              'r') as f:
        return build_from_document(f.read(), credentials=credentials)
Example #39
 def get_oauth_credentials(self):
     return self.create_scoped(GoogleCredentials.get_application_default())
Example #40
def get_job_id(pipeline_options: Dict[str, str]) -> str:
    """Captures the job_id of the pipeline job specified by the given options.

    For local jobs, generates a job_id using the given job_timestamp. For jobs
    running on Dataflow, finds the currently running job with the same job name
    as the current pipeline. Note: this works because there can only be one job
    with the same job name running on Dataflow at a time.

    Args:
        pipeline_options: Dictionary containing details about the pipeline.

    Returns:
        The job_id string of the current pipeline job.

    """
    runner = pipeline_options.get('runner')

    if runner == 'DataflowRunner':
        # Job is running on Dataflow. Get job_id.
        project = pipeline_options.get('project')
        region = pipeline_options.get('region')
        job_name = pipeline_options.get('job_name')

        if not project:
            raise ValueError("No project provided in pipeline options: "
                             f"{pipeline_options}")
        if not region:
            raise ValueError("No region provided in pipeline options: "
                             f"{pipeline_options}")
        if not job_name:
            raise ValueError("No job_name provided in pipeline options: "
                             f"{pipeline_options}")

        try:
            logging.info("Looking for job_id on Dataflow.")

            service_name = 'dataflow'
            dataflow_api_version = 'v1b3'
            credentials = GoogleCredentials.get_application_default()

            dataflow = build(serviceName=service_name,
                             version=dataflow_api_version,
                             credentials=credentials)

            result = dataflow.projects().locations().jobs().list(
                projectId=project,
                location=region,
            ).execute()

            pipeline_job_id = 'none'

            for job in result['jobs']:
                if job['name'] == job_name:
                    if job['currentState'] == 'JOB_STATE_RUNNING':
                        pipeline_job_id = job['id']
                    break

            if pipeline_job_id == 'none':
                msg = "Could not find currently running job with the " \
                    f"name: {job_name}."
                logging.error(msg)
                raise LookupError(msg)

        except Exception as e:
            logging.error("Error retrieving Job ID")
            raise LookupError(e)

    else:
        # Job is running locally. Generate id from the timestamp.
        pipeline_job_id = '_local_job'
        job_timestamp = pipeline_options.get('job_timestamp')

        if not job_timestamp:
            raise ValueError("Must provide a job_timestamp for local jobs.")

        pipeline_job_id = job_timestamp + pipeline_job_id

    return pipeline_job_id
Example #41
def get_service():
    '''Build a client to the Google Cloud Natural Language API.'''
    credentials = GoogleCredentials.get_application_default()
    return discovery.build('language', 'v1beta1', credentials=credentials)
Example #42
from google.api_core.exceptions import InvalidArgument
from google.api_core.exceptions import PermissionDenied
from google.cloud import automl_v1beta1
from google.cloud import pubsub_v1
# Output this module's logs (INFO and above) to stdout.
_logger = logging.getLogger(__name__)
_logger.addHandler(logging.StreamHandler(sys.stdout))
_logger.setLevel(logging.INFO)

FLAGS = None

# Prefix for Cloud Healthcare API.
_HEALTHCARE_API_URL_PREFIX = 'https://healthcare.googleapis.com/v1beta1'

# Credentials used to access the Cloud Healthcare API.
_CREDENTIALS = GoogleCredentials.get_application_default().create_scoped(
    ['https://www.googleapis.com/auth/cloud-platform'])

# SOP Class UID for Basic Text Structured Reports.
_BASIC_TEXT_SR_CUID = '1.2.840.10008.5.1.4.1.1.88.11'

# DICOM Tags.
_SOP_INSTANCE_UID_TAG = '00080018'
_SOP_CLASS_UID_TAG = '00080016'

_VALUE_TYPE = 'Value'

# Number of times to retry failing CMLE predictions.
_NUM_RETRIES_CMLE = 5

# Prefix of the UIDs generated by the module.
_UUID_INFERENCE_PREFIX = '2.25'
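
A sketch of how the scoped `_CREDENTIALS` above can authorize an httplib2 client against the Cloud Healthcare API prefix; the project and location in the URL are placeholders, not values from the original module.

# Assumed illustration: authorize an httplib2 client and list datasets.
import httplib2

http = httplib2.Http()
_CREDENTIALS.authorize(http)
url = '%s/projects/my-project/locations/us-central1/datasets' % _HEALTHCARE_API_URL_PREFIX
resp, content = http.request(url, method='GET')
_logger.info('Healthcare API status: %s', resp.status)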
Example #43
0
def get_vision_service():
    credentials = GoogleCredentials.get_application_default()
    return discovery.build('vision', 'v1', credentials=credentials)
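
A hedged example of calling the Vision client returned above; the image URI is a placeholder.

# Illustrative label-detection request against a GCS-hosted image.
vision = get_vision_service()
request_body = {
    'requests': [{
        'image': {'source': {'imageUri': 'gs://my-bucket/photo.jpg'}},
        'features': [{'type': 'LABEL_DETECTION', 'maxResults': 5}],
    }]
}
response = vision.images().annotate(body=request_body).execute()
print(response['responses'][0].get('labelAnnotations', []))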
Example #44
0
def clean_up_resource(args, deployments):
    """Clean up deployment / app config from previous test

  Args:
    args: The args from ArgParse.
    deployments set(string): which contains all deployment names in current test round.
  Returns:
    bool: True if cleanup is done
  """
    logging.info(
        "Clean up project resource (source repo, backend service and deployment)"
    )

    # Delete source repo
    sr_cmd = 'gcloud -q source repos delete %s-kubeflow-config --project=%s' % (
        args.project, args.project)
    try:
        util_run(sr_cmd.split(' '), cwd=FILE_PATH)
    except Exception as e:
        logging.warning(e)

    # Delete deployment
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('deploymentmanager',
                              'v2',
                              credentials=credentials)
    delete_done = False
    for deployment in deployments:
        try:
            request = service.deployments().delete(project=args.project,
                                                   deployment=deployment)
            request.execute()
        except Exception as e:
            logging.info("Deployment %s doesn't exist or is already deleted, continue: %s",
                         deployment, e)
    # wait up to 10 minutes till delete finish.
    end_time = datetime.datetime.now() + datetime.timedelta(minutes=10)
    while datetime.datetime.now() < end_time:
        sleep(10)
        try:
            request = service.deployments().list(project=args.project)
            response = request.execute()
            if ('deployments' not in response) or (len(deployments & set(
                    d['name'] for d in response['deployments'])) == 0):
                delete_done = True
                break
        except Exception:
            logging.info(
                "Failed listing current deployments, retry in 10 seconds")

    # Delete target-http-proxies
    delete_gcloud_resource(args, 'target-http-proxies')
    # Delete target-https-proxies
    delete_gcloud_resource(args, 'target-https-proxies')
    # Delete url-maps
    delete_gcloud_resource(args, 'url-maps')
    # Delete backend-services
    delete_gcloud_resource(args, 'backend-services', dlt_params=['--global'])
    # Delete instance-groups
    for zone in LOADTEST_ZONE:
        delete_gcloud_resource(args,
                               'instance-groups unmanaged',
                               filter=' --filter=INSTANCES:0',
                               dlt_params=['--zone=' + zone])
    # Delete ssl-certificates
    delete_gcloud_resource(args, 'ssl-certificates')
    # Delete health-checks
    delete_gcloud_resource(args, 'health-checks')

    return delete_done
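
A small, assumed invocation sketch for the function above; the argparse field name matches the `args.project` attribute accessed in the function, while the project and deployment names are placeholders.

# Hypothetical call site for clean_up_resource().
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--project', required=True)
args = parser.parse_args(['--project', 'my-test-project'])
done = clean_up_resource(args, deployments={'kubeflow-n01', 'kubeflow-n02'})
logging.info("Cleanup finished: %s", done)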
def main():

    # Grab the application's default credentials from the environment.
    # check the command line
    #outFile = "/Users/bobbrown/Desktop/bobMatrixOut.txt"

    try:
        name = '"' + sys.argv[1] + '"'
        pfile = sys.argv[2]
        study = '"' + sys.argv[3] + '"'
        tableType = sys.argv[4]
        stat_type = sys.argv[5]
        SDreq = sys.argv[6]
        samp_type = sys.argv[7]  # tumor or normal
        ofile = sys.argv[8]

        credentials = GoogleCredentials.get_application_default()

        # Construct the service object for interacting with the BigQuery API.
        bigquery_service = build('bigquery', 'v2', credentials=credentials)

        #         project_id= 'ngchmgalaxy'
        #        project_id= 'isb-cgc-bq'
        projfile = '/data/Galaxy_ISB_NGCHM_BigQuery_Project_ID.txt'  #should only contain one row with id in docker dir /data
        pxxxfile = open(projfile, 'r')  # 'rU' mode is deprecated in Python 3
        cnt = 0

        for row in pxxxfile:
            cnt += 1
            if cnt == 1:
                a = row[:].split('\t')  #  separate out first field
                project_id = a[0].replace('\n', '')

        sys.stdout.write(
            'Project ID supplied from directory mapped to /export/credentials/Galaxy_ISB_NGCHM_BigQuery_Project_ID.txt= '
            + project_id + '\n')
        pxxxfile.close()

        datasetId = "Gene_Filtering"
        tableId = "Gene_RPKM_Results"  # (Created via the Web)

        # check the command line
        sys.stdout.write('Starting new ISB BigQuery run. Arguments=')
        sys.stdout.write(str(sys.argv[1:]) + '\n')

        #outFile = '/Users/bobbrown/Desktop/bobMatrixOut.txt'

        #bb 31may add table to store query output to NGCHMgalaxy project  (Created via the Web)
        #     https://cloud.google.com/bigquery/docs/reference/v2/tables#methods
        #         POST https://www.googleapis.com/bigquery/v2/projects/NGCHMgalaxy/datasets/datasetId/tableId
        #     Bigquery.Tables.Insert request = bigqueryService.tables().insert(projectId, datasetId, content);
        #    Table response = request.execute();

        #         query_request = bigquery_service.jobs()

        geneSDAvginfo = {}

        query_request = bigquery_service.jobs()

        geneSDAvginfo = GetSDforStudy(geneSDAvginfo, SDreq, study, tableType,
                                      stat_type, samp_type, query_request,
                                      project_id, datasetId, tableId)
        #
        query_request = bigquery_service.jobs()

        GetResults(SDreq, geneSDAvginfo, pfile, ofile, study, tableType,
                   stat_type, query_request, project_id, datasetId, tableId)

    except HttpError as err:
        print('Error: {}'.format(err.content))
        raise err

    return
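
The helper functions `GetSDforStudy` and `GetResults` are not shown here; as a hedged, self-contained illustration of how a `jobs()` resource like `query_request` is typically used with the BigQuery v2 API (the project ID and column names below are placeholders, the dataset and table names match the ones above):

# Illustrative synchronous query through the BigQuery v2 jobs() resource.
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials

credentials = GoogleCredentials.get_application_default()
bigquery_service = build('bigquery', 'v2', credentials=credentials)
query_body = {
    'query': 'SELECT gene_symbol, AVG(normalized_count) AS avg_value '
             'FROM [my-project:Gene_Filtering.Gene_RPKM_Results] '
             'GROUP BY gene_symbol LIMIT 10'
}
response = bigquery_service.jobs().query(projectId='my-project', body=query_body).execute()
for row in response.get('rows', []):
    print([cell['v'] for cell in row['f']])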
Example #46
0
def set_up_creds():
    global CREDENTIALS
    CREDENTIALS = GoogleCredentials.get_application_default()
Example #47
0
def mock_run_flow(flow, storage, args):
    return GoogleCredentials.get_application_default()
Example #48
0
import base64
import json
import time

import gspread
from google.cloud import bigquery
from oauth2client.client import GoogleCredentials

project_id = '901492054369'
sheetName = 'PubSub Monitor'
# datasetId = "24617418"
client = bigquery.Client(project_id)
gc = gspread.authorize(GoogleCredentials.get_application_default())



def changeLabelsOnSingleTable(datasetId, tableId):
    print(f"Fetching {tableId} in {datasetId}")
    dataset = client.get_dataset(datasetId)
    table_ref = dataset.table(tableId)
    table = client.get_table(table_ref)
    if table.labels == {}:
        print(f"{datasetId}.{tableId} is being updated.")
        table.labels = dataset.labels
        table = client.update_table(table, ["labels"])
        print(f"\t{table.table_id} has been updated!")
        pushInfoToGsheet(sheetName, f"Dataset: {datasetId}", f"table: {tableId}")
    else:
        print(f"Table ({tableId}) already has labels, so they are not set again!")
Example #49
0
def __init__(self, api_discovery_file='vision_api.json'):
    self.credentials = GoogleCredentials.get_application_default()
    self.service = discovery.build('vision',
                                   'v1',
                                   credentials=self.credentials,
                                   discoveryServiceUrl=DISCOVERY_URL)
Example #50
0
def restartInstance(zone, name):
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('compute', 'v1', credentials=credentials)
    project = 'moz-fx-dev-djackson-torperf'
    # Note: instances().start() boots a stopped instance; instances().reset()
    # would hard-restart one that is already running.
    request = service.instances().start(project=project, zone=zone, instance=name)
    response = request.execute()
    return response
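
An assumed usage sketch, assuming the function returns the operation response as in the version above; the zone and instance name are placeholders.

# Hypothetical usage: start an instance, then poll the zonal operation until done.
import time

op = restartInstance('europe-west1-b', 'torperf-worker-1')
compute = discovery.build('compute', 'v1',
                          credentials=GoogleCredentials.get_application_default())
while op.get('status') != 'DONE':
    time.sleep(5)
    op = compute.zoneOperations().get(project='moz-fx-dev-djackson-torperf',
                                      zone='europe-west1-b',
                                      operation=op['name']).execute()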
Example #51
0
def __init__(self):
    self.bucket_name = "bucket-videos"
    self.credentials = GoogleCredentials.get_application_default()
    self.service = discovery.build('storage',
                                   'v1',
                                   credentials=self.credentials)
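
A hedged sketch of how the storage client built in this constructor might be used; the enclosing class is not shown, so this is written as an assumed method of the same class.

def list_videos(self):
    # Illustrative only: page through objects in the configured bucket.
    objects = []
    request = self.service.objects().list(bucket=self.bucket_name)
    while request is not None:
        response = request.execute()
        objects.extend(o['name'] for o in response.get('items', []))
        request = self.service.objects().list_next(request, response)
    return objects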
Example #52
0
File: main.py  Project: yamada-masa/McAlexa
def _init():
    credentials = GoogleCredentials.get_application_default()
    return googleapiclient.discovery.build("compute",
                                           "v1",
                                           credentials=credentials)
Example #53
0
def list_deployments(project,
                     name_prefix,
                     testing_label,
                     http=None,
                     desc_ordered=True):
    """List all the deployments matching name prefix and having testing labels.

  Args:
    project: string, Name of the deployed project.
    name_prefix: string, Base name of deployments.
    testing_label: string, label assigned to testing clusters, used for identification.
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through. Should only be used in tests.
    desc_ordered: bool, whether to sort the results by insertTime in descending order.

  Returns:
    deployments: list of dictionaries in the format of {
      "name": string of deployment name,
      "endpoint": string of endpoint service name,
      "insertTime": timestamp the deployment was inserted,
      "zone": location of deployment,
    }
  """
    dm = None
    if http:
        # This should only be used in testing.
        dm = discovery.build("deploymentmanager", "v2", http=http)
    else:
        credentials = GoogleCredentials.get_application_default()
        dm = discovery.build("deploymentmanager",
                             "v2",
                             credentials=credentials)
    dm_client = dm.deployments()
    resource_client = dm.resources()

    list_filter = "labels.purpose eq " + testing_label
    # pylint: disable=anomalous-backslash-in-string
    name_re = re.compile("{0}\-n[0-9]+\Z".format(name_prefix))
    # pylint: enable=anomalous-backslash-in-string
    deployments = dm_client.list(project=project, filter=list_filter).execute()
    next_page_token = None
    cls = []
    while True:
        next_page_token = deployments.get("nextPageToken", None)
        for d in deployments.get("deployments", []):
            name = d.get("name", "")
            if not name or name_re.match(name) is None:
                continue
            resource = resource_client.get(project=project,
                                           deployment=name,
                                           resource=name).execute()
            # Skip the latest deployment if having any kind of errors.
            if (resource.get("error", None) and resource.get("error", {}).get("errors", [])) or \
            not resource.get("properties", ""):
                continue
            info = yaml.safe_load(resource.get("properties", ""))
            # Skip deployment without zone info - most likely an error case.
            if not info.get("zone", ""):
                continue
            cls.append({
                "name": name,
                "endpoint": get_deployment_endpoint(project, name),
                "insertTime": d.get("insertTime", "1969-12-31T23:59:59+00:00"),
                "zone": info["zone"],
            })

        if next_page_token is None:
            break
        deployments = dm_client.list(project=project,
                                     pageToken=next_page_token,
                                     filter=list_filter).execute()

    return sorted(cls,
                  key=lambda entry: entry["insertTime"],
                  reverse=desc_ordered)
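
An assumed invocation of `list_deployments`; the project, prefix, and label values are placeholders.

# Hypothetical usage: newest-first list of test deployments named <prefix>-n<NN>.
deployments = list_deployments(project='my-test-project',
                               name_prefix='kubeflow',
                               testing_label='kf-ci')
for d in deployments:
    print(d['name'], d['zone'], d['insertTime'], d['endpoint'])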
Example #54
0
import os
import time

import httplib2
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials

custom_claim = "some custom_claim"
audience = 'api.endpoints.YOUR_PROJECT.cloud.goog'
svc_account_A = '*****@*****.**'
svc_account_B = '*****@*****.**'
svc_account_C = '*****@*****.**'


# initialize root creds for A
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/home/srashid/gcp_misc/certs/mineral-minutia-820-83b3ce7dcddb.json"

project_id = '-'

cc = GoogleCredentials.get_application_default()
iam_scopes = 'https://www.googleapis.com/auth/iam https://www.googleapis.com/auth/cloud-platform'
if cc.create_scoped_required():
  cc = cc.create_scoped(iam_scopes)
http = cc.authorize(httplib2.Http())
service = build(serviceName='iam', version='v1', http=http)
resource = service.projects()
now = int(time.time())
exptime = now + 3600
claim =('{"iss":"%s",'
  '"aud":"%s",'
  '"sub":"%s",'
  '"X-Goog-Authenticated-User-ID":"%s",'
  '"exp":%s,'
  '"iat":%s}') %(svc_account_B,audience,svc_account_B,custom_claim,exptime,now)
slist = resource.serviceAccounts().signJwt(name='projects/' + project_id + '/serviceAccounts/' + svc_account_B, body={'payload': claim })
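
The request above is only built, not executed; a hedged continuation, following the IAM v1 `signJwt` response shape.

# Assumed continuation: execute the signJwt request and read the signed token.
resp = slist.execute()
signed_jwt = resp['signedJwt']
print(signed_jwt)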