def GetService(version=DEFAULT_VERSION):
  """Builds the adexchangebuyer service used for the REST API.

  Args:
    version: a str indicating the Authorized Buyers Ad Exchange version to be
        retrieved. Depending on the version specified, either the v1 or
        the v2 API will be used.

  Returns:
    A googleapiclient.discovery.Resource instance used to interact with the
    Ad Exchange Buyer API, for either the v1 or the v2 API depending on the
    specified version.

  Raises:
    ValueError: raised if the specified version is not a valid version of
        either the v1 or v2 Ad Exchange Buyer API.
  """
  credentials = _GetCredentials()

  # Resolve which API family the requested version belongs to.
  if version in ADEXCHANGEBUYER_VERSIONS:
    api_name = _LEGACY_API_NAME  # Ad Exchange Buyer API v1.
  elif version in ADEXCHANGEBUYERII_VERSIONS:
    api_name = _API_NAME  # Ad Exchange Buyer API v2.
  else:
    supported = ', '.join(ADEXCHANGEBUYER_VERSIONS + ADEXCHANGEBUYERII_VERSIONS)
    raise ValueError(
        'Invalid version provided. Supported versions are: %s' % supported)

  return build(api_name, version, credentials=credentials)
    def init(self):
        """Load the load-test configuration from self.cfg and build the
        Compute Engine / Cloud Monitoring API clients.
        """
        self.i = 0
        self.instances = []

        # Service-account credential settings.
        self.config = Config()
        self.config.client_email = self.cfg.get('CREDENTIALS', 'client_email')
        self.config.p12_path = self.cfg.get('CREDENTIALS', 'p12_path')

        # Instance provisioning settings.
        self.config.project = self.cfg.get('INSTANCES', 'project')
        self.config.zone = self.cfg.get('INSTANCES', 'zone')
        self.config.image = self.cfg.get('INSTANCES', 'image')
        self.config.instance_type = self.cfg.get('INSTANCES', 'instance_type')
        self.config.public_key_path = self.cfg.get('INSTANCES', 'public_key_path')

        self.key_pair = self.cfg.get('INSTANCES', 'private_key_path')
        self.jmeter_url = self.cfg.get('JMETER', 'url')
        self.user = self.cfg.get('INSTANCES', 'remote_user')
        self.startup_threads = int(self.cfg.get('TEST', 'startup_threads'))
        self.rest_threads = int(self.cfg.get('TEST', 'rest_threads'))
        self.host = self.cfg.get('SHOWCASE', 'host')
        # Fixed redundant `True if ... else False`: the comparison is a bool.
        self.is_autoscalable = self.cfg.get('SHOWCASE', 'autoscalable') == 'yes'
        self.num_threads = int(self.cfg.get('SCENARIO', 'num_threads'))
        self.scenario_duration = int(self.cfg.get('SCENARIO', 'duration_in_minutes'))
        self.num_jmeter_slaves = int(self.cfg.get('TEST', 'num_jmeter_slaves'))
        self.frontend_instances_identifier = self.cfg.get('INSTANCES', 'frontend_instances_identifiers').split(',')
        self.rds_identifiers = self.cfg.get('SHOWCASE', 'database_identifiers')

        credentials = self.login()
        self.compute = build('compute', 'v1', credentials=credentials)
        self.monitoring = build('cloudmonitoring', 'v2beta2', credentials=credentials)
Пример #3
0
 def __init__(self, project, session_id, logger):
     """Create Compute Engine and Cloud Storage clients with default credentials."""
     super(GCEService, self).__init__(project, session_id, logger)
     creds = GoogleCredentials.get_application_default()
     self.credentials = creds
     self.compute = discovery.build('compute', 'v1', credentials=creds)
     self.storage = discovery.build('storage', 'v1', credentials=creds)
Пример #4
0
def youtube_setup(course_id, force_load=False):
    """Initialize the module-level YouTube / YouTube Analytics API clients.

    Loads cached OAuth2 credentials for the course; if they are missing or
    invalid, the interactive OAuth flow runs only when force_load is True.

    Args:
        course_id: string used to key the per-course credential cache file.
        force_load: if True, run the OAuth flow when credentials are invalid.

    Returns:
        True when both clients were built, False when credentials were
        unavailable and force_load was not set.
    """
    global api_youtube
    global api_youtube_analytics

    youtube_scopes = ["https://www.googleapis.com/auth/youtube.readonly", "https://www.googleapis.com/auth/yt-analytics.readonly"]
    youtube_servicename = "youtube"
    youtube_version = "v3"
    youtube_analytics_servicename = "youtubeAnalytics"
    youtube_analytics_version = "v1"

    flow = OAuth2WebServerFlow(client_id=settings.YOUTUBE_CLIENT_ID,
                           client_secret=settings.YOUTUBE_CLIENT_SECRET,
                           scope=" ".join(youtube_scopes))

    # Credentials are cached on disk, keyed per course.
    storage = Storage("cache/youtube_"+course_id+"_oauth2.json")
    youtube_credentials = storage.get()

    # Removed leftover debug prints that dumped the credential object to
    # stdout, plus dead commented-out code.
    if youtube_credentials is None or youtube_credentials.invalid:
        if not force_load:
            return False
        youtube_credentials = run_flow(flow, storage)

    http = youtube_credentials.authorize(httplib2.Http())
    api_youtube = build(youtube_servicename, youtube_version, http=http)
    api_youtube_analytics = build(youtube_analytics_servicename, youtube_analytics_version, http=http)
    return True
Пример #5
0
def youtube_required_info(code):
    """
    Obtains youtube channel id and email from code.

    Args:
        code: Google user agent code.
    Returns:
        Id of youtube channel and google email.
    Raises:
        GoogleAccessError
        ChannelNotFoundError
    """
    try:
        credentials = credentials_from_code(CLIENT_ID, CLIENT_SECRET, SCOPES, code)
    except FlowExchangeError as exchange_error:
        raise GoogleAccessError(exchange_error)

    # then can use cache
    oauth_service = build(OAUTH_API, OAUTH_VERSION, credentials=credentials)
    google_account = oauth_service.userinfo()
    youtube_service = build(YOUTUBE_API, YOUTUBE_VERSION, credentials=credentials)
    channels = youtube_service.channels()

    try:
        email = google_account.get(fields='email').execute()['email']
        channel_request = channels.list(part='id', mine=True, fields='items/id')
        channel_id = channel_request.execute()['items'][0]['id']
    except (KeyError, IndexError, HttpAccessTokenRefreshError, HttpError,
            AccessTokenCredentialsError,) as api_error:
        raise GoogleAccessError(api_error)

    if not ChannelInfo(channel_id).is_exists():
        raise ChannelNotFoundError()
    return channel_id, email
Пример #6
0
  def _tpuService(self):
    """Creates a new Cloud TPU API object.

    This works around an issue where the underlying HTTP connection sometimes
    times out when the script has been running for too long. Other methods in
    this object call this method to get a new API object whenever they need
    to communicate with the Cloud API.

    Returns:
      A Google Cloud TPU API object.
    """
    # An explicitly injected service instance takes precedence.
    if self._service:
      return self._service

    creds = self._credentials
    if creds is None or creds == 'default':
      creds = GoogleCredentials.get_application_default()

    # Only pass a discovery URL when one was configured.
    build_kwargs = {'credentials': creds}
    if self._discovery_url:
      build_kwargs['discoveryServiceUrl'] = self._discovery_url
    return discovery.build('tpu', 'v1alpha1', **build_kwargs)
  def test_file_based_cache(self):
    """The discovery doc is fetched once, cached to file, then served from cache."""
    cache = mock.Mock(wraps=DictCache())
    with mock.patch('googleapiclient.discovery_cache.file_cache.cache',
                    new=cache):
      self.http = HttpMock(datafile('plus.json'), {'status': '200'})

      plus = build('plus', 'v1', http=self.http)

      # cache.get is called once
      url = 'https://www.googleapis.com/discovery/v1/apis/plus/v1/rest'
      cache.get.assert_called_once_with(url)

      # cache.set is called once
      with open(datafile('plus.json')) as f:
        content = f.read()
      cache.set.assert_called_once_with(url, content)

      # Make sure there is a cache entry for the plus v1 discovery doc.
      self.assertTrue(cache.contains(url))

      # Make sure the contents are returned from the cache.
      # (Otherwise it should throw an error, since this HttpMock has no data.)
      self.http = HttpMock(None, {'status': '200'})

      plus = build('plus', 'v1', http=self.http)

      # cache.get is called twice
      cache.get.assert_has_calls([mock.call(url), mock.call(url)])

      # cache.set is called just once
      cache.set.assert_called_once_with(url, content)
Пример #8
0
    def _build_service(self, api_name, api_version, scopes):
        """Build an authorized API client and return (credentials, service).

        Falls back to application default credentials when no key file is
        configured; otherwise loads a .json or .p12 service-account key.
        """
        if self._keyfile_path is None:
            credentials = GoogleCredentials.get_application_default()
            service = build(api_name, api_version, credentials=credentials)
            return credentials, service

        lowered = self._keyfile_path.lower()
        if lowered.endswith(".json"):
            credentials = ServiceAccountCredentials.from_json_keyfile_name(
                self._keyfile_path,
                scopes=scopes)
        elif lowered.endswith(".p12"):
            # p12 keys need the service-account email alongside the key.
            if self._account_email is None:
                raise Exception("Input account email.")
            credentials = ServiceAccountCredentials.from_p12_keyfile(
                self._account_email,
                self._keyfile_path,
                scopes=scopes)
        else:
            error_message = """
                    Key file format [{0}] is illegal.
                    Key file must be .json or .p12.
                """.format(self._keyfile_path)
            raise Exception(error_message)

        service = build(api_name, api_version, credentials=credentials)
        return credentials, service
Пример #9
0
 def setUpClass(cls):
     """Authorize shared Sheets/Drive clients and silence snippet output."""
     cls.credentials = cls.create_credentials()
     authed_http = cls.credentials.authorize(httplib2.Http())
     cls.credentials.refresh(authed_http)
     cls.service = build('sheets', 'v4', http=authed_http)
     cls.drive_service = build('drive', 'v3', http=authed_http)
     # Hide STDOUT output generated by snippets; the original stream is
     # stashed so it can be restored later.
     cls.stdout = sys.stdout
     sys.stdout = None
Пример #10
0
 def get_api():
     """Return a Books v1 client, keyed when a developer key is configured."""
     build_kwargs = {}
     if settings.GOOGLE_DEV_KEY is not None:
         build_kwargs['developerKey'] = settings.GOOGLE_DEV_KEY
     return build('books', 'v1', **build_kwargs)
Пример #11
0
def setup_oauth():
  """Setup oauth for the apps domain.

  Returns:
    A json message indicating success or a flask abort with 403 for oauth
    exceptions.
  """
  oauth_code = flask.request.form.get('oauth_code', None)
  if oauth_code is None:
    flask.abort(403, ufo.get_json_message('noOauthCodeError'))

  config = ufo.get_user_config()
  flow = oauth.getOauthFlow()
  credentials = None
  domain = flask.request.form.get('domain', None)

  # Exchange the one-time authorization code for long-lived credentials.
  try:
    credentials = flow.step2_exchange(oauth_code)
  except oauth2client.client.FlowExchangeError as e:
    flask.abort(403, e.message)

  apiClient = credentials.authorize(httplib2.Http())
  plusApi = discovery.build(serviceName='plus', version='v1', http=apiClient)
  adminApi = discovery.build(serviceName='admin', version='directory_v1',
                             http = apiClient)

  # The Google+ profile is fetched to check the user belongs to the domain.
  profileResult = None
  try:
    profileResult = plusApi.people().get(userId='me').execute()
  except Exception as e:
    ufo.app.logger.error(e, exc_info=True)
    flask.abort(403, ufo.get_json_message('domainInvalidError'))

  if domain is None or domain != profileResult.get('domain', None):
    flask.abort(403, ufo.get_json_message('domainInvalidError'))

  # The directory API lookup verifies the user is an admin of the domain.
  user_id = profileResult['id']
  userResult = None
  try:
    userResult = adminApi.users().get(userKey=user_id).execute()
  except Exception as e:
    ufo.app.logger.error(e, exc_info=True)
    flask.abort(403, ufo.get_json_message('nonAdminAccessError'))

  if not userResult.get('isAdmin', False):
    flask.abort(403, ufo.get_json_message('nonAdminAccessError'))

  # Persist the credentials and domain; mirror the domain into the session.
  config.credentials = credentials.to_json()
  config.domain = domain
  flask.session['domain'] = domain
  config.save()

  response_dict = {'domain': domain, 'credentials': config.credentials}
  response_json = json.dumps((response_dict))
  return flask.Response(ufo.XSSI_PREFIX + response_json,
                        headers=ufo.JSON_HEADERS)
Пример #12
0
def main():
    """
    main function
    """

    credentials = get_cred(email, "admin.directory.user")
    http = httplib2.Http()

    service = build(
        "admin", "directory_v1", http=credentials.authorize(http)
    )

    user_list = []
    page_token = None
    while True:
        results = service.users().list(
            domain=email.split('@')[1],
            maxResults=500,
            pageToken=page_token,
            orderBy='familyName', viewType='domain_public'
        ).execute()

        for r in results["users"]:
            user_list.append(r)

        page_token = results.get('nextPageToken')
        if not page_token:
            break

    for user in user_list:
        pmail = user.get('primaryEmail')
        if pmail:
            credentials = get_cred(pmail, "gmail.settings.basic")
            http = httplib2.Http()
            service = build(
                "gmail", "v1", http=credentials.authorize(http)
            )
            try:
                # sometimes this barfs with 400 server error:
                # "Mail service not enabled"
                # not certain why at this moment.
                aliases = service.users().settings().sendAs().list(
                    userId=pmail
                ).execute(num_retries=10)
                for alias in aliases.get('sendAs'):
                    #if alias.get('treatAsAlias') and alias.get('verificationStatus')=='accepted':
                    if alias.get('treatAsAlias'):
                        print '{}|{}|{}|{}|{}'.format(
                            user.get('name').get('familyName'),
                            user.get('name').get('givenName'),
                            user.get('primaryEmail'), alias.get('sendAsEmail'),
                            alias.get('verificationStatus')
                        )
            except:
                pass
Пример #13
0
 def ga_login(self, api_name='analytics', api_version='v3'):
     """Build an authorized Analytics client, supporting both the legacy and
     the newer oauth2client credential classes.
     """
     try:
         from oauth2client.client import SignedJwtAssertionCredentials
         credentials = SignedJwtAssertionCredentials(
             self.json_key['client_email'],
             self.json_key['private_key'].encode(),
             self.scope_ga)
         http = credentials.authorize(httplib2.Http())
         service = build(api_name, api_version, http=http)
     except ImportError:
         # Newer oauth2client releases dropped SignedJwtAssertionCredentials.
         from oauth2client.service_account import ServiceAccountCredentials
         credentials = ServiceAccountCredentials.from_json_keyfile_name(self.json_key, self.scope_ga)
         http = credentials.authorize(httplib2.Http())
         service = build(api_name, api_version, http=http)
     return service
  def test_credentials(self):
    """build() must call authorize() only when credentials are supplied."""
    class FakeCredentials:
      def create_scoped_required(self):
        return False

      def authorize(self, http):
        # Leave a marker attribute so the test can tell authorize() ran.
        http.orest = True

    self.http = HttpMock(datafile('plus.json'), {'status': '200'})
    build('plus', 'v1', http=self.http, credentials=None)
    self.assertFalse(hasattr(self.http, 'orest'))
    build('plus', 'v1', http=self.http, credentials=FakeCredentials())
    self.assertTrue(hasattr(self.http, 'orest'))
  def test_appengine_memcache(self):
    """The discovery doc is stored in, then served from, App Engine memcache."""
    # Hack module import so `google.appengine.api` resolves to a MagicMock.
    self.orig_import = __import__
    self.mocked_api = mock.MagicMock()

    def import_mock(name, *args):
      if name == 'google.appengine.api':
        return self.mocked_api
      return self.orig_import(name, *args)

    # The builtins module was renamed between Python 2 and 3.
    import_fullname = '__builtin__.__import__'
    if sys.version_info[0] >= 3:
      import_fullname = 'builtins.__import__'

    with mock.patch(import_fullname, side_effect=import_mock):
      namespace = 'google-api-client'
      self.http = HttpMock(datafile('plus.json'), {'status': '200'})

      self.mocked_api.memcache.get.return_value = None

      plus = build('plus', 'v1', http=self.http)

      # memcache.get is called once
      url = 'https://www.googleapis.com/discovery/v1/apis/plus/v1/rest'
      self.mocked_api.memcache.get.assert_called_once_with(url,
                                                           namespace=namespace)

      # memcache.set is called once
      with open(datafile('plus.json')) as f:
        content = f.read()
      self.mocked_api.memcache.set.assert_called_once_with(
        url, content, time=DISCOVERY_DOC_MAX_AGE, namespace=namespace)

      # Returns the cached content this time.
      self.mocked_api.memcache.get.return_value = content

      # Make sure the contents are returned from the cache.
      # (Otherwise it should throw an error, since this HttpMock has no data.)
      self.http = HttpMock(None, {'status': '200'})

      plus = build('plus', 'v1', http=self.http)

      # memcache.get is called twice
      self.mocked_api.memcache.get.assert_has_calls(
        [mock.call(url, namespace=namespace),
         mock.call(url, namespace=namespace)])

      # memcache.set is called just once
      self.mocked_api.memcache.set.assert_called_once_with(
        url, content, time=DISCOVERY_DOC_MAX_AGE,namespace=namespace)
Пример #16
0
def get_service(service_name, **kwargs):
    """Build an authorized client for service_name.

    With json_credentials_path (plus client_id) the credentials come from a
    multistore file, re-running the OAuth flow when missing or invalid;
    otherwise the application default credentials are used.
    """
    service_scope = SCOPES[service_name]

    if 'json_credentials_path' not in kwargs:
        # No credential store requested: use application default credentials.
        from oauth2client.client import GoogleCredentials
        credentials = GoogleCredentials.get_application_default()
        return build(
            service_name,
            service_scope['version'],
            credentials=credentials
        )

    # store in multistore_file by default, requires client_id as a key
    assert 'client_id' in kwargs, 'client_id required when using json_credential_path'

    import httplib2
    from oauth2client.contrib import multistore_file

    storage = multistore_file.get_credential_storage(
        filename=kwargs['json_credentials_path'],
        client_id=kwargs['client_id'],
        user_agent=None,
        scope=service_scope['scope']
    )
    credentials = storage.get()

    if credentials is None or credentials.invalid:
        # rerun auth flow if credentials are missing or invalid
        # flow requires client secret file
        assert 'client_secret_path' in kwargs, 'Credentials invalid, client_secret_path required for reauthorization'

        from oauth2client.client import flow_from_clientsecrets
        from oauth2client.tools import run_flow

        flow = flow_from_clientsecrets(kwargs['client_secret_path'], scope=service_scope['scope'])
        credentials = run_flow(flow, storage, None)

    # Authorize an httplib2.Http object with the stored credentials.
    http = credentials.authorize(httplib2.Http())
    return build(
        service_name,
        service_scope['version'],
        http=http
    )
Пример #17
0
def search_for_videos(search_term, max_results=10, page=None):
    """Search YouTube for search_term and return one page of video summaries."""
    youtube = build(API_SERVICE_NAME, API_VERSION, developerKey=API_KEY)

    # Only include pageToken when a specific page was requested.
    list_params = {
        "q": search_term,
        "part": "id,snippet",
        "maxResults": max_results,
    }
    if page is not None:
        list_params["pageToken"] = page

    try:
        result = youtube.search().list(**list_params).execute()
    except HttpError:
        raise ConnectionError("Connection problem.")

    # Keep only real video hits (the API also returns channels/playlists).
    videos = [
        {'title': item["snippet"]["title"],
         'thumbnail': item["snippet"]["thumbnails"]["default"]["url"],
         'id': item["id"]["videoId"]}
        for item in result.get("items", [])
        if item["id"]["kind"] == "youtube#video"
    ]

    return {"results": videos,
            "next page": result.get("nextPageToken", []),
            "previous page": result.get("prevPageToken", [])}
Пример #18
0
def get_service():
    """Return a Cloud Natural Language client using scoped default credentials."""
    scoped_credentials = GoogleCredentials.get_application_default().create_scoped(
        ['https://www.googleapis.com/auth/cloud-platform'])
    http = httplib2.Http()
    scoped_credentials.authorize(http)
    return discovery.build('language', 'v1beta1', http=http)
Пример #19
0
def OCR(photo_file):
    """Run a TEXT_DETECTION request on one image and return the parsed card."""
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision', 'v1', credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        # The Vision API expects base64-encoded image bytes.
        image_content = base64.b64encode(image.read())
        request_body = {
            'requests': [{
                'image': {
                    'content': image_content.decode('UTF-8')
                },
                'features': [{
                    'type': 'TEXT_DETECTION',
                    'maxResults': 1
                }]
            }]
        }
        response = service.images().annotate(body=request_body).execute()
        # The first annotation holds the full detected text block.
        detected_text = response['responses'][0]['textAnnotations'][0]['description']
        text_lines = str(detected_text).splitlines()
        card = BusinessCard(text_lines)
        return card.cleanup()
Пример #20
0
    def __enter__(self):
        """Authorize the YouTube upload service and return self.

        Raises:
            Exception: if this object is already inside a with-block.
        """
        # Ensure that we have not re-entered. Use identity comparison with
        # None (`is not None`) instead of the non-idiomatic `!= None`.
        if self.temp_path is not None or self.service is not None:
            raise Exception('Cannot use multiple nested with blocks on same Youtube object!')

        flow = flow_from_clientsecrets(
            self.client_secrets_path,
            scope=YOUTUBE_UPLOAD_SCOPE,
            message=MISSING_CLIENT_SECRETS_MESSAGE)

        # Credentials for this session are cached in a temp file whose path
        # is remembered for cleanup by the context-manager exit.
        temp_file = NamedTemporaryFile(delete=False)
        self.temp_path = temp_file.name
        temp_file.close()

        storage = Storage(self.temp_path)
        credentials = storage.get()

        if credentials is None or credentials.invalid:
            credentials = run_flow(
                flow, storage, argparser.parse_args(list())
            )

        self.service = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
            http=credentials.authorize(httplib2.Http()))

        return self
Пример #21
0
def main(bucket, sources):
    """Upload each file in `sources` to the given Cloud Storage bucket."""
    # Get the application default credentials. When running locally, these are
    # available after running `gcloud init`. When running on compute
    # engine, these are available from the environment.
    credentials = GoogleCredentials.get_application_default()

    # Construct the service object for interacting with the Cloud Storage API.
    service = discovery.build('storage', 'v1', credentials=credentials)

    # Removed leftover debug prints of `sources` (which also used
    # Python-2-only print syntax in an otherwise Python-3-compatible body).

    # Upload the source files.
    for filename in sources:
        req = service.objects().insert(
            media_body=filename,
            name=filename,
            bucket=bucket)
        resp = req.execute()
        print('> Uploaded source file {}'.format(filename))
        print(json.dumps(resp, indent=2))
    """
Пример #22
0
    def __init__(self, creds_file):
        """Authorize gspread and a Cloud Storage client from a service-account key."""
        credentials = ServiceAccountCredentials.from_json_keyfile_name(creds_file, SCOPES)
        self.gc = gspread.authorize(credentials)
        self.storage_service = discovery.build(
            'storage', 'v1', credentials=credentials)
Пример #23
0
def init(argv, name, version, doc, filename, scope=None, parents=None):
  """A common initialization routine for samples.

  Many of the sample applications do the same initialization, which has now
  been consolidated into this function. This function uses common idioms found
  in almost all the samples, i.e. for an API with name 'apiname', the
  credentials are stored in a file named apiname.dat, and the
  client_secrets.json file is stored in the same directory as the application
  main file.

  Args:
    argv: list of string, the command-line parameters of the application.
    name: string, name of the API.
    version: string, version of the API.
    doc: string, description of the application. Usually set to __doc__.
    filename: string, filename of the application. Usually set to __file__.
    scope: string, The OAuth scope used.
    parents: list of argparse.ArgumentParser, additional command-line flags.

  Returns:
    A tuple of (service, flags), where service is the service object and flags
    is the parsed command-line flags.
  """
  if scope is None:
    scope = 'https://www.googleapis.com/auth/' + name
  # Avoid the mutable-default-argument pitfall: the original used
  # `parents=[]`, which is shared across all calls.
  if parents is None:
    parents = []

  # Parse command-line arguments.
  parent_parsers = [tools.argparser]
  parent_parsers.extend(parents)
  parser = argparse.ArgumentParser(
      description=doc,
      formatter_class=argparse.RawDescriptionHelpFormatter,
      parents=parent_parsers)
  flags = parser.parse_args(argv[1:])

  # Name of a file containing the OAuth 2.0 information for this
  # application, including client_id and client_secret, which are found
  # on the API Access tab on the Google APIs
  # Console <http://code.google.com/apis/console>.
  client_secrets = os.path.join(os.path.dirname(filename),
                                'client_secrets.json')

  # Set up a Flow object to be used if we need to authenticate.
  flow = client.flow_from_clientsecrets(client_secrets,
      scope=scope,
      message=tools.message_if_missing(client_secrets))

  # Prepare credentials, and authorize HTTP object with them.
  # If the credentials don't exist or are invalid run through the native client
  # flow. The Storage object will ensure that if successful the good
  # credentials will get written back to a file.
  storage = file.Storage(name + '.dat')
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    credentials = tools.run_flow(flow, storage, flags)
  http = credentials.authorize(httplib2.Http())

  # Construct a service object via the discovery service.
  service = discovery.build(name, version, http=http)
  return (service, flags)
def run(project, zone, instance_name):
    """Create an instance, list the project's instances, then delete it.

    Blocks on input() so the user can inspect the uploaded output image
    before the instance is torn down.
    """
    credentials = GoogleCredentials.get_application_default()
    compute = build('compute', 'v1', credentials=credentials)

    print('Creating instance.')

    operation = create_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])

    instances = list_instances(compute, project, zone)

    print('Instances in project %s and zone %s:' % (project, zone))
    for instance in instances:
        print(' - ' + instance['name'])

    print("""
Instance created.
It will take a minute or two for the instance to complete work.
Check this URL: http://storage.googleapis.com/%s/output.png
Once the image is uploaded press enter to delete the instance.
""" % project)

    # Wait for the user to press enter before tearing down.
    input()

    print('Deleting instance.')

    operation = delete_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])
Пример #25
0
def youtube_search(options):
  """Print the videos/channels/playlists returned for a fixed playlist id.

  NOTE(review): despite the function name, this calls playlists().list with a
  hard-coded id, not search().list.
  """
  youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
    developerKey=DEVELOPER_KEY)

  # Call the playlists.list method to retrieve the hard-coded playlist.
  # (The original comment incorrectly described this as search.list.)
  search_response = youtube.playlists().list(
    part="id,snippet",
    maxResults=options.max_results,
    id="LLJkMlOu7faDgqh4PfzbpLdg"
  ).execute()


  videos = []
  channels = []
  playlists = []

  # Add each result to the appropriate list, and then display the lists of
  # matching videos, channels, and playlists.
  for search_result in search_response.get("items", []):
    if search_result["kind"] == "youtube#video":
      videos.append("%s (%s)" % (search_result["snippet"]["title"],
                                 search_result["id"]["videoId"]))
    elif search_result["kind"] == "youtube#channel":
      channels.append("%s (%s)" % (search_result["snippet"]["title"],
                                   search_result["id"]["channelId"]))
    elif search_result["kind"] == "youtube#playlist":
      print search_result
      playlists.append("%s (%s)" % (search_result["snippet"]["title"],
                                    search_result["id"]))

  print "Videos:\n", "\n".join(videos), "\n"
  print "Channels:\n", "\n".join(channels), "\n"
  print "Playlists:\n", "\n".join(playlists), "\n"
Пример #26
0
 def __init__(self, context, db_driver=None):
     """Set up the Google Cloud Storage backup driver from CONF settings.

     Builds an authorized 'storage' v1 client (with proxy support and a
     custom user agent) and caches the chunking/retry tuning values.
     """
     self.check_gcs_options()
     backup_bucket = CONF.backup_gcs_bucket
     backup_credential = CONF.backup_gcs_credential_file
     self.gcs_project_id = CONF.backup_gcs_project_id
     chunk_size_bytes = CONF.backup_gcs_object_size
     sha_block_size_bytes = CONF.backup_gcs_block_size
     enable_progress_timer = CONF.backup_gcs_enable_progress_timer
     super(GoogleBackupDriver, self).__init__(context, chunk_size_bytes,
                                              sha_block_size_bytes,
                                              backup_bucket,
                                              enable_progress_timer,
                                              db_driver)
     # Service-account credentials are loaded from the configured key file.
     credentials = client.GoogleCredentials.from_stream(backup_credential)
     self.reader_chunk_size = CONF.backup_gcs_reader_chunk_size
     self.writer_chunk_size = CONF.backup_gcs_writer_chunk_size
     self.bucket_location = CONF.backup_gcs_bucket_location
     self.storage_class = CONF.backup_gcs_storage_class
     self.num_retries = CONF.backup_gcs_num_retries
     http_user_agent = http.set_user_agent(
         httplib2.Http(proxy_info=self.get_gcs_proxy_info()),
         CONF.backup_gcs_user_agent)
     self.conn = discovery.build('storage',
                                 'v1',
                                 http=http_user_agent,
                                 credentials=credentials)
     # A writer chunk size of -1 disables chunking (single-shot uploads).
     self.resumable = self.writer_chunk_size != -1
Пример #27
0
def run(project, zone, instance_name, package):
    """Create an instance provisioned with `package`, list the project's
    instances, then wait for Enter (raw_input) before deleting the instance.
    """
    credentials = GoogleCredentials.get_application_default()
    compute = build('compute', 'v1', credentials=credentials)

    print 'Creating instance.'

    #creating instance
    operation = create_instance(compute, project, zone, instance_name, package)

    #execute operations
    wait_for_operation(compute, project, zone, operation['name'])

    #listing instances
    instances = list_instances(compute, project, zone)

    print 'Instances in project %s and zone %s:' % (project, zone)
    for instance in instances:
        print ' - ' + instance['name']

    print """ Instance created """

    # Block until the user presses Enter, then tear the instance down.
    raw_input()

    print 'Deleting instance'

    operation = delete_instance(compute, project, zone, instance_name)
    wait_for_operation(compute, project, zone, operation['name'])
Пример #28
0
 def __init__(self, api_key=None, cx=None):
     """Create a Google Custom Search client.

     Args:
         api_key: Google API key; defaults to $PIZ_GOOGLE_API_KEY.
         cx: custom search engine id; defaults to $PIZ_GOOGLE_SEARCH_CX.

     Raises:
         UserMisconfigurationError: if either value is missing.
     """
     # Read the environment at call time. The original read it inside the
     # default-argument expressions, which are evaluated once at import,
     # freezing whatever the environment held at that moment.
     if api_key is None:
         api_key = environ.get('PIZ_GOOGLE_API_KEY')
     if cx is None:
         cx = environ.get('PIZ_GOOGLE_SEARCH_CX')
     if api_key is None or cx is None:
         raise UserMisconfigurationError('You must have both PIZ_GOOGLE_API_KEY and PIZ_GOOGLE_SEARCH_CX set as '
                                         'environment variables in your shell.')
     self.api_key = api_key
     self.cx = cx
     self.service = build('customsearch', 'v1', developerKey=api_key)
Пример #29
0
 def post(self):
     """Translate an event's name and description into the requested language
     via the Google Translate API and return them as a JSON success payload.
     """
     if self.json_body:
         event_id = self.json_body.get('event_id')
         language = self.json_body.get('language') or self.json_body.get('locale')
         if not event_id:
             self.add_error('Need to pass event_id argument')
         if not language:
             self.add_error('Need to pass language/locale argument')
     else:
         self.add_error('Need to pass a post body of json params')
     # Remap our traditional/simplified chinese languages
     if language == 'zh':
         language = 'zh-TW'
     elif language == 'zh-Hant':
         language = 'zh-TW'
     elif language == 'zh-Hans':
         language = 'zh-CN'
     # Aborts the request if any errors were accumulated above.
     self.errors_are_fatal()
     db_event = eventdata.DBEvent.get_by_id(event_id)
     service = build('translate', 'v2', developerKey=keys.get('google_server_key'))
     # format='text' keeps HTML entities out of the translated strings.
     result = service.translations().list(
         target=language,
         format='text',
         q=[db_event.name or '', db_event.description or '']
     ).execute()
     translations = [x['translatedText'] for x in result['translations']]
     self.write_json_success({'name': translations[0], 'description': translations[1]})
Пример #30
0
def main():
    """Shows basic usage of the Tasks API.
    Prints the title and ID of the first 10 task lists.
    """
    credentials = None
    # token.pickle caches the user's access and refresh tokens from the
    # first successful authorization flow.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as fh:
            credentials = pickle.load(fh)
    # Without valid cached credentials, refresh or run the login flow.
    if not credentials or not credentials.valid:
        if credentials and credentials.expired and credentials.refresh_token:
            credentials.refresh(Request())
        else:
            credentials = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES).run_local_server()
        # Persist the (possibly new) credentials for the next run.
        with open('token.pickle', 'wb') as fh:
            pickle.dump(credentials, fh)

    service = build('tasks', 'v1', credentials=credentials)

    # Fetch up to ten of the user's task lists.
    response = service.tasklists().list(maxResults=10).execute()
    task_lists = response.get('items', [])

    if not task_lists:
        print('No task lists found.')
        return
    print('Task lists:')
    for task_list in task_lists:
        print(u'{0} ({1})'.format(task_list['title'], task_list['id']))
Пример #31
0
#APITest (3 vids): PLorw3mfu-J0gEgljMVhsWcIzZYaYL29_u

playlistid = input("Enter playlist id: ") #input playlist id

api_file = os.path.join(sys.path[0], 'APIKEY.txt') #path to API key

'''
IMPORTANT: You must obtain a YouTube API key in order to use this program.
- This video (0:00 - 5:00) gives a detailed guide of how to obtain an API key: https://www.youtube.com/watch?v=th5_9woFJmk
- Once you obtain your API key, paste it in the separate file named "APIKEY.txt", with no extra spaces or new lines
'''
# Read the key with a context manager so the file is closed even on error,
# and strip stray whitespace/newlines that would make the key invalid.
with open(api_file, "r", encoding="utf-8") as api_txt:
    api_key = api_txt.read().strip()

plst_id = playlistid #get from yt playlist
yt = build("youtube", "v3", developerKey=api_key)

# Request the playlist's metadata and content details.
plst_req = yt.playlists().list( #request details of type Playlist
    part="contentDetails, snippet",
    id=plst_id
)

plst_raw = plst_req.execute()

plst_raw = plst_raw['items'][0] #list of playlist info

plst_title = plst_raw['snippet']['title']
plst_author = plst_raw['snippet']['channelTitle']
plst_authorId = plst_raw['snippet']['channelId']
Пример #32
0
 def setUp(self):
     """Build a mocked `zoo` service and prepare a media-download request."""
     mock_http = HttpMock(datafile('zoo.json'), {'status': '200'})
     zoo_service = build('zoo', 'v1', http=mock_http)
     self.request = zoo_service.animals().get_media(name='Lion')
     self.fd = BytesIO()
Пример #33
0
import csv
import sys
import json
import datetime
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools

# If modifying these scopes, delete the file token.json.
SCOPES = 'https://www.googleapis.com/auth/calendar'

# AUTHORIZE CALENDAR API #
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
    creds = tools.run_flow(flow, store)
service = build('calendar', 'v3', http=creds.authorize(Http()))

# Walk every result page for the given calendar ID, printing each
# event's summary and end date.
token = None
while True:
    page = service.events().list(
        calendarId='*****@*****.**',
        pageToken=token).execute()
    for event in page['items']:
        print(event['summary'])
        print(event['end']['date'])
    token = page.get('nextPageToken')
    if not token:
        break
Пример #34
0
def copy_database(project, existing_env, new_env, running_as_service_account):
    """Copy the Airflow SQL database from one Composer environment to another.

    Exports the existing environment's database to a temporary Cloud Storage
    bucket, then imports it into the new environment's SQL instance,
    re-encrypting connection secrets with the new environment's fernet key.
    Temporary resources (service-account key, dump bucket) are cleaned up
    in the finally block even when the transfer fails.

    Args:
        project: GCP project id hosting both environments.
        existing_env: source Composer environment resource (dict) — assumed
            to carry config/nodeConfig fields; TODO confirm shape upstream.
        new_env: destination Composer environment resource (dict).
        running_as_service_account: True when this code already runs as the
            environment's service account, so no key must be minted.
    """
    print("Starting database transfer.")
    # Initialized up front so the finally block can test what was created.
    gke_service_account_name = None
    gke_service_account_key = None
    gcs_db_dump_bucket = None
    try:
        # create default creds clients
        default_credentials, _ = google.auth.default(scopes=DEFAULT_SCOPES)
        storage_client = storage.Client(credentials=default_credentials)
        iam_client = discovery.build(
            "iam", "v1", credentials=default_credentials
        )

        # create service account creds sql client
        gke_service_account_name = (
            new_env.get("config", {})
            .get("nodeConfig", {})
            .get("serviceAccount")
        )
        gke_service_account_credentials = None
        # Only the service account used for Composer Environment has access to
        # hidden SQL database. If running in a VM as the service account, use
        # default credentials, otherwise create a key and authenticate as the
        # service account.
        if running_as_service_account:
            gke_service_account_credentials = default_credentials
        else:
            print(
                "Obtaining service account `{}` credentials to access SQL "
                "database.".format(gke_service_account_name)
            )
            gke_service_account_key = create_service_account_key(
                iam_client, project, gke_service_account_name
            )
            gke_service_account_credentials = (
                service_account.Credentials.from_service_account_info(
                    gke_service_account_key
                )
            ).with_scopes(DEFAULT_SCOPES)
        sql_client = discovery.build(
            "sqladmin", "v1beta4", credentials=gke_service_account_credentials
        )

        # create a bucket, export data from existing env to bucket, import data
        # to new env
        print("Creating temporary Cloud Storage bucket for database dump.")
        gcs_db_dump_bucket = create_temp_bucket(storage_client, project)
        prev_sql_project, prev_sql_instance = get_sql_project_and_instance(
            existing_env
        )
        new_sql_project, new_sql_instance = get_sql_project_and_instance(
            new_env
        )

        # The old environment's SQL service account must be able to write
        # the dump into the bucket before export can run.
        print("Granting permissions on bucket to enable database dump.")
        grant_rw_permissions(
            gcs_db_dump_bucket,
            get_sql_instance_service_account(
                sql_client, prev_sql_project, prev_sql_instance
            ),
        )
        print("Exporting database from old Environment.")
        export_data(
            sql_client,
            prev_sql_project,
            prev_sql_instance,
            gcs_db_dump_bucket.name,
            "db_dump.sql",
        )
        # Fernet keys are needed so the import can re-encrypt secrets
        # stored in the dump for the destination environment.
        print("Obtaining fernet keys for Composer Environments.")
        old_fernet_key = get_fernet_key(existing_env)
        new_fernet_key = get_fernet_key(new_env)
        print("Preparing database import to new Environment.")
        import_data(
            sql_client,
            gke_service_account_key,
            new_sql_project,
            new_sql_instance,
            gcs_db_dump_bucket.name,
            "db_dump.sql",
            old_fernet_key,
            new_fernet_key,
        )
    finally:
        # Best-effort cleanup: only delete what was actually created above.
        if gke_service_account_key:
            print("Deleting temporary service account key.")
            delete_service_account_key(
                iam_client,
                project,
                gke_service_account_name,
                gke_service_account_key,
            )
        if gcs_db_dump_bucket:
            print("Deleting temporary Cloud Storage bucket.")
            delete_bucket(gcs_db_dump_bucket)
Пример #35
0
# BUG FIX: `discovery` and `pprint` are used below but were never imported
# in this snippet, which raised NameError at runtime.
from pprint import pprint

from googleapiclient import discovery
from httplib2 import Http
from oauth2client import file, client, tools

# TODO: Change placeholder below to generate authentication credentials. See
# https://developers.google.com/sheets/quickstart/python#step_3_set_up_the_sample
#
# Authorize using one of the following scopes:
#     'https://www.googleapis.com/auth/drive'
#     'https://www.googleapis.com/auth/drive.file'
#     'https://www.googleapis.com/auth/spreadsheets'
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
store = file.Storage('credentials.json')
credentials = store.get()
if not credentials or credentials.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    credentials = tools.run_flow(flow, store)

service = discovery.build('sheets', 'v4', credentials=credentials)

# NOTE(review): this body has ValueRange fields (range/majorDimension/values),
# but spreadsheets().create expects a Spreadsheet resource — confirm intent.
spreadsheet_body = {
    # TODO: Add desired entries to the request body.
    "range": "A1:A5",
    "majorDimension": "ROWS",
    "values": [["hello", "my", "name", "is", "god"]]
}

request = service.spreadsheets().create(body=spreadsheet_body)
response = request.execute()

# TODO: Change code below to process the `response` dict:
pprint(response)
Пример #36
0
def google_search(search_term, api_key, cse_id, **kwargs):
    """Run a Custom Search query and return the raw result items.

    Extra keyword arguments are forwarded to cse().list() unchanged.
    """
    cse_service = build("customsearch", "v1", developerKey=api_key)
    response = cse_service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
    return response["items"]
Пример #37
0
def connect_to_api(key_file):
    """Authenticate with a service-account key file and return a Sheets v4 client."""
    creds = Credentials.from_service_account_file(key_file, scopes=SCOPES)
    sheets_service = discovery.build('sheets', 'v4', credentials=creds)
    return sheets_service
from pprint import pprint

from googleapiclient import discovery
from oauth2client.client import GoogleCredentials

# Build a Cloud Resource Manager client with application-default credentials.
credentials = GoogleCredentials.get_application_default()
service = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)

# The Project ID (for example, `my-project-123`).
# Required.
project_id = 'oxford-iiit-pets-183103'  # TODO: Update placeholder value.

# Fetch the project resource.
response = service.projects().get(projectId=project_id).execute()

# TODO: Change code below to process the `response` dict:
pprint(response)

#==============================================
# Results:
#{'createTime': '2017-10-16T03:05:27.884Z',
# 'lifecycleState': 'ACTIVE',
# 'name': 'Oxford-IIIT Pets',
# 'projectId': 'oxford-iiit-pets-183103',
# 'projectNumber': '493151018659'}
#==============================================
Пример #39
0
# 	return keyword


def translate(inputtext):
    """Translate *inputtext* via the Google Translate v2 API.

    Relies on module-level `slang`/`tlang` globals naming the source and
    target languages — TODO confirm they are set by the caller; no default
    is applied when neither 'english' nor 'japanese' matches.

    Returns:
        The raw translations().list() response dict.
    """
    # BUG FIXES: the original used `if x = 'y'` (assignment, SyntaxError),
    # omitted the colons, and set `sourcel` instead of `targetl` in the
    # tlang-japanese branch. Map the names to ISO codes:
    if slang == 'english':
        sourcel = 'en'
    if tlang == 'english':
        targetl = 'en'
    if slang == 'japanese':
        sourcel = 'ja'
    if tlang == 'japanese':
        targetl = 'ja'

    # Google Tranlsate API need to enter own key
    service = build('translate', 'v2',
                    developerKey='HIDDEN')
    returntext = (service.translations().list(
        source='%s' % sourcel,
        target='%s' % targetl,
        q=['%s' % inputtext]
    ).execute())

    return returntext


@app.route('/chat/text/<keyword>') #for getting GFYCAT mp4 URL
def api_article(keyword):
    """Build the Gfycat search URL for *keyword*.

    NOTE(review): the URL is assigned but never used or returned — this
    handler looks truncated; confirm the request/response logic upstream.
    """
    # BUG FIX: scheme typo 'htpps' -> 'https'. (Host spelling 'gyfcat'
    # kept as-is; presumably should be 'gfycat' — verify against the API.)
    URL = 'https://api.gyfcat.com/v1test/gyfcat/search?search_text=%s' % keyword
def get_instances():
    """Return the Compute Engine instance list for the module-level PROJECT/ZONE."""
    creds = GoogleCredentials.get_application_default()
    compute = build('compute', 'v1', credentials=creds)
    return compute.instances().list(project=PROJECT, zone=ZONE).execute()
Пример #41
0
 def _service(self) -> object:
     """Return a Drive v3 client built from the module-level credentials."""
     drive = build("drive", "v3", credentials=_CREDS, cache_discovery=False)
     return drive
Пример #42
0
import datetime
import json
import os

import dateutil.parser
from google.oauth2 import service_account
from googleapiclient.discovery import build

# Service-account JSON is supplied inline via an environment variable.
INFO = json.loads(os.environ['GOOGLE_CLOUD_CREDS'])
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']

credentials = service_account.Credentials.from_service_account_info(
    info=INFO, scopes=SCOPES)

# Module-level Calendar API client (built once at import).
calendar_service = build('calendar', 'v3', credentials=credentials)

CALENDAR_ID = '*****@*****.**'

# NOTE(review): `dateutil.tz` is only reachable here because importing
# `dateutil.parser` happens to load it — an explicit `import dateutil.tz`
# would be safer; confirm before relying on this.
EST = dateutil.tz.gettz('EST')
UTC = dateutil.tz.UTC


def parse(d):
    """Parse a Calendar API date object.

    All-day events carry a 'date' key and yield a datetime.date; timed
    events carry 'dateTime' and yield a full datetime.
    """
    try:
        raw = d['date']
    except KeyError:
        return dateutil.parser.parse(d['dateTime'])
    return dateutil.parser.parse(raw).date()


class Event:
    def __init__(self,
def main(reportName):
    """Find a DFA/DCM report by name, run it, and download the result file.

    Args:
        reportName: name of the report to locate under the hard-coded
            account 817771 (Diageo DCM).
    """
    # Authenticate and construct the dfareporting service.
    flow = client.flow_from_clientsecrets("client_secrets.json",
                                          scope=OAUTH_SCOPES)
    storage = Storage(CREDENTIAL_STORE_FILE)
    credentials = storage.get()
    http_setup = credentials.authorize(httplib2.Http())

    service = discovery.build('dfareporting', 'v3.1', http=http_setup)

    report_name = reportName

    try:
        # Look up the user profile for account 817771.
        request = service.userProfiles().list()
        response = request.execute()

        get_profile = [
            profile for profile in response['items']
            if profile['accountId'] in ['817771']
        ]
        profile_id = get_profile[0]['profileId']

        print(
            '1 - Searching reports for Account ID 817771 (Diageo DCM) with Profile ID %s.'
            % profile_id)
        print()

        # Page through the profile's reports until one matches report_name.
        request = service.reports().list(profileId=profile_id)

        # BUG FIX: report_id was reset inside the while loop, so a match
        # found on an earlier page was discarded when paging continued.
        report_id = []
        while True:
            response = request.execute()

            for report in response['items']:
                if report['name'] == report_name:
                    print('2 - Found %s report with ID %s and name "%s".' %
                          (report['type'], report['id'], report['name']))
                    print()
                    report_id.append(report['id'])
                    break

            # BUG FIX: stop paging once the report is found, and use .get()
            # for 'nextPageToken' -- the API omits it on the last page, so
            # indexing it directly raised KeyError.
            if report_id:
                break
            if response['items'] and response.get('nextPageToken'):
                request = service.reports().list_next(request, response)
            else:
                break

        # BUG FIX: guard against a missing report; report_id[0] below
        # raised IndexError when no report matched.
        if not report_id:
            print('No report named "%s" was found.' % report_name)
            return

        # Run report
        report_file = service.reports().run(profileId=profile_id,
                                            reportId=report_id[0]).execute()
        print('3. File with ID %s has been created' % report_file['id'])
        print()

        # Wait for the report file to finish processing.
        # An exponential backoff strategy is used to conserve request quota.
        sleep = 0
        start_time = time.time()
        while True:
            report_file = service.files().get(
                reportId=report_id[0], fileId=report_file['id']).execute()

            status = report_file['status']
            if status == 'REPORT_AVAILABLE':
                print('5. File status is %s, ready to download.' % status)
                # Prepare a local file to download the report contents to.
                out_file = io.FileIO(generate_file_name(report_file),
                                     mode='wb')

                # Create a get request.
                request = service.files().get_media(reportId=report_id[0],
                                                    fileId=report_file['id'])

                # Create a media downloader instance.
                # Optional: adjust the chunk size used when downloading the file.
                downloader = http.MediaIoBaseDownload(out_file,
                                                      request,
                                                      chunksize=CHUNK_SIZE)

                # Execute the get request and download the file.
                download_finished = False
                while not download_finished:
                    _, download_finished = downloader.next_chunk()

                print('File %s downloaded to %s' %
                      (report_file['id'], os.path.realpath(out_file.name)))
                return
            elif status != 'PROCESSING':
                print('5. File status is %s, processing failed.' % status)
                return
            elif time.time() - start_time > MAX_RETRY_ELAPSED_TIME:
                print('5. File processing deadline exceeded.')
                return

            sleep = next_sleep_interval(sleep)
            print('4. File status is %s, sleeping for %d seconds.' %
                  (status, sleep))
            time.sleep(sleep)

    except client.AccessTokenRefreshError:
        print(
            'The credentials have been revoked or expired, please re-run the '
            'application to re-authorize')
Пример #44
0
def main():
    """Shows basic usage of the Drive v3 API.
    Prints the names and ids of the first 10 files the user has access to.

    Then walks a Drive folder hierarchy wxarchive/<year>/<month>/<day> and
    downloads every file found two levels below it into a matching local
    directory tree under D:/Testwer/.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    # Build today's (year, month, day) path components.
    now = date.today()
    #print(now.strftime("%Y"))
    day = now.strftime("%d")
    month = now.strftime("%m")
    year = now.strftime("%Y")

    d1 = (int(day))

    # NOTE(review): looks like this intends "previous day", but on the 1st
    # it yields '00', and for d1 >= 10 the day is never decremented at all
    # -- confirm the intended date logic.
    if d1 < 10:
        day = '0' + str(d1 - 1)
        #print(day)

    dat = []
    dat.append(year)
    dat.append(month)
    dat.append(day)
    print(dat)
    directory = "D:/Testwer/" + year + "/" + month + "/" + day + "/"
    folder = "wxarchive"
    file = "00"

    service = build('drive', 'v3', credentials=creds)

    # Call the Drive v3 API
    # Find the shared top-level folder named `folder` ('wxarchive').
    queries = [
        "mimeType = 'application/vnd.google-apps.folder'", "sharedWithMe"
    ]

    drive_str_query = queries[0] if len(queries) == 1 else " and ".join(
        queries)
    results = service.files().list(q=drive_str_query,
                                   pageSize=1000,
                                   spaces='drive',
                                   fields="files(id, name)").execute()

    items = results.get('files', [])
    # print(items)

    # NOTE: `id` shadows the builtin id(); kept as-is for fidelity.
    id = ""
    for item in items:
        if folder == item['name']:
            id = item['id']
            break
    #print(id)

    # Descend year -> month -> day, updating `id` to the matching subfolder
    # at each level. If a level is missing, `id` silently stays unchanged.
    for d in dat:
        queries = [
            "mimeType = 'application/vnd.google-apps.folder'",
            "'" + id + "'" + " in parents"
        ]
        drive_str_query = queries[0] if len(queries) == 1 else " and ".join(
            queries)
        results = service.files().list(q=drive_str_query,
                                       pageSize=1000,
                                       spaces='drive',
                                       fields="files(id, name)").execute()

        items = results.get('files', [])

        for item in items:
            if d == item['name']:
                id = item['id']
                break

    # List the subfolders of the day folder reached above.
    queries = [
        "mimeType = 'application/vnd.google-apps.folder'",
        "'" + id + "'" + " in parents"
    ]

    #print(queries);
    drive_str_query = queries[0] if len(queries) == 1 else " and ".join(
        queries)
    results = service.files().list(q=drive_str_query,
                                   pageSize=1000,
                                   spaces='drive',
                                   fields="files(id, name)").execute()
    #print("2nd col")
    items = results.get('files', [])

    #print(items)
    global dayStr
    #dayStr = "12"
    #print(dayStr)
    #print(dat[2])
    #for item in items:
    #   if dat[2] == item['name']:
    #      id = item['id']
    #     print(id)
    #    dayStr = item['name']
    #   print(dayStr)
    #  break

    directory = directory  #+ dayStr + "/"

    # NOTE(review): `item` here is the loop variable leaking from the
    # folder-matching loop above -- if `items` was empty this raises
    # NameError, and otherwise it may be an arbitrary non-matching folder.
    results = service.files().list(q="'" + item['id'] + "'" + " in parents",
                                   pageSize=1000,
                                   spaces='drive',
                                   fields="files(id, name)").execute()
    fileItems = results.get('files', [])
    print(fileItems)

    # For each subfolder, create a local directory and download every file
    # inside it chunk by chunk.
    for fileItem in fileItems:
        file_id = str(fileItem['id'])
        fileFolder_name = fileItem['name']
        downloadDir = directory + fileFolder_name + "/"
        fileResults = service.files().list(q="'" + fileItem['id'] + "'" +
                                           " in parents",
                                           pageSize=1000,
                                           spaces='drive',
                                           fields="files(id, name)").execute()
        finalFileItems = fileResults.get('files', [])
        #print(downloadDir)
        os.makedirs(downloadDir)
        for finalFileItem in finalFileItems:
            finalFile_id = str(finalFileItem['id'])
            request = service.files().get_media(fileId=finalFile_id)
            file_io_base = open(downloadDir + finalFileItem['name'], 'wb')
            #fh = io.BytesIO()
            downloader = MediaIoBaseDownload(file_io_base, request)
            #print(fh)
            done = False
            while done is False:
                status, done = downloader.next_chunk()
                #print(status)
                print("Download %d%%." % int(status.progress() * 100))
Пример #45
0
def book_timeslot(event_description, booking_time, input_email):
    """Book a one-hour Google Calendar slot starting today at *booking_time*.

    Args:
        event_description: text prepended to the event description.
        booking_time: start time as 'HH:MM' (24h; +08:00 offset assumed).
        input_email: attendee email address to invite.

    Returns:
        True if an event was created, False if an upcoming event already
        starts at the requested time.
    """
    creds = None
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('calendar', 'v3', credentials=creds)

    # --------------------- Manipulating Booking Time ----------------------------
    # NOTE(review): assumes booking_time is zero-padded 'HH:MM'; a '23:xx'
    # booking produces an invalid end hour of 24 -- confirm inputs upstream.
    start_time = str(
        datetime.datetime.now())[:10] + 'T' + booking_time + ':00+08:00'
    end_time = str(
        datetime.datetime.now())[:10] + 'T' + str(int(booking_time[:2]) +
                                                  1) + ':00:00+08:00'

    # Call the Calendar API
    now = datetime.datetime.utcnow().isoformat(
    ) + 'Z'  # 'Z' indicates UTC time
    print('Booking a time slot....')
    events_result = service.events().list(calendarId='primary',
                                          timeMin=now,
                                          maxResults=10,
                                          singleEvents=True,
                                          orderBy='startTime').execute()
    events = events_result.get('items', [])

    # Refuse the booking if any upcoming event shares the start time.
    for event in events:
        start = event['start'].get('dateTime', event['start'].get('date'))
        if start == start_time:
            print('Already book....')
            return False

    # The original code duplicated the ~40-line event body in both
    # branches, differing only in 'location'; that difference is preserved.
    location = 'madurai' if not events else 'Singapore'
    event = _build_booking_event(event_description, input_email, start_time,
                                 end_time, location)
    event = service.events().insert(calendarId='primary',
                                    body=event).execute()
    print('Event created: %s' % (event.get('htmlLink')))
    return True


def _build_booking_event(event_description, input_email, start_time, end_time,
                         location):
    """Build the Calendar event body shared by both booking branches."""
    return {
        'summary': 'book an event',
        'location': location,
        'description': str(event_description) + 'with Madurai Startups',
        'start': {
            'dateTime': start_time,
            'timeZone': 'Asia/Singapore',
        },
        'end': {
            'dateTime': end_time,
            'timeZone': 'Asia/Singapore',
        },
        'recurrence': ['RRULE:FREQ=DAILY;COUNT=1'],
        'attendees': [
            {'email': '*****@*****.**'},
            {'email': str(input_email)},
        ],
        'reminders': {
            'useDefault': False,
            'overrides': [
                {'method': 'email', 'minutes': 24 * 60},
                {'method': 'popup', 'minutes': 10},
            ],
        },
    }
Пример #46
0
    def connect(self):
        """
        Obtain the GCE service clients by authenticating, and setup prerequisites like bucket,
        net, subnet and fw rules.

        Side effects: builds self.compute, then creates (and waits on) a
        network, a subnetwork, and a firewall rule in self.projectid.
        """
        log.info('Creating compute client')
        self.compute = discovery.build('compute',
                                       'v1',
                                       credentials=self.credentials,
                                       cache_discovery=False)

        # keeping bucket related code in case these will be later required
        # https://cloud.google.com/compute/docs/disks/gcs-buckets
        # self.storage = discovery.build('storage', 'v1', credentials=self.credentials)
        # log.info('Creating storage bucket {}'.format(self.bucket_name))
        # bucket_config = {'name': self.bucket_name,
        #                  'location': self.region
        #                 }
        # bucket = self.storage.buckets().insert(project=self.projectid,
        #                                        body=bucket_config).execute()

        # Custom-mode VPC: subnets are created explicitly below.
        log.info('Creating virtual network: {}'.format(self.net_name))
        net_config = {'name': self.net_name, 'autoCreateSubnetworks': False}
        net = self.compute.networks().insert(project=self.projectid,
                                             body=net_config).execute()
        self.wait_for_operation(net['name'])
        log.info(net)
        log.info('Creating subnet: {}'.format(self.subnet_name))
        subnet_config = {
            'name': self.subnet_name,
            'ipCidrRange': '10.10.10.0/24',
            'network': net['targetLink']
        }
        subnet = self.compute.subnetworks().insert(
            project=self.projectid, region=self.region,
            body=subnet_config).execute()
        # Subnet creation is a regional operation, hence the region kwarg.
        self.wait_for_operation(subnet['name'], region=self.region)
        log.info(subnet)

        # NOTE(review): despite the ssh-only first rule, the tcp/udp
        # 0-65535 entries open all ports -- confirm this is intentional.
        log.info('Creating firewall rules.')
        fw_config = {
            'name':
            'all-traffic-' + self.net_name,
            'network':
            net['targetLink'],
            'allowed': [{
                'IPProtocol': 'tcp',
                'ports': ['22']
            }, {
                'IPProtocol': 'tcp',
                'ports': ['0-65535']
            }, {
                'IPProtocol': 'udp',
                'ports': ['0-65535']
            }, {
                'IPProtocol': 'icmp'
            }]
        }
        fw = self.compute.firewalls().insert(project=self.projectid,
                                             body=fw_config).execute()
        self.wait_for_operation(fw['name'])
        log.info(fw)
Пример #47
0
def main():
    """Shows basic usage of the Gmail API.

    Walks every thread in the mailbox, records each sender and
    List-Unsubscribe link, groups messages by unsubscribe domain,
    optionally trashes no-reply / one-time messages, writes several
    report files, and opens the first few unsubscribe pages.
    """
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('gmail', 'v1', credentials=creds)

    f = open("Everything.txt", "w")
    # Call the Gmail API
    megaThreadList = []  # All thread ids / emails
    sitesList = []  # List of all sites
    siteNames = []  # Names of sites for easy search
    ar = []  # Email From
    noReply = []  # Thread ids of no-reply emails
    moreThreads = True
    threadsList = service.users().threads().list(userId='me',
                                                 includeSpamTrash=True,
                                                 prettyPrint=True).execute()
    # BUG FIX: indexing 'nextPageToken' directly raised KeyError when the
    # whole mailbox fit on one page; .get() returns None instead.
    nextPageToken = threadsList.get('nextPageToken')
    for thread1 in threadsList['threads']:
        megaThreadList.append(thread1['id'])

    ix = 0
    while moreThreads and nextPageToken:
        threadsList = service.users().threads().list(
            userId='me',
            includeSpamTrash=True,
            prettyPrint=True,
            pageToken=nextPageToken).execute()
        for thread1 in threadsList['threads']:
            megaThreadList.append(thread1['id'])
        if 'nextPageToken' in threadsList:
            nextPageToken = threadsList['nextPageToken']
            if ix >= 10000:  # safety cut-off so pagination can never loop forever
                moreThreads = False
            ix += 1
            print(nextPageToken)
        else:
            moreThreads = False
    print(ix)
    for ids in megaThreadList:
        metaMessage = service.users().threads().get(
            userId='me', id=ids, format="metadata").execute()
        payloads = (metaMessage['messages'][0]['payload'])
        head = payloads['headers']
        curEmail = ""
        curMess = Message()
        curMess.thread = ids
        unsub = ""  # The unsubscribe link
        for pay in head:
            if pay['name'] == 'From':
                temp = pay['value']
                # BUG FIX: when the From header had no "<...>" part the
                # original fell through and stripped the last character off
                # the bare address; keep the whole value in that case.
                if "<" in temp:
                    ind = temp.index("<")
                    curEmail = temp[ind + 1:-1]
                else:
                    curEmail = temp
                curMess.sender = curEmail
                ar.append(curEmail)
                if "noreply" in curEmail or "no-reply" in curEmail:
                    noReply.append(ids)

            if pay['name'] == 'List-Unsubscribe':
                temp = pay['value']
                # BUG FIX: same fallback defect as the From header -- the
                # original sliced temp[1:-1] when no "<" was present.
                if "<" in temp:
                    ind = temp.index("<")
                    curLink = temp[ind + 1:-1]
                else:
                    curLink = temp
                unsub = curLink
                curMess.link = curLink

        f.write(curMess.sender + "  " + curMess.link + "\n")

        cleanDom = unsub
        if "," in unsub:
            # Some headers carry "<mailto:...>, <https://...>" -- take the
            # second (web) entry.
            split = unsub.split(",")
            cleanDom = cleanDomain(split[1])
            curMess.link = cleanDom
        else:
            cleanDom = cleanDomain(unsub)

        if not (cleanDom is None or "mailto" in cleanDom):
            if validURL(cleanDom):
                if cleanDom in siteNames:  # Already exists
                    curIndex = siteNames.index(cleanDom)
                    sitesList[curIndex].addMessage(curMess)
                else:  # Create a new Site
                    curSite = Site()
                    siteNames.append(cleanDom)
                    curSite.domainName = cleanDom
                    curSite.addMessage(curMess)
                    curSite.sender = curMess.sender
                    sitesList.append(curSite)
    f.flush()
    f.close()
    fsd = open("SitesFile.txt", "w")
    fs = open("webSiteFile.txt", "w")
    fsa = open("ignored.txt", "w")
    # Single-message senders are only reported; multi-message sites get
    # their summary and unsubscribe link written out.
    for s in sitesList:
        if s.getMessageSize() == 1:
            print("Would you like to delete them messages?")
            # BUG FIX: the original wrote a literal "/n" instead of a newline.
            fsa.write(s.getSender() + "\n")
            print("Ignoring " + s.getSender())
        else:
            fsd.write(s.getString() + "\n")
            fs.write(s.getLink() + "\n")
    fsd.close()
    fsa.close()
    fs.close()
    fq = open("issues.txt", "w")
    # Interactive prompts are currently hard-wired to "yes".
    oneTimeResponse = "yes"
    noReplyResponse = "yes"
    oneTime = oneTimeResponse.lower().strip() == "yes"
    noReplies = noReplyResponse.lower().strip() == "yes"
    counter = 0
    newSet = []
    if noReplies:
        for nrthread in noReply:
            try:
                service.users().messages().trash(
                    userId='me', id=nrthread).execute()  # trashing thread
            except Exception:  # narrowed from bare except so Ctrl-C still works
                print("That was an issue with " + nrthread)

    for s in sitesList:
        try:
            if s.getMessageSize() == 1 and oneTime:
                # BUG FIX: s.messages is the site's message list; the
                # original accessed s.messages.thread, which always raised
                # AttributeError and fell into the except branch.
                service.users().messages().trash(
                    userId='me', id=s.messages[0].thread).execute()
            if s.getMessageSize() > 1:
                print(str(counter) + ". " + s.getString())
                newSet.append(s)
                counter += 1
        except Exception:
            fq.write(s.getString())
            print("that message does not exist")

    fsb = open("deleting.txt", "w")
    fq.write("\nhere is the split\n\n")
    # The interactive "which sites to keep" prompt is disabled, so this
    # deletion loop currently never runs.
    spliting = []
    counter = 0
    for splits in spliting:
        if not (counter == splits):
            fsb.write(newSet[counter].sender + "\n")
            for mes in newSet[counter].messages:
                try:
                    service.users().messages().trash(userId='me',
                                                     id=mes.thread).execute()
                except Exception:
                    fq.write(mes.getString())
        counter += 1
    fsb.flush()
    fsb.close()
    fq.close()

    # Opening up the first few unsubscribe pages in the browser.
    fr = open("webSiteFile.txt", "r")
    lx = fr.readlines()
    ci = 0
    for lx1 in lx:
        # BUG FIX: the comparison was inverted (ci >= 5), which skipped the
        # first five links and opened every remaining one instead.
        if ci < 5:  # only opening 5 browsers at a time
            webbrowser.open(lx1)
        ci += 1
    fr.close()
# Module-level logger for the Google Drive integration.
logger = logging.getLogger("google_drive_api")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# File handler.
# NOTE(review): logging.FileHandler raises FileNotFoundError if the
# ./logs/app_logs directory does not exist -- confirm it is created
# before the app starts.
handler = logging.FileHandler('./logs/app_logs/app.log')
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)

# Mirror the same records to the console.
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)


# Runtime configuration is read from this JSON file.
path = "./configuration/input.json"


with open(path, 'r') as f:
    config_input = json.load(f)

# Config values: OAuth scopes, service-account key file, and two folder
# paths consumed elsewhere in the module.
SCOPES = config_input["SCOPES"]
SERVICE_ACCOUNT_FILE = config_input["SERVICE_ACCOUNT_FILE"]
api_folder_path = config_input["api_folder_path"]
data_folder = config_input["data_folder"]
credentials = service_account.Credentials.from_service_account_file(
        SERVICE_ACCOUNT_FILE, scopes=SCOPES)

# Drive v3 client authorized with the service account.
service = build('drive', 'v3', credentials=credentials)
Пример #49
0
def main(detect="", photo_file="", trans_lan="ja-JP"):
    """Annotate a photo with the Vision API, then speak the translated result.

    Args:
        detect: feature to detect (e.g. "label", "face"); empty string
            means use the module-level default_detect list.
        photo_file: path of the image; empty string means capture one
            via camera().
        trans_lan: target language/locale for translation and speech.
    """
    if photo_file == "":
        photo_file = camera()
    credentials = GoogleCredentials.get_application_default()
    service = discovery.build('vision',
                              'v1',
                              credentials=credentials,
                              discoveryServiceUrl=DISCOVERY_URL)

    with open(photo_file, 'rb') as image:
        image_content = base64.b64encode(image.read())
        if detect == "":  # No parameter
            DETECT = default_detect
        else:  # Parameter specified
            DETECT = [detect.upper()]

        result = ""
        for DET in DETECT:
            service_request = service.images().annotate(
                body={
                    'requests': [{
                        'image': {
                            'content': image_content.decode('UTF-8')
                        },
                        'features': [{
                            'type': DET + '_DETECTION',
                            'maxResults': default_max
                        }]
                    }]
                })
            response = service_request.execute()
            annotation = DET.lower() + 'Annotations'
            try:
                results = response['responses'][0][annotation]
                for res in results:
                    if DET in ["LABEL", "LOGO"]:  # Label / logo detection
                        if res["score"] > 0.7:
                            result += res["description"] + ", "

                    elif DET in ["TEXT"]:  # Text detection
                        result += res["description"] + ", "

                    elif DET in ["FACE"]:  # Face detection
                        if res["joyLikelihood"] == "VERY_LIKELY" or res[
                                "joyLikelihood"] == "LIKELY":
                            result += "Smile "
                        if res["angerLikelihood"] == "VERY_LIKELY" or res[
                                "angerLikelihood"] == "LIKELY":
                            result += "Angry "
                        # BUG FIX: the 'rsult' typo raised NameError, which
                        # the old bare except silently turned into "No FACE".
                        if res["headwearLikelihood"] == "VERY_LIKELY" or res[
                                "headwearLikelihood"] == "LIKELY":
                            result += "Capped "
                        result += DET + ", "
            except (KeyError, IndexError):
                # The annotation is simply absent from the response.
                result += "No " + DET + ", "
        print('Result: ' + result)
        # BUG FIX: the original referenced the undefined names 'text' and
        # 'trans_lang' -- the parameter is spelled trans_lan and the text to
        # translate is the accumulated result string.
        trans_result = translate_text(result, trans_lan)
        trans_result = trans_result.replace("&#39;", "")
        print('Trans Result: ' + trans_result)
        if trans_lan == "ja-JP":
            # NOTE(review): trans_result is interpolated into a shell
            # command unquoted -- sanitize or use subprocess with a list
            # before exposing this to untrusted input.
            os.system(dir_aquest +
                      '/AquesTalkPi -g {} {} | aplay -D plughw:{},{}'.format(
                          VOLUME, trans_result, CARD, DEVICE))
Пример #50
0
def main():
    """Shows basic usage of the Google Calendar API.

    Reads the start time and summary of the next 10 events on the user's
    primary calendar and mirrors them into the SQL Server table
    [dbo].[Calendar].
    """
    # 'token.pickle' stores the user's access and refresh tokens and is
    # created automatically when the authorization flow first completes.
    creds = None
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)

    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Credentials exist but have expired -- refresh them.
            creds.refresh(Request())
        else:
            # No credentials -- run the OAuth flow from credentials.json.
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server()

        # Save the credentials for the next run.
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('calendar', 'v3', credentials=creds)

    # Call the Calendar API. Google Calendar stores events in UTC; the
    # trailing 'Z' marks the timestamp as UTC.
    now = datetime.datetime.utcnow().isoformat() + 'Z'
    events_result = service.events().list(calendarId='primary', timeMin=now,
                                          maxResults=10, singleEvents=True,
                                          orderBy='startTime').execute()
    events = events_result.get('items', [])

    if not events:
        print('No upcoming events found.')
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed
    # in 2.0 -- collect the rows first and build the frame once.
    rows = []
    for event in events:
        start = event['start'].get('dateTime', event['start'].get('date'))
        rows.append([lib.Util.ConvertIso2YMDHMS(start), event['summary']])
    data = pd.DataFrame(rows, columns=['ymdhms', 'summary'])

    # Clear the table before re-inserting.
    editsql = "DELETE FROM [dbo].[Calendar]"
    lib.Util.ExecuteSQLBySQLServer(editsql)

    # Insert one row per event.
    # NOTE(review): values are spliced into the SQL text verbatim -- an
    # event summary containing a quote breaks the statement and this is
    # injectable. Prefer a parameterized INSERT if the helper supports it.
    for line in data.values:
        editsql = "INSERT INTO [dbo].[Calendar]([ymdhms],[summary]) VALUES ('{0}','{1}')"
        for i, col in enumerate(line):
            editsql = editsql.replace('{' + str(i) + '}', col)
        lib.Util.ExecuteSQLBySQLServer(editsql)

    # Read the table back and print each row.
    editsql = \
        "SELECT FORMAT(A.ymdhms,'yyyy-MM-dd HH:mm') ymdhms, A.summary FROM [dbo].[Calendar] AS A"
    data = lib.Util.ReadQueryBySQLServer(editsql)
    for line in data.values:
        print(','.join(line))
Пример #51
0
def buildCompute():
    """Return a Compute Engine v1 API client using default credentials."""
    compute_service = discovery.build('compute', 'v1')
    return compute_service
Пример #52
0
    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.
    Returns:
        Credentials, the obtained credential.
    """
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,
                                   'gmail-python-quickstart.json')

    store = Storage('storage.json')
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else: # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials

# OAuth scopes and client-secret file used by get_credentials() above.
scope = 'https://www.googleapis.com/auth/gmail.readonly https://www.googleapis.com/auth/gmail.modify'
client_secret = 'client_secret.json'
credentials = get_credentials()
# BUG FIX: removed the dead 'http = Http()' assignment -- its value was
# immediately overwritten by the authorized http object below.
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
Пример #53
0
def get_authenticated_service():
  """Run the installed-app OAuth console flow and return an API client."""
  oauth_flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)
  creds = oauth_flow.run_console()
  return build(API_SERVICE_NAME, API_VERSION, credentials=creds)
Пример #54
0
 def __init__(self, credentials):
     """Keep the supplied credentials and build a Resource Manager v1 client."""
     self.credentials = credentials
     self.service = discovery.build(
         'cloudresourcemanager', 'v1', credentials=credentials)
 def __init__(self, database):
     """Build the Procurement API client and keep a handle to the database."""
     self.database = database
     self.service = build(PROCUREMENT_API, 'v1', cache_discovery=False)
Пример #56
0
def get_vision_service():
    """Return a Vision API v1 client using application-default credentials."""
    default_creds = GoogleCredentials.get_application_default()
    return discovery.build('vision', 'v1', credentials=default_creds)
Пример #57
0
 def build_gdrive_service(self):
     """Authorize if needed, then attach a Drive v3 client to the instance."""
     if not self.credentials:
         self.authorize()
     self.gdrive_service = build(
         'drive', 'v3', credentials=self.credentials)
Пример #58
0
 def __init__(self):
     """Create the AI Platform (ml/v1) projects resource used for requests."""
     ml_client = discovery.build("ml", "v1", cache_discovery=False)
     self._ml = ml_client.projects()
import os
import pandas as pd
import matplotlib.pyplot as plt
import streamlit as st

from googleapiclient.discovery import build
from wordcloud import WordCloud, STOPWORDS

# YouTube Data API client; the key is read from the YT_KEY env var.
# NOTE(review): os.environ.get returns None when YT_KEY is unset, in which
# case build() cannot authenticate -- confirm the variable is exported.
api_key = os.environ.get('YT_KEY')
youtube = build('youtube', 'v3', developerKey=api_key)

st.title('Youtube Comment Builder')

# Streamlit inputs: the video id, comma-separated extra stop words, and
# the background colour for the generated word cloud.
streamlit_yt_id = st.text_input("Give Youtube ID", '2MYD5LkXxKg')
user_stops = st.text_area("Input Stop Words", 'ugly,stupid,worst')
word_cloud_color = st.selectbox(
    'Pick a color for the word cloud',
    ('black', 'white', 'yellow', 'blue', 'green', 'red'))


class CommentCloud():
    """Takes in a youtube id
    returns a wordcloud"""
    def __init__(self, YouTube_ID, youtube, file_name):
        # id of the video whose comments will be fetched
        self.YouTube_ID = YouTube_ID
        # NOTE(review): this reads the module-level api_key, not a
        # parameter, and the 'file_name' argument is accepted but never
        # stored -- confirm whether it should be kept for later output.
        self.api_key = api_key
        self.youtube = youtube

    def GetTopComments(self):
        """Request comments via youtube api, saves under self"""
        self.top_comments = self.youtube.commentThreads().list(
def youtube_search(q,
                   max_results,
                   order="relevance",
                   token=None,
                   location=None,
                   location_radius=None):
    """Search YouTube for videos matching *q* and collect their metadata.

    Args:
        q: free-text search query.
        max_results: maximum number of search results to request.
        order: result ordering understood by the API (default "relevance").
        token: page token for continuing a previous search.
        location: optional "lat,long" string for geographic filtering.
        location_radius: radius paired with location.

    Returns:
        (youtube_dict, comment_dict): parallel per-video metadata lists,
        and the top-level comment authors/texts gathered across results.
    """

    youtube = build(YOUTUBE_API_SERVICE_NAME,
                    YOUTUBE_API_VERSION,
                    developerKey=DEVELOPER_KEY)
    search_response = youtube.search().list(
        q=q,
        type="video",
        pageToken=token,
        order=order,
        part="id,snippet",  # the kinds of data we want back
        maxResults=max_results,
        location=location,
        locationRadius=location_radius).execute()

    title = []
    channelId = []
    channelTitle = []
    categoryId = []
    videoId = []
    viewCount = []
    likeCount = []
    dislikeCount = []
    commentCount = []
    tags = []
    commentAuthor = []
    comments = []
    for search_result in search_response.get("items", []):

        if search_result["id"]["kind"] != "youtube#video":
            continue  # skip channels/playlists

        title.append(search_result['snippet']['title'])
        videoId.append(search_result['id']['videoId'])

        # Fetch per-video statistics and snippet details.
        response = youtube.videos().list(
            part='statistics, snippet',
            id=search_result['id']['videoId']).execute()

        snippet = response['items'][0]['snippet']
        statistics = response['items'][0]['statistics']

        channelId.append(snippet['channelId'])
        channelTitle.append(snippet['channelTitle'])
        categoryId.append(snippet['categoryId'])
        # BUG FIX: statistics keys are optional -- 'dislikeCount' is no
        # longer returned by the API and 'likeCount'/'viewCount' can be
        # hidden -- so guard them all the way 'commentCount' already was.
        viewCount.append(statistics.get('viewCount', []))
        likeCount.append(statistics.get('likeCount', []))
        dislikeCount.append(statistics.get('dislikeCount', []))
        commentCount.append(statistics.get('commentCount', []))
        tags.append(snippet.get('tags', []))

        try:
            results = youtube.commentThreads().list(
                part="snippet",
                videoId=search_result['id']['videoId'],
                textFormat="plainText").execute()

            for item in results["items"]:
                comment = item["snippet"]["topLevelComment"]
                commentAuthor.append(
                    comment["snippet"]["authorDisplayName"])
                comments.append(comment["snippet"]["textDisplay"])
        except Exception:
            # Comments may be disabled for the video; skip it. (Narrowed
            # from a bare except so KeyboardInterrupt still propagates.)
            pass

    comment_dict = {
        'comment_author': commentAuthor,
        'comment': comments
    }

    youtube_dict = {
        'tags': tags,
        'channelId': channelId,
        'channelTitle': channelTitle,
        'categoryId': categoryId,
        'title': title,
        'videoId': videoId,
        'viewCount': viewCount,
        'likeCount': likeCount,
        'dislikeCount': dislikeCount,
        'commentCount': commentCount
    }

    return youtube_dict, comment_dict