def _wait_for_operation(api: discovery.Resource, operation: Dict[Text, Any],
                        method_name: Text) -> Dict[Text, Any]:
    """Wait for a long running operation.

    Polls the operation every _POLLING_INTERVAL_IN_SECONDS until the API
    reports it as done.

    Args:
        api: Google API client resource.
        operation: The operation to wait for.
        method_name: Operation method name for logging.

    Returns:
        Operation completion status.

    Raises:
        RuntimeError: If the operation completed with an error.
    """
    status_resc = api.projects().operations().get(name=operation['name'])
    # Keep the result of the final poll instead of issuing one more get()
    # call after the loop exits (the original executed the request twice
    # once the operation was done).
    result = status_resc.execute()
    while not result.get('done'):
        time.sleep(_POLLING_INTERVAL_IN_SECONDS)
        logging.info('Method %s still being executed...', method_name)
        result = status_resc.execute()
    if result.get('error'):
        # The operation completed with an error.
        raise RuntimeError('Failed to execute {}: {}'.format(
            method_name, result['error']))
    return result
def get_report(analytics: Resource) -> Dict:
    """Fetch 30 days of unique pageviews per page for kotlinlang.org.

    Queries the GA Reporting API for view 85132606, keeping only hits on
    the kotlinlang.org hostname whose path contains no '?', ordered by
    unique pageviews descending.

    Args:
        analytics: Google Analytics Reporting API client resource.

    Returns:
        The raw batchGet response as a dict.
    """
    report_request = {
        "viewId": "85132606",
        "samplingLevel": "LARGE",
        "filtersExpression": "ga:hostname==kotlinlang.org;ga:pagepath!@?",
        "pageSize": 10000,
        "orderBys": [{"fieldName": "ga:uniquepageviews",
                      "sortOrder": "DESCENDING"}],
        "dateRanges": [{"startDate": "30daysAgo", "endDate": "yesterday"}],
        "metrics": [{"expression": "ga:uniquepageviews", "alias": ""}],
        "dimensions": [{"name": "ga:pagePath"}],
    }
    return analytics.reports().batchGet(
        body={"reportRequests": [report_request]}).execute()
def _insert_resource(self, collection: discovery.Resource,
                     body: Dict[str, Any]) -> 'GcpResource':
    """Insert a compute resource and wrap the result in a GcpResource.

    Args:
        collection: API collection supporting insert(project=..., body=...).
        body: Resource description; must contain a 'name' key.

    Returns:
        GcpResource holding the new resource's name and target link.
    """
    logger.info('Creating compute resource:\n%s',
                self.resource_pretty_format(body))
    request = collection.insert(project=self.project, body=body)
    response = self._execute(request)
    return self.GcpResource(body['name'], response['targetLink'])
def _get_comments(client: Resource, **kwargs) -> List[str]:
    """Collect top-level comment texts for a video, in order of relevance.

    Pages through commentThreads().list via the pageToken kwarg until
    MAX_NUMBER_COMMENTS comments have been requested or no next page
    exists. On an HTTP error the comments gathered so far are returned.

    Args:
        client: YouTube API client resource.
        **kwargs: Arguments for commentThreads().list; must include videoId.

    Returns:
        List of comment display texts.

    Raises:
        datatypes.DataFetchingError: If a page comes back empty.
    """
    comments: List[str] = []
    for _ in range(0, MAX_NUMBER_COMMENTS, NUM_RESULTS_PER_REQUEST):
        try:
            response = client.commentThreads().list(**kwargs).execute()
        except HttpError as e:
            # Best-effort: report the error and return what we have.
            print(e)
            return comments
        if not response:
            raise datatypes.DataFetchingError(kwargs["videoId"])  # TODO: catch
        comments.extend(
            item["snippet"]["topLevelComment"]["snippet"]["textDisplay"]
            for item in response["items"])
        if "nextPageToken" not in response:
            break
        kwargs["pageToken"] = response["nextPageToken"]
    return comments
def save_new_file(gds: Resource, file_data: BytesIO, mimetype: str,
                  folder_id: str, filename: str) -> dict:
    """Uploads a new file to Google Drive.

    Args:
        gds (Resource): google drive service.
        file_data (BytesIO): file content as a buffer.
        mimetype (str): MIME type of the file.
        folder_id (str): Google Drive's id of the folder.
        filename (str): filename of the file.

    Returns:
        dict: metadata (the 'id' field) of the uploaded file.
    """
    log("Saving new file: %s", filename)
    metadata = {
        "name": filename,
        "mimeType": mimetype,
        "parents": [folder_id],
    }
    upload = MediaIoBaseUpload(file_data, mimetype=mimetype)
    return (gds.files()
            .create(body=metadata, media_body=upload, fields="id")
            .execute())
def delete_model_from_aip_if_exists(
    api: discovery.Resource,
    ai_platform_serving_args: Dict[Text, Any],
) -> None:
    """Deletes a model from Google Cloud AI Platform if it exists.

    Args:
        api: Google API client resource.
        ai_platform_serving_args: Dictionary containing arguments for pushing
            to AI Platform. For the full set of parameters supported, refer to
            https://cloud.google.com/ml-engine/reference/rest/v1/projects.models

    Raises:
        RuntimeError: if an error is encountered when trying to delete.
    """
    # Fixed log message typo ("Deleting model with from AI Platform").
    logging.info('Deleting model from AI Platform: %s',
                 ai_platform_serving_args)
    model_name = ai_platform_serving_args['model_name']
    project_id = ai_platform_serving_args['project_id']
    name = 'projects/{}/models/{}'.format(project_id, model_name)
    try:
        operation = api.projects().models().delete(name=name).execute()
        _wait_for_operation(api, operation, 'projects.models.delete')
    except errors.HttpError as e:
        # If the error is deleting a non-existent model, it's ok to ignore.
        if e.resp.status == 404:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning('Model %s does not exist', model_name)
        else:
            raise RuntimeError(
                'Deleting model from AI Platform failed: {}'.format(e))
def upload_video(
    youtube: d.Resource,
    file: str,
    title: str,
    description: str,
    category=27,
    privacy='private',
    tags=None
):
    """Upload a video file to YouTube via a resumable insert request.

    Args:
        youtube: YouTube API client resource.
        file: Path of the video file to upload.
        title: Video title.
        description: Video description.
        category: Numeric YouTube category id.
        privacy: Privacy status of the uploaded video.
        tags: Optional list of tags; None means no tags.

    Returns:
        The result of resumable_upload() for the insert request.
    """
    body = {
        'snippet': {
            'title': title,
            'description': description,
            'tags': tags if tags is not None else [],
            'categoryId': category,
        },
        'status': {
            'privacyStatus': privacy,
        },
    }
    # Call the API's videos.insert method to create and upload the video.
    insert_request = youtube.videos().insert(
        part=','.join(body.keys()),
        notifySubscribers=False,
        body=body,
        media_body=MediaFileUpload(file, chunksize=-1, resumable=True),
    )
    return resumable_upload(insert_request)
def _get_study(
    service_client: discovery.Resource,
    study_parent: Text,
    study_id: Text,
    study_should_exist: bool = False,
):
    """Method for loading a study.

    Given the study_parent and the study_id, this method will load the
    specified study, up to constants.MAX_NUM_TRIES_FOR_STUDIES tries.

    Args:
      service_client: An API client of Vizier service.
      study_parent: Prefix of the study name. The full study name will be
        {study_parent}/studies/{study_id}.
      study_id: An identifier of the study.
      study_should_exist: Indicates whether it should be assumed that the
        study with the given study_id exists.
    """
    study_name = "{}/studies/{}".format(study_parent, study_id)
    tf.get_logger().info(
        "Study already exists: {}.\nLoad existing study...".format(study_name))
    num_tries = 0
    # Poll GetStudy until it succeeds or the retry budget is exhausted.
    while True:
        try:
            service_client.projects().locations().studies().get(
                name=study_name
            ).execute()
        except errors.HttpError as err:
            num_tries += 1
            if num_tries >= constants.MAX_NUM_TRIES_FOR_STUDIES:
                # Only report "study not found" when the caller asserted the
                # study should already exist AND the final error was a 404;
                # any other terminal failure becomes a RuntimeError.
                if (
                    study_should_exist
                    and err.resp.status == http.HTTPStatus.NOT_FOUND.value
                ):
                    raise ValueError(
                        "GetStudy failed. Study not found: {}.".format(study_id)
                    )
                else:
                    raise RuntimeError(
                        "GetStudy failed. Max retries reached: {0!s}".format(
                            err
                        )
                    )
            time.sleep(1)  # wait 1 second before trying to get the study again
        else:
            # GetStudy succeeded; the study is loadable.
            break
def _gadmin_alias_insert(service: Resource, message: Message, email: str,
                         alias: str) -> None:
    """Add an alias address to the specified user.

    :param service: Google API connection
    :param message: Slack message to reply to
    :param email: address of the user to add the alias to
    :param alias: alias email address
    """
    try:
        service.users().aliases().insert(
            userKey=email, body={"alias": alias}).execute()
        botsend(message, f"`{email}` にエイリアス `{alias}` を追加しました")
    except HttpError as e:
        botsend(message, f"エイリアスの追加に失敗しました\n`{e}`")
def _gadmin_member_delete(message: Message, service: Resource, group: str,
                          emails: list[str]) -> None:
    """Remove the specified members from the specified group.

    :param message: Slack message to reply to
    :param service: Google API connection
    :param group: email address of the group
    :param emails: email addresses of the members to remove
    """
    for email in emails:
        try:
            service.members().delete(
                groupKey=group, memberKey=email).execute()
            botsend(message, f"`{group}` グループから `{email}` を削除しました")
        except HttpError as e:
            # TODO: distinguish a wrong group from a member-level error
            botsend(message, f"メンバーの削除に失敗しました\n`{e}`")
def _create_resource(self, collection: discovery.Resource, body: dict,
                     **kwargs):
    """Create an API resource under self.parent() and wait for the request.

    Args:
        collection: API collection supporting create(parent=..., body=...).
        body: Resource description to create.
        **kwargs: Extra arguments forwarded to collection.create().
    """
    logger.info("Creating %s resource:\n%s", self.api_name,
                self._resource_pretty_format(body))
    request = collection.create(parent=self.parent(), body=body, **kwargs)
    self._execute(request)
def __send_message(service: Resource, user_id: str, message: str) -> str:
    """Send a Gmail message on behalf of user_id.

    Args:
        service: Gmail API client resource.
        user_id: The user's email address (or 'me' for the authed user).
        message: Message body passed to users().messages().send().

    Returns:
        The sent message resource on success; None when the API call fails
        (the error is printed instead of raised).
    """
    try:
        # Bind the response to its own name instead of shadowing the
        # `message` parameter, as the original code did.
        sent = (service.users().messages()
                .send(userId=user_id, body=message).execute())
        print('Message Id: %s' % sent['id'])
        return sent
    except mail_errors.HttpError as error:
        print('An error occurred: %s' % error)
        # Make the previously-implicit None return explicit.
        return None
def get_dns_rrs(dns: Resource, dns_zones: List[Dict], project_id: str) -> List[Resource]:
    """
    Returns a list of DNS Resource Record Sets within the given project.

    :type dns: The GCP DNS resource object
    :param dns: The DNS resource object created by googleapiclient.discovery.build()

    :type dns_zones: list
    :param dns_zones: List of DNS zones for the project

    :type project_id: str
    :param project_id: Current Google Project Id

    :rtype: list
    :return: List of Resource Record Sets, each tagged with its zone id
    """
    try:
        rrs: List[Resource] = []
        for zone in dns_zones:
            request = dns.resourceRecordSets().list(project=project_id, managedZone=zone['id'])
            # Follow list_next() pagination until the API returns no request.
            while request is not None:
                response = request.execute()
                for resource_record_set in response['rrsets']:
                    # Tag each record set with the zone it came from.
                    resource_record_set['zone'] = zone['id']
                    rrs.append(resource_record_set)
                request = dns.resourceRecordSets().list_next(
                    previous_request=request, previous_response=response)
        return rrs
    except HttpError as e:
        err = json.loads(e.content.decode('utf-8'))['error']
        if err.get('status', '') == 'PERMISSION_DENIED' or err.get(
                'message', '') == 'Forbidden':
            logger.warning(
                ("Could not retrieve DNS RRS on project %s due to permissions issues. Code: %s, Message: %s"
                 ),
                project_id, err['code'], err['message'],
            )
            return []
        else:
            # Re-raise with the original traceback; the unreachable
            # `raise e` that followed this in the original was dead code
            # and has been removed.
            raise
def get_spreadsheet(service: Resource, args: dict) -> CommandResults:
    '''
    Args:
        service - google-api discovery resource (google api client)
        args - demisto.args() for the api call

    output : command result

    action : gets a single or multiple spreadsheets
    '''
    spread_sheets_ids = argToList(args.get('spreadsheet_id'))
    include_grid_data = argToBoolean(args.get('include_grid_data', False))
    ranges = args.get('ranges')
    markdown = ""
    if not spread_sheets_ids:
        raise DemistoException('No spreadsheet ID given')
    if len(spread_sheets_ids) > 1:
        # Multiple spreadsheet ids: fetch each one and build a combined
        # human-readable summary; no structured outputs are returned on
        # this path.
        for spreadsheet in spread_sheets_ids:
            response = service.spreadsheets().get(
                spreadsheetId=spreadsheet).execute()
            markdown += markdown_single_get(response)
            markdown += '---\n'
        markdown = '### Success\n\n' + markdown
        return CommandResults(readable_output=markdown)
    else:
        # Single spreadsheet: optionally restrict the request to specific
        # ranges and include per-cell grid data in the response.
        ranges = default_ranges_if_not_specified(spread_sheets_ids[0], str(ranges), include_grid_data, service)
        request = service.spreadsheets().get(
            spreadsheetId=spread_sheets_ids[0], ranges=ranges, includeGridData=include_grid_data)
        response = request.execute()
        # Parse the raw response into context output; the markdown uses the
        # grid-data renderer only when grid data was requested.
        output_response = context_single_get_parse(response, include_grid_data)
        if include_grid_data:
            markdown = markdown_single_get_include_grid_data(output_response)
        else:
            markdown = markdown_single_get(response)
        markdown = '### Success\n' + markdown
        results = CommandResults(readable_output=markdown,
                                 outputs_prefix='GoogleSheets.Spreadsheet',
                                 outputs_key_field='spreadsheetId',
                                 outputs=output_response)
        return results
def create_spreadsheet(service: Resource, args: dict) -> CommandResults:
    '''
    Args:
        service (Google Resource): google-api discovery resource (google api client)
        args (dict): demisto.args() for the api call

    Returns:
        (CommandResults) command result ready for the server

    Action : creates a new spreadsheet
    '''
    # Default background color is opaque white (all RGBA components 1) when
    # the caller supplies no cell_format_backgroundColor argument.
    rgb_format = argToList(args.get('cell_format_backgroundColor'))
    rgb_format = [1, 1, 1, 1] if not rgb_format else rgb_format
    spreadsheet = {
        "properties": {
            "title": args.get('title'),
            "locale": args.get('locale', "en"),
            "defaultFormat": {
                "numberFormat": {
                    "type": args.get('cell_form_at_type', 'TEXT'),
                },
                "backgroundColor": {
                    "red": rgb_format[0],
                    "green": rgb_format[1],
                    "blue": rgb_format[2],
                    "alpha": rgb_format[3]
                },
                "textFormat": {
                    # NOTE(review): default font family 'ariel' looks like a
                    # typo for 'arial' — confirm before changing; the value
                    # is sent to the API as-is.
                    "fontFamily": args.get('cell_format_textformat_family', 'ariel'),
                    "fontSize": args.get('cell_format_textformat_font_size', 11),
                },
                "textDirection": args.get('cell_format_text_direction', 'LEFT_TO_RIGHT'),
            }
        },
        "sheets": [{
            "properties": {
                "title": args.get('sheet_title'),
                "sheetType": args.get('sheet_type', "GRID"),
            }
        }]
    }
    # Strip empty keys so the API request contains only supplied fields.
    spreadsheet = remove_empty_elements(spreadsheet)
    response = service.spreadsheets().create(body=spreadsheet).execute()
    human_readable = {
        'spreadsheet Id': response.get('spreadsheetId'),
        'spreadsheet title': response.get('properties').get('title'),
    }
    markdown = tableToMarkdown('Successfully created a spreadsheet', human_readable,
                               headers=['spreadsheet Id', 'spreadsheet title'])
    results = CommandResults(readable_output=markdown,
                             outputs_prefix='GoogleSheets.Spreadsheet',
                             outputs_key_field='spreadsheetId',
                             outputs=response)
    return results
def get_gcp_vpcs(projectid: str, compute: Resource) -> Resource:
    """
    Get VPC data for given project

    :param projectid: The project ID
    :param compute: The compute resource object created by googleapiclient.discovery.build()
    :return: VPC response object
    """
    return compute.networks().list(project=projectid).execute()
def _exists_resource(
        self, collection: discovery.Resource,
        filter: str) -> bool:  # pylint: disable=redefined-builtin
    """Return truthy when at least one resource matches the filter.

    Issues a list() call capped at a single result and checks for a
    non-empty 'items' field in the response.

    Raises:
        ValueError: if the list response lacks the 'kind' field.
    """
    resp = collection.list(
        project=self.project, filter=filter,
        maxResults=1).execute(num_retries=self._GCP_API_RETRIES)
    if 'kind' not in resp:
        # TODO(sergiitk): better error
        raise ValueError('List response "kind" is missing')
    return 'items' in resp and resp['items']
def _delete_resource(self, collection: discovery.Resource, full_name: str):
    """Delete a resource by name, logging (not raising) HTTP failures.

    Args:
        collection: API collection supporting delete(name=...).
        full_name: Fully qualified name of the resource to delete.
    """
    logger.debug("Deleting %s", full_name)
    request = collection.delete(name=full_name)
    try:
        self._execute(request)
    except googleapiclient.errors.HttpError as error:
        # noinspection PyProtectedMember
        reason = error._get_reason()
        logger.info('Delete failed. Error: %s %s', error.resp.status, reason)
def get_old_events(service: Resource, calendar_id: str) -> List[Dict]:
    """Return all events that ended more than 30 days ago.

    Pages through events().list / list_next until the feed is exhausted.

    Args:
        service: Google Calendar API client resource.
        calendar_id: Id of the calendar to read.

    Returns:
        List of event dicts whose start time is before one month ago.
    """
    # 'Z' marks the timestamp as UTC for the Calendar API.
    cutoff = get_time_n_days_ago(30).isoformat() + "Z"
    events: List[Dict] = []
    request = service.events().list(
        calendarId=calendar_id,
        timeMax=cutoff,
        singleEvents=True,
        orderBy="startTime",
    )
    while request is not None:
        page = request.execute()
        events.extend(page.get("items", []))
        request = service.events().list_next(request, page)
    return events
def get_members_for_group(admin: Resource, group_email: str) -> List[Dict]:
    """
    Get all members for a google group

    :param admin: Google admin directory API client resource
    :param group_email: A string representing the email address for the group

    :return: List of dictionaries representing Users or Groups.
    """
    members: List[Dict] = []
    request = admin.members().list(
        groupKey=group_email,
        maxResults=500,
    )
    # Page through list/list_next until the API stops returning a request.
    while request is not None:
        resp = request.execute(num_retries=GOOGLE_API_NUM_RETRIES)
        members.extend(resp.get('members', []))
        request = admin.members().list_next(request, resp)
    return members
def __read_papi_v2beta_operation_metadata(
        operation_id: str, genomics_v2beta_client: Resource) -> Mapping[str, Any]:
    """Reads the operations metadata for a pipelines API v2beta job ID.

    Args:
        operation_id: Fully qualified PAPI v2beta operation name.
        genomics_v2beta_client: Genomics v2beta API client resource.

    Returns:
        The operation metadata as a python dict.
    """
    logger.info(
        f'Reading PAPI v2beta operation metadata for {operation_id}...')
    operations = genomics_v2beta_client.projects().locations().operations()
    return operations.get(name=operation_id).execute()
def get_gcp_global_forwarding_rules(project_id: str, compute: Resource) -> Resource:
    """
    Return list of all global forwarding rules in the given project_id and region

    :param project_id: The project ID
    :param compute: The compute resource object created by googleapiclient.discovery.build()
    :return: Response object containing data on all GCP forwarding rules for a given project
    """
    return compute.globalForwardingRules().list(project=project_id).execute()
def get_channel_videos(youtube: d.Resource):
    """Return every uploaded video of the authenticated user's channel.

    Resolves the channel's 'uploads' playlist, then pages through
    playlistItems().list 50 items at a time until no next page remains.

    Args:
        youtube: YouTube API client resource authorized as the user.

    Returns:
        List of playlistItem dicts, one per uploaded video.
    """
    # The original used an f-string with no placeholder (lint F541).
    print("Get videos for my channel")
    request = youtube.channels().list(mine=True, part='contentDetails')
    uploads = request.execute()
    playlist_id = \
        uploads["items"][0]["contentDetails"]["relatedPlaylists"]["uploads"]

    uploaded_videos = []
    next_page_token = None
    while True:  # idiomatic spelling of the original `while 1`
        res = youtube.playlistItems().list(
            playlistId=playlist_id,
            part='snippet',
            maxResults=50,
            pageToken=next_page_token).execute()
        uploaded_videos += res['items']
        next_page_token = res.get('nextPageToken')
        if next_page_token is None:
            break
    return uploaded_videos
def get_youtube_playlists(youtube: d.Resource):
    """Map each of the user's playlist titles to its playlist id.

    Fetches up to 25 playlists owned by the authenticated user; if two
    playlists share a title, the later one wins (as in the original loop).

    Args:
        youtube: YouTube API client resource.

    Returns:
        Dict of {playlist title: playlist id}.
    """
    response = youtube.playlists().list(
        part="snippet,contentDetails", mine=True, maxResults=25).execute()
    return {item['snippet']['title']: item['id']
            for item in response['items']}
def get_all_messages(service: Resource, next_page: str, q: str,
                     limit: int) -> list:
    """Accumulate Gmail messages page by page.

    Requests pages via the nextPageToken until either no token remains or
    at least `limit` messages have been gathered (a final page may push the
    total past `limit`). A falsy initial token fetches nothing.

    Args:
        service: Gmail API client resource.
        next_page: Token of the first page to fetch.
        q: Gmail search query string.
        limit: Soft cap on the number of messages to collect.

    Returns:
        List of message dicts.
    """
    messages: list = []
    while next_page and len(messages) < limit:
        page = service.users().messages().list(
            userId='me', q=q, pageToken=next_page).execute()
        messages.extend(page.get('messages', []))
        next_page = page.get('nextPageToken')
    return messages
def get_gcp_firewall_ingress_rules(project_id: str, compute: Resource) -> Resource:
    """
    Get ingress Firewall data for a given project

    :param project_id: The project ID to get firewalls for
    :param compute: The compute resource object created by googleapiclient.discovery.build()
    :return: Firewall response object
    """
    return compute.firewalls().list(
        project=project_id, filter='(direction="INGRESS")').execute()
def get_team_drive(service: Resource):
    """
    Get the aerial baboons team drive.

    Lists the shared drives visible to the service account and returns the
    first one named E4E_Aerial_Baboons.

    Raises:
        IndexError: if no drive with that name is visible.
    """
    response = (
        service.drives()  # pylint: disable=maybe-no-member
        .list(fields="nextPageToken, drives(id, name)").execute())
    matches = [drive for drive in response.get("drives", [])
               if drive["name"] == "E4E_Aerial_Baboons"]
    return matches[0]
def get_gcp_subnets(projectid: str, region: str, compute: Resource) -> Resource:
    """
    Return list of all subnets in the given projectid and region

    :param projectid: The projectid
    :param region: The region to pull subnets from
    :param compute: The compute resource object created by googleapiclient.discovery.build()
    :return: Response object containing data on all GCP subnets for a given project
    """
    return compute.subnetworks().list(project=projectid, region=region).execute()
def get_upload_playlist_id(youtube: api.Resource, channel_id: str):
    """Return the id of the channel's 'uploads' playlist, or None.

    Bumps the module-level _total_quota_usage counter by one for the single
    channels().list call made here.

    Args:
        youtube: YouTube API client resource.
        channel_id: Id of the channel to inspect.

    Returns:
        The uploads playlist id, or None when the channel does not exist or
        exposes no contentDetails.
    """
    global _total_quota_usage
    _total_quota_usage += 1

    request = youtube.channels().list(part="contentDetails", id=channel_id)
    response = YouTubeChannelListResponse.from_dict(request.execute())

    if response.pageInfo.totalResults == 0:
        return None
    details = response.items[0].contentDetails
    if details is None:
        return None
    return details.relatedPlaylists.uploads
def transform_gcp_forwarding_rules(fwd_response: Resource) -> List[Dict]:
    """
    Add additional fields to the forwarding rule object to make it easier to
    process in `load_gcp_forwarding_rules()`.

    :param fwd_response: The response object returned from compute.forwardRules.list()
    :return: A transformed fwd_response
    """
    prefix = fwd_response['id']
    project_id = prefix.split('/')[1]

    fwd_list: List[Dict] = []
    for fwd in fwd_response.get('items', []):
        partial_uri = f"{prefix}/{fwd['name']}"
        # Region looks like
        # "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region name}"
        region = fwd.get('region')
        target = fwd.get('target')

        rule: Dict[str, Any] = {
            'id': partial_uri,
            'partial_uri': partial_uri,
            'project_id': project_id,
            'region': region.split('/')[-1] if region else None,
            'ip_address': fwd.get('IPAddress'),
            'ip_protocol': fwd.get('IPProtocol'),
            'allow_global_access': fwd.get('allowGlobalAccess'),
            'load_balancing_scheme': fwd.get('loadBalancingScheme'),
            'name': fwd.get('name'),
            'port_range': fwd.get('portRange'),
            'ports': fwd.get('ports'),
            'self_link': fwd.get('selfLink'),
            'target': (_parse_compute_full_uri_to_partial_uri(target)
                       if target else None),
        }

        # network/subnetwork keys are only present when the API returned them.
        network = fwd.get('network')
        if network:
            rule['network'] = network
            rule['network_partial_uri'] = _parse_compute_full_uri_to_partial_uri(
                network)

        subnetwork = fwd.get('subnetwork')
        if subnetwork:
            rule['subnetwork'] = subnetwork
            rule['subnetwork_partial_uri'] = _parse_compute_full_uri_to_partial_uri(
                subnetwork)

        fwd_list.append(rule)
    return fwd_list
def get_report(analytics: Resource) -> Dict:
    """Query 30 days of unique pageviews per page on kotlinlang.org.

    Restricts hits to the kotlinlang.org hostname, excludes paths containing
    '?', and orders by unique pageviews, descending.

    Args:
        analytics: Google Analytics Reporting API client resource.

    Returns:
        The raw batchGet response as a dict.
    """
    body = {
        "reportRequests": [{
            "viewId": "85132606",
            "samplingLevel": "LARGE",
            "filtersExpression": "ga:hostname==kotlinlang.org;ga:pagepath!@?",
            "pageSize": 10000,
            "orderBys": [{"fieldName": "ga:uniquepageviews",
                          "sortOrder": "DESCENDING"}],
            "dateRanges": [{"startDate": "30daysAgo",
                            "endDate": "yesterday"}],
            "metrics": [{"expression": "ga:uniquepageviews", "alias": ""}],
            "dimensions": [{"name": "ga:pagePath"}],
        }],
    }
    return analytics.reports().batchGet(body=body).execute()