def buildCalendarDataGAPIObject(calname):
    """Return a (calendarId, calendar service object) pair for calname.

    Tries to impersonate the calendar owner first. Resource calendars
    (addresses ending in '.calendar.google.com') cannot be impersonated,
    and any failed impersonation also falls back to authenticating as
    the admin.
    """
    calendarId = normalizeCalendarId(calname)
    is_resource_calendar = calname.endswith('.calendar.google.com')
    cal = None if is_resource_calendar else gam.buildGAPIServiceObject(
        'calendar', calendarId, False)
    if cal is None:
        # Impersonation unavailable or failed: authenticate as the admin.
        _, cal = buildCalendarGAPIObject(gam._get_admin_email())
    return (calendarId, cal)
def write_csv_file(csvRows, titles, list_type, todrive):
    """Filter and emit csvRows as CSV output.

    Applies the configured row keep/drop filters and header keep/drop
    filters (GC_CSV_ROW_FILTER, GC_CSV_ROW_DROP_FILTER,
    GC_CSV_HEADER_FILTER, GC_CSV_HEADER_DROP_FILTER), then writes the
    result to stdout or, when todrive is True, uploads it to Google
    Drive (as a Google Sheet when it fits, otherwise as plain CSV) and
    shows or mails the resulting file link.

    Args:
      csvRows: list of dicts, one per output row.
      titles: list of column names; also used as the header row.
      list_type: human-readable report name, used in the Drive file name.
      todrive: bool; upload to Drive instead of writing to stdout.
    """

    def rowDateTimeFilterMatch(dateMode, rowDate, op, filterDate):
        """Compare a row's date/time string to filterDate with operator op.

        Non-string/empty values never match; unparseable strings compare
        as NEVER_TIME. When dateMode, only the date part is compared.
        """
        if not rowDate or not isinstance(rowDate, str):
            return False
        try:
            rowTime = dateutil.parser.parse(rowDate, ignoretz=True)
            if dateMode:
                # Truncate to midnight so only the date portion matters.
                rowDate = datetime.datetime(rowTime.year, rowTime.month,
                                            rowTime.day).isoformat() + 'Z'
        except ValueError:
            rowDate = NEVER_TIME
        if op == '<':
            return rowDate < filterDate
        if op == '<=':
            return rowDate <= filterDate
        if op == '>':
            return rowDate > filterDate
        if op == '>=':
            return rowDate >= filterDate
        if op == '!=':
            return rowDate != filterDate
        return rowDate == filterDate

    def rowCountFilterMatch(rowCount, op, filterCount):
        """Compare a numeric row value to filterCount with operator op.

        Digit-only strings are coerced to int; any other non-int value
        never matches.
        """
        if isinstance(rowCount, str):
            if not rowCount.isdigit():
                return False
            rowCount = int(rowCount)
        elif not isinstance(rowCount, int):
            return False
        if op == '<':
            return rowCount < filterCount
        if op == '<=':
            return rowCount <= filterCount
        if op == '>':
            return rowCount > filterCount
        if op == '>=':
            return rowCount >= filterCount
        if op == '!=':
            return rowCount != filterCount
        return rowCount == filterCount

    def rowBooleanFilterMatch(rowBoolean, filterBoolean):
        """Match only genuine bools equal to filterBoolean."""
        if not isinstance(rowBoolean, bool):
            return False
        return rowBoolean == filterBoolean

    def headerFilterMatch(filters, title):
        """Return True if any compiled pattern in filters matches title."""
        for filterStr in filters:
            if filterStr.match(title):
                return True
        return False

    def rowFilterMatch(filters, columns, row):
        """Return True if row satisfies ANY configured column filter.

        filters maps a column pattern to a tuple whose [1] element is the
        filter type (regex/notregex/date/time/count/boolean) and whose
        remaining elements are the operator/value; columns maps the same
        pattern to the actual matching column names.
        """
        for c, filterVal in iter(filters.items()):
            for column in columns[c]:
                if filterVal[1] == 'regex':
                    if filterVal[2].search(str(row.get(column, ''))):
                        return True
                elif filterVal[1] == 'notregex':
                    if not filterVal[2].search(str(row.get(column, ''))):
                        return True
                elif filterVal[1] in ['date', 'time']:
                    if rowDateTimeFilterMatch(filterVal[1] == 'date',
                                              row.get(column, ''),
                                              filterVal[2], filterVal[3]):
                        return True
                elif filterVal[1] == 'count':
                    if rowCountFilterMatch(row.get(column, 0), filterVal[2],
                                           filterVal[3]):
                        return True
                else:  #boolean
                    if rowBooleanFilterMatch(row.get(column, False),
                                             filterVal[2]):
                        return True
        return False

    # Row filtering: resolve each filter's column pattern against the
    # actual titles, then keep rows that match the keep filter (if any)
    # and do not match the drop filter (if any).
    if GC_Values[GC_CSV_ROW_FILTER] or GC_Values[GC_CSV_ROW_DROP_FILTER]:
        if GC_Values[GC_CSV_ROW_FILTER]:
            keepColumns = {}
            for column, filterVal in iter(
                    GC_Values[GC_CSV_ROW_FILTER].items()):
                columns = [t for t in titles if filterVal[0].match(t)]
                if columns:
                    keepColumns[column] = columns
                else:
                    # No matching output column: warn but keep a [None]
                    # placeholder so rowFilterMatch still iterates.
                    keepColumns[column] = [None]
                    sys.stderr.write(
                        f'WARNING: Row filter column pattern "{column}" does not match any output columns\n'
                    )
        else:
            keepColumns = None
        if GC_Values[GC_CSV_ROW_DROP_FILTER]:
            dropColumns = {}
            for column, filterVal in iter(
                    GC_Values[GC_CSV_ROW_DROP_FILTER].items()):
                columns = [t for t in titles if filterVal[0].match(t)]
                if columns:
                    dropColumns[column] = columns
                else:
                    dropColumns[column] = [None]
                    sys.stderr.write(
                        f'WARNING: Row drop filter column pattern "{column}" does not match any output columns\n'
                    )
        else:
            dropColumns = None
        rows = []
        for row in csvRows:
            if (((keepColumns is None) or rowFilterMatch(
                    GC_Values[GC_CSV_ROW_FILTER], keepColumns, row)) and
                ((dropColumns is None) or not rowFilterMatch(
                    GC_Values[GC_CSV_ROW_DROP_FILTER], dropColumns, row))):
                rows.append(row)
        csvRows = rows
    # Header filtering: drop filter is applied before the keep filter.
    if GC_Values[GC_CSV_HEADER_FILTER] or GC_Values[GC_CSV_HEADER_DROP_FILTER]:
        if GC_Values[GC_CSV_HEADER_DROP_FILTER]:
            titles = [
                t for t in titles
                if not headerFilterMatch(GC_Values[GC_CSV_HEADER_DROP_FILTER],
                                         t)
            ]
        if GC_Values[GC_CSV_HEADER_FILTER]:
            titles = [
                t for t in titles
                if headerFilterMatch(GC_Values[GC_CSV_HEADER_FILTER], t)
            ]
            if not titles:
                controlflow.system_error_exit(
                    3,
                    'No columns selected with GAM_CSV_HEADER_FILTER and GAM_CSV_HEADER_DROP_FILTER\n'
                )
                return
    csv.register_dialect('nixstdout', lineterminator='\n')
    # Buffer in memory when uploading to Drive; otherwise stream to stdout.
    if todrive:
        write_to = io.StringIO()
    else:
        write_to = sys.stdout
    writer = csv.DictWriter(write_to,
                            fieldnames=titles,
                            dialect='nixstdout',
                            extrasaction='ignore',
                            quoting=csv.QUOTE_MINIMAL)
    try:
        # Header row is written as a data row mapping each title to itself.
        writer.writerow(dict((item, item) for item in writer.fieldnames))
        writer.writerows(csvRows)
    except IOError as e:
        controlflow.system_error_exit(6, e)
    if todrive:
        admin_email = gam._get_admin_email()
        _, drive = gam.buildDrive3GAPIObject(admin_email)
        if not drive:
            print(f'''\nGAM is not authorized to create Drive files. Please run:

gam user {admin_email} check serviceaccount

and follow recommend steps to authorize GAM for Drive access.''')
            sys.exit(5)
        # Upload as a Google Sheet unless the data exceeds the Sheets
        # cell-count or import-size limits; fall back to plain CSV then.
        result = gapi.call(drive.about(), 'get', fields='maxImportSizes')
        columns = len(titles)
        rows = len(csvRows)
        cell_count = rows * columns
        data_size = len(write_to.getvalue())
        max_sheet_bytes = int(
            result['maxImportSizes'][MIMETYPE_GA_SPREADSHEET])
        if cell_count > MAX_GOOGLE_SHEET_CELLS or data_size > max_sheet_bytes:
            print(
                f'{WARNING_PREFIX}{MESSAGE_RESULTS_TOO_LARGE_FOR_GOOGLE_SPREADSHEET}'
            )
            mimeType = 'text/csv'
        else:
            mimeType = MIMETYPE_GA_SPREADSHEET
        body = {
            'description': QuotedArgumentList(sys.argv),
            'name': f'{GC_Values[GC_DOMAIN]} - {list_type}',
            'mimeType': mimeType
        }
        result = gapi.call(drive.files(),
                           'create',
                           fields='webViewLink',
                           body=body,
                           media_body=googleapiclient.http.MediaInMemoryUpload(
                               write_to.getvalue().encode(),
                               mimetype='text/csv'))
        file_url = result['webViewLink']
        # Without a browser, mail the link to the admin and print it.
        if GC_Values[GC_NO_BROWSER]:
            msg_txt = f'Drive file uploaded to:\n {file_url}'
            msg_subj = f'{GC_Values[GC_DOMAIN]} - {list_type}'
            gam.send_email(msg_subj, msg_txt)
            print(msg_txt)
        else:
            webbrowser.open(file_url)
def showUsageParameters():
    """Implement 'gam report usageparameters <user|customer> [todrive]'.

    Lists the parameter names available from the customer or user usage
    report. Starts at today's date and, guided by API warnings and
    invalid-date errors, walks back until a date with report data is
    found, then writes the sorted parameter names as a CSV report.
    """
    rep = build()
    throw_reasons = [
        gapi.errors.ErrorReason.INVALID, gapi.errors.ErrorReason.BAD_REQUEST
    ]
    todrive = False
    # argv[3] must name the report type; exit with usage help if absent.
    if len(sys.argv) == 3:
        controlflow.missing_argument_exit('user or customer',
                                          'report usageparameters')
    report = sys.argv[3].lower()
    titles = ['parameter']
    if report == 'customer':
        endpoint = rep.customerUsageReports()
        kwargs = {}
    elif report == 'user':
        # User reports need a userKey; query the admin's own report.
        endpoint = rep.userUsageReport()
        kwargs = {'userKey': gam._get_admin_email()}
    else:
        controlflow.expected_argument_exit('usageparameters',
                                           ['user', 'customer'], report)
    customerId = GC_Values[GC_CUSTOMER_ID]
    if customerId == MY_CUSTOMER:
        # The Reports API expects the customerId to be omitted for
        # the my_customer alias.
        customerId = None
    tryDate = datetime.date.today().strftime(YYYYMMDD_FORMAT)
    all_parameters = set()
    # Parse remaining command-line arguments (only 'todrive' is valid).
    i = 4
    while i < len(sys.argv):
        myarg = sys.argv[i].lower().replace('_', '')
        if myarg == 'todrive':
            todrive = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i],
                                              'gam report usageparameters')
    fullDataRequired = ['all']
    # Retry loop: _check_full_data_available adjusts tryDate backwards
    # until full data exists (returns 1) or reports none ever will (< 0);
    # invalid-date errors are translated to an earlier date by _adjust_date.
    while True:
        try:
            result = gapi.call(
                endpoint,
                'get',
                throw_reasons=throw_reasons,
                date=tryDate,
                customerId=customerId,
                fields='warnings,usageReports(parameters(name))',
                **kwargs)
            warnings = result.get('warnings', [])
            usage = result.get('usageReports')
            has_reports = bool(usage)
            fullData, tryDate = _check_full_data_available(
                warnings, tryDate, fullDataRequired, has_reports)
            if fullData < 0:
                print('No usage parameters available.')
                sys.exit(1)
            if has_reports:
                # Collect parameter names from the first usage report;
                # parameters without a name are skipped.
                for parameter in usage[0]['parameters']:
                    name = parameter.get('name')
                    if name:
                        all_parameters.add(name)
            if fullData == 1:
                break
        except gapi.errors.GapiInvalidError as e:
            tryDate = _adjust_date(str(e))
    csvRows = []
    for parameter in sorted(all_parameters):
        csvRows.append({'parameter': parameter})
    display.write_csv_file(csvRows, titles,
                           f'{report.capitalize()} Report Usage Parameters',
                           todrive)
def build_dwd(api='cloudidentity'):
    """Return a service object for api built with domain-wide delegation.

    Authenticates as the admin account via the GAM service account.
    """
    return gam.buildGAPIServiceObject(api, gam._get_admin_email(), True)