def validate(event) -> Union[bool, Response]:
    """Validate that an incoming request genuinely comes from Slack.

    Verifies the ``X-Slack-Signature`` HMAC-SHA256 against the shared
    signing secret, and answers Slack's ``url_verification`` handshake.

    :param event: API Gateway event with ``body`` and ``headers``.
    :return: ``True`` when the request is authentic, ``False`` when it is
        not, or a :class:`Response` echoing the challenge for
        ``url_verification`` events.
    """
    from time import time

    body = event["body"]
    headers = event["headers"]

    if ("X-Slack-Signature" not in headers) or (
            "X-Slack-Request-Timestamp" not in headers):
        return False

    received_signature = headers["X-Slack-Signature"]
    slack_timestamp = headers["X-Slack-Request-Timestamp"]

    # Reject stale (or unparsable) timestamps to prevent replay attacks,
    # as required by Slack's request-verification guide (5 minute window).
    try:
        if abs(time() - int(slack_timestamp)) > 60 * 5:
            return False
    except ValueError:
        return False

    shared_secret = SSM(with_decryption=True).get('slack_signing_secret')
    # NOTE(review): Slack signs the raw body; the .strip() here matches the
    # original implementation — confirm the deployed gateway strips bodies
    # the same way, otherwise signatures would already be failing.
    base_string = 'v0:{}:{}'.format(slack_timestamp, body.strip()).encode()
    calculated_signature = 'v0={}'.format(
        hmac.new(shared_secret.encode(), base_string,
                 hashlib.sha256).hexdigest())

    # compare_digest avoids leaking match position via timing.
    if not hmac.compare_digest(calculated_signature, received_signature):
        return False

    data = loads(event['body'])
    if data['type'] == 'url_verification':
        return Response(body=dumps({'challenge': data['challenge']}))
    return True
def ingest(event) -> Data:
    """Run the configured UBW report template and return its parsed rows.

    Reads credentials and the template id from SSM, queries the UBW
    QueryEngine SOAP service, and wraps the parsed XML rows in a
    :class:`Data` payload stamped with the current time.
    """
    username, password, client, template_id = SSM(with_decryption=True).get(
        'UBW_USERNAME', 'UBW_PASSWORD', 'UBW_CLIENT', 'UBW_TEMPLATE_ID')
    url = SSM().get('UBW_URL')

    soap_client = Client(
        wsdl=f'{url}?QueryEngineService/QueryEngineV200606DotNet')

    # Template options: full, unpaginated result (-1 == no record limit).
    template_input = {
        'TemplateId': template_id,
        'TemplateResultOptions': {
            'ShowDescriptions': True,
            'Aggregated': True,
            'OverrideAggregation': False,
            'CalculateFormulas': True,
            'FormatAlternativeBreakColumns': True,
            'RemoveHiddenColumns': False,
            'FirstRecord': -1,
            'LastRecord': -1
        }
    }
    credentials = {
        'Username': username,
        'Client': client,
        'Password': password,
    }

    res = soap_client.service.GetTemplateResultAsXML(input=template_input,
                                                     credentials=credentials)
    rows = parse(res['TemplateResult'])['Agresso']['AgressoQE']
    return Data(metadata=Metadata(timestamp=datetime.now().timestamp()),
                data=rows)
def register_or_update_webhooks():
    """Register the Jira sales webhook for this stage, or update it in place.

    Looks up an existing webhook by its stage-qualified name; POSTs a new
    webhook when none exists, otherwise PUTs the payload to the webhook's
    own URL. Prints the outcome, or the error messages Jira returned.
    """
    stage = environ['STAGE']
    password, username, webhook_secret = SSM(with_decryption=True).get(
        'jira_sales_password', 'jira_sales_username', 'jira_webhook_secret')
    webhook_url = SSM().get('jira_sales_webhook_url')
    auth = HTTPBasicAuth(username, password)

    webhook_name = f"{stage}-jiraSalesWebhook"
    # Find an already-registered webhook with our name, if any.
    existing = next(
        (hook for hook in get(webhook_url, auth=auth).json()
         if hook['name'] == webhook_name),
        None)

    payload = {
        "name": webhook_name,
        "url": f"https://9fdzlk5wrk.execute-api.eu-central-1.amazonaws.com/{stage}/jira-sales-webhook/{webhook_secret}",
        "events": [
            "jira:issue_created",
            "jira:issue_updated"
        ],
        "filters": {
            "issue-related-events-section":
                "project = SALG and status != 'Rejected'"
        },
        "excludeBody": False
    }

    if existing is None:
        res = post(webhook_url, auth=auth, json=payload)
    else:
        res = put(existing['self'], auth=auth, json=payload)

    # 201 == created, 200 == updated; anything else carries Jira messages.
    if res.status_code in (201, 200):
        print(f'Webhook {"registered" if res.status_code == 201 else "updated"}')
    else:
        print('\n'.join([' '.join(x.values()) for x in res.json()['messages']]))
def validate_signature(body, received_signature):
    """Verify a GitHub webhook signature of the form ``sha1=<hexdigest>``.

    :param body: raw request body (str).
    :param received_signature: the ``X-Hub-Signature`` header value.
    :return: True when the HMAC-SHA1 of the body matches the signature.
    """
    # Exactly one '=' is expected; anything else raises ValueError,
    # matching the header format GitHub sends.
    hash_type, signature = received_signature.split("=")
    if hash_type != "sha1":
        return False

    shared_secret = SSM(with_decryption=True).get('github_shared_secret')
    expected = hmac.new(shared_secret.encode(), body.encode(),
                        hashlib.sha1).hexdigest()
    # Constant-time comparison to avoid timing side channels.
    return hmac.compare_digest(expected, signature)
def ingest(event) -> Data:
    """Fetch every GitHub repository (following pagination) and map each
    to a flat data point.

    :param event: unused Lambda event.
    :return: Data payload with one dict per repository.
    """
    api_token = SSM(with_decryption=True).get('github_api_token')
    auth_headers = {'Authorization': f'Bearer {api_token}'}

    res = requests.get(url, headers=auth_headers)
    repos = res.json()
    while 'next' in res.links:
        # BUG FIX: the original dropped the Authorization header on
        # paginated requests, so every page after the first was fetched
        # unauthenticated (breaks private repos and hits the anonymous
        # rate limit). Reuse the same headers for every page.
        res = requests.get(res.links['next']['url'], headers=auth_headers)
        repos.extend(res.json())

    def to_timestamp(date):
        # The API returns ISO-8601 strings; ints pass straight through.
        return int(isoparse(date).timestamp()) if isinstance(
            date, str) else int(date)

    def data_point(repo):
        # TODO: Move hard coding of values to another file?
        return {
            'id': repo['id'],
            'name': repo['name'],
            'description': repo['description'],
            'url': repo['url'],
            'html_url': repo['html_url'],
            'owner': repo['owner']['login'],
            'created_at': to_timestamp(repo['created_at']),
            'updated_at': to_timestamp(repo['updated_at']),
            'pushed_at': to_timestamp(repo['pushed_at']),
            'language': repo['language'],
            'forks_count': repo['forks_count'],
            'stargazers_count': repo['stargazers_count'],
            'default_branch': repo['default_branch']
        }

    return Data(metadata=Metadata(timestamp=datetime.now().timestamp()),
                data=[data_point(repo) for repo in repos])
def ingest(event) -> Data:
    """Fetch all non-rejected SALG issues from Jira and flatten them.

    Ensures the webhook registration is current, runs a JQL search, and
    returns one record per issue with customer label, status and
    created/updated epoch timestamps.
    """
    register_or_update_webhooks()

    password, username = SSM(with_decryption=True).get(
        'jira_sales_password', 'jira_sales_username')
    search_url = SSM(with_decryption=False).get('jira_sales_search_url')

    res = get(search_url,
              auth=HTTPBasicAuth(username, password),
              json={
                  'jql': "project = SALG and status != 'Rejected'",
                  'fields': 'labels, status, created, updated'
              })

    def to_record(item):
        # The first label is treated as the customer name; '' when absent.
        fields = item['fields']
        labels = fields['labels']
        return {
            'issue': item['key'],
            'customer': labels[0] if len(labels) > 0 else '',
            'issue_status': fields['status']['name'],
            'created': int(isoparse(fields['created']).timestamp()),
            'updated': int(isoparse(fields['updated']).timestamp())
        }

    records = [to_record(item) for item in res.json().get('issues', [])]
    return Data(metadata=Metadata(timestamp=datetime.now().timestamp()),
                data=records)
def ingest(event) -> Data:
    """Collect Twitter data about Knowit and bundle it into one payload.

    Gathers three datasets: public tweets mentioning Knowit (``search``),
    tweets posted by Knowit's own accounts (``timeline``), and per-account
    statistics (``accounts``).

    NOTE(review): the SSM parameter names spell "comsumer" — kept as-is,
    they must match the names stored in SSM.
    """
    consumer_key, consumer_secret, access_token, access_secret = SSM(
        with_decryption=True).get('twitter_comsumer_key',
                                  'twitter_comsumer_secret',
                                  'twitter_access_token',
                                  'twitter_access_secret')
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    api = API(auth)

    # Search terms covering Knowit's various company names and handles;
    # multi-word terms are quoted so Twitter matches the exact phrase.
    search_args = [
        'knowit', '"knowit objectnet"', '"knowit amende"',
        '"knowit solutions"', '"knowit experience"', '"knowit insight"',
        'knowitab', 'knowitnorge', 'knowit norge', '"knowit stavanger"',
        'knowit bergen', 'knowit oslo', 'knowit sverige', 'knowit norway',
        'knowit sweden', 'knowit finland', 'knowitx'
    ]
    # Knowit's own accounts: excluded from search hits, used for timelines.
    knowit_accounts = ['knowitnorge', 'knowitab', 'KnowitSuomi', 'knowitx']

    def search_data():
        # Tweets mentioning Knowit, excluding Knowit's own accounts.
        # The bare 'knowit' term is restricted to Norwegian tweets to cut
        # down unrelated hits; quoted phrases get no language filter.
        search_result = np.hstack([[
            item for item in Cursor(api.search,
                                    q=arg,
                                    lang='no' if arg == 'knowit' else None,
                                    tweet_mode='extended').items()
            if item.user.screen_name not in knowit_accounts
        ] for arg in search_args])
        return [{
            'tweet_id': item.id,
            'created_at': int(item.created_at.timestamp()),
            'text': item.full_text,
            'is_retweet': item.full_text.startswith('RT @'),
            'favorite_count': item.favorite_count,
            'retweet_count': item.retweet_count,
            'language': item.lang,
            'hashtags': as_separated_list(item.entities['hashtags'], 'text'),
            'place': item.place.full_name if item.place else None,
            # Only record the reply target when it is one of our accounts.
            'reply_to': item.in_reply_to_screen_name
            if item.in_reply_to_screen_name
            and item.in_reply_to_screen_name in knowit_accounts else None
        } for item in search_result]

    def timeline_data():
        # Tweets posted by Knowit's own accounts.
        timeline_result = np.hstack([[
            item for item in Cursor(api.user_timeline,
                                    screen_name=account,
                                    tweet_mode='extended').items()
        ] for account in knowit_accounts])
        return [{
            'tweet_id': item.id,
            'created_at': int(item.created_at.timestamp()),
            'user_screen_name': item.user.screen_name,
            'text': item.full_text,
            'is_retweet': item.full_text.startswith('RT @'),
            'favorite_count': item.favorite_count,
            'retweet_count': item.retweet_count,
            'language': item.lang,
            'hashtags': as_separated_list(item.entities['hashtags'], 'text'),
            'mentions': as_separated_list(item.entities['user_mentions'],
                                          'screen_name'),
            'user_name': item.user.name
        } for item in timeline_result]

    def account_data():
        # Follower/statuses statistics for Knowit's own accounts.
        account_result = [
            api.get_user(screen_name=account) for account in knowit_accounts
        ]
        return [{
            'user_id': item.id,
            'screen_name': item.screen_name,
            'name': item.name,
            'statuses_count': item.statuses_count,
            'followers_count': item.followers_count,
            'favourites_count': item.favourites_count,
            'friends_count': item.friends_count,
            'listed_count': item.listed_count
        } for item in account_result]

    return Data(metadata=Metadata(timestamp=datetime.now().timestamp()),
                data={
                    'search': search_data(),
                    'timeline': timeline_data(),
                    'accounts': account_data()
                })
def ingest(event) -> Data:
    """Fetch UBW timesheet records and keep only completed ("B") documents
    that are at least four weeks old."""

    def ubw_record_filter(record):
        # Records must carry both fields the filter inspects.
        if "tab" not in record or "reg_period" not in record:
            return False
        # Only the "B" documents are completed, the rest should be ignored.
        if record["tab"] != "B":
            return False
        # Only upload docs that are older than 4 weeks.
        # reg_period is "YYYYWW"; both sides are flattened onto a
        # year*52+week scale, which is approximate for 53-week ISO years
        # but close enough for a 4-week cut-off.
        year, week = record["reg_period"][0:4], record["reg_period"][4:]
        cur_year, cur_week = datetime.now().isocalendar()[0:2]
        number_of_weeks = int(year) * 52 + int(week)
        current_number_of_weeks = cur_year * 52 + cur_week
        if number_of_weeks > current_number_of_weeks - 4:
            return False
        return True

    username, password, client, template_id = SSM(
        with_decryption=True
    ).get('UBW_USERNAME', 'UBW_PASSWORD', 'UBW_CLIENT', 'UBW_TEMPLATE_ID')
    url = SSM().get('UBW_URL')
    soap_client = Client(
        wsdl=f'{url}?QueryEngineService/QueryEngineV200606DotNet')
    # Run the report template. The search criterion restricts the
    # 'timecode' column with '!()' (exclusion) on the value 'X9'.
    res = soap_client.service.GetTemplateResultAsXML(
        input={
            'TemplateId': template_id,
            'TemplateResultOptions': {
                'ShowDescriptions': True,
                'Aggregated': True,
                'OverrideAggregation': False,
                'CalculateFormulas': True,
                'FormatAlternativeBreakColumns': True,
                'RemoveHiddenColumns': False,
                'FirstRecord': -1,
                'LastRecord': -1
            },
            'SearchCriteriaPropertiesList': {
                'SearchCriteriaProperties': [
                    {
                        'ColumnName': 'timecode',
                        'Description': 'Tidskode',
                        'RestrictionType': '!()',
                        'FromValue': "'X9'",
                        'DataType': 10,
                        'DataLength': 25,
                        'DataCase': 2,
                        'IsParameter': True,
                        'IsVisible': False,
                        'IsPrompt': False
                    }
                ]
            },
        },
        credentials={
            'Username': username,
            'Client': client,
            'Password': password,
        })
    ubw_data = parse(res['TemplateResult'])['Agresso']['AgressoQE']
    return Data(
        metadata=Metadata(timestamp=datetime.now().timestamp()),
        data=[rec for rec in ubw_data if ubw_record_filter(rec)]
    )
def get_channel_name(channel):
    """Resolve a Slack channel id to its name.

    :param channel: Slack channel id.
    :return: the channel name, or None when the lookup yields nothing.

    NOTE(review): channels.info is a legacy Slack endpoint — confirm the
    workspace still accepts it (conversations.info is the modern call).
    """
    slack_token = SSM(with_decryption=True).get('slack_app_token')
    response = requests.get('https://slack.com/api/channels.info',
                            headers={'Authorization': f'Bearer {slack_token}'},
                            params={'channel': channel})
    payload = response.json()
    return payload.get('channel', {}).get('name', None)
def validate(event) -> bool:
    """Check that the secret in the URL path matches the stored webhook secret."""
    expected_secret = SSM(with_decryption=True).get('jira_webhook_secret')
    provided_secret = event['pathParameters'].get('secret', None)
    return provided_secret == expected_secret
def ingest(event) -> Data:
    """Fetch CVs from CV Partner for the two configured offices, store CV
    documents and thumbnails in the private/public buckets, and return the
    user list with full CV data.

    Side effects: empties the configured private and public bucket
    prefixes, then writes one image and four CV renditions per person
    (no/int x pdf/docx).
    """
    objectnet_id = SSM(with_decryption=False).get('cv_partner_objectnet_id')
    sor_id = SSM(with_decryption=False).get('cv_partner_sor_id')
    api_token = SSM(with_decryption=True).get('cv_partner_api_token')
    res = requests.get(
        f'{url}/search?office_ids[]={objectnet_id}&office_ids[]={sor_id}&offset=0&size={offset_size}',
        headers={'Authorization': f'Bearer {api_token}'})
    data_json = res.json()

    # Start from a clean slate so previously uploaded files don't linger.
    empty_content_in_path(bucket=environ.get('PRIVATE_BUCKET'),
                          prefix=environ.get('PRIVATE_PREFIX'))
    empty_content_in_path(bucket=environ.get('PUBLIC_BUCKET'),
                          prefix=environ.get('PUBLIC_PREFIX'))

    def write_cv_doc_to_private_bucket(person,
                                       language: str = 'no',
                                       ext: str = 'pdf'):
        # Download one CV rendition and save it under a random name in the
        # private bucket; returns {'cv_<language>_<ext>': filename}.
        new_key = f'cv_{language}_{ext}'
        filename = f'{environ.get("PRIVATE_PREFIX")}/{uuid4()}.{ext}'
        http_request = {
            'requestUrl': get_cv_link(person['cv']['user_id'],
                                      person['cv']['id'],
                                      language=language,
                                      ext=ext),
            'header': {
                'Authorization': f'Bearer {api_token}'
            },
        }
        save_document(http_request,
                      filename=filename,
                      filetype=ext,
                      private=True)
        return {new_key: filename}

    def write_cv_image_to_public_bucket(person, ext: str = 'jpg'):
        # Save the CV thumbnail image in the public bucket;
        # returns {'image_key': filename}.
        new_key = 'image_key'
        filename = f'{environ.get("PUBLIC_PREFIX")}/{uuid4()}.{ext}'
        http_request = {'requestUrl': person['cv']['image']['thumb']['url']}
        save_document(http_request,
                      filename=filename,
                      filetype=ext,
                      private=False)
        return {new_key: filename}

    def get_cv_link(user_id, cv_id, language: str = 'no', ext: str = 'pdf'):
        # v1 download endpoint. language/ext may be the literal
        # placeholders '{LANG}'/'{FORMAT}' for later substitution.
        return url_v1 + f"/cvs/download/{user_id}/{cv_id}/{language}/{ext}/"

    def get_person(person):
        # Flatten one search hit into a record and store its image and
        # the four CV renditions alongside it.
        d = {
            'user_id': person['cv']['user_id'],
            'default_cv_id': person['cv']['id'],
            'cv_link': get_cv_link(person['cv']['user_id'],
                                   person['cv']['id'],
                                   language='{LANG}',
                                   ext='{FORMAT}')
        }
        d.update(write_cv_image_to_public_bucket(person))
        d.update(
            write_cv_doc_to_private_bucket(person, language='no', ext='pdf'))
        d.update(
            write_cv_doc_to_private_bucket(person, language='int', ext='pdf'))
        d.update(
            write_cv_doc_to_private_bucket(person, language='no',
                                           ext='docx'))
        d.update(
            write_cv_doc_to_private_bucket(person, language='int',
                                           ext='docx'))
        return d

    def get_cv(user_id, cv_id):
        # Full CV JSON for one user.
        cv = requests.get(url + f'/cvs/{user_id}/{cv_id}',
                          headers={'Authorization': f'Bearer {api_token}'})
        return cv.json()

    def get_list_of_users(data):
        # One record per person in the search result, with full CV attached.
        list_of_users = []
        for person in data['cvs']:
            user = get_person(person)
            user['cv'] = get_cv(user['user_id'], user['default_cv_id'])
            list_of_users.append(user)
        return list_of_users

    return Data(metadata=Metadata(timestamp=datetime.now().timestamp()),
                data=get_list_of_users(data_json))