def tearDown(self):
    # If the test produced png data, persist it to /tmp for manual inspection.
    if self.png_data:
        output_path = '/tmp/lambda_png_file.png'
        with open(output_path, "wb") as file_handle:
            file_handle.write(base64.decodebytes(self.png_data.encode()))
        Dev.pprint("Png data with size {0} saved to {1}".format(len(self.png_data), output_path))
def test_fixed__bug_cache_issue_in_lambdas(self):
    # Invoke the same lambda twice with an identical payload; both results are
    # dumped so a stale-cache regression would be visible in the output.
    payload = {"params": ["go_js", "graph_MKF", "default"]}
    for _ in range(2):
        self.png_data = Lambda('browser.lambda_browser').invoke(payload)
        Dev.pprint(self.png_data)
def test_raw_data(self):
    # Exercise the raw_data command with no team/channel (direct invocation).
    command_params = ['graph_J2O', 'details']
    self.team_id = None
    self.channel = None
    response = Lambda_Graph_Commands().raw_data(self.team_id, self.channel, command_params, None)
    Dev.pprint(response)
def test_get_task_details(self):
    # Pull the latest CodeBuild build, read its CloudWatch log stream and
    # extract the ngrok tunnel url plus the Jupyter token from the log lines.
    from osbot_aws.apis.Logs import Logs

    def find_in(array, text):
        # lines that contain `text` anywhere
        return [item for item in array if text in item]

    result = self.api.code_build.project_builds_ids(self.api.project_name)
    build_id = next(result)  # iterator yields most recent build first
    build_info = self.api.code_build.build_info(build_id)
    group_name = build_info.get('logs').get('groupName')
    stream_name = build_info.get('logs').get('streamName')
    logs = Logs(group_name=group_name, stream_name=stream_name)
    messages = logs.messages()
    # ngrok logs the public url as `... name=command_line addr=... url=<url>`
    ngrok_url = find_in(messages, 'name=command_line addr')[0].split('url=')[1].strip()
    # Jupyter logs its access token as `... token=<token>`
    jupyter_token = find_in(messages, 'token=')[0].split('token=')[1].strip()
    Dev.pprint("{0}?token={1}".format(ngrok_url, jupyter_token))
def _show_google_calendar(calendar_data, team_id=None, channel=None, headless=True):
    # Render Google Calendar events in the Full_Calendar browser view and
    # post a screenshot of it to Slack.
    from osbot_browser.view_helpers.Full_Calendar import Full_Calendar
    full_calendar = Full_Calendar(headless=headless)
    full_calendar.load_page()

    def as_event(entry):
        # all-day events carry 'date'; timed events carry 'dateTime' instead
        when_start = entry.get('start').get('date')
        when_end = entry.get('end').get('date')
        if when_start is None:
            when_start = entry.get('start').get('dateTime')
            when_end = entry.get('end').get('dateTime')
        return {"title": entry.get('summary'), "start": when_start, "end": when_end}

    events = [as_event(entry) for entry in calendar_data]
    Dev.pprint(events)
    full_calendar.invoke_js('show_calendar', events)
    full_calendar.browser_width(1000, 700)
    return full_calendar.send_screenshot_to_slack(team_id=team_id, channel=channel)
def test_get_graph_data(self):
    # graph_MKF is a small graph (20 nodes, 27 edges); larger alternatives
    # for manual testing: graph_YT4 (199/236) and graph_VZ5 (367/653).
    graph_name = 'graph_MKF'
    result = self.vis_js.get_graph_data(graph_name)
    Dev.pprint("{0} nodes, {1} edges".format(len(result.get('nodes')), len(result.get('edges'))))
def servers(team_id=None, channel=None, params=None):
    # Post a Slack summary of the currently running Jupyter CodeBuild servers,
    # or a hint on how to start one when none are active.
    text = ":point_right: Here are the running servers:"
    servers_text = ""
    attachments = []
    for build_id, build in CodeBuild_Jupyter_Helper().get_active_builds().items():
        build_info = build.build_info()
        Dev.pprint(build_info)
        # flatten the build's environment variables into a plain dict
        variables = {variable.get('name'): variable.get('value')
                     for variable in build_info.get('environment').get('environmentVariables')}
        repo_name = variables.get('repo_name')
        user = variables.get('user')
        timeout = build_info.get('timeoutInMinutes')
        small_id = build_id[-5:]
        server_url = build.url()
        # no url yet means the server is still starting up
        user_text = "(server booting up)" if server_url is None else "<{0}|open>".format(server_url)
        started = "{0}".format(build_info.get('startTime').strftime("%H:%M"))
        servers_text += "*{1}*: {2} (id: `{0}`, user: <@{3}>, started: {4}, timeout: {5})\n".format(
            small_id, repo_name, user_text, user, started, timeout)
    if servers_text:
        attachments.append({"text": servers_text, 'color': 'good'})
        slack_message(text, attachments, channel, team_id)
    else:
        slack_message(":information_source: there are no servers running! Why don't you start one using the command `jupyter start {repo name}` ", [], channel, team_id)
def tearDown(self):
    # Dump the last result (when set) and persist any captured png to /tmp.
    if self.result is not None:
        Dev.pprint(self.result)
    if self.png_data:
        output_path = '/tmp/lambda_png_file.png'
        with open(output_path, "wb") as file_handle:
            file_handle.write(base64.decodebytes(self.png_data.encode()))
def test_add_all_linked_issues____with_color_coding_on_rating(self):
    # Build a linked-issues graph and color each PUML node by its 'Rating'.
    # `issues` is populated AFTER the graph is built but BEFORE rendering,
    # which is when the on_add_node hook actually reads it.
    issues = None
    rating_colors = {'High': '#F37071',
                     'Medium': '#F0BF99',
                     'Low': '#78999D',
                     'TBD': '#F7A4A4'}

    def on_add_node(element, title, id, original_id):
        key = id.replace('_', '-')
        issue = issues.get(key)
        color = '#FFFFFF'
        if issue:
            color = rating_colors.get(issue['Rating'], '#FFFFFF')
        return '{0} "<color:#000000>{1}</color>" as {2} {3}'.format(element, title, id, color)

    self.graph.puml.on_add_node = on_add_node
    self.graph.puml.add_line("\tscale 3024 width \n")
    keys = ['RISK-1610']
    (self.graph.set_puml_left_to_right(True)
               .set_puml_only_from_projects(['RISK', 'VULN'])
               .set_puml_show_key_in_text(False)
               .set_puml_show_edge_labels(False)
               .add_all_linked_issues(keys, 2))
    issues = self.graph.get_nodes_issues()
    self.graph.render_puml()
    self.graph.puml.save_tmp()
    Dev.pprint(len(self.graph.nodes))
    Dev.pprint(len(self.graph.edges))
def test_get_screenshot_using_local_chrome(self):
    # Log into the Jupyter server found in the build logs and grab a screenshot.
    url, token = self.code_build.get_server_details_from_logs()
    self.jp.set_url(url).set_token(token)
    self.jp.login()
    self.png_file = self.jp.screenshot()
    Dev.pprint(self.png_file)
def test_build_start(self):
    # Kick off a build, wait for it, then inspect the second-to-last phase.
    build_id = self.code_build.build_start()
    build_info = self.code_build.build_wait_for_completion(build_id, 1, 60)
    phase = build_info.get('phases').pop(-2)
    Dev.pprint(phase.get('phaseType'),
               phase.get('phaseStatus'),
               phase.get('contexts')[0].get('message'))
def test_pprint__confirm_call_to_builtins_print(self, builtins_print):
    # Dev.pprint should route its output through builtins.print.
    Dev.pprint('aaa')        # 1st call
    Dev.pprint('aaa', 123)   # 2nd call
    assert builtins_print.call_count == 2        # exactly two print invocations
    builtins_print.assert_called_with()          # latest invocation took no args
    builtins_print.assert_has_calls([call(), call()])  # both invocations argument-free
def add_notification(source_arn):
    # Grant S3 permission to invoke the lambda for events coming from source_arn.
    # NOTE(review): relies on closure/outer names lambda_obj, lambda_arn and IAM
    # — this looks like a nested helper extracted from a larger test.
    response = lambda_obj._lambda.client().permission_add(FunctionName=lambda_arn,
                                                          StatementId='1',
                                                          Action='lambda:InvokeFunction',
                                                          Principal='s3.amazonaws.com',
                                                          SourceArn=source_arn,
                                                          SourceAccount=IAM().account_id())
    Dev.pprint(response)
def test_pprint__confirm_call_to_pprint_pprint(self, pprint_pprint):
    # Dev.pprint returns its input (single value as-is, multiple as a tuple)
    # and forwards each value to pprint.pprint with indent=2.
    assert Dev.pprint('1st') == '1st'
    assert Dev.pprint('2nd', 123) == ('2nd', 123)
    assert pprint_pprint.call_count == 3  # one call per value printed
    expected_calls = [call('1st', indent=2),
                      call('2nd', indent=2),
                      call(123, indent=2)]
    pprint_pprint.assert_has_calls(expected_calls)
def test_save_graph(self):
    # Save a minimal two-node graph (no extra data, auto graph id).
    nodes = ['a', 'b']
    edges = [('a', 'goes to', 'b')]
    extra_data = None
    graph_id = None
    result = self.lambda_graph.save_graph(nodes, edges, extra_data, graph_id,
                                          'test_save_graph_nodes_edges',  # graph name
                                          'unit-test')                    # graph type
    Dev.pprint(result)
def convert_issue(self, issue_raw):
    """Flatten a raw Jira issue payload into a simple {field-name: value} dict.

    Field values are extracted according to their schema type: users use
    displayName, named entities use name, options use value, arrays are
    joined into a comma-separated string, and plain scalars pass through.
    Unknown schema types are dumped via Dev.pprint and skipped.

    Returns {} for a falsy payload.
    """
    if not issue_raw:
        return {}

    skip_fields = ['resolution', 'votes', 'worklog', 'watches', 'comment',
                   'iconUrl', 'fixVersions', 'customfield_14238',
                   'issuelinks']  # '% complete'
    skip_types = ['any', 'progress', 'option-with-child']
    use_display_name = ['user']
    use_name = ['issuetype', 'status', 'project', 'priority', 'securitylevel']
    use_value = ['string', 'number', 'datetime', 'date']

    issue = {'Key': issue_raw['key'], 'Id': issue_raw['id']}
    fields = self.fields_by_id()
    # guard against a missing 'fields' entry (previously crashed on .get)
    fields_values = issue_raw.get('fields') or {}
    self.map_issue_links(issue, fields_values.get('issuelinks'))
    for field_id, value in fields_values.items():
        if not value or field_id in skip_fields:
            continue
        field = fields.get(field_id)
        issue_type = field.get('schema').get('type')
        if issue_type in skip_types:
            continue
        issue_name = field.get('name')
        if issue_type in use_display_name:
            value = value.get('displayName')
        elif issue_type in use_name:
            value = value.get('name')
        elif issue_type in use_value:
            pass  # already a plain scalar — keep as-is
        elif issue_type == 'option':
            value = value.get('value')
        elif issue_type == 'array':
            items = []
            for item in value:
                if type(item) is str:
                    items.append(item)
                elif item.get('value'):
                    items.append(item.get('value'))
                elif item.get('name'):
                    items.append(item.get('name'))
            value = ",".join(items)
        else:
            Dev.pprint(value)  # unexpected schema type — surface for debugging
            continue
        issue[issue_name] = value
    return issue
def _save_png_file(self, png_data):
    # Decode base64 png data and write it to a fixed tmp location; any failure
    # is reported rather than raised.
    try:
        target_path = '/tmp/lambda_png_file.png'
        if png_data:
            with open(target_path, "wb") as file_handle:
                file_handle.write(base64.decodebytes(png_data.encode()))
            Dev.pprint("Png data with size {0} saved to {1}".format(len(png_data), target_path))
    except Exception as error:
        # NOTE(review): this uses Dev.print while the rest of the codebase uses
        # Dev.pprint — confirm Dev.print exists, otherwise this raises on error.
        Dev.print("[_save_png_file][Error] {0}".format(error))
        Dev.print(png_data)
def tearDown(self):
    # Dump the last result (when set) and persist any captured screenshot.
    if self.result is not None:
        Dev.pprint(self.result)
    if self.png_data:
        output_path = '/tmp/tmp-jira-screenshot.png'
        with open(output_path, "wb") as file_handle:
            file_handle.write(base64.decodebytes(self.png_data.encode()))
        Dev.pprint("Png data with size {0} saved to {1}".format(len(self.png_data), output_path))
def test__create_graph_with_epic_data__sec_9195(self):
    # Build a graph from GSOKR-924, expand epics, then follow links upwards
    # (ignoring child/stakeholder paths) to depth 3.
    graph = self.graph
    (graph.add_all_linked_issues(['GSOKR-924'])
          .add_nodes_from_epics()
          .set_link_paths_to_ignore(['is child of', 'has Stakeholder'])
          .set_links_path_mode_to_up()
          .add_all_linked_issues(depth=3))
    Dev.pprint(len(graph.nodes))
    graph.render_puml_and_save_tmp()
def test_issue_update(self):
    # Push a set of field edits to RISK-12 and print the API result.
    issue_data = {"Key": "RISK-12",
                  "Summary": "new summary value",
                  "Risk Description": "el risk description",
                  "Description": "the description"}
    result = self.api.issue_update(issue_data)
    Dev.pprint(result)
def test_add_all_linked_issues____with___risk_links_paths_up(self):
    # Build a depth-5 graph from SEC-8708 following only the upward risk
    # link paths, then render it to a tmp puml file.
    keys = ['SEC-8708']
    graph = self.graph
    (graph.set_puml_link_types_to_add(graph.risk_links_paths_up)
          .add_all_linked_issues(keys, 5))
    graph.render_puml_and_save_tmp()
    Dev.pprint(len(graph.nodes))
    Dev.pprint(len(graph.edges))
def request_put(self, path, data):
    # Issue an authenticated PUT against the Jira REST v2 API.
    # Returns True on any 2xx status, otherwise logs the body and returns False.
    json_data = json_dumps(data)
    (server, username, password) = self.config()
    target = '{0}/rest/api/2/{1}'.format(server, path)
    if self.log_requests:
        Dev.pprint('put', target)
    response = requests.put(target,
                            json_data,
                            headers={'Content-Type': 'application/json'},
                            auth=(username, password))
    if 200 <= response.status_code < 300:
        return True
    Dev.pprint('[Error][request_put]: {0}'.format(response.text))
    return False
def build_wait_for_completion(self, build_id, sleep_for=0.5, max_attempts=20, log_status=False):
    # Poll a build until it leaves IN_PROGRESS, returning its info dict,
    # or None once max_attempts polls have been exhausted.
    for attempt in range(max_attempts):
        build_info = self.build_info(build_id)
        status = build_info.get('buildStatus')
        if log_status:
            Dev.pprint("[{0}] {1} {2}".format(attempt, status, build_info.get('currentPhase')))
        if status != 'IN_PROGRESS':
            return build_info
        sleep(sleep_for)
    return None
def _test_s3_bucket_to_sqs(self):
    # Exploratory (disabled, leading underscore) test wiring an S3 bucket
    # notification to a lambda that forwards event data to an SQS queue.
    # Only the queue polling at the bottom currently runs.
    s3 = S3()
    queue = Queue('unit_tests_temp_queue')
    bucket_name = 'bucket-42-temp'
    region = 'eu-west-2'
    lambda_obj = Lambda_Package('osbot_aws.lambdas.pocs.send_event_data_to_queue').update_with_root_folder()
    lambda_arn = lambda_obj._lambda.function_Arn()
    bucket_arn = s3.bucket_arn(bucket_name)
    queue_arn = queue.arn()

    def add_notification(source_arn):
        # grant S3 permission to invoke the lambda for events from source_arn
        response = lambda_obj._lambda.client().permission_add(FunctionName=lambda_arn,
                                                              StatementId='1',
                                                              Action='lambda:InvokeFunction',
                                                              Principal='s3.amazonaws.com',
                                                              SourceArn=source_arn,
                                                              SourceAccount=IAM().account_id())
        Dev.pprint(response)

    # drain a couple of messages from the queue to see what arrived
    Dev.pprint(queue.pull())
    Dev.pprint(queue.pull())
def test_create(self):
    # Capture the active Jupyter server details to a yaml file, then use them
    # to log in via the web UI and build an API client.
    headless = False
    file = '/tmp/active_jupyter_server.yml'
    api = CodeBuild_Jupyter_Helper()
    config = api.save_active_server_details(file)
    Dev.pprint(config)
    token = config.get('token')
    server = config.get('server')
    jp_web = Jupyter_Web(token=token, server=server, headless=headless)
    jp_web.login()
    jp_api = Jupyter_API(token=token, server=server, headless=headless)
async def wait_for_element(self, selector, timeout=10000, page=None, visible=False, hidden=False):
    """Wait for `selector` to satisfy the visible/hidden constraints on `page`.

    Uses the current page when none is supplied. Returns True when the
    selector condition is met within `timeout` ms, False on timeout or any
    other error (which is logged, not raised).
    """
    if page is None:
        page = await self.page()
    try:
        await page.waitForSelector(selector, {'timeout': timeout,
                                              'visible': visible,
                                              'hidden': hidden})
        return True
    except Exception as error:
        # fixed: error label previously said 'sync__await_for_element'
        Dev.pprint("[Error][wait_for_element] {0}".format(error))
        return False
def test_remove_with_links(self):
    # Build a small graph, dump it, remove linked nodes, dump again.
    graph = self.graph
    (graph.add_node("RISK-424")
          .add_node("ID-42")
          .add_node("ID-41")
          .add_edge("RISK-424", '', "ID-42")
          .add_edge("RISK-424", '', "ID-7"))
    Dev.pprint(graph.nodes)
    Dev.pprint(graph.edges)
    graph.remove_with_links()
    Dev.pprint(graph.nodes)
    Dev.pprint(graph.edges)
def update_index_from_jira_changes(self):
    # Incrementally sync recently-updated Jira issues into the ELK index.
    # The watermark of the last sync lives inside the index itself under
    # the '_update_details' key.
    if self.projects is None:
        log_error("Cannot update ELK since self.projects value is not configured")
        return
    update_key = '_update_details'  # TODO: move watermark storage to a dedicated location
    data = self.elastic.get_data(update_key)
    if data is not None:
        update_key_data = data['_source']
        when = update_key_data['last_updated_at']
    else:
        # no watermark yet: look back 24h
        update_key_data = {"Key": update_key, "last_updated_at": None}
        epoch = (datetime.datetime.now() - datetime.timedelta(0, 60 * 1440)).timestamp()
        when = strftime("%Y/%m/%d %H:%M", localtime(epoch))
    # subtract 2 minutes to compensate for sync-server clock issues; capture
    # this as early as possible so nothing updated during this run is missed
    now_epoch = time() - 120
    now = strftime("%Y/%m/%d %H:%M", localtime(now_epoch))
    now_server = strftime("%Y/%m/%d %H:%M", localtime(time()))
    print(" > using {0} , localtime: {1}".format(now, now_server))
    query = 'project in ({0}) AND updated >= "{1}"'.format(self.projects, when)
    changes = self.api_Jira.search_no_cache(query)
    if len(changes) == 0:
        log_info("No issues updated since: {0}".format(when), "API_Elastic_Jira.update_index_from_jira_changes")
        return
    log_info(Dev.pprint("Since {0}, there where {1} issues updated: {2}".format(when, len(set(changes)), set(changes))), "API_Elastic_Jira.update_index_from_jira_changes")
    issues = self.fix_issues_for_elk(changes.values())
    result = self.elastic.add_bulk(issues, "Key")
    log_info(Dev.pprint("sent {0} issues to elk instance: {1}".format(result, self.secrets_id)), "API_Elastic_Jira.update_index_from_jira_changes")
    # persist the new watermark
    update_key_data['last_updated_at'] = now
    self.elastic.add(update_key_data, "Key")
def chord(team_id=None, channel=None, params=None, headless=True):
    """Render the graph identified by `params` as an amCharts chord diagram
    and post a screenshot of it to Slack.

    Returns the result of send_screenshot_to_slack.
    """
    load_dependencies('syncer,requests,pyppeteer,websocket-client')
    (am_charts, graph_data) = Am_Charts_Views._get_graph_data(params, headless=headless)
    Dev.pprint(graph_data)
    am_charts.load_page(True)
    js_code = """
        var chart = am4core.create("chartdiv", am4charts.ChordDiagram);
        chart.hiddenState.properties.opacity = 0;

        chart.dataFields.fromName = "from";
        chart.dataFields.toName = "to";
        chart.dataFields.value = "value";

        // make nodes draggable
        var nodeTemplate = chart.nodes.template;
        nodeTemplate.readerTitle = "Click to show/hide or drag to rearrange";
        nodeTemplate.showSystemTooltip = true;
        nodeTemplate.cursorOverStyle = am4core.MouseCursorStyle.pointer
    """
    am_charts.exec_js(js_code)
    # fixed: removed a hard-coded sample data list that was dead code
    # (it was immediately overwritten by the edge-derived data below)
    data = []
    for edge in graph_data.get('edges'):
        # edge is (from_node, label, to_node); the label is not shown
        data.append({"from": edge[0], "to": edge[2], "value": 1})
    am_charts.assign_variable_js('window.chart.data', data)
    return am_charts.send_screenshot_to_slack(team_id, channel)
def test_add_all_linked_issues____with___risk_links_paths_down(self):
    # Build a linked-issues graph (downward risk paths) and color each PUML
    # node by its workflow 'Status'. `issues` is filled AFTER graph building
    # but BEFORE rendering, which is when on_add_node reads it.
    issues = None
    status_colors = {'Blocked': '#F37071',
                     'Backlog': '#F37071',
                     'To VULN Assess': '#F0BF99',
                     'To Validate': '#F0BF99',
                     'Allocated for Fix': '#78999D',
                     'Fixed': '#6DD1A3'}

    def on_add_node(element, title, id, original_id):
        key = id.replace('_', '-')
        issue = issues.get(key)
        color = '#FFFFFF'
        if issue:
            color = status_colors.get(issue['Status'], '#FFFFFF')
        return '{0} "<color:#000000>{1}</color>" as {2} {3}'.format(element, title, id, color)

    self.graph.puml.on_add_node = on_add_node
    keys = ['IA-333']
    graph = self.graph
    (graph.set_puml_link_types_to_add(graph.risk_links_paths_down)
          .set_puml_show_key_in_text(False)
          .add_all_linked_issues(keys, 4))
    issues = self.graph.get_nodes_issues()
    graph.render_puml_and_save_tmp()
    Dev.pprint(len(graph.nodes))
    Dev.pprint(len(graph.edges))