def run(event, context):
    """AWS Lambda handler: render PlantUML source (event['puml']) to PNG.

    Returns {"png_base64": <base64 str>} on success, {"png_base64": None}
    when no png was produced.
    """
    load_dependency('plantuml')
    dot_static = '/tmp/lambdas-dependencies/plantuml/dot_static'
    plantuml_jar = '/tmp/lambdas-dependencies/plantuml/plantuml.jar'
    Process.run("chmod", ['+x', dot_static])
    Process.run("chmod", ['+x', plantuml_jar])
    os.environ['PLANTUML_LIMIT_SIZE'] = str(4096 * 4)  # raise max width to 4x the 4096 default (16,384)
    os.environ['GRAPHVIZ_DOT'] = dot_static
    (fd, puml_file) = tempfile.mkstemp('.puml')
    os.close(fd)  # BUG FIX: mkstemp returns an open fd; close it so descriptors don't leak across invocations
    png_file = puml_file.replace(".puml", ".png")
    code = event.get('puml')
    Files.write(puml_file, code)
    # BUG FIX: JVM options must precede -jar; previously -Xmx2512m was passed to
    # PlantUML as a (meaningless) application argument instead of sizing the heap
    subprocess.run(['java', '-Xmx2512m', '-jar', plantuml_jar, '-tpng', '-o', '/tmp', puml_file],
                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if os.path.exists(png_file):
        with open(png_file, "rb") as image_file:
            png = base64.b64encode(image_file.read()).decode()
    else:
        png = None
    return {"png_base64": png}
def test__using_with_valid_zip_and_target_folder(self):
    """Unzip_File creates the target folder inside its context and removes it on exit."""
    source_folder = Files.current_folder()
    target_folder = '/tmp/unzip_test'
    with Zip_Folder(source_folder) as (zip_file):
        with Unzip_File(zip_file, target_folder, True) as temp_folder:
            assert Files.exists(temp_folder) is True
        assert Files.exists(temp_folder) is False
def __init__(self, headless=True):
    """Set up the browser and page renderer for the vis-js sample page."""
    parent = Files.parent_folder(__file__)
    self.web_page = '/vis-js/simple.html'
    self.web_root = Files.path_combine(parent, '../web_root')
    browser = API_Browser(headless=headless, auto_close=headless)
    self.api_browser = browser.sync__setup_browser()
    self.render_page = Render_Page(api_browser=self.api_browser, web_root=self.web_root)
def test_get_set_last_chrome_session(self):
    """Round-trip a chrome session dict through set/get_last_chrome_session."""
    self.api.file_tmp_last_chrome_session = Files.temp_file()
    session = {'chrome_devtools': 'ws://127.0.0.1:64979/devtools/browser/75fbaab9-33eb-41ee-afd9-4aed65166791'}
    self.api.set_last_chrome_session(session)
    assert self.api.get_last_chrome_session() == session
    Files.delete(self.api.file_tmp_last_chrome_session)
def __init__(self, tmp_img=None, clip=None, headless=False):
    """Prepare a Render_Page over the project's web_root for screenshot rendering."""
    views_folder = Files.path_combine(Files.parent_folder(__file__), '../../src/web_root')
    self.headless = headless
    self.path_views = views_folder
    self.render_page = Render_Page(headless=self.headless,
                                   auto_close=self.headless,
                                   web_root=self.path_views)
    self.tmp_img = tmp_img
    self.clip = clip
def test__init__(self):
    """A fresh Temp_File has an existing tmp folder, no file yet, and a
    file_path built exactly from folder + '/' + name."""
    tf = Temp_File()
    assert Files.exists(tf.tmp_folder)
    assert Files.not_exists(tf.tmp_file)
    assert Files.not_exists(tf.file_path)
    assert tf.tmp_folder in tf.file_path
    remainder = tf.file_path.replace(tf.tmp_folder, '').replace(tf.tmp_file, '')
    assert '/' == remainder
def __init__(self):
    """Set up a Render_Page for the google_charts sample page with default table options."""
    parent_folder = Files.parent_folder(__file__)
    self.web_page = '/google_charts/simple.html'
    self.web_root = Files.path_combine(parent_folder, '../web_root')
    self.api_browser = API_Browser().sync__setup_browser()
    self.render_page = Render_Page(api_browser=self.api_browser, web_root=self.web_root)
    self.table_width = '100%'
    self.columns_defs = None
    self.table_title = None
def __init__(self, web_page, headless=True):
    """Bind a browser, a local web server and a page renderer to the given web page."""
    root = Files.path_combine(Files.parent_folder(__file__), '../web_root')
    self.web_page = web_page
    self.title = 'browser view'
    self.web_root = root
    browser = API_Browser(headless=headless, auto_close=headless)
    self.api_browser = browser.sync__setup_browser()
    self.web_server = Web_Server(self.web_root)
    self.render_page = Render_Page(api_browser=self.api_browser, web_server=self.web_server)
def web_root(self):
    """Resolve the web_root folder for the current execution context
    (AWS lambda, unit test, or repo checkout); None when it cannot be determined."""
    in_aws = os.getenv('AWS_REGION') is not None
    if in_aws:
        return Files.path_combine('.', './web_root')
    current = Files.current_folder()
    if 'test/browser' in current:  # running from a unit test
        return Files.path_combine('.', '../../../../src/web_root')
    this_folder = Files.folder_name(__file__)
    if 'pbx-gs-python-utils/src/browser' in this_folder:  # running from the repo checkout
        return Files.path_combine(this_folder, '../../../../src/web_root')
    return None
def __init__(self):
    """Set up a non-headless browser and page renderer for the risks dashboard page."""
    self.web_page = '/gs/risk/risks-dashboard.html'
    self.web_root = Files.path_combine(Files.parent_folder(__file__), '../web_root')
    self.headless = False
    browser = API_Browser(self.headless, self.headless)
    self.api_browser = browser.sync__setup_browser()
    self.render_page = Render_Page(api_browser=self.api_browser, web_root=self.web_root)
    self.graph_name = None
    self.jira_key = None
def setup_tmp_web_root(payload):
    """Copy ./html into web_root and, when the payload carries coffee-script code
    (directly or via queryStringParameters.code), write it over map-1.coffee.
    Returns the final contents of map-1.coffee."""
    copy_tree('./html', web_root)
    cs_map_1 = web_root + '/coffee/map-1.coffee'
    direct_code = payload.get('coffee_script_code')
    if direct_code:
        Files.write(cs_map_1, direct_code)
    query_params = payload.get("queryStringParameters")
    if query_params and query_params.get('code'):
        Files.write(cs_map_1, query_params.get('code'))
    return Files.contents(cs_map_1)
def test_create_local_server_and_take_screenshot(self):
    """Spin up a throwaway local HTTP server over the copied html files, take a
    screenshot, fetch the page, and always tear the server down."""
    Files.folder_delete_all(self.src_tmp)
    self.copy_html_files()
    web_root = self.src_tmp + '/html'
    # NOTE(review): "SimpleHTTPServer" is Python 2 only; under a python3
    # interpreter this should be "http.server" -- confirm target interpreter
    proc = subprocess.Popen(["python", "-m", "SimpleHTTPServer", "1234"], cwd=web_root)
    try:
        Dev.pprint(proc)
        self.take_screenshot()
        html = requests.get('http://localhost:1234/map/').text
    finally:
        proc.kill()  # BUG FIX: without finally the server process leaked whenever screenshot/request raised
    Dev.pprint(html)
def upload_dependency(target):
    """Zip a local dependency folder and push it to the gs-lambda-tests S3 bucket.
    Returns True when the resulting zip exists in S3; raises when the local
    dependency folder is missing."""
    bucket = 'gs-lambda-tests'
    folder_key = 'dinis/lambdas-dependencies/'
    file_key = 'dinis/lambdas-dependencies/{0}.zip'.format(target)
    local_path = Files.path_combine('../../../_lambda_dependencies/', target)
    if Files.not_exists(local_path):
        raise Exception(
            "In Lambda upload_dependency, could not find dependency for: {0}".
            format(target))
    s3 = S3()
    s3.folder_upload(local_path, bucket, folder_key)
    return s3.file_exists(bucket, file_key)
def copy_html_files(self):
    """Copy the hugo sample page plus its js/coffee assets into the tmp web root."""
    static = self.folder_src_hugo + '/static'
    mappings = [
        (self.folder_src_hugo + '/layouts/visjs/simple.html',  '/html/map/simple/index.html'                  ),
        (static + '/js/visjs/vis.js',                          '/html/js/visjs/vis.js'                        ),
        (static + '/js/repl/browser-compiler-coffeescript.js', '/html/js/repl/browser-compiler-coffeescript.js'),
        (static + '/coffee/api-visjs.coffee',                  '/html/coffee/api-visjs.coffee'                ),
        (static + '/coffee/map-1.coffee',                      '/html/coffee/map-1.coffee'                    ),
    ]
    for source, target in mappings:
        Files.copy(source, self.src_tmp + target)
def test_invoke_using_map_as_param(self):
    """Invoke the lambda with the map-2 coffee-script file as payload and save the returned image."""
    map_2_file = self.folder_src_hugo + '/static/coffee/map-2.coffee'
    payload = {'coffee_script_code': Files.contents(map_2_file)}
    result = self.zip_update_invoke(payload)
    #result = self.just_invoke(payload)
    self.save_image(result, './lambda-result.png')
def __init__(self, web_root=None):
    """Pick a web_root (caller-supplied, or the default tmp html folder) and a random port."""
    self.src_tmp = '/tmp/temp_web_server'
    self.web_root = web_root if web_root else self.src_tmp + '/html'
    self.html_file = Files.path_combine(self.web_root, 'index.html')
    self.port = Misc.random_number(10000, 60000)
    self.server_proc = None
def run(event, context):
    """AWS Lambda handler: post a png (inline base64 or downloaded from S3) to a
    Slack channel; returns None when no image data was provided."""
    channel = event.get('channel')
    png_data = event.get('png_data')
    s3_bucket = event.get('s3_bucket')
    s3_key = event.get('s3_key')
    title = event.get('title')
    team_id = event.get('team_id')
    aws_secrets_id = event.get('aws_secrets_id')
    # hard coded values / need to move to special function
    if team_id == 'T7F3AUXGV': aws_secrets_id = 'slack-gs-bot'
    if team_id == 'T0SDK1RA8': aws_secrets_id = 'slack-gsbot-for-pbx'
    bot_token = Secrets(aws_secrets_id).value()
    if png_data:
        #(fd, tmp_file) = tempfile.mkstemp('png')
        tmp_file = Files.temp_file('.png')
        with open(tmp_file, "wb") as fh:
            fh.write(base64.decodebytes(png_data.encode()))
    elif s3_bucket and s3_key:
        tmp_file = S3().file_download_and_delete(s3_bucket, s3_key)
    else:
        return None
    return send_file_to_slack(tmp_file, title, bot_token, channel)
def run(event, context):
    """AWS Lambda handler: render Graphviz DOT (event['dot']) to SVG and return it
    base64-encoded; on failure report the error to the Slack channel and return None."""
    load_dependency('pydot')
    channel = event.get('channel')
    data = event.get('dot')
    #slack_message("in dot to svg: {0}".format(event), [], channel)
    log_to_elk("in dot to svg: {0}".format(event))
    import dot_parser
    try:
        (fd, tmp_file) = tempfile.mkstemp('.dot')  # BUG FIX: suffix was the typo 'dot)'
        dot_static = '/tmp/lambdas-dependencies/pydot/dot_static'
        Process.run("chmod", ['+x', dot_static])
        # BUG FIX: the replacements were no-ops ('<' -> '<'); decode the HTML
        # entities Slack adds, which is what the comment below describes
        data = data.replace('&lt;', '<').replace('&gt;', '>')  # this solved a really nasty bug caused by the fact that Slack will html encode the < and >
        # graph = pydot.graph_from_dot_data(data).pop()
        # <from pydot>  use code below (instead of above) to get a better error message from dot parser
        graphparser = dot_parser.graph_definition()
        graphparser.parseWithTabs()
        tokens = graphparser.parseString(data)
        graph = list(tokens).pop()
        # </from pydot>
        graph.write_svg(tmp_file, prog=dot_static)
        svg_raw = Files.contents(tmp_file)
        return base64.b64encode(svg_raw.encode()).decode()
    except Exception as error:
        slack_message("[dot_to_svg] Error: {0} ".format(error), [], channel)
        return None
def list(team_id, channel, params):
    """Slack command: list the example html files available under ./web_root.
    Returns (message text, slack attachments)."""
    text = "Here are the current examples files:"
    files = ''.join('{0} \n'.format(file.replace('./web_root/', ''))
                    for file in Files.find('./web_root/**/*.html'))
    attachments = [{'text': files}]
    return text, attachments
def test_puml___dudes_creation(self):
    """Render the first-test puml file via the lambda function into /tmp."""
    output_png = '/tmp/dudes-puml.png'
    puml_source = Files.contents('../dudes/puml/first-test.puml')
    #Dev.pprint(puml)
    #self.plantuml.puml_to_png_via_local_server(puml, target_file)
    self.plantuml.puml_to_png_using_lambda_function(puml_source, output_png)
def test_invoke___jira_issues_dot_file(self):
    """Send the jira-test dot file to the dot_to_svg lambda and print the returned svg."""
    dot_source = Files.contents('../../../data/jira-test.dot')
    Dev.pprint(dot_source)
    params = {"dot": dot_source, "channel": "DDKUZTK6X"}
    svg = self.dot_to_svg.update().invoke(params)
    #how_Img.from_svg_string(svg)
    Dev.pprint(svg)
def __init__(self, headless=True, auto_close=True, url_chrome=None):
    """Hold browser configuration; the browser object itself is created lazily."""
    #self.file_tmp_screenshot         = '/tmp/browser-page-screenshot.png'
    self.file_tmp_last_chrome_session = '/tmp/browser-last_chrome_session.json'
    self.file_tmp_screenshot = Files.temp_file('.png')
    self._browser = None
    self.headless = headless
    self.auto_close = auto_close  # happens after taking a screenshot
    self.url_chrome = url_chrome
    self.log_js_errors_to_console = True
def test__using_with__no_params(self):
    """Temp_File defaults to a '...' payload with a .tmp extension; both the default
    and the explicit ('abc', 'txt') variants exist only inside the with block."""
    with Temp_File() as temp:
        assert Files.file_extension(temp.file_path) == '.tmp'
        assert Files.exists(temp.file_path)
        assert Files.contents(temp.file_path) == '...'
    assert Files.not_exists(temp.file_path)
    with Temp_File('abc', 'txt') as temp:
        assert Files.file_extension(temp.file_path) == '.txt'
        assert Files.exists(temp.file_path)
        assert Files.contents(temp.file_path) == 'abc'
    assert Files.not_exists(temp.file_path)
def update_with_src(self, path_to_src=None):
    """Overlay a local src folder onto this lambda's source and deploy the result.
    Defaults to the repo's src folder when no path is given."""
    if path_to_src is None:
        path_to_src = Files.path_combine(__file__, '../../../../../../src')
    staging = '/tmp/src_{0}'.format(self.name)
    copy_tree(self.source, staging)   # start from the lambda's current source
    copy_tree(path_to_src, staging)   # then overlay the local src folder
    self.source = staging
    return self.update()
def load_dependency(target):
    """Download and unpack a zipped dependency from S3 into /tmp (once per Lambda
    execution environment) and add it to sys.path.

    Returns True when the dependency folder is present; raises when the zip is
    missing from S3.
    """
    s3 = S3()
    s3_bucket = 'gs-lambda-tests'
    s3_key = 'dinis/lambdas-dependencies/{0}.zip'.format(target)
    tmp_dir = Files.path_combine('/tmp/lambdas-dependencies', target)
    if s3.file_exists(s3_bucket, s3_key) is False:
        raise Exception(
            "In Lambda load_dependency, could not find dependency for: {0}".
            format(target))
    if Files.not_exists(tmp_dir):  # first load on a new Lambda execution environment
        zip_file = s3.file_download(s3_bucket, s3_key, False)  # download zip file with dependencies
        shutil.unpack_archive(zip_file, extract_dir=tmp_dir)   # unpack them
    sys.path.append(tmp_dir)  # always ensure tmp_dir is on python's import path (also on warm starts)
    return Files.exists(tmp_dir)  # BUG FIX: was not_exists, which reported False after a successful load
async def get_screenshot_via_browser(self, url=None, png_file=None, full_page=True, clip=None, viewport=None, js_code=None, delay=None):
    """Take a screenshot of url (defaults to this web server's url) into png_file
    (defaults to a fresh temp .png); supplying a clip region forces full_page off."""
    if clip is not None:
        full_page = False
    target_file = png_file if png_file is not None else Files.temp_file('.png')
    target_url = url if url is not None else self.web_server.url()
    await self.api_browser.browser()
    return await self.api_browser.screenshot(target_url,
                                             full_page=full_page,
                                             file_screenshot=target_file,
                                             clip=clip,
                                             viewport=viewport,
                                             js_code=js_code,
                                             delay=delay)
def get_oauth_token(self, desired_scope): secret_data = json.loads(Secrets(self.gsuite_secret_id).value() ) # load secret from AWS Secrets store token_file = '/tmp/gmail_credential_{0}.json'.format( desired_scope ) # this is the tmp file with the token value for the desired scope if not Files.exists(token_file): # if the file does not exist if os.getenv('AWS_REGION') is not None or os.getenv( 'SYNC_SERVER' ): # check if we are running in AWS or in the sync server Files.write( token_file, secret_data['token.json'] ) # if we are, use the token.json value from the AWS secret_data else: secret_data = json.loads(Secrets( 'gsuite_token').value()) # BUG, need to refactor this credentials_file = '/tmp/gsuite_credentials.json' # file to hold the credentials.json value Files.write(credentials_file, secret_data['credentials.json'] ) # save value received from AWS into file store = file.Storage( token_file) # create a gsuite Storage object scopes = 'https://www.googleapis.com/auth/{0}'.format( desired_scope ) # full qualified name for the desired scopes flow = client.flow_from_clientsecrets( credentials_file, scopes) # create a gsuite flow object flags = argparser.parse_args( '--auth_host_name localhost --logging_level INFO'.split() ) # configure the use of a localhost server to received the oauth response run_flow( flow, store, flags ) # open browser and prompt user to follow the OAuth flow Files.delete( credentials_file ) # delete main gsuite credentials file (since we don't want it hanging around) return token_file # return file with token credentials
def folder_upload(self, folder, s3_bucket, s3_key):
    """Zip a folder, upload the zip to S3, delete the local zip; returns self for chaining."""
    zipped = Files.zip_folder(folder)
    self.file_upload(zipped, s3_bucket, s3_key)
    os.remove(zipped)
    return self
def file_upload_as_temp_file(self, file, bucket):
    """Upload file into the tmp-files folder under a random name (keeping the
    original extension); returns the resulting s3 key."""
    extension = Files.file_extension(file)
    key = '{0}/{1}'.format(self.tmp_file_folder, Files.temp_filename(extension))
    self.file_upload_to_key(file, bucket, key)
    return key
def cleanup_chrome_processes_and_tmp_files():
    """Remove leftover headless-chrome core dumps and try to kill their process trees."""
    for core_file in Files.find('/tmp/core.headless_shell.*'):
        pid = core_file.split('.')[-1]
        # this doesn't seem to be working since the "headless_shell <defunct>" is still there
        Process.run('pkill', ['-TERM', '-P', str(pid)])
        Files.delete(core_file)