def test_render(self):
    """Render the wardley-map example page and confirm a PNG file is produced."""
    render_params    = ['examples/wardley_map/cup-of-tea.html']
    screenshot_bytes = self.browser_commands.render(None, None, render_params)
    Files.delete(self.png_file)                 # start from a clean slate
    self._save_png_data(screenshot_bytes)
    assert Files.exists(self.png_file)
def test_render__with_clip_params(self):
    """Render the wardley-map example with an explicit clip region (x, y, width, height)."""
    render_params    = ['examples/wardley_map/cup-of-tea.html', 250, 50, 600, 200]
    screenshot_bytes = self.browser_commands.render(None, None, render_params)
    Files.delete(self.png_file)                 # remove any stale screenshot before asserting
    self._save_png_data(screenshot_bytes)
    assert Files.exists(self.png_file)
def test__init__(self):
    """A fresh Temp_File has an existing tmp folder, no file yet, and file_path == tmp_folder + '/' + tmp_file."""
    temp_file = Temp_File()
    assert Files.exists(temp_file.tmp_folder)
    assert Files.not_exists(temp_file.tmp_file)
    assert Files.not_exists(temp_file.file_path)
    assert temp_file.tmp_folder in temp_file.file_path
    # stripping folder and file name out of file_path should leave only the separator
    leftover = temp_file.file_path.replace(temp_file.tmp_folder, '').replace(temp_file.tmp_file, '')
    assert '/' == leftover
def run(event, context):
    """Lambda handler: render PlantUML source (event['puml']) to PNG.

    Loads the 'plantuml' dependency bundle, renders the diagram with the
    bundled plantuml.jar plus the static graphviz 'dot' binary, and returns
    {'png_base64': <base64 str>} — the value is None when rendering failed.
    """
    load_dependency('plantuml')
    dot_static   = '/tmp/lambdas-dependencies/plantuml/dot_static'
    plantuml_jar = '/tmp/lambdas-dependencies/plantuml/plantuml.jar'
    Process.run("chmod", ['+x', dot_static])
    Process.run("chmod", ['+x', plantuml_jar])
    os.environ['PLANTUML_LIMIT_SIZE'] = str(4096 * 4)       # raise max width to 4x the default (i.e. 16,384)
    os.environ['GRAPHVIZ_DOT']        = dot_static
    (fd, puml_file) = tempfile.mkstemp('.puml')
    os.close(fd)                                            # mkstemp's fd was previously leaked; close it, we only need the path
    png_file = puml_file.replace(".puml", ".png")
    code = event.get('puml')
    Files.write(puml_file, code)
    subprocess.run(['java', '-jar', plantuml_jar, '-Xmx2512m', '-tpng', '-o', '/tmp', puml_file],
                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if os.path.exists(png_file):
        with open(png_file, "rb") as image_file:
            png = base64.b64encode(image_file.read()).decode()
    else:
        png = None                                          # plantuml produced no output (e.g. bad source)
    return {"png_base64": png}
def create_temp_file(self, new_code=None):
    """Write self.lambda_code (optionally replaced by new_code) to <folder>/<file_name>.py; fluent, returns self."""
    if new_code:
        self.lambda_code = new_code
    target_path   = Files.path_combine(self.folder, '{0}.py'.format(self.file_name))
    self.tmp_file = target_path
    Files.write(target_path, self.lambda_code)
    assert Files.exists(target_path)
    return self
def test_folder_create(self):
    """folder_create is idempotent and returns the path of the folder it (re)creates."""
    target = '_tmp_folder'
    assert Files.folder_exists(target) is False
    assert Files.folder_create(target) == target        # first call creates the folder
    assert Files.folder_create(target) == target        # second call is a no-op with the same result
    assert Files.folder_exists(target) is True
    assert Files.folder_delete_all(target) is True
def cleanup_chrome_processes_and_tmp_files():
    """Remove leftover headless-chrome core-dump files and try to kill their child processes."""
    for core_file in Files.find('/tmp/core.headless_shell.*'):
        pid = core_file.split('.')[-1]          # the dump file name ends with the owning pid
        # this doesn't seem to be working, since the "headless_shell <defunct>" is still there
        Process.run('pkill', ['-TERM', '-P', str(pid)])
        Files.delete(core_file)
def load_dependency(target):
    """Make an S3-packaged dependency bundle importable inside a Lambda.

    Outside AWS (no AWS_REGION env var) this is a no-op. Inside AWS it checks
    that lambdas-dependencies/<target>.zip exists in the gw-bot-lambdas bucket,
    downloads and unpacks it under /tmp/lambdas-dependencies/<target> on the
    first call per execution environment, and adds that folder to sys.path.

    Raises:
        Exception: when the dependency zip is missing from S3.
    Returns:
        True when the unpacked folder exists (None outside AWS).
    """
    if os.getenv('AWS_REGION') is None:
        return
    from osbot_aws.apis.S3 import S3
    import shutil
    import sys
    s3        = S3()
    s3_bucket = 'gw-bot-lambdas'
    s3_key    = 'lambdas-dependencies/{0}.zip'.format(target)
    tmp_dir   = Files.path_combine('/tmp/lambdas-dependencies', target)
    if s3.file_exists(s3_bucket, s3_key) is False:
        raise Exception("In Lambda load_dependency, could not find dependency for: {0}".format(target))
    # a missing tmp folder means we are loading this for the first time on a new Lambda execution environment
    if Files.not_exists(tmp_dir):
        zip_file = s3.file_download(s3_bucket, s3_key, False)   # download the zip file with the dependencies
        shutil.unpack_archive(zip_file, extract_dir=tmp_dir)    # unpack them into tmp_dir
        sys.path.append(tmp_dir)                                # make tmp_dir importable
    return Files.exists(tmp_dir)
def test__using_with_valid_zip_and_target_folder(self):
    """Unzip_File extracts into the target folder and (delete flag True) removes it on context exit."""
    source_folder = Files.current_folder()
    target_folder = '/tmp/unzip_test'
    with Zip_Folder(source_folder) as zip_file:
        with Unzip_File(zip_file, target_folder, True) as temp_folder:
            assert Files.exists(temp_folder) is True    # extracted while inside the context
        assert Files.exists(temp_folder) is False       # cleaned up when the context closed
def __init__(self, headless=True):
    """Wire up a browser, a local web server rooted at ../web_root, and a page renderer for the vivagraph demo."""
    web_root         = Files.path_combine(Files.parent_folder(__file__), '../web_root')
    self.web_page    = '/vivagraph/simple.html'
    self.web_root    = web_root
    self.api_browser = API_Browser(headless=headless).sync__setup_browser()
    self.web_server  = Web_Server(web_root)
    self.render_page = Render_Page(api_browser=self.api_browser, web_server=self.web_server)
def delete(self, name):
    """Delete the markdown page for *name*; True when the file is gone afterwards, False if it never existed."""
    virtual_path = "{0}/{1}".format(self.base_folder, self.fix_name(name) + '.md')
    full_path    = self.md_file_path(virtual_path)
    if Files.exists(full_path) is False:        # nothing to delete
        return False
    Files.delete(full_path)
    return Files.exists(full_path) is False     # report whether the delete actually worked
def __init__(self, tmp_img=None, clip=None, headless=False):
    """Hold render configuration: a Render_Page serving osbot_browser/web_root, plus optional output image path and clip region."""
    views_root       = Files.path_combine(Files.parent_folder(__file__), '../../osbot_browser/web_root')
    self.headless    = headless
    self.path_views  = views_root
    self.render_page = Render_Page(headless=self.headless, web_root=views_root)
    self.tmp_img     = tmp_img
    self.clip        = clip
def start(self):
    """Launch `python3 -m http.server` rooted at web_root (creating it if missing), wait for it; fluent."""
    if Files.not_exists(self.web_root):
        Files.folder_create(self.web_root)      # the server process needs an existing working directory
    command = ["python3", "-m", "http.server", str(self.port)]
    self.server_proc = subprocess.Popen(command, cwd=self.web_root)
    self.wait_for_server_started()
    return self
def web_root(self):
    """Resolve the osbot_browser web_root folder for the current runtime; None when it can't be located."""
    if os.getenv('AWS_REGION') is not None:                             # running inside AWS
        return Files.path_combine('.', './osbot_browser/web_root')
    if 'test/browser' in Files.current_folder():                        # running from a unit test
        return Files.path_combine('.', '../../osbot_browser/web_root')
    parent_folder = Files.folder_name(__file__)
    if 'serverless-render/osbot_browser/browser' in parent_folder:      # running from the source checkout
        return Files.path_combine(parent_folder, '../web_root')
    return None
def run(event, context):
    """Lambda handler: write a small text file into /tmp and return the list of files found there."""
    file_name = event.get('file_name')              # file name arrives via the lambda params
    tmp_path  = '/tmp'                              # location of the lambda temp folder
    tmp_file  = Files.path_combine(tmp_path, file_name)
    Files.write(tmp_file, 'some text')              # create the file (with some text)
    return Files.find(tmp_path + '/*.*')            # list of files currently in the temp folder
def test_add_file(self):
    """Packaging the hello_world lambda source and invoking it returns its greeting."""
    lambda_file = Files.path_combine(__file__, '../../../../osbot_aws/lambdas/dev/hello_world.py')
    assert Files.exists(lambda_file)
    self.package.add_file(lambda_file)
    assert self.package.get_files() == ['/hello_world.py']
    self.package.aws_lambda.handler = 'hello_world.run'     # entry point inside the packaged file
    self.package.update()
    assert self.package.invoke() == 'From lambda code, hello None'
def __init__(self, web_page, headless=True):
    """Create a browser, a local web server (rooted at ../web_root) and a renderer for the given page."""
    web_root         = Files.path_combine(Files.parent_folder(__file__), '../web_root')
    self.web_page    = web_page
    self.title       = 'browser view'
    self.web_root    = web_root
    self.api_browser = API_Browser(headless=headless).sync__setup_browser()
    self.web_server  = Web_Server(web_root)
    self.render_page = Render_Page(api_browser=self.api_browser, web_server=self.web_server)
def test_get_set_last_chrome_session(self):
    """Session data round-trips through set/get_last_chrome_session via a temp file."""
    self.api.file_tmp_last_chrome_session = Files.temp_file()
    session_data = {'chrome_devtools': 'ws://127.0.0.1:64979/devtools/browser/75fbaab9-33eb-41ee-afd9-4aed65166791'}
    self.api.set_last_chrome_session(session_data)
    assert self.api.get_last_chrome_session() == session_data
    Files.delete(self.api.file_tmp_last_chrome_session)     # tidy up the temp session file
def test_screenshot_url(self):
    """Screenshot a live url into /tmp and confirm the image file was written."""
    url     = 'https://getbootstrap.com/docs/4.3/examples/dashboard/'
    tmp_img = '/tmp/test_screenshot_html.png'
    Files.delete(tmp_img)                       # don't let a stale file satisfy the assert
    self.render_page.screenshot_url(url, tmp_img)
    assert Files.exists(tmp_img)
def __init__(self):
    """Set up the datatables demo page: a browser-backed Render_Page plus default table settings."""
    web_root          = Files.path_combine(Files.parent_folder(__file__), '../web_root')
    self.web_page     = '/datatables/simple.html'
    self.web_root     = web_root
    self.api_browser  = API_Browser().sync__setup_browser()
    self.render_page  = Render_Page(api_browser=self.api_browser, web_root=web_root)
    self.table_width  = '100%'
    self.columns_defs = None
    self.table_title  = None
def upload_dependency(target):
    """Upload the local dependency folder for *target* to the lambdas S3 bucket; True once the zip is in place.

    Raises:
        Exception: when the local dependency folder does not exist.
    """
    s3        = S3()
    s3_bucket = 'gw-bot-lambdas'
    s3_file   = 'lambdas-dependencies/{0}.zip'.format(target)
    path_libs = Files.path_combine('../../../_lambda_dependencies/', target)
    if Files.not_exists(path_libs):
        raise Exception("In Lambda upload_dependency, could not find dependency for: {0}".format(target))
    s3.folder_upload(path_libs, s3_bucket, s3_file)
    return s3.file_exists(s3_bucket, s3_file)
def __init__(self, headless=True):
    """Set up the risks-dashboard page with a browser-backed Render_Page and empty graph/jira state."""
    self.web_page = '/gs/risk/risks-dashboard.html'
    self.web_root = Files.path_combine(Files.parent_folder(__file__), '../web_root')
    self.headless = headless
    # NOTE(review): headless is passed twice positionally here, while sibling classes
    # call API_Browser(headless=headless) — confirm what API_Browser's second
    # positional parameter means before changing this.
    self.api_browser = API_Browser(self.headless, self.headless).sync__setup_browser()
    self.render_page = Render_Page(api_browser=self.api_browser, web_root=self.web_root)
    self.graph_name = None      # set later by callers before rendering
    self.jira_key = None        # set later by callers before rendering
def __init__(self, base_folder=None, folder_oss=None):
    """Resolve the OSS root and base folders (applying defaults) and locate the '_template.md' page template."""
    self.folder_oss  = folder_oss
    self.base_folder = base_folder
    if folder_oss is None:                  # default: four levels up from this file
        self.folder_oss = Files.path_combine(__file__, '../../../..')
    if base_folder is None:
        self.base_folder = '.'
    relative_template  = "{0}/{1}".format(self.base_folder, '_template.md')
    self.file_template = Files.path_combine(self.folder_oss, relative_template)
    self._all_md_files = None               # lazy cache, populated elsewhere
def test_participant_create(self):
    """Creating a participant writes the expected markdown file; deleting it removes the file again."""
    name   = 'an test user'
    result = self.hugo_page.create(name)
    assert result.get('status') == 'ok'
    data = result.get('data')
    path = self.hugo_page.md_file_path(data['path'])
    assert data['metadata']['title'] == 'an test user'
    assert data['path'] == '/content/participant/an-test-user.md'   # name is slugified into the path
    assert Files.exists(path) is True
    assert self.hugo_page.delete(name) is True
    assert Files.exists(path) is False
def test_file_extension(self):
    """file_extension returns the final suffix (dot included), '' when absent, and tolerates None."""
    expectations = {'/path/to/somefile.ext': '.ext',
                    '/path/to/somefile.'   : '.',
                    '/path/to/somefile..'  : '.',
                    '/path/to/somefile'    : '',
                    '/a/b.c/d'             : '',      # dot in a folder name doesn't count
                    '/a/b.c/.git'          : '',      # leading-dot file has no extension
                    '/a/b.c/a.git'         : '.git',
                    '/a/b.c/a.git.abc'     : '.abc',  # only the last suffix is returned
                    None                   : ''}
    for path, extension in expectations.items():
        assert Files.file_extension(path) == extension
def setup(self):
    """Build the Gmail API client from OAuth token material held in AWS Secrets Manager.

    Loads the JSON secret for self.secret_id, writes its 'storage' payload to a
    temp file so oauth2client's file.Storage can read it, then builds the v1
    gmail service with the authorized Http transport. Fluent: returns self.
    """
    secret_data = json.loads(Secrets(self.secret_id).value())
    storage_file = '/tmp/gmail_storage_token.json'      # lambda-writable location for the token file
    Files.write(storage_file, secret_data['storage'])
    store = file.Storage(storage_file)
    creds = store.get()
    self.service = build('gmail', 'v1', http=creds.authorize(Http()))
    # note: 'storage.json file created using storage
    # SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
    # if not creds or creds.invalid:
    #     flow = client.flow_from_clientsecrets(self.credentials_file, SCOPES)
    #     flags = argparser.parse_args('--auth_host_name localhost --logging_level INFO'.split())
    #     creds = run_flow(flow, store, flags)
    return self
def test_zip_files(self):
    """zip_files builds the requested zip (from ../*.py) and the file can be removed afterwards."""
    zip_path = '/tmp/test_zip.zip'
    Files.delete(zip_path)
    assert Files.exists(zip_path) is False
    Dev.pprint(Files.zip_files('..', '*.py', '/tmp/test_zip.zip'))
    assert Files.exists(zip_path) is True
    Files.delete(zip_path)
    assert Files.exists(zip_path) is False
def test_screenshot_file(self):
    """screenshot_file captures a temp html file, honouring the clip region, and writes the image."""
    with Temp_File(self.html, 'html') as temp_file:
        clip_region = {'x': 1, 'y': 1, 'width': 180, 'height': 30}
        img_file    = self.render_page.screenshot_file(temp_file.file_path, self.tmp_img, clip=clip_region)
        assert Files.exists(img_file)
def run(event, context):
    """Lambda handler: convert graphviz DOT source (event['dot']) to SVG, base64-encoded.

    On any failure the error is posted back to the Slack channel and None is
    returned.
    """
    load_dependency('pydot')
    channel = event.get('channel')
    data    = event.get('dot')
    log_to_elk("in dot to svg: {0}".format(event))
    import dot_parser
    try:
        (fd, tmp_file) = tempfile.mkstemp('.dot')       # was mkstemp('dot)') — typo'd suffix; also close the fd, it was leaked
        os.close(fd)
        dot_static = '/tmp/lambdas-dependencies/pydot/dot_static'
        Process.run("chmod", ['+x', dot_static])
        # Slack html-encodes < and > which broke the dot parser; decode them back
        # (the previous replace('<','<') was a no-op)
        data = data.replace('&lt;', '<').replace('&gt;', '>')
        # graph = pydot.graph_from_dot_data(data).pop()     # <from pydot> the code below gives a better error message from the dot parser
        graphparser = dot_parser.graph_definition()
        graphparser.parseWithTabs()
        tokens = graphparser.parseString(data)
        graph  = list(tokens).pop()                         # </from pydot>
        graph.write_svg(tmp_file, prog=dot_static)
        svg_raw = Files.contents(tmp_file)
        return base64.b64encode(svg_raw.encode()).decode()
    except Exception as error:
        slack_message("[dot_to_svg] Error: {0} ".format(error), [], channel)
        return None
def run(event, context):
    """Lambda handler: send a png (inline base64 or fetched from S3) to a Slack channel.

    Resolves the bot token via Secrets Manager (with per-team overrides),
    materialises the image into a temp file, then hands it to
    send_file_to_slack. Returns None when no image data was supplied.
    """
    channel        = event.get('channel')
    png_data       = event.get('png_data')
    s3_bucket      = event.get('s3_bucket')
    s3_key         = event.get('s3_key')
    title          = event.get('title')
    team_id        = event.get('team_id')
    aws_secrets_id = event.get('aws_secrets_id')
    if team_id == 'T7F3AUXGV':
        aws_secrets_id = 'slack-gs-bot'             # hard coded values
    if team_id == 'T0SDK1RA8':
        aws_secrets_id = 'slack-gsbot-for-pbx'      # need to move to special function
    bot_token = Secrets(aws_secrets_id).value()
    if png_data:
        tmp_file = Files.temp_file('.png')
        with open(tmp_file, "wb") as fh:
            fh.write(base64.decodebytes(png_data.encode()))
    elif s3_bucket and s3_key:
        tmp_file = S3().file_download_and_delete(s3_bucket, s3_key)
    else:
        return None                                 # nothing to send
    return send_file_to_slack(tmp_file, title, bot_token, channel)