def save_file(post_contents, public_file_path):
    """Persist a base64-encoded script and respond with its public URL.

    Decodes *post_contents* (base64) and appends the decoded data to the
    object-output channel at ``/dev/output``, then derives the file's
    public URL from *public_file_path* plus the ``HTTP_HOST`` and
    ``PATH_INFO`` environment variables, and emits a ``201 Created``
    response via ``snakebin.http_resp``.

    :param post_contents: base64-encoded request body.
    :param public_file_path: path whose last two ``/``-separated
        components are the container name and the short object name.
    """
    script_contents = base64.b64decode(post_contents)
    # NOTE(review): appends decoded data to the output device; this
    # assumes a Python-2-style bytes/str model — confirm under Python 3,
    # where a text-mode file would reject bytes.
    with open('/dev/output', 'a') as fp:
        fp.write(script_contents)
    # Everything before the last two path components is discarded.
    _rest, container, short_name = public_file_path.rsplit('/', 2)
    file_url = 'http://%(host)s/api/%(acct)s/%(cont)s/%(short_name)s\n'
    file_url %= dict(host=os.environ.get('HTTP_HOST'),
                     cont=container,
                     # Bug fix: default to '' so a missing PATH_INFO does
                     # not raise AttributeError on .strip('/').
                     acct=os.environ.get('PATH_INFO', '').strip('/'),
                     short_name=short_name)
    snakebin.http_resp(201, 'Created', msg=file_url)
import snakebin

if __name__ == '__main__':
    # The document to serve (or execute) arrives on the input channel.
    with open('/dev/input') as inp:
        document = inp.read()

    accept_header = os.environ.get('HTTP_ACCEPT', '')
    run_requested = os.environ.get('SNAKEBIN_EXECUTE', None)

    if 'text/html' not in accept_header:
        # A non-browser client: respond in plain text, running the
        # document first if execution was requested.
        body = document
        if run_requested is not None:
            body = snakebin.execute_code(document)
        snakebin.http_resp(200, 'OK', content_type='text/plain', msg=body)
    elif run_requested is not None:
        # Browser client asking for execution: return the program's
        # output rendered as an HTML response.
        snakebin.http_resp(200, 'OK',
                           content_type='text/html; charset=utf-8',
                           msg=snakebin.execute_code(document))
    else:
        # Browser client viewing the document: splice the escaped code
        # into the page template and serve the resulting page.
        with open('/index.html') as tmpl:
            page = tmpl.read().replace('{code}', escape(document))
        snakebin.http_resp(200, 'OK',
                           content_type='text/html; charset=utf-8',
                           msg=page)
import json
import os

import snakebin

# Collect the stripped, non-empty contents of every file on the input
# device and return them as a JSON array.
input_dir = '/dev/in'
results = []
for name in os.listdir(input_dir):
    with open(os.path.join(input_dir, name)) as handle:
        payload = handle.read().strip()
    if payload:
        results.append(payload)
snakebin.http_resp(200, 'OK', content_type='application/json',
                   msg=json.dumps(results))