def wrapper(*args, **kargs):
    """JSON view wrapper: answers CORS preflight, supports an optional JSONP
    callback, deflate-compresses large bodies, and attaches no-cache and
    CORS headers to the response."""
    if request.method.lower() == 'options':
        return _json_view_options(grant_preflighted_request, allow_origin)

    # JSONP callback may arrive in the query string or, for POST, the form.
    _args = request.args
    callback = _args['callback'] if 'callback' in _args else None
    if request.method == 'POST':
        _args = request.form
        if 'callback' in _args:
            callback = _args['callback']

    view_data = view_func(*args, **kargs)
    if isinstance(view_data, flask.Response):
        # View already produced a full response; pass it through untouched.
        return view_data

    resp_text = json_dumps(view_data)
    if callback:
        # Wrap the JSON in the JSONP callback invocation.
        resp_text = callback + '(' + resp_text + ')'

    # -------------------- compress --------------------
    IE = re.search(r'(?:msie)|(?:boie)|(?:trident\/\d+)',
                   request.user_agent.string, re.I | re.S)
    content_type = 'application/json;charset=UTF-8'
    compress = gzip and len(resp_text) > 1024 * 4
    accept_enc = request.headers['accept-encoding'] \
        if 'accept-encoding' in request.headers else ''
    # BUG FIX: re.S was being passed positionally as re.split()'s *maxsplit*
    # argument (value 16), silently truncating the split of long
    # Accept-Encoding headers so 'deflate' could be missed. Flags are
    # pointless for this character-class pattern, so the argument is dropped.
    # (Also removed the no-op line `accept_enc = accept_enc or accept_enc`.)
    accept_enc = re.split(r'[\,\;\s]+', accept_enc.lower())
    if compress and 'deflate' in accept_enc:
        resp = make_response(zlib.compress(resp_text))
        resp.content_type = content_type
        if IE:
            # Presumably works around IE's historical mishandling of
            # zlib-wrapped deflate by advertising gzip -- TODO confirm.
            resp.headers['Content-Encoding'] = 'gzip'
        else:
            resp.headers['Content-Encoding'] = 'deflate'
    else:
        resp = make_response(resp_text)
        resp.content_type = content_type

    # -------------------- no-cache --------------------
    _cache_control = (
        'no-cache',
        'no-store',
        'must-revalidate',
    )
    resp.headers['cache-control'] = ','.join(_cache_control)
    resp.headers['pragma'] = 'no-cache'
    resp.headers['expires'] = '0'

    # -------------------- CORS --------------------
    # Echo the request Origin when present, else fall back to the
    # configured origin, else wildcard.
    _req_origin = request.headers.get('origin', u'') or allow_origin
    _req_origin = _req_origin or '*'
    resp.headers['Access-Control-Allow-Origin'] = _req_origin
    resp.headers['Access-Control-Allow-Credentials'] = 'true'
    # Extra caller-supplied headers from the decorator closure.
    for h in headers:
        resp.headers[h] = headers[h]
    return resp
def compile_code(code, output_type="exe", compiler_opts=None):
    """
    Compile `code` to the requested output form and echo the result.

    supported_output_types = [
        "exe",
        "ll",
        "wasm",
        "ast",
        "sema",
        "lowered_ast",
        "tokens",
    ]
    """
    # BUG FIX: the default used to be a single shared `CompilerOptions()`
    # instance created once at import time; the "ll"/"wasm" branches mutate
    # `compiler_opts.target_code`, so that mutation leaked into every later
    # call that relied on the default. A fresh instance is now created per
    # call (backward-compatible for callers passing their own options).
    if compiler_opts is None:
        compiler_opts = CompilerOptions()

    if output_type == "tokens":
        tokens = Lexer(code, compiler_opts).lex()
        result = json_dumps(tokens)
    elif output_type == "ast":
        ast = Parser.from_code(code, compiler_opts).parse()
        result = json_dumps(ast)
    elif output_type == "sema":
        tokens = Lexer(code, compiler_opts).lex()
        ast = Parser(tokens, compiler_opts).parse()
        semantic_info = SemanticAnalyzer(ast, tokens, compiler_opts).analyze()
        result = json_dumps(semantic_info)
    elif output_type == "ll":
        compiler_opts.target_code = "llvm"
        tokens = Lexer(code, compiler_opts).lex()
        ast = Parser(tokens, compiler_opts).parse()
        semantic_info = SemanticAnalyzer(ast, tokens, compiler_opts).analyze()
        llvm = LLVMCodegen(ast, semantic_info).generate()
        result = llvm.dumps()
    elif output_type == "wasm":
        compiler_opts.target_code = "wasm"
        tokens = Lexer(code, compiler_opts).lex()
        ast = Parser(tokens, compiler_opts).parse()
        semantic_info = SemanticAnalyzer(ast, tokens, compiler_opts).analyze()
        # NOTE(review): this emits semantic info, not generated wasm --
        # looks like a placeholder pending a wasm codegen; confirm.
        result = json_dumps(semantic_info)
    else:
        click.echo("Unimplemented Output Type!")
        return

    click.echo(result)
def penqueue(uid, type, priority, body, mdb=None):
    """Append to a user's pending ('P' status) queue record; repeated calls
    for the same user keep extending the same pending record.

    @body as dict, required shape:
        {
            'email': 'recipient email address',
            'values': 'values to append; must be a list'
        }
    """
    # NOTE: assert-based validation is stripped when running under `python -O`.
    assert (isinstance(body, dict))
    assert ('email' in body)
    assert ('values' in body and isinstance(body['values'], list))
    # Fetch this user's queue record that is still in pending ('P') state.
    q = Queue.get_user_status(uid, type, 'P', mdb=mdb)
    if q is None:
        # No pending record yet: initialise a fresh one.
        # NOTE(review): here `body` is stored as a plain dict, while the
        # update path below stores a JSON string -- presumably Queue.replace
        # serializes dicts itself; confirm.
        q = web.utils.storage(id=0,
                              user_id=uid,
                              type=type,
                              priority=priority,
                              status='P',
                              body=body)
    else:
        # Merge the new payload into the existing pending record.
        q.body = utils.json_loads(q.body)
        q.body['email'] = body['email']
        q.body['values'].extend(body['values'])
        q.body = utils.json_dumps(q.body)
    # Upsert: overwrite the previous queue record.
    return Queue.replace(q, mdb=mdb)
def format(self, record):
    """Render `record`, first bundling any non-standard LogRecord attributes
    into a JSON-encoded 'extra' field (written straight into __dict__ to
    bypass any attempt to make the record read-only)."""
    # exclude all known keys in Record
    # bundle the remainder into an 'extra' field
    # frozenset gives O(1) membership tests instead of scanning a list
    # once per record attribute.
    _known_keys = frozenset([
        'asctime', 'created', 'filename', 'funcName', 'levelname',
        'levelno', 'lineno', 'module', 'msecs', 'message', 'name',
        'pathname', 'process', 'processName', 'relativeCreated',
        'thread', 'threadName',
        # non-formatting fields present in __dict__
        'exc_text', 'exc_info', 'msg', 'args',
    ])
    unknown_fields = {
        key: val
        for key, val in record.__dict__.items() if key not in _known_keys
    }
    record.__dict__['extra'] = utils.json_dumps(unknown_fields)
    return super(FormatterWithEncodedExtras, self).format(record)
def GET(self, *args, **kwargs):
    """Generic JSON GET handler: validates common request arguments, opens a
    DB session, dispatches to self.get(), and returns a JSON-serialized
    Response envelope. Errors are mapped to (code, message) on the envelope.
    """
    # Start pessimistic: any unhandled failure leaves this default in place.
    response = Response.internal_error()
    web.ctx.orm = None
    try:
        # Common argument checks; these raise err.BaseError on failure.
        self.get_argument_timestamp()
        self.get_argument('token')
        self.get_argument('sign')
        self.log_request()
        with models.Session() as session:
            web.ctx.orm = session
            result = self.get(*args, **kwargs)
            response.code = 0
            response.message = err.Success.message
            response.result = result
        pass
    except err.BaseError as e:
        # Known application error: surface its code/message to the client.
        logger.error("base error: %s", e.message)
        response.code = e.code
        response.message = e.message
    except:
        # NOTE(review): bare except is a deliberate top-level boundary --
        # the traceback is logged and the default internal_error envelope
        # is returned; the exception type itself is swallowed.
        logger.exception('JsonHandler failure:')
        pass
    finally:
        # Drop the per-request ORM handle (always assigned above).
        del web.ctx.orm
    response_json_data = utils.json_dumps(response)
    return response_json_data
async def hsm_start(self, new_policy=None):
    """Start HSM mode on the attached Coldcard.

    If `new_policy` is given it is JSON-serialized and uploaded to the
    device first, and a desensitized summary is cached in the bunker
    prefs (BP) -- unless the policy asks for privacy over UX, in which
    case identifying details are deliberately cleared.

    Raises RuntimeError if the device rejects the policy.
    """
    args = []
    if new_policy is not None:
        # must upload it first
        data = json_dumps(new_policy).encode('utf8')
        args = self.dev.upload_file(data)

        # save a trimmed copy of some details, if they want that
        bk = policy.desensitize(new_policy)
        BP['summary'] = None
        if not bk.get('priv_over_ux'):
            BP['priv_over_ux'] = False
            BP['policy'] = bk          # full copy
            BP['xfp'] = xfp2str(self.dev.master_fingerprint)
            BP['serial'] = self.dev.serial
        else:
            # privacy-over-UX: store nothing that identifies the device
            BP['priv_over_ux'] = True
            BP['policy'] = None
            BP['xfp'] = None
            BP['serial'] = None
        BP.save()

    try:
        await self.send_recv(CCProtocolPacker.hsm_start(*args))
    except CCProtoError as exc:
        # Device refused the policy; log and re-raise with its message.
        msg = str(exc)
        logging.error("Coldcard didn't like policy: %s" % msg)
        raise RuntimeError(str(msg))
def wrapper(*v_args, **k_args):
    """Require an authenticated user and a passing privilege check, then
    invoke the wrapped view; dict results get the user's profile injected
    under 'user' and 'user_json'."""
    user = request.environ['user']

    # Not logged in: bounce the browser to the login page client-side.
    # (A server-side redirect to /u/login/ was used here historically.)
    if not user.authenticated:
        return js_redirect()

    # Privilege check may short-circuit with its own response.
    denied = pri_check(user, k_args)
    if denied:
        return denied

    result = view_func(*v_args, **k_args)
    if isinstance(result, dict):
        result['user'] = user.get_dict()
        result['user_json'] = json_dumps(user.get_dict())
    return result
def set(self, session_id, session_dict):
    """Persist `session_dict` as JSON under `session_id` in redis with a TTL.

    An empty/None session dict removes the session instead of storing it.
    """
    if not session_id:
        return
    if not session_dict:
        # BUG FIX: previously execution fell through after clear() and
        # immediately re-created the key with an empty payload; now we stop.
        self.clear(session_id)
        return
    data = json_dumps(session_dict)
    self.conn.set(session_id, data)
    self.conn.expire(session_id, self.TTL)
def send(self, message):
    """Send `message` over the session.

    Strings pass through untouched; lists and dicts are JSON-encoded
    first. Any other type raises.
    """
    parent = super(PlayerSession, self)
    if isinstance(message, basestring):
        return parent.send(message)
    if isinstance(message, (list, dict)):
        return parent.send(utils.json_dumps(message))
    raise Exception('send data fail, type error')
def update(mq):
    """Update the given mail-queue record's status/message by id.

    mq.message may already be a (unicode) string, or any object that
    utils.json_dumps can serialize; non-strings are encoded before storage.
    """
    return db.manager.master_core.update(
        'queue_mail',
        status=mq.status,
        message=mq.message if isinstance(
            mq.message, (str, unicode)) else utils.json_dumps(mq.message),
        where='id = $id',
        vars=mq)
def fetch_from(self, queue_name):
    """Fetch one message from `queue_name` and decode it as JSON.

    Returns the decoded object, or None on timeout or invalid JSON.
    """
    # Try to fetch the message & bail out if it fails
    raw_message = self.get(queue_name, timeout=self.TIMEOUT)
    if not raw_message:
        logger.info("Timed out when fetching message from %s" % queue_name)
        return None

    # Try to decode the raw message into JSON
    try:
        decoded_message = json.loads(raw_message)
    except ValueError:
        # BUG FIX: the old bare `except` fell through and the next statement
        # referenced the never-assigned `decoded_message`, raising NameError.
        # Narrowed to ValueError (json.JSONDecodeError subclasses it) and
        # return early.
        logger.info("Invalid JSON message from %s: %s" %
                    (queue_name, json_dumps(raw_message)))
        return None

    logger.debug("Got message from queue %s: %s" %
                 (queue_name, json_dumps(decoded_message)))
    return decoded_message
def lets_do_spider(self): """针对2.0版本""" # 省略了参数验证环节 pname = self.get_argument('pname') cardNum = self.get_argument('cardNum') # 使用多进程 logger.debug('接受参数:\t{0}\t{1}'.format(pname, cardNum)) result = executoer_for_web_server(pname, cardNum) return json_dumps(result)
def get(self, id):
    """Get an application by id, or list all applications when id is None."""
    if id is not None:
        app.logger.info('Get application: %s' % id)
        application = Application.query.filter_by(id=id).first_or_404()
        return application.to_json()
    # No id supplied: return the whole collection.
    app.logger.info('List all application')
    return json_dumps(Application.query.all())
def get(self, id):
    """Get an instance by id, or list all instances when id is None."""
    if id is not None:
        app.logger.info('Get instance: %s' % id)
        instance = Instance.query.filter_by(id=id).first_or_404()
        return instance.to_json()
    # No id supplied: return the whole collection.
    app.logger.info('List instances')
    return json_dumps(Instance.query.all())
def wrapper(*args, **kargs):
    """Invoke the wrapped view, then inject the JSON-encoded menu items
    visible to the current user's privileges into dict results."""
    user = request.environ['user']
    result = view_func(*args, **kargs)
    if isinstance(result, dict):
        privs = user.privileges
        # Prefer the decorator-supplied menu_items, falling back to the
        # module-level items.
        source = menu_items if menu_items else items
        result['menu_items'] = json_dumps(get_items(source, privs, _li))
    return result
def set(self, session_id, session_dict):
    """Write `session_dict` as JSON to <folder>/<session_id>.

    An empty session dict deletes the session file instead of writing '{}'.
    """
    assert len(session_id) > 0
    self._check_folder()
    data_path = os.path.join(self.folder, session_id)
    if session_dict:  # idiom: truthiness instead of len(...) >= 1
        # `with` closes the handle; the explicit f.close() that used to sit
        # inside the with-block was redundant and has been removed.
        with open(data_path, 'wb+') as f:
            f.write(json_dumps(session_dict))
    else:
        # Empty session: remove the backing file if it exists.
        if os.access(data_path, os.F_OK):
            os.unlink(data_path)
def test_fs_outgoing_invalid(self):
    # Build a well-formed 'published' response for the first incoming
    # request and write it to the outgoing queue.
    inc = fs_adaptor.IncomingQueue(self.ingest_dir, conf.PUBLISH)
    out = fs_adaptor.OutgoingQueue()
    valid_request = list(inc)[0]
    valid_response = adapt.mkresponse(conf.PUBLISHED,
                                      "dummy-published-message",
                                      request=valid_request)
    valid_response_json = utils.json_dumps(valid_response)
    out.write(valid_response_json)
    # NOTE(review): asserts one *valid* write and zero invalids, so the
    # name 'test_fs_outgoing_invalid' looks misleading -- confirm intent.
    self.assertEqual(len(out.valids), 1)
    self.assertEqual(len(out.invalids), 0)
    self.assertEqual(len(out.errors), 0)
def __call__(cls, column_data):
    """Coerce raw column data into the matching JSON wrapper type.

    If `column_data` is a JSON string it is parsed; otherwise it is kept
    as a python object and serialized to produce the string form. The
    result is dispatched to JSON.JsonDict / JsonList / JsonString based
    on the parsed value's type.
    """
    try:
        pyobj = json.loads(column_data)
        json_string = column_data
    except (ValueError, TypeError):
        # Not a JSON string (or not a string at all): keep the object and
        # serialize it ourselves.
        pyobj = column_data
        json_string = utils.json_dumps(column_data)
    if isinstance(pyobj, dict):
        return type.__call__(JSON.JsonDict, pyobj, json_string)
    if isinstance(pyobj, list):
        return type.__call__(JSON.JsonList, pyobj, json_string)
    return type.__call__(JSON.JsonString, pyobj, json_string)
def test_fs_outgoing_valid(self):
    "valid responses can be written without errors"
    inc = fs_adaptor.IncomingQueue(self.ingest_dir, 'ingest')
    out = fs_adaptor.OutgoingQueue()
    valid_request = list(inc)[0]
    # Build a well-formed 'ingested' response for the first incoming request.
    valid_response = adapt.mkresponse(conf.INGESTED,
                                      "dummy-ingested-message",
                                      request=valid_request)
    valid_response_json = utils.json_dumps(valid_response)
    out.write(valid_response_json)
    # Exactly one valid write; nothing classified invalid or errored.
    self.assertEqual(len(out.valids), 1)
    self.assertEqual(len(out.invalids), 0)
    self.assertEqual(len(out.errors), 0)
def main(event, context):
    """Main.

    Lambda entry point: dispatch the action named in each SQS record's
    JSON body to its handler; returns the first handler's JSON result.
    """
    logging.info('Using queue %s', QUEUE_URL)
    logging.debug(event)
    actions = {
        'init-inventory-hosts': init_inventory_hosts,
        'inventory-hosts': inventory_hosts,
        'inventory-wait': command_complete,
        'report-delta': make_delta,
    }
    if not event.get('Records'):
        # Malformed event; logged but not fatal (the loop below is a no-op).
        logging.error('No Records key.')
        logging.error(event)
    # We should have only one record per event, but sanity
    for record in event.get('Records', []):
        # if we don't have
        logging.debug(json_dumps(record))
        data = json.loads(record['body'])
        logging.info(data)
        if data['action'] in actions:
            # NOTE: returns after the first recognized record.
            return json_dumps(actions[data['action']](data, context))
        raise Exception('Unknown action {}.'.format(data['action']))
def save(self):
    """Atomically persist these settings: JSON-serialize, encrypt, write to
    a temp file and rename it over the real one, then notify watchers."""
    target = self.filename
    scratch = target + '.tmp'
    payload = self.box.encrypt(json_dumps(dict(self)).encode('utf8'))
    with open(scratch, 'wb') as fp:
        fp.write(payload)
    # rename() makes the update atomic on the same filesystem.
    os.rename(scratch, target)
    logging.info(f"Saved bunker settings to: {target}")
    self.notify_watchers()
def send_response(outgoing, response):
    # `response` here is the result of `mkresponse` below
    """Validate `response` against the response schema and write it to the
    outgoing queue; schema failures are annotated with the validation error
    and routed to the error channel instead."""
    try:
        utils.validate(response, conf.RESPONSE_SCHEMA)
    except ValidationError as err:
        # response doesn't validate. this probably means we had an error
        # decoding the request and have no id or token; since the message
        # will not validate, it cannot be sent back normally.
        response['validation-error-msg'] = str(err)
        channel = outgoing.error
    else:
        channel = outgoing.write
    channel(utils.json_dumps(response))
    return response
def _call(self, http_method, api_method, **kwargs):
    """
    Call a remote API endpoint.
    @http_method as str, HTTP verb to use e.g. GET,POST,PUT,DELETE
    @api_method as str, API method to invoke e.g. messages/domains
    @kwargs as dict, parameters required by the API method
    """
    # Resolve the verb to the matching `requests` function (requests.get, ...).
    hm = getattr(requests, http_method, None)
    if hm is None:
        raise APIError('HTTP method was invalid.')
    # API credentials ride along with every request.
    kwargs['api_user'] = self.api_user
    kwargs['api_key'] = self.api_key
    while True:
        try:
            print utils.json_dumps(kwargs)
            return hm(
                self._make_url(api_method)
                #, auth = ( 'api', self.api_key )
                , data=kwargs)
        except ConnectionError, ce:
            # Connection was reset by the peer.
            if self._connection_reset_retry >= 3:
                raise ce
            # Connection errors are retried up to 3 times;
            # beyond that the exception propagates.
            print '[SendcloudAPI]CONNECTION RESET RETRY %s/3' % self._connection_reset_retry
            self._connection_reset_retry += 1
            continue
        except Exception, e:
            # Timeouts are retried (indefinitely); anything else propagates.
            # NOTE(review): matching 'timed out' in str(e) is fragile, and
            # this retry has no upper bound -- confirm intended.
            if 'timed out' in str(e):
                print '[SendcloudAPI]TIME OUT RETRY'
                continue
            raise e
def wrapper(*args, **kargs):
    """CORS-aware JSON view wrapper: answers OPTIONS preflights, and
    JSON-encodes list/dict/tuple view results with CORS headers attached.
    Other result types pass through unchanged."""
    if request.method.lower() == 'options':
        return _json_view_options(grant_preflighted_request, allow_origin)
    _resp = view_func(*args, **kargs)
    # Idiom fix: one isinstance() call with a tuple of types replaces the
    # chain of three or-ed isinstance() calls.
    if isinstance(_resp, (list, dict, tuple)):
        resp = make_response(json_dumps(_resp))
        resp.content_type = 'text/javascript; charset=utf-8'
        # Echo the request Origin when present, else the configured default.
        _req_origin = request.headers.get('origin', u'') or allow_origin
        resp.headers['Access-Control-Allow-Origin'] = _req_origin
        resp.headers['Access-Control-Allow-Credentials'] = 'true'
        return resp
    return _resp
def process_message(self, message):
    """Route one measurement to the data file of every active experiment
    matching it; periodically purges open files."""
    # Purge open files once in a while
    current_time = time.time()
    if current_time - self._last_files_purge >= self.FILES_PURGE_THRESHOLD:
        self.purge_files()

    # Get active experiments matching this message
    experiments = Experiment.get_active_experiments_matching(message)
    experiment_ids = [str(e.id) for e in experiments]
    if experiment_ids:  # idiom: truthiness instead of len(...) > 0
        self.logger.info("Measurement %r matches experiments %r" %
                         (json_dumps(message), experiment_ids))

    # Write the measurement to each experiment file
    for e in experiments:
        efile = self.get_file_for_experiment(e)
        efile.put(message)
        self.logger.info("Written measurement to file %s" % e.file)
def push(self, queue_name, entity, event, body, delayseconds=0, priority=8):
    """Push an event onto the named queue.

    The message payload is JSON of the form
    {'param': [entity, event, *body]}, with optional delivery delay and
    priority applied to the queue message.
    """
    params = [entity, event] + list(body)
    msg = Message(utils.json_dumps({'param': params}))
    msg.set_delayseconds(delayseconds)
    msg.set_priority(priority)
    return self[queue_name].send_message(msg)
def _wrapper(*v_args, **v_kargs):
    """Access-control wrapper.

    Unless the decorator was given ignore=True, requires an authenticated
    user and a passing pri_check (either can short-circuit the view).
    When the decorator was given inject=True, the user's profile is
    injected into dict results as 'user' / 'user_json'.
    """
    user = request.environ['user']
    # Idiom fix: dict.get() replaces the membership-test-then-index pattern.
    inject = kargs.get('inject', False)
    # Idiom fix: the old `if ...: pass / else:` inversion is now a plain
    # positive guard.
    if not kargs.get('ignore'):
        # do dac check
        if not user.authenticated:
            return js_redirect()
        result_chk = pri_check(user, kargs)
        if result_chk:
            return result_chk
    result = view_func(*v_args, **v_kargs)
    if isinstance(result, dict) and inject:
        result['user'] = user.get_dict()
        result['user_json'] = json_dumps(user.get_dict())
    return result
async def push_status_updates_handler(ws):
    """Long-lived websocket loop: push STATUS snapshots to the client
    whenever they change, and at least every 120s as a keep-alive."""
    # block for a bit, and then send display updates (and all other system status changes)
    # - there is no need for immediate update because when we rendered the HTML on page
    #   load, we put in current values.
    await asyncio.sleep(0.250)

    last = None
    while 1:
        # get latest state
        now = STATUS.as_dict()
        if last != now:
            # it has changed, so send it.
            await ws.send_str(json_dumps(dict(vue_app_cb=dict(update_status=now))))
            last = now

        # wait until next update, or X seconds max (for keep alive/just in case)
        try:
            await asyncio.wait_for(STATUS._update_event.wait(), 120)
        except asyncio.TimeoutError:
            # force an update on the next pass even if nothing changed
            last = None
def set(self, session_id, session_dict):
    """Upsert `session_dict` (JSON-encoded) into t_session by session_id,
    stamping last_update with the current unix time.

    An empty session dict clears the stored session instead of writing '{}'.
    """
    if not session_id:
        return
    if not session_dict:
        # BUG FIX: previously execution fell through after clear() and
        # re-inserted an empty session row; now we stop here. (Same defect
        # existed in the redis-backed session store.)
        self.clear(session_id)
        return
    data = json_dumps(session_dict)
    conn, cur = None, None
    try:
        conn = self._conn()
        cur = conn.cursor()
        sql = 'select 1 from t_session where session_id=?'
        cur.execute(sql, (session_id, ))
        row = cur.fetchone()
        t = int(time.time())
        if row:
            sql = ('update t_session ',
                   ' set session_data=?, last_update=? ',
                   ' where session_id=?')
            cur.execute(''.join(sql), (
                data,
                t,
                session_id,
            ))
        else:
            sql = (
                'insert into t_session ',
                ' (session_id, session_data, last_update) ',
                ' values ',
                ' (?, ?, ?) ',
            )
            cur.execute(''.join(sql), (session_id, data, t))
        conn.commit()
    finally:
        # Always release cursor and connection, even on failure.
        if cur:
            cur.close()
        if conn:
            conn.close()
async def ws_api_handler(ses, send_json, req, orig_request):     # handle_api
    #
    # Handle incoming requests over websocket; send back results.
    # req = already json parsed request coming in
    # send_json() = means to send the response back
    #
    """Dispatch one parsed websocket API request by its `action` field.

    Raises NotImplementedError for unknown actions; assertion failures
    reject malformed arguments.
    """
    action = req.action
    args = getattr(req, 'args', None)

    #logging.warn("API action=%s (%r)" % (action, args))       # MAJOR info leak XXX
    logging.debug(f"API action={action}")

    if action == '_connected':
        logging.info("Websocket connected: %r" % args)
        # can send special state update at this point, depending on the page

    elif action == 'start_hsm_btn':
        await Connection().hsm_start()
        await send_json(show_flash_msg=APPROVE_CTA)

    elif action == 'delete_user':
        name, = args
        assert 1 <= len(name) <= MAX_USERNAME_LEN, "bad username length"
        await Connection().delete_user(name.encode('utf8'))
        # assume it worked, so UX updates right away
        try:
            STATUS.hsm.users.remove(name)
        except ValueError:
            pass
        STATUS.notify_watchers()

    elif action == 'create_user':
        name, authmode, new_pw = args
        assert 1 <= len(name) <= MAX_USERNAME_LEN, "bad username length"
        assert ',' not in name, "no commas in names"

        if authmode == 'totp':
            mode = USER_AUTH_TOTP | USER_AUTH_SHOW_QR
            new_pw = ''
        elif authmode == 'rand_pw':
            mode = USER_AUTH_HMAC | USER_AUTH_SHOW_QR
            new_pw = ''
        elif authmode == 'give_pw':
            mode = USER_AUTH_HMAC
        else:
            raise ValueError(authmode)

        await Connection().create_user(name.encode('utf8'), mode, new_pw)

        # assume it worked, so UX updates right away
        try:
            STATUS.hsm.users = list(set(STATUS.hsm.users + [name]))
        except ValueError:
            pass
        STATUS.notify_watchers()

    elif action == 'submit_policy':
        # get some JSON w/ everything the user entered.
        p, save_copy = args
        proposed = policy.web_cleanup(json_loads(p))
        policy.update_sl(proposed)
        await Connection().hsm_start(proposed)
        STATUS.notify_watchers()
        await send_json(show_flash_msg=APPROVE_CTA)

        if save_copy:
            d = policy.desensitize(proposed)
            await send_json(local_download=dict(
                data=json_dumps(d, indent=2),
                filename=f'hsm-policy-{STATUS.xfp}.json.txt'))

    elif action == 'download_policy':
        proposed = policy.web_cleanup(json_loads(args[0]))
        await send_json(local_download=dict(
            data=json_dumps(proposed, indent=2),
            filename=f'hsm-policy-{STATUS.xfp}.json.txt'))

    elif action == 'import_policy':
        # they are uploading a JSON capture, but need values we can load in Vue
        proposed = args[0]
        cooked = policy.web_cookup(proposed)
        await send_json(vue_app_cb=dict(update_policy=cooked),
                        show_flash_msg="Policy file imported.")

    elif action == 'pick_onion_addr':
        from torsion import TOR
        addr, pk = await TOR.pick_onion_addr()
        await send_json(vue_app_cb=dict(new_onion_addr=[addr, pk]))

    elif action == 'pick_master_pw':
        # 12 random bytes, base64, with '/'/'+'/'=' avoided for usability.
        pw = b64encode(os.urandom(12)).decode('ascii')
        pw = pw.replace('/', 'S').replace('+', 'p')
        assert '=' not in pw
        await send_json(vue_app_cb=dict(new_master_pw=pw))

    elif action == 'new_bunker_config':
        from torsion import TOR
        # save and apply config values
        nv = json_loads(args[0])
        assert 4 <= len(nv.master_pw) < 200, "Master password must be at least 4 chars long"

        # copy in simple stuff
        for fn in ['tor_enabled', 'master_pw', 'easy_captcha', 'allow_reboots']:
            if fn in nv:
                BP[fn] = nv[fn]

        # update onion stuff only if PK is known (ie. they changed it)
        if nv.get('onion_pk', False) or False:
            for fn in ['onion_addr', 'onion_pk']:
                if fn in nv:
                    BP[fn] = nv[fn]

        BP.save()
        await send_json(show_flash_msg="Bunker settings encrypted and saved to disk.")

        STATUS.tor_enabled = BP['tor_enabled']
        STATUS.notify_watchers()

        if not BP['tor_enabled']:
            await TOR.stop_tunnel()
        elif BP.get('onion_pk') and not (STATUS.force_local_mode or STATUS.setup_mode) \
                and TOR.get_current_addr() != BP.get('onion_addr'):
            # disconnect/reconnect
            await TOR.start_tunnel()

    elif action == 'sign_message':
        # sign a short text message
        # - lots more checking could be done here, but CC does it anyway
        msg_text, path, addr_fmt = args
        addr_fmt = AF_P2WPKH if addr_fmt != 'classic' else AF_CLASSIC

        try:
            sig, addr = await Connection().sign_text_msg(msg_text, path, addr_fmt)
        except:
            # get the spinner to stop: error msg will be "refused by policy" typically
            await send_json(vue_app_cb=dict(msg_signing_result='(failed)'))
            raise

        sig = b64encode(sig).decode('ascii').replace('\n', '')
        await send_json(vue_app_cb=dict(msg_signing_result=f'{sig}\n{addr}'))

    elif action == 'upload_psbt':
        # receiving a PSBT for signing
        size, digest, contents = args
        psbt = b64decode(contents)
        assert len(psbt) == size, "truncated/padded in transit"
        assert sha256(psbt).hexdigest() == digest, "corrupted in transit"
        STATUS.import_psbt(psbt)
        STATUS.notify_watchers()

    elif action == 'clear_psbt':
        STATUS.clear_psbt()
        STATUS.notify_watchers()

    elif action == 'preview_psbt':
        STATUS.psbt_preview = 'Wait...'
        STATUS.notify_watchers()
        try:
            txt = await Connection().sign_psbt(STATUS._pending_psbt, flags=STXN_VISUALIZE)
            txt = txt.decode('ascii')
            # force some line splits, especially for bech32, 32-byte values (p2wsh)
            probs = re.findall(r'([a-zA-Z0-9]{36,})', txt)
            for p in probs:
                txt = txt.replace(p, p[0:30] + '\u22ef\n\u22ef' + p[30:])
            STATUS.psbt_preview = txt
        except:
            # like if CC doesn't like the keys, whatever ..
            STATUS.psbt_preview = None
            raise
        finally:
            STATUS.notify_watchers()

    elif action == 'auth_set_name':
        idx, name = args
        assert 0 <= len(name) <= MAX_USERNAME_LEN
        assert 0 <= idx < len(STATUS.pending_auth)
        STATUS.pending_auth[idx].name = name
        STATUS.notify_watchers()

    elif action == 'auth_offer_guess':
        idx, ts, guess = args
        assert 0 <= idx < len(STATUS.pending_auth)
        STATUS.pending_auth[idx].totp = ts
        # never echo the actual guess back to the browser
        STATUS.pending_auth[idx].has_guess = 'x' * len(guess)
        STATUS._auth_guess[idx] = guess
        STATUS.notify_watchers()

    elif action == 'submit_psbt':
        # they want to sign it now
        expect_hash, send_immediately, finalize, wants_dl = args
        assert expect_hash == STATUS.psbt_hash, "hash mismatch"
        if send_immediately:
            assert finalize, "must finalize b4 send"

        logging.info("Starting to sign...")
        STATUS.busy_signing = True
        STATUS.notify_watchers()
        try:
            dev = Connection()

            # do auth steps first (no feedback given)
            for pa, guess in zip(STATUS.pending_auth, STATUS._auth_guess):
                if pa.name and guess:
                    await dev.user_auth(pa.name, guess, int(pa.totp), a2b_hex(STATUS.psbt_hash))
            STATUS.reset_pending_auth()

            try:
                result = await dev.sign_psbt(STATUS._pending_psbt, finalize=finalize)
                logging.info("Done signing")

                msg = "Transaction signed."
                if send_immediately:
                    msg += '<br><br>' + broadcast_txn(result)

                await send_json(show_modal=True, html=Markup(msg), selector='.js-api-success')

                result = (b2a_hex(result) if finalize else b64encode(result)).decode('ascii')
                fname = 'transaction.txt' if finalize else ('signed-%s.psbt' % STATUS.psbt_hash[-6:])

                if wants_dl:
                    await send_json(local_download=dict(data=result, filename=fname,
                                                        is_b64=(not finalize)))

                await dev.hsm_status()

            except CCUserRefused:
                logging.error("Coldcard refused to sign txn")
                await dev.hsm_status()
                r = STATUS.hsm.get('last_refusal', None)
                if not r:
                    # BUG FIX: was `HTMLErroMsg` (missing 'r'), which raised
                    # NameError instead of the intended error message.
                    raise HTMLErrorMsg('Refused by local user.')
                else:
                    raise HTMLErrorMsg(f"Rejected by Coldcard.<br><br>{r}")
        finally:
            STATUS.busy_signing = False
            STATUS.notify_watchers()

    elif action == 'shutdown_bunker':
        await send_json(show_flash_msg="Bunker is shutdown.")
        await asyncio.sleep(0.25)
        # `logging.warn` is a deprecated alias; use warning().
        logging.warning("User-initiated shutdown")
        asyncio.get_running_loop().stop()
        sys.exit(0)

    elif action == 'leave_setup_mode':
        # During setup process, they want to go Tor mode; which I consider leaving
        # setup mode ... in particular, logins are required.
        # - button label is "Start Tor" tho ... so user doesn't see it that way
        assert STATUS.setup_mode, 'not in setup mode?'
        assert BP['tor_enabled'], 'Tor not enabled (need to save?)'
        addr = BP['onion_addr']
        assert addr and '.onion' in addr, "Bad address?"
        STATUS.setup_mode = False
        await send_json(show_flash_msg="Tor hidden service has been enabled. "
                        "It may take a few minutes for the website to become available")
        STATUS.notify_watchers()

        from torsion import TOR
        logging.info(f"Starting hidden service: %s" % addr)
        asyncio.create_task(TOR.start_tunnel())

    elif action == 'logout_everyone':
        # useful for running battles...
        # - changes crypto key for cookies, so they are all invalid immediately.
        from aiohttp_session.nacl_storage import NaClCookieStorage
        import nacl

        logging.warning("Logout of everyone!")

        # reset all session cookies
        storage = orig_request.get('aiohttp_session_storage')
        assert isinstance(storage, NaClCookieStorage)
        storage._secretbox = nacl.secret.SecretBox(os.urandom(32))

        # kick everyone off (bonus step)
        for w in web_sockets:
            try:
                await send_json(redirect='/logout', _ws=w)
                await w.close()
            except:
                pass

    else:
        raise NotImplementedError(action)
logging.debug(event) actions = { 'init-inventory-hosts': init_inventory_hosts, 'inventory-hosts': inventory_hosts, 'inventory-wait': command_complete, 'report-delta': make_delta, } if not event.get('Records'): logging.error('No Records key.') logging.error(event) # We should have only one record per event, but sanity for record in event.get('Records', []): # if we don't have logging.debug(json_dumps(record)) data = json.loads(record['body']) logging.info(data) if data['action'] in actions: return json_dumps(actions[data['action']](data, context)) raise Exception('Unknown action {}.'.format(data['action'])) if __name__ == '__main__': configure_logging({'aws_request_id': "local"}) os.environ['AWS_DEFAULT_REGION'] = 'us-west-2' os.environ['BUCKET'] = 'edwards-asadmin-patching-bucket-us-west-2' os.environ['SCHEDULE_SSM_KEY'] = '/config/patching/schedule' os.environ['SLACK_CONFIG'] = '/onica/slack/webhook' resp = {'action': 'init-inventory-hosts'} while resp != True: resp = main({'Records': [{'body': json_dumps(resp)}]}, None) time.sleep(10)
def certificate_report(region=None):
    """Dump all certificates for `region` as JSON to /tmp/certs.report."""
    certs = all_certificates(region)
    d = utils.json_dumps(certs, dangerous=True)
    # Fix: context manager guarantees the handle is flushed and closed
    # (the old bare open(...).write(...) leaked the file handle).
    with open('/tmp/certs.report', 'w') as fh:
        fh.write(d)
def to_json(self):
    """Return this object's dict representation serialized as JSON."""
    return json_dumps(self.to_dict())
def send(self, data):
    """JSON-encode `data` and send it over the socket, CRLF-terminated.

    Lazily connects to localhost on the first configured port on first use.
    """
    if not self.__connected:
        self.__connected = True
        ports = CONFIG.ports
        # NOTE(review): a connect failure still leaves __connected True,
        # so no reconnect will ever be attempted -- confirm intended.
        self.__socket.connect(('localhost', ports[0]))
    self.__socket.send(utils.json_dumps(data) + "\r\n")
def set(self, key, value, expire):
    """Store `value` (JSON-encoded) under `key` with an expiry.

    NOTE(review): modern redis-py's setex signature is
    setex(name, time, value), while the pre-3.0 legacy `Redis` class used
    (name, value, time); this call passes (key, value, expire), which only
    matches the legacy order -- verify against the client class in use,
    otherwise the value and TTL are swapped.
    """
    self.client.setex(key, utils.json_dumps(value), expire)
def handler(json_request, outgoing):
    """Validate an incoming JSON request; for ingest/publish actions,
    download and scrape the referenced article into article-json; then
    forward the parameters to lax and proxy lax's response back.

    Every failure is converted into an ERROR response via `response`;
    only a lax failure re-raises after responding.
    """
    response = partial(send_response, outgoing)

    try:
        request = utils.validate(json_request, conf.REQUEST_SCHEMA)
    except ValueError as err:
        # bad data. who knows what it was. die
        return response(
            mkresponse(ERROR,
                       "request could not be parsed: %s" % json_request))
    except ValidationError as err:
        # data is readable, but it's in an unknown/invalid format. die
        return response(
            mkresponse(ERROR,
                       "request was incorrectly formed: %s" % str(err)))
    except Exception as err:
        # die
        msg = "unhandled error attempting to handle request: %s" % str(err)
        return response(mkresponse(ERROR, msg))

    # we have a valid request :)
    LOG.info("valid request")

    # Keep only the fields lax cares about; rename to lax's vocabulary.
    params = subdict(
        request,
        ['action', 'id', 'token', 'version', 'force', 'validate-only'])
    params = renkeys(params, [('validate-only', 'dry_run')])

    # if we're to ingest/publish, then we expect a location to download article data
    if params['action'] in [INGEST, INGEST_PUBLISH]:
        try:
            article_xml = download(request['location'])
            if not article_xml:
                raise ValueError("no article content available")
        except AssertionError as err:
            msg = "refusing to download article xml: %s" % str(err)
            return response(mkresponse(ERROR, msg, request))
        except Exception as err:
            msg = "failed to download article xml from %r: %s" % (
                request['location'], str(err))
            return response(mkresponse(ERROR, msg, request))

        LOG.info("got xml")

        try:
            article_data = scraper.render_single(article_xml,
                                                 version=params['version'],
                                                 location=request['location'])
            LOG.info("rendered article data ")
        except Exception as err:
            error = str(err) if hasattr(err, 'message') else err
            msg = "failed to render article-json from article-xml: %s" % error
            LOG.exception(msg, extra=params)
            return response(mkresponse(ERROR, msg, request))

        LOG.info("successful scrape")

        try:
            article_json = utils.json_dumps(article_data)
        except ValueError as err:
            msg = "failed to serialize article data to article-json: %s" % str(
                err)
            return response(mkresponse(ERROR, msg, request))

        LOG.info("successfully serialized article-data to article-json")

        # phew! gauntlet ran, we're now confident of passing this article-json to lax
        # lax may still reject the data as invalid, but we'll proxy that back if necessary
        params['article_json'] = article_json

    try:
        LOG.info("calling lax")  # with params: %r" % params)
        lax_response = call_lax(**params)
        LOG.info("lax response: %r", lax_response)
        return response(mkresponse(**lax_response))
    except Exception as err:
        # lax didn't understand us or broke
        msg = "lax failed attempting to handle our request: %s" % str(err)
        response(mkresponse(ERROR, msg, request))
        # when lax fails, we fail
        raise
default=8, type=int, help='ID of GPU that is used for training.') parser.add_argument('--workers', default=16, type=int, dest='nb_workers', help='Number of workers for dataloader.') args = parser.parse_args() torch.cuda.set_device(args.gpu_id) config = utils.load_config(args.config) from utils import JSONEncoder, json_dumps print(json_dumps(obj=config, indent=4, cls=JSONEncoder, sort_keys=True)) with open('log/' + args.log_filename + '.json', 'w') as x: json.dump(obj=config, fp=x, indent=4, cls=JSONEncoder, sort_keys=True) dl_tr = torch.utils.data.DataLoader(dataset.load( name=args.dataset, root=config['dataset'][args.dataset]['root'], classes=config['dataset'][args.dataset]['classes']['train'], transform=dataset.utils.make_transform(**config['transform_parameters'])), batch_size=args.sz_batch, shuffle=True, num_workers=args.nb_workers, drop_last=True, pin_memory=True) dl_ev = torch.utils.data.DataLoader(dataset.load(
async def tx_resp(_ws=ws, **resp):
    """Send the keyword args as one JSON message over the websocket.

    `_ws` defaults to the enclosing connection's socket; callers may
    target another socket by passing _ws= explicitly.
    """
    logging.debug(f"Send resp: {resp}")
    await _ws.send_str(json_dumps(resp))
def send_to(self, queue_name, message):
    """JSON-encode `message` and enqueue it on `queue_name`.

    NOTE(review): the log line serializes with json_dumps (custom encoder)
    but the payload actually enqueued uses stdlib json.dumps -- the two can
    disagree for non-primitive types; confirm which is intended.
    """
    logger.debug("Sending message to %s: %s" %
                 (queue_name, json_dumps(message)))
    return self.add(queue_name, json.dumps(message))
def json_response(data):
    """Wrap `data` in an aiohttp Response with a JSON body and content type."""
    return web.Response(text=json_dumps(data), content_type='application/json')
def handler(json_request, outgoing):
    # Validate the incoming JSON request, optionally download + scrape the
    # article it references, then forward everything to lax. All outcomes
    # (success or failure) are reported back through `outgoing`.
    response = partial(send_response, outgoing)
    try:
        request = utils.validate(json_request, conf.REQUEST_SCHEMA)
    except ValueError:
        # bad data. who knows what it was. die
        return response(mkresponse(ERROR, "request could not be parsed: %s" % json_request))
    except ValidationError as err:
        # data is readable, but it's in an unknown/invalid format. die
        return response(mkresponse(ERROR, "request was incorrectly formed: %s" % str(err)))
    except Exception as err:
        # die
        msg = "unhandled error attempting to handle request: %s" % str(err)
        return response(mkresponse(ERROR, msg))
    # we have a valid request :)
    LOG.info("valid request")
    # keep only the keys call_lax() understands, renamed to its conventions
    params = subdict(request, ['action', 'id', 'token', 'version', 'force', 'validate-only'])
    params = renkeys(params, [('validate-only', 'dry_run'), ('id', 'msid')])
    # if we're to ingest/publish, then we expect a location to download article data
    if params['action'] in [INGEST, INGEST_PUBLISH]:
        try:
            article_xml = download(request['location'])
            if not article_xml:
                raise ValueError("no article content available")
        except AssertionError as err:
            # download() asserts the location is acceptable; refusal, not failure
            msg = "refusing to download article xml: %s" % str(err)
            return response(mkresponse(ERROR, msg, request))
        except Exception as err:
            msg = "failed to download article xml from %r: %s" % (request['location'], str(err))
            return response(mkresponse(ERROR, msg, request))
        LOG.info("got xml")
        try:
            article_data = scraper.render_single(article_xml, version=params['version'], location=request['location'])
            LOG.info("rendered article data ")
        except Exception as err:
            # NOTE(review): tests for a 'message' attribute but renders str(err)
            # either way — presumably a py2/py3 compatibility leftover; confirm.
            error = str(err) if hasattr(err, 'message') else err
            msg = "failed to render article-json from article-xml: %s" % error
            LOG.exception(msg, extra=params)
            return response(mkresponse(ERROR, msg, request))
        LOG.info("successful scrape")
        try:
            article_json = utils.json_dumps(article_data)
        except ValueError as err:
            msg = "failed to serialize article data to article-json: %s" % str(err)
            return response(mkresponse(ERROR, msg, request))
        LOG.info("successfully serialized article-data to article-json")
        # phew! gauntlet ran, we're now confident of passing this article-json to lax
        # lax may still reject the data as invalid, but we'll proxy that back if necessary
        params['article_json'] = article_json
    try:
        LOG.info("calling lax")
        lax_response = call_lax(**params)
        LOG.info("lax response: %r", lax_response)
        return response(mkresponse(**lax_response))
    except Exception as err:
        # lax didn't understand us or broke
        msg = "lax failed attempting to handle our request: %s" % str(err)
        response(mkresponse(ERROR, msg, request))
        # when lax fails, we fail
        raise
def serialise_response(struct):
    """Render *struct* as a JSON string via the shared serialiser."""
    serialised = utils.json_dumps(struct)
    return serialised
def run(self):
    """
    Main loop of the PDU. It's basically an infinite loop that tries
    to read messages from the message queue, decode them and then
    process them with the specific function.
    """
    self._start_running()
    self.log("PDU %s is alive!" % self.__class__.__name__)
    while self._is_running():
        try:
            # While module reports that it's busy, stop feeding
            # messages to it - especially useful for cases where
            # a part of message processing is asynchronous w.r.t.
            # to message fetching. In that case, messages can accumulate
            # in the memory of the PDU, leading to a huge mem usage.
            busy_sleeps = 0
            while self.busy() and busy_sleeps < self.MAX_BUSY_SLEEPS:
                time.sleep(self.TIME_TO_SLEEP_ON_BUSY)
                # BUG FIX: the counter was never incremented, so a busy
                # module spun here forever and the cap below was dead code.
                busy_sleeps = busy_sleeps + 1
            if busy_sleeps == self.MAX_BUSY_SLEEPS:
                continue

            # Step 1 - get message from message queue
            message = self.queue_system.get(self.QUEUE, timeout=1)
            if not message:
                # nothing available within the timeout; poll again
                continue

            # Step 2 - try to decode it assuming it's in correct
            # JSON format
            try:
                doc = json.loads(message)
            except Exception:
                self.log("Did not get valid JSON from queue %s" % self.QUEUE)
                self.log("Invalid message: %s" % json_dumps(message))
                continue

            # Step 3 - validate message (on a copy, so validators can
            # mutate freely without corrupting the original doc)
            try:
                copy_of_doc = copy.deepcopy(doc)
                is_valid = self.validate_message(copy_of_doc)
                if not is_valid:
                    self.log("Invalid message from queue %s" % self.QUEUE)
                    self.log("Message: %s" % json_dumps(copy_of_doc))
                    continue
            except Exception:
                # BUG FIX: '%' was applied to json_dumps(doc) alone while the
                # format string has two placeholders (and self.QUEUE leaked in
                # as a second argument to log()), raising TypeError here.
                self.log("Error while validating message %s from queue %s"
                         % (json_dumps(doc), self.QUEUE))
                traceback.print_exc()
                continue

            # Step 4 - actually process the message. Usually, this means
            # that a PDU enqueues it further down the pipeline to other
            # modules.
            try:
                copy_of_doc = copy.deepcopy(doc)
                self.process_message(copy_of_doc)
            except Exception:
                self.log("Error while processing message %s from queue %s"
                         % (json_dumps(doc), self.QUEUE))
                traceback.print_exc()
                continue

            # In debug mode, sample one message out of every
            # MESSAGE_SAMPLING_RATIO processed messages.
            ratio = self.MESSAGE_SAMPLING_RATIO
            if self.debug_mode and self._processed_messages % ratio == 0:
                self.log("Sampled message: %s" % json_dumps(doc))

            # Only increment # of processed messages if there was an actual
            # processing. If we do this in the "finally" block, it will
            # also get executed after "continue" statements as well.
            self._processed_messages = self._processed_messages + 1
        except Exception:
            self.log("Error while getting message from queue %s" % self.QUEUE)
            traceback.print_exc()
        finally:
            # Count # of processed messages in each time interval
            # and display them to the log.
            time_since_last_stats = time.time() - self._last_stats
            if time_since_last_stats >= self.PRINT_STATS_INTERVAL:
                if self._processed_messages > 0:
                    flow = self._processed_messages / time_since_last_stats
                    self.log("%.2f messages/s in last %.2f seconds" %
                             (flow, time_since_last_stats))
                self._processed_messages = 0
                self._last_stats = time.time()