def xor(self, ctx, return_type, a, atype, b, btype):
    """Return the XOR of two operands of identical symbolic type.

    Both operands must share the result type. Boolean operands (or a mix
    where either side is a z3 Bool) are normalized with util.as_bool and
    combined with z3.Xor; otherwise a plain bit-vector XOR is emitted.
    """
    assert atype == return_type
    assert atype == btype
    # Fast path: neither side is a z3 Bool, so bit-vector XOR applies.
    if not (z3.is_bool(a) or z3.is_bool(b)):
        return a ^ b
    # Boolean path: coerce both sides, which is only meaningful for a
    # single-bit result type.
    lhs = util.as_bool(a)
    rhs = util.as_bool(b)
    assert return_type.size() == 1
    return z3.Xor(lhs, rhs)
def lambda_handler(event, context):
    """AWS Lambda entry point: dispatch *event* to the first handler type
    that claims it and return that handler's result.

    Returns the handler's result (or {} if the handler raised), or None
    implicitly when no handler type claims the event. Scheduler
    configuration is unloaded on every exit path via the finally block.
    """
    try:
        # Log stream is named per UTC date (datetime.utcnow() is naive UTC).
        dt = datetime.utcnow()
        log_stream = LOG_STREAM.format(dt.year, dt.month, dt.day)
        result = {}
        # Buffered logger; debug logging is toggled by the trace env var.
        with Logger(logstream=log_stream, buffersize=20, context=context,
                    debug=util.as_bool(os.getenv(configuration.ENV_TRACE, False))) as logger:
            # NOTE(review): "%version%" looks like a build-time placeholder
            # substituted during packaging — confirm.
            logger.info("InstanceScheduler, version %version%")
            logger.debug("Event is {}", util.safe_json(event, indent=3))
            # First handler type that recognizes the event wins; order matters.
            for handler_type in [SchedulerRequestHandler,
                                 SchedulerSetupHandler,
                                 ScheduleResourceHandler,
                                 AdminCliRequestHandler,
                                 CloudWatchEventHandler]:
                if handler_type.is_handling_request(event):
                    start = time()
                    handler = handler_type(event, context)
                    logger.info("Handler is {}".format(handler_type.__name__))
                    try:
                        result = handler.handle_request()
                    except Exception as e:
                        # Handler failures are logged, not re-raised; the
                        # pre-initialized {} result is returned instead.
                        logger.error("Error handling request {} by handler {}: ({})\n{}", json.dumps(event),
                                     handler_type.__name__, e, traceback.format_exc())
                    execution_time = round(float((time() - start)), 3)
                    logger.info("Handling took {} seconds", execution_time)
                    # Return after the first matching handler.
                    return result
            # No handler claimed the event; falls through returning None.
            logger.debug("Request was not handled, no handler was able to handle this type of request {}",
                         json.dumps(event))
    finally:
        configuration.unload_scheduler_configuration()
def __init__(self, parent, defn):
    """Build an Account resource and normalize its boolean flag fields.

    The superclass constructor populates attributes from *defn*; the two
    flag attributes are then coerced to real booleans via as_bool.
    """
    super(Account, self).__init__(parent, 'accounts/{id}', defn)
    for flag_name in ('suspended', 'password_expired'):
        setattr(self, flag_name, as_bool(getattr(self, flag_name)))
import matplotlib
import matplotlib.pyplot as plt

# Locate the benchmark results CSV derived from the main benchmark file path.
bm_file_path, _, _, _ = get_paths()
bm_results_file_path = append_to_filename(bm_file_path, "_results")

# Read results file into rows field.
rows = []
with open(bm_results_file_path) as f:
    # Skip CSV header.
    next(f)
    for line in f:
        line = parse_csv_line(line)
        # Columns 1 and 2 carry boolean-ish values; columns 3+ are numeric
        # measurements.
        line[1] = as_bool(line[1])
        line[2] = as_bool(line[2])
        for i in range(3, len(line)):
            line[i] = float(line[i])
        rows.append(line)

# Create basic figure.
fig, axes = plt.subplots(figsize=(6, 3))

# NOTE(review): `action` is defined earlier in the script (not visible in
# this chunk); "hist_<name>" values appear to select a histogram mode.
if action.startswith("hist_"):
    hist_data = action[5:]  # Strip hist_ at beginning.
    hist_title = ""
    # Contains indices of column containing measurements for
    # when JS is on (index 0) and JS is off (index 1).
where JS was enabled and one where JS was disabled. """ from util import get_paths, parse_csv_line, append_to_filename, as_bool from util import benchmark_columns as columns import os bm_file_path, _, _, _ = get_paths() bm_js_file_path = append_to_filename(bm_file_path, "_js") bm_no_js_file_path = append_to_filename(bm_file_path, "_no_js") with open(bm_file_path, "r") as f: out_js = open(bm_js_file_path, "w") out_no_js = open(bm_no_js_file_path, "w") # Get CSV header and write them to both output files. csv_header = next(f) out_js.write(csv_header) out_no_js.write(csv_header) for line in f: # Write to respective file if the jsenabled column # is either true or false. if as_bool(parse_csv_line(line)[columns["jsEnabled"]]): out_js.write(line) else: out_no_js.write(line) out_js.close() out_no_js.close()
# Skip CSV header next(f) results = {} pages_scanned = 0 for line in f: # Scan line for both JS and no JS. row_js = parse_csv_line(line) row_no_js = parse_csv_line(next(f)) noscript_tags = [] url = urlparse(row_js[columns["url"]]) # Check if page with JS enabled sent any noscript tags. if as_bool(row_js[columns["noscript"]]): file_path = os.path.join(noscript_dir_path, row_js[columns["dataFileName"]] + ".html") noscript_tags.extend(get_noscript_tags(file_path)) # Check if page with JS disabled sent any noscript tags. if as_bool(row_no_js[columns["noscript"]]): file_path = os.path.join(noscript_dir_path, row_no_js[columns["dataFileName"]] + ".html") noscript_tags.extend(get_noscript_tags(file_path)) if len(noscript_tags) > 0: print("Processing {}".format(url.hostname)) r = process_noscript_tags(noscript_tags, url) pages_scanned += 1 # Only keep unique results
break js_row = parse_csv_line(js_line) nojs_row = parse_csv_line(nojs_line) # 6 = index first median col. for i in range(6, len(js_row)): # Parse values into floats. js_row[i] = float(js_row[i]) nojs_row[i] = float(nojs_row[i]) out_row = [ # col 1: url js_row[columns["url"]], # col 2: noscript exists? as_bool(js_row[columns["noscript"]]) or as_bool(nojs_row[columns["noscript"]]), # col 3: script exists? (int(js_row[columns["scriptCount"]]) > 0) or (int(nojs_row[columns["scriptCount"]]) > 0), # col 4: median load (js on) median(js_row[6:11]), # col 5: median domload (js on) median(js_row[11:16]), # col 6: median idle (js on) median(js_row[16:21]), # col 7: median load (js off) median(nojs_row[6:11]), # col 8: median domload (js off) median(nojs_row[11:16]), # col 9: median idle (js off)