def plot_horizontal_band(ax, circles, configuration):
    """Plot the upper and lower edges of horizontal bands through *circles*.

    For every circle one point is taken at 80% of its radius above the
    center and one at 80% below; each row of points
    (``configuration.width`` points per row) is joined by an interpolating
    spline and drawn in black on *ax*.

    Args:
        ax: matplotlib axes to draw on.
        circles: iterable of ``(center, radius)`` pairs, ``center`` being
            an ``(x, y)`` tuple.
        configuration: object providing ``width``, ``height`` and
            ``mmPerPixel`` attributes.
    """
    from scipy import interpolate

    def _plot_spline_rows(points):
        # Fit one smooth curve per row of points and draw it.
        # (Extracted helper: the original duplicated this loop verbatim
        # for the upper and the lower band edge.)
        for point_row in chunks(points, configuration.width):
            point_data = np.array(point_row)
            # s=0 forces the spline through every sample (pure interpolation)
            tck, _ = interpolate.splprep(point_data.transpose(), s=0)
            unew = np.arange(0, 1.01, 0.01)
            out = interpolate.splev(unew, tck)
            ax.plot(out[0], out[1], color='black')

    # upper band edge: 0.8 * radius above each circle center
    _plot_spline_rows([(center[0], center[1] + radius * 0.8)
                       for (center, radius) in circles])
    # lower band edge: 0.8 * radius below each circle center
    _plot_spline_rows([(center[0], center[1] - radius * 0.8)
                       for (center, radius) in circles])

    ax.set_xlim(0, configuration.width * configuration.mmPerPixel)
    ax.set_ylim(0, configuration.height * configuration.mmPerPixel)
    ax.set_title('Horizontal Bands')
    ax.set_aspect(1.0)
def do_nvram(self, arg):
    """Dump, read or write device NVRAM via PJL RNVRAM/WNVRAM commands.

    Sub-commands selected by *arg*:
      * ``dump [all]`` -- copy NVRAM contents to a local file under
        ``nvram/``; with a second word the memory space is sampled first,
        otherwise a fixed set of address ranges is used.
      * ``read <addr>`` -- read a single byte.
      * ``write <addr> <data>`` -- write a single byte (elevates via
        ``@PJL SUPERUSER`` first).

    NOTE(review): this is Python 2 code (bare ``print`` statement and
    ``range(...) + range(...)`` list concatenation below) — confirm the
    target interpreter before porting.
    """
    # dump nvram
    if arg.startswith('dump'):
        bs = 2**9     # memory block size used for sampling
        max = 2**18   # maximum memory address for sampling (shadows builtin max())
        steps = 2**9  # number of bytes to dump at once (feedback-performance trade-off)
        lpath = os.path.join('nvram', self.basename(self.target)) # local copy of nvram
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # ******* sampling: populate memspace with valid addresses ******
        if len(re.split("\s+", arg, 1)) > 1:
            memspace = []
            commands = ['@PJL RNVRAM ADDRESS=' + str(n) for n in range(0, max, bs)]
            self.chitchat("Sampling memory space (bs=" + str(bs) + ", max=" + str(max) + ")")
            for chunk in (list(chunks(commands, steps))):
                str_recv = self.cmd(c.EOL.join(chunk))
                # break on unsupported printers
                if not str_recv: return
                # collect valid memory addresses
                blocks = re.findall('ADDRESS\s*=\s*(\d+)', str_recv)
                for addr in blocks:
                    # every answered address implies a whole bs-sized block is readable
                    memspace += range(conv().int(addr), conv().int(addr) + bs)
                self.chitchat(str(len(blocks)) + " blocks found. ", '')
        else: # use fixed memspace (quick & dirty but might cover interesting stuff)
            memspace = range(0, 8192) + range(32768, 33792) + range(53248, 59648)
        #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        # ******* dumping: read nvram and write copy to local file ******
        commands = ['@PJL RNVRAM ADDRESS=' + str(n) for n in memspace]
        self.chitchat("Writing copy to " + lpath)
        if os.path.isfile(lpath): file().write(lpath, '') # empty file
        for chunk in (list(chunks(commands, steps))):
            str_recv = self.cmd(c.EOL.join(chunk))
            if not str_recv: return # break on unsupported printers
            else: self.makedirs('nvram') # create nvram directory
            # asciify the DATA=<n> decimal byte values returned by the device
            data = ''.join([conv().chr(n) for n in re.findall('DATA\s*=\s*(\d+)', str_recv)])
            file().append(lpath, data) # write copy of nvram to disk
            self.logger.dump(data) # print asciified output to screen
        print
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # read nvram (single byte)
    elif arg.startswith('read'):
        arg = re.split("\s+", arg, 1)
        if len(arg) > 1:
            arg, addr = arg
            self.logger.info(self.cmd('@PJL RNVRAM ADDRESS=' + addr))
        else: self.help_nvram()
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # write nvram (single byte)
    elif arg.startswith('write'):
        arg = re.split("\s+", arg, 2)
        if len(arg) > 2:
            arg, addr, data = arg
            # temporarily become superuser for write access
            self.cmd('@PJL SUPERUSER PASSWORD=0' + c.EOL
                   + '@PJL WNVRAM ADDRESS=' + addr + ' DATA=' + data + c.EOL
                   + '@PJL SUPERUSEROFF', False)
        else: self.help_nvram()
    else: self.help_nvram()
def generate_dxf_horizontal_bands(circles, output_file_path, configuration: Configuration):
    """Write a DXF drawing of horizontal bands derived from *circles*.

    Every circle contributes an upper and a lower boundary point (offset by
    half the radius in x and the full radius in y). Consecutive points in a
    row are connected by lines, and each row is closed off against the left
    and right borders of the drawing area.
    """
    print('generate horizontal band dxf...')
    doc, msp = _create_dxf_file()
    _plot_bounding_box(msp, configuration)

    radius_weight_x = 0.5
    radius_weight_y = 1
    # one (upper, lower) boundary-point pair per circle
    band_points = [
        ((center[0] + radius * radius_weight_x, center[1] + radius * radius_weight_y),
         (center[0] - radius * radius_weight_x, center[1] - radius * radius_weight_y))
        for (center, radius) in circles
    ]

    for row in chunks(band_points, configuration.width):
        # connect neighbouring pairs within the row (upper and lower edge)
        for left, right in zip(row, row[1:]):
            msp.add_line(left[0], right[0])
            msp.add_line(left[1], right[1])

        width = configuration.width * configuration.mmPerPixel
        first = row[0]
        last = row[-1]
        # close the band against the left border
        msp.add_line((0, first[0][1]), first[0])
        msp.add_line((0, first[1][1]), first[1])
        msp.add_line((0, first[0][1]), (0, first[1][1]))
        # close the band against the right border
        msp.add_line(last[0], (width, last[0][1]))
        msp.add_line(last[1], (width, last[1][1]))
        msp.add_line((width, last[0][1]), (width, last[1][1]))

    print(f'write dxf to {output_file_path}')
    doc.saveas(output_file_path)
def main():
    """Main logic.

    Pipeline: download gzipped JSON-lines files from S3, gunzip them,
    filter the records, push the result to an Algolia index in batches of
    1000, then upload the filtered data back to S3.

    Returns:
        1 on any failure; None on success.
    """
    if BUCKET is None or PREFIX is None or TIMESTAMP is None:
        logger.error("BUCKET or PREFIX or TIMESTAMP is None")
        return 1
    logger.info("BUCKET: {}".format(BUCKET))
    logger.info("PREFIX: {}".format(PREFIX))
    logger.info("TIMESTAMP: {}".format(TIMESTAMP))

    # Download gzip files to /tmp/INPUT_PREFIX
    succeeded = s3util.download_dir(bucket=BUCKET, prefix=INPUT_PREFIX, local="/tmp")
    if succeeded is False:
        logger.error("S3 Download failed")
        return 1
    logger.info("S3 download complete")

    # gunzip /tmp/INPUT_PREFIX/*.gz
    try:
        # BUG FIX: check=True makes a non-zero gunzip exit status raise
        # CalledProcessError; without it subprocess.run() ignores failures,
        # so the except branch below was unreachable for ordinary errors.
        subprocess.run(
            "gunzip /tmp/{}/*.gz".format(INPUT_PREFIX),
            shell=True,
            stdout=PIPE,
            stderr=PIPE,
            text=True,
            check=True,
        )
    except Exception:
        # logger.exception() appends the active traceback automatically;
        # no manual traceback.format_exc() needed.
        logger.exception("Error while gunzip /tmp/{}/*.gz".format(INPUT_PREFIX))
        return 1

    # Read /tmp/INPUT_PREFIX jsonline and filter and append list
    filtered_data = filter_data("/tmp/{}".format(INPUT_PREFIX))
    logger.info("Index size: {}".format(len(filtered_data)))

    # Create and update Algolia index every 1000 records
    # https://www.algolia.com/doc/api-reference/api-methods/save-objects/#about-this-method
    for split_data in chunks(filtered_data, 1000):
        algolia.save(split_data)
        logger.info("uploaded")
    logger.info("algolia index save complete")

    # Upload filtered data to S3://BUCKET/PREFIX/dest/filtered_data_TIMESTAMP.jsonl.gz
    try:
        filtered_data_bytes = "\n".join([json.dumps(d) for d in filtered_data]).encode(
            "utf-8"
        )
        key = "{}/{}/filtered_data_{}.jsonl.gz".format(PREFIX, "dest", TIMESTAMP)
        s3util.upload_file(BUCKET, key, filtered_data_bytes)
        logger.info("s3 upload complete")
    except Exception as e:
        logger.exception("Error while s3 upload", exc_info=e)
def do_unlock(self, arg):
    "Unlock control panel settings and disk write access."
    # first check if locking is supported by device
    str_recv = self.cmd('@PJL DINQUIRE PASSWORD')
    if not str_recv or '?' in str_recv:
        return output().errmsg("Cannot unlock", "locking not supported by device")
    # user-supplied pin vs. 'exhaustive' key search
    if not arg:
        print("No PIN given, cracking.")
        # NOTE(review): range concatenation is Python 2 only — confirm interpreter
        keyspace = [""] + range(1, 65536) # protection can be bypassed with
    else:                                 # empty password one some devices
        try:
            keyspace = [int(arg)]
        except Exception as e:
            output().errmsg("Invalid PIN", str(e))
            return
    # for optimal performance set steps to 500-1000 and increase timeout
    steps = 500 # set to 1 to get actual PIN (instead of just unlocking)
    # unlock, bypass or crack PIN
    for chunk in (list(chunks(keyspace, steps))):
        str_send = ""
        for pin in chunk:
            # try to remove PIN protection
            # BUG FIX: restored concatenation — the source contained the
            # invalid placeholder '...PASSWORD='******'...' (scrubbed literal)
            str_send += '@PJL JOB PASSWORD=' + str(pin) + c.EOL \
                      + '@PJL DEFAULT PASSWORD=0' + c.EOL
        # check if PIN protection still active
        str_send += '@PJL DINQUIRE PASSWORD'
        # visual feedback on cracking process (pin leaks from the loop above)
        if len(keyspace) > 1 and pin:
            self.chitchat("\rTrying PIN " + str(pin) + " ("
                          + "%.2f" % (pin / 655.35) + "%)", '')
        # send current chunk of PJL commands
        str_recv = self.cmd(str_send)
        # seen hardcoded strings like 'ENABLED', 'ENABLE' and 'ENALBED' (sic!) in the wild
        if str_recv.startswith("ENA"):
            if len(keyspace) == 1:
                output().errmsg("Cannot unlock", "Bad PIN")
        else:
            # disable control panel lock and disk lock
            self.cmd('@PJL DEFAULT CPLOCK=OFF' + c.EOL
                   + '@PJL DEFAULT DISKLOCK=OFF', False)
            if len(keyspace) > 1 and pin:
                self.chitchat("\r")
            break # exit cracking loop
    self.show_lock()
if __name__ == '__main__':
    # CLI for the trained password grammar: encode/decode, vault sub-grammar,
    # parsing, parse-tree display, and random password generation.
    tg = TrainedGrammar()
    if len(sys.argv) < 2:
        print(USAGE)
        sys.exit(0)
    if sys.argv[1] == '-encode':
        code_g = tg.encode_pw(sys.argv[2])
        print(code_g)
        print(tg.decode_pw(code_g))
    elif sys.argv[1] == '-vault':
        g = SubGrammar(tg, sys.argv[2:])
        print(g)
    elif sys.argv[1] == '-parse':
        print('Parse', tg.parse(sys.argv[2]))
    elif sys.argv[1] == '-ptree':
        pw = sys.argv[2]
        pt = tg.l_parse_tree(pw)
        print('Parse Tree for {}\n{}\nSize: {}'.format(pw, pt, len(pt)))
        # print('Get_all_matches: ', tg.get_all_matches(pw))
    elif sys.argv[1] == '-generate':
        # generate n random passwords by decoding fixed-size blocks of random bytes
        n = max(int(sys.argv[2]), 1)
        fmt = "<{}L".format(hny_config.PASSWORD_LENGTH)
        record_size = struct.calcsize(fmt)  # bytes consumed per password
        rnd = os.urandom(n * record_size)
        # BUG FIX: chunks() yields fixed-size pieces, so the piece size must be
        # the struct record size, not the password count n; the old
        # chunks(rnd, n) handed struct.unpack() wrongly-sized buffers
        # whenever n != record_size.
        for i, x in enumerate(chunks(rnd, record_size)):
            code = struct.unpack(fmt, x)
            print("{:-5d}: {:20s}".format(i, tg.decode_pw(code)))
    else:
        print("No matches found in your action: {!r}".format(sys.argv[1]))
        print(USAGE)
if __name__ == '__main__':
    # CLI for the trained password grammar: encode/decode, vault sub-grammar,
    # parsing, parse-tree display, and random password generation.
    tg = TrainedGrammar()
    if len(sys.argv) < 2:
        print(USAGE)
        sys.exit(0)
    if sys.argv[1] == '-encode':
        code_g = tg.encode_pw(sys.argv[2])
        print(code_g)
        print(tg.decode_pw(code_g))
    elif sys.argv[1] == '-vault':
        g = SubGrammar(tg, sys.argv[2:])
        print(g)
    elif sys.argv[1] == '-parse':
        print('Parse', tg.parse(sys.argv[2]))
    elif sys.argv[1] == '-ptree':
        pw = sys.argv[2]
        pt = tg.l_parse_tree(pw)
        print('Parse Tree for {}\n{}\nSize: {}'.format(pw, pt, len(pt)))
        # print('Get_all_matches: ', tg.get_all_matches(pw))
    elif sys.argv[1] == '-generate':
        # generate n random passwords by decoding fixed-size blocks of random bytes
        n = max(int(sys.argv[2]), 1)
        fmt = "<{}L".format(hny_config.PASSWORD_LENGTH)
        record_size = struct.calcsize(fmt)  # bytes consumed per password
        rnd = os.urandom(n * record_size)
        # BUG FIX: chunks() yields fixed-size pieces, so the piece size must be
        # the struct record size, not the password count n; the old
        # chunks(rnd, n) handed struct.unpack() wrongly-sized buffers
        # whenever n != record_size.
        for i, x in enumerate(chunks(rnd, record_size)):
            code = struct.unpack(fmt, x)
            print("{:-5d}: {:20s}".format(i, tg.decode_pw(code)))
    else:
        print("No matches found in your action: {!r}".format(sys.argv[1]))
        print(USAGE)
def plot(self, start_block, end_block, output, batch_size=20):
    """Render per-block stacked bar charts of contract function calls to a PDF.

    One PDF page is produced per batch of `batch_size` consecutive blocks;
    each page is a stacked bar chart of call counts per function name, with
    the "... {failed}" variant drawn in the same colour but hatched.

    Args:
        start_block: first block number (inclusive).
        end_block: last block number (inclusive).
        output: path of the PDF file to write.
        batch_size: number of blocks per PDF page (default 20).
    """
    # Create a colour code cycler e.g. 'C0', 'C1', etc.
    n_functions = len(self.contract.all_functions())
    color_codes = map('C{}'.format, cycle(range(max(10, n_functions))))
    batch_chunks = chunks(range(start_block, end_block+1), batch_size)
    batch_chunks = [list(i) for i in batch_chunks]
    # merge a trailing single-block batch into the previous one so the last
    # page never shows just one bar
    if len(batch_chunks) > 1 and len(batch_chunks[-1]) == 1:
        batch_chunks[-2] += batch_chunks[-1]
        del batch_chunks[-1]
    pdf_file = PdfPages(output)
    plot_style_dict = {}  # per-function bar style, shared across all pages
    bar = progressbar.ProgressBar(max_value=end_block-start_block)
    for batch in batch_chunks:
        batch_dict = {}  # item name -> {block number -> call count}
        figure = plt.figure()
        ax = figure.gca()
        ax.set_xticks(batch)
        ax.set_xticklabels([str(i) for i in batch])
        for label in ax.get_xmajorticklabels():
            label.set_rotation(30)
            label.set_horizontalalignment("right")
        ax.set_xlim((min(batch)-0.5, max(batch)+0.5))
        ax.set_title('Fn calls for contract\n%s\nin blocks %d to %d'%(self.contract.address, batch[0], batch[-1]))
        for block_number in batch:
            bar.update(block_number-start_block)
            block_dict = self.get_block_dict(block_number)
            for item_name, item_count in block_dict.items():
                if not item_name in batch_dict:
                    batch_dict[item_name] = {}
                if not block_number in batch_dict[item_name]:
                    batch_dict[item_name][block_number] = 0
                batch_dict[item_name][block_number] += item_count
                # first sighting of a function: assign it a colour; the
                # "{failed}" variant shares the colour but gets a hatch
                if not item_name in plot_style_dict:
                    color_code = next(color_codes)
                    plot_style_dict[item_name] = {"color": color_code, "edgecolor":"black"}
                    plot_style_dict[item_name+" {failed}"] = {"color": color_code, "edgecolor":"black", "hatch":"/"}
        bottom = [0 for i in batch]  # running stack height per block in this batch
        for fn_name, call_dict in batch_dict.items():
            pairs = [(k,v) for k,v in call_dict.items()]
            pairs = sorted(pairs, key=lambda item: item[0])
            X = [i[0] for i in pairs]
            Y = [i[1] for i in pairs]
            # pad blocks without calls so the bars line up with `bottom`
            X, Y = fill_data(X, Y, batch[0], batch[-1])
            ax.bar(X, Y, bottom=bottom, label=fn_name, **plot_style_dict[fn_name])
            ax.legend()
            bottom = [a+b for a,b in zip(bottom, Y)]
        ax.set_axisbelow(True)
        ax.set_ylim(0, max(max(bottom), 10)*1.05)
        # assumes call counts are ints — range() would fail on floats; TODO confirm
        ax.set_yticks(list(range(0, max(bottom)+1)))
        ax.grid()
        pdf_file.savefig(figure)
    bar.finish()
    pdf_file.close()