def calculate_length(BASE_PATH, no_subdir, media_type):
    """Compute and report the combined duration of media files under BASE_PATH.

    Args:
        BASE_PATH: directory to scan.
        no_subdir: when truthy, do not recurse into subdirectories
            (forwarded to ``get_all_files``).
        media_type: human-readable label ("videos", "audio", ...) used in
            the result message.

    Returns:
        A colorized summary string, or a colorized error message when
        BASE_PATH is not a directory / no media files were found.
    """
    if not os.path.isdir(BASE_PATH):
        return bold(red("Error: This doesn't seem to be a valid directory."))

    all_files = get_all_files(BASE_PATH, no_subdir)

    # Probe file durations in parallel; `duration` is assumed to return the
    # length of a single file in minutes — TODO confirm unit.
    with ProcessPoolExecutor() as executor:
        sys.stdout.write('\n')
        result = list(
            tqdm(
                executor.map(duration, all_files),
                total=len(all_files),
                desc='Processing files',
            ))

    length = round(sum(result))

    if length == 0:
        # Raw string: avoids the invalid '\_' escape-sequence warning
        # (matches the raw-string form used by the sibling implementation).
        return bold(
            red(r'Seems like there are no {} files. ¯\_(ツ)_/¯'.format(
                media_type)))
    elif length < 60:
        minutes_string = pluralize(length, base='minute', suffix='s')
        result = 'Length of all {} is {}.'.format(media_type, minutes_string)
    else:
        hours, minutes = divmod(length, 60)
        hours_string = pluralize(hours, base='hour', suffix='s')
        minutes_string = pluralize(minutes, base='minute', suffix='s')
        result = 'Length of all {} is {} and {}.'.format(
            media_type, hours_string, minutes_string)
    return bold(green(result))
def calculate_length(BASE_PATH, no_subdir, media_type, queue, cache_ob):
    """Compute and report the combined duration of media files under BASE_PATH.

    Cache-aware variant: each worker receives a ``(file, queue, cache)``
    tuple so it can report results/cache hits through ``queue``.

    Args:
        BASE_PATH: directory to scan.
        no_subdir: when truthy, do not recurse into subdirectories.
        media_type: human-readable label used in the result message.
        queue: inter-process queue consumed elsewhere; terminated with a
            ``None`` poison pill when processing is done.
        cache_ob: object exposing a ``.cache`` mapping shared with workers.

    Returns:
        A colorized summary string, or a colorized error message.
    """
    if not os.path.isdir(BASE_PATH):
        return bold(red("Error: This doesn't seem to be a valid directory."))

    all_files = get_all_files(BASE_PATH, no_subdir)

    # One extra worker beyond the CPU count keeps the pool saturated while
    # individual workers block on file I/O.
    max_workers = multiprocessing.cpu_count() + 1
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        sys.stdout.write('\n')
        cache = cache_ob.cache
        args = ((file, queue, cache) for file in all_files)
        result = list(
            tqdm(
                executor.map(duration, args),
                total=len(all_files),
                desc='Processing files',
            ))

    length = round(sum(result))
    queue.put(None)  # poison pill: tells the queue consumer to stop

    if length == 0:
        # Raw string: avoids the invalid '\_' escape-sequence warning.
        return bold(
            red(r'Seems like there are no {} files. ¯\_(ツ)_/¯'.format(
                media_type)))
    elif length < 60:
        minutes_string = pluralize(length, base='minute', suffix='s')
        result = 'Length of all {} is {}.'.format(media_type, minutes_string)
    else:
        hours, minutes = divmod(length, 60)
        hours_string = pluralize(hours, base='hour', suffix='s')
        minutes_string = pluralize(minutes, base='minute', suffix='s')
        result = 'Length of all {} is {} and {}.'.format(
            media_type, hours_string, minutes_string)
    return bold(green(result))
def append(self, item, allow_duplicates=False):
    """Append `item` as a new line to the backing file.

    Unless `allow_duplicates` is set, an item whose string form is already
    present in the list is skipped with a warning.
    """
    if self.verbose:
        print(bold(green("Adding '{}' to `{}`.".format(item, self.fname))))

    is_duplicate = str(item) in self.list
    if is_duplicate and not allow_duplicates:
        print(bold(orange("'{}' already in `{}`.".format(item, self.fname))))
        return

    with open(self.fname, "a", encoding="utf8") as handle:
        handle.write("{item}\n".format(item=item))
def append(self, item, allow_duplicates=False):
    """Append `item` as a new line to `self.fname`.

    Unless `allow_duplicates` is set, an item whose string form is already
    present in `self.list` is skipped with a warning.
    """
    if self.verbose:
        msg = "Adding '{}' to `{}`.".format(item, self.fname)
        print(bold(green(msg)))
    if not allow_duplicates and str(item) in self.list:
        msg = "'{}' already in `{}`.".format(item, self.fname)
        print(bold(orange(msg)))
        return
    # Explicit encoding keeps behaviour platform-independent; the other
    # append() implementation in this codebase already opens with utf8.
    with open(self.fname, 'a', encoding='utf8') as f:
        f.write('{item}\n'.format(item=item))
def append(self, item, allow_duplicates=False):
    """Append `item` as a new line by rewriting the whole backing file.

    Unless `allow_duplicates` is set, an item whose string form is already
    present in `self.list` is skipped with a warning.
    """
    if self.verbose:
        msg = "Adding '{}' to `{}`.".format(item, self.fname)
        print(bold(green(msg)))
    if not allow_duplicates and str(item) in self.list:
        msg = "'{}' already in `{}`.".format(item, self.fname)
        print(bold(orange(msg)))
        return
    # Fix: close the handle returned by get_file() instead of leaking it.
    # NOTE(review): assumes get_file() returns a file object usable as a
    # context manager — confirm.
    with self.get_file() as f:
        file_content = f.read()
    new_content = file_content + '{item}\n'.format(item=item)
    self.write_file(new_content)
def colorize_option(chave, valor):
    '''
    Based on index type format and print out menu options.

    `chave` is either an int (a numbered menu entry) or a str (a hotkey
    character to highlight inside the label `valor`).
    '''
    if isinstance(chave, int):
        selector = yellow(' [') + bold(red('%s')) + yellow('] ')
        suffix = yellow('%s')
        return selector % chave + suffix % valor
    if isinstance(chave, str):
        pos = valor.lower().find(chave)
        # NOTE(review): partition() splits on the FIRST occurrence of the
        # character, which may not be the occurrence at `pos` (and
        # valor[pos] with pos == -1 silently uses the last character) —
        # confirm intended.
        prefix, radical, suffix = valor.partition(valor[pos])
        if prefix:
            # Bug fix: the original assigned red('%s') without interpolating,
            # so the output contained a literal colored "%s" instead of the
            # actual prefix text.
            prefix = red('%s' % prefix)
        radical = yellow('[') + bold(red('%s' % radical)) + yellow(']')
        return ' %s%s%s\n' % (prefix, radical, suffix)
def follow_and_like(self):
    """Pick a user from the to-follow pool, like a few of their recent
    medias and follow them; pauses when the bot's like limit is reached."""
    self.update_to_follow()
    if self.bot.reached_limit("likes"):
        print(green(bold(f"\nOut of likes, pausing for 10 minutes.")))
        self.print_sleep(600)
        return
    user_id = self.to_follow.random()
    busy = True
    # Keep drawing until we land on a public account the bot accepts.
    while busy:
        if self.get_user_info(
                user_id)["is_private"] or not self.bot.check_user(user_id):
            user_id = self.to_follow.random()
            # NOTE(review): this removes the NEWLY drawn user_id (which is
            # then re-checked on the next loop iteration after already being
            # removed), while the rejected account stays in the pool —
            # confirm this is intended.
            self.to_follow.remove(user_id)
        else:
            busy = False
    username = self.get_user_info(user_id)["username"]
    medias = self.bot.get_user_medias(user_id)
    self.to_follow.remove(user_id)
    if medias and self.lastest_post(medias) < 21:  # days
        # Like between 4 and 10 random medias, capped by how many exist.
        n = min(random.randint(4, 10), len(medias))
        print(f"Liking {n} medias from `{username}`.")
        self.bot.like_medias(random.sample(medias, n))
        self.follow(user_id, tmp_follow=True)
    else:
        # Abandon user and call self recursively.
        self.follow_and_like()
def calculate_length(BASE_PATH, no_subdir, media_type):
    """Report the combined duration of all media files found under BASE_PATH.

    Two parallel passes: first filter the candidate paths down to actual
    media files, then probe each file's duration and sum them up.
    """
    if not os.path.isdir(BASE_PATH):
        return bold(red("Error: This doesn't seem to be a valid directory."))

    candidates = get_all_files(BASE_PATH, no_subdir)

    # Pass 1: keep only the paths that is_media_file() recognises.
    media_paths = []
    with ProcessPoolExecutor() as executor:
        sys.stdout.write('\n')
        futures = [executor.submit(is_media_file, p) for p in candidates]
        progress = tqdm(as_completed(futures),
                        total=len(futures),
                        desc='Filtering {}'.format(media_type))
        for future in progress:
            found = future.result()
            if found is not None:
                media_paths.append(found)

    if not media_paths:
        return bold(
            red(r'Seems like there are no {} files. ¯\_(ツ)_/¯'.format(
                media_type)))

    # Pass 2: probe every surviving file's duration in parallel.
    with ProcessPoolExecutor() as executor:
        sys.stdout.write('\n')
        durations = list(
            tqdm(
                executor.map(duration, media_paths),
                total=len(media_paths),
                desc='Calculating time',
            ))

    total_minutes = round(sum(durations))
    if total_minutes < 60:
        summary = 'Length of all {} is {}.'.format(
            media_type, pluralize(total_minutes, base='minute', suffix='s'))
    else:
        hours, minutes = divmod(total_minutes, 60)
        summary = 'Length of all {} is {} and {}.'.format(
            media_type,
            pluralize(hours, base='hour', suffix='s'),
            pluralize(minutes, base='minute', suffix='s'))
    return bold(green(summary))
def remove(self, x):
    """Delete `x` (compared by string value) from the list and persist it."""
    target = str(x)
    current = self.list
    if target not in current:
        return
    current.remove(target)
    print(bold(green("Removing '{}' from `{}`.".format(target, self.fname))))
    self.save_list(current)
def lucky_bunny(i):
    # Print an ASCII-art training banner with epoch number `i` highlighted.
    print('')
    print('| ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄|')
    print('| TRAINING |')
    print('| epoch |')
    # Epoch number rendered in bold green.
    print('| ' + hue.bold(hue.green(str(i))) + ' |')
    print('| (*^▽^*) |')
    print('| (≧∇≦)ノ) |')
    print('| ________|')
def process(self, data):
    """Parse one sniffed 802.15.4 frame and decode its RF4CE payload.

    Validates the FCS, skips ACK frames, matches the frame against the
    known link configurations (to recover source/destination/key), then
    parses and prints the RF4CE frame.
    """
    print(hue.bold(hue.green("\n------ {} ------".format(datetime.now()))))
    print(
        hue.yellow("Full packet data: ") +
        hue.italic(binascii.hexlify(data)))

    # Checks if the 802.15.4 packet is valid (last two bytes are the FCS).
    if makeFCS(data[:-2]) != data[-2:]:
        print(hue.bad("Invalid packet"))
        return

    # Parses 802.15.4 packet
    packet = Dot15d4FCS(data)
    packet.show()

    if packet.fcf_frametype == 2:  # ACK
        return

    # Tries to match received packet with a known link configuration
    matched = False
    for link in self.link_configs:
        if packet.dest_panid != link.dest_panid:
            continue
        if packet.fcf_srcaddrmode == 3:  # Long addressing mode
            if packet.src_addr != link.source.get_long_address():
                continue
            if packet.dest_addr != link.destination.get_long_address():
                continue
        else:
            if packet.src_addr != link.source.get_short_address():
                continue
            if packet.dest_addr != link.destination.get_short_address():
                continue
        source = link.source
        destination = link.destination
        key = link.key
        matched = True

    if not matched:
        # Unknown link: build ad-hoc nodes from the raw addresses, no key.
        if packet.fcf_srcaddrmode == 3:
            source = Rf4ceNode(packet.src_addr, None)
            destination = Rf4ceNode(packet.dest_addr, None)
        else:
            source = Rf4ceNode(None, packet.src_addr)
            destination = Rf4ceNode(None, packet.dest_addr)
        key = None

    # Process RF4CE payload
    frame = Rf4ceFrame()
    try:
        rf4ce_payload = bytes(packet[3].fields["load"])
        frame.parse_from_string(rf4ce_payload, source, destination, key)
    # Fix: "except Rf4ceException, e" is Python-2-only syntax; "as e" is
    # valid on both Python 2.6+ and Python 3.
    except Rf4ceException as e:
        print(hue.bad("Cannot parse RF4CE frame: {}".format(e)))
        return
def disp_exp(options):
    """Print the recommendations selected via options.exp and exit.

    Scored recommendations are shown in green, unscored ones in yellow.
    """
    # All-numeric selections are treated as explicit include filters.
    if all(i.startswith(('1', '2', '3', '4', '5', '6')) for i in options.exp):
        options.include = options.exp

    for rec in get_recommendations(options):
        profileServer = 'Level ' + str(rec[2]) + ' Server' if rec[2] else 'N/A'
        profileWorkstation = ('Level ' + str(rec[3]) + ' Workstation'
                              if rec[3] else 'N/A')
        line = '{:<9}|{:<10}|{:<14}|{:<19}|'.format(
            rec[0], 'Scored' if rec[1] else 'Not Scored', profileServer,
            profileWorkstation) + rec[4]
        colorize = green if rec[1] else yellow
        print(bold(colorize(line)))
    exit()
def lucky_bunny(i):
    # Print an ASCII-art bunny banner with epoch number `i` highlighted.
    print('')
    print('| ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄|')
    print('| TRAINING |')
    print('| epoch |')
    # Epoch number rendered in bold green.
    print('| ' + hue.bold(hue.green(str(i))) + ' |')
    print('| ________|')
    print(' (\__/) ||')
    print(' (•ㅅ•) || ')
    print(' / づ')
    print('')
def main(new_args, get_model_fn):
    """Evaluate a trained person-search model.

    Restores the training-time args from the run directory, loads the
    checkpoint, runs inference on gallery/probe loaders, then scores
    detection and search performance. Returns the search mAP.
    """
    args = Nestedspace()
    # Restore the args the model was trained with, then overlay the newly
    # supplied ones on top.
    args.load_from_json(osp.join(new_args.path, 'args.json'))
    args.from_dict(new_args.to_dict())  # override previous args

    device = torch.device(args.device)
    cudnn.benchmark = False

    print(
        hue.info(
            hue.bold(hue.lightgreen('Working directory: {}'.format(
                args.path)))))

    # Seed all RNGs for a reproducible evaluation run.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    gallery_loader, probe_loader = get_data_loader(args, train=False)
    model = get_model_fn(args, training=False, pretrained_backbone=False)
    model.to(device)

    # Load trained weights from the run directory's checkpoint.
    args.resume = osp.join(args.path, 'checkpoint.pth')
    args, model, _, _ = resume_from_checkpoint(args, model)

    name_to_boxes, all_feats, probe_feats = \
        inference(model, gallery_loader, probe_loader, device)

    print(hue.run('Evaluating detections:'))
    precision, recall = detection_performance_calc(gallery_loader.dataset,
                                                   name_to_boxes.values(),
                                                   det_thresh=0.01)

    print(hue.run('Evaluating search: '))
    # NOTE(review): 100 appears to be a fixed gallery size for CUHK-SYSU and
    # -1 "use all" for other datasets — confirm against
    # search_performance_calc.
    gallery_size = 100 if args.dataset == 'CUHK-SYSU' else -1
    ret = gallery_loader.dataset.search_performance_calc(
        gallery_loader.dataset,
        probe_loader.dataset,
        name_to_boxes.values(),
        all_feats,
        probe_feats,
        det_thresh=0.5,
        gallery_size=gallery_size)

    # import IPython
    # IPython.embed()
    return ret['mAP']
def main(args, get_model_fn):
    """Train a model built by `get_model_fn`.

    Sets up (optional) distributed mode, seeds all RNGs, prepares the run
    directory and an optional TensorBoard writer on the main process, then
    runs the trainer for the configured number of epochs.
    """
    if args.distributed:
        init_distributed_mode(args)
    device = torch.device(args.device)
    cudnn.benchmark = False
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # Bug fix: tfboard must exist on EVERY rank — it was previously only
    # assigned inside the is_main_process() branch, raising NameError at the
    # get_trainer() call on non-main processes.
    tfboard = None
    if is_main_process():
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        args.path = osp.join(args.path,
                             current_time + '_' + socket.gethostname())
        mkdir_if_missing(args.path)
        print(
            hue.info(
                hue.bold(
                    hue.lightgreen('Working directory: {}'.format(
                        args.path)))))
        if args.train.use_tfboard:
            tfboard = SummaryWriter(log_dir=args.path)
            # NOTE(review): args.json is only exported when tensorboard is
            # enabled (original behaviour, kept) — looks accidental; confirm.
            args.export_to_json(osp.join(args.path, 'args.json'))

    train_loader = get_data_loader(args, train=True)
    model = get_model_fn(args, training=True, pretrained_backbone=True)
    model.to(device)
    optimizer = get_optimizer(args, model)
    lr_scheduler = get_lr_scheduler(args, optimizer)
    trainer = get_trainer(args, model, train_loader, optimizer, lr_scheduler,
                          device, tfboard)

    if args.debug:
        from lib.utils.debug_tools import get_rcnn_fg_bg_ratio
        trainer.add_event_handler(Events.STARTED,
                                  get_rcnn_fg_bg_ratio(args, model))

    trainer.run(train_loader, max_epochs=args.train.epochs)

    # Bug fix: only close the writer when one was actually created
    # (previously crashed with AttributeError when use_tfboard was False).
    if is_main_process() and tfboard is not None:
        tfboard.close()
def __repr__(self):
    """Colorized, human-readable dump of this RF4CE frame.

    Format: "(src) -> (dst) : [<type info> - counter:<n>] : <hex payload>".
    """
    # `desc` avoids shadowing the builtin `type` (original local name).
    if self.frame_type == Rf4ceConstants.FRAME_TYPE_DATA:
        desc = hue.lightblue("DATA") + " - "
        desc += "profile:" + hue.lightblue(
            "0x{:x}".format(self.profile_indentifier))
    elif self.frame_type == Rf4ceConstants.FRAME_TYPE_COMMAND:
        desc = hue.lightblue("COMMAND") + " - "
        desc += "cmd:" + hue.lightblue("0x{:x}".format(self.command))
    elif self.frame_type == Rf4ceConstants.FRAME_TYPE_VENDOR:
        desc = hue.lightblue("VENDOR") + " - "
        # Fix: format the value BEFORE colorizing, consistent with the other
        # branches (the original called .format() on the already-colorized
        # template string; output is identical, but the form was fragile).
        desc += "profile:" + hue.lightblue(
            "0x{:x}".format(self.profile_indentifier))
        desc += " - vendor:" + hue.lightblue(
            "0x{:x}".format(self.vendor_indentifier))
    data = hue.bold(binascii.hexlify(self.payload).decode())
    counter = hue.lightblue("0x{:x}".format(self.frame_counter))
    result = "({}) -> ({}) : ".format(self.source, self.destination)
    result += "[{} - counter:{}] : {}".format(desc, counter, data)
    return result
def print_args(args, default_args):
    """Pretty-print parsed CLI args, separating changed values from defaults."""
    from huepy import bold, lightblue, orange, lightred, green, red

    current = vars(args)
    defaults = vars(default_args)

    print(bold(lightblue(' - ARGV: ')), '\n', ' '.join(sys.argv), '\n')

    # Accumulate one formatted line per argument, split by whether the
    # value differs from its default.
    unchanged_lines = ''
    changed_lines = ''
    for name in sorted(current.keys()):
        value = current[name]
        default = defaults[name]
        if default == value:
            shown = value if value != '' else '<empty>'
            unchanged_lines += f"{lightblue(name):>50} : {orange(shown)}\n"
        else:
            shown_default = default if default != '' else '<empty>'
            changed_lines += (
                f"{lightred(name):>50} : {green(value)} "
                f"(default {orange(shown_default)})\n")

    print(f'{bold(lightblue("Unchanged args")):>69}\n\n'
          f'{unchanged_lines[:-1]}\n\n'
          f'{bold(red("Changed args")):>68}\n\n'
          f'{changed_lines[:-1]}\n')
def get_mp4_link_api(capitol_id):
    """Query the dinamics API for an episode and return its best mp4 URL.

    Picks the variant with the highest resolution label (e.g. "1080p").
    Returns None when no suitable link can be extracted.
    """
    api_url = f"{DINAMICS_API}/pvideo/media.jsp?media=video&idint={capitol_id}"
    api_response = requests.get(api_url)
    payload = api_response.json()

    # Newer responses nest the link list under "variants".
    try:
        mp4_links = payload["variants"]["media"]["url"]
    except KeyError:
        mp4_links = payload["media"]["url"]

    try:
        # Labels look like "720p"; strip the trailing letter to compare.
        best = max(int(link["label"][:-1]) for link in mp4_links)
        max_resolution = str(best) + "p"
        url_mp4 = next(link["file"] for link in mp4_links
                       if max_resolution in link["label"])
    except Exception:
        url_mp4 = None

    if url_mp4:
        print(info(f"video url disclossed with {max_resolution}"))
        print(bold(url_mp4))
    return url_mp4
def how_to_use(command):
    """Render the markdown help document for `command` to the terminal.

    The commonmark AST is flattened into (type, literal, level) tags,
    whitespace/bullet glue is inserted between adjacent tags depending on
    what follows, and the result is syntax-highlighted before printing.
    """
    content = get_content(command)
    if not content:
        print("Sorry: could not find the `{}` command".format(command))
        return
    parse = commonmark.Parser()
    ast = parse.parse(content)
    tags = []
    for obj, entering in ast.walker():
        # Process each node once (on entry) and skip raw HTML blocks.
        if not entering or obj.t == TAG_HTML_BLOCK:
            continue
        tags.append(Tag(obj.t, obj.literal, obj.level))
    tag_length, out = len(tags), ""
    for i, tag in enumerate(tags):
        # Glue is only inserted when there is a following tag to join to.
        if i < tag_length - 1:
            if tag.t == TAG_HEADING:
                # Re-create the markdown heading marker ("# ", "## ", ...).
                tag.literal = huepy.bold("#" * tag.level + " ")
            if tag.t == TAG_TEXT:
                # Blank lines before block-level siblings, bullets before
                # list items/lists, "> " before block quotes.
                if tags[i + 1].t in (TAG_PARAGRAPH, TAG_HEADING,
                                     TAG_CODE_BLOCK):
                    tag.literal = tag.literal + "\n" * 2
                if tags[i + 1].t == TAG_ITEM:
                    tag.literal = tag.literal + "\n" + "- "
                if tags[i + 1].t == TAG_LIST:
                    tag.literal = tag.literal + "\n" * 2 + "- "
                if tags[i + 1].t == TAG_BLOCK_QUOTE:
                    tag.literal = tag.literal + "\n" * 2 + "> "
            if tag.t == TAG_CODE_BLOCK:
                tag.literal = tag.literal + "\n"
        if tag.literal:
            out += tag.literal
    doc = [wrap_text(d) for d in docs_need_space(out).strip().split("\n")]
    print(highlight("".join(doc), command))
def main():
    """Scan a URL: report security headers, cookie flags, and detect the
    site's technology stack via Wappalyzer's public regex database."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-u',
                        '--url',
                        dest='url',
                        metavar='URL',
                        required=True,
                        help='URL https://example.com')
    parser.add_argument(
        '--verify',
        action='store_true',
        default=False,
        help='Verify the SSL certificate. Default is set to False.')
    parser.add_argument('--description',
                        action='store_true',
                        help='Print header description')
    args = parser.parse_args()

    session = requests.Session()
    session.headers.update({
        'User-Agent':
        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0',
        'Cache-control': 'no-cache',
        'Pragma': 'no-cache',
        'Connection': 'close'
    })

    # prepend http if missing
    args.url = 'http://' + args.url if not args.url.startswith(
        'http') else args.url
    if not valid_url(args.url):
        parser.print_help()
        exit()

    try:
        response = session.get(url=args.url, verify=args.verify)
    except requests.exceptions.ConnectionError as e:
        print(bold(bad(f"{bold(red('connection error'))}: {e}")))
        print(bold(bad(f'{args.url}')))
        exit()
    except Exception:
        print(bold(bad(bold(red('connection error')))))
        print(bold(bad(f'{args.url}')))
        exit()

    headers = response.headers
    html = response.text
    soup = BeautifulSoup(html, "lxml")
    wappalyzer_json_url = "https://raw.githubusercontent.com/AliasIO/wappalyzer/master/src/technologies.json"

    # Security headers we check for, with optional explanations.
    check_headers = [
        'X-Content-Type-Options', 'X-Frame-Options', 'X-XSS-Protection',
        'Strict-Transport-Security', 'Content-Security-Policy',
        'Referrer-Policy', 'Feature-Policy'
    ]
    descriptions = {}
    descriptions['X-Content-Type-Options'] = que(
        'X-Content-Type-Options stops a browser from trying to MIME-sniff the content type and forces it to stick with the declared content-type. The only valid value for this header is "X-Content-Type-Options: nosniff".'
    )
    descriptions['X-Frame-Options'] = que(
        'X-Frame-Options tells the browser whether you want to allow your site to be framed or not. By preventing a browser from framing your site you can defend against attacks like clickjacking.'
    )
    descriptions['X-XSS-Protection'] = que(
        'X-XSS-Protection sets the configuration for the XSS Auditor built into older browser. The recommended value was "X-XSS-Protection: 1; mode=block" but you should now look at Content Security Policy instead.'
    )
    descriptions['Strict-Transport-Security'] = que(
        'HTTP Strict Transport Security is an excellent feature to support on your site and strengthens your implementation of TLS by getting the User Agent to enforce the use of HTTPS.'
    )
    descriptions['Content-Security-Policy'] = que(
        'Content Security Policy is an effective measure to protect your site from XSS attacks. By whitelisting sources of approved content, you can prevent the browser from loading malicious assets. Analyse this policy in more detail. You can sign up for a free account on Report URI to collect reports about problems on your site.'
    )
    descriptions['Referrer-Policy'] = que(
        'Referrer-Policy Referrer Policy is a new header that allows a site to control how much information the browser includes with navigations away from a document and should be set by all sites.'
    )
    descriptions['Feature-Policy'] = que(
        'Feature Policy is a new header that allows a site to control which features and APIs can be used in the browser.'
    )

    # Cookie attributes we look for on every Set-Cookie fragment.
    cookie_checks = [
        'Expires',
        'HttpOnly',
        'Secure',
        'Path=/',
    ]

    print(info(f"{bold('Request URL')}: {args.url}"))
    print(info(f"{bold('Response status code')}: {response.status_code}"))
    print(info(bold('Request headers:')))
    print(json.dumps(dict(session.headers), indent=2, sort_keys=True))
    print(info(bold('Response headers:')))
    print(json.dumps(dict(headers), indent=2, sort_keys=True))

    print(f"\n{run(bold('Checking security headers...'))}")
    for check_head in check_headers:
        # `headers` is case-insensitive (requests CaseInsensitiveDict).
        if check_head.lower() in headers:
            print(good(f'{check_head} found'))
        else:
            print(bad(f'{check_head} not found'))
        if args.description:
            if check_head in descriptions.keys():
                print(descriptions[check_head])

    print(f"\n{run(bold('Checking cookies...'))}")
    if 'set-cookie' in headers:
        # NOTE(review): splitting Set-Cookie on ',' breaks cookies whose
        # Expires attribute contains a comma — confirm acceptable.
        cookies = headers['Set-Cookie'].split(',')
        for cookie in cookies:
            print(f"{bold('cookie: ')} {cookie}")
            for cookie_check in cookie_checks:
                if cookie_check.lower() in cookie.lower():
                    print(good(f'{cookie_check} found'))
                else:
                    print(bad(f'{cookie_check} not found'))
    else:
        print(info('not found'))

    print(f"\n{run(bold('Checking Wappalyzer Regular Expressions...'))}")
    # Prepare wappalyzer data
    wappalyzer_json_file = requests.get(wappalyzer_json_url)
    if wappalyzer_json_file.ok:
        try:
            wappalyzer_json = json.loads(wappalyzer_json_file.text)
        except json.decoder.JSONDecodeError as e:
            print(bold(bad(f"{bold(red('JSONDecodeError'))}: {e}")))
            exit()
    else:
        print(
            bold(
                bad(f"{bold(red(f'Unable to get wappalyzer json file {wappalyzer_json_url}'))}"
                    )))
        exit()

    # category name -> set of detected technology names
    wappalyzer_categories = wappalyzer_json['categories']
    saved_apps = {}
    for k, v in wappalyzer_categories.items():
        name = wappalyzer_categories[k]['name']
        saved_apps[name] = set()

    # technology name -> set of its category ids
    wappalyzer_tech = wappalyzer_json['technologies']
    wappalyzer_names = {}
    for app_name, details in wappalyzer_tech.items():
        wappalyzer_names[app_name] = set()
        if 'cats' in details.keys():
            for ca in details['cats']:
                wappalyzer_names[app_name].add(ca)

    # Parse meta data: flatten each <meta> tag's attribute names + values.
    metas = []
    for meta in soup.findAll('meta'):
        meta_object = list(meta.attrs.keys()) + list(meta.attrs.values())
        metas.append(meta_object)

    for app_name, details in wappalyzer_tech.items():
        found = False
        try:
            # Check meta
            if 'meta' in details.keys():
                for k, v in details['meta'].items():
                    for meta in metas:
                        if k in meta and re.search(v, ' '.join(meta)):
                            for cat in details['cats']:
                                name = wappalyzer_categories[str(cat)]['name']
                                saved_apps[name].add(app_name)
                            found = True
            # Check headers
            if 'headers' in details.keys():
                for k, header in details['headers'].items():
                    # Bug fix: arguments were inverted — `header` is the
                    # Wappalyzer REGEX and headers[k] the actual response
                    # value, but the original searched the response value
                    # as a pattern inside the regex text.
                    if k in headers and re.search(header, headers[k]):
                        for cat in details['cats']:
                            name = wappalyzer_categories[str(cat)]['name']
                            saved_apps[name].add(app_name)
                        found = True
            # Check html and script
            search_in_html = []
            if 'html' in details.keys():
                if isinstance(details['html'], list):
                    search_in_html += details['html']
                if isinstance(details['html'], str):
                    search_in_html.append(details['html'])
            if 'script' in details.keys():
                if isinstance(details['script'], list):
                    search_in_html += details['script']
                if isinstance(details['script'], str):
                    search_in_html.append(details['script'])
            for regex in search_in_html:
                if re.search(regex, html):
                    for cat in details['cats']:
                        name = wappalyzer_categories[str(cat)]['name']
                        saved_apps[name].add(app_name)
                    found = True
            # A detected technology may imply others (e.g. WordPress -> PHP).
            if found and 'implies' in details.keys():
                if isinstance(details['implies'], list):
                    techs = details['implies']
                elif isinstance(details['implies'], str):
                    techs = [details['implies']]
                else:
                    techs = []
                for tech in techs:
                    subcats = wappalyzer_names[tech]
                    for subcat in subcats:
                        subcat_category = wappalyzer_categories[str(
                            subcat)]['name']
                        saved_apps[subcat_category].add(tech)
        except re.error:
            # Some Wappalyzer patterns are not valid Python regexes; skip.
            # print(warn(f'regex error: {regex}'))
            pass

    wappalyzer_found = False
    for category, app_names in saved_apps.items():
        if app_names:
            wappalyzer_found = True
            output = info(f"{category}: {', '.join(map(str, app_names))}")
            print(output)
    if not wappalyzer_found:
        print(info('not found'))
from lib.utils.evaluator import inference, detection_performance_calc if __name__ == '__main__': arg_parser = args_faster_rcnn_hoim() new_args = lazy_arg_parse(arg_parser) args = Nestedspace() args.load_from_json(osp.join(new_args.path, 'args.json')) args.from_dict(new_args.to_dict()) device = torch.device(args.device) cudnn.benchmark = False print( hue.info( hue.bold(hue.lightgreen('Working directory: {}'.format( args.path))))) np.random.seed(args.seed) torch.manual_seed(args.seed) gallery_loader, probe_loader = get_data_loader(args, train=False) model = get_hoim_model( pretrained_backbone=False, num_features=args.num_features, num_pids=args.num_pids, num_cq_size=args.num_cq_size, num_bg_size=args.num_bg_size, oim_momentum=args.train.oim_momentum, oim_scalar=args.oim_scalar, min_size=args.train.min_size,
def controlActsMsg():
    # Print the keyboard-control legend for the driving simulation.
    print(bold(blue("#----- Controls ------ #")))
    print("1. Up Arrow key -> Accelerate")
    print("2. Down Arrow Key -> Decelerate")
    print("3. Left or Right Arrow Key -> Lane Change")
    print(bold(blue("#----- Controls ------ #")))
cipher_array[sorted_score[j][0]], string_hex)) return result_array def xor_decrypt(string_hex): xor_array = xor_cipher(string_hex) # cipher by every char xor_score = xor_cipher_score(xor_array) # score result = xor_result(xor_score, xor_array, string_hex) # order by score # debug results: # print('(key, score, string_decrypted)') # for j in range(0, 10): # print(result[j]) final_result = result[0] # first one with better score return final_result if __name__ == '__main__': # The hex encoded string has been XOR'd against a single character. # Find the key, decrypt the message. input_string_hex = read_data('3', out_multiline=False) # Crack XOR encryption final_result = xor_decrypt(input_string_hex) print(info(f'final result:')) print(info(f'key: { final_result[0] } ({ chr(final_result[0]) })')) print(info(f'score: { final_result[1] }')) print(bold(good(f'decrypted string: { final_result[2] }')))
from challenge03 import xor_decrypt, read_data
from huepy import good, info, bold

if __name__ == '__main__':
    # One of the 60-character strings in the file
    # has been encrypted by single-character XOR.
    encrypted_data = read_data('4', out_multiline=True)
    decrypted_data = []
    for line in encrypted_data:
        decrypted_data.append(xor_decrypt(line))
    # NOTE(review): tup[0] is the cipher key byte, tup[1] the score —
    # sorting by tup[0] ranks candidates by key value, not score. Sorting
    # on the score (and in the right direction) seems intended; confirm
    # against challenge03's result tuple layout.
    sorted_score = sorted(decrypted_data, key=lambda tup: tup[0])[0]
    print(info(f'input_hex: {sorted_score[3]}'))
    print(good(f'score: {sorted_score[1]}'))
    print(good(f'cipher key: {sorted_score[0]} ( {chr(sorted_score[0])} )'))
    print(bold(good(f'decrypted_string: {sorted_score[2].encode()}')))
# Instantiate the RL controller matching the configured training algorithm.
print(trainAlgo)
if trainAlgo == "IMPALA":
    controller = impalaController(args.sim_config, algoConfig,
                                  args.checkpoint_file)
elif trainAlgo == "PPO":
    controller = ppoController(args.sim_config, algoConfig,
                               args.checkpoint_file)
else:
    # raiseValueError is a project helper that raises a colorized ValueError.
    raiseValueError("invalid training algo %s" % (trainAlgo))

# Warn user if data save is not enabled
if (args.save_data != 1):
    print(
        info(
            bold(
                red("save is disabled, simulation data will not be saved to disk."
                    ))))

# Local Env
env = V2I.V2I(args.sim_config, "test", paramDict(args))

# Init Render if enabled
fig, ax1, ax2 = None, None, None
if args.render_graphs == 1:
    fig, ax1, ax2 = initRender()

# Use LSTM if enabled by sim-config file
useLstm = False
if simConfig["config"]["enable-lstm"]:
    useLstm = True
def wrapper(*args, **kwargs):
    """Announce the wrapped function, then delegate to it unchanged."""
    banner = green(bold(f"\n\nStarting with `{f.__name__}`."))
    print(banner)
    return f(*args, **kwargs)
if options.exp != None: disp_exp(options) # generates report and exits if options.report != None: generatePDF(options.report) # exit if SeBAz isn't run as root if not geteuid() == 0: exit('\nPlease run SeBAz as root\n') # starting terminal manager by enlighten manager = get_manager() system('sudo clear') print(Image(path.join(_MEIPASS, 'Logo.png'))) print(bold('Welcome to SeBAz')) print('\n\nGive me a moment to calculate the prerequisites...\n\n') # writing test details and start time to .SeBAz.csv file file_path = path.dirname(path.abspath(__file__)) + '/' + \ str(options.org) + '-' + str(options.unique) + '.SeBAz.csv' with open(file_path, 'w', newline='') as csvfile: csvwriter = writer(csvfile, dialect='excel') csvwriter.writerow( ['Recommendation Number', 'Message', 'Result', 'Explanation', 'Time']) length = len(recommendations) score = 0 passed = 0 if options.verbose:
#!/usr/bin/env python3 # huepy muss vorher installiert werden. import huepy print(huepy.bold(huepy.red("red and bold"))) print(huepy.run("Starte...")) print(huepy.info("Info!!")) print(huepy.bad("Schlecht!")) print(huepy.good("Gut!")) input(huepy.que("Frage? ")) # mit Fortschritt: from tqdm import tqdm for x in tqdm(range(5000000), desc=huepy.run("Dinge")): pass
def main_menu():
    """Show the top-level category menu, then the site menu for the chosen
    category, and return the user's final selection."""
    print(bold(green('\n Select an option')))
    category = build_menu(menu_sites, SF_PROMPT, False)
    return build_menu(menu_sites[category], SF_PROMPT)
def raiseValueError(msg):
    # Raise a ValueError whose message is colorized (bold red, "bad" prefix).
    raise ValueError(bad(bold(red(msg))))
menu_sites = { "Social Media": { "Facebook", "Google", "LinkedIn", "Twitter", "Instagram", "Snapchat", "FbRobotCaptcha", "VK", "Github", } } SF_PROMPT = bold(red(" ------ ")) def colorize_option(chave, valor): ''' Based on index type format and print out menu options. ''' if type(chave) == int: selector = yellow(' [') + bold(red('%s')) + yellow('] ') suffix = yellow('%s') return selector % chave + suffix % valor if type(chave) == str: pos = valor.lower().find(chave) prefix, radical, suffix = valor.partition(valor[pos]) if prefix: prefix = red('%s')