def _(self, *arg, **kw):
    # Wrapper around the enclosing `func` (closure variable): either calls it
    # and pretty-prints the result, or micro-benchmarks it when timeit=True.
    # NOTE: this is Python 2 code (print statement, xrange).
    timeit = kw.pop('timeit', False)  # consumed here; never forwarded to func
    if not timeit:
        ret = func(self, *arg, **kw)
        if hasattr(ret, 'text'):
            # Duck-typed as an HTTP response object: prefer its JSON payload,
            # fall back to the raw text when it is not valid JSON.
            # NOTE(review): bare `except:` also swallows KeyboardInterrupt etc.
            try:
                data = json.loads(ret.text)
            except:
                data = ret.text
        else:
            data = ret
        if data:
            # '执行结果:' means "execution result:" (runtime string, kept as-is)
            print info(purple('执行结果:'))
            print red(
                unicode_to_utf8(json.dumps(
                    data, indent = 2, ensure_ascii = False
                ))
            )
    else:
        # Crude benchmark: 10000 calls with NO arguments.
        # NOTE(review): func() drops self/arg/kw here — presumably only
        # meant for zero-argument functions; confirm with callers.
        st = time.time()
        for i in xrange(10000):
            func()
        print time.time() - st
def calculate_length(BASE_PATH, no_subdir, media_type):
    """Sum the duration of every file under BASE_PATH in parallel and return
    a colored, human-readable summary string.

    BASE_PATH  -- directory to scan (error string returned if invalid)
    no_subdir  -- forwarded to get_all_files (presumably disables recursion
                  — confirm against its definition)
    media_type -- label used in the result message (e.g. 'video')
    """
    if not os.path.isdir(BASE_PATH):
        return bold(red('Error: This doesn\'t seem to be a valid directory.'))

    all_files = get_all_files(BASE_PATH, no_subdir)

    # Compute per-file durations across all CPU cores.
    with ProcessPoolExecutor() as executor:
        sys.stdout.write('\n')
        result = list(
            tqdm(
                executor.map(duration, all_files),
                total=len(all_files),
                desc='Processing files',
            )
        )

    length = round(sum(result))
    if length == 0:
        # BUG FIX: raw string — `\_` is an invalid escape sequence in a
        # normal string literal (SyntaxWarning, future SyntaxError).
        return bold(red(r'Seems like there are no {} files. ¯\_(ツ)_/¯'.format(media_type)))
    elif length < 60:
        minutes_string = pluralize(length, base='minute', suffix='s')
        result = 'Length of all {} is {}.'.format(media_type, minutes_string)
    else:
        hours, minutes = divmod(length, 60)
        hours_string = pluralize(hours, base='hour', suffix='s')
        minutes_string = pluralize(minutes, base='minute', suffix='s')
        result = 'Length of all {} is {} and {}.'.format(
            media_type, hours_string, minutes_string
        )
    return bold(green(result))
def calculate_length(BASE_PATH, no_subdir, media_type, queue, cache_ob):
    """Sum durations of all files under BASE_PATH in parallel, feeding a
    cache-writer through `queue`, and return a colored summary string.

    queue    -- receives progress/cache messages from `duration` workers;
                a None poison pill is sent when processing is done
    cache_ob -- object whose .cache mapping is shared with the workers
    """
    if not os.path.isdir(BASE_PATH):
        return bold(red('Error: This doesn\'t seem to be a valid directory.'))

    all_files = get_all_files(BASE_PATH, no_subdir)

    # One extra worker beyond the CPU count to keep the pool saturated.
    max_workers = multiprocessing.cpu_count() + 1
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        sys.stdout.write('\n')
        cache = cache_ob.cache
        # duration() receives (path, queue, cache) tuples.
        args = ((file, queue, cache) for file in all_files)
        result = list(
            tqdm(
                executor.map(duration, args),
                total=len(all_files),
                desc='Processing files',
            ))

    length = round(sum(result))
    queue.put(None)  # poison pill: tell the queue consumer to stop

    if length == 0:
        # BUG FIX: raw string — `\_` is an invalid escape sequence in a
        # normal string literal (SyntaxWarning, future SyntaxError).
        return bold(
            red(r'Seems like there are no {} files. ¯\_(ツ)_/¯'.format(
                media_type)))
    elif length < 60:
        minutes_string = pluralize(length, base='minute', suffix='s')
        result = 'Length of all {} is {}.'.format(media_type, minutes_string)
    else:
        hours, minutes = divmod(length, 60)
        hours_string = pluralize(hours, base='hour', suffix='s')
        minutes_string = pluralize(minutes, base='minute', suffix='s')
        result = 'Length of all {} is {} and {}.'.format(
            media_type, hours_string, minutes_string)
    return bold(green(result))
def size(path, json=False, verbose=False):
    ''' check size of a given path or directory examples: \n\n luz disk size /tmp \n luz disk size /home --json (print output in JSON) \n luz disk size /home -v (print names and size of all subfolders) '''
    # The `json` parameter is a CLI flag that shadows the stdlib json module,
    # so import the module under an alias for serialising the payload below.
    import json as jsonlib

    payload = get_dir_size(json, verbose, path)
    if payload == 'error':
        return
    if json:
        # `verbose` is presumably a tuple of flags from click — TODO confirm.
        if True in verbose:
            click.echo(color(payload, bg='black', fg='white'))
        else:
            try:
                print(payload)
                # BUG FIX: `json` here is the boolean flag, not the module,
                # so json.dump(...) raised AttributeError on every call (and
                # json.dump writes to a file object anyway) — use the stdlib
                # module alias and dumps() instead.
                click.echo(yellow(jsonlib.dumps(payload['total'])))
            except AttributeError as e:
                click.echo(red('error generating json, %s' % str(e)))
            # NOTE(review): flattened source is ambiguous about whether this
            # line belonged inside the except block; kept as an always-on
            # summary, which matches the original observable behaviour.
            click.echo(yellow('total (kb): ' + str(payload['total']['kb'])))
    else:
        if True in verbose:
            # Per-subdirectory breakdown.
            for d in payload['dirs']:
                try:
                    click.echo(color(str(d), fg='yellow') + color(' {:,} bytes'.format(payload['dirs'][d]), fg='white'))
                except UnicodeEncodeError as e:
                    click.echo(red('error displaying sub directories %s' % str(e)))
        else:
            # Plain totals in every unit.
            total_b = str(payload['total']['b'])
            total_kb = str(payload['total']['kb'])
            total_mb = str(payload['total']['mb'])
            total_gb = str(payload['total']['gb'])
            click.echo(yellow("disk space: {0}").format(path))
            horizontal()
            click.echo(white("{0} B\n{1} KB\n{2} MB\n{3} GB\n".format(total_b, total_kb, total_mb, total_gb)))
def colorize_option(chave, valor):
    '''
    Based on index type format and print out menu options.

    chave -- int: numeric menu index, rendered as "[n] label";
             str: hotkey character, highlighted inside the label.
    valor -- the option label text.
    '''
    if isinstance(chave, int):
        selector = yellow(' [') + bold(red('%s')) + yellow('] ')
        suffix = yellow('%s')
        return selector % chave + suffix % valor
    if isinstance(chave, str):
        pos = valor.lower().find(chave)
        # NOTE(review): if chave is absent pos is -1 and valor[-1] (the last
        # character) is used as the split point — confirm callers always pass
        # a character contained in valor.
        prefix, radical, suffix = valor.partition(valor[pos])
        if prefix:
            # BUG FIX: was `prefix = red('%s')`, which threw away the actual
            # prefix text and rendered a colored literal "%s" instead.
            prefix = red('%s' % prefix)
        radical = yellow('[') + bold(red('%s' % radical)) + yellow(']')
        return ' %s%s%s\n' % (prefix, radical, suffix)
def youdao_api(words: str):
    """Look up *words* through the Youdao open translation API and print
    phonetics, translations, dictionary explains and related web phrases.

    Any network/parse failure is reported as a single error line rather
    than raised (best-effort CLI output).
    """
    print()
    url = ("http://fanyi.youdao.com/openapi.do?keyfrom={}&key={}&"
           "type=data&doctype=json&version=1.1&q={}")
    try:
        full_url = url.format(CONF.youdao_key_from, CONF.youdao_key, words)
        resp = requests.get(full_url, headers=HEADERS).json()

        # Header line: word, optional phonetic, source attribution.
        basic = resp.get("basic", None)
        phonetic = ""
        if basic and basic.get("phonetic"):
            phonetic += huepy.purple(" [ " + basic.get("phonetic") + " ]")
        print(" " + words + phonetic + huepy.grey(" ~ fanyi.youdao.com"))
        print()

        # Primary machine translation.
        translation = resp.get("translation", [])
        if len(translation) > 0:
            print(" - " + huepy.green(translation[0]))

        # Dictionary-style explanations, one per line.
        if basic and basic.get("explains", None):
            for item in basic.get("explains"):
                print(huepy.grey(" - ") + huepy.green(item))
        print()

        # Related web phrases with their translations.
        web = resp.get("web", None)
        if web and len(web):
            for idx, item in enumerate(web):
                key_line = " " + str(idx + 1) + ". " + highlight(item.get("key"), words)
                print(huepy.grey(key_line))
                print(" " + huepy.cyan(", ".join(item.get("value"))))
    except Exception:
        print(" " + huepy.red(ERR_MSG))
def convert(src, dst, args):
    """Read image `src` with OpenCV, optionally resize and strip alpha, and
    write it to `dst` in the format chosen by args.out_ext.

    Skips work when `dst` exists and args.overwrite is False. Prints the
    source path in red and returns when the image cannot be read.
    """
    if args.overwrite or not os.path.exists(dst):
        img = cv2.imread(src, -1)
        if img is None:
            print(red(src))
            return
        # Resize when explicit output dims are set, or when a max dimension
        # cap is requested (precedence: (h>0 and w>0) or max_dim>0).
        if (args.out_img_height > 0) and (args.out_img_width > 0) or args.max_dim > 0:
            if args.save_aspect_ratio:
                img = image_resize(img, max_dim=args.max_dim, only_downscale=args.only_downscale)
            else:
                img = image_resize(img, specific_size=(args.out_img_height, args.out_img_width))
        # BUG FIX: `len(img.shape)` is always truthy (2 or 3), so a 2-D
        # grayscale image crashed on the channel slice below. Only strip the
        # alpha channel when a channel axis actually exists.
        if args.remove_alpha and len(img.shape) == 3:
            img = img[:, :, :3]
        if args.out_ext == 'png':
            cv2.imwrite(dst, img, [cv2.IMWRITE_PNG_COMPRESSION, 9])
        elif args.out_ext == 'jpg':
            cv2.imwrite(dst, img, [int(cv2.IMWRITE_JPEG_QUALITY), args.out_jpg_q])
def get_image(image_name):
    """Load `image_name` and return it as an RGB array with a channel axis,
    or None (after printing the path in red) when it cannot be read.

    Uses jpeg4py for JPEGs when available, falling back to OpenCV.
    NOTE: relies on module-level `grayscale` flag — when set, single-channel
    images are returned as (H, W, 1) instead of being expanded to RGB.
    """
    img = None

    # Fast path: decode JPEGs with jpeg4py when the binding is present.
    jpeg_suffixes = ("jpg", "JPG", "jpeg", "JPEG")
    if has_jpeg4py and image_name.endswith(jpeg_suffixes):
        try:
            img = jpeg4py.JPEG(image_name).decode()
        except Exception:
            print('jpeg4py error!')

    # Fallback: OpenCV returns BGR (or None on failure).
    if img is None:
        img = cv2.imread(image_name)
        if img is None:
            print(red(image_name))
            return None
        if len(img.shape) == 3:
            img = img[:, :, ::-1]  # BGR -> RGB

    if len(img.shape) < 3:
        img = np.expand_dims(img, -1)  # grayscale -> (H, W, 1)

    if img.shape[-1] != 3 and not grayscale:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    return img
def load_module_(extension, module_type, module_name, raise_error=True):
    '''
    Import a module either from an extension package or from the default
    location, resolving paths relative to RECOGNITION_PATH.

    module_type : models | dataloaders
    Returns the imported module, or None when not found and raise_error is
    False; raises AssertionError when not found and raise_error is True.
    '''
    cdir = os.getcwd()
    os.chdir(RECOGNITION_PATH)
    print(os.getcwd())
    try:
        if extension == '':
            m = importlib.import_module(f'{module_type}.{module_name}')
        elif os.path.exists(
                f'extensions/{extension}/{module_type}/{module_name}.py'):
            m = importlib.import_module(
                f'extensions.{extension}.{module_type}.{module_name}')
            print(f" - Extension module {green(module_name)} loaded.")
        elif os.path.exists(f'{module_type}/{module_name}.py'):
            m = importlib.import_module(f'{module_type}.{module_name}')
            print(f" - Default module {green(module_name)} loaded.")
        elif raise_error:
            assert False, red(
                f" - Default or extension module {module_name} not found."
            )
        else:
            return None
    finally:
        # BUG FIX: the original only restored the working directory on the
        # success path — a failed import, the not-found assertion, or the
        # early `return None` all left the process chdir'd into
        # RECOGNITION_PATH. Restore unconditionally.
        os.chdir(cdir)
    return m
def get_optimizer_class(name):
    """Resolve an optimizer class by name.

    Looks in this module's own namespace first (custom optimizers), then in
    torch.optim; fails hard when the name is unknown in both.
    """
    module_ns = sys.modules[__name__].__dict__
    if name in module_ns:
        return module_ns[name]
    if name in torch.optim.__dict__:
        return torch.optim.__dict__[name]
    assert False, red(f"Cannot find optimizer with name {name}")
def calculate_length(BASE_PATH, no_subdir, media_type):
    """Two-pass parallel scan: filter media files under BASE_PATH, sum their
    durations, and return a colored human-readable summary string."""
    if not BASE_PATH or not os.path.isdir(BASE_PATH):
        return bold(red('Error: This doesn\'t seem to be a valid directory.'))

    all_files = get_all_files(BASE_PATH, no_subdir)

    # Pass 1: keep only the paths that is_media_file() recognises.
    video_files = []
    with ProcessPoolExecutor() as executor:
        sys.stdout.write('\n')
        tasks = [
            executor.submit(is_media_file, file_path)
            for file_path in all_files
        ]
        progress = tqdm(as_completed(tasks), total=len(tasks),
                        desc='Filtering {}'.format(media_type))
        for task in progress:
            path = task.result()
            if path is not None:
                video_files.append(path)

    if not video_files:
        return bold(
            red(r'Seems like there are no {} files. ¯\_(ツ)_/¯'.format(
                media_type)))

    # Pass 2: compute each file's duration in parallel.
    with ProcessPoolExecutor() as executor:
        sys.stdout.write('\n')
        durations = tqdm(executor.map(duration, video_files),
                         total=len(video_files),
                         desc='Calculating time')
        result = list(durations)

    length = round(sum(result))
    if length < 60:
        minutes_string = pluralize(length, base='minute', suffix='s')
        result = 'Length of all {} is {}.'.format(media_type, minutes_string)
    else:
        hours, minutes = divmod(length, 60)
        hours_string = pluralize(hours, base='hour', suffix='s')
        minutes_string = pluralize(minutes, base='minute', suffix='s')
        result = 'Length of all {} is {} and {}.'.format(
            media_type, hours_string, minutes_string)
    return bold(green(result))
def load_config(extension, config_name, args):
    """Find <config_name>.yaml in the extension dir, the library configs dir,
    or the extension's configs subdir (in that order) and return the
    update-defaults function built from the first match.

    Fails hard when the extension is empty or no candidate file exists.
    """
    if extension == '':
        assert False, red(f' - Extension is not specified.')

    # Candidate locations, searched in priority order.
    candidates = [
        f'extensions/{extension}/{config_name}.yaml',
        f'configs/{config_name}.yaml',
        f'extensions/{extension}/configs/{config_name}.yaml',
    ]
    for config in candidates:
        if os.path.exists(config):
            print(f' - Using config {green(config)}')
            return get_update_defaults_fn(config, args)
        print(f' - Did not find config {green(config)}')

    assert False, red(f' - Config not found!')
def end():
    """Clear the terminal, print the closing banner and basic host info."""
    system('clear')
    print(red(fin))  # `fin` is presumably the closing banner — defined elsewhere
    lines = (
        "\nOS: \t" + so,
        "\nMachine:\t" + platform.machine(),
        "\nPC-Name:\t" + platform.node(),
        "\nPlataform:\t" + platform.platform(),
        "",
    )
    for line in lines:
        print(line)
def records_prompt_shell():
    """Spawn an interactive litecli shell on the fy database (FY_DB_PATH).

    Best-effort: a missing litecli install or a connection error is reported
    as a one-line message rather than raised.
    """
    try:
        from litecli.main import LiteCli
        litecli = LiteCli(prompt="Type quit to exit shell.\nPrompt: ")
        litecli.connect(database=FY_DB_PATH)
        litecli.run_cli()
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit (making Ctrl-C print the error message instead of
    # exiting); catch Exception only.
    except Exception:
        print(huepy.red("sorry, it can't spawn records prompt shell."))
def get_image_cv2(path, force_3channels=False):
    """Read an image with OpenCV (unchanged depth/channels via flag -1).

    3-channel images are returned as RGB with any alpha stripped; when
    force_3channels is set, single-channel images are stacked to 3 channels.
    Returns None (after printing the path in red) when the read fails.
    """
    img = cv2.imread(path, -1)
    if img is None:
        print(red(path))
        # BUG FIX: the original fell through after the print and crashed on
        # `img.shape` (TypeError on None); report the bad path and bail out.
        return None
    if len(img.shape) == 3:
        img = img[:, :, :3]     # drop alpha, if any
        img = img[:, :, ::-1]   # BGR -> RGB
    elif force_3channels:
        img = np.concatenate(
            [img[:, :, None], img[:, :, None], img[:, :, None]], axis=2)
    return img
def get_checkpoint_path(self, args):
    """Return the first existing checkpoint path.

    Tries args.checkpoint as-is, then relative to the experiment's
    checkpoints directory; fails hard when neither exists.
    """
    candidates = [
        args.checkpoint,
        args.experiment_dir / 'checkpoints' / args.checkpoint
    ]
    for candidate in candidates:
        if candidate.exists():
            return candidate
    assert False, red('Checkpoint path was set, but not found. \n' +
                      str(candidates))
def output_stats(self):
    """Print a small scoreboard: this player's name, gold, infamy and units,
    followed by the same block for the opponent."""
    sides = (
        (green("Player:"), self.name, self.gold, self.infamy, self.units),
        (red("Opponent:"), self.opponent.name, self.opponent.gold,
         self.opponent.infamy, self.opponent.units),
    )
    for label, name, gold, infamy, units in sides:
        print("\t| {} {}".format(label, name))
        print("\t|\t {} {}, {} {}".format(yellow("Gold:"), gold,
                                          orange("Infamy:"), infamy))
        for unit in units:
            print("\t|\t {}".format(unit))
def run(self):
    """Thread run method.

    Drains URLs from input_queue, scrapes each, and pushes results to
    output_queue; exits cleanly when the queue stays empty for 1 second.
    """
    try:
        # BUG FIX: the original looped on `self.input_queue.not_empty`,
        # which is a threading.Condition object and therefore ALWAYS truthy
        # — the loop was never meant to exit via that test. The real exit
        # path is the queue.Empty raised by get(timeout=1) below.
        while True:
            url = self.input_queue.get(timeout=1)
            data = self.scraper.scrape(url)
            self.output_queue.put(data)
            self.input_queue.task_done()
    except queue.Empty:
        pass
    except WebDriverException as err:
        print(hue.red(str(err)))
        # Mark the failed item done so join() on the queue can't deadlock.
        self.input_queue.task_done()
def iciba_api(words: str):
    """Look up *words* through the iciba dictionary API (XML response) and
    print phonetics, part-of-speech meanings and example sentences.

    Any network/parse failure is reported as a single error line rather
    than raised (best-effort CLI output).
    """
    print()
    print(huepy.grey(" -------- "))
    print()
    url = "http://dict-co.iciba.com/api/dictionary.php?key={key}&w={w}&type={type}"
    try:
        resp = requests.get(url.format(key=CONF.iciba_key, w=words, type="xml"))
        resp.encoding = "utf8"
        dct = xmltodict.parse(resp.text).get("dict")

        # Header line: word, optional phonetic, source attribution.
        ps = dct.get("ps") or ""
        print(" " + words + " " + huepy.purple(ps) + huepy.grey(" ~ iciba.com"))
        print()

        # Part-of-speech + meaning pairs; both may come back as a single
        # item rather than a list, so normalise first.
        pos = dct.get("pos")
        acceptation = dct.get("acceptation")
        if pos and acceptation:
            if not isinstance(pos, list) and not isinstance(acceptation, list):
                pos = [pos]
                acceptation = [acceptation]
            for p, a in zip(pos, acceptation):
                if a and p:
                    print(" - " + huepy.green(p + " " + a))
            print()

        # Example sentences: original text, then its translation.
        sent = dct.get("sent")
        if not sent:
            return
        if not isinstance(sent, list):
            sent = [sent]
        index = 1
        for item in sent:
            for k, v in item.items():
                if k == "orig":
                    print(highlight(huepy.grey(" {}. ".format(index) + v), words))
                    index += 1
                elif k == "trans":
                    print(highlight(huepy.cyan(" " + v), words))
                    print()
    except Exception:
        print(" " + huepy.red(ERR_MSG))
def destruct_response(cls, response: ty.Dict[str, ty.Any]) -> VKAPIError:
    """Build a VKAPIError from a raw VK API error response.

    Pops ``error_code``, ``error_msg`` and ``request_params`` out of
    ``response["error"]`` (the input dict is mutated), renders a colored
    human-readable message, and forwards any leftover fields as
    ``extra_fields``.
    """
    error = response["error"]
    status_code = error.pop("error_code")
    description = error.pop("error_msg")
    request_params = {
        item["key"]: item["value"]
        for item in error.pop("request_params")
    }

    pretty_exception_text = (
        huepy.red(f"\n[{status_code}]")
        + f" {description}\n\n"
        + huepy.grey("Request params:")
    )
    for key, value in request_params.items():
        pretty_exception_text += f"\n{huepy.yellow(key)} = {huepy.cyan(value)}"

    # Anything left after the pops above is an unexpected extra field.
    if error:
        pretty_exception_text += (
            "\n\n"
            + huepy.info("There are some extra fields:\n")
            + str(error)
        )

    return cls(
        pretty_exception_text=pretty_exception_text,
        description=description,
        status_code=status_code,
        request_params=request_params,
        extra_fields=error,
    )
def get_criterion(name, args, **kwargs):
    """Instantiate a loss by name, from this module first, then torch.nn.

    Constructor kwargs are parsed from args.criterion_args: '^'-separated
    'key=value' pairs whose values go through eval(). The criterion is
    wrapped in FP16Criterion when args.fp16 is set.
    """
    criterion_args = {}
    if args.criterion_args != '':
        for entry in args.criterion_args.split("^"):
            k, v = entry.split('=')
            # NOTE: eval() on a CLI-supplied value — acceptable for trusted
            # local configs, unsafe on untrusted input.
            criterion_args[k] = eval(v)
    print(criterion_args)

    module_ns = sys.modules[__name__].__dict__
    if name in module_ns:
        criterion = module_ns[name](**criterion_args)
    elif name in torch.nn.modules.__dict__:
        criterion = torch.nn.modules.__dict__[name](**criterion_args)
    else:
        assert False, red(f"Cannot find loss with name {name}")

    if args.fp16:
        criterion = FP16Criterion(criterion)
    return criterion
def main():
    # CLI entry point: fetch a URL, report which security headers and cookie
    # flags are present, then fingerprint technologies with Wappalyzer's
    # public regex database.
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--url', dest='url', metavar='URL', required=True, help='URL https://example.com')
    parser.add_argument(
        '--verify',
        action='store_true',
        default=False,
        help='Verify the SSL certificate. Default is set to False.')
    parser.add_argument('--description', action='store_true', help='Print header description')
    args = parser.parse_args()

    # Browser-like headers; caching disabled so we see the origin's response.
    session = requests.Session()
    session.headers.update({
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0',
        'Cache-control': 'no-cache',
        'Pragma': 'no-cache',
        'Connection': 'close'
    })

    # prepend http if missing
    args.url = 'http://' + args.url if not args.url.startswith('http') else args.url
    if not valid_url(args.url):
        parser.print_help()
        exit()

    try:
        response = session.get(url=args.url, verify=args.verify)
    except requests.exceptions.ConnectionError as e:
        print(bold(bad(f"{bold(red('connection error'))}: {e}")))
        print(bold(bad(f'{args.url}')))
        exit()
    except Exception:
        print(bold(bad(bold(red('connection error')))))
        print(bold(bad(f'{args.url}')))
        exit()

    headers = response.headers
    html = response.text
    soup = BeautifulSoup(html, "lxml")

    wappalyzer_json_url = "https://raw.githubusercontent.com/AliasIO/wappalyzer/master/src/technologies.json"

    # Security headers to look for, plus explanatory text shown with
    # --description.
    check_headers = [
        'X-Content-Type-Options', 'X-Frame-Options', 'X-XSS-Protection',
        'Strict-Transport-Security', 'Content-Security-Policy',
        'Referrer-Policy', 'Feature-Policy'
    ]
    descriptions = {}
    descriptions['X-Content-Type-Options'] = que(
        'X-Content-Type-Options stops a browser from trying to MIME-sniff the content type and forces it to stick with the declared content-type. The only valid value for this header is "X-Content-Type-Options: nosniff".'
    )
    descriptions['X-Frame-Options'] = que(
        'X-Frame-Options tells the browser whether you want to allow your site to be framed or not. By preventing a browser from framing your site you can defend against attacks like clickjacking.'
    )
    descriptions['X-XSS-Protection'] = que(
        'X-XSS-Protection sets the configuration for the XSS Auditor built into older browser. The recommended value was "X-XSS-Protection: 1; mode=block" but you should now look at Content Security Policy instead.'
    )
    descriptions['Strict-Transport-Security'] = que(
        'HTTP Strict Transport Security is an excellent feature to support on your site and strengthens your implementation of TLS by getting the User Agent to enforce the use of HTTPS.'
    )
    descriptions['Content-Security-Policy'] = que(
        'Content Security Policy is an effective measure to protect your site from XSS attacks. By whitelisting sources of approved content, you can prevent the browser from loading malicious assets. Analyse this policy in more detail. You can sign up for a free account on Report URI to collect reports about problems on your site.'
    )
    descriptions['Referrer-Policy'] = que(
        'Referrer-Policy Referrer Policy is a new header that allows a site to control how much information the browser includes with navigations away from a document and should be set by all sites.'
    )
    descriptions['Feature-Policy'] = que(
        'Feature Policy is a new header that allows a site to control which features and APIs can be used in the browser.'
    )

    # Attributes that a well-set cookie should carry.
    cookie_checks = [
        'Expires',
        'HttpOnly',
        'Secure',
        'Path=/',
    ]

    print(info(f"{bold('Request URL')}: {args.url}"))
    print(info(f"{bold('Response status code')}: {response.status_code}"))
    print(info(bold('Request headers:')))
    print(json.dumps(dict(session.headers), indent=2, sort_keys=True))
    print(info(bold('Response headers:')))
    print(json.dumps(dict(headers), indent=2, sort_keys=True))

    print(f"\n{run(bold('Checking security headers...'))}")
    for check_head in check_headers:
        if check_head.lower() in headers:
            print(good(f'{check_head} found'))
        else:
            print(bad(f'{check_head} not found'))
        # NOTE(review): the flattened source is ambiguous about whether this
        # description block was nested under the `else` (only for missing
        # headers); it is placed at loop level here — confirm against the
        # original layout.
        if args.description:
            if check_head in descriptions.keys():
                print(descriptions[check_head])

    print(f"\n{run(bold('Checking cookies...'))}")
    if 'set-cookie' in headers:
        cookies = headers['Set-Cookie'].split(',')
        for cookie in cookies:
            print(f"{bold('cookie: ')} {cookie}")
            for cookie_check in cookie_checks:
                if cookie_check.lower() in cookie.lower():
                    print(good(f'{cookie_check} found'))
                else:
                    print(bad(f'{cookie_check} not found'))
    else:
        print(info('not found'))

    print(f"\n{run(bold('Checking Wappalyzer Regular Expressions...'))}")
    # Prepare wappalyzer data
    wappalyzer_json_file = requests.get(wappalyzer_json_url)
    if wappalyzer_json_file.ok:
        try:
            wappalyzer_json = json.loads(wappalyzer_json_file.text)
        except json.decoder.JSONDecodeError as e:
            print(bold(bad(f"{bold(red('JSONDecodeError'))}: {e}")))
            exit()
    else:
        print(
            bold(
                bad(f"{bold(red(f'Unable to get wappalyzer json file {wappalyzer_json_url}'))}"
                    )))
        exit()

    # Map category id -> display name, and pre-create an empty result set
    # per category name.
    wappalyzer_categories = wappalyzer_json['categories']
    saved_apps = {}
    for k, v in wappalyzer_categories.items():
        name = wappalyzer_categories[k]['name']
        saved_apps[name] = set()

    # Map technology name -> set of category ids (used for 'implies' below).
    wappalyzer_tech = wappalyzer_json['technologies']
    wappalyzer_names = {}
    for app_name, details in wappalyzer_tech.items():
        wappalyzer_names[app_name] = set()
        if 'cats' in details.keys():
            for ca in details['cats']:
                wappalyzer_names[app_name].add(ca)

    # Parse meta data
    # Flatten each <meta> tag into a single list of its attribute names and
    # values, so both can be matched below.
    metas = []
    for meta in soup.findAll('meta'):
        meta_object = list(meta.attrs.keys()) + list(meta.attrs.values())
        metas.append(meta_object)

    # Match every known technology's fingerprints against the response.
    for app_name, details in wappalyzer_tech.items():
        found = False
        try:
            # Check meta
            if 'meta' in details.keys():
                for k, v in details['meta'].items():
                    for meta in metas:
                        if k in meta and re.search(v, ' '.join(meta)):
                            for cat in details['cats']:
                                name = wappalyzer_categories[str(cat)]['name']
                                saved_apps[name].add(app_name)
                            found = True
            # Check headers
            if 'headers' in details.keys():
                for k, header in details['headers'].items():
                    # NOTE(review): re.search(pattern, string) — here the
                    # response header value is passed as the pattern and the
                    # fingerprint regex as the string, which looks inverted
                    # (compare the meta/html checks above); confirm intent.
                    if k in headers and re.search(headers[k], header):
                        for cat in details['cats']:
                            name = wappalyzer_categories[str(cat)]['name']
                            saved_apps[name].add(app_name)
                        found = True
            # Check html and script
            search_in_html = []
            if 'html' in details.keys():
                if isinstance(details['html'], list):
                    search_in_html += details['html']
                if isinstance(details['html'], str):
                    search_in_html.append(details['html'])
            if 'script' in details.keys():
                if isinstance(details['script'], list):
                    search_in_html += details['script']
                if isinstance(details['script'], str):
                    search_in_html.append(details['script'])
            for regex in search_in_html:
                if re.search(regex, html):
                    for cat in details['cats']:
                        name = wappalyzer_categories[str(cat)]['name']
                        saved_apps[name].add(app_name)
                    found = True
            # A matched technology may imply others (e.g. a CMS implies its
            # language); record those under their own categories.
            if found and 'implies' in details.keys():
                if isinstance(details['implies'], list):
                    techs = details['implies']
                elif isinstance(details['implies'], str):
                    techs = [details['implies']]
                else:
                    techs = []
                for tech in techs:
                    subcats = wappalyzer_names[tech]
                    for subcat in subcats:
                        subcat_category = wappalyzer_categories[str(subcat)]['name']
                        saved_apps[subcat_category].add(tech)
        except re.error:
            # Some upstream fingerprints are not valid Python regexes; skip.
            # print(warn(f'regex error: {regex}'))
            pass

    # Report everything that matched, grouped by category.
    wappalyzer_found = False
    for category, app_names in saved_apps.items():
        if app_names:
            wappalyzer_found = True
            output = info(f"{category}: {', '.join(map(str, app_names))}")
            print(output)
    if not wappalyzer_found:
        print(info('not found'))
def get_saver(name, saver_args=''):
    """Instantiate a saver class defined in this module by name, with
    constructor kwargs parsed from the saver_args string; fails hard on an
    unknown name."""
    module_ns = sys.modules[__name__].__dict__
    if name not in module_ns:
        assert False, red(f"Cannot find saver with name {name}")
    return module_ns[name](**parse_dict(saver_args))
def run():
    # Script entry point: clear the terminal, show the banner, start the menu.
    clear()
    print(red(ban))  # `ban` is presumably the ASCII banner — defined elsewhere
    main()
"""Prune experiment directories that contain fewer than --min_checkpoints
checkpoint files. Dry-run by default; pass --no-dry-run to actually delete."""
import argparse
# BUG FIX (idiom): `import glob` previously sat in the middle of the script;
# all imports now live in one top-of-file group.
import glob
import importlib
import os
import shutil

import torch

from huepy import red, green

# Define main args
parser = argparse.ArgumentParser(conflict_handler='resolve')
parser.add = parser.add_argument  # shorthand alias used below

parser.add('--exp_dir', type=str, default="", help='')
parser.add('--no-dry-run', action='store_true')
parser.add('--min_checkpoints', type=int, default=1)

args = parser.parse_args()

for exp_path in glob.glob(f'{args.exp_dir}/*'):
    checkpoints = glob.glob(f'{exp_path}/checkpoints/*')
    # An experiment with too few saved checkpoints is considered junk.
    if len(checkpoints) < args.min_checkpoints:
        print('Deleting ', red(exp_path))
        if args.no_dry_run:
            shutil.rmtree(exp_path)
    else:
        print('Leaving ', green(exp_path))
file_path = path.dirname(path.abspath(__file__)) + '/' + \ str(options.org) + '-' + str(options.unique) + '.SeBAz.csv' with open(file_path, 'w', newline='') as csvfile: csvwriter = writer(csvfile, dialect='excel') csvwriter.writerow( ['Recommendation Number', 'Message', 'Result', 'Explanation', 'Time']) length = len(recommendations) score = 0 passed = 0 if options.verbose: # printing the legend for verbose output print('Done. Here\'s the legend for the test results:') print(bold(green('Green Text indicates tests that have PASSED'))) print(bold(red('Red Text indicates tests that have FAILED'))) if options.score == None: print(bold(yellow('Yellow Text indicates tests that are NOT SCORED'))) print('\n\nPerforming ' + str(length) + ' tests now...\n') else: print('Done. Performing ' + str(length) + ' tests now...\n\n') # progressbar format bar_format = u'{count:03d}/{total:03d}{percentage:6.1f}%|{bar}| ' + \ bold(green('pass')) + u':{count_0:{len_total}d} ' + \ bold(red('fail')) + u':{count_1:{len_total}d} ' + \ bold(yellow('chek')) + u':{count_2:{len_total}d} ' + \ u'[{elapsed}<{eta}, {rate:.1f}{unit_pad}{unit}/s]' passd = manager.counter(total=length, unit='tests', color='white',
print(trainAlgo)

# Pick the controller implementation matching the configured algorithm.
if trainAlgo == "IMPALA":
    controller = impalaController(args.sim_config, algoConfig,
                                  args.checkpoint_file)
elif trainAlgo == "PPO":
    controller = ppoController(args.sim_config, algoConfig,
                               args.checkpoint_file)
else:
    # BUG FIX: was `raiseValueError(...)` — a call to an undefined name
    # (NameError) instead of raising the intended exception.
    raise ValueError("invalid training algo %s" % (trainAlgo))

# Warn user if data save is not enabled
if (args.save_data != 1):
    print(
        info(
            bold(
                red("save is disabled, simulation data will not be saved to disk."
                    ))))

# Local Env
env = V2I.V2I(args.sim_config, "test", paramDict(args))

# Init Render if enabled
fig, ax1, ax2 = None, None, None
if args.render_graphs == 1:
    fig, ax1, ax2 = initRender()

# Use LSTM if enabled by sim-config file
useLstm = False
if simConfig["config"]["enable-lstm"]:
    useLstm = True

# Start rolling out :)...
#!/usr/bin/env python3 # huepy muss vorher installiert werden. import huepy print(huepy.bold(huepy.red("red and bold"))) print(huepy.run("Starte...")) print(huepy.info("Info!!")) print(huepy.bad("Schlecht!")) print(huepy.good("Gut!")) input(huepy.que("Frage? ")) # mit Fortschritt: from tqdm import tqdm for x in tqdm(range(5000000), desc=huepy.run("Dinge")): pass
menu_sites = { "Social Media": { "Facebook", "Google", "LinkedIn", "Twitter", "Instagram", "Snapchat", "FbRobotCaptcha", "VK", "Github", } } SF_PROMPT = bold(red(" ------ ")) def colorize_option(chave, valor): ''' Based on index type format and print out menu options. ''' if type(chave) == int: selector = yellow(' [') + bold(red('%s')) + yellow('] ') suffix = yellow('%s') return selector % chave + suffix % valor if type(chave) == str: pos = valor.lower().find(chave) prefix, radical, suffix = valor.partition(valor[pos]) if prefix: prefix = red('%s')
It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/ """ import os from dj_static import Cling, MediaCling from django.core.wsgi import get_wsgi_application from huepy import green, red # async mail with uwsgi try: # this will not work in the local dev server :( import uwsgidecorators from django.core.management import call_command @uwsgidecorators.timer(10) def send_queued_mail(num): """Send queued mail every 10 seconds""" call_command("send_queued_mail", processes=1) print(green("background mail queue activated")) except ImportError: print(red("uwsgidecorators not found - background mails are unavailable!")) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings") application = MediaCling(Cling(get_wsgi_application()))