def inspectConcept(object):  # noqa: A002 -- param name kept for backward compatibility
    """Pretty-print a concept's name, synonyms, parents, states, components,
    component-of links, actions and acted-on-by links.

    Each relation tuple carries a trailing confidence value; 100 means certain
    and is suppressed, anything else is appended as a magenta ``[n]`` suffix.
    """
    def _confidence(value):
        # 100 == certain: print nothing; otherwise show the confidence suffix.
        return '' if value == 100 else magenta(" [" + str(value) + "]")

    with indent(2):
        if object.hashcode:
            puts(green(object.name) + ' (' + object.hashcode + ')')
        with indent(4):
            if object.synonyms:
                puts(green(object.name) + ' is also known as: ' + ', '.join(object.synonyms))
            if object.parents:
                puts(green(object.name) + ' is a ' + ', '.join(object.parents))
            for stateTuple in object.states:
                puts(green(object.name) + ' is ' + stateTuple[0] + _confidence(stateTuple[1]))
            for componentTuple in object.components:
                puts(green(object.name) + ' (has ' + componentTuple[0] + ') --> '
                     + componentTuple[1] + _confidence(componentTuple[2]))
            for componentOfTuple in object.componentOf:
                puts(componentOfTuple[1] + ' (has ' + componentOfTuple[0] + ') --> '
                     + green(object.name) + _confidence(componentOfTuple[2]))
            for actionTuple in object.actions:
                puts(green(object.name) + ' (' + actionTuple[0] + ') --> '
                     + str(actionTuple[1]) + _confidence(actionTuple[2]))
            for actedOnByTuple in object.actedOnBy:
                puts(actedOnByTuple[1] + ' (' + actedOnByTuple[0] + ') --> '
                     + green(object.name) + _confidence(actedOnByTuple[2]))
def load_config(self):
    """Import settings, then make sure a room list exists.

    When ``settings.ROOMS`` is unset, the full room list is fetched in a
    child process and exported via the ``WILL_ROOMS`` environment variable
    before settings are re-imported.
    """
    puts("Loading configuration...")
    with indent(2):
        settings.import_settings(quiet=False)
    puts("")
    puts("Verifying rooms...")
    # If we're missing ROOMS, join all of them.
    with indent(2):
        if settings.ROOMS is None:
            # Yup. Thanks, BSDs.
            result_queue = Queue()
            worker = Process(
                target=self.update_available_rooms,
                args=(),
                kwargs={"q": result_queue},
            )
            worker.start()
            rooms_list = result_queue.get()
            show_valid("Joining all %s known rooms." % len(rooms_list))
            os.environ["WILL_ROOMS"] = ";".join(rooms_list)
            worker.join()
            settings.import_settings()
        else:
            plural = "s" if len(settings.ROOMS) > 1 else ""
            show_valid(
                "Joining the %s room%s specified." % (len(settings.ROOMS), plural)
            )
    puts("")
def test_regex(self, line):
    """Test a regex to see how many unmatched actions it matches.

    Compiles *line*, reports matches (with captured groupdicts), and stores
    the match list and pattern on ``self`` for later paging/printing.
    """
    try:
        rgx = re.compile(line)
    except sre_constants.error as e:
        msg = red("Bad regex: ") + green(repr(line)) + " You have failed the bat-test."
        puts(msg)
        # BUGFIX: parenthesized so this is valid on both Python 2 and 3.
        print(e)
        return
    self.current_rgx = rgx
    puts("Testing " + colored.green(line))
    matched = []
    for action in self.actions.unmatched:
        m = re.search(line, action)
        if m:
            matched.append([action, m.groupdict()])
    if not matched:
        with indent(4, quote=" >"):
            puts(red("Aw, snap!") + " " + cyan("No matches found!"))
        return
    # NOTE(review): overwrites the compiled pattern stored above with the raw
    # string -- preserved as-is; confirm which form downstream code expects.
    self.current_rgx = line
    self.show_matches_start = 0
    with indent(4, quote=" >"):
        puts("Found " + colored.red(len(matched)) + " matches:")
        self._print_matches(matched[: self.show])
    self.matched = matched
def print_help():
    """Print usage information for the gen_api.py generator script."""
    puts('Baancomplete gen_api.py')
    puts('Use this python script to generate a baancomplete_api.sqlite file')
    puts('')
    puts('Either from library documentation generated with ttstpbaandoc')
    with indent(2):
        doc_usage = '{0} {1} {2} {3}'.format(
            colored.green('--doc'),
            colored.yellow('[file or directory (subfolders are searched too)]'),
            colored.green('--out'),
            colored.yellow('[file]'))
        puts(doc_usage)
    puts('Or from table definitions (database credentials required)')
    with indent(2):
        db_usage = '{0} {1} {2} {3}'.format(
            colored.green('--db'),
            colored.yellow('[mssql]'),
            colored.green('--out'),
            colored.yellow('[file]'))
        puts(db_usage)
    puts(colored.red('''
The output file is a sqlite3 database.
Copy it into the baancomplete autoload folder and name it baancomplete_api.sqlite
You can change the path to the folder where baancomplete will look for the api file
by setting g:baancomplete_path in .vimrc
But you cannot change the filename itself.
'''))
def finalize(self):
    """Finalize this init command.

    Prints the git configuration written for continuity and the list of
    aliased git commands.
    """
    puts()
    puts("Configured git for continuity:")
    with indent():
        # pivotal.* and github.* entries are printed verbatim.
        for section, mapping in (("pivotal", self.pivotal), ("github", self.github)):
            for name, setting in mapping.iteritems():
                puts("{0}.{1}={2}".format(section, name, setting))
        # continuity.* booleans are lower-cased to match git config style.
        for name, setting in self.continuity.iteritems():
            if isinstance(setting, bool):
                setting = str(setting).lower()
            puts("continuity.{0}={1}".format(name, setting))
    puts()
    puts("Aliased git commands:")
    with indent():
        for command in sorted(self.aliases.iterkeys()):
            puts(command)
def list_existing_drives():
    """Show all registered Drives in a table and prompt for edit/delete."""
    puts(colored.green('List registered Drives for editing / deleting...\n'))
    with indent(4, quote=' >'):
        puts('To edit a Drive, type the index of the Drive in the table.')
        puts('To delete a Drive, type a minus sign followed by the index of the Drive.')
        puts('To abort and return to main menu, hit [Ctrl+C].')
        puts('For example, type "1" to edit the Drive indexed 1, and type "-1" to delete it.')
    puts()
    account_store.get_all_accounts()
    drive_list = []
    for key, drive in drive_store.get_all_drives().items():
        drive_id, account_id, account_type = key
        with indent(4):
            # NOTE(review): the header row is emitted once per drive, not once
            # per table -- preserved as-is; confirm whether that is intended.
            puts(columns(
                [(colored.green('Index')), 8],
                [(colored.magenta('Drive ID')), 17],
                [(colored.magenta('Drive Type')), 12],
                [(colored.cyan('Account')), 20],
                [(colored.yellow('Local Root')), None]))
            profile = drive.root.account.profile
            puts(columns(
                [str(len(drive_list)), 8],
                [drive_id, 17],
                [drive.type, 12],
                ["{} ({})".format(account_id, profile.name), 20],
                [drive.config.local_root, None]))
        drive_list.append(drive)
    prompt_edit_drive(drive_list)
def update(user, password, lang=None):
    """Download updated .po files for each language from Transifex and
    compile the corresponding .mo files.

    :param user: Transifex login.
    :param password: Transifex password.
    :param lang: optional language code restriction (all languages if None).
    """
    langs = getlangs(lang)
    puts(u"Updating %s" % ', '.join(langs))
    for loc in langs:
        with indent(2):
            puts(u"Downloading PO for %s" % loc)
        url = (u'https://www.transifex.com/projects/p/formhub/'
               u'resource/django/l/%(lang)s/download/for_use/' % {'lang': loc})
        try:
            tmp_po_file = download_with_login(url, TX_LOGIN_URL,
                                              login=user,
                                              password=password,
                                              ext='po',
                                              username_field='identification',
                                              password_field='password',
                                              form_id=1)
            po_file = os.path.join(REPO_ROOT, 'locale', loc,
                                   'LC_MESSAGES', 'django.po')
            with indent(2):
                puts(u"Copying downloaded file to %s" % po_file)
            shutil.move(tmp_po_file, po_file)
        except Exception as e:
            puts(colored.red(u"Unable to update %s "
                             u"from Transifex: %r" % (loc, e)))
        # BUGFIX: message typo corrected ("sucesssfuly" -> "successfully").
        # NOTE(review): this prints even when the download above failed --
        # preserved placement; confirm whether it belongs inside the try.
        puts(colored.green("successfully retrieved %s" % loc))
    compile_mo(langs)
def test_regex(self, line):
    '''Test a regex to see how many actions match.

    Like the unmatched-only variant, but tests against ``self._test_list``,
    reports total matches across ``self.actions.list``, and copies the
    pattern to the clipboard via xsel when available.
    '''
    try:
        rgx = re.compile(line)
    except sre_constants.error as e:
        msg = red('Bad regex: ') + green(repr(line)) + ' You have failed the bat-test.'
        puts(msg)
        # BUGFIX: parenthesized so this is valid on both Python 2 and 3.
        print(e)
        return
    self.current_rgx = rgx
    puts('Testing ' + colored.green(line))
    matched = []
    for action in self._test_list:
        m = re.search(line, action)
        if m:
            matched.append([action, m.groupdict()])
    if not matched:
        with indent(4, quote=' >'):
            puts(red('Aw, snap!') + ' ' + cyan('No matches found!'))
        return
    self.current_rgx = line
    self.show_matches_start = 0
    # Total across the full action list, not just the sample tested above.
    total_matches = len(filter(rgx.search, self.actions.list))
    with indent(4, quote=' >'):
        puts('Found ' + colored.red(total_matches) + ' matches:')
        self._print_matches(matched[:self.show])
    self.matched = matched
    # Copy the pattern to the clipboard.
    if xsel_enabled:
        p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
        p.communicate(input=line)
def __init__(self):
    '''Constructor, fires all required methods.'''
    from .config import Config
    from .install import Install
    from .template import Template
    from .virtualenv import Virtualenv
    from clint.textui import puts, indent
    from clint.textui.colored import green

    # Basic Skeleton Generation
    self.config = Config()
    with indent(4, quote=' >'):
        puts(green('Starting'))
    self.template = Template(self.config)
    self.template.copy_template()

    # Create python virtual environment, when requested by the config.
    if self.config.venv_create:
        self.venv = Virtualenv(self.config)

    # Install the project to the python path (only if the config asks for it).
    if getattr(self.config, 'install', False):
        self.install = Install(self.config, self.template,
                               getattr(self, 'venv', None))
    with indent(4, quote=' >'):
        puts(green('Done'))
def compile_theme(theme_id=None):
    """Compiles a theme (or all themes when *theme_id* is None)."""
    output_dir = path(mkdtemp())
    themes = (ThemeManager.themes().values() if theme_id is None
              else [ThemeManager.theme(theme_id)])
    with indent(2):
        puts(colored.yellow("Using %s as the temporary path." % output_dir))
        puts(colored.yellow("Compiling %s themes." % len(themes)))
        for theme in themes:
            temp_theme_output_path = path(output_dir / theme.id).normpath()
            theme_output_path = (theme.static_root /
                                 ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
            puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
            with indent(4):
                puts("Copying content to %s" % temp_theme_output_path)
                theme.copy_all_content(temp_theme_output_path)
                puts("Compiling...")
                convert_less(temp_theme_output_path / 'stylesheets/%s.less' % theme.id,
                             theme_output_path,
                             minify=True)
                puts(colored.green("Done.", bold=True))
def print_path_info(address, path, coin_symbol, wif=None):
    """Print a derivation path with its address (and WIF when supplied),
    including the on-chain balance when USER_ONLINE is set."""
    assert path, path
    assert coin_symbol, coin_symbol
    assert address, address

    address_formatted = '%s/%s' % (address, wif) if wif else address

    if USER_ONLINE:
        addr_balance = get_total_balance(
            address=address,
            coin_symbol=coin_symbol,
        )
        with indent(2):
            puts(colored.green('%s (%s) - %s' % (
                path,
                address_formatted,
                format_crypto_units(
                    input_quantity=addr_balance,
                    input_type='satoshi',
                    output_type=UNIT_CHOICE,
                    coin_symbol=coin_symbol,
                    print_cs=True,
                ),
            )))
    else:
        with indent(2):
            puts(colored.green('%s (%s)' % (path, address_formatted)))
def start_polling(self):
    """Poll all events forever, collecting failed/past-due events and sending
    notifications for any that have not been notified yet.

    Sleeps ``config.NOTIFIER_INTERVAL`` seconds between passes.
    """
    puts("Starting Poller")
    while True:
        self.passes_between_resets += 1
        events = Event.objects.all()
        failed_events = []
        pastdue_events = []
        for event in events:
            # PERF/BUGFIX: call status() once per event instead of up to
            # three times per pass.
            status = event.status()
            if status in ('fail', 'failure', 'failed'):
                if not self.was_notified('failed', event.name):
                    with indent(4):
                        puts("failed event found %s" % (event.name))
                    failed_events.append(event)
                else:
                    with indent(4):
                        puts("failed event found %s but notification already sent" % (event.name))
            elif status == 'pastdue':
                if not self.was_notified('pastdue', event.name):
                    with indent(4):
                        puts("past due event found %s" % (event.name))
                    pastdue_events.append(event)
                else:
                    with indent(4):
                        puts("past due event found %s but notification already sent" % (event.name))
        if len(failed_events) > 0:
            puts(" Notifying for Failed Events")
            self.notify(failed_events, config.EMAIL_SUBJECTS['failed'], 'failed')
        if len(pastdue_events) > 0:
            puts(" Notifying for Past Due Events")
            self.notify(pastdue_events, config.EMAIL_SUBJECTS['pastdue'], 'past due')
        puts("Sleeping for %s seconds" % (config.NOTIFIER_INTERVAL))
        self.check_for_reset()
        sleep(config.NOTIFIER_INTERVAL)
def start_feature(self, feature):
    """Print a feature's name and description, then bump the indent level."""
    with indent(self.current_indent):
        puts("Feature: %s" % feature.name)
    with indent(self.current_indent + 2):
        puts(feature.description)
    # BUGFIX: parenthesized so the blank-line print is valid on Py2 and Py3.
    print("")
    self.indent()
def listnotes(docopt_args):
    """Print all notes (optionally limited via --limit) in a framed layout."""
    note = NoteOperations()
    if docopt_args["--limit"]:
        limit = docopt_args["<items>"]
        insertvaluecache("list", str(limit), "")
        allnotes = note.viewall(limit)
    else:
        allnotes = note.viewall()

    if len(allnotes) > 0:
        for item in allnotes:
            with indent(4, quote=' >'):
                noteid = item.get("_id", "")
                time = "[" + item.get("datecreated", "") + "]"
                noteids = "[" + str(noteid) + "]"
                body = item.get("body", "")
                title = item.get("title", "========NOT FOUND=======")
                print(Fore.YELLOW + "===============================================" + Fore.RESET)
                print(Back.BLUE + noteids + Back.RESET + " " + Back.BLUE + time + Back.RESET
                      + "\n\n" + Back.RED + title + Back.RESET + Style.BRIGHT
                      + "\n\n" + Fore.GREEN + body + Fore.RESET + Style.NORMAL)
                print(Fore.YELLOW + "===============================================" + Fore.RESET)
    else:
        with indent(4):
            puts(colored.yellow("Sorry, no notes present"))
def fetch_all_timetables(browser):
    """Collect timetable links from the capemetro site, keyed by
    zone -> (start, end) -> period."""
    # Fetch the homepage for capemetro, and click the Timetables link
    if debug:
        puts('Fetching timetable links')
    tables = defaultdict(sub_dict)
    browser.open(base_url)
    browser.follow_link(text='Timetables')
    # Iterate over all the links on the timetable page and
    # get the URLs for timetables, using zone/start/end/period
    for link in browser.links():
        if 'html' not in link.text or 'Business_Express' in link.url:
            continue
        _, date, zone, title = link.url.split('/')
        start, end, period = title.split('_')[:3]
        tables[zone][(start, end)][period] = link
    if debug:
        # Print retrieved/organized data
        for zone, directions in tables.items():
            puts(zone)
            with indent(2):
                for direction, periods in directions.items():
                    puts('From:%s, to:%s' % (
                        area_nicename[direction[0]],
                        area_nicename[direction[1]]))
                    with indent(2):
                        for period, link in periods.items():
                            puts('%s - %s' % (period_nicename[period], link))
    return tables
def main():
    """Parse CLI options and print upcoming trains for the chosen station."""
    # BUGFIX: 'debug = True' previously created a dead local variable; the
    # module-level flag (read by fetch_all_timetables) was never set.
    global debug
    if '--debug' in clint.args.grouped:
        debug = True
    zone = clint.args.grouped.get('--zone', ['South'])[0]
    start = clint.args.grouped.get('--from', ['ST'])[0]
    finish = clint.args.grouped.get('--to', ['CT'])[0]
    period = clint.args.grouped.get('--period', ['MonFri'])[0]
    station = clint.args.grouped.get('--station', ['Fish Hoek'])[0]
    time_window = int(clint.args.grouped.get('--window', [60])[0])
    puts('Zone: ' + zone)
    puts('Service line: %s to %s' % (
        area_nicename.get(start, start),
        area_nicename.get(finish, finish)))
    puts('Time: ' + period_nicename[period])
    data = timetable(zone=zone, start=start, finish=finish, period=period)
    today = datetime.date.today().timetuple()[:3]
    now = datetime.datetime.now()
    with indent(2):
        puts('Station: ' + station)
        with indent(2):
            for train, time in data.filter(station).dict[0].items():
                if train and time:
                    notes = ''
                    # Build a full datetime for today at the departure time.
                    time_tuple = today + (time.hour, time.minute)
                    local_time = datetime.datetime(*time_tuple)
                    if local_time > now:
                        minutes = (local_time - now).seconds / 60
                        if minutes < 60:
                            notes = '* leaving in %s minutes' % minutes
                    puts('%s: %s %s' % (train, time, notes))
def delete(self):
    """Delete (or, in dry-run mode, merely list) this project's resources."""
    if self.dry_run:
        self.puts(
            colored.yellow(
                ("\nYou are trying to delete this project's resources!\n"
                 "By default this command runs in dry-run mode. If you are ok \n"
                 "with the following resources being deleted, you can run this\n"
                 "command with --confirm to do the actual deletion.\n"
                 "\nNOTHING IS GOING TO BE DELETED!\n")
            )
        )
        self.puts(colored.blue("The following resources would be deleted..."))
    else:
        self.puts(colored.blue("Deleting project resources..."))

    context = self.get_initial_context()
    with indent(2):
        self.puts(colored.magenta("\nRegion:{Region}\nStage: {Stage}\n".format(**context)))

    for (number, name, filename, template_type) in self.steps():
        with indent(2):
            self.puts(colored.cyan("{} ({})".format(filename, template_type)))
        with indent(4):
            if self.debug:
                self.puts(colored.white(u"✸ Delete template {} with context {}".format(filename, context)))
            # Dispatch to delete_<type>_template (e.g. delete_cf_template).
            getattr(self, 'delete_{}_template'.format(template_type))(name, filename, context)
def test_regex(self, line):
    '''Test a regex to see how many actions match.

    Variant that tests against ``self.actions.unmatched`` only.
    '''
    try:
        rgx = re.compile(line)
    except sre_constants.error as e:
        msg = red('Bad regex: ') + green(repr(line)) + ' You have failed the bat-test.'
        puts(msg)
        # BUGFIX: parenthesized so this is valid on both Python 2 and 3.
        print(e)
        return
    self.current_rgx = rgx
    puts('Testing ' + colored.green(line))
    matched = []
    for action in self.actions.unmatched:
        m = re.search(line, action)
        if m:
            matched.append([action, m.groupdict()])
    if not matched:
        with indent(4, quote=' >'):
            puts(red('Aw, snap!') + ' ' + cyan('No matches found!'))
        return
    self.current_rgx = line
    self.show_matches_start = 0
    with indent(4, quote=' >'):
        puts('Found ' + colored.red(len(matched)) + ' matches:')
        self._print_matches(matched[:self.show])
    self.matched = matched
def puts_package_list(paginator, current_page, highlighted_item):
    """Print one page of packages with install/version annotations and a
    small stats column row, framed by pagination rulers."""
    packages = paginator.page(current_page)
    starting_index = paginator.pagination * (current_page - 1)
    pagination_tpl = "Page %s of %s" % (current_page, paginator.num_pages)

    puts(colored.green('=' * 80), newline=False)
    puts(pagination_tpl)
    puts(colored.green('=' * 80))

    for index, package in enumerate(packages):
        with indent(indent=6, quote="%s)" % str(starting_index + index + 1)):
            title = colored.green(package.title)
            if index + 1 == highlighted_item:
                title = " * " + title
            if package.installed:
                if not package.installed_version:
                    # There is no package version! We can't deduce if a new
                    # version is really available.
                    title += colored.yellow(" [Installed] ")
                else:
                    # Package version is there. Everything normal and good!
                    title += colored.yellow(" [Installed %s] " % package.installed_version)
                    if versioner(package.installed_version) < versioner(package.pypi_version):
                        title += colored.red(" [New version %s] " % package.pypi_version)
            puts(title)
            info = {
                "using": package.usage_count,
                "PYPI dl": package.pypi_downloads,
                "watching": package.repo_watchers,
            }
            cols = [["%s: %s" % (key, value), 20] for key, value in info.items()]
        with indent(indent=6):
            puts(columns(*cols))
        puts()

    puts(colored.green('=' * 80), newline=False)
    puts(pagination_tpl)
    puts(colored.green('=' * 80))
def all(self):
    """Print every stored list followed by its key/value entries."""
    for list_name, values in self.storage.get_all_lists().iteritems():
        with indent(INDENT):
            puts(list_name)
            for entry_key, entry_value in values.iteritems():
                with indent(INDENT):
                    puts('{}:\t\t{}'.format(entry_key, entry_value))
def show_error(msgs, ex=None, tab=4):
    """Write each message (and optionally an exception) in red to STDERR."""
    for msg in msgs:
        with indent(tab, quote='!!!'):
            puts(colored.red(msg), stream=STDERR)
    if ex:
        puts('', stream=STDERR)
        # Exceptions are indented deeper than the messages they accompany.
        with indent(8, quote='!!!'):
            puts(colored.red(unicode(ex)), stream=STDERR)
def sync():
    """Fetch the GitHub changelog and the Topsy link for every window."""
    puts('Grabbing GitHub Commits...')
    with indent(2):
        puts(grab_changelog())
    puts('Grabbing Topsy Links...')
    with indent(2):
        for window in WINDOW_MAP.keys():
            puts(get_window_url(window))
def savenotestocloud(self, exists):
    """Synchronize local notes with the cloud (Firebase).

    If a local user exists, push unsynced notes (creating the cloud user if
    needed) or pull notes from the cloud when the local db has none.
    Otherwise prompt to create an account.

    :param exists: "yes" when the user already exists in the cloud.
    """
    # if there is a user present in local db proceed with sync
    if self.isuserindb():
        # Creates a thread class and starts it that does progress
        # animations while synchronizing
        p = progress_bar_loading()
        p.start()
        # if there are notes present in the db, sync the notes
        try:
            if self.arenotesindb():
                username = self.getusernamefromdb().get("username", "")
                passhash = self.getusernamefromdb().get("pass", "")
                if not self.isuserincloud(username):
                    self.saveuserincloud(username, passhash)
                    exists = "yes"
                # gets a list of notes from db
                notes = SyncNotes.fetchunsynced(self)
                if len(notes) > 0:
                    if exists == "yes":
                        # deletes first the data, then inserts so as to
                        # update it
                        self.fb.delete('/notes/' + username, None)
                        result = self.fb.post('/notes/' + username, notes)
                    else:
                        result = self.fb.post('/notes/' + username, notes)
                else:
                    self.fb.delete('/notes/' + username, None)
                with indent(4, quote=' >'):
                    puts(colored.green("Notes synced"))
                p.stopit()
                self.flagsent()  # flags notes as sent
            else:
                # else fetch notes from the cloud and insert to local db
                username = self.getusernamefromdb().get("username", "")
                passhash = self.getusernamefromdb().get("pass", "")
                if not self.isuserincloud(username):
                    self.saveuserincloud(username, passhash)
                    exists = "yes"
                self.getnotes(username)
        except Exception:
            # BUGFIX: was a bare except, which also swallowed SystemExit and
            # KeyboardInterrupt; the sync remains deliberately best-effort.
            p.stopit()
    else:
        # Else create the user account and try to sync again
        with indent(4, quote=' >'):
            puts(colored.red("User account not found.Please create account"))
        name = prompt.query("Enter username")
        pswd = getpass.getpass('Enter password')
        self.createuser(name, pswd)
def _displayNodeHelp(self, nodeFullName, node):
    """
    Display help of a specific node in the command line.
    """
    if not node:
        self.logger.error('Cannot print help of unknown plugin "' + nodeFullName + '".')
        exit(1)

    # NODE
    self._displayTitle('NODE')
    with indent(4):
        puts(colored.green(nodeFullName + ' ' + node.getVersionStr()))

    # DESCRIPTION
    self._displayTitle('DESCRIPTION')
    with indent(4):
        puts('type: ' + colored.green(tuttle.mapNodeTypeEnumToString(node.getNodeType())))
        puts('group: ' + colored.green(node.asImageEffectNode().getPluginGrouping()))
    puts('\n')
    puts(node.getProperties().fetchStringProperty('OfxPropPluginDescription').getValue())

    # PARAMETERS
    if node.getParams():
        self._displayTitle('PARAMETERS')
    for param in node.getParams():
        paramType = param.getParamType()
        # Skip Group / PushButton / Page params
        if paramType in ('OfxParamTypeGroup', 'OfxParamTypePushButton', 'OfxParamTypePage'):
            continue
        self._displayParamHelp(param)

    # CLIPS
    self._displayTitle('CLIPS')
    for clip in node.getClipImageSet().getClips():
        self._displayClipHelp(clip)

    # SUPPORTED BIT DEPTH
    if node.getProperties().hasProperty('OfxImageEffectPropSupportedPixelDepths'):
        self._displayTitle('SUPPORTED BIT DEPTH')
        propBitDepth = node.getProperties().fetchProperty('OfxImageEffectPropSupportedPixelDepths')
        bitDepthValues = samUtils.getListValues(propBitDepth)
        # First half of the values describes the source, second half the output.
        half = int(len(bitDepthValues) / 2)
        bitDepthSourceStr = [self._getNbBitsFromOfxBitDepth(v) for v in bitDepthValues[:half]]
        bitDepthOutputStr = [self._getNbBitsFromOfxBitDepth(v) for v in bitDepthValues[half:]]
        # Print
        with indent(4):
            puts('{name!s:10}: [{bitdepth}]'.format(
                name=colored.green('Source'), bitdepth=', '.join(bitDepthSourceStr)))
            puts('{name!s:10}: [{bitdepth}]'.format(
                name=colored.green('Output'), bitdepth=', '.join(bitDepthOutputStr)))
def print_help(self):
    """Print this service's name and its option tree (options and any
    sub-options, indented)."""
    puts(colored.green("Additional help for service '{}'".format(self.name)))
    # BUGFIX: the loop variable used to rebind ``options`` (the dict being
    # iterated); renamed to avoid shadowing.
    for group_name, group_options in self.get_options().items():
        puts("{}:".format(group_name))
        with indent():
            for option in group_options:
                puts(option.name)
                if option.sub_options:
                    with indent():
                        puts(', '.join(option.sub_options))
def subscribe(config, repo):
    """Add *repo* to the subscription list unless it is already present."""
    with indent(3, quote=':: '):
        puts('Subscribing to repository...')
        with indent(1):
            puts(colored.magenta(repo))
        repos = config['repos']
        if repo in repos:
            puts_err(colored.red(
                'You\'re already subscribed to this repository!'))
        else:
            repos.append(repo)
def show_running(behaviors, services, pkg_data):
    """Print running behaviors and services."""
    print('\nActive Content:')
    if not behaviors and not services:
        print('No behaviors or services are active.')
    for behavior in behaviors or ():
        with indent(4):
            puts('* {} ({})'.format(bold(behavior), col.magenta('behavior')))
    for service in services or ():
        with indent(4):
            puts('* {} ({})'.format(bold(service), col.blue('service')))
    print('')
def build():
    """Concatenate the build list into raw_target, then minify it with the
    Closure compiler into min_target, reporting progress and timing."""
    start = time.time()
    error_occurred = False
    with indent(4):
        if not exists(build_dir):
            puts(colored.yellow('Build directory missing.'))
            puts(colored.yellow('Creating a new directory at %s' % build_dir))
            makedirs(build_dir)
        puts(colored.cyan('Build List:'))
        with indent(4):
            for js_file_path in build_list:
                puts(colored.magenta(js_file_path))
        puts(colored.cyan('Concatenating...'))
        with open(raw_target, mode="w") as raw_target_file:
            for js_file_path in build_list:
                # Banner comment marking where each source file begins.
                raw_target_file.write('// *****************************************\n')
                raw_target_file.write('// %s\n' % js_file_path)
                raw_target_file.write('// *****************************************\n')
                with open(join(project_dir, js_file_path), mode='r') as js_file:
                    raw_target_file.write(js_file.read())
                raw_target_file.write('\n\n')
        with indent(4):
            puts(colored.magenta('Done.'))
        puts(colored.cyan('Minifying...'))
        with indent(4):
            closure_bin = join(vendor_dir, 'closure/compiler.jar')
            result = envoy.run('java -jar %s --js=%s --js_output_file=%s' % (
                closure_bin,
                raw_target,
                min_target,
            ))
            if result.status_code == 0:
                puts(colored.magenta('Done.'))
            else:
                error_occurred = True
                puts(colored.red('There was an error running the closure compiler'))
                puts(result.std_out)
                puts(result.std_err)
        run_time = time.time() - start
        if not error_occurred:
            puts(colored.green('Build successfully completed in %g seconds.' % run_time))
        else:
            puts(colored.red('An error occurred :('))
def help(services=None, methods=None):  # noqa: A001 -- CLI command name, kept
    """Print CLI usage plus the available services and/or methods."""
    puts_err("Usage: sunlight <service> <method> [<args>, ...]")
    for label, items in (("Available services:", services),
                         ("Available methods:", methods)):
        if items:
            puts_err(label)
            with indent(4):
                for item in items:
                    puts_err(item)
def failures(self, failed_examples):
    """Report each failed example with its failing expectation and traceback.

    Does nothing when there are no failures.
    """
    if not failed_examples:
        return
    puts()
    puts('Failures:')
    puts()
    with indent(2):
        for index, failed in enumerate(failed_examples):
            puts('%d) %s' % (index + 1, self._format_full_example_name(failed)))
            with indent(3):
                puts(self._color('red', 'Failure/Error: %s' % self._format_failing_expectation(failed)))
                puts(self._color('red', self._format_traceback(failed)))
            puts()
# --- command-line option handling ---------------------------------------
arg = args.grouped['--database'][0]
if arg == 'heroku':
    db_type = 'heroku'
if arg == 'sqlite':
    db_type = 'sqlite'

if args.contains('--test'):
    test_args = args.grouped['--test']
    if 'all' in test_args:
        pass
    else:
        # BUGFIX: these checks were an if/elif chain, so at most one test
        # could ever be disabled; each flag must be checked independently.
        if 'database' not in test_args:
            test['database'] = False
        if 'email' not in test_args:
            test['email'] = False

puts('Testing:')
with indent(4):
    for key, value in test.items():
        if value:
            puts(colored.blue(key))
if args.contains('--database'):
    puts('Database: %s' % db_type)

# ========================================================================
# Database
# ========================================================================
if test['database']:
    puts(colored.blue('\nTesting database...'))
    from models import Reservation, Flight, FlightLeg, FlightLegLocation
def num_choice(choices, default='1', valid_keys='', depth=1, icons='',
               sn_info=None, indent=4, fg_color='green', separator='',
               with_img=6, img_list=None, img_cache_dir='/tmp',
               use_cache=False, extra_hints='', clear_previous=False,
               quit_app=True):
    """Render *choices* as a numbered menu and return the selection.

    If image support is enabled, an ``img_list`` ordered like *choices* must
    be supplied. Images render slowly in iTerm, so this is not recommended
    for lists longer than ten entries.

    :param choices: candidate options (list); returns None when empty.
    :param default: pre-selected answer shown in the prompt.
    :param valid_keys: comma-separated extra keys accepted verbatim, or
        'all' to accept any input.
    :param depth: nesting level (selects the icon used for the quote).
    :param icons: bullet icons, defaults to the module-level ICONS.
    :param sn_info: dict with 'align' ('-' for left) and 'length' for the
        printed sequence numbers.
    :param indent: left margin width.
    :param fg_color: foreground color name for clint.
    :param separator: header/footer ruler text (omitted when empty).
    :param with_img: >0 enables images, the value being the terminal rows used.
    :param img_list: source URLs for the per-choice images.
    :param img_cache_dir: where downloaded images are cached.
    :param use_cache: reuse cached images when True.
    :param extra_hints: extra prompt hints such as 'n-next,p-prev,s-skip'.
    :param clear_previous: clear the screen before returning 'b'/'q'.
    :param quit_app: exit the process on 'q' instead of returning it.
    :return: zero-based index of the chosen entry, or a passthrough string
        for 'b', extra hint keys, and (when quit_app is False) 'q'.
    """
    icons = ICONS if not icons else icons
    if not choices:
        return None
    # warn: must compare against None explicitly -- a default of 0 is valid!
    if default is not None:
        default = '{}'.format(default)
    sn_info = sn_info or {}
    _header, _footer = gen_separator(separator=separator)

    with textui.indent(indent, quote=' {}'.format(icons[depth - 1])):
        if _header:
            textui.puts(getattr(textui.colored, fg_color)(_header))
        for i, choice in enumerate(choices, start=1):
            if with_img > 0 and img_list:
                cat_net_img(img_list[i - 1], indent=indent, img_height=with_img,
                            img_cache_dir=img_cache_dir, use_cache=use_cache)
            _align = '{}{}'.format(sn_info.get('align', ''),
                                   sn_info.get('length', 2))
            _hint_num = '%{}s.'.format(_align) % i
            _hint = '[{}]'.format(_hint_num)
            _hint = textui.colored.magenta(_hint)
            _hint += getattr(textui.colored, fg_color)(' %s' % choice)
            textui.puts(_hint)
        if _footer:
            textui.puts(getattr(textui.colored, fg_color)(_footer))

    _valid = [str(x + 1) for x in range(0, len(choices))]
    default_prompt = 'Your Choice'
    valid_choices = ['q-quit', 'b-back']
    if extra_hints:
        if isinstance(extra_hints, str):
            extra_hints = extra_hints.split(',')
        valid_choices += extra_hints
    default_prompt = '{}({})?'.format(default_prompt, '/'.join(valid_choices))

    c = click.prompt(
        click.style(default_prompt, fg='cyan'),
        type=str,
        default=default
    )
    if str(c) in 'qQ':
        if quit_app:
            os._exit(0)
        else:
            if clear_previous:
                click.clear()
            return str(c)
    if valid_keys == 'all':
        return c
    elif str(c) in 'bB':
        if clear_previous:
            click.clear()
        return str(c)
    elif valid_keys and str(c) in valid_keys.split(','):
        return str(c)
    elif c not in _valid:
        textui.puts(textui.colored.red(' 😭 ✘ Invalid input[{}]'.format(c)))
        # Re-prompt recursively with the same arguments until input is valid.
        return num_choice(
            choices, default, valid_keys, depth, icons, sn_info, indent,
            fg_color, separator, with_img, img_list, img_cache_dir,
            use_cache, extra_hints, clear_previous, quit_app,
        )
    else:
        return int(c) - 1
def color_print(msg, indent=4, color='green'):
    """Print *msg* indented by *indent* spaces in the given clint color."""
    quote = ' {}'.format(' ')
    with textui.indent(indent, quote=quote):
        colorize = getattr(textui.colored, color)
        textui.puts(colorize(msg))
def _puts_routes_header():
    """Print the configured ``routes.list.header`` framed by green rulers.

    Raises whatever the config lookup raises; callers treat it as optional.
    """
    length = 0
    for line in SysConf().getValue("routes.list.header").split("\n"):
        length = max(length, len(line))
    puts(colored.green("=" * length))
    puts(colored.green(SysConf().getValue("routes.list.header").rstrip()))
    puts(colored.green("=" * length) + "\n")


def run():
    """
    Runs QXSConsolas

    Configures the logger as well as the routing and runs the cli application
    """
    logging.config.dictConfig(SysConf().getValue("logging"))
    config = SysConf().getValue("routes.cliapps")
    if len(sys.argv) > 1 and sys.argv[1] in config and sys.argv[1] != "help":
        cmdname = sys.argv[1]
        modulename = ".".join(config[cmdname]["func"].split('.')[0:-1])
        classname = config[cmdname]["func"].split('.')[-1]
        # remove the first argument -> the module should not see this
        sys.argv.pop(1)
        # run the app
        module = importlib.import_module(modulename)
        app = getattr(module, classname)
        app = app()
        assert isinstance(
            app, Application
        ), modulename + "." + classname + "() does not return a QXSConsolas.Cli.Application object"
        # BUGFIX (throughout): bare ``except:`` replaced with
        # ``except Exception`` so SystemExit/KeyboardInterrupt propagate;
        # the logger/data/config lookups stay deliberately optional.
        logger = None
        try:
            logger = logging.getLogger(config[cmdname]["logger"])
        except Exception:
            pass
        data = None
        try:
            data = Configuration(configData=config[cmdname]["data"])
        except Exception:
            pass
        return app.run(data=data, logger=logger)
    elif len(sys.argv) > 2 and sys.argv[2] in config and sys.argv[1] == "help":
        modulename = ".".join(config[sys.argv[2]]["func"].split('.')[0:-1])
        classname = config[sys.argv[2]]["func"].split('.')[-1]
        # run the app
        module = importlib.import_module(modulename)
        app = getattr(module, classname)
        app = app()
        assert isinstance(
            app, Application
        ), modulename + "." + classname + "() does not return a QXSConsolas.Cli.Application object"
        # render it
        try:
            _puts_routes_header()
        except Exception:
            pass
        puts("Command: " + colored.green(sys.argv[2]) + "\n")
        if "description" in config[sys.argv[2]]:
            with indent(4):
                puts(config[sys.argv[2]]["description"] + "\n")
        showCliName = True
        showCliDescription = True
        if "showCliName" in config[sys.argv[2]]:
            showCliName = bool(config[sys.argv[2]]["showCliName"])
        if "showCliDescription" in config[sys.argv[2]]:
            showCliDescription = bool(
                config[sys.argv[2]]["showCliDescription"])
        with indent(4):
            app.showHelp(showCliName, showCliDescription)
        try:
            puts("\n" + SysConf().getValue("routes.list.footer").rstrip() + "\n")
        except Exception:
            pass
    else:
        try:
            _puts_routes_header()
        except Exception:
            pass
        try:
            puts(SysConf().getValue("routes.list.description").rstrip() + "\n")
        except Exception:
            pass
        puts("Available commands:")
        for routename in sorted(config):
            if routename == "help":
                continue
            with indent(2):
                if "func" in config[routename]:
                    puts(colored.green(routename))
                    if "description" in config[routename]:
                        with indent(4):
                            puts(config[routename]["description"])
        with indent(2):
            puts(
                colored.green("help") + " [" + colored.yellow("command") + "]")
            with indent(4):
                puts("Show the help [for a command]")
        try:
            puts("\n" + SysConf().getValue("routes.list.footer").rstrip() + "\n")
        except Exception:
            pass
        return 1
def processDataSet(dataSetName, dataSet, settings): puts('Processing Data Set: %s' % dataSetName) dataSet['data'] = { 'files': {}, 'dir': { 'raman': [], 'intensity': { 'filtered': [] } } } if 'a' in settings['method']: dataSet['data']['dir']['intensity']['baselined'] = [] outputPathBaseName = "%s_v%%d" % dataSetName.replace(' ', '_').lower() fileVersion = nextVersion(os.path.abspath(dataSet['output']), outputPathBaseName) outputPath = getVersionPath(os.path.abspath(dataSet['output']), outputPathBaseName, fileVersion) try: os.makedirs(outputPath) except OSError as e: if e[0] == 17 or e[0] == 183: pass else: print e[0] raise inputPathBasename = os.path.basename(dataSet['input']) with indent(4): putSeparator('-', 30) puts('Created Output Directory: %s' % (outputPath, )) putSeparator('-', 30) for fileName in os.listdir(dataSet['input']): if fileName[-3:] == 'txt': processDataFile(fileName, os.path.join(dataSet['input'], fileName), outputPath, dataSet['data'], settings, fileVersion) if len(dataSet['data']['dir']['raman']) > 0: if 'b' in settings['method']: puts( "Running Method B: Averaging All Data and Then Baselining") dirAvg = numpy.array( dataSet['data']['dir']['intensity']['filtered']).mean( axis=1) dirAvgBaseline, dirAvgSubtracted = baselineData( dirAvg, settings['smooth'], settings['porder'], settings['max_it']) dirAvgMatrix = zip(dataSet['data']['dir']['raman'], dirAvgSubtracted) puts('Writing DataSet Avg(Method B)') for format in settings['formats']: puts("Data Format: " + format) with indent(4, quote='>'): formatPath = os.path.join(outputPath, format) dirAvgFileName = "methodB_{base}_v%d.{ext}".format( base=inputPathBasename, ext=format) dirAvgPath = getVersionPath(formatPath, dirAvgFileName, fileVersion) writeData( formatData(dirAvgMatrix, format, settings['prec']), dirAvgPath) puts('Saved Method B to: %s' % dirAvgPath) if 'a' in settings['method']: puts( 'Running Method A: Averaging All Baselined Data and Then Baselining' ) methodAAvg = numpy.array( 
dataSet['data']['dir']['intensity']['baselined']).mean( axis=1) methodABaseline, methodASubtracted = baselineData( methodAAvg, settings['smooth'], settings['porder'], settings['max_it']) methodAMatrix = zip(dataSet['data']['dir']['raman'], methodASubtracted) puts('Writing DataSet Baselined(Method A)') for format in settings['formats']: puts("Data Format: " + format) with indent(4, quote='>'): formatPath = os.path.join(outputPath, format) methodAFileName = "methodA_{base}_v%d.{ext}".format( base=inputPathBasename, ext=format) methodAPath = getVersionPath(formatPath, methodAFileName, fileVersion) writeData( formatData(methodAMatrix, format, settings['prec'])) puts('Saved Method A to: %s' % methodAPath) putSeparator('-', 20)
def standard_cost_report(wf_id, json_costs, display_nano_dollars):
    """Print a per-task cost table and a colored summary for one workflow.

    Args:
        wf_id: workflow identifier (shown in the banner).
        json_costs: mapping task -> {'cpu','mem','disk','total-cost','items': [...]},
                    where each item is a dict of call shards.
        display_nano_dollars: when True, values are converted/formatted as
                              nano-dollars (scientific notation in the table).

    Side effects only (prints via clint puts / tabulate); returns None.
    """
    units = partial(dollar_units, display_nano_dollars)
    display = partial(display_dollars, display_nano_dollars)
    puts('=== Workflow: {} ==='.format(wf_id))
    puts()
    headers = ['task', '# called', 'cpu', 'mem', 'disk', 'total', 'avg. cost']
    (total_cost, total_cpu_cost, total_mem_cost, total_disk_cost) = \
        (0.0, 0.0, 0.0, 0.0)
    table_rows = []
    total_calls = 0
    for k in sorted(json_costs.keys()):
        task_cpu_cost = units(json_costs[k]['cpu'])
        task_mem_cost = units(json_costs[k]['mem'])
        task_disk_cost = units(json_costs[k]['disk'])
        task_cost = units(json_costs[k]['total-cost'])
        total_cost += task_cost
        total_cpu_cost += task_cpu_cost
        total_mem_cost += task_mem_cost
        total_disk_cost += task_disk_cost
        # A task's call count is the number of shard entries across its items.
        total_task_calls = 0
        for item in json_costs[k]['items']:
            total_task_calls += len(item.keys())
        total_calls += total_task_calls
        # ROBUSTNESS FIX: a task with no recorded items previously raised
        # ZeroDivisionError; report an average of 0.0 instead.
        if total_task_calls:
            avg_task_cost = task_cost / float(total_task_calls)
        else:
            avg_task_cost = 0.0
        table_rows.append([
            k, total_task_calls,
            display(task_cpu_cost),
            display(task_mem_cost),
            display(task_disk_cost),
            display(task_cost),
            display(avg_task_cost)
        ])
    unit = 'nano dollars (USD)' if display_nano_dollars else 'dollars (USD)'
    with indent(4, quote=''):
        puts("Prices in: '{}'".format(unit))
    puts()
    # Scientific notation for nano-dollars, fixed-point otherwise.
    if display_nano_dollars:
        puts(tabulate(table_rows, headers, tablefmt="simple", floatfmt="8.4e"))
    else:
        puts(tabulate(table_rows, headers, tablefmt="simple", floatfmt=".3f"))
    puts()
    with indent(4, quote=''):
        puts('= Summary ======================')
        puts(colored.blue("  cpu  : {}".format(display(total_cpu_cost))))
        puts(colored.blue("  mem  : {}".format(display(total_mem_cost))))
        puts(colored.blue("  disk : {}".format(display(total_disk_cost))))
        puts(colored.green("Total Cost  : {}".format(display(total_cost))))
        puts(colored.yellow("Total Calls : {}".format(total_calls)))
        puts('================================')
def __init__(self, template_path):
    """Store the template path and print the clone announcement.

    NOTE(review): the message interpolates `self.repo`, which is never assigned
    in this method — presumably a class attribute or set elsewhere; verify
    against the enclosing class before relying on it.
    """
    self.template_path = template_path
    with indent(4, quote=' >'):
        puts(blue('Cloning template using Git from: {0}'.format(self.repo)))
def list(nc, filtre, options):
    """List hostgroups from `nc`, split into matching ("founded") and filtered sets.

    Args:
        nc: mapping exposing 'all_hostgroup' (a list of hostgroup dicts).
        filtre: dict of attribute -> regex pattern; a hostgroup must match every
                pattern, and must have every filtered attribute, to be listed.
        options: container of flags; 'small' suppresses attribute detail,
                 'show_filtered' also prints the filtered-out groups.

    Exits the process on missing data (0) or on errors (1).
    NOTE: function name shadows the builtin `list`; kept for caller compatibility.
    """
    hostgroups_founded = []
    hostgroups_filtered = []
    try:
        hostgroups = nc['all_hostgroup']
    except KeyError:
        with indent(3, quote=colored.white(' ==> ')):
            puts("No hostgroup")
        sys.exit(0)
    except Exception as err:
        with indent(3, quote=colored.white(' ==> ')):
            # BUG FIX: the original format string was "(%)" — an invalid
            # conversion specifier that raised ValueError instead of
            # printing the error; "%s" interpolates `err` as intended.
            puts("Something went wront (%s)" % err)
        sys.exit(1)
    for hostgroup in hostgroups:
        filtered = False
        for element in filtre:
            if element in hostgroup.keys():
                try:
                    filtre_pattern = re.compile(filtre[element])
                except:
                    with indent(3, quote=colored.red(' >> ')):
                        puts('Regexp error')
                    sys.exit(1)
                if not filtre_pattern.match(hostgroup[element]):
                    filtered = True
                    break
                else:
                    filtered = False
            else:
                # Hostgroup lacks a filtered attribute -> exclude it.
                filtered = True
                break
        if not filtered:
            hostgroups_founded.append(hostgroup)
        else:
            hostgroups_filtered.append(hostgroup)
    with indent(3, quote=colored.white(' ====> ')):
        puts("Hostgroups founded")
    if hostgroups_founded:
        for hostgroup_founded in hostgroups_founded:
            with indent(3, quote=colored.green(' >> ')):
                puts("%s" % hostgroup_founded['hostgroup_name'])
            if not 'small' in options:
                # Only show attributes explicitly defined on the hostgroup.
                for key, value in sorted(hostgroup_founded.items()):
                    if key != 'meta':
                        if key in hostgroup_founded['meta']['defined_attributes']:
                            with indent(3, quote=colored.white('  | ')):
                                puts("%s: %s" % (colored.blue(key), colored.green(value)))
    with indent(3, quote=colored.white(' ==> ')):
        puts("Total: %s" % len(hostgroups_founded))
    if 'show_filtered' in options:
        print('')
        with indent(3, quote=colored.white(' ====> ')):
            puts("Hostgroups filtered")
        if hostgroups_filtered:
            for hostgroup_filtered in hostgroups_filtered:
                with indent(3, quote=colored.green(' >> ')):
                    puts("%s" % hostgroup_filtered['hostgroup_name'])
        with indent(3, quote=colored.white(' ==> ')):
            puts("Total: %s" % len(hostgroups_filtered))
def legend(options):
    """Print the output legend. `options` is accepted but not used here."""
    # Python 2 bare `print` statement: emits a blank separator line.
    print
    # NOTE(review): nesting of the two indent() blocks reconstructed from a
    # whitespace-collapsed original — verify the second is inside the first.
    with indent(3, quote=colored.white(' ==> ')):
        puts('Legend')
        with indent(2, quote=colored.white(' + ')):
            puts('titles after \">>\" are hostgroup_name')
def example_failed(self, example):
    """Report a failed example: a red ✗ marker line, then the exception
    text printed one nesting level deeper in red."""
    marker = self._color('red', '✗')
    self._format_example(marker, example)
    failure_text = self._color('red', str(example.error.exception))
    with indent(2 * (self._depth(example) + 1)):
        puts(failure_text)
elif "--csv" in str(args.flags): data = get_cells(img, column) if len(data) > 0 and nested_array_to_formated_file( data[1:], data[0], "csv", path): print("Done! Path of output csv file: ", path) else: cv2.imwrite(dst, img) with indent(4, quote=" > "): puts(colored.green("Output: " + dst)) if __name__ == "__main__": if not args: with indent(4, quote=" > "): puts(colored.red("No argument passed. Use --help for help.")) else: # puts(colored.green(str(args))) if args.flags[0] == "--help": with indent(4, quote=" { "): puts(colored.green("##########@@@@@@@@@@@@@@###########")) puts(colored.green("#### Welcome To OCR CLI Tool ####")) puts(colored.green("##########@@@@@@@@@@@@@@###########")) puts(colored.blue("")) puts(colored.blue("Usage:")) puts( colored.green( "./preprocess_image.py <URL of image> --json -c <No. of columns> -o <Output path>" ))
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Demo of clint.eng.join: prints growing color lists, first with the
default ("Smart") conjunction handling, then the "Stupid" variant."""

import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from clint.eng import join
from clint.textui import colored, indent, puts

colors = [str(swatch) for swatch in (
    colored.blue('blue'),
    colored.red('red'),
    colored.yellow('yellow'),
    colored.green('green'),
    colored.magenta('magenta'),
)]

puts('Smart:')
with indent(4):
    for count in range(1, len(colors) + 1):
        puts(join(colors[:count]))
puts('\n')

puts('Stupid:')
with indent(4):
    for count in range(1, len(colors) + 1):
        puts(join(colors[:count], im_a_moron=True, conj='\'n'))
def is_balanced(output=False): connection = pymongo.Connection("localhost", 27017) chunks = {} nss = {} # Loop through each of the chunks, tallying things up for chunk in connection["config"]["chunks"].find(): if "ns" in chunk: # Chunks per shard if chunk["ns"] in chunks: if chunk["shard"] in chunks[chunk["ns"]]: chunks[chunk["ns"]][chunk["shard"]] = chunks[chunk["ns"]][ chunk["shard"]] + 1 else: chunks[chunk["ns"]][chunk["shard"]] = 1 else: chunks[chunk["ns"]] = {} chunks[chunk["ns"]][chunk["shard"]] = 1 # Total chunks for the ns if chunk["ns"] in nss: nss[chunk["ns"]] = nss[chunk["ns"]] + 1 else: nss[chunk["ns"]] = 1 shardsCount = connection["config"]["shards"].count() chunksCount = connection["config"]["chunks"].count() # Different migration thresholds depending on cluster size # http://docs.mongodb.org/manual/core/sharding-internals/#sharding-migration-thresholds if chunksCount < 20: threshold = 2 elif chunksCount < 80 and chunksCount > 21: threshold = 4 else: threshold = 8 isBalanced = True balanceStatus = {} # Loop through each ns and determine if it's balanced or not for ns in nss: balanced = nss[ns] / shardsCount if output == True: print ns balanceStatus[ns] = True for shard in chunks[ns]: if chunks[ns][shard] > balanced - threshold and chunks[ns][ shard] < balanced + threshold: if output == True: with indent(4): puts(shard + colored.green(" balanced ") + "(" + str(chunks[ns][shard]) + ")") else: isBalanced = False balanceStatus[ns] = False if output == True: with indent(4): puts(shard + colored.red(" unbalanced ") + "(" + str(chunks[ns][shard]) + ")") return { "isBalanced": isBalanced, "chunks": chunks, "balanceStatus": balanceStatus }
def uninstall(**kwargs):
    """Tear down a pgrepup replication setup on both clusters.

    Order: stop subscriptions, drop pglogical nodes on Destination, drop the
    pgl_ddl_deploy and pglogical extensions everywhere, drop the replication
    user, then drop the unique helper fields added by the fix command.
    Prints a check-mark style report via output_cli_message/output_cli_result.
    NOTE: indentation reconstructed from a whitespace-collapsed original.
    """
    stop()
    output_cli_message("Uninstall operations", color='cyan')
    puts("")
    with indent(4, quote=' >'):
        output_cli_message("Remove nodes from Destination cluster")
        print()
        with indent(4, quote=' '):
            for db in get_cluster_databases(connect('Destination')):
                output_cli_message(db)
                drop_node(db)
                print(output_cli_result(True, 4))
        output_cli_message("Drop pgl_ddl_deploy extension in all databases")
        print()
        with indent(4, quote=' '):
            for t in ['Source', 'Destination']:
                output_cli_message(t)
                print()
                with indent(4, quote=' '):
                    for db in get_cluster_databases(connect(t)):
                        output_cli_message(db)
                        if not clean_pgl_ddl_deploy(t, db):
                            print(output_cli_result(False, compensation=8))
                            continue
                        print(output_cli_result(True, compensation=8))
        output_cli_message("Drop pg_logical extension in all databases")
        print()
        with indent(4, quote=' '):
            for t in ['Source', 'Destination']:
                output_cli_message(t)
                print()
                with indent(4, quote=' '):
                    for db in get_cluster_databases(connect(t)):
                        output_cli_message(db)
                        if not clean_pglogical_setup(t, db):
                            print(output_cli_result(False, compensation=8))
                            continue
                        print(output_cli_result(True, compensation=8))
        output_cli_message("Drop user for replication")
        print(output_cli_result(
            drop_user(connect('Source'), get_pgrepup_replication_user())))
        output_cli_message("Drop unique fields added by fix command")
        print()
        with indent(8, quote=' '):
            src_db_conn = connect('Source')
            for db in get_cluster_databases(src_db_conn):
                output_cli_message(db)
                print()
                # Per-database connections to both sides so the helper column
                # is dropped from Source and Destination in lockstep.
                src_d_db_conn = connect('Source', db_name=db)
                dest_d_db_conn = connect('Destination', db_name=db)
                with indent(4, quote=' '):
                    for table in get_database_tables(src_d_db_conn):
                        output_cli_message("%s.%s" % (table['schema'], table['table']))
                        s = drop_table_field(src_d_db_conn, table['schema'],
                                             table['table'], get_unique_field_name())
                        d = drop_table_field(dest_d_db_conn, table['schema'],
                                             table['table'], get_unique_field_name())
                        print(output_cli_result(s and d, compensation=12))
def parse(self):
    """Yield one price row per poultry cut from the 'Precios' tables in self.html.

    Each 12-cell table row holds min/max/frequency prices for four cuts
    (pechuga, pierna/muslo, retazo, visceras); a row dict is yielded per cut
    once its last column for that cut is reached. Rows carry the packer name
    announced by the preceding single-cell row and self.date as 'fecha'.
    """
    labels = ['precio_minimo_kg', 'precio_maximo_kg', 'frecuencia_kg']
    if self.html:
        for table in self.html.find_all('table'):
            headers = table.find_all('td', {'class': 'encabTAB'})
            if len(headers) == 0:
                continue
            # BUG FIX: the original `if 'Precios' in headers[0].text == False:`
            # is a chained comparison — ('Precios' in text) and (text == False) —
            # which is always False, so non-price tables were never skipped.
            # The sibling parser uses the correct `not in` form.
            if 'Precios' not in headers[0].text:
                continue
            packer = None
            for tr in table.find_all('tr'):
                tds = tr.find_all('td')
                # A single-cell row (that isn't the header) names the packer.
                if len(tds) == 1 and 'Precios' not in tds[0].text:
                    packer = tds[0].text
                    with indent(4):
                        puts(colored.blue("Empacador: {}".format(str(packer))))
                if len(tds) == 12:
                    if packer:
                        row = {
                            'fecha': self.date,
                            'pieza': None,
                            'precio_minimo_kg': None,
                            'precio_maximo_kg': None,
                            'frecuencia_kg': None,
                            'empacadora': None
                        }
                        # Columns 1-3: pechuga, 4-6: pierna/muslo,
                        # 7-9: retazo, 10-11: visceras (no frequency column).
                        for element in range(1, 12):
                            if element < 4:
                                row['pieza'] = 'pechuga'
                                row[labels[element - 1]] = tds[element].text.replace('$', '')
                            elif element < 7:
                                row['pieza'] = 'pierna/muslo'
                                row[labels[element - 4]] = tds[element].text.replace('$', '')
                            elif element < 10:
                                row['pieza'] = 'retazo'
                                row[labels[element - 7]] = tds[element].text.replace('$', '')
                            else:
                                row['pieza'] = 'visceras'
                                row[labels[element - 10]] = tds[element].text.replace('$', '')
                            # Last column of each cut: emit the row, reset buffer.
                            if element == 3 or element == 6 or element == 9 or element == 11:
                                row['empacadora'] = packer
                                yield row
                                row = {
                                    'fecha': self.date,
                                    'pieza': None,
                                    'precio_minimo_kg': None,
                                    'precio_maximo_kg': None,
                                    'frecuencia_kg': None,
                                    'empacadora': None
                                }
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""Tiny CLI: --cargar / --reportar each print a confirmation under a
'vivas> ' prompt and create the corresponding placeholder file."""

from clint.arguments import Args
from clint.textui import puts, colored, indent


def create_file(name):
    """Create (or truncate) *name* containing a single newline."""
    with open(name, 'w+') as file:
        file.write('\n')


args = Args()
flag = args.flags[0]

# flag -> (message to show, file to create)
actions = {
    '--cargar': ('Carga finalizada. Ver target/resultado.csv', 'target/resultado.csv'),
    '--reportar': ('Ver target/estadisticas.csv', 'target/estadisticas.csv'),
}
if flag in actions:
    message, target = actions[flag]
    with indent(4, quote='vivas> '):
        puts(message)
    create_file(target)
print()
def setup(**kwargs):
    """Prepare Source and Destination clusters for pgrepup replication.

    Aborts early if Destination still has active subscriptions. Then: drop
    stale pglogical nodes/extensions, create a temporary pgpass file, and run
    _setup_source / _setup_destination per target. Cleanup of the pgpass file
    and any pg_dumpall temp files always runs in the finally block.
    NOTE: Python 2 source — bare `print` statements mixed with print(...) calls.
    Indentation reconstructed from a whitespace-collapsed original.
    """
    result = True
    if check_destination_subscriptions():
        result = False
    output_cli_message("Check if there are active subscriptions in Destination nodes")
    print(output_cli_result(result, compensation=-4))
    if not result:
        print("  " + colored.yellow("Hint: use pgrepup stop to terminate the subscriptions"))
        sys.exit(1)
    targets = ['Source', 'Destination']
    files_to_clean = []
    try:
        output_cli_message("Global tasks", color='cyan')
        puts("")
        with indent(4, quote=' >'):
            output_cli_message("Remove nodes from Destination cluster")
            print
            with indent(4, quote=' '):
                for db in get_cluster_databases(connect('Destination')):
                    output_cli_message(db)
                    drop_node(db)
                    print(output_cli_result(True, 4))
            output_cli_message("Create temp pgpass file")
            pg_pass = create_pgpass_file()
            print(output_cli_result(bool(pg_pass)))
            for t in targets:
                output_cli_message("Drop pg_logical extension in all databases of %s cluster" % t)
                print
                with indent(4, quote=' '):
                    for db in get_cluster_databases(connect(t)):
                        output_cli_message(db)
                        if not clean_pglogical_setup(t, db):
                            print(output_cli_result(False, compensation=4))
                            continue
                        print(output_cli_result(True, compensation=4))
        source_setup_results = {}
        for t in targets:
            results = checks(t)
            output_cli_message("Setup %s" % t, color='cyan')
            if not results['result']:
                print(output_cli_result(colored.red('Skipped, configuration problems'), compensation=-4))
                continue
            puts("")
            with indent(4, quote=' >'):
                if t == 'Source':
                    source_setup_results = _setup_source(results['data']['conn'], pg_pass)
                    # Remember the dump file so the finally block can delete it.
                    if isinstance(source_setup_results, dict) and 'pg_dumpall' in source_setup_results:
                        files_to_clean.append(source_setup_results['pg_dumpall'])
                else:
                    _setup_destination(
                        results['data']['conn'],
                        pg_pass=pg_pass,
                        source_setup_results=source_setup_results
                    )
    finally:
        # Always remove temporary credentials and dump files, even on failure.
        output_cli_message("Cleaning up", color='cyan')
        puts("")
        with indent(4, quote=' >'):
            output_cli_message("Remove temporary pgpass file")
            print(output_cli_result(remove_pgpass_file()))
            output_cli_message("Remove other temporary files")
            if len(files_to_clean) == 0:
                print(output_cli_result(True))
            for tempf in files_to_clean:
                try:
                    os.unlink(tempf)
                    print(output_cli_result(True))
                except OSError:
                    print(output_cli_result(False))
# NOTE(review): fragment — `sedate` and `debug` are defined outside this
# excerpt (this appears to continue a login/session function). Python 2 only:
# the octal literals 04/01 are invalid syntax in Python 3.
cycleStart = getcyclestartdate(sedate)
historyparams = urllib.urlencode({
    "location": "allLocations",
    "parameter": "custom",
    "customStartMonth": cycleStart['mm'],
    "customStartDay": cycleStart['dd'],
    "customStartYear": cycleStart['yy'],
    "customEndMonth": 04,
    "customEndDay": 01,
    "customEndYear": 2016,  # Lazy, so hardcoding end year.
    "button": "View"
})
with indent(5, quote=">"):
    puts(colored.yellow("Accessing history"))
historyreq = urllib2.Request(
    BASE_URL + '/registration/customerSessionHistory.do', historyparams)
histories = urllib2.urlopen(historyreq)
html = histories.read()
if debug:
    with open('debug/history.txt', 'wb') as f:
        f.write(html)
        f.close()  # redundant: the with-block closes the file
    with indent(5, quote=colored.white("DEBUG:")):
        puts(colored.red(
            "logged /registration/customerSessionHistory.do response"))
def advicemessage(string):
    """Print *string* wrapped and in yellow, preceded by '\r' to rewind
    the cursor to the start of the current line."""
    wrapped = wrapit.fill(string)
    with indent(4, quote=''):
        puts('\r' + colored.yellow(wrapped))
def printfx(self, cldata, data):
    """Echo *cldata* (stringified, in red) followed by *data*, under a
    '>>>' quoted indent."""
    prefix = colored.red(str(cldata))
    with indent(4, quote='>>>'):
        puts(prefix + data)
def logintopronto(username, password, debug):
    """Log in to a ProntoNetworks captive portal and fetch plan/usage data.

    Installs a cookie-aware urllib2 opener, loads the portal main page, posts
    credentials, scrapes the selected-plan table, then requests the session
    history for the current billing cycle. When `debug` is truthy, raw
    responses are written under debug/.
    NOTE: Python 2 source (urllib2/cookielib, octal literals). Indentation
    reconstructed from a whitespace-collapsed original.
    """
    params = urllib.urlencode({
        'loginUserId': username,
        'authType': 'Pronto',
        'loginPassword': password,
        'submit': 'Login'
    })
    opener = urllib2.build_opener(
        urllib2.HTTPCookieProcessor(cookielib.CookieJar()))
    urllib2.install_opener(opener)
    puts(colored.white("Contacting ProntoNetworks..."))
    if debug:
        if not os.path.exists('debug/'):
            os.makedirs('debug/')
    with indent(5, quote=">"):
        puts(colored.yellow("Fetching site"))
    mainreq = urllib2.Request(
        BASE_URL + '/registration/Main.jsp?wispId=1&nasId=00:15:17:c8:09:b1')
    mainres = urllib2.urlopen(mainreq)
    if debug:
        with open('debug/main.txt', 'wb') as f:
            f.write(mainres.read())
            f.close()  # redundant: the with-block closes the file
        with indent(5, quote=colored.white("DEBUG:")):
            puts(colored.red("logged /registration/Main.jsp response"))
    with indent(5, quote=">"):
        puts(colored.yellow("Sending credentials"))
    loginReq = urllib2.Request(BASE_URL + '/registration/chooseAuth.do', params)
    loginRes = urllib2.urlopen(loginReq)
    if debug:
        with open('debug/login.txt', 'wb') as f:
            # NOTE(review): this consumes loginRes; a later debug block reads
            # loginRes.read() again (will be empty) — looks like a copy-paste
            # slip that should log planres instead. Verify before changing.
            f.write(loginRes.read())
            f.close()
        with indent(5, quote=colored.white("DEBUG:")):
            puts(colored.red("logged /registration/chooseAuth.do response"))
    with indent(5, quote=">"):
        puts(colored.yellow("Checking plan"))
    planreq = urllib2.Request(
        BASE_URL + '/registration/main.do?content_key=%2FSelectedPlan.jsp')
    planres = urllib2.urlopen(planreq)
    planSoup = BeautifulSoup(planres.read())
    data = planSoup.findAll('td', attrs={
        'class': 'formFieldRight',
        'colspan': '2'
    })
    # Each matched cell's parent row holds a label/value pair; collect values.
    planDetails = []
    for i in range(0, len(data) - 1):
        kids = data[i].parent.findAll('td')
        planDetails.append(str(kids[1].text))
    if debug:
        with open('debug/plan.txt', 'wb') as f:
            f.write(loginRes.read())
            f.close()
        with indent(5, quote=colored.white("DEBUG:")):
            puts(colored.red(
                "logged /registration/main.do?content_key=%2FSelectedPlan.jsp response"))
    # planDetails[2] is the plan start date; used to derive the billing cycle.
    sedate = datetime.strptime(planDetails[2], "%m/%d/%Y %H:%M:%S")
    # enddate = datetime.strptime(planDetails[3], "%m/%d/%Y %H:%M:%S")
    cycleStart = getcyclestartdate(sedate)
    historyparams = urllib.urlencode({
        "location": "allLocations",
        "parameter": "custom",
        "customStartMonth": cycleStart['mm'],
        "customStartDay": cycleStart['dd'],
        "customStartYear": cycleStart['yy'],
        "customEndMonth": 04,
        "customEndDay": 01,
        "customEndYear": 2016,  # Lazy, so hardcoding end year.
        "button": "View"
    })
def parse(self):
    """Yield one cattle-price row per 7-cell table row found in self.html.

    Only tables whose first 'encabTAB' header mentions 'Origen' are parsed.
    Single-cell rows announce the slaughterhouse ('rastro'); 2-cell rows carry
    the date and slaughter volume; 7-cell rows carry the per-state prices and
    are emitted together with the most recently seen rastro/fecha/sacrificio.
    """
    labels = [
        'estado', 'cabezas', 'peso_promedio_kg', 'precio_novillo_kg',
        'precio_novillona_kg', 'precio_vaca_kg', 'precio_toro_kg'
    ]
    for table in self.html.find_all('table'):
        td_class = table.find_all('td', {'class': 'encabTAB'})
        if len(td_class) == 0:
            continue
        if 'Origen' not in td_class[0].text:
            continue
        # Carried across rows until the next announcement row resets them.
        rastro = None
        fecha = None
        sacrificio = None
        for tr in table.find_all('tr'):
            row = {
                'rastro': None,
                'fecha': None,
                'volumen_sacrificio': None,
                'estado': None,
                'cabezas': None,
                'peso_promedio_kg': None,
                'precio_novillo_kg': None,
                'precio_novillona_kg': None,
                'precio_vaca_kg': None,
                'precio_toro_kg': None
            }
            tds = tr.find_all('td')
            if len(tds) == 1:
                rastro = tds[0].text
                with indent(4):
                    puts(colored.blue("Empacador: {}".format(str(rastro))))
                # print(rastro)
            if len(tds) == 2:
                for td in tds:
                    if 'Fecha' in td.text:
                        fecha = td.text.split(':')[1]
                    if 'Volumen de Sacrificio' in td.text:
                        sacrificio = td.text.split(':')[1]
            if len(tds) == 7:
                row['fecha'] = fecha
                row['rastro'] = rastro
                row['volumen_sacrificio'] = sacrificio
                for element in range(7):
                    row[labels[element]] = tds[element].text
                yield row
tso = TwitterSearchOrder() # create a TwitterSearchOrder object tso.setKeywords(SEARCH_SPEC.all) # let's define all words we would like to have a look for tso.setLanguage('en') tso.setCount(RESULT_COUNT) tso.setIncludeEntities(False) user_fields = ['screen_name','utc_offset','description','location'] tweet_fields = ['created_at','text','retweet_count','favorite_count'] sheet = tablib.Dataset(headers=['id']+user_fields+tweet_fields) search.searchTweetsIterable(tso) queries = 0 puts(u"Fetching results:") with indent(3): try: for tweet in search: if search.getStatistics()['queries'] != queries: puts('Fetched {0} tweets'.format(search.getStatistics()['tweets'])) queries = search.getStatistics()['queries'] data = [tweet['id_str']] for key in user_fields: data.append(tweet['user'][key]) for key in tweet_fields: data.append(tweet[key]) sheet.append(data) except TwitterSearchException as e: # take care of all those ugly errors if there are some print(e)
def import_settings(quiet=True):
    """This method takes care of importing settings from the environment,
    and config.py file.

    Order of operations:
        1. Imports all WILL_ settings from the environment, and strips off the WILL_
        2. Imports settings from config.py
        3. Sets defaults for any missing, required settings.

    This method takes a quiet kwarg, that when False, prints helpful output.
    Called that way during bootstrapping.
    """
    settings = {}

    # Import from environment, handle environment-specific parsing.
    for k, v in os.environ.items():
        if k[:5] == "WILL_":
            k = k[5:]
            settings[k] = v
    if "ROOMS" in settings:
        settings["ROOMS"] = settings["ROOMS"].split(";")

    # If HIPCHAT_SERVER is set, we need to change the USERNAME slightly
    # for XMPP to work.
    if "HIPCHAT_SERVER" in settings:
        # NOTE(review): "******" looks like a scrubbed/redacted format string
        # (it is .format()-ed with user/host) — restore from upstream source.
        settings["USERNAME"] = "******".\
            format(user=settings["USERNAME"].split("@")[0],
                   host=settings["HIPCHAT_SERVER"])
    else:
        settings["HIPCHAT_SERVER"] = "api.hipchat.com"

    # Break a proxy URL into the pieces the XMPP layer wants.
    if "PROXY_URL" in settings:
        parsed_proxy_url = urlparse(settings["PROXY_URL"])
        settings["USE_PROXY"] = True
        settings["PROXY_HOSTNAME"] = parsed_proxy_url.hostname
        settings["PROXY_USERNAME"] = parsed_proxy_url.username
        settings["PROXY_PASSWORD"] = parsed_proxy_url.password
        settings["PROXY_PORT"] = parsed_proxy_url.port
    else:
        settings["USE_PROXY"] = False

    # Import from config
    if not quiet:
        puts("Importing config.py... ")
    with indent(2):
        try:
            had_warning = False
            import config
            for k, v in config.__dict__.items():
                # Ignore private variables
                if "__" not in k:
                    if k in os.environ and v != os.environ[k] and not quiet:
                        warn("%s is set in the environment as '%s', but overridden in"
                             " config.py as '%s'." % (k, os.environ[k], v))
                        had_warning = True
                    settings[k] = v
            if not had_warning and not quiet:
                show_valid("Valid.")
        except:
            # TODO: Check to see if there's a config.py.dist
            if not quiet:
                warn("no config.py found. This might be ok, but more likely, "
                     "you haven't copied config.py.dist over to config.py")

    if not quiet:
        puts("Verifying settings... ")
    with indent(2):
        # Set defaults
        if "ROOMS" not in settings:
            if not quiet:
                warn("no ROOMS list found in the environment or config. "
                     "This is ok - Will will just join all available rooms.")
            settings["ROOMS"] = None
        if "DEFAULT_ROOM" not in settings and "ROOMS" in settings and settings["ROOMS"] and len(settings["ROOMS"]) > 0:
            if not quiet:
                warn("no DEFAULT_ROOM found in the environment or config. "
                     "Defaulting to '%s', the first one." % settings["ROOMS"][0])
            settings["DEFAULT_ROOM"] = settings["ROOMS"][0]
        if "HTTPSERVER_PORT" not in settings:
            # For heroku
            if "PORT" in os.environ:
                settings["HTTPSERVER_PORT"] = os.environ["PORT"]
            else:
                if not quiet:
                    warn("no HTTPSERVER_PORT found in the environment or config. Defaulting to ':80'.")
                settings["HTTPSERVER_PORT"] = "80"
        if "REDIS_URL" not in settings:
            # For heroku -- detect the common hosted-redis addons.
            if "REDISCLOUD_URL" in os.environ:
                settings["REDIS_URL"] = os.environ["REDISCLOUD_URL"]
                if not quiet:
                    note("WILL_REDIS_URL not set, but it appears you're using RedisCloud. If so, all good.")
            elif "REDISTOGO_URL" in os.environ:
                settings["REDIS_URL"] = os.environ["REDISTOGO_URL"]
                if not quiet:
                    note("WILL_REDIS_URL not set, but it appears you're using RedisToGo. If so, all good.")
            elif "OPENREDIS_URL" in os.environ:
                settings["REDIS_URL"] = os.environ["OPENREDIS_URL"]
                if not quiet:
                    note("WILL_REDIS_URL not set, but it appears you're using OpenRedis. If so, all good.")
            else:
                settings["REDIS_URL"] = "redis://localhost:6379/7"
                if not quiet:
                    note("WILL_REDIS_URL not set. Defaulting to redis://localhost:6379/7.")
        if not settings["REDIS_URL"].startswith("redis://"):
            settings["REDIS_URL"] = "redis://%s" % settings["REDIS_URL"]
        if "PUBLIC_URL" not in settings:
            default_public = "http://localhost:%s" % settings["HTTPSERVER_PORT"]
            settings["PUBLIC_URL"] = default_public
            if not quiet:
                warn("no PUBLIC_URL found in the environment or config. Defaulting to '%s'." % default_public)
        if "V1_TOKEN" not in settings:
            if not quiet:
                warn("no V1_TOKEN found in the environment or config."
                     "This is generally ok, but if you have more than 30 rooms, "
                     "you may recieve rate-limit errors without one.")
        if "REDIS_MAX_CONNECTIONS" not in settings:
            settings["REDIS_MAX_CONNECTIONS"] = 4
            if not quiet:
                note("REDIS_MAX_CONNECTIONS not set. Defaulting to 4.")
        if "TEMPLATE_DIRS" not in settings:
            if "WILL_TEMPLATE_DIRS_PICKLED" in os.environ:
                # All good
                pass
            else:
                settings["TEMPLATE_DIRS"] = []
        if "ALLOW_INSECURE_HIPCHAT_SERVER" in settings and\
            (settings["ALLOW_INSECURE_HIPCHAT_SERVER"] is True or
             settings["ALLOW_INSECURE_HIPCHAT_SERVER"].lower() == "true"):
            warn("You are choosing to run will with SSL disabled. "
                 "This is INSECURE and should NEVER be deployed outside a development environment.")
            settings["ALLOW_INSECURE_HIPCHAT_SERVER"] = True
            settings["REQUESTS_OPTIONS"] = {
                "verify": False,
            }
        else:
            settings["ALLOW_INSECURE_HIPCHAT_SERVER"] = False
            settings["REQUESTS_OPTIONS"] = {}
        if "ADMINS" not in settings:
            settings["ADMINS"] = "*"
        else:
            # Env-provided ADMINS arrive as a ";"-separated string; normalize
            # to a list of lowercased handles.
            if "WILL_ADMINS" in os.environ:
                settings["ADMINS"] = [
                    a.strip().lower()
                    for a in settings.get('ADMINS', '').split(';') if a.strip()
                ]

    # Set them in the module namespace
    for k in sorted(settings, key=lambda x: x[0]):
        if not quiet:
            show_valid(k)
        globals()[k] = settings[k]
# NOTE(review): fragment — the leading assignments and `return wrapper` belong
# to a closure/decorator whose `def` lies outside this excerpt (myfn, articles,
# category, choice, table, execute are defined there). Indentation reconstructed.
    execute[1] = myfn(True, articles)
    execute[2] = myfn(False, articles)
    execute[3] = read_table_chose
    execute[4] = delete_then_again(category)
    execute[0] = table
    execute[choice]()
    return wrapper


def read_table_chose():
    """Show a numbered category menu (index -> category) and dispatch the
    user's choice; 0 always returns to the root `table` menu."""
    index = 0
    dct_choice = dict()
    dct = mg.get_categories_info()
    for category, counts in dct.items():
        index += 1
        dct_choice[index] = category
        puts(colored.blue('{}. {} ->{}'.format(index, category, counts)))
    puts(colored.yellow('任何時候輸入 0 回到根目錄'))
    choice = input_check_int('請輸入選項: ')
    # Build the dispatch table: each menu number runs its category reader.
    execute = dict()
    for key in dct_choice:
        execute[key] = read_table_by_category(dct_choice[key])
    execute[0] = table
    execute[choice]()


if __name__ == '__main__':
    with indent(4, quote=' >'):
        table()
def print_policy(policy):
    """Pretty-print a policy dict in yellow at indent 2; prints the literal
    string "None" when the policy has no truthy statements."""
    has_statements = any(policy['Statement'])
    text = json.dumps(policy, indent=4) if has_statements else "None"
    with indent(2):
        puts(colored.yellow(text))
def DumpConfig(app):
    """Dump the resolved Splunk tooling configuration as colored YAML-like text.

    Sections: optional INVENTORY (only when SplunkInventory.datasource exists),
    APP + ENV (deployments, from SplunkDeployment.*), and NODE (backup/restore,
    from SplunkNodes.envs). Missing keys print as '## UNDEFINED ##'.
    NOTE: Python 2 source (dict.iteritems). Indentation reconstructed from a
    whitespace-collapsed original.
    """
    # Probe for an inventory datasource; bare except simply disables the section.
    showInventory = False
    try:
        app.configuration.get("SplunkInventory.datasource")
        showInventory = True
    except:
        pass
    if showInventory:
        inv = app.configuration.get("SplunkInventory")
        puts(colored.yellow("----------------------------------------------------"))
        puts(colored.yellow("INVENTORY CONFIGURATION"))
        puts(colored.yellow("----------------------------------------------------"))
        with indent():
            # The dict maps config key -> help text printed above the value.
            for ckey, helptxt in {
                    "datasource": "SQL Alchemy datasource (database connection definition)"
            }.iteritems():
                if str(helptxt) != "":
                    puts(colored.blue("# " + str(helptxt)))
                if ckey in inv:
                    puts(colored.red(str(ckey) + ": ") + str(inv[ckey]))
                else:
                    puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
        puts("")
    apps = app.configuration.get("SplunkDeployment.apps")
    puts(colored.yellow("----------------------------------------------------"))
    puts(colored.yellow("APP CONFIGURATION") + " " + colored.blue("# FOR DEPLOYMENTS"))
    puts(colored.yellow("----------------------------------------------------"))
    with indent():
        for appname in apps:
            puts(colored.red("app: ") + colored.green(appname))
            for ckey, helptxt in {
                    "directory": "Local app directory"
            }.iteritems():
                with indent():
                    if str(helptxt) != "":
                        puts(colored.blue("# " + str(helptxt)))
                    if ckey in apps[appname]:
                        puts(colored.red(str(ckey) + ": ") + str(apps[appname][ckey]))
                    else:
                        puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
    puts("")
    envs = app.configuration.get("SplunkDeployment.envs")
    puts(colored.yellow("----------------------------------------------------"))
    puts(colored.yellow("ENV CONFIGURATION") + " " + colored.blue("# FOR DEPLOYMENTS"))
    puts(colored.yellow("----------------------------------------------------"))
    with indent():
        for env in envs:
            puts(colored.red("env: ") + colored.green(env))
            with indent():
                for role in envs[env]:
                    puts(colored.red("role: ") + colored.green(role))
                    with indent():
                        for ckey, helptxt in {
                                "role": "Deployment role"
                        }.iteritems():
                            if str(helptxt) != "":
                                puts(colored.blue("# " + str(helptxt)))
                            if ckey in envs[env][role]:
                                puts(colored.red(str(ckey) + ": ") + str(envs[env][role][ckey]))
                            else:
                                puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
                        # Deployment hooks are lists of command objects.
                        for ckey, helptxt in {
                                "prelocal": "Execute locally before deployment",
                                "preremote": "Execute on the remote site before deployment",
                                "postlocal": "Execute locally after deployment",
                                "postremote": "Execute on the remote site after deployment"
                        }.iteritems():
                            if str(helptxt) != "":
                                puts(colored.blue("# " + str(helptxt)))
                            if ckey in envs[env][role]:
                                puts(colored.red(str(ckey) + ": "))
                                with indent():
                                    for cmd in envs[env][role][ckey]:
                                        puts(colored.red("- ") + str(envs[env][role][ckey][cmd].configuration))
                            else:
                                puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
                        for server in envs[env][role]["servers"]:
                            puts(colored.red("server: ") + server)
                            with indent():
                                for ckey, helptxt in {
                                        "hostname": "Target hostname",
                                        "path": "Path to the remote app directory",
                                        "target": "Target to reload/restart after deployments"
                                }.iteritems():
                                    if str(helptxt) != "":
                                        puts(colored.blue("# " + str(helptxt)))
                                    if ckey in envs[env][role]["servers"][server]:
                                        puts(colored.red(str(ckey) + ": ") + str(envs[env][role]["servers"][server][ckey]))
                                    else:
                                        puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
                                for ckey, helptxt in {
                                        "exclude": "Exclude patterns from synchronization"
                                }.iteritems():
                                    if str(helptxt) != "":
                                        puts(colored.blue("# " + str(helptxt)))
                                    if ckey in envs[env][role]["servers"][server]:
                                        puts(colored.red(str(ckey) + ": "))
                                        with indent():
                                            for i in envs[env][role]["servers"][server][ckey]:
                                                puts(colored.red("- ") + str(envs[env][role]["servers"][server][ckey][i]))
                                    else:
                                        puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
    puts("")
    # NODE section mirrors the ENV section but without the pre/post hooks.
    envs = app.configuration.get("SplunkNodes.envs")
    puts(colored.yellow("----------------------------------------------------"))
    puts(colored.yellow("NODE CONFIGURATION") + " " + colored.blue("# FOR BACKUP/RESTORE & NODE TASKS"))
    puts(colored.yellow("----------------------------------------------------"))
    with indent():
        for env in envs:
            puts(colored.red("env: ") + colored.green(env))
            with indent():
                for role in envs[env]:
                    puts(colored.red("role: ") + colored.green(role))
                    with indent():
                        for ckey, helptxt in {
                                "role": "Deployment role"
                        }.iteritems():
                            if str(helptxt) != "":
                                puts(colored.blue("# " + str(helptxt)))
                            if ckey in envs[env][role]:
                                puts(colored.red(str(ckey) + ": ") + str(envs[env][role][ckey]))
                            else:
                                puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
                        for server in envs[env][role]["servers"]:
                            puts(colored.red("server: ") + server)
                            with indent():
                                for ckey, helptxt in {
                                        "hostname": "Target hostname",
                                        "path": "Path to the remote app directory",
                                        "target": "Target to reload/restart after deployments"
                                }.iteritems():
                                    if str(helptxt) != "":
                                        puts(colored.blue("# " + str(helptxt)))
                                    if ckey in envs[env][role]["servers"][server]:
                                        puts(colored.red(str(ckey) + ": ") + str(envs[env][role]["servers"][server][ckey]))
                                    else:
                                        puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
                                for ckey, helptxt in {
                                        "exclude": "Exclude patterns from synchronization"
                                }.iteritems():
                                    if str(helptxt) != "":
                                        puts(colored.blue("# " + str(helptxt)))
                                    if ckey in envs[env][role]["servers"][server]:
                                        puts(colored.red(str(ckey) + ": "))
                                        with indent():
                                            for i in envs[env][role]["servers"][server][ckey]:
                                                puts(colored.red("- ") + str(envs[env][role]["servers"][server][ckey][i]))
                                    else:
                                        puts(colored.red(str(ckey) + ": ") + colored.blue("## UNDEFINED ##"))
    puts("")
def recursively_check_conflicts(new, old, accept=False):
    """Reconcile a new (Excel) value with an old (document) value.

    Recurses into dicts and lists on request; for leaf values the user
    picks the new ("green") value, the old ("red") value, or types a
    replacement.  With accept=True the new value always wins without
    prompting.  Returns the reconciled value; dicts/lists are modified
    in place.  `old is None` or equal values short-circuit to `new`.
    """
    puts(u'checking {}'.format(new))
    # Bug fix: the old `hasattr(new, 'name')` test could never be true for
    # a plain dict (and subscripting a non-dict object would crash); test
    # for the dict key directly, which is what the subscript expects.
    if isinstance(new, dict) and 'name' in new:
        puts(u'Person: {}'.format(new['name']))
    with indent(2, quote=" | "):
        if old is None:
            return new
        if new == old:
            return new
        # check if, eg 1990 == '1990' == u'1990'
        u_new = unicode(new)
        u_old = unicode(old)
        if u_new == u_old:
            return new
        puts(green('GREEN (FROM EXCEL):'))
        puts(green(u_new))
        puts(red('RED (FROM OLD VERSION OF DOCUMENT):'))
        puts(red(u_old))
        if accept:
            puts(green('accepting Excel version (green)'))
            return new
        while True:
            reply = raw_input(
                'Should we keep [G]REEN, [R]ED? \n'
                'Or should we [C]ONTINUE recursing, '
                'so you can make a more granular decision? \n'
                'OR Would you prefer to enter some [O]THER value (will be text)? \n'
                '[g/r/c/o]: ').lower()
            # Bug fix: guard against empty input -- the old `[0]` raised
            # IndexError when the user just pressed Enter.
            selection = reply[0] if reply else ''
            if selection in ['g', 'r', 'c', 'o']:
                if selection == 'o':
                    other = raw_input('enter a new value: \n')
                    puts('Do you confirm the following?')
                    puts(other)
                    if raw_input('[y/n]: ').lower().startswith('y'):
                        return other
                    # Bug fix: the old code broke out of the loop here and
                    # fell through to `return new`, returning the
                    # UNCONFIRMED text; re-prompt instead.
                    continue
                break
            puts('-> You must enter g, r, c, or o!')
        if selection == 'g':
            return new
        elif selection == 'r':
            return old
        elif selection == 'c':
            if isinstance(new, dict) and isinstance(old, dict):
                checked = []
                for k, v in new.items():
                    new[k] = recursively_check_conflicts(v, old.get(k, None))
                    checked.append(k)
                for k, v in old.items():
                    if k in checked:
                        continue
                    v = recursively_check_conflicts(new.get(k, None), v)
                    if v:
                        new[k] = v
            elif isinstance(new, list) and isinstance(old, list):
                for i, item in enumerate(new):
                    if item in old:
                        continue
                    # Bug fix: lists of unequal length used to raise
                    # IndexError on old[i]; past the end there is no old
                    # counterpart, so pass None (which keeps `item`).
                    new[i] = recursively_check_conflicts(
                        item, old[i] if i < len(old) else None)
                for i, item in enumerate(old):
                    if item in new:
                        continue
                    # Same guard for new[i] when old is the longer list.
                    item = recursively_check_conflicts(
                        new[i] if i < len(new) else None, item)
                    if item:
                        new.append(item)
            else:
                # int, str, or unicode (probably) -- nothing to recurse into
                puts(red('-> You cannot recurse further. Try again.'))
                return recursively_check_conflicts(new, old)
        return new
p1_gt_set[p1_gt_set == 3] = 1 p2_gt_set[p2_gt_set == 3] = 1 dp_set[dp_set < 0] = 0 # Result dict vcf_gt_out = defaultdict( lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list)))) sample_gt_matrix = np.array([]) chrom_to_factor = {y: x for x, y in enumerate(set(chromosome))} sample_to_factor = {y: x for x, y in enumerate(v.samples)} hmm_gt_set = [] if args["--vcf-out"] and args["<vcf>"] == "-": with indent(2): exit(puts_err(colored.blue("Cannot use vcf-out with stdin."))) if not args["--vcf-out"]: print( "chrom\tstart\tend\tsample\tgt\tsupporting_sites\tsites\tDP\tswitches\tCIGAR" ) s = 0 tree = {} for sample, column in zip(v.samples, gt_set): sample_gt = zip(chromosome, positions, column == p2_gt_set, column) sample_gt = [x[:3] for x in sample_gt if x[3] in [0, 1]] sequence = [to_model[x[2]] for x in sample_gt] model = generate_model(float(args['--state']),
def processDataFile(dataSetFileName, dataSetFilePath, dataOutputPath, dataSetData, settings, fileVersion): puts("Processing Data File %s:" % dataSetFilePath) fileNameBase = os.path.splitext(dataSetFileName)[0] with indent(4, quote=' >'): dataSetData['files'][dataSetFileName] = filterDataFile( settings['min'], settings['max'], dataSetFilePath, dataSetData['dir']) dataSetFileData = dataSetData['files'][dataSetFileName] puts('Data Filtered') dataFiltered = zip(dataSetFileData['raman'], dataSetFileData['intensity']['filtered']) puts('Baselining') baselinedData, airData = baselineData( dataSetFileData['intensity']['filtered'], settings['smooth'], settings['porder'], settings['max_it']) for i, value in enumerate(baselinedData): try: dataSetData['dir']['intensity']['baselined'][i].append(value) except IndexError: dataSetData['dir']['intensity']['baselined'].append([value]) except KeyError: pass dataSetFileData['intensity']['airpls'] = baselinedData airMatrix = zip(dataSetFileData['raman'], baselinedData) baselineMatrix = zip(dataSetFileData['raman'], airData) puts('Writing Data') with indent(4, quote='>'): for format in settings['formats']: puts("Data Format:" + format) with indent(4, quote='>'): formatPath = os.path.join(dataOutputPath, format) try: os.mkdir(formatPath) except OSError as e: if e[0] == 17 or e[0] == 183: pass else: print e[0] raise dataFilteredFileName = "{base}_filtered_v%d.{ext}".format( base=fileNameBase, ext=format) dataFilteredPath = getVersionPath(formatPath, dataFilteredFileName, fileVersion) writeData( formatData(dataFiltered, format, settings['prec']), dataFilteredPath) puts('Saved Filtered To: %s' % dataFilteredPath) dataFileNameAir = "{base}_airPLS_v%d.{ext}".format( base=fileNameBase, ext=format) dataPathAir = getVersionPath(formatPath, dataFileNameAir, fileVersion) baselinePath = getVersionPath(formatPath, 'air_baseline_v%d.csv', fileVersion) writeData( formatData(baselineMatrix, format, settings['prec']), baselinePath) puts('Saved Baseline 
To: %s' % baselinePath) writeData(formatData(airMatrix, format, settings['prec']), dataPathAir) puts('Saved Baseline Subtracted To: %s' % dataPathAir) putSeparator('-', 30)