def start(self):
    os.system("clear")
    while True:
        print green("How can I help you, %s?" % self.user)
        print yellow("1. Transfer some money\n"
                     " 2. Pay\n"
                     " 3. Retrieve\n"
                     " 4. Show balance\n"
                     " 5. Logout\n")
        option = int(raw_input(blue("I want to: ")))
        if option == 1 or option == 2:
            to = raw_input(red("to: "))
            money = raw_input(red("sum: "))
            try:
                print self.bank.transfer_from(self.user, to, money)
            except ValueError as ve:
                print red(ve)
        elif option == 3:
            money = raw_input(red("sum: "))
            try:
                if self.bank.transfer_from(self.user, self.name, money):
                    print green("Operation finished successfully!\n")
                else:
                    print red("Something went wrong... try again later\n")
            except ValueError as ve:
                print red(ve)
        elif option == 4:
            print self.bank.show_balance(self.user)
        elif option == 5:
            self.user = None
            return
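# The snippets in this file all assume small terminal-color helpers
# (red, green, yellow, blue, cyan, magenta, white, ...), sometimes via a
# `utils` module. The actual helpers are not shown in the source; below is
# a minimal sketch of what they might look like, assuming plain ANSI escape
# codes and an optional bold flag (seen in calls like green(n, True)):

def _ansi(code):
    def wrap(text, bold=False):
        prefix = '\033[1;' if bold else '\033[0;'
        return '%s%sm%s\033[0m' % (prefix, code, text)
    return wrap

red, green, yellow, blue = _ansi(31), _ansi(32), _ansi(33), _ansi(34)
magenta, cyan, white = _ansi(35), _ansi(36), _ansi(37)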
def show_diff(old, new):
    for k, v in new.iteritems():
        if k in old and v == old[k]:
            continue
        if k in old and v != old[k]:
            red(" - ['{}'] = {}".format(k, old[k]))
        green(" + ['{}'] = {}".format(k, v))
def open(self):
    os.system("clear")
    print blue("Welcome to %s" % green(self.name))
    print blue("========================")
    while True:
        print red("How may I help you?")
        print yellow("1. Buy a ticket\n"
                     " 2. Book a ticket by phone\n"
                     " 3. Book a ticket\n"
                     " 4. Cancel booking\n"
                     " 5. See how much money you have\n"
                     " 6. Goodbye!\n")
        option = raw_input(green("I want to: "))
        if option == "1":
            self.buy_ticket()
        elif option == "2":
            self.book_ticket_by_phone()
        elif option == "3":
            self.book_ticket()
        elif option == "4":
            self.cancel_booking()
        elif option == "5":
            self.money()
        elif option == "6":
            break
def _status(self, final=False):
    if not final:
        new = (green(len(self._completed)), white(len(self._running)),
               yellow(len(self._queued)),
               green('finished'), white('running'), yellow('queued'))
        if hasattr(self, 'last_status') and new == self.last_status:
            return
        self.last_status = new
        print WHIPE, "[%s/%s/%s] %s, %s, %s" % new
    else:
        print "\n[ %s OK / %s ERROR ] in %s seconds" % (
            green(self._num_of_jobs - self._errors, True),
            red(self._errors),
            time.time() - self._time_start)
        if self._errors:
            print red("Failures:", True)
            for job in self._completed:
                if job.exitcode != 0:
                    print red(job.name)
    sys.stdout.flush()
def _check_for_crash(self):
    status = self._get_status()
    if status != "LIVE":
        print(status)
        sys.exit(utils.red('ESPv2 crash'))
    print utils.green('No crashes detected.')
def run_all_tests(self):
    for _ in range(FLAGS.runs):
        self._run_fuzz_tests()
    if self._unexpected_errors > 0:
        sys.exit(utils.red('Fuzz test failed.'))
    else:
        print utils.green('Fuzz test passed.')
def run_all_tests(self):
    shelf1 = {'name': 'shelves/1', 'theme': 'Fiction'}
    shelf2 = {'name': 'shelves/2', 'theme': 'Fantasy'}
    book13 = {
        'name': 'shelves/1/books/3',
        'author': 'Neal Stephenson',
        'title': 'REAMDE'
    }
    book24 = {
        'name': 'shelves/2/books/4',
        'author': 'George R.R. Martin',
        'title': 'A Game of Thrones'
    }
    self.verify_shelf(shelf1)
    self.verify_shelf(shelf2)
    self.verify_list_shelves([shelf1, shelf2])
    self.verify_list_books(shelf1['name'], [book13])
    self.verify_list_books(shelf2['name'], [book24])
    self.verify_book(book13)
    self.verify_book(book24)
    self.verify_jwt_locations()
    self.verify_allow_cors_passthrough()
    if self._failed_tests:
        sys.exit(utils.red('%d tests passed, %d tests failed.' %
                           (self._passed_tests, self._failed_tests)))
    else:
        print utils.green('All %d tests passed' % self._passed_tests)
def run_all_tests(self):
    self.verify_key_restriction()
    if self._failed_tests:
        sys.exit(utils.red('%d tests passed, %d tests failed.' %
                           (self._passed_tests, self._failed_tests)))
    else:
        print utils.green('All %d tests passed' % self._passed_tests)
def run_all_tests(self):
    self.verify_quota_control()
    if self._failed_tests:
        sys.exit(utils.red('%d tests passed, %d tests failed.' %
                           (self._passed_tests, self._failed_tests)))
    else:
        print utils.green('All %d tests passed' % self._passed_tests)
def main():
    parser = argparse.ArgumentParser(
        description='Program that takes as an argument the json -->.txt<-- '
                    'file and makes a root file out of it, which can be used '
                    'with the main framework.')
    parser.add_argument('--input', '-i', required=True, type=str,
                        help='Name of the json converted to a .txt file')
    parser.add_argument('--output', '-o', type=str,
                        help='Name of the target .root file')
    args = parser.parse_args()

    infilename = os.path.abspath(args.input)
    if not infilename.endswith('.txt'):
        raise ValueError('Infilename does not end with .txt')
    outfilename = os.path.abspath(args.output) if args.output is not None \
        else '.root'.join(infilename.rsplit('.txt', 1))

    with open(infilename, 'r') as f:
        dict_in_json = safe_load(f)

    outfile = TFile(outfilename, 'RECREATE')
    outtree = TTree("LumiTree", "LumiTree")
    t_run = array('i', [0])
    t_lb_low = array('i', [0])
    t_lb_high = array('i', [0])
    outtree.Branch("run", t_run, 'run number/i')
    outtree.Branch("lb_low", t_lb_low,
                   'lumi block low number (including this one)/i')
    outtree.Branch("lb_high", t_lb_high,
                   'lumi block high number (including this one)/i')

    # Fill one entry per (run, lumiblock-range) pair
    for run in dict_in_json:
        parts = dict_in_json[run]
        for lumiblocks in parts:
            t_run[0] = int(run)
            t_lb_low[0] = int(lumiblocks[0])
            t_lb_high[0] = int(lumiblocks[1])
            outtree.Fill()
    outfile.Write()
    outfile.Close()
    print green('--> Successfully created lumifile \'%s\'' % (outfilename))
def run(self):
    if self.is_negative:
        if self._from != 10:
            self.log.error(red("I don't know how to convert negative numbers "
                               "if they are not in decimal base"))
            return False
        else:
            number = self.transform_negative()
    else:
        number = self.transform_positive()
    print green("Your number is %s" % red(number))
def process_tick_top(self, tick, last_row):
    volok = (last_row['V'] < last_row['VMA20'])
    priceok = (last_row['H'] > last_row['BBU'])
    self.log('%s(%.2f < %.2f) %s(%s > %s)' %
             (green('volok') if volok else red('volko'),
              last_row['V'], last_row['VMA20'],
              green('priceok') if priceok else red('priceko'),
              last_row['H'], last_row['BBU']))
    if priceok:
        if volok:
            self.sell(tick)
        else:
            self.set_stop(tick, last_row['BBM'])
def login(self):
    print "\n\n"
    # The two prompt strings below were masked in the source; they are
    # reconstructed from context.
    username = raw_input(green("username: "))
    password = raw_input(red("password: "))
    with open("db/clients.txt", "r") as f:
        clients = f.read()
    credentials = "%s:%s" % (username, sha1(password).hexdigest())
    if credentials in clients:
        print green("Welcome!")
        self.user = username
        self.start()
    else:
        print red("Wrong credentials!")
def process_tick_middle(self, tick, last_row):
    volok = (last_row['V'] > last_row['VMA20'])
    priceok = (tick['H'] > last_row['BBM'])
    self.log('%s(%.2f > %.2f) %s(%s > %s)' %
             (green('volok') if volok else red('volko'),
              last_row['V'], last_row['VMA20'],
              green('priceok') if priceok else red('priceko'),
              tick['H'], last_row['BBM']))
    if priceok:
        if volok:
            self.status = 'top'
            self.set_stop(tick, self.entry)
        else:
            self.sell(tick)
def show_unencrypted_diff(diff_part, password_file=None):
    intense(get_head(diff_part).strip())
    old, new = decrypt_diff(diff_part, password_file)
    diff = difflib.unified_diff(old.split('\n'), new.split('\n'),
                                lineterm='')
    # we'll take the git filenames from git's diff output rather than
    # difflib
    for line in islice(diff, 2, None):
        if line.startswith('-'):
            red(line)
        elif line.startswith('+'):
            green(line)
        elif line.startswith('@@'):
            cyan(line)
        else:
            print line
def handle(socket, address):
    print green("Client connected")
    while True:
        data = socket.recv(2048)
        if not data:
            print red("Client disconnected")
            return
        message = parser(data)
        if message is None:
            return
        if message[0] == "transfer":
            transfer(message[1], socket)
def job_status(job):
    status = blue("STOPPED")
    if not job.is_enabled():
        status = red("DISABLED")
    if job.is_running():
        status = green("RUNNING")
    return status
def transform_model(self, model_klass, filename=None, factories=None):
    self.factories = factories
    if filename:
        self.filename = filename
    else:
        self.filename = self.get_filename_for_klass(model_klass)

    # If the filename exists, prompt to overwrite
    if not self.overwrite:
        if os.path.isfile(self.filename):
            char = raw_input('File "%s" exists. Overwrite? [Y]/n: '
                             % self.filename)
            if char != 'Y' and char != '':
                sys.exit(0)

    self.file = open(self.filename, 'w')

    # Generate the output
    self.pre_transform(1)
    # Generate python values for the class
    obj = self.python_obj_for_klass(model_klass)
    # Run the transformation, returns string output
    transformed = self.transform_obj(obj)
    self.file.write(transformed)
    self.post_transform(1)
    self.file.write('\n')
    self.file.close()
    self.post_process()
    print '%s generated %s' % (green('--'), self.filename)
def debug(self, action_name, bindings_string):
    """Test execution of the rule associated with the action name,
    given the binding string."""
    rule = self.rule_by_action_name(action_name=action_name)
    if not rule:
        raise Exception(
            f"Couldn't find rule with action name '{action_name}'")
    bindings = {}
    for binding in bindings_string.replace(', ', ',').split(','):
        role_name, entity_name = binding.split('=')
        entity = self.entity_by_name(entity_name=entity_name)
        if not entity:
            raise Exception(f"Couldn't find entity named '{entity_name}'")
        bindings[role_name] = entity
    config.VERBOSITY = 3
    print(utils.green(f"\n\nTesting rule ${action_name} with debugging on."))
    if self._triggers(rule=rule, bindings=bindings):
        print("  All preconditions hold!")
        print(f"  Testing rule probability ({rule.probability})")
        if random.random() < rule.probability:
            print("  Triggered rule!")
            print("  But I'm not executing it, because this is for "
                  "debugging only.")
            self._bind_optional_roles(rule=rule, bindings=bindings)
        else:
            print("  Did not trigger rule (probability not met)")
    return
def open(self):
    os.system("clear")
    print blue("Welcome to ") + yellow(self.bank.name)
    print blue("========================\n\n")
    while True:
        print green("Please choose one of the actions:")
        print red("1. Register\n"
                  " 2. Login\n\n")
        option = int(raw_input(yellow("I want to: ")))
        if option == 1:
            self.register()
        elif option == 2:
            self.login()
        else:
            print red("I don't understand you! Please repeat")
def transform_model_array(self, model_klass, n, filename=None,
                          factories=None):
    self.factories = factories
    if filename:
        self.filename = filename
    else:
        self.filename = self.get_plural_filename_for_klass(model_klass)

    # Open for writing (the file is written below)
    self.file = open(self.filename, 'w')

    # Generate the output
    self.pre_transform(n)
    for i in range(n):
        # Set a counter variable we can use if we need to
        self.model_index = i
        # Generate python values for the class
        obj = self.python_obj_for_klass(model_klass)
        # Run the transformation, returns string output
        transformed = self.transform_obj(obj)
        if i < n - 1:
            transformed += ',\n'
        self.file.write(transformed)
    self.post_transform(n)
    self.file.write('\n')
    self.file.close()
    self.post_process()
    print '%s generated %s' % (green('--'), self.filename)
def save_job_to_file(self, ext="yaml"):
    try:
        os.makedirs(self.options["output_dir"])
    except OSError:
        # output directory already exists
        pass
    file = os.path.join(self.options["output_dir"],
                        self.job["job_name"] + "." + ext)
    with open(file, 'w') as f:
        f.write(self.job_template.render(self.job))
    print(green("File saved to %s" % file))
def INIT():
    '''Init repo `fabsetup_custom` with custom tasks and config.'''
    # decorator @needs_repo_fabsetup_custom makes the job
    print(green('Initialization finished\n'))
    print('List available tasks: ' + blue('fab -l'))
    print(' '.join([
        'Show details of a task: `fab -d <task>`, eg.:',
        blue('fab -d setup_webserver'),
    ]))
def process_tick_rsi(self, tick, last_row):
    rsiok = (last_row['RSI'] > 70)
    self.log('%s(%.1f > %f)' %
             (green('rsiok') if rsiok else red('rsiko'),
              last_row['RSI'], 70))
    if rsiok:
        self.sell(tick)
    else:
        self.set_stop(last_row['ATR_STP'])
def print(self, n=-1, highlight_rows=None):
    if highlight_rows is None:
        highlight_rows = []
    elif type(highlight_rows) == int:
        highlight_rows = [highlight_rows]
    if len(self.rows) == 0:
        print(green(join(self.head, " ")))
        print("(empty)")
        print()
    else:
        def get_col_width(lst):
            return max(list(map(lambda x: len(str(x)), lst)))

        col_width_list = list(map(lambda col: get_col_width(self[col]),
                                  self.head))
        delta_list = []
        for i in range(len(self.head)):
            delta = col_width_list[i] - len(self.head[i])
            delta_list.append(delta)
        head = ""
        for index, col_name in enumerate(self.head):
            head += col_name
            head += " " * max(delta_list[index] + 2, 2)
        print(green(head))
        i = 0
        for r in self.rows:
            if i == n:
                break
            else:
                i += 1
            _row = ""
            for j in range(len(self.head)):
                head_len = len(self.head[j] + " " * max(delta_list[j] + 2, 2))
                space_num = head_len - len(str(r[j]))
                _row += str(r[j]) + " " * space_num
            if i - 1 in highlight_rows:
                print(yellow(_row))
            else:
                print(_row)
        print()
    return self
def register(self):
    print "\n\n"
    # The two prompt strings below were masked in the source; they are
    # reconstructed from context.
    username = raw_input(green("username: "))
    password = raw_input(red("password: "))
    with open("db/clients.txt", "ab+") as f:
        clients = f.read()
        if "%s:" % username in clients:
            print red("Username already taken")
            return
        f.write("%s:%s\n" % (username, sha1(password).hexdigest()))
    initial_deposit = raw_input(green("Initial deposit: "))
    with open("clients/%s" % sha1(username).hexdigest(), "w") as g:
        g.write(initial_deposit)
    print green("Successfully registered!")
    self.user = username
    self.start()
def job_list(self):
    print(blue('Fetching job list for %s...' % self.url))
    table_data = [['Name', 'Status', 'Url']]
    for job_name, job_instance in self.server.get_jobs():
        table_data.append([
            job_instance.name,
            green('RUNNING') if job_instance.is_running() else blue('STOPPED'),
            job_instance.url
        ])
    format_data(table_data)
    print("Jobs found: ", len(self.server.get_jobs_list()))
def counts(self):
    if self.jsonify:
        utils.to_json(self.stats["counts"])
    else:
        counts = self.stats["counts"]

        def cell(color, owned, total, per):
            # "owned/total (percentage)" rendered in the given color
            return "%s/%s (%s)" % (color(counts[owned]), color(counts[total]),
                                   color("%.2f%%" % counts[per]))

        header = ["#", "Total", "TryHackMe", "HackTheBox", "VulnHub",
                  "OSCPlike"]
        rows = [
            "___".join([
                utils.green("Total"),
                cell(utils.green, "ownedtotal", "totaltotal", "pertotal"),
                cell(utils.green, "ownedthm", "totalthm", "perthm"),
                cell(utils.green, "ownedhtb", "totalhtb", "perhtb"),
                cell(utils.green, "ownedvh", "totalvh", "pervh"),
                cell(utils.red, "ownedoscplike", "totaloscplike",
                     "peroscplike"),
            ]),
            "___".join([
                utils.yellow("Windows"),
                cell(utils.yellow, "ownedwindows", "totalwindows",
                     "perwindows"),
                cell(utils.yellow, "ownedthmwindows", "thmwindows",
                     "perthmwindows"),
                cell(utils.yellow, "ownedhtbwindows", "htbwindows",
                     "perhtbwindows"),
                cell(utils.yellow, "ownedvhwindows", "vhwindows",
                     "pervhwindows"),
                cell(utils.yellow, "ownedoscplikewindows", "oscplikewindows",
                     "peroscplikewindows"),
            ]),
            "___".join([
                utils.magenta("*nix"),
                cell(utils.magenta, "ownednix", "totalnix", "pernix"),
                cell(utils.magenta, "ownedthmnix", "thmnix", "perthmnix"),
                cell(utils.magenta, "ownedhtbnix", "htbnix", "perhtbnix"),
                cell(utils.magenta, "ownedvhnix", "vhnix", "pervhnix"),
                cell(utils.magenta, "ownedoscplikenix", "oscplikenix",
                     "peroscplikenix"),
            ]),
            "___".join([
                utils.red("OSCPlike"),
                cell(utils.red, "ownedoscplike", "totaloscplike",
                     "peroscplike"),
                cell(utils.red, "ownedthmoscplike", "thmoscplike",
                     "perthmoscplike"),
                cell(utils.red, "ownedhtboscplike", "htboscplike",
                     "perhtboscplike"),
                cell(utils.red, "ownedvhoscplike", "vhoscplike",
                     "pervhoscplike"),
                utils.red(""),
            ]),
        ]
        aligndict = {"#": "r", "Total": "l", "TryHackMe": "l",
                     "HackTheBox": "l", "VulnHub": "l", "OSCPlike": "l"}
        utils.to_table(header, rows, delim="___", aligndict=aligndict)
def process_tick_searching(self, tick, last_row):
    volok = (last_row['V'] < last_row['VMA20'])
    priceok = (tick['L'] < (last_row['BBL'] * self.selling_pressure(last_row)))
    bbok = (last_row['BBW'] > self.percent)
    self.log('%s(%.2f < %.2f) %s(%s < %s) %s(%.2f > %.2f)' %
             (green('volok') if volok else red('volko'),
              last_row['V'], last_row['VMA20'],
              green('priceok') if priceok else red('priceko'),
              btc2str(tick['L']), btc2str(last_row['BBL']),
              green('bbok') if bbok else red('bbko'),
              last_row['BBW'], self.percent))
    if volok and priceok and bbok:
        self.status = 'buying'
        self.entry = last_row['L']
        self.quantity = self.amount / self.entry
        self.send_order(self.exch.buy_limit, self.pair,
                        self.quantity, self.entry)
        self.log('buying %f @ %s' % (self.quantity, btc2str(self.entry)))
def print_diff(dbdict, data, removes=True):
    "Print a (hopefully) human readable list of changes."
    # TODO: needs work, especially on multiline properties,
    # empty properties (should probably never be allowed but still)
    # and probably more corner cases. Also the output format could
    # use some tweaking.
    try:
        from collections import defaultdict
        import jsonpatch
        from jsonpointer import resolve_pointer, JsonPointerException
        ops = defaultdict(int)
        diff = jsonpatch.make_patch(dbdict, data)
        for d in diff:
            try:
                ptr = " > ".join(decode_pointer(d["path"]))
                if d["op"] == "replace":
                    print yellow("REPLACE:")
                    print yellow(ptr)
                    db_value = resolve_pointer(dbdict, d["path"])
                    print red(dump_value(db_value))
                    print green(dump_value(d["value"]))
                    ops["replace"] += 1
                if d["op"] == "add":
                    print green("ADD:")
                    print green(ptr)
                    if d["value"]:
                        print green(dump_value(d["value"]))
                    ops["add"] += 1
                if removes and d["op"] == "remove":
                    print red("REMOVE:")
                    print red(ptr)
                    value = resolve_pointer(dbdict, d["path"])
                    if value:
                        print red(dump_value(value))
                    ops["remove"] += 1
            except JsonPointerException as e:
                print " - Error parsing diff - report this!: %s" % e
        # The following output is a bit misleading, removing for now
        # print "Total: %d operations (%d replace, %d add, %d remove)" % (
        #     sum(ops.values()), ops["replace"], ops["add"], ops["remove"])
        return diff
    except ImportError:
        print >> sys.stderr, ("'jsonpatch' module not available - "
                              "no diff printouts for you! (Try -d instead.)")
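# A minimal usage sketch for print_diff, assuming the jsonpatch and
# jsonpointer packages are installed and that the decode_pointer/dump_value
# helpers (defined elsewhere in the original code) behave roughly like
# "split a JSON pointer into path segments" and "pretty-print a value".
# The dicts below are made up for illustration:
#
#   dbdict = {"properties": {"host": "alpha"}}
#   data = {"properties": {"host": "beta", "port": 10000}}
#   print_diff(dbdict, data)
#   # prints a REPLACE entry for properties > host (alpha -> beta)
#   # and an ADD entry for properties > port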
def do_switch_region(self, region):
    """ Switch to a different region """
    from botosh import available_contexts
    if not self._ready:
        print error("boto has not been configured with sufficient "
                    "credentials. Please configure boto first")
    if not region:
        print error('No region provided.')
    if self.context is None:
        print error('No context provided. Please `set_context` to one of: %s'
                    % green(', '.join(available_contexts.keys())))
    else:
        if self.context.region == region:
            return
        regions = self.context._valid_regions
        if region not in regions:
            print error('Invalid region `%s`. Please `switch_region` to one '
                        'of: %s' % (region, green(', '.join(regions))))
        else:
            self.context.conn = self.context.region_switcher(region)
def get_str(self, models_by_name=None, print_name=True):
    s = ''
    if print_name:
        s = '%s : %s \t<%s>' % (red(self.name.ljust(20)),
                                green(self.superclass.ljust(15)),
                                blue(self.filename))
    for field in self.fields:
        s += '\n%s' % field.__repr__()
    if self.superclass != 'MTLModel' and models_by_name:
        # Also print the fields of the superclass
        s += '\n%s' % models_by_name[self.superclass].get_str(
            models_by_name=models_by_name, print_name=False)
    return s
def send_to_lava(self):
    try:
        dev = self.get_device_status()
        if dev["status"] == "offline":
            print(red("Device seems offline, not sending the job"))
            return
    except Exception as e:
        print(red(repr(e)))
        print(red("Not sending the job"))
        return
    print("Sending to LAVA")
    job_str = self.job_template.render(self.job)
    ret = utils.get_connection(**self.options).scheduler.submit_job(job_str)
    try:
        for r in ret:
            print(green("Job sent (id: %s)" % r))
            print("Potential working URL: ",
                  "%s/scheduler/job/%s" % (self.options['web_ui_address'], r))
    except TypeError:
        # submit_job returned a single job id rather than a list
        print(green("Job sent (id: %s)" % ret))
        print("Potential working URL: ",
              "%s/scheduler/job/%s" % (self.options['web_ui_address'], ret))
async def download_episode(session: ClientSession, name: str, link: str,
                           path: str, total_bar: tqdm, pool: asyncio.Queue):
    """Download the episode to local storage"""
    file_target = f'{path}/{name}.mp4'
    try:
        # Limit ourself to max concurrent downloads
        async with get_connection(pool):
            if os.path.isfile(file_target):
                raise FileExistsError(f'{name} already exists in the folder')
            req_method = session.post if Servers.MP4UPLOAD in link else session.get
            async with req_method(link) as resp:
                if resp.status != 200:
                    raise RuntimeError(
                        f'Got a bad response from the server for {name}: '
                        f'{resp.status}')
                file_size = int(resp.headers.get('content-length'))
                with tqdm(desc=name, total=file_size, unit='B',
                          unit_scale=True, unit_divisor=1024, leave=False,
                          ncols=BAR_WIDTH) as progress_bar:
                    # Write to the .mp4 target checked above (the original
                    # wrote to a pathname without the extension)
                    async with aiofiles.open(file_target, mode='wb') as file:
                        async for chunk, _ in ChunkTupleAsyncStreamIterator(
                                resp.content):
                            await file.write(chunk)
                            progress_bar.update(len(chunk))
                    # Mark success and wait a bit before removing the bar
                    progress_bar.set_postfix_str(green('✔️'))
                    await asyncio.sleep(5)
    except Exception as e:
        tqdm.write(red(f'Failed to download {name} : {e} ❌'))
        if DEBUG:
            tqdm.write(red(''.join(
                traceback.format_exception(None, e, e.__traceback__))))
        try:
            os.remove(file_target)
        except FileNotFoundError:
            pass
        return False
    finally:
        total_bar.update(1)
    return True
def on_post_item(self, response, **kwargs):
    input_file = kwargs.get('input_file', '')
    source = kwargs.get('source', '')
    filename = self.format_filename(input_file.replace(source, ''))
    if response.success:
        self._optimized += 1
        self._input_bytes += response.input_size
        self._output_bytes += response.output_size
        print("%s %16s %37s" % (filename, green("OK"), response.output_ratio))
    else:
        self._failed += 1
        print("%s %18s %30s" % (filename, red("FAIL"), "-"))
def process_tick_searching(self, tick, last_row):
    RSI_THRESHOLD = 30
    CORPSE_THRESHOLD = 0.2
    rsiok = (last_row['RSI'] <= RSI_THRESHOLD)
    priceok = (tick['L'] < last_row['BBL'])
    bbok = (last_row['BBW'] >= self.percent)
    volok = (last_row['V'] < last_row['VMA20'])
    if last_row['H'] == last_row['C']:
        candle_corpse = 1
    else:
        candle_corpse = ((last_row['H'] - min(last_row['C'], last_row['O'])) /
                         (last_row['H'] - last_row['L']))
    candleok = (candle_corpse <= CORPSE_THRESHOLD) or volok
    # the first two operators in the log format are swapped to match the
    # comparisons actually computed above
    self.log('%s(%s < %s) %s(%.1f <= %f) %s(%.2f >= %.2f) '
             '%s(%.2f <= %.2f or %s %.2f < %.2f)' %
             (green('priceok') if priceok else red('priceko'),
              btc2str(tick['L']), btc2str(last_row['BBL']),
              green('rsiok') if rsiok else red('rsiko'),
              last_row['RSI'], RSI_THRESHOLD,
              green('bbok') if bbok else red('bbko'),
              last_row['BBW'], self.percent,
              green('candleok') if candleok else red('candleko'),
              candle_corpse, CORPSE_THRESHOLD,
              green('volok') if volok else red('volko'),
              last_row['V'], last_row['VMA20']))
    if rsiok and priceok and bbok and candleok:
        self.status = 'buying'
        self.entry = last_row['L']
        self.quantity = self.amount / self.entry
        self.send_order(self.exch.buy_limit, self.pair,
                        self.quantity, self.entry)
        self.log('buying %f @ %s' % (self.quantity, btc2str(self.entry)))
        self.middle = last_row['BBM']
        self.top = last_row['BBU']
def do_listen(self, *args):
    """Start a server on localhost to listen for connections on a given port."""
    host = self._input(utils.yellow('[+] Enter the host IP > '))
    port = self._input(utils.yellow('[+] Enter the port > '))
    # Create a socket object from factory to accept connections.
    self.__socket = SocketFactory.server(listen=host, port=port)
    # Inform user that the server has started.
    print(utils.green('[*] Started a remote server on {}:{}'.format(host, port)))
    # Start to accept the incoming connections.
    self.connection_acceptor.start()
    # Record that the server has started.
    self.server_started = True
def add(self, predicate, bindings):
    """Add a new fact into the working memory."""
    try:
        fact = ' '.join(e if type(e) is str else bindings[e.name].name
                        for e in predicate.template)
    except KeyError:
        # The predicate references an optional role that wasn't bound
        return
    if not fact.strip():
        return
    self.facts.add(fact)
    try:
        self._facts_by_first_character[fact[0]].add(fact)
    except KeyError:
        self._facts_by_first_character[fact[0]] = {fact}
    if config.VERBOSITY >= 2:
        print(utils.green(f"  {fact}"))
def process_tick_nine(self, tick):
    if tick['T'].hour == 9:
        if tick['C'] >= self.midnight_price * 1.05:
            self.status = 'buying'
            self.entry = tick['C']
            self.quantity = self.amount / self.entry
            self.send_order(self.exch.buy_limit, self.pair,
                            self.quantity, self.entry)
            self.log('%s %f @ %s' % (green('buying'), self.quantity,
                                     btc2str(self.entry)))
        else:
            # price is below the 5% threshold here, so log '<' (the
            # original message printed '>=')
            self.log('%s: %s < %s. retrying tomorrow. %.3f' %
                     (red('no up trend'), btc2str(tick['C']),
                      btc2str(self.midnight_price * 1.05), self.amount))
            self.status = 'midnight'
def do_set_context(self, context):
    """ Set/Switch to a different context """
    from botosh import available_contexts
    available_contexts_str = green(', '.join(available_contexts.keys()))
    if not self._ready:
        print error("boto has not been configured with sufficient "
                    "credentials. Please configure boto first")
    if not context or not self._ready:
        print error('No context provided. Please `set_context` to one of: %s'
                    % available_contexts_str)
    elif context in available_contexts:
        if context not in _context_cache:
            old_region = self.context.region if self.context else None
            new_context = available_contexts[context]()
            _context_cache[context] = new_context
            new_context.context = new_context
            if old_region and old_region in new_context._valid_regions:
                new_context.conn = new_context.region_switcher(old_region)
        _context_cache[context].cmdloop()
    else:
        print error('Invalid context `%s`. Please `set_context` to one of: %s'
                    % (context, available_contexts_str))
async def run(anime_url: str) -> None:
    print("Trying to bypass cloudfront protection...")
    headers, cookies = bypass_cloudfront()
    async with ClientSession(headers=headers, cookies=cookies) as session:
        # Verify the cloudfront cookies work
        async with session.get(KISSANIME_URL) as resp:
            if resp.status != 200:
                print('Failed to bypass cloudfront ' + red('❌'))
                exit(1)
            print("Passed cloudfront " + green('✔️'))

        anime_url = anime_url.strip()  # Strip surrounding whitespace
        async with session.get(anime_url) as resp:
            page = await resp.text()
        soup = BeautifulSoup(page, features="html.parser")
        anime_title = soup.find(class_='bigChar').text.strip().replace(' ', '_')
        print(f'Anime title: {anime_title.replace("_", " ")}')

        try:
            episode_links = get_episode_links(soup, 1, 12)
        except Exception:
            print(red("Couldn't get episode links ❌"))
            exit(1)
        if len(episode_links) == 0:
            print(red("Found 0 episodes to download ❌"))
            exit(1)
        print(f'Found {len(episode_links)} episodes to download')

        with tqdm(desc='Collecting download links', total=len(episode_links),
                  bar_format='{desc}: {percentage:3.0f}%|{bar}|'
                             '{n_fmt}/{total_fmt} {postfix}',
                  ncols=BAR_WIDTH) as progressbar:
            try:
                tasks = [
                    asyncio.create_task(
                        get_download_link(session, name, link,
                                          VideoQuality.P480, progressbar))
                    for name, link in episode_links.items()
                ]
                download_links = await asyncio.gather(*tasks,
                                                      return_exceptions=True)
                links_found = sum(1 for i in download_links
                                  if not isinstance(i, Exception))
                if links_found == 0:
                    # Goes to the except block
                    raise RuntimeError("Found no download links")
            except Exception:
                progressbar.set_postfix_str(red('❌'))
                tqdm.write(red('Failed to collect download links ❌'))
                exit(1)
            else:
                progressbar.set_postfix_str(green('✔️'))

        # Prepare connection pool
        pool = asyncio.Queue()
        max_parallel_downloads = 10
        for i in range(max_parallel_downloads):
            await pool.put(i)

        try:
            os.mkdir(anime_title)
        except FileExistsError:
            pass

        with tqdm(desc='Downloading episodes', total=len(download_links),
                  bar_format='{desc}: |{bar}|{n_fmt}/{total_fmt} {postfix}',
                  ncols=BAR_WIDTH, leave=False) as progressbar:
            tasks = [
                asyncio.create_task(
                    download_episode(session, ep.name, ep.link, anime_title,
                                     progressbar, pool))
                for ep in download_links if not isinstance(ep, Exception)
            ]
            results = await asyncio.gather(*tasks)
            progressbar.set_postfix_str(green('✔️'))
            await asyncio.sleep(3)

        total_downloaded = sum(1 for res in results if res)
        print(f'\n\nDone, downloaded {total_downloaded}/{len(episode_links)}'
              + green('✔️'))
# config guard and imports reconstructed to match the parallel github
# collector below
import csv
import datetime
import sys

import stackexchange

import utils

config_parser = utils.get_config_parser()
if not config_parser.has_section('stackoverflow'):
    sys.stderr.write(utils.yellow(
        "stackoverflow is not configured; skipping...\n"
    ))
    exit(0)

# authenticate. if this gives problems exceeding request limits,
# you'll need to obtain an API key
# https://github.com/lucjon/Py-StackExchange/tree/updating-2.0#api-keys
so = stackexchange.Site(stackexchange.StackOverflow)
so.impose_throttling = True
user = so.user(config_parser.get('stackoverflow', 'user_id'))
timeline = user.timeline.fetch()
# timeline = user.timeline.fetch(  # i think this is the format
#     fromdate=datetime.datetime.now(),
#     todate=datetime.datetime.now()-datetime.timedelta(days=365),
# )

writer = csv.writer(sys.stdout)
writer.writerow(['datetime', 'event'])
for event in timeline:
    date = datetime.datetime.fromtimestamp(event.json_ob.creation_date)
    detail = event.timeline_type
    if detail is None:
        detail = event.json_ob.post_type
    writer.writerow([date, detail])
    sys.stdout.flush()

sys.stderr.write(utils.green(
    "stackoverflow complete for user %s!\n" % user.display_name
))
def makeCard(template, cardname, outdir=None, verbose=False, **params):
    """Replace placeholders in template with values."""
    assert os.path.isfile(template), error("Input card '%s' does not exist!" % template)

    # CARD NAME
    if outdir:
        cardname = os.path.join(outdir, os.path.basename(cardname))
        #cardname = makeCardName(template,cardlabel,outdir,**params)

    def makeParamValue(key, value):
        """Local helper function to replace placeholders in parameter values."""
        if not isinstance(value, str) or '$' not in value:
            return str(value)
        if '$SAMPLE' in value and 'SAMPLE' not in params:
            params['SAMPLE'] = getSampleName(template)
        return makeCardLabel(value, **params)

    # FIX BWCUTOFF for high lambda LQ samples
    lambd = float(params.get('LAMBDA', 0))
    mass = float(params.get('MASS', 0))
    #if 'LQ' in template and 'BWCUTOFF' not in params and lambd>=1.5 and mass>0:
    #  params['BWCUTOFF'] = computeBWCutoff(template,mass,lambd)
    #  print ">>> computed bwcutoff=%s for mass=%s, lambda=%s"%(params['BWCUTOFF'],mass,lambd)

    # REPLACE
    if verbose:
        print ">>> replacing in '%s'..." % (green(template))
    else:
        print ">>> writing '%s'..." % (green(os.path.basename(cardname)))
    lines = []
    with open(template, 'r') as file:
        for i, line in enumerate(file.readlines(), 1):
            linenum = "L%d:" % i
            if '$' in line:
                for key in parampattern.findall(line):
                    if key in params:
                        value = makeParamValue(key, params[key])
                        pattern = '$' + key
                        line = line.replace(pattern, value)
                        print ">>> %-4s replacing '%s' -> '%s'" % (linenum, pattern, value)
                    else:
                        print ">>> %-4s Found no given value for '$%s'" % (linenum, key)
                for pattern, key, value in defaultpattern.findall(line):
                    value = makeParamValue(key, params.get(key, value))
                    line = defaultpattern.sub(value, line)
                    print ">>> %-4s replacing '%s' -> '%s'" % (linenum, pattern, value) + ("" if value in params else " (default)")
            lines.append(line)

    # WRITE
    with open(cardname, 'w') as file:
        for line in lines:
            file.write(line)
    if verbose:
        print '>>> written file "%s"' % (green(cardname))
    print ">>> " + '-' * 85
# standard library
import csv
import sys

# third party
from github import Github

# local
import utils

# authenticate to the API
config_parser = utils.get_config_parser()
if not config_parser.has_section('github'):
    sys.stderr.write(utils.yellow(
        "github is not configured; skipping...\n"
    ))
    exit(0)

api = Github(
    config_parser.get("github", "username"),
    config_parser.get("github", "password"),
)

# get all of the events for the authenticated user
writer = csv.writer(sys.stdout)
writer.writerow(['datetime', 'event'])
user = api.get_user(config_parser.get("github", "username"))
for event in user.get_events():
    writer.writerow([event.created_at, event.type])
    sys.stdout.flush()

sys.stderr.write(utils.green(
    "github complete!\n"
))
def run(self, platform, precision=None, devices=None,
        use_dispersion_correction=True):
    """
    Runs the test on the given platform with the given precision model.

    Parameters
    ----------
    platform : str
        Name of the OpenMM platform to use (CPU, Reference, CUDA, or OpenCL)
    precision : str
        Precision model to use for CUDA or OpenCL (single, double, or mixed)
    devices : int or tuple of ints
        Which GPUs to run on (a tuple will run in parallel)
    """
    if platform not in ('CPU', 'Reference', 'CUDA', 'OpenCL'):
        raise ValueError('Platform %s not recognized.' % platform)
    # Define our platform and our platform properties
    plat = mm.Platform.getPlatformByName(platform)
    properties = None
    if platform == 'CUDA':
        if precision not in ('mixed', 'double', 'single'):
            raise ValueError('You must set the precision to single, '
                             'double, or mixed for the CUDA platform.')
        properties = dict(CudaPrecision=precision)
        if devices is not None:
            properties['CudaDeviceIndex'] = str(devices)
    elif platform == 'OpenCL':
        if precision not in ('mixed', 'double', 'single'):
            raise ValueError('You must set the precision to single, '
                             'double, or mixed for the OpenCL platform.')
        properties = dict(OpenCLPrecision=precision)
        if devices is not None:
            properties['OpenCLDeviceIndex'] = str(devices)

    # Create a new system with no charges so we can compare vdW and EEL
    # energies to Amber
    parmcopy = copy(self.parm)
    for i in range(len(parmcopy.parm_data['CHARGE'])):
        parmcopy.parm_data['CHARGE'][i] = 0.0
    system = parmcopy.createSystem(nonbondedCutoff=8.0*u.angstroms,
                                   nonbondedMethod=app.PME)
    system.setDefaultPeriodicBoxVectors(*self.parm.box_vectors)

    # Test serialization
    xmlsys = mm.XmlSerializer.deserialize(
        mm.XmlSerializer.serialize(self.system)
    )

    # Loop through all systems and turn on or off the dispersion correction
    print 'Trying to set PME parameters...',
    succeeded = None
    for sysmod in (self.system, self.systemapp, system, xmlsys):
        for force in sysmod.getForces():
            if isinstance(force, mm.NonbondedForce):
                force.setUseDispersionCorrection(use_dispersion_correction)
                # See if we can set the PME parameters
                try:
                    force.setPMEParameters(3.285326106/u.nanometers,
                                           60, 64, 60)
                    succeeded = True
                except AttributeError:
                    # This version of OpenMM does not support setting PME
                    # parameters
                    succeeded = False
    if succeeded:
        print 'Changed.'
    elif succeeded is None:
        print 'No NonbondedForce detected.'
    else:
        print 'OpenMM is too old. Could not change PME parameters.'

    # Define some integrators
    dummyint1 = mm.VerletIntegrator(1.0e-6*u.picoseconds)
    dummyint2 = mm.VerletIntegrator(1.0e-6*u.picoseconds)
    dummyint3 = mm.VerletIntegrator(1.0e-6*u.picoseconds)
    dummyint4 = mm.VerletIntegrator(1.0e-6*u.picoseconds)
    # Define the contexts
    if properties is None:
        context1 = mm.Context(self.system, dummyint1, plat)
        context2 = mm.Context(system, dummyint2, plat)
        context3 = mm.Context(self.systemapp, dummyint3, plat)
        context4 = mm.Context(xmlsys, dummyint4, plat)
    else:
        context1 = mm.Context(self.system, dummyint1, plat, properties)
        context2 = mm.Context(system, dummyint2, plat, properties)
        context3 = mm.Context(self.systemapp, dummyint3, plat, properties)
        context4 = mm.Context(xmlsys, dummyint4, plat, properties)
    # Set the context positions
    context1.setPositions(self.parm.positions)
    context2.setPositions(self.parm.positions)
    context3.setPositions(self.crdapp.getPositions())
    context4.setPositions(self.parm.positions)

    # Get the energies
    eunit = u.kilocalories_per_mole
    state = context1.getState(getEnergy=True, getForces=True,
                              enforcePeriodicBox=True)
    funit = eunit / u.angstrom
    forces = state.getForces().value_in_unit(funit)
    tote = state.getPotentialEnergy().value_in_unit(eunit)
    state = context3.getState(getEnergy=True, enforcePeriodicBox=True)
    toteapp = state.getPotentialEnergy().value_in_unit(eunit)
    state = context4.getState(getEnergy=True, enforcePeriodicBox=True)
    xmltote = state.getPotentialEnergy().value_in_unit(eunit)

    # Now get the decomposed energies from both the system and the
    # deserialized system to check that serialization and deserialization
    # behave as expected with these force objects and force groups
    state = context1.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.BOND_FORCE_GROUP)
    bonde = state.getPotentialEnergy().value_in_unit(eunit)
    state = context4.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.BOND_FORCE_GROUP)
    xmlbonde = state.getPotentialEnergy().value_in_unit(eunit)
    state = context1.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.ANGLE_FORCE_GROUP)
    anglee = state.getPotentialEnergy().value_in_unit(eunit)
    state = context4.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.ANGLE_FORCE_GROUP)
    xmlanglee = state.getPotentialEnergy().value_in_unit(eunit)
    state = context1.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.DIHEDRAL_FORCE_GROUP)
    dihede = state.getPotentialEnergy().value_in_unit(eunit)
    state = context4.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.DIHEDRAL_FORCE_GROUP)
    xmldihede = state.getPotentialEnergy().value_in_unit(eunit)
    state = context1.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.NONBONDED_FORCE_GROUP)
    nonbe = state.getPotentialEnergy().value_in_unit(eunit)
    state = context4.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.NONBONDED_FORCE_GROUP)
    xmlnonbe = state.getPotentialEnergy().value_in_unit(eunit)
    state = context2.getState(getEnergy=True, enforcePeriodicBox=True,
                              groups=2**self.parm.NONBONDED_FORCE_GROUP)
    vdwe = state.getPotentialEnergy().value_in_unit(eunit)
    eele = nonbe - vdwe

    # Now get the sander forces and compare them
    traj = netcdf_file(get_fn('sander_pme.nc'), 'r')
    sander_forces = traj.variables['forces'][0]
    maxdif = [abs(ofrc-sfrc) for ofrc, sfrc in
              zip(forces[0], sander_forces[0])]
    maxrel = [abs(ofrc-sfrc)/sfrc for ofrc, sfrc in
              zip(forces[0], sander_forces[0])]
    avgdif = [0, 0, 0]
    avgrel = [0, 0, 0]
    n = 0
    for ofrcs, sfrcs in zip(forces, sander_forces):
        for i, sfrc in enumerate(sfrcs):
            ofrc = ofrcs[i]
            dif = abs(ofrc-sfrc)
            rel = dif/sfrc
            maxdif[i] = max(maxdif[i], dif)
            maxrel[i] = max(maxrel[i], rel)
            avgdif[i] += dif
            avgrel[i] += rel
        n += 1
    avgdif = [x/n for x in avgdif]
    avgrel = [x/n for x in avgrel]

    # The sander energies are:
    # Etot = -69285.4160  EKtot = 0.0000       EPtot = -69285.4160
    # BOND = 404.9439     ANGLE = 1003.4499    DIHED = 2231.7367
    # 1-4 NB = 440.7084   1-4 EEL = 3818.2959  VDWAALS = 8271.5191
    # EELEC = -85456.0701 EHBOND = 0.0000      RESTRAINT = 0.0000
    sander = dict(bond=404.9439, angle=1003.4499, dihedral=2231.7367,
                  vdw=8271.5191+440.7084, eel=-85456.0701+3818.2959,
                  total=-69285.4160)
    if not use_dispersion_correction:
        # Without the long-range dispersion correction, VDWAALS = 8943.8420
        sander['total'] -= sander['vdw']
        sander['vdw'] = 8943.8420 + 440.7084
        sander['total'] += sander['vdw']
    bonddif = bonde - sander['bond']
    angledif = anglee - sander['angle']
    diheddif = dihede - sander['dihedral']
    vdwdif = vdwe - sander['vdw']  # includes 1-4 also
    eeldif = eele - sander['eel']  # includes 1-4 also
    totaldif = tote - sander['total']
    appdif = tote - toteapp

    print 'Energy differences compared to sander/Amber (kcal/mol)'
    print '                 Absolute      Relative        sander'
    print '------------------------------------------------------'
    print 'Bond     =', colorize_error(bonddif), \
        colorize_error(bonddif/sander['bond'], 1e-6), \
        '%12.4f' % sander['bond']
    print 'Angle    =', colorize_error(angledif), \
        colorize_error(angledif/sander['angle'], 1e-6), \
        '%12.4f' % sander['angle']
    print 'Dihedral =', colorize_error(diheddif), \
        colorize_error(diheddif/sander['dihedral'], 1e-6), \
        '%12.4f' % sander['dihedral']
    if use_dispersion_correction:
        # The dispersion correction in Amber neglects the repulsive part of
        # the correction, but OpenMM does not. Therefore, when we are using
        # the dispersion correction we should allow for a slightly larger
        # energy difference.
        print 'vdWaals  =', colorize_error(vdwdif, 1.0), \
            colorize_error(vdwdif/sander['vdw'], 1e-4), \
            '%12.4f' % sander['vdw']
    else:
        print 'vdWaals  =', colorize_error(vdwdif, 1e-2), \
            colorize_error(vdwdif/sander['vdw'], 1e-6), \
            '%12.4f' % sander['vdw']
    print 'Elec     =', colorize_error(eeldif, 4e0), \
        colorize_error(eeldif/sander['eel'], 1e-3), '%12.4f' % sander['eel']
    print 'Total    =', colorize_error(totaldif, 4e0), \
        colorize_error(totaldif/sander['total'], 1e-3), \
        '%12.4f' % sander['total']
    print ''
    print 'Difference b/w ParmEd and OpenMM App layer'
    print '------------------------------------------'
    print 'Total =', colorize_error(appdif, tolerance=5e-3)
    print ''
    print 'Difference b/w sander and OpenMM forces'
    print '---------------------------------------'
    print 'Maximum deviation = [%12s, %12s, %12s]' % colorize_list(maxdif, 2e0)
    print 'Maximum rel. dev. = [%12s, %12s, %12s]' % colorize_list(maxrel, 2e0)
    print 'Average deviation = [%12s, %12s, %12s]' % colorize_list(avgdif, 1e-1)
    print 'Average rel. dev. = [%12s, %12s, %12s]' % colorize_list(avgrel, 5e-1)

    # Now test serialization
    CUTOFF = 1e-5
    CUTOFFNB = 1e-2
    print ''
    print 'Serialization tests'
    print '-------------------'
    print 'Bond........',
    if abs(xmlbonde - bonde) < CUTOFF:
        print green('OK')
    else:
        dif = xmlbonde - bonde
        print red('off by %.4e (%f%%)' % (dif, 100*dif/(bonde or xmlbonde)))
    print 'Angle.......',
    if abs(xmlanglee - anglee) < CUTOFF:
        print green('OK')
    else:
        dif = xmlanglee - anglee
        print red('off by %.4e (%f%%)' % (dif, 100*dif/(anglee or xmlanglee)))
    print 'Dihedral....',
    if abs(xmldihede - dihede) < CUTOFF:
        print green('OK')
    else:
        dif = xmldihede - dihede
        print red('off by %.4e (%f%%)' % (dif, 100*dif/(dihede or xmldihede)))
    print 'Nonbonded...',
    if abs(xmlnonbe - nonbe) < CUTOFFNB:
        print green('OK')
    else:
        dif = xmlnonbe - nonbe
        print red('off by %.4e (%f%%)' % (dif, 100*dif/(nonbe or xmlnonbe)))
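# The test above relies on colorize_error/colorize_list helpers that are not
# shown in the source. A hedged sketch of what they plausibly do -- format a
# number and color it red when it exceeds a tolerance, green otherwise (the
# names exist in the source, but this body and the default tolerance are
# assumptions, not the original code):

def colorize_error(value, tolerance=1e-4):
    s = '%12.4e' % value
    return red(s) if abs(value) > tolerance else green(s)

def colorize_list(values, tolerance):
    # returns a tuple so it can feed a '%s, %s, %s'-style format string
    return tuple(colorize_error(v, tolerance) for v in values)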
last_nonsubrepo_repo_root = None
for repo_root in repo_roots.splitlines():
    repo_root = repo_root.replace(vcs_dir, '')

    # check to see if this repo is actually a
    # subrepository. only need to check the last
    # nonsubrepo_repo_root because of how `find` recursively
    # traverses the directory tree
    is_subrepo = False
    if (last_nonsubrepo_repo_root and
            repo_root.startswith(last_nonsubrepo_repo_root)):
        is_subrepo = True

    # only extract the timestamps for non-subrepositories
    if not is_subrepo:
        sys.stderr.write(utils.green(
            'examining hg repo at %s:%s\n' % (host, repo_root)
        ))
        last_nonsubrepo_repo_root = repo_root
        with cd(repo_root):
            timestamps = run(
                "hg log -u %s --template '{date|isodate}\n'" % email
            )
        for timestamp in timestamps.splitlines():
            t = datetime.datetime.strptime(
                ' '.join(timestamp.split()[:2]),
                '%Y-%m-%d %H:%M',
            )
            writer.writerow([t, host, repo_root])
            sys.stdout.flush()
def do_list_contexts(self, ignored):
    """ List all available contexts """
    from botosh import available_contexts
    print "Available contexts:\n%s" % green('\n'.join(available_contexts.keys()))
def authenticate(config_parser):
    # (the function definition and the consumer key/secret lookups are cut
    # off in the source; the def line here is reconstructed so the fragment
    # parses, while consumer_key/consumer_secret remain defined upstream)
    # The access tokens can be found on your application's Details
    # page located at https://dev.twitter.com/apps (located
    # under "Your access token")
    access_token = config_parser.get('twitter', 'access_token')
    access_token_secret = config_parser.get('twitter', 'access_token_secret')

    # authenticate and use the api object
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    return api

# If the authentication was successful, you should
# see the name of the account print out
api = authenticate(utils.get_config_parser())

# print out the tweet timestamps and some basic statistics about those
# tweets
writer = csv.writer(sys.stdout)
writer.writerow(["datetime", "retweets", "favorites"])
for status in tweepy.Cursor(api.user_timeline).items():
    writer.writerow([
        status.created_at,
        status.retweet_count,
        status.favorite_count,
    ])

sys.stderr.write(utils.green(
    "twitter complete!\n"
))
# directories. This traverses the directory tree recursively, so
# subrepos will always be listed *after* their parent repositories
vcs_dir = '.hg'
with settings(hide('commands'), host_string=host):
    repo_roots = run("find %s -type d -name '%s'" % (directory, vcs_dir))

# Extract commit history from these mercurial root directories that
# correspond with the specified user email
last_nonsubrepo_repo_root = None
for repo_root in repo_roots.splitlines():
    repo_root = repo_root.replace(vcs_dir, '')

    # print some updates
    host_repo = ':'.join([host, repo_root])
    sys.stderr.write(utils.green(
        'examining hg repo at %s\n' % host_repo
    ))

    # check to see if this repo is actually a
    # subrepository. only need to check the last
    # nonsubrepo_repo_root because of how `find` recursively
    # traverses the directory tree
    is_subrepo = False
    if (last_nonsubrepo_repo_root and
            repo_root.startswith(last_nonsubrepo_repo_root)):
        is_subrepo = True

    # only extract the timestamps for non-subrepositories
    if not is_subrepo:
        last_nonsubrepo_repo_root = repo_root
        with cd(repo_root):