def init(text, time, action, args=None):
    multiLine(text)
    status = None
    if args:
        status = action(args)
    else:
        status = action()
    sleep(time)
    if not status:
        singleLine(colored(" [FAILED]", "red"))
    else:
        singleLine(colored(" [PASSED]", "green"))
    sleep(.7)
def repl(): """Start the interactive Read-Eval-Print-Loop """ print print " " + grey(" ^__^ ") print " welcome to " + grey(" (oo)\_______ ") print " the MOO-lisp " + grey(" (__)\ )\/\ ") print " REPL " + grey(" ||----w | ") print " " + grey(" || || ") print env = copy.deepcopy(default_environment) try: while True: try: source = read_expression() if source.strip() == "(help)": with open('moolisp/usage.txt', 'r') as f: print "".join(f.readlines()) else: result = interpret(source, env) if result is not None: print to_string(result) except LispError, e: print colored("! ", "red") + str(e) except (EOFError, KeyboardInterrupt): print colored("\nBye! o/", "grey")
def endPreprocessed(isPreprocessed):
    sys.stdout.write("IS PREPROCESSED DATA STORED ON DISK?")
    if isPreprocessed:
        sys.stdout.write("\t\t%s.\n\n" % c.colored("TRUE", c.Color.BLUE))
    else:
        sys.stdout.write("\t\t%s.\n\n" % c.colored("FALSE", c.Color.RED))
    sys.stdout.flush()
def endClustering(start_time, threshold, n_errors):
    time_elapsed = time.time() - start_time
    sys.stdout.write("\t\t\tDONE in\t %s seconds.\n\n" % format(round(time_elapsed, 3)))
    sys.stdout.flush()
    print("THRESHOLD:\t%s" % c.colored(str(threshold), c.Color.BLUE))
    print("# ERRORS:\t%s\n" % c.colored(str(n_errors), c.Color.BLUE))
def execute(self, query, database_name=None):
    try:
        try:
            # Establish the Connection
            self.__connect(database_name)
            # Prepare the cursor
            with self._connection.cursor(OrderedDictCursor) as cursor:
                # Execute the SQL query ignoring warnings
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    start_time = time()
                    cursor.execute(query)
                # Get the query results
                query_result = cursor.fetchall()
                # Commit the changes in the database
                self._connection.commit()
                end_time = time()
                # Return query results
                return {
                    "query_result": query_result,
                    "query_time": "{0:.3f}".format(end_time - start_time)
                }
        except (pymysql.err.OperationalError, pymysql.ProgrammingError, pymysql.InternalError, pymysql.IntegrityError, TypeError) as error:
            raise Exception(error.args[1])
    except Exception as e:
        if self._args.env_start_deploy:
            show_output = self._credentials['execution_mode']['parallel'] == "False"
            try:
                if show_output:
                    print(colored("Error: ", 'red', attrs=['bold']) + colored(e, 'red'))
                    print(colored("--> Rollback Initiated...", 'yellow'))
                self._connection.rollback()
                if show_output:
                    print(colored("--> Rollback successfully performed.", 'green'))
            except Exception as e2:
                if show_output:
                    print(colored("--> Rollback not performed. Error: {}".format(e2), 'red'))
        raise e
    except KeyboardInterrupt:
        if self._args.env_start_deploy:
            show_output = self._credentials['execution_mode']['parallel'] == "False"
            try:
                if show_output:
                    print(colored("\n--> Rollback Initiated...", 'yellow'))
                self._connection.rollback()
                if show_output:
                    print(colored("--> Rollback successfully performed.", 'green'))
            except Exception as e:
                if show_output:
                    print(colored("--> Rollback not performed. Error: {}".format(e), 'red'))
        raise KeyboardInterrupt("Program Interrupted by User. Rollback successfully performed.")
    finally:
        # Close PyMySQL Connection
        if self._connection is not None and self._connection.open:
            self._connection.close()
def __check_sql_connection_logic(self, sql, output, shared_array=None):
    try:
        if self._ENV_DATA['ssh']['enabled'] == 'True':
            command = 'cd "{0}" && python meteor.py --environment "{1}" {2} --env_id "{3}" --env_check_sql "{4}" --execution_name "{5}"'.format(
                self._DEPLOY_PATH, self._ENV_NAME, self._servers,
                self._ENV_DATA['region'], sql['name'], self._EXECUTION_NAME)
            result = self.__ssh(command)['stdout']
        else:
            result = self.__local(
                'cd "{0}" && python meteor.py --environment "{1}" {2} --env_id "{3}" --env_check_sql "{4}" --execution_name "{5}" --logs_path "{6}"'.format(
                    self._SCRIPT_PATH, self._ENV_NAME, self._servers,
                    self._ENV_DATA['region'], sql['name'], self._EXECUTION_NAME,
                    self._args.logs_path),
                show_output=False)['stdout']

        if len(result) == 0:
            if output:
                self._logger.info(
                    colored("✔", 'green') +
                    colored(" [{}]".format(sql['name']), attrs=['bold']) +
                    " Connection Succeeded")
            if shared_array is not None:
                shared_array.append({
                    "region": self._ENV_DATA['region'],
                    "success": True,
                    "sql": sql['name']
                })
            return True
        else:
            result = result[0] if type(result) is list else result
            if output:
                self._logger.error(
                    colored("✘", 'red') +
                    colored(" [{}] ".format(sql['name']), attrs=['bold']) +
                    str(result.replace('\n', '')))
            if shared_array is not None:
                shared_array.append({
                    "region": self._ENV_DATA['region'],
                    "success": False,
                    "sql": sql['name'],
                    "error": result.replace('\n', '')
                })
            return False
    except KeyboardInterrupt:
        if self._credentials['execution_mode']['parallel'] != 'True':
            raise
    except Exception:
        if self._credentials['execution_mode']['parallel'] != 'True':
            raise
def print_error(logger):
    logger.critical(colored("+==================================================================+", 'red', attrs=['bold']))
    logger.critical(colored("‖                              ERROR                               ‖", 'red', attrs=['bold']))
    logger.critical(colored("+==================================================================+", 'red', attrs=['bold']))
def main() -> None:
    args = parser.parse_args(namespace=CommandLineArgs())
    versions = args.python_version or SUPPORTED_VERSIONS
    platforms = args.platform or [sys.platform]
    tested_directories = frozenset(args.dir) if args.dir else TYPESHED_DIRECTORIES
    code = 0
    total_files_checked = 0
    for (major, minor), platform in product(versions, platforms):
        config = TestConfig(
            verbose=args.verbose,
            dry_run=args.dry_run,
            exclude=args.exclude,
            major=major,
            minor=minor,
            directories=tested_directories,
            platform=platform,
            filter=args.filter,
        )
        code, files_checked_this_version = test_typeshed(code, args=config)
        total_files_checked += files_checked_this_version
    if code:
        print_error(f"--- exit status {code}, {total_files_checked} files checked ---")
        sys.exit(code)
    if not total_files_checked:
        print_error("--- nothing to do; exit 1 ---")
        sys.exit(1)
    print(colored(f"--- success, {total_files_checked} files checked ---", "green"))
def get_stopid(stopname):
    status = None
    messages = ''
    stopid = None
    if verbose:
        print("stopname", stopname)
    # Check whether we got a numeric stop id or a stop name
    if not stopname.isdigit():
        if verbose:
            print("Looking up stopname.")
        stops, stops_status = fetch_stops(stopname)
        if len(stops) > 1:
            # Norwegian: "Several matches, please be more specific"
            messages += "Flere treff, angi mer nøyaktig:\n"
            status = 3
            for key in stops:
                messages += "[%d] \"%s\" \n" % (stops[key], key)
        elif 0 == len(stops):
            # Norwegian: "No matches for the stop name"
            messages += "Ingen treff på stoppnavn."
            status = 4
        else:
            selected_stop = list(stops.keys())[0]
            stopid = stops[selected_stop]
            # Norwegian: "Departures from <stop>, updated <time>"
            messages += "Avganger fra %s, oppdatert %s\n" \
                % (selected_stop, colored(Style.BRIGHT, datetime.datetime.now().strftime("%H:%M")))
            status = 0
    else:
        if verbose:
            print("Looking up stopid.")
        status = 0
        stopid = stopname
    return stopid, status, messages
def _get_users():
    users = {}
    for user in psutil.users():
        name = user.name
        terminal = user.terminal
        if name in users:
            users[name].append(terminal)
        else:
            users[name] = [terminal]
    result = []
    for name in users:
        terminals = users[name]
        colored_name = bright_colored(name, Fore.BLUE)
        colored_terminals = [colored(term, Style.DIM, Fore.WHITE) for term in terminals]
        terminals_str = ", ".join(colored_terminals)
        if len(colored_terminals) > 1:
            terminals_str = "(%s)" % terminals_str
        result.append(colored_name + "@" + terminals_str)
    return ", ".join(result)
def get_logs(self, shared_array=None):
    try:
        if self._ENV_DATA['ssh']['enabled'] == 'True':
            remote_path = "{0}/{1}.tar.gz".format(self._REMOTE_EXECUTION_LOGS_PATH, self._ENV_DATA['region'])
            local_path = "{0}/{1}".format(self._LOCAL_EXECUTION_LOGS_PATH, self._ENV_DATA['region'])
            # 1. Download Compressed Logs
            status = self.__get(remote_path, local_path + '.tar.gz')
            if status:
                # 2. Uncompress Downloaded Logs
                with tarfile.open(local_path + '.tar.gz') as tar:
                    tar.extractall(path=self._LOCAL_EXECUTION_LOGS_PATH)
                # 3. Delete Downloaded Compressed Logs
                os.remove(local_path + '.tar.gz')
    except Exception:
        if self._credentials['execution_mode']['parallel'] != 'True':
            self._logger.error(colored("--> Error Downloading Logs:\n{}".format(traceback.format_exc()), 'red'))
            raise
        else:
            shared_array.append(traceback.format_exc())
    except KeyboardInterrupt:
        if self._credentials['execution_mode']['parallel'] != 'True':
            raise
def reportDataset(path_train, path_test, xTrain, xTest, xFeatures):
    training_filename = path.splitext(path.basename(path_train))[0]
    testing_filename = path.splitext(path.basename(path_test))[0]
    count_rows_xTraining = xTrain.shape[0]
    count_rows_xTesting = xTest.shape[0]
    count_features = len(xFeatures)
    print("TRAINING SET:\t%s" % c.colored(str(training_filename), c.Color.BLUE))
    print("TESTING SET:\t%s\n" % c.colored(str(testing_filename), c.Color.BLUE))
    print("TRAINING SIZE:\t%s" % c.colored(str(count_rows_xTraining), c.Color.BLUE))
    print("TESTING SIZE:\t%s" % c.colored(str(count_rows_xTesting), c.Color.BLUE))
    print("FEATURES:\t%s\n" % c.colored(str(count_features), c.Color.BLUE))
def login(config):
    while True:
        clear()
        payload = fetchUser()
        print(colored(f"OofOS v{config['version']} | Login | {date.today()}", "blue"))
        try:
            username = input(colored("Username: ", "blue") + color("green"))
            password = getpass(colored("Password: ", "blue"))
        except:
            login(config)
        if not username == payload["username"] or not password == payload["password"]:
            print()
            print(colored("Invalid username or password! Retrying in 3 seconds..", "red"))
            sleep(3)
        else:
            break
    print()
    print(colored(f"Welcome to OofOS, {payload['username']}.", "green"))
    return username, password
def print_header():
    cls()
    print(colored("""
 __  __ _____ _____ _____ ___  ____
|  \/  | ____|_   _| ____/ _ \|  _ \\
| |\/| |  _|   | | |  _|| | | | |_) |
| |  | | |___  | | | |__| |_| |  _ <
|_|  |_|_____| |_| |_____\___/|_| \_\\
                                     .
        [SQL] Mass Deployment Engine
""", 'red', attrs=['bold']))
def repl(): """Start the interactive Read-Eval-Print-Loop""" print print " " + faded(" ^__^ ") print " welcome to " + faded(" (oo)\_______ ") print " the MOO-lisp " + faded(" (__)\ )\/\ ") print " REPL " + faded(" ||----w | ") print " " + faded(" || || ") print faded(" use ^D to exit") print env = default_env() while True: try: source = read_expression() print interpret(source, env) except LispError, e: print colored("!", "red"), print faded(str(e.__class__.__name__) + ":"), print str(e) except KeyboardInterrupt: msg = "Interupted. " + faded("(Use ^D to exit)") print "\n" + colored("! ", "red") + msg
def __check_sql_connection(self, output):
    connection_succeeded = True
    if output:
        self._logger.info("- Checking SQL Connections...")

    if self._credentials['execution_mode']['parallel'] == "True":
        # Init SyncManager
        manager = SyncManager()
        manager.start(self.__mgr_init)
        shared_array = manager.list()
        processes = []
        try:
            for sql in self._ENV_DATA['sql']:
                p = multiprocessing.Process(
                    target=self.__check_sql_connection_logic,
                    args=(sql, output, shared_array))
                p.start()
                processes.append(p)
            for process in processes:
                process.join()
            for data in shared_array:
                connection_succeeded &= data['success']
                if data['success'] is False:
                    self._logger.info(
                        colored(" [{}/SQL] {} ".format(self._ENV_DATA['region'], data['sql']), attrs=['bold']) +
                        data['error'])
        except KeyboardInterrupt:
            for process in processes:
                process.join()
            raise
    else:
        for sql in self._ENV_DATA['sql']:
            connection_succeeded &= self.__check_sql_connection_logic(sql, output)

    return connection_succeeded
def test_third_party_stubs(code: int, args: TestConfig) -> TestResults:
    print("Testing third-party packages...")
    print("Running mypy " + " ".join(get_mypy_flags(args, "/tmp/...")))
    files_checked = 0
    for distribution in sorted(os.listdir("stubs")):
        if distribution in {"SQLAlchemy", "Flask-SQLAlchemy"}:
            print(colored(f"Skipping {distribution} due to mypy crashes", "yellow"))
            continue
        distribution_path = Path("stubs", distribution)
        if not is_probably_stubs_folder(distribution, distribution_path):
            continue
        this_code, checked = test_third_party_distribution(distribution, args)
        code = max(code, this_code)
        files_checked += checked
    return TestResults(code, files_checked)
def setup():
    while True:
        clear()
        print("Welcome to the OofOS setup utility.")
        print("To get started, you need to make an OofOS account.")
        print()
        try:
            username = input("Account Username: ")
            password = getpass("Account Password: ")
            confirm = getpass("Confirm Password: ")
        except:
            continue
        if password != confirm:
            print(colored("Passwords do not match! Retrying in 3 seconds..", "red"))
            sleep(3)
        else:
            break
    user = {"username": username, "password": password}
    open("data/user.json", "w").write(dumps(user, indent=4))
def main() -> None:
    args = parser.parse_args()
    versions = [(3, 11), (3, 10), (3, 9), (3, 8), (3, 7), (3, 6), (2, 7)]
    if args.python_version:
        versions = [v for v in versions if any(("%d.%d" % v).startswith(av) for av in args.python_version)]
    if not versions:
        print_error("--- no versions selected ---")
        sys.exit(1)
    code = 0
    total_files_checked = 0
    for major, minor in versions:
        code, files_checked_this_version = test_typeshed(code, major, minor, args)
        total_files_checked += files_checked_this_version
    if code:
        print_error(f"--- exit status {code}, {total_files_checked} files checked ---")
        sys.exit(code)
    if not total_files_checked:
        print_error("--- nothing to do; exit 1 ---")
        sys.exit(1)
    print(colored(f"--- success, {total_files_checked} files checked ---", "green"))
def execute_before(self, region):
    try:
        # Deploy BEFORE Queries
        if self._credentials['execution_mode']['parallel'] != 'True':
            print(colored("--> Executing BEFORE Queries ...", "yellow", attrs=['bold', 'reverse']))
        # Start Deploy
        self._query.clear_execution_log()
        self._query_execution.before(self._args.environment, region)
    except KeyboardInterrupt:
        if self._credentials['execution_mode']['parallel'] != 'True':
            raise
    finally:
        # Suppress CTRL+C events
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # Store Execution Logs
        if self._environment_data['ssh']['enabled'] == 'True':
            execution_log_path = "{0}/logs/{1}/execution/{2}/{2}_before.json".format(
                self._script_path, self._execution_name, self._environment_data['region'])
        else:
            execution_log_path = "{0}execution/{1}/{1}_before.json".format(
                self._logs_path, self._environment_data['region'])
        with open(execution_log_path, 'w') as outfile:
            json.dump(self._query.execution_log, outfile, default=self.__dtSerializer, separators=(',', ':'))
        # Enable CTRL+C events
        signal.signal(signal.SIGINT, signal.default_int_handler)
def usage():
    # Norwegian CLI help text ("Bruk" = "Usage").
    print('Bruk: %s [-a] [-l] [-n] [-v] <stasjonsnavn|stasjonsid>' % sys.argv[0])
    print('''
    -h  Vis denne hjelpen.
    -a  Ikke bruk Unicode symboler/ikoner, kun tekst.
    -c  Deaktiver linjefarger.
    -d  Ikke vis avvik.
    -j  Vis turnummer.
    -l  Begrens treff til kun linje-nummer (kommaseparert).
    -n  Begrens treff pr. platform, tilbakefall er %s.
    -p  Begrens treff til platform-navn (prefix).
    -w  Skriv ut HTML.
    -t  Bruk lokal fil ruter.temp som xml-kilde (kun for utvikling).
    -v  Verbose for utfyllende informasjon.
    ''' % limitresults)
    for icon in TransportationType:
        print(icon, TransportationType[icon])
    print(system_name, colored(Fore.BLUE, 'version'), system_version)
    sys.exit(1)
def execute_main(self, region, server, shared_array=None):
    try:
        # Deploy MAIN Queries
        if self._credentials['execution_mode']['parallel'] != 'True':
            print(colored("--> Executing MAIN Queries ...", "yellow", attrs=['bold', 'reverse']))

        # Set SQL Connection
        self._query.set_sql_connection(server)
        self._query_execution.set_query(self._query)

        # Clear Execution Log
        self._query.clear_execution_log()

        # Get all Databases in current server
        databases = self._query.sql.get_all_databases()

        # Create Execution Server Folder
        os.mkdir("{0}/logs/{1}/execution/{2}/{3}/".format(
            self._script_path, self._execution_name, region, server['name']))

        # Deployment in Parallel
        if self._credentials['execution_mode']['parallel'] == 'True' and int(self._credentials['execution_mode']['threads']) > 1:
            manager = SyncManager()
            manager.start(self.__mgr_init)
            thread_shared_array = manager.list()
            thread_shared_array.extend(databases)
            progress_array = manager.list()
            processes = []
            try:
                for i in range(int(self._credentials['execution_mode']['threads'])):
                    p = multiprocessing.Process(
                        target=self.__execute_main_databases,
                        args=(region, server, thread_shared_array, progress_array))
                    p.start()
                    processes.append(p)

                # Track progress
                tracking = True
                while tracking:
                    if all(not p.is_alive() for p in processes):
                        tracking = False
                    d = len(progress_array)
                    progress = float(d) / float(len(databases)) * 100
                    print('{{"r":"{}","s":"{}","p":{:.2f},"d":{},"t":{}}}'.format(
                        region, server['name'], progress, d, len(databases)))
                    if d == len(databases):
                        break
                    time.sleep(1)

                for process in processes:
                    process.join()
                if len(thread_shared_array) > 0:
                    shared_array.append(thread_shared_array[0])
            except KeyboardInterrupt:
                for process in processes:
                    process.join()
                raise
        # Deploy in Sequential
        else:
            self.__execute_main_databases(region, server, databases)
    except KeyboardInterrupt:
        if self._credentials['execution_mode']['parallel'] != 'True':
            raise
    except Exception as e:
        if self._credentials['execution_mode']['parallel'] == 'True':
            error_format = re.sub(' +', ' ', str(e)).replace('\n', '')
            shared_array.append(error_format)
        raise
def initialize(REQ_FIRST_TIME_SETUP):
    try:
        def init(text, time, action, args=None):
            multiLine(text)
            status = None
            if args:
                status = action(args)
            else:
                status = action()
            sleep(time)
            if not status:
                singleLine(colored(" [FAILED]", "red"))
            else:
                singleLine(colored(" [PASSED]", "green"))
            sleep(.7)

        multiLine("Checking user information..")
        sleep(.75)
        userData = loads(open("data/user.json", "r").read())
        if "username" not in userData:
            REQ_FIRST_TIME_SETUP = True
            singleLine(colored(" [NOT SETUP]", "yellow"))
        else:
            singleLine(colored(" [PASSED]", "green"))
        sleep(.7)

        init("Checking file states..", .75, checkFiles)
        init("Checking system performance..", .75, checkPerformance)
        init("Checking CLI status..", 3.5, exists, "CLI")

        multiLine("Checking network status..")
        status = get("https://www.google.com").status_code
        sleep(3.5)
        if not status == 200:
            singleLine(colored(" [FAILED]", "red"))
        else:
            singleLine(colored(" [PASSED]", "green"))
        sleep(.7)

        print(colored("\nBooting..", "yellow"))
        sleep(2.7)
        return REQ_FIRST_TIME_SETUP
    except:
        raise KeyboardInterrupt("Boot manually canceled by host.")
def read_line(prompt):
    "Return tuple of user input line and number of unclosed parens"
    line = raw_input(colored(prompt, "grey", "bold"))
    line = preprocess(line + "\n")
    return (line, line.count("(") - line.count(")"))
    # Print Header
    if args.env_id is None:
        print_header()

    # Remote Execution - Check SSH Connection
    core.check_remote_execution()

    # Init Core
    core.init()

except KeyboardInterrupt:
    logger.warning("")
    logger.warning(colored("+==================================================================+", 'yellow'))
    logger.warning(colored("‖                             WARNING                              ‖", 'yellow'))
    logger.warning(colored("+==================================================================+", 'yellow'))
    logger.warning("Program Interrupted By User.")
    core.clean()

except Exception as e:
    print_error(logger)
    message = str(e).replace('[USER] ',
def run_stubtest(dist: Path, *, verbose: bool = False) -> bool:
    with open(dist / "METADATA.toml") as f:
        metadata = dict(tomli.loads(f.read()))
    print(f"{dist.name}... ", end="")

    stubtest_meta = metadata.get("tool", {}).get("stubtest", {})
    if stubtest_meta.get("skip", False):
        print(colored("skipping", "yellow"))
        return True

    with tempfile.TemporaryDirectory() as tmp:
        venv_dir = Path(tmp)
        venv.create(venv_dir, with_pip=True, clear=True)
        pip_exe = str(venv_dir / "bin" / "pip")
        python_exe = str(venv_dir / "bin" / "python")

        dist_version = metadata["version"]
        assert isinstance(dist_version, str)
        dist_req = f"{dist.name}=={dist_version}"

        # If @tests/requirements-stubtest.txt exists, run "pip install" on it.
        req_path = dist / "@tests" / "requirements-stubtest.txt"
        if req_path.exists():
            try:
                pip_cmd = [pip_exe, "install", "-r", str(req_path)]
                subprocess.run(pip_cmd, check=True, capture_output=True)
            except subprocess.CalledProcessError as e:
                print_command_failure("Failed to install requirements", e)
                return False

        # We need stubtest to be able to import the package, so install mypy into the venv
        # Hopefully mypy continues to not need too many dependencies
        # TODO: Maybe find a way to cache these in CI
        dists_to_install = [dist_req, get_mypy_req()]
        dists_to_install.extend(metadata.get("requires", []))
        pip_cmd = [pip_exe, "install"] + dists_to_install
        try:
            subprocess.run(pip_cmd, check=True, capture_output=True)
        except subprocess.CalledProcessError as e:
            print_command_failure("Failed to install", e)
            return False

        ignore_missing_stub = ["--ignore-missing-stub"] if stubtest_meta.get("ignore_missing_stub", True) else []
        packages_to_check = [d.name for d in dist.iterdir() if d.is_dir() and d.name.isidentifier()]
        modules_to_check = [d.stem for d in dist.iterdir() if d.is_file() and d.suffix == ".pyi"]
        stubtest_cmd = [
            python_exe,
            "-m",
            "mypy.stubtest",
            # Use --custom-typeshed-dir in case we make linked changes to stdlib or _typeshed
            "--custom-typeshed-dir",
            str(dist.parent.parent),
            *ignore_missing_stub,
            *packages_to_check,
            *modules_to_check,
        ]
        allowlist_path = dist / "@tests/stubtest_allowlist.txt"
        if allowlist_path.exists():
            stubtest_cmd.extend(["--allowlist", str(allowlist_path)])

        try:
            subprocess.run(stubtest_cmd, env={"MYPYPATH": str(dist), "MYPY_FORCE_COLOR": "1"}, check=True, capture_output=True)
        except subprocess.CalledProcessError as e:
            print_error("fail")
            print_commands(dist, pip_cmd, stubtest_cmd)
            print_command_output(e)

            print("Ran with the following environment:", file=sys.stderr)
            ret = subprocess.run([pip_exe, "freeze", "--all"], capture_output=True)
            print_command_output(ret)

            if allowlist_path.exists():
                print(
                    f'To fix "unused allowlist" errors, remove the corresponding entries from {allowlist_path}',
                    file=sys.stderr,
                )
                print(file=sys.stderr)
            else:
                print(f"Re-running stubtest with --generate-allowlist.\nAdd the following to {allowlist_path}:", file=sys.stderr)
                ret = subprocess.run(stubtest_cmd + ["--generate-allowlist"], env={"MYPYPATH": str(dist)}, capture_output=True)
                print_command_output(ret)

            return False
        else:
            print_success_msg()

    if verbose:
        print_commands(dist, pip_cmd, stubtest_cmd)

    return True
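# A minimal usage sketch for run_stubtest above, not taken from the original script:
# it assumes the typeshed layout where each third-party stub package lives in its own
# directory under stubs/ with a METADATA.toml, and runs stubtest over each in turn.
def run_all_stubtests(stubs_root: str = "stubs") -> list:
    failures = []
    for dist in sorted(Path(stubs_root).iterdir()):
        # Skip stray files; only directories with metadata are stub distributions.
        if dist.is_dir() and (dist / "METADATA.toml").exists():
            if not run_stubtest(dist, verbose=False):
                failures.append(dist.name)
    return failures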
def execute(self, query=None, database=None, auxiliary=None, alias=None):
    # Core Variables
    database_name = database if auxiliary is None else auxiliary['database']
    database_parsed = ''
    query_parsed = self.__parse_query(query) if auxiliary is None else self.__parse_query(auxiliary['query'])
    query_alias = query_parsed if alias is None else '[ALIAS] {}'.format(alias)
    server_sql = self._sql_connection['name'] if auxiliary is None else auxiliary['auxiliary_connection']
    region = self._environment_data['region']

    # SQL Connection and Database
    conn = ''
    if auxiliary is None:
        conn = self._sql
        database_parsed = '__GLOBAL__' if database is None else database
    else:
        aux_credentials = self._credentials['auxiliary_connections'][auxiliary['auxiliary_connection']]
        conn = mysql(self._logger, self._args, self._credentials)
        conn.connect(aux_credentials['hostname'], aux_credentials['username'], aux_credentials['password'])
        database_parsed = '__GLOBAL__' if auxiliary['database'] is None else auxiliary['database']

    # Init a new Row
    date_time = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S.%f UTC')
    execution_row = {
        "meteor_timestamp": date_time,
        "meteor_environment": self._environment_name,
        "meteor_region": region,
        "meteor_server": server_sql,
        "meteor_database": database_parsed,
        "meteor_query": query_alias,
        "meteor_status": "1",
        "meteor_response": "",
        "meteor_execution_time": ""
    }

    # Print Server, Database and Query
    is_auxiliary = '' if auxiliary is None else 'Auxiliary '
    if (self._current_server != server_sql) or (self._current_database != database):
        self._current_server = server_sql
        self._current_database = database
        if self._credentials['execution_mode']['parallel'] != 'True':
            print("------------------------------------------------------")
            print(colored(is_auxiliary + "Server:", 'blue') + ' [' + colored(server_sql, 'magenta') + ']. ' +
                  colored("Database:", 'blue') + ' [' + colored(database_parsed, 'magenta') + ']')
    if self._credentials['execution_mode']['parallel'] != 'True':
        print(colored("Query: ", attrs=['bold']) + ' '.join(query_parsed.replace('\n', '').split()))

    # If Test Run --> Syntax Checks + Execution Checks
    if not self._args.env_start_deploy:
        # Syntax Checks
        query_syntax = self.get_query_type(query_parsed, show_output=False)
        if query_syntax is False:
            exception_message = "Query '{}' has not passed the Syntax Validation".format(query_parsed)
            # Write Exception to the Log
            execution_row['meteor_response'] = exception_message
            self._execution_log['output'].append(execution_row)
            raise Exception(exception_message)

        # Execution Checks
        try:
            self._query_template_instance.validate_execution(query_parsed, conn, database_name)
            # Write the Row to the Log
            if query_syntax != 'Select':
                self._execution_log['output'].append(execution_row)
            # Print 'Test Succeeded' Message
            if self._credentials['execution_mode']['parallel'] != 'True':
                print(colored('Test Succeeded', 'green'))
        except Exception as e:
            if self._credentials['execution_mode']['parallel'] != 'True':
                print(colored('Test Failed: ', 'red', attrs=['bold']) + colored(self.__parse_error(str(e)), 'red'))
            # Write Exception to the Log
            execution_row['meteor_status'] = '0'
            execution_row['meteor_response'] = self.__parse_error(str(e))
            self._execution_log['output'].append(execution_row)
            return

    # Execute Query (if --deploy or --test with SELECT queries)
    if self._args.env_start_deploy or query_syntax == 'Select':
        try:
            query_info = conn.execute(query_parsed, database_name)
            # If the query is executed successfully, then write the query result to the Log
            execution_row['meteor_output'] = query_info['query_result'] if str(query_info['query_result']) != '()' else '[]'
            # execution_row['meteor_response'] = "Query successfully executed"
            execution_row['meteor_response'] = ""
            execution_row['meteor_execution_time'] = query_info['query_time']
            self._execution_log['output'].append(execution_row)
            if self._credentials['execution_mode']['parallel'] != 'True' and self._args.env_start_deploy:
                print(colored("Query successfully executed", "green"))
            # Return the Execution Result
            return query_info['query_result']
        except (KeyboardInterrupt, Exception) as e:
            # Write Exception to the Log
            execution_row['meteor_status'] = '0'
            execution_row['meteor_response'] = self.__parse_error(str(e))
            self._execution_log['output'].append(execution_row)
            # Do not raise the exception; continue with the Deployment
            if e.__class__ == KeyboardInterrupt:
                raise
    while True:
        try:
            source = read_expression()
            print interpret(source, env)
        except LispError, e:
            print colored("!", "red"),
            print faded(str(e.__class__.__name__) + ":"),
            print str(e)
        except KeyboardInterrupt:
            msg = "Interrupted. " + faded("(Use ^D to exit)")
            print "\n" + colored("! ", "red") + msg
        except EOFError:
            print faded("\nBye! o/")
            sys.exit(0)
        except Exception, e:
            print colored("! ", "red") + faded("The Python is showing through…")
            print faded("  " + str(e.__class__.__name__) + ":"),
            print str(e)

def read_expression():
    "Read from stdin until we have at least one s-expression"
    exp = ""
    open_parens = 0
    while True:
        line, parens = read_line("→  " if not exp.strip() else "…  ")
        open_parens += parens
        exp += line
        if exp.strip() and open_parens <= 0:
            break
    return exp
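# Standalone sketch of the paren-balancing idea used by read_expression above;
# this is not part of the original REPL. It consumes a canned list of lines
# instead of prompting on stdin, so the behaviour can be checked directly.
def read_expression_from_lines(lines):
    "Concatenate lines until at least one complete s-expression has been read"
    exp = ""
    open_parens = 0
    for line in lines:
        open_parens += line.count("(") - line.count(")")
        exp += line
        if exp.strip() and open_parens <= 0:
            break
    return exp

# The two physical lines below balance out into a single s-expression.
_exp = read_expression_from_lines(["(define x\n", "  (+ 1 2))\n"])
assert _exp.count("(") == _exp.count(")")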
def printAccuracy(accuracyScore):
    print("Accuracy Score: %s" % c.colored(str(round(accuracyScore, 3)), c.Color.BLUE))
def validate(self, output=True, shared_array=None):
    try:
        if output:
            environment_type = '[LOCAL]' if self._ENV_DATA['ssh']['enabled'] == 'False' else '[SSH]'
            self._logger.info(colored('{} Region: {}'.format(environment_type, self._ENV_DATA['region']), attrs=['bold']))
        else:
            environment_type = '[LOCAL]' if self._ENV_DATA['ssh']['enabled'] == 'False' else '[SSH] '
            self._logger.info(colored('--> {} Region \'{}\' Started...'.format(environment_type, self._ENV_DATA['region']), 'yellow'))

        if self._ENV_DATA['ssh']['enabled'] == "True":
            same_version = self.check_version(output)
            if same_version:
                if output:
                    self._logger.info(colored('- Region Updated.', 'green'))
            else:
                if output:
                    self._logger.info(colored('- Region Outdated. Starting uploading the Meteor Engine...', 'red'))
                # Install Meteor in all SSH Regions
                self.prepare(output)

        # Setup User Execution Environment ('credentials.json', 'query_execution.py')
        self.setup(output)

        # Check SQL Connection of the Environment [True: All SQL Connections Succeeded | False: Some SQL Connections Failed]
        status = self.__check_sql_connection(output)
        if status is True:
            response = '{} Region \'{}\' Finished.'.format(environment_type, self._ENV_DATA['region'])
            self._logger.info(colored('--> ' + response, 'green'))
            if shared_array is not None:
                shared_array.append({
                    "region": self._ENV_DATA['region'],
                    "success": True,
                    "response": response
                })
        else:
            # Handle SQL Error
            response = '{} Region \'{}\' Failed.'.format(environment_type, self._ENV_DATA['region'])
            self._logger.info(colored('--> ' + response, 'red'))
            if shared_array is not None:
                shared_array.append({
                    "region": self._ENV_DATA['region'],
                    "success": False,
                    "response": response
                })
        return status
    except Exception as e:
        if self._ENV_DATA['ssh']['enabled'] == "True":
            # Handle SSH Error
            if self._credentials['execution_mode']['parallel'] == 'True':
                self._logger.info(
                    colored(" [{}/SSH] {} ".format(self._ENV_DATA['region'], self._ENV_DATA['ssh']['hostname']), attrs=['bold']) + str(e))
            else:
                self._logger.info(
                    colored("✘", 'red') +
                    colored(" [{}] ".format(self._ENV_DATA['ssh']['hostname']), attrs=['bold']) + str(e))
        response = '{} Region \'{}\' Failed.'.format(environment_type, self._ENV_DATA['region'])
        self._logger.info(colored('--> ' + response, 'red'))
        if shared_array is not None:
            shared_array.append({
                "region": self._ENV_DATA['region'],
                "ssh": self._ENV_DATA['ssh']['hostname'],
                "success": False,
                "response": response,
                "error": str(e)
            })
        raise Exception()
    except KeyboardInterrupt:
        if self._credentials['execution_mode']['parallel'] != 'True':
            raise
def startClassification(clf):
    sys.stdout.write("\nClassification algorithm: %s\n\n" % c.colored(clf, c.Color.BLUE))
    sys.stdout.flush()
def printSummary(classifiers, results, floating_precision):
    fields = ["", "Time training [s]", "Time prediction [s]", "Accuracy score",
              "Weighted precision", "Weighted f1 score", "Weighted recall",
              "Weighted support"]
    delimiter = "|"
    minCellLen = maxStrLen(fields)
    lineSeperator = "+-" + minCellLen * "-" + "-+"

    # Adding the named headers to the first column
    for i in range(0, len(fields)):
        header = fields[i]
        extra_spaces = minCellLen - len(header)
        fields[i] = "%s %s%s %s" % (delimiter, header, extra_spaces * " ", delimiter)

    # Adding missing spaces for a convenient layout to the first column
    minCellLen = maxStrLen(classifiers)
    for clf in classifiers:
        lineSeperator += "-" + minCellLen * "-" + "-+"
        extra_spaces = minCellLen - len(clf)
        fields[0] += " %s%s %s" % (clf, extra_spaces * " ", delimiter)

    # Extracting the best and worst results of all algorithms into an array of
    # [[best_0, worst_0], ..., [best_n, worst_n]]
    best_worst_results = []
    for i in range(1, len(fields) - 1):
        row = [clf_result[i] for clf_result in results]
        if (i == 1) or (i == 2):
            best_worst_results.append([min(row), max(row)])
        else:
            best_worst_results.append([max(row), min(row)])

    for clf_result in results:
        for i in range(1, len(fields)):
            value = clf_result[i]
            # Standard color for printing
            color = c.Color.BLACK
            # Do not change the order or turn the second "if" into "elif", since this
            # order prevents two equal values being shown as "worst" instead of "best";
            # this way both values will be shown as "best".
            if not (i == len(fields) - 1):
                if value == best_worst_results[i - 1][1]:
                    color = c.Color.RED
                if value == best_worst_results[i - 1][0]:
                    color = c.Color.GREEN
            value = round(value, floating_precision)
            extra_spaces = minCellLen - len(str(value))
            formatted_value = c.colored(str(value), color)
            fields[i] += " %s%s %s" % (formatted_value, extra_spaces * " ", delimiter)

    # Print the whole table with the results of all classifiers
    # +---------------------...------------+
    # |          | CLF_0   | ... | CLF_K   |
    # +---------------------...------------+
    # | FIELD_0  | RSLT_00 | ... | RSLT_K0 |
    # |   ...    |   ...   | ... |   ...   |
    # | FIELD_K  | RSLT_0L | ... | RSLT_KL |
    # +---------------------...------------+
    print(lineSeperator)
    for i in range(0, len(fields)):
        print(fields[i])
        if i == 0:
            print(lineSeperator)
    print(lineSeperator + "\n")
def startSystem(doBoot=True):
    if doBoot:
        ### Variables ###
        REQ_FIRST_TIME_SETUP = False

        ### Booting Screen ###
        load()
        print(open("icons/bootLogo.txt", "r").read())
        sleep(1.2)

        ### Initialization ###
        REQ_FIRST_TIME_SETUP = initialize(REQ_FIRST_TIME_SETUP)
        clear()
        if REQ_FIRST_TIME_SETUP:
            setup()

    config = loads(open("data/os.json", "r").read())

    ### Begin Login ###
    username, password = login(config)
    sleep(2)

    ### Dashboard ###
    clear()
    print(colored(f"OofOS v{config['version']} | User: {username} | {date.today()}", "green"))
    print()
    while True:
        try:
            cmd = input(f"{colored(f'{username}@oofosServer', 'yellow')} {colored('$', 'green')} {color('green')}")
        except KeyboardInterrupt:
            cmd = ""
            print("\n\nIgnoring exit key; in the future this will terminate ZealousOS.\n")
        args = None
        if " " in cmd:
            args = cmd.split(" ")[1:]
            cmd = cmd.split(" ")[0]
        parse(cmd, args, config, startSystem)