def endpoint_profile_show(hostname, orchestrator_id, workload_id, endpoint_id):
    """
    List the profiles assigned to a particular endpoint.

    :param hostname: The hostname.
    :param orchestrator_id: The orchestrator ID.
    :param workload_id: The workload ID.
    :param endpoint_id: The endpoint ID.
    :return: None
    """
    try:
        endpoint = client.get_endpoint(hostname=hostname,
                                       orchestrator_id=orchestrator_id,
                                       workload_id=workload_id,
                                       endpoint_id=endpoint_id)
    except MultipleEndpointsMatch:
        # The lookup criteria matched more than one endpoint; the caller
        # must narrow the search before profiles can be listed.
        print "Failed to list profiles in endpoint.\n"
        print_paragraph("More than 1 endpoint matches the provided "
                        "criteria. Please provide additional parameters to "
                        "refine the search.")
        sys.exit(1)
    except KeyError:
        # No endpoint with the given ID exists in the datastore.
        print "Failed to list profiles in endpoint.\n"
        print_paragraph("Endpoint %s is unknown to Calico.\n" % endpoint_id)
        sys.exit(1)
    # Render the profile names as a single-column table sorted by name.
    if endpoint.profile_ids:
        x = PrettyTable(["Name"], sortby="Name")
        for name in endpoint.profile_ids:
            x.add_row([name])
        print str(x) + "\n"
    else:
        print "Endpoint has no profiles associated with it."
def show_user_summary(self, user_name, delegate_account=None, account_id=None,
                      print_table=True):
    """
    Debug method to display euare/iam info for a specific user.

    :param user_name: string - user to get policies for.
    :param delegate_account: string - used for user lookup; defaults to the
                             account owning this session.
    :param account_id: regex - to use for account id when resolving
                       delegate_account.
    :param print_table: boolean - if True log the table, otherwise return it.
    """
    user_name = user_name
    # When no delegate account is given, fall back to this session's own
    # account and resolve its name from the account id.
    if delegate_account is None:
        account_id = self.eucarc.account_id
        delegate_account = self.get_all_accounts(account_id=account_id)[0]['account_name']
    self.log.debug('Fetching user summary for: user_name:' + str(user_name)
                   + " account:" + str(delegate_account)
                   + " account_id:" + str(account_id))
    title = 'USER SUMMARY: user:{0}, account:{1}'.format(user_name, delegate_account)
    pt = PrettyTable([title])
    pt.align = 'l'
    # Embed the per-user table as a single cell of the outer summary table.
    user_table = str(self.show_all_users(account_name=delegate_account,
                                         account_id=account_id,
                                         user_name=user_name,
                                         print_table=False)) + "\n"
    pt.add_row([user_table])
    pol_pt = self.show_user_policy_summary(user_name,
                                           delegate_account=delegate_account,
                                           print_table=False)
    # NOTE(review): reaches into PrettyTable internals (_field_names, _rows)
    # to re-title the policy table and center it to the user table's width
    # (minus 4 border chars); fragile across prettytable versions.
    new_title = str(pol_pt._field_names[0]).center(len(user_table.splitlines()[0]) - 4)
    new_pt = PrettyTable([new_title])
    new_pt.align[new_title] = 'l'
    new_pt.hrules = 1
    new_pt._rows = pol_pt._rows
    pt.add_row([new_pt])
    if print_table:
        self.log.info("\n" + str(pt) + "\n")
    else:
        return pt
def getReservation(self, reservationid, show='table'): """ Verify a reservation and get reservation details http://pubs.vmware.com/vra-62/index.jsp#com.vmware.vra.programming.doc/GUID-2A2D96DE-9BBE-414B-82AB-DD70B82D3E0C.html Parameters: reservationid = Id of a new or existing reservation show = return data as a table or json object """ host = self.host token = self.token url = 'https://{host}/reservation-service/api/reservations/{reservationid}'.format(host=host, reservationid=reservationid) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': token } r = requests.get(url=url, headers=headers, verify=False) checkResponse(r) reservation = r.json() if show == 'table': table = PrettyTable(['Id', 'Name']) table.add_row([ reservation['id'], reservation['name']]) print table elif show == 'json': return reservation
def run_status(command): results = hs.status(command) # Enable IPython debug to shell #from IPython import embed #embed() #clear screen first os.system('cls' if os.name == 'nt' else 'clear') #iterate through list. Return only the rows we want which are : #Status, Name, Last Changed. #Limit the second row "Name" to be no more than 35 characters + #two ".." for proper screen formatting on an 80 row wide terminal #for items in results: # print str(items[0]).ljust(20) + \ # str(items[2][:35] + (items[2][35:] # and '..')).ljust(39) + str(items[5]) #print hs.createdict(results) #print hs.createjson(results) x = PrettyTable([results[0][0], results[0][2], results[0][5]]) x.align[results[0][0]] = "l" results.pop(0) x.padding_width = 1 for items in results: x.add_row([items[0], items[2], items[5]]) print x
def show_user_policy_summary(self, user_name, policy_name=None, delegate_account=None,
                             doc=None, search=False, print_table=True):
    """
    Debug method to display policy summary applied to a given user.

    :param user_name: string - user to get policies for.
    :param policy_name: regex - to match/filter returned policies.
    :param delegate_account: string - used for user lookup.
    :param doc: policy document to use as a filter.
    :param search: boolean - specify whether to use match or search when
                   filtering the returned list.
    :param print_table: boolean - if True log the table, otherwise return it.
    """
    title = 'POLICIES FOR USER: {0}'.format(user_name)
    main_pt = PrettyTable([title])
    main_pt.hrules = 1
    main_pt.align = 'l'
    main_pt.max_width[title] = 120
    policies = self.get_user_policies(user_name, policy_name=policy_name,
                                      delegate_account=delegate_account, doc=doc,
                                      search=search)
    if not policies:
        main_pt.add_row(['-- No Policies --'])
    else:
        # One name row plus one pretty-printed JSON document row per policy.
        for policy in policies:
            main_pt.add_row(['POLICY NAME: "{0}" :'.format(policy['policy_name'])])
            # Policy documents come back URL-encoded; decode then re-indent.
            p_doc = urllib.unquote(policy['policy_document'])
            p_json = json.loads(p_doc)
            pretty_json = (json.dumps(p_json, indent=2) or "") + "\n"
            main_pt.add_row([pretty_json])
    if print_table:
        self.log.info("\n" + str(main_pt) + "\n")
    else:
        return main_pt
def getReservationByName(self, name, show='table'): """ Get a reservation by name Parameters: name = name of a new or existing reservation show = return data as a table or json object """ host = self.host token = self.token url = "https://{host}/reservation-service/api/reservations?$filter=name%20eq%20'{name}'".format(host=host, name=name) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': token } r = requests.get(url=url, headers=headers, verify=False) checkResponse(r) reservation = r.json() if show == 'table': table = PrettyTable(['Id', 'Name']) table.add_row([ reservation['content'][0]['id'], reservation['content'][0]['name']]) print table elif show == 'json': return reservation['content'][0]
def getAllReservations(self, show='table', limit=20): """ Get all reservations Parameters: show = Output either table format or raw json limit = The number of entries per page. """ host = self.host token = self.token url = 'https://{host}/reservation-service/api/reservations?limit={limit}'.format( host=host, limit=limit) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': token } r = requests.get(url=url, headers=headers, verify=False) checkResponse(r) reservations = r.json() if show == 'table': table = PrettyTable(['Id', 'Name']) for i in reservations['content']: table.add_row([i['id'], i['name']]) print table elif show == 'json': return reservations['content']
def do_get_network(self, line):
    """
    Fetch one network from DCNM and print it as a single-row table.

    :param line: command line of the form "<tenant> <segmentation_id>".
    """
    args = line.split()
    if len(args) < 2:
        print("Invalid parameters")
        return
    if not args[1].isdigit():
        # BUG FIX: the original passed args[1] as a second positional
        # argument to print(), which printed a tuple instead of
        # interpolating the value into the message.
        print("Invalid segmentation id %s." % args[1])
        return
    try:
        net = self.dcnm_client.get_network(args[0], args[1])
        if not net:
            print("No network found.")
            return
    except dexc.DfaClientRequestFailed:
        print("Failed to access DCNM.")
        return
    net_table = PrettyTable(net.keys())
    row = []
    for key, val in six.iteritems(net):
        # Complex fields are flattened so PrettyTable can render them.
        if key == "configArg" or key == "dhcpScope":
            val = str(val)
        row.append(val)
    net_table.add_row(row)
    print(net_table)
def do_list_networks(self, line):
    """
    List all networks of a tenant as a table, one row per network.

    :param line: the tenant name.
    """
    tenant_name = line
    if not tenant_name:
        print("Tenant name is required.")
        return
    try:
        part_name = self._cfg.dcnm.default_partition_name
        net_list = self.dcnm_client.list_networks(tenant_name, part_name)
        if not net_list:
            print("No network found.")
            return
    except dexc.DfaClientRequestFailed:
        print("Failed to access DCNM.")
        return
    # Columns are taken from the first network's keys.
    list_table = None
    for net in net_list:
        if list_table is None:
            list_table = PrettyTable(net.keys())
        list_table.add_row(net.values())
    print(list_table)
def das_tretas_vish(batch_tweets):
    """
    Print frequency tables and lexical statistics for a batch of tweets.

    :param batch_tweets: iterable of tweets understood by
                         extract_entities()/get_common_tweet_entities().
    :return: tuple (top-10 words, top-10 screen names, top-10 hashtags,
             lexical diversity of words, average words per tweet).
    """
    tweet_texts, screen_names, hashtags, words, urls, symbols = extract_entities(batch_tweets)
    common_entities = get_common_tweet_entities(batch_tweets, entity_threshold=5)
    # One table per entity category showing the top 10 by frequency.
    # BUG FIX: corrected "Sreen Name" label typo to "Screen Name".
    for label, data in (("Word", words),
                        ("Screen Name", screen_names),
                        ("Hashtag", hashtags),
                        ("Urls", urls),
                        ("Symbols", symbols),
                        ("Common Entities", common_entities)):
        pt = PrettyTable(field_names=[label, "Count"])
        c = Counter(data)
        # Plain loop (not a throwaway list comprehension) for the
        # side-effecting add_row calls.
        for kv in c.most_common()[:10]:
            pt.add_row(kv)
        pt.align[label], pt.align["Count"] = "l", "r"
        print(pt)
    lexDivWords = lexical_diversity(words)
    avgWords = average_words(tweet_texts)
    mcWords = Counter(words).most_common()[:10]
    mcNames = Counter(screen_names).most_common()[:10]
    mcHashs = Counter(hashtags).most_common()[:10]
    print("Diversidade léxica das palavras: %.2f" % lexDivWords)
    print("Diversidade léxica dos screen_names: %.2f" % lexical_diversity(screen_names))
    print("Diversidade léxica das hashtags: %.2f" % lexical_diversity(hashtags))
    print("Média de palavras por tweet: %.2f" % avgWords)
    return mcWords, mcNames, mcHashs, lexDivWords, avgWords
def summary(since):
    """
    Print a table of non-archived project versions with a release date
    falling between *since* and today (inclusive).

    :param since: 'today' or a JIRA-formatted date string.
    """
    from .jira_adapter import iter_projects, from_jira_formatted_date, to_jira_formatted_date
    from datetime import date
    from prettytable import PrettyTable
    table = PrettyTable(["Project", "Version", "Description", "Release Date"])
    table.align = 'l'
    today = date.today()
    since_date = today if since == 'today' else datetime_to_date(from_jira_formatted_date(since))
    for project in iter_projects():
        for version in reversed(project.versions):
            if version.archived:
                continue
            # Versions with no release date are skipped entirely.
            release_date_string = getattr(version, 'releaseDate', '')
            if not release_date_string:
                continue
            release_date = datetime_to_date(from_jira_formatted_date(release_date_string))
            # Keep only versions released within [since_date, today].
            if (release_date-since_date).days<0:
                continue
            if (release_date-today).days>0:
                continue
            # Unreleased versions are marked: ' **' if overdue, ' *' otherwise
            # (the conditional nests right: released -> bare name, else the
            # overdue check decides between the two suffixes).
            table.add_row([project.name,
                           version.name if version.released else version.name + ' **' if getattr(version, 'overdue', False) else version.name + ' *',
                           getattr(version, 'description', ''),
                           getattr(version, 'releaseDate', '')])
    print(table.get_string())
def print_muts_configuration_from_json(json_data, join_delim=", "):
    """ Prints MUTs configuration passed to test script for verboseness.

    :param json_data: dict mapping MUT index -> dict of MUT properties.
    :param join_delim: delimiter used when joining list-valued properties.
    :return: the rendered table as a string.
    """
    muts_info_cols = []
    # We need to check all unique properties for each defined MUT so that
    # every property across all MUTs gets its own column.
    for k in json_data:
        mut_info = json_data[k]
        for property in mut_info:
            if property not in muts_info_cols:
                muts_info_cols.append(property)
    # Prepare pretty table object to display all MUTs
    pt_cols = ["index"] + muts_info_cols
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"
    # Add rows to pretty print object; missing properties render as None
    # and list values are joined with join_delim.
    for k in json_data:
        row = [k]
        mut_info = json_data[k]
        for col in muts_info_cols:
            cell_val = mut_info[col] if col in mut_info else None
            if type(cell_val) == ListType:
                cell_val = join_delim.join(cell_val)
            row.append(cell_val)
        pt.add_row(row)
    return pt.get_string()
def generate_test_summary(test_summary):
    """ Prints well-formed summary with results (SQL table like)
        table shows target x test results matrix across.

    :param test_summary: list of result rows; each row's first element is
                         one of the single_test.TEST_RESULT_* constants and
                         the full row matches the table columns below.
    :return: the summary as a printable string.
    """
    result = "Test summary:\n"
    # Pretty table package is used to print results
    pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID",
                      "Test Description", "Elapsed Time (sec)",
                      "Timeout (sec)", "Loops"])
    pt.align["Result"] = "l"  # Left align
    pt.align["Target"] = "l"  # Left align
    pt.align["Toolchain"] = "l"  # Left align
    pt.align["Test ID"] = "l"  # Left align
    pt.align["Test Description"] = "l"  # Left align
    pt.padding_width = 1  # One space between column edges and contents (default)
    # Per-result-type counters, initialised to zero for every known status.
    result_dict = {single_test.TEST_RESULT_OK : 0,
                   single_test.TEST_RESULT_FAIL : 0,
                   single_test.TEST_RESULT_ERROR : 0,
                   single_test.TEST_RESULT_UNDEF : 0,
                   single_test.TEST_RESULT_IOERR_COPY : 0,
                   single_test.TEST_RESULT_IOERR_DISK : 0,
                   single_test.TEST_RESULT_IOERR_SERIAL : 0,
                   single_test.TEST_RESULT_TIMEOUT : 0}
    for test in test_summary:
        if test[0] in result_dict:
            result_dict[test[0]] += 1
        pt.add_row(test)
    result += pt.get_string()
    result += "\n"
    # Print result count, skipping statuses that never occurred
    # (the inner dict comprehension filters out zero counts).
    result += "Result: " + ' / '.join(['%s %s' % (value, key)
                                       for (key, value) in {k: v for k, v in result_dict.items()
                                                            if v != 0}.iteritems()])
    result += "\n"
    return result
def handleMetricsListRequest(YOMP, fmt, region=None, namespace=None,
                             metricName=None, instance=None):
    """
    List CloudWatch metrics either as JSON or as a left-aligned table.

    :param YOMP: connection/client handle passed to getCloudwatchMetrics().
    :param fmt: "json" for raw JSON output, anything else prints a table.
    :param region: optional region filter.
    :param namespace: optional namespace filter.
    :param metricName: optional metric-name filter.
    :param instance: optional instance filter.
    """
    metrics = getCloudwatchMetrics(YOMP,
                                   region=region,
                                   namespace=namespace,
                                   instance=instance,
                                   metricName=metricName)
    if fmt == "json":
        print(json.dumps(metrics))
    else:
        table = PrettyTable()
        table.add_column("Region", [x['region'] for x in metrics])
        table.add_column("Namespace", [x['namespace'] for x in metrics])
        table.add_column("Name", [x['name'] if 'name' in x else '' for x in metrics])
        table.add_column("Metric", [x['metric'] for x in metrics])
        # One extra column per known dimension type; the helper fills in
        # blanks for metrics lacking that dimension.
        tableAddMetricDimensionColumn(table, metrics, 'VolumeId')
        tableAddMetricDimensionColumn(table, metrics, 'InstanceId')
        tableAddMetricDimensionColumn(table, metrics, 'DBInstanceIdentifier')
        tableAddMetricDimensionColumn(table, metrics, 'LoadBalancerName')
        tableAddMetricDimensionColumn(table, metrics, 'AutoScalingGroupName')
        tableAddMetricDimensionColumn(table, metrics, 'AvailabilityZone')
        table.align = "l"  # left align
        print(table)
def format_stack_events(stack, limit=None):
    """
    Render a CloudFormation stack's events as a PrettyTable string.

    :param stack: either an object exposing describe_events(), or a boto
                  stack object with a stack_name attribute.
    :param limit: maximum number of event rows to render; defaults to all.
    :return: the table rendered via PrettyTable.get_string(end=limit).
    """
    # BUG FIX: the original computed `events` in the branch below and then
    # unconditionally reconnected and re-fetched them, discarding the first
    # result (and breaking the describe_events() fast path entirely).
    if hasattr(stack, 'describe_events'):
        events = list(stack.describe_events())
    else:
        cfn = boto.connect_cloudformation()
        events = list(cfn.describe_stack_events(stack.stack_name))
    if limit is None:
        limit = len(events)
    tab = PrettyTable(['Time', 'Type', 'Logical ID', 'Status', 'Reason'])
    tab.align = 'l'
    for e in events:
        # Reason may be None for successful transitions; render as ''.
        reason = e.resource_status_reason
        tab.add_row([
            local_date(e.timestamp),
            e.resource_type,
            e.logical_resource_id,
            e.resource_status,
            reason if reason is not None else ''
        ])
    return tab.get_string(end=limit)
def getAllRequests(self, show='table', limit=20): """ Function that will return the resource that were provisioned as a result of a given request. Parameters: show = return data as a table or json object limit = The number of entries per page. """ host = self.host token = self.token url = 'https://{host}/catalog-service/api/consumer/requests?limit={limit}&$orderby=requestNumber%20desc'.format(host=host, limit=limit) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': token } r = requests.get(url=url, headers=headers, verify=False) checkResponse(r) items = r.json() if show == 'table': table = PrettyTable(['Id', 'Request Number', 'Item', 'State']) for i in items['content']: table.add_row([i['id'], i['requestNumber'], i['requestedItemName'], i['state']]) print table elif show == 'json': return items['content']
def getResourceByName(self, name, show='json'): """ Function that will get a vRA resource by id. Parameters: show = return data as a table or json object name = name of the vRA resource. """ host = self.host token = self.token url = "https://{host}/catalog-service/api/consumer/resources?$filter=name%20eq%20'{name}'".format(host=host, name=name) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': token } r = requests.get(url=url, headers=headers, verify=False) checkResponse(r) resource = r.json() if show == 'table': table = PrettyTable(['Id', 'Name', 'Status', 'Catalog Item']) table.add_row([ resource['content'][0]['id'], resource['content'][0]['name'], resource['content'][0]['status'], resource['content'][0]['catalogItem']['label'] ]) print table elif show == 'json': return resource['content'][0]
def getEntitledCatalogItems(self, show='table', limit=20): """ Function that will return all entitled catalog items for the current user. Parameters: show = return data as a table or json object limit = The number of entries per page. """ host = self.host token = self.token url = 'https://{host}/catalog-service/api/consumer/entitledCatalogItems?limit={limit}&$orderby=name%20asc'.format( host=host, limit=limit) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': token } r = requests.get(url=url, headers=headers, verify=False) checkResponse(r) items = r.json() if show == 'table': table = PrettyTable(['Id', 'Name']) for i in items['content']: table.add_row([i['catalogItem']['id'], i['catalogItem']['name']]) print table elif show == 'json': return items['content']
def getRequest(self, id, show='table'): """ Function that will return request information for a given request. Parameters: id = the id of the vRA request. show = return data as a table or json object """ host = self.host token = self.token url = 'https://{host}/catalog-service/api/consumer/requests/{id}'.format(host=host, id=id) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': token } r = requests.get(url=url, headers=headers, verify=False) checkResponse(r) request = r.json() if show == 'table': table = PrettyTable(['Id', 'Request Number', 'Item', 'State']) table.add_row([request['id'], request['requestNumber'], request['requestedItemName'], request['state']]) print table elif show == 'json': return request
def getResourceByBusinessGroup(self, name, limit=100, show='json'): """ Function that will get all vRA resources running for a specific Business group Parameters: show = return data as a table or json object name = name of the vRA resource. """ host = self.host token = self.token url = "https://{host}/catalog-service/api/consumer/resources?$filter=organization/subTenant/name%20eq%20'{name}'&limit={limit}".format(host=host, name=name, limit=limit) headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Authorization': token } r = requests.get(url=url, headers=headers, verify=False) checkResponse(r) resource = r.json() if show == 'table': table = PrettyTable(['Id', 'Name', 'Description','Label' 'Status']) for item in resource['content']: table.add_row([ item['id'], item['name'], item['description'], item['resourceTypeRef']['label'], item['status'], ]) print table elif show == 'json': return resource
def getResourceNetworking(self, id=None, show='json', resource=None): """ Function that will return networking information for a given resource. Parameters: show = return data as a table or json object id = id of the vRA resource. """ assert id or resource assert not id or not resource if resource is None: resource = self.getResource(id) resourceData = resource['resourceData']['entries'] for i in resourceData: if i['key'] == 'NETWORK_LIST': networkList = i['value']['items'] for j in networkList: entries = j['values']['entries'] if show == 'table': table = PrettyTable(['Component', 'Value']) for i in entries: table.add_row([i['key'], i['value']['value']]) print table elif show == 'json': return entries
def show_ft(key = None, row_count = 100, detail=0): c_ft = pycassa.ColumnFamily(pool, 'FlowTable') l_ft = list(c_ft.get_range(row_count = row_count)) sl_ft=sorted(l_ft, key=lambda le: le[1].get('setup_time', 0)) x = PrettyTable(['setup_time', 'flow_id', 'sourcevn', 'sourceip', 'destvn', 'destip', 'dir', 'prot', 'sprt', 'dprt']) for l in sl_ft: setuptime = l[1].get('setup_time', None) if not setuptime: continue if (setuptime > 135300693300): setuptime = setuptime / 1000000 try: message_dt = datetime.datetime.fromtimestamp(setuptime) except: import pdb;pdb.set_trace() message_ts = message_dt.strftime('%Y-%m-%d %H:%M:%S') x.add_row([message_ts, str(l[0]) if detail else get_substring(str(l[0])), l[1]['sourcevn'] if detail else get_substring(l[1]['sourcevn']), socket.inet_ntoa(hex(l[1]['sourceip'] & 0xffffffff)[2:].zfill(8).decode('hex')), l[1]['destvn'] if detail else get_substring(l[1]['destvn']), socket.inet_ntoa(hex(l[1]['destip'] & 0xffffffff)[2:].zfill(8).decode('hex')), 'ing' if l[1]['direction_ing']==1 else 'egr', l[1]['protocol'], l[1]['sport'], l[1]['dport']]) print x print 'max row_count - %d, num elements = %d' %(row_count, len(l_ft))
def get_dhcp_logs(network_nodes, mac, network_id):
    """Get DHCP Logs From Network Node.

    Runs a shell command over ansible on every network node that dumps the
    qdhcp namespace's interfaces plus the last 5 matching DHCP log lines,
    then prints one table per contacted host.

    :param network_nodes: inventory host list of the DHCP agents.
    :param mac: MAC address to grep for in /var/log/messages.
    :param network_id: neutron network id (namespace is qdhcp-<id>).
    """
    dhcp_namespace = "qdhcp-%s" % network_id
    dhcp_agent_inventory = ansible.inventory.Inventory(network_nodes)
    dhcp_logs_command = "(echo '***** %s [ip a] ******';ip netns exec %s ip a); (echo '******************** DHCP LOGS *********************'; grep -w %s /var/log/messages | grep -v ansible-command | tail -5)" % (
        dhcp_namespace, dhcp_namespace, mac)
    runner = ansible.runner.Runner(
        module_name='shell',
        module_args=dhcp_logs_command,
        inventory=dhcp_agent_inventory,
    )
    result = runner.run()
    for agent in result['contacted']:
        # Keep only non-empty stdout lines; each becomes one table row.
        dhcp_log = []
        for line in result['contacted'][agent]['stdout'].split('\n'):
            if line != "":
                dhcp_log.append(line)
        dhcp_header = "DHCP Data on host [%s] for Mac Address: %s" % (agent, mac)
        dhcp_table = PrettyTable([dhcp_header])
        dhcp_table.align[dhcp_header] = "l"
        for log in dhcp_log:
            dhcp_table.add_row([log])
        print dhcp_table
def printQualitatNeat(measdata: list, b: list, Ve, func, c, jac):
    """
    Print a table of estimation-quality metrics (uses PrettyTable).

    :param measdata: list of experimental-data dicts
                     [{'x': [], 'y': []}, {'x': [], 'y': []}]
    :param b: coefficient vector
    :param Ve: covariance matrix of the measured data
    :param func: callable model function with signature (x, b, c)
    :param c: dict of additional constants
    :param jac: Jacobian callable, used to compute the Vb matrix
    :return: None; prints mean/variance/sigma of the log-likelihood and of
             the residuals, then the Vb matrix and per-parameter sigmas.
    """
    # Column headers are Russian UI strings: mean/variance/sigma of the
    # log-likelihood followed by mean/variance/sigma of the residuals.
    t = PrettyTable(
        [
            "Среднее логарифма правдоподобия",
            "Дисперсия лп",
            "Сигма лп",
            "Среднее остатков",
            "Дисп. остатков",
            "Сигма остатков",
        ]
    )
    # averageDif's final element is dropped ([:-1:]) so the combined row
    # matches the six columns above.
    t.add_row(list(logTruthness(measdata, b, Ve, func, c))
              + list(averageDif(measdata, b, Ve, func, c))[:-1:])
    print("Показатели качества оценки")
    print(t)
    print("Матрица Vb")
    Vb = countVbForMeasdata(b, c, Ve, jac, measdata)
    print(Vb)
    print("Parameter Sigmas")
    # Per-parameter standard deviation = sqrt of Vb's diagonal entries.
    for i in range(Vb.shape[0]):
        print(math.sqrt(Vb[i][i]))
def show_workspace(self, name):
    """Show specific workspace.

    Logs the workspace path, repository count, and a table with each
    repository's name, path and detected SCM.

    :param name: workspace name.
    :raises ValueError: if the workspace does not exist.
    """
    if not self.workspace.exists(name):
        raise ValueError("Workspace `%s` doesn't exists." % name)
    color = Color()
    workspaces = self.workspace.list()
    self.logger.info("<== %s workspace ==>" % color.colored(name, "green"))
    self.logger.info("\tPath: %s" % workspaces[name]["path"])
    self.logger.info("\tNumber of repositories: %s" % color.colored(
        len(workspaces[name]["repositories"]), "yellow"))
    # Colored header cells double as PrettyTable column keys below.
    repo_colored = color.colored("Repositories", "blue")
    path_colored = color.colored("Path", "blue")
    trepositories = PrettyTable(
        [repo_colored, path_colored, color.colored("+", "blue")])
    trepositories.align[repo_colored] = "l"
    trepositories.align[path_colored] = "l"
    for repo_name in workspaces[name]["repositories"]:
        fullname = "%s/%s" % (name, repo_name)
        fullpath = find_path(fullname, self.config)[fullname]
        # SCM detection is best-effort; unknown adapters render as None.
        try:
            repo = Repository(fullpath)
            repo_scm = repo.get_scm()
        except RepositoryAdapterNotFound:
            repo_scm = None
        trepositories.add_row(
            [color.colored(repo_name, "cyan"), fullpath, repo_scm])
    self.logger.info(trepositories)
def freq_fault():
    """
    Compare path-tracking error of the constant-carrier-frequency method
    against the corrections-forecast method for periods T in [2, 10),
    printing one table and one plot per error metric (mean, max, sum).
    """
    fault2, fault3 = [], []
    for j in range(2, 10):
        circle1 = circle.CirclePath((70, 85), 30, T=j, V= 5, step=5)
        circle1.get_path_by_interpolation_predict()
        circle1.method_cont_carrier_freq()
        circle1.corrections_forecast()

        def find_fault(data):
            # Error of each path point: deviation of its distance from the
            # circle centre relative to the ideal radius.
            faults = []
            points = list(zip(data['x'], data['y']))
            for i in points:
                faults.append(abs(circle1.radius - circle1.get_distance_between_center_and_point(i[0], i[1])))
            # (T, mean error, max error, total error)
            return (j, np.mean(faults), max(faults), sum(faults))

        fault2.append(find_fault(circle1.inter_freq))
        fault3.append(find_fault(circle1.cor_for))
    # Tuple indices 1..3 are mean/max/sum; the last two T values are
    # dropped ([:-2]) from every series.
    for j in range(1, 4):
        zero = [i[0] for i in fault2][:-2]
        second = [i[j] for i in fault2][:-2]
        third = [i[j] for i in fault3][:-2]
        table1 = PrettyTable()
        table1.field_names = ['T', 'const freq', 'corrections_forecast']
        for i in range(len(zero)):
            table1.add_row([zero[i], second[i], third[i]])
        print(table1)
        plot_fault(zero, [0 for i in range(len(zero))], second, third, 'T')
def print_args_as_table():
    """
    Print global_args as a numbered two-column table, rendering the listen
    and mongo endpoints in red ANSI color.
    """
    # args_table -> a_table
    a_first_row = ["Argument/Parameter", "Value"]
    a_first_row.insert(0, '')  # blank header for the row-number column
    a_table = PrettyTable(a_first_row)
    a_dict = global_args._make_dict()
    # Ports are folded into their corresponding host rows below, so they
    # are removed from the dict before iterating.
    listen_port = a_dict.pop('listen_port')
    mongo_port = a_dict.pop('mongo_port')
    row_no = 1
    for key, val in a_dict.iteritems():
        # \033[31m ... \033[0m wraps the endpoint strings in red.
        if key == "listen_host":
            val = "\033[31m " + "https://" + val + ":" + str(listen_port) + '/' + "\033[0m"
        if key == "mongo_host":
            val = "\033[31m " + "MongoDB: " + val + ":" + str(mongo_port) + "\033[0m"
        a_row = [str(row_no), key, val]
        a_table.add_row(a_row)
        row_no += 1
    print a_table
    return
def print_output(columns, rows=None, alignment=None, fmt='pretty', header=True, delimiter='\t'):
    """
    Echo tabular data in one of three formats.

    :param columns: column names.
    :param rows: iterable of row sequences (defaults to empty).
    :param alignment: optional {column: align-char} map, 'pretty' only.
    :param fmt: 'pretty' (PrettyTable), 'json' (list of dicts),
                or 'tsv' (delimiter-joined lines).
    :param header: when True, emit the column header line in 'tsv' mode.
    :param delimiter: field separator for 'tsv' mode.
    """
    rows = rows or []
    if fmt == 'pretty':
        table = PrettyTable(columns)
        if alignment:
            for column, align in alignment.items():
                table.align[column] = align
        for row in rows:
            table.add_row(row)
        click.echo(table)
    elif fmt == 'json':
        click.echo(json.dumps([dict(zip(columns, row)) for row in rows]))
    elif fmt == 'tsv':
        if columns is not None and header:
            click.echo(delimiter.join(columns) + '\n')
        for row in rows:
            click.echo(delimiter.join(str(cell) for cell in row))
def create_results_table():
    """Build an empty PrettyTable for match results, left-aligning the
    Home, Away, Goals and Date columns."""
    table = PrettyTable(["Home", "Away", "Result", "Goals", "Status", "Date"])
    for column in ("Home", "Away", "Date", "Goals"):
        table.align[column] = "l"
    return table
def tags_list(self):
    """
    Fetch and print the registry's tag list as a one-column table,
    followed by a total count.
    """
    req = requests.get(self.build_url("/tags/list"),
                       auth=(self.username, self.password), verify=False)
    # Older requests releases exposed .json as a property instead of a
    # method, so fall back to the attribute form before giving up.
    try:
        res = req.json()
    except:
        try:
            res = req.json
        except Exception as e:
            print("ERROR: Unable to parse results: {0}".format(e))
            return
    if self.check_errors(req.status_code):
        return
    table = PrettyTable(["tag"])
    table.align = "l"
    table.padding_width = 1
    for tag in res:
        table.add_row([tag])
    print(table)
    print("Total: %s" % len(res))
def run_test_api(config_json, namespace):
    """
    Exercise the FATE-Flow client APIs end to end and print one ORGMODE
    PrettyTable of pass/fail status per API group (data, table, job, task,
    component, model).

    :param config_json: dict of test configuration (paths, party ids,
                        component name, serving settings, ...).
    :param namespace: namespace handed to TestModel.
    """
    output_path = './output/flow_test_data/'
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    fate_flow_path = config_json[
        'data_base_dir'] / 'fateflow' / 'python' / 'fate_flow' / 'fate_flow_client.py'
    if not fate_flow_path.exists():
        raise FileNotFoundError(
            f'fate_flow not found. filepath: {fate_flow_path}')
    test_api = TestModel(config_json['data_base_dir'], str(fate_flow_path),
                         config_json['component_name'], namespace)
    test_api.dsl_path = config_json['train_dsl_path']
    test_api.cache_directory = config_json['cache_directory']
    test_api.output_path = str(os.path.abspath(output_path)) + '/'
    conf_path = config_json['train_conf_path']
    guest_party_id = config_json['guest_party_id']
    host_party_id = config_json['host_party_id']
    arbiter_party_id = config_json['arbiter_party_id']
    upload_file_path = config_json['upload_file_path']
    model_file_path = config_json['model_file_path']
    conf_file = get_dict_from_file(upload_file_path)
    serving_connect_bool = serving_connect(config_json['serving_setting'])
    # Local model-cache directory for this party combination; used to
    # clear state before the model import test.
    remove_path = str(config_json['data_base_dir']).split(
        "python"
    )[0] + '/model_local_cache/guest#{}#arbiter-{}#guest-{}#host-{}#model/'.format(
        guest_party_id[0], arbiter_party_id[0], guest_party_id[0],
        host_party_id[0])
    max_iter = test_api.set_config(guest_party_id, host_party_id,
                                   arbiter_party_id, conf_path,
                                   config_json['component_name'])
    # ---- data APIs ----
    data = PrettyTable()
    data.set_style(ORGMODE)
    data.field_names = ['data api name', 'status']
    data.add_row(
        ['data upload', judging_state(test_api.data_upload(upload_file_path))])
    data.add_row([
        'data download',
        judging_state(test_api.data_download(conf_file, output_path))
    ])
    data.add_row([
        'data upload history',
        judging_state(test_api.data_upload_history(upload_file_path))
    ])
    print(data.get_string(title="data api"))
    # ---- table APIs ----
    table = PrettyTable()
    table.set_style(ORGMODE)
    table.field_names = ['table api name', 'status']
    table.add_row([
        'table info',
        judging_state(test_api.table_api('table_info', conf_file))
    ])
    table.add_row([
        'delete table',
        judging_state(test_api.table_api('table_delete', conf_file))
    ])
    print(table.get_string(title="table api"))
    # ---- job APIs (table is printed later, after the cleanup rows) ----
    job = PrettyTable()
    job.set_style(ORGMODE)
    job.field_names = ['job api name', 'status']
    job.add_row(['job stop', judging_state(test_api.job_api('stop_job'))])
    job.add_row(['job submit', judging_state(test_api.submit_job(stop=False))])
    job.add_row([
        'job query',
        judging_state(False if test_api.query_job() == "success" else True)
    ])
    job.add_row(
        ['job data view', judging_state(test_api.job_api('data_view_query'))])
    job.add_row(
        ['job config', judging_state(test_api.job_config(max_iter=max_iter))])
    job.add_row(['job log', judging_state(test_api.job_api('job_log'))])
    # ---- task APIs ----
    task = PrettyTable()
    task.set_style(ORGMODE)
    task.field_names = ['task api name', 'status']
    task.add_row(['task query', judging_state(test_api.query_task())])
    print(task.get_string(title="task api"))
    # ---- component APIs (printed after the model section adds the
    #      metrics-delete row) ----
    component = PrettyTable()
    component.set_style(ORGMODE)
    component.field_names = ['component api name', 'status']
    component.add_row([
        'output data',
        judging_state(test_api.component_api('component_output_data'))
    ])
    component.add_row([
        'output table',
        judging_state(test_api.component_api('component_output_data_table'))
    ])
    component.add_row([
        'output model',
        judging_state(test_api.component_api('component_output_model'))
    ])
    component.add_row([
        'component parameters',
        judging_state(
            test_api.component_api('component_parameters', max_iter=max_iter))
    ])
    component.add_row([
        'metrics',
        judging_state(test_api.component_api('component_metrics'))
    ])
    component.add_row([
        'metrics all',
        judging_state(test_api.component_api('component_metric_all'))
    ])
    # ---- model APIs ----
    model = PrettyTable()
    model.set_style(ORGMODE)
    model.field_names = ['model api name', 'status']
    # load/bind only apply to non-homo components with serving reachable.
    if not config_json.get('component_is_homo') and serving_connect_bool:
        model_load_conf = get_dict_from_file(model_file_path)
        model_load_conf["initiator"]["party_id"] = guest_party_id
        model_load_conf["role"].update({
            "guest": [guest_party_id],
            "host": [host_party_id],
            "arbiter": [arbiter_party_id]
        })
        model.add_row([
            'model load',
            judging_state(
                test_api.model_api('load', model_load_conf=model_load_conf))
        ])
        model.add_row([
            'model bind',
            judging_state(
                test_api.model_api('bind',
                                   model_load_conf=model_load_conf,
                                   servings=config_json['serving_setting']))
        ])
    # NOTE(review): original indentation is not recoverable here — the
    # export/import/store/restore rows are assumed unconditional; confirm.
    status, model_path = test_api.model_api('export')
    model.add_row(['model export', judging_state(status)])
    model.add_row([
        'model import',
        (judging_state(
            test_api.model_api('import',
                               remove_path=remove_path,
                               model_path=model_path)))
    ])
    model.add_row(
        ['model store', (judging_state(test_api.model_api('store')))])
    model.add_row(
        ['model restore', (judging_state(test_api.model_api('restore')))])
    print(model.get_string(title="model api"))
    component.add_row([
        'metrics delete',
        judging_state(test_api.component_api('component_metric_delete'))
    ])
    print(component.get_string(title="component api"))
    # Queue up extra jobs so clean_job/clean_queue have work to clean.
    test_api.submit_job()
    test_api.submit_job()
    test_api.submit_job()
    job.add_row(['clean job', judging_state(test_api.job_api('clean_job'))])
    job.add_row(
        ['clean queue', judging_state(test_api.job_api('clean_queue'))])
    print(job.get_string(title="job api"))
    print('Please check the error content: {}'.format(
        test_api.error_log(None)))
def get_result_summary_table():
    """Print three PrettyTable reports about the global TESTS registry.

    1. A listing of every test and its properties.
    2. An overall automation-coverage summary.
    3. Per-test-id-prefix automation coverage.

    Python 2 code (print statements).  Reads the module-level TESTS list of
    dicts and calls progress_bar() -- both defined elsewhere in this file.
    Returns None; all output goes to stdout.
    """
    # get all unique test ID prefixes (the id minus its trailing "_<n>" part)
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    # automated-count and total-count per prefix
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)

    test_properties = [
        'id', 'automated', 'description', 'peripherals', 'host_test',
        'duration'
    ]

    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"

    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1  # One space between column edges and contents (default)

    for test in TESTS:
        row = []
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        for col in test_properties:
            # missing properties render as an empty cell
            row.append(test[col] if col in test else "")
        if 'automated' in test and test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    print pt
    print

    # Automation result summary
    # NOTE(review): divides by counter_all below -- raises ZeroDivisionError
    # when TESTS is empty; confirm TESTS is always non-empty.
    test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
    pt = PrettyTable(test_id_cols)
    pt.align['automated'] = "r"
    pt.align['all'] = "r"
    pt.align['percent [%]'] = "r"

    percent_progress = round(100.0 * counter_automated / float(counter_all),
                             1)
    str_progress = progress_bar(percent_progress, 75)
    pt.add_row(
        [counter_automated, counter_all, percent_progress, str_progress])
    print "Automation coverage:"
    print pt
    print

    # Test automation coverage table print
    test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
    pt = PrettyTable(test_id_cols)
    pt.align['id'] = "l"
    pt.align['automated'] = "r"
    pt.align['all'] = "r"
    pt.align['percent [%]'] = "r"
    for unique_id in unique_test_id:
        # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
        percent_progress = round(
            100.0 * counter_dict_test_id_types[unique_id] /
            float(counter_dict_test_id_types_all[unique_id]), 1)
        str_progress = progress_bar(percent_progress, 75)
        row = [
            unique_id, counter_dict_test_id_types[unique_id],
            counter_dict_test_id_types_all[unique_id], percent_progress,
            "[" + str_progress + "]"
        ]
        pt.add_row(row)
    print "Test automation coverage:"
    print pt
    print
elapsed_time = time() - start # Human readable summary if not opts.suppress_summary: result_dict = { single_test.TEST_RESULT_OK: 0, single_test.TEST_RESULT_FAIL: 0, single_test.TEST_RESULT_ERROR: 0, single_test.TEST_RESULT_UNDEF: 0 } print print "Test summary:" # Pretty table package is used to print results pt = PrettyTable([ "Result", "Target", "Toolchain", "Test ID", "Test Description", "Elapsed Time (sec)", "Timeout (sec)" ]) pt.align["Result"] = "l" # Left align pt.align["Target"] = "l" # Left align pt.align["Toolchain"] = "l" # Left align pt.align["Test ID"] = "l" # Left align pt.align["Test Description"] = "l" # Left align pt.padding_width = 1 # One space between column edges and contents (default) for test in test_summary: if test[0] in result_dict: result_dict[test[0]] += 1 pt.add_row(test) print pt # Print result count
iou_thres, conf_thres, precision, recall = [], [], [], [] # append to the list iou_thres.append(iou_thres_) conf_thres.append(float(line['Conf Threshold'])) precision.append(float(line['Precision'])) recall.append(float(line['Recall'])) prev_iou_thres = iou_thres_ if len(iou_thres) > 0: iou_thres_list.append(iou_thres) conf_thres_list.append(conf_thres) precision_list.append(precision) recall_list.append(recall) table = PrettyTable(['IOU Threshold', 'mAP']) mAP_list = [] for i, iou_thres in enumerate(iou_thres_list): conf_thres = conf_thres_list[i] precision = precision_list[i] recall = recall_list[i] precision_, recall_, mAP_ = mAP(precision, recall) mAP_list.append(mAP_) if iou_thres[0] > 0.8: continue plt.plot(recall_, precision_, '-') legend_list.append('IOU: {:.2f}'.format(iou_thres[0])) table.add_row( ['{:.3f}'.format(iou_thres[0]), '{:.3f}'.format(mAP_)]) print('mAP for class: {}'.format(obj_class))
def main():
    """Interactive Python 2 tic-tac-toe trainer.

    The machine learns V* values for all 3**9 = 19683 board states (each
    cell is O=0, empty=1, X=2, so a board encodes as a base-3 number) and
    plays against the user, persisting progress to 'save_vstar.p' via
    pickle every turn.  Relies on helpers defined elsewhere in this file:
    set_initial_values(), vaild_goto_states(), convert_state_to_decimal(),
    is_invalid_choice() and user_input().
    """
    learning_rate = 0.5
    state_num = [
        0
    ] * 19683  # State_Num capture the V* values for states(their dec value)
    try:
        state_num_temp = pickle.load(open(
            'save_vstar.p', 'rb'))  # Check if any progress exists
        choice = raw_input('Load previous progress - Y\~Y : ')
        if choice == 'Y':
            state_num = state_num_temp
        else:
            state_num = [0] * 19683
    except:
        # no / unreadable save file -- start from scratch
        pass
    good_states = []
    bad_states = []
    tie_states = []
    set_initial_values(state_num, good_states, bad_states, tie_states)
    init_state = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]  # all cells empty
    current_state = copy.deepcopy(init_state)
    mapping = {0: 'O', 1: ' ', 2: 'X'}  # MAPPING : O -> 0, _ -> 1, X -> 2
    iter1 = True  # 1st iteration Bool
    while (True):
        pickle.dump(state_num,
                    open('save_vstar.p', 'wb'))  # Save the progress thus far
        if iter1:  # iter1 is set to false down below
            print 'Current State of the Game :'
        # render the current board
        t = PrettyTable(header=False, hrules=prettytable.ALL)
        t.add_row([
            mapping[current_state[0][0]], mapping[current_state[0][1]],
            mapping[current_state[0][2]]
        ])
        t.add_row([
            mapping[current_state[1][0]], mapping[current_state[1][1]],
            mapping[current_state[1][2]]
        ])
        t.add_row([
            mapping[current_state[2][0]], mapping[current_state[2][1]],
            mapping[current_state[2][2]]
        ])
        print t
        print 'Machine Thinking...'
        time.sleep(1)
        # Get list of all valid states the machine can goto
        goto_states = vaild_goto_states(current_state, good_states,
                                        bad_states)
        # Now we find the state with the largest V* / state_num value in the
        # goto states
        max_vstar = -10000
        for state in goto_states:
            temp_max = state_num[convert_state_to_decimal(state)]
            if temp_max > max_vstar:
                max_vstar = temp_max
                try:
                    # drop the previous candidate (unbound on the first hit)
                    goto_state[:] = []
                except:
                    pass
                goto_state = copy.deepcopy(state)
        # If the machine is returning to initial state, notify it to the user
        if goto_state == init_state:
            current_state = copy.deepcopy(init_state)
            print 'Returning to initial state'
            continue
        # Setting the value of state_num/ V* for the current state based on
        # the goto state chosen
        state_num_current_state = learning_rate * state_num[
            convert_state_to_decimal(goto_state)]
        state_num[convert_state_to_decimal(
            current_state)] = state_num_current_state
        # Now goto state has become the current state
        current_state[:] = []
        current_state = copy.deepcopy(goto_state)
        t.clear()
        t = PrettyTable(header=False, hrules=prettytable.ALL)
        t.add_row([
            mapping[current_state[0][0]], mapping[current_state[0][1]],
            mapping[current_state[0][2]]
        ])
        t.add_row([
            mapping[current_state[1][0]], mapping[current_state[1][1]],
            mapping[current_state[1][2]]
        ])
        t.add_row([
            mapping[current_state[2][0]], mapping[current_state[2][1]],
            mapping[current_state[2][2]]
        ])
        print t
        # terminal-state checks after the machine's move
        if current_state in good_states:
            print 'The machine has won!\nRise of the Deux Ex Machina.'
            current_state[:] = []
            current_state = copy.deepcopy(init_state)
            if user_input():
                exit(0)
            else:
                continue
        if current_state in bad_states:
            print 'User has won'
            current_state[:] = []
            current_state = copy.deepcopy(init_state)
            if user_input():
                exit(0)
            else:
                continue
        if current_state in tie_states:
            print 'Its a TIE!'
            current_state[:] = []
            current_state = copy.deepcopy(init_state)
            if user_input():
                exit(0)
            else:
                continue
        print 'YOUR MOVE!'
        i = -1
        j = -1
        # keep prompting until the user picks an empty, in-range cell
        while is_invalid_choice(i, j, current_state):
            print 'Input an available cell number',
            if iter1:
                print '(Eg. format -> 0 2 ) : ',
                iter1 = False
            else:
                print ': ',
            str_arr = raw_input().split(' ')
            try:
                i, j = [int(num) for num in str_arr]
            except ValueError:
                # non-numeric input -> force another prompt round
                i, j = [-1, -1]
                continue
        prev_state = copy.deepcopy(current_state)
        current_state[i][j] = 0  # the user always plays 'O'
        t.clear()
        t = PrettyTable(header=False, hrules=prettytable.ALL)
        t.add_row([
            mapping[current_state[0][0]], mapping[current_state[0][1]],
            mapping[current_state[0][2]]
        ])
        t.add_row([
            mapping[current_state[1][0]], mapping[current_state[1][1]],
            mapping[current_state[1][2]]
        ])
        t.add_row([
            mapping[current_state[2][0]], mapping[current_state[2][1]],
            mapping[current_state[2][2]]
        ])
        print t
        t.clear()
        # back up the discounted value of the new state onto the previous one
        temp = learning_rate * state_num[convert_state_to_decimal(
            current_state)]
        state_num[convert_state_to_decimal(prev_state)] = temp
        # terminal-state checks after the user's move
        if current_state in bad_states:
            print 'User has won!'
            current_state[:] = []
            current_state = copy.deepcopy(init_state)
            if user_input():
                exit(0)
            else:
                continue
        if current_state in tie_states:
            print 'Its a TIE!'
            current_state[:] = []
            current_state = copy.deepcopy(init_state)
            if user_input():
                exit(0)
            else:
                continue
def pretty_print(self):
    """Print the stored trains as a PrettyTable.

    Uses self.header for the column names and adds one table row per item
    yielded by self.trains().  Writes the rendered table to stdout and
    returns None.
    """
    pt = PrettyTable()
    # Use the public field_names setter instead of the private
    # _set_field_names() helper: the underscored API is PrettyTable
    # internals and may change or disappear between releases, while the
    # property performs the same assignment (plus validation).
    pt.field_names = self.header
    for train in self.trains():
        pt.add_row(train)
    print(pt)
import numpy as np
import matplotlib.pyplot as plt
from prettytable import PrettyTable

# Experiment parameters (SI units assumed -- TODO confirm against the lab sheet).
l = 131.e-02            # slit-to-screen distance (m), presumably
counter = 0             # write index for the i/j/p lists below
wlength = 55.e-08       # wavelength (m)
const = 1.e-07          # current decade scale for the slit width (m)
I_naught = 426.5        # source intensity
c = 3.e8                # speed of light (m/s)
epsilon_naught = 8854.e-15  # vacuum permittivity
tab = PrettyTable()
tab.field_names = ["slit width/m", "Photoelectric current/x10^-3 A"]
p = []  # currents
i = []  # slit widths
j = []  # intensities
a = 1
b = a * const
# Sweep slit widths b = a * const for a = 1..9, then scale const by 10 and
# repeat, until b**2 reaches l * wlength.
while ((b**2) < (l * wlength)):
    intensity = (2 * I_naught /
                 (c * epsilon_naught)) / (4 * ((np.pi)**2)) * (b**2)
    # empirical calibration line mapping intensity -> photocurrent
    current = (0.0166 * intensity) + 0.0504
    tab.add_row([b, current])
    i.insert(counter, b)
    j.insert(counter, intensity)
    p.insert(counter, current)
    # BUG FIX: counter was never incremented, so every insert() landed at
    # index 0 and i/j/p came out in reverse order relative to the table.
    counter = counter + 1
    a = a + 1
    if a % 10 == 0:
        a = 1
        const = const * 10
    # BUG FIX: b was never recomputed inside the loop, so the while
    # condition never changed and the loop ran forever.
    b = a * const
def get_security_group(self, region, vpcid, section):
    """Show firewall rules for every non-default security group in a VPC.

    :param region: AWS region name.
    :param vpcid: id of the VPC whose security groups are listed.
    :param section: credentials/config section forwarded to get_access().
    :return: dict mapping group name -> group id.

    If the VPC holds no security group besides the default one, the user is
    prompted to create a new group first and the listing is then repeated
    (also dumping each raw group dict, matching the original behaviour of
    that branch).
    """
    srcgroups_dict = {}
    srcgroups_list = []
    ec2 = self.get_access(region, section)[0]
    vpc_filter = [{'Name': 'vpc-id', 'Values': [vpcid]}]
    # Fetch the group list once instead of re-calling the API per index as
    # the previous implementation did -- same data, far fewer round trips.
    groups = ec2.describe_security_groups(
        Filters=vpc_filter)['SecurityGroups']
    # NOTE: "> 1" means "something besides the default group exists".
    if len(groups) > 1:
        self._show_group_rules(groups, srcgroups_list, srcgroups_dict,
                               dump_raw=False)
    else:
        print("[*] There is no security group, Pls create a new security group. ")
        groupname = input('[*] Pls key-in the security groupname: ')
        ec2.create_security_group(
            Description='test',
            GroupName=groupname,
            VpcId=vpcid,
            DryRun=False
        )
        groups = ec2.describe_security_groups(
            Filters=vpc_filter)['SecurityGroups']
        self._show_group_rules(groups, srcgroups_list, srcgroups_dict,
                               dump_raw=True)
    print(srcgroups_dict)
    return srcgroups_dict

def _show_group_rules(self, groups, names, mapping, dump_raw):
    """Print a rules table for each non-default group; fill names/mapping.

    :param groups: list of SecurityGroup dicts from describe_security_groups.
    :param names: list extended with each group name (kept for parity with
        the original implementation).
    :param mapping: dict updated in place with name -> GroupId.
    :param dump_raw: when True, also print each raw group dict (matches the
        post-creation branch of the original code).
    """
    for securitygroups in groups:
        if dump_raw:
            print(securitygroups)
        if securitygroups['GroupName'] == 'default':
            continue
        sec_group = PrettyTable(['Direction', 'Subnets', 'Port', 'Protocol'])
        grpname = securitygroups['GroupName']
        grpid = securitygroups['GroupId']
        names.append(grpname)
        mapping[grpname] = grpid
        # Egress rules; entries without FromPort (e.g. "all traffic") are
        # skipped, as in the original code.
        for item in securitygroups['IpPermissionsEgress']:
            if "FromPort" in item:
                for cidr in item['IpRanges']:
                    sec_group.add_row(['Outgoing', cidr['CidrIp'],
                                       item['FromPort'], item['IpProtocol']])
        # Ingress rules; entries without ToPort are skipped likewise.
        for item in securitygroups['IpPermissions']:
            if "ToPort" in item:
                for cidr in item['IpRanges']:
                    sec_group.add_row(['Incoming', cidr['CidrIp'],
                                       item['ToPort'], item['IpProtocol']])
        print("[*] Firewall rules for %s " % grpname)
        print(sec_group)
# Demo: build a small PrettyTable column-by-column and tweak alignment.
# (An earlier commented-out turtle-graphics experiment lived here.)
from prettytable import PrettyTable

pokemon_table = PrettyTable()
pokemon_table.add_column("Pokemon Name", ["Pikachu", "Squirtle", "Charmander"])
pokemon_table.add_column("Type", ["Electric", "Water", "Fire"])

# Show the default alignment mapping before overriding it.
print(pokemon_table.align)
pokemon_table.align = "l"                   # left-align every column...
pokemon_table.align['Pokemon Name'] = "c"   # ...but centre the name column
print(pokemon_table)
def main(self):
    """Interactive EC2 provisioning wizard.

    Walks the operator through region, AMI, subnet, security-group, keypair
    and DNS choices, then creates and optionally tests/provisions the
    instances.  All failures are reported via print; returns None.

    NOTE(review): the nesting of the try/except pyramid below was
    reconstructed from an auto-collapsed source -- confirm against the
    original file.  Several bare `except:` clauses swallow every error
    (including KeyboardInterrupt); narrowing them would aid debugging.
    """
    print(self.retr_section())
    section = input('[*] Pls input the authentication section for this task: ')
    try:
        print("[*] Retrieving Region Lists")
        # region_invalid_input = True
        region_list = self.get_region(section)
        region_ref = PrettyTable(['Region_List'])
        for region in region_list:
            region_ref.add_row([region])
        region = input("%s\n[*] Choose the region from the table Above: " %
                       region_ref).lower()
        try:
            try:
                subnet_ref = self.get_vpc_subnets(region, section)
                if subnet_ref != "":
                    count = input("[*] How Many instances You like to provision, maximum 5: ")
                    try:
                        print(self.get_amis(region, section))
                        imageid = input("[*] Pls the close the image to boot from: ")
                        server_type = input("[*] Key-in the server type, web or db: ")
                        subnet_id = input('%s\n[*] Pls select the subnet: ' %
                                          subnet_ref).lower()
                        try:
                            # print self.get_vpc_subnets(region)
                            vpcid = self.get_vpcid(region, subnet_id, section)
                            group_dict = self.get_security_group(region, vpcid, section)
                            try:
                                secgrp_name = input("[*] Pls In-put the Security Group: ")
                                # KeyError here (unknown name) is caught below
                                secgrp = group_dict[secgrp_name]
                                rule_change = input('[*] Do you want to change the rules? yes or no? ')
                                if 'yes' in rule_change:
                                    self.security_group_rules(region, secgrp, section)
                                else:
                                    pass
                                try:
                                    self.get_key(region, section)
                                    keyname = input("[*] Pls select the keypair: ")
                                    #keyname = 'singtel'
                                    try:
                                        self.create_instance(int(count), region, server_type,
                                                             subnet_id, secgrp, imageid, vpcid,
                                                             keyname, section)
                                        time.sleep(2)
                                        servers = self.get_initizing_serverlist(region, server_type,
                                                                                vpcid, section)
                                        try:
                                            srv_post_test_input = input(
                                                "[*] Do you want to test the access to the VM? yes or no?: ")
                                            if 'yes' in srv_post_test_input:
                                                self.srv_post_test(servers)
                                            else:
                                                print("%s Provision Done" % servers)
                                                # NOTE(review): the DNS/provision prompts appear to
                                                # run only when access-testing was declined --
                                                # confirm this nesting is intended.
                                                dns_user_input = input(
                                                    "[*] Do you want create DNS Alias? yes or no?: ")
                                                if dns_user_input == "yes":
                                                    try:
                                                        self.create_cname(region, servers, section)
                                                    except:
                                                        print("Assign CNAME Failed")
                                                else:
                                                    pass
                                                provision = input(
                                                    '[*] Do you want to provision the server? yes or no? ')
                                                if 'yes' in provision:
                                                    if self.srv_post_test(servers):
                                                        self._provision('/etc/ansible/test.yml')
                                                    else:
                                                        print("Test Failed")
                                        except:
                                            print("[*] Addtional Services Failed")
                                    except Exception as e:
                                        print(e)
                                        print("Initiate instance failed")
                                except:
                                    print("[*] Failed to Retrieve the key")
                            except:
                                print("[*] Failed to retrieve the security group")
                        except Exception as e:
                            print("[*] Retrieve Security Group List failed")
                    except Exception as e:
                        print("[*] Retrieve AMI list failed")
                else:
                    print("[Error] No VPC available in this Region")
            except Exception as e:
                print(e)
        except Exception as e:
            print("[Error] Retrieving Subnets Failed")
    except:
        print("[Error] Retrieving Regions Failed")
def byyyy(ordered_items, hhh, user_name):
    """Persist an order, write a receipt file and e-mail it to the user.

    :param ordered_items: flat comma-separated string
        "sno,item,qty,sno,item,qty,..." (triples).
    :param hhh: final amount paid.
    :param user_name: roll number used as the user id in the users table.
    :return: rendered 'bye.html' template.

    NOTE(review): relies on the module-level `order_no` global -- confirm it
    is always set before this is called.
    """
    user_id = user_name
    final_amount = hhh
    # Split the flat CSV string into [sno, item, qty] triples.
    fields = ordered_items.split(',')
    list1 = [fields[idx:idx + 3] for idx in range(0, len(fields), 3)]
    # One connection for both the lookups and the insert (the original
    # opened two); closed deterministically via try/finally.
    con = sql.connect("test.db")
    try:
        cur = con.cursor()
        cur.execute('SELECT mobileno FROM users WHERE rollno = ?', (user_id,))
        mobile_number = cur.fetchone()[0]
        cur.execute('SELECT email FROM users WHERE rollno = ?', (user_id,))
        email_id = cur.fetchone()[0]
        cur.execute(
            "INSERT INTO payment_history (order_no,User,payment,mobileno,emailid) VALUES (?,?,?,?,?)",
            (order_no, user_id, final_amount, mobile_number, email_id))
        con.commit()
    finally:
        con.close()
    # Build the receipt table and write it to "<order_no>.txt".
    file_name = str(order_no) + ".txt"
    t = PrettyTable(['S.NO', 'ITEM NAME', 'QUANTITY'])
    for triple in list1:
        t.add_row([triple[0], triple[1], triple[2]])
    with open(file_name, "w") as f:
        f.write("\nCU-PAL\n\n")
        f.write("User Name %s" % user_id)
        f.write("\nEmail-id %s" % email_id)
        f.write("\n Your Order has been placed and your order number is %s\n" % order_no)
        f.write(str(t))
        f.write("\nTOTAL AMOUNT PAYED %s" % final_amount)
        f.write("\n Please go to the counter and collect you order\nTHANK YOU!!")
    # Compose the confirmation mail with the receipt attached.
    fromaddr = "*****@*****.**"
    toaddr = email_id
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = "CU-PAL(YOUR ORDER HAS BEEN PLACED)"
    body = "\nThank You for using our application\nYour Order Number is %s \nRegards\nCU-PAL" % order_no
    msg.attach(MIMEText(body, 'plain'))
    filename = file_name
    path = "/home/sushant/Desktop/flask_test/" + file_name
    p = MIMEBase('application', 'octet-stream')
    # BUG FIX: the attachment file handle was never closed; the context
    # manager guarantees release even if read() fails.
    with open(path, "rb") as attachment:
        p.set_payload(attachment.read())
    encoders.encode_base64(p)
    p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(p)
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    smtp.starttls()
    # SECURITY(review): hard-coded account password -- move to an
    # environment variable or config outside version control.
    smtp.login(fromaddr, "sushant123")
    smtp.sendmail(fromaddr, toaddr, msg.as_string())
    smtp.quit()
    return render_template('bye.html')
def main(args):
    """CLI entry point (Python 2): show AWE client activity.

    With --summary, prints a stage x clientgroup matrix of busy clients;
    with --clientgroup, lists that group's clients and their current work.
    Relies on module-level AWE_URL, MGP, CGS and get_awe().
    :return: 0 on success, 1 when --token is missing.
    """
    global AWE_URL
    parser = OptionParser(usage='awe_viewer.py [options]')
    parser.add_option("-a", "--awe_url", dest="awe_url", default=AWE_URL,
                      help="AWE API url")
    parser.add_option("-t", "--token", dest="token", default=None,
                      help="User token")
    parser.add_option("-c", "--clientgroup", dest="clientgroup", default=None,
                      help="clientgroup to view")
    parser.add_option("-p", "--pipeline", dest="pipeline",
                      default='mgrast-prod', help="pipeline to view")
    parser.add_option("-s", "--summary", dest="summary", action="store_true",
                      default=False, help="summarize by stage")
    (opts, args) = parser.parse_args()
    AWE_URL = opts.awe_url
    # ordered stage names for the chosen pipeline
    stages = MGP[opts.pipeline]
    if opts.token:
        if opts.summary:
            clients = get_awe(AWE_URL + '/client', opts.token)
            pt = PrettyTable(["stage"] + CGS)
            for i, s in enumerate(stages):
                num = 0  # NOTE(review): unused
                row = [s] + [0 for _ in range(len(CGS))]
                for c in clients:
                    if (c['Status'] == 'active-busy') and (c['group'] in CGS):
                        # current_work keys look like "<jobid>_<stageindex>"
                        for k, v in c['current_work'].iteritems():
                            if v is True:
                                parts = k.split('_')
                                if int(parts[1]) == i:
                                    row[CGS.index(c['group']) + 1] += 1
                pt.add_row(row)
        elif opts.clientgroup:
            clients = get_awe(AWE_URL + '/client?group=' + opts.clientgroup,
                              opts.token)
            pt = PrettyTable(["name", "host", "status", "job", "stage"])
            sc = sorted(clients, key=itemgetter('name'))
            for c in sc:
                jobs = []
                work = []
                if c['Status'] == 'active-busy':
                    for k, v in c['current_work'].iteritems():
                        if v is True:
                            parts = k.split('_')
                            index = int(parts[1])
                            jobs.append(parts[0])
                            work.append(stages[index])
                    pt.add_row([
                        c['name'], c['host'], c['Status'], "\n".join(jobs),
                        "\n".join(work)
                    ])
                else:
                    pt.add_row([c['name'], c['host'], c['Status'], "", ""])
        # NOTE(review): if neither --summary nor --clientgroup was given,
        # `pt` is unbound here and the next line raises NameError -- confirm
        # whether that combination is expected to be rejected earlier.
        pt.align = "l"
        print pt
    else:
        print "Missing required --token"
        return 1
    return 0
# NOTE(review): excerpt -- the `try:` matching this first `except` opens
# above this excerpt; `sess` and `auth_url` are also defined above.
    keystone = keystoneClient.Client(session=sess, endpoint_override=auth_url)
except Exception as e:
    print("Keystone client could not be initialized. EXITING \n" + str(e))
    exit(1)

# interactive check menu
print("Check:")
print("\t 1 - hypervisor stats")
print("\t 2 - hypervisor servers")
print("\t 3 - floating ip pool")
sel = '2'  # default; immediately overwritten by the prompt below
sel = input("Selection: ")

if sel == '1':
    nova = novaClient.Client(version="2", session=sess)
    outputtable = PrettyTable([
        'Hypervisor', 'ID', 'State', 'Status', 'Free RAM', 'Total RAM',
        'Used RAM', 'vCPUs', 'Used vCPUs', 'Runnning VMs'
    ])
    # aggregate statistics plus the per-hypervisor list
    hypervisors = nova.hypervisor_stats.statistics()
    hypervisors_list = nova.hypervisors.list()
    print(hypervisors_list)
    for hv in hypervisors_list:
        outputtable.add_row([
            hv.hypervisor_hostname, hv.id, hv.state, hv.status,
            hv.free_ram_mb, hv.memory_mb, hv.memory_mb_used, hv.vcpus,
            hv.vcpus_used, hv.running_vms
        ])
    # totals row built from the aggregate statistics object
    outputtable.add_row([
        "TOTAL", "--", "--", "--", hypervisors.free_ram_mb,
        hypervisors.memory_mb, hypervisors.memory_mb_used, hypervisors.vcpus,
        hypervisors.vcpus_used, hypervisors.running_vms
    ])
# NOTE(review): excerpt (Python 2) -- this URL is the argument of a
# urlopen-style call whose opening line sits above this excerpt; the loop at
# the end also continues beyond it.
    'http://www.espncricinfo.com/ci/engine/match/index.html?view=live')
html = response.read()
soup = bs(html, "html.parser")

if len(sys.argv) < 3:
    print "Usage: python wt20.py team1 team2"
    exit(1)

ing = 0
# team names arrive with "_" in place of spaces
team1 = sys.argv[1]
team1 = team1.replace("_", " ")
ing1team1 = False  # True when team1 batted in innings 1
team2 = sys.argv[2]
team2 = team2.replace("_", " ")

t = PrettyTable(['Innings', 'Team', 'Runs/Wickets', 'Overs'])

# find the innings-1 summary line mentioning either team
mydivs = soup.findAll("div", {"class": "innings-info-1"})
for div in mydivs:
    if re.search('^[^a-zA-Z]*' + team1 + '[^a-zA-Z]*', div.text):
        x = div.text
        # re-join multi-word names so the later split() keeps them as one token
        if (len(team1.split()) > 1):
            x = div.text.replace(team1, team1.replace(" ", "_"))
        ing1team1 = True
        s = x.split()
        break
    if re.search('^[^a-zA-Z]*' + team2 + '[^a-zA-Z]*', div.text):
        x = div.text
        if (len(team2.split()) > 1):
            x = div.text.replace(team2, team2.replace(" ", "_"))
        ing1team1 = False
# ## Example 13. Tabulating frequency analysis # In[19]: from prettytable import PrettyTable # Get some frequency data twitter_api = oauth_login() search_results = twitter_search(twitter_api, q, max_results=100) common_entities = get_common_tweet_entities(search_results) # Use PrettyTable to create a nice tabular display pt = PrettyTable(field_names=['Entity', 'Count']) [pt.add_row(kv) for kv in common_entities] pt.align['Entity'], pt.align['Count'] = 'l', 'r' # Set column alignment pt._max_width = {'Entity': 60, 'Count': 10} print(pt) # ## Example 14. Finding users who have retweeted a status # In[20]: import twitter twitter_api = oauth_login() print("""User IDs for retweeters of a tweet by @fperez_org that was retweeted by @SocialWebMining and that @jyeee then retweeted
def help_menu(self):
    """Render the interactive shell's command reference as a table."""
    entries = [
        ('list', 'Lists all the clients registered'),
        ('help', 'self.help()'),
        ('select <id>', 'Selected a specific client from the list'),
        ('<command>', 'Executes a command to the current selected client'),
        ('back', 'Detaches from the current client'),
        ('exit', 'Exists this interactive shell'),
        ('coms', 'Displays the commands and output for the current client'),
        ('com <id>', 'Displays a specific command and output for the current client'),
        ('comk', 'Kills a command ("*" for all)'),
        ('clik', 'Kills a client ("*" for all)'),
    ]
    menu = PrettyTable(['command', 'description'])
    menu.align = 'l'
    for command, description in entries:
        menu.add_row([command, description])
    print(menu)
def main(m, n, setk):
    """Run one pass of a three-factor experimental-design lab.

    :param m: number of replicated measurements per experiment row.
    :param n: design size -- 4 (linear model), 8 (with interactions) or
        15 (rotatable central composite design with quadratic terms).
    :param setk: when 2, print the per-coefficient significant/insignificant
        tallies accumulated in the module-level k/k2/k3 counters.

    Generates random responses y in [y_min, y_max), fits regression
    coefficients via Cramer's rule, then applies Cochran's, Student's and
    Fisher's tests, recursing with a larger m (and possibly a larger n)
    until an adequate model is found.  Relies on module-level globals
    x1_min..x3_max, y_min/y_max and helpers append_to_list_x(), get_value()
    and replace_column() defined elsewhere in this file.
    """
    global k, k3, k2
    k3 += 1  # total number of passes, reported when setk == 2
    if n == 15:
        const_l = 1.215  # "star point" arm length of the rotatable design
        print(
            'ŷ = b0 + b1 * x1 + b2 * x2 + b3 * x3 + b12 * x1 * x2 + b13 * x1 * x3 + b23 * x2 * x3 + b123 * x1 * x2 * '
            'x3 + b11 * x1 * x1 + b22 * x2 * x2 + b33 * x3 * x3')
        norm_x = [[+1, -1, -1, -1], [+1, -1, +1, +1], [+1, +1, -1, +1],
                  [+1, +1, +1, -1], [+1, -1, -1, +1], [+1, -1, +1, -1],
                  [+1, +1, -1, -1], [+1, +1, +1, +1], [+1, -const_l, 0, 0],
                  [+1, const_l, 0, 0], [+1, 0, -const_l, 0],
                  [+1, 0, const_l, 0], [+1, 0, 0, -const_l],
                  [+1, 0, 0, const_l], [+1, 0, 0, 0]]
        delta_x1 = (x1_max - x1_min) / 2
        delta_x2 = (x2_max - x2_min) / 2
        # BUG FIX: was (x2_max - x3_min) / 2 -- a copy-paste typo that made
        # the x3 star points use the wrong half-range.
        delta_x3 = (x3_max - x3_min) / 2
        x01 = (x1_min + x1_max) / 2
        x02 = (x2_min + x2_max) / 2
        x03 = (x3_min + x3_max) / 2
        x = [[1, x1_min, x2_min, x3_min], [1, x1_min, x2_max, x3_max],
             [1, x1_max, x2_min, x3_max], [1, x1_max, x2_max, x3_min],
             [1, x1_min, x2_min, x3_max], [1, x1_min, x2_max, x3_min],
             [1, x1_max, x2_min, x3_min], [1, x1_max, x2_max, x3_max],
             [1, -const_l * delta_x1 + x01, x02, x03],
             [1, const_l * delta_x1 + x01, x02, x03],
             [1, x01, -const_l * delta_x2 + x02, x03],
             [1, x01, const_l * delta_x2 + x02, x03],
             [1, x01, x02, -const_l * delta_x3 + x03],
             [1, x01, x02, const_l * delta_x3 + x03], [1, x01, x02, x03]]
        append_to_list_x(norm_x, variant=2)  # add interaction/quadratic cols
        append_to_list_x(x, variant=2)
    if n == 8:
        print(
            'ŷ = b0 + b1 * x1 + b2 * x2 + b3 * x3 + b12 * x1 * x2 + b13 * x1 * x3 + b23 * x2 * x3 + b123 * x1 * x2 * x3'
        )
        norm_x = [[+1, -1, -1, -1], [+1, -1, +1, +1], [+1, +1, -1, +1],
                  [+1, +1, +1, -1], [+1, -1, -1, +1], [+1, -1, +1, -1],
                  [+1, +1, -1, -1], [+1, +1, +1, +1]]
        x = [[1, x1_min, x2_min, x3_min], [1, x1_min, x2_max, x3_max],
             [1, x1_max, x2_min, x3_max], [1, x1_max, x2_max, x3_min],
             [1, x1_min, x2_min, x3_max], [1, x1_min, x2_max, x3_min],
             [1, x1_max, x2_min, x3_min], [1, x1_max, x2_max, x3_max]]
        append_to_list_x(norm_x, variant=1)  # add interaction columns
        append_to_list_x(x, variant=1)
    if n == 4:
        print('ŷ = b0 + b1 * x1 + b2 * x2 + b3 * x3')
        norm_x = [
            [+1, -1, -1, -1],
            [+1, -1, +1, +1],
            [+1, +1, -1, +1],
            [+1, +1, +1, -1],
        ]
        x = [
            [1, x1_min, x2_min, x3_min],
            [1, x1_min, x2_max, x3_max],
            [1, x1_max, x2_min, x3_max],
            [1, x1_max, x2_max, x3_min],
        ]
    # m random response replicates per design row, then rounded row averages
    y = np.random.randint(y_min, y_max, size=(n, m))
    y_av = list(np.average(y, axis=1))
    for i in range(len(y_av)):
        y_av[i] = round(y_av[i], 3)
    if n == 15:
        t = PrettyTable([
            'N', 'norm_x_0', 'norm_x_1', 'norm_x_2', 'norm_x_3',
            'norm_x_1_x_2', 'norm_x_1_x_3', 'norm_x_2_x_3',
            'norm_x_1_x_2_x_3', 'norm_x_1_x_1', 'norm_x_2_x_2',
            'norm_x_3_x_3', 'x_0', 'x_1', 'x_2', 'x_3', 'x_1_x_2', 'x_1_x_3',
            'x_2_x_3', 'x_1_x_2_x_3', 'x_1_x_1', 'x_2_x_2', 'x_3_x_3'
        ] + [f'y_{i + 1}' for i in range(m)] + ['y_av'])
    if n == 8:
        t = PrettyTable([
            'N', 'norm_x_0', 'norm_x_1', 'norm_x_2', 'norm_x_3',
            'norm_x_1_x_2', 'norm_x_1_x_3', 'norm_x_2_x_3',
            'norm_x_1_x_2_x_3', 'x_0', 'x_1', 'x_2', 'x_3', 'x_1_x_2',
            'x_1_x_3', 'x_2_x_3', 'x_1_x_2_x_3'
        ] + [f'y_{i + 1}' for i in range(m)] + ['y_av'])
    if n == 4:
        t = PrettyTable([
            'N', 'norm_x_0', 'norm_x_1', 'norm_x_2', 'norm_x_3', 'x_0',
            'x_1', 'x_2', 'x_3'
        ] + [f'y_{i + 1}' for i in range(m)] + ['y_av'])
    for i in range(n):
        t.add_row([i + 1] + list(norm_x[i]) + list(x[i]) + list(y[i]) +
                  [y_av[i]])
    print(t)
    # Normal equations M * b = K, solved via Cramer's rule.
    # NOTE(review): both sums divide by the constant 15 regardless of n; the
    # common factor cancels in the Cramer ratios, so b_i is unaffected apart
    # from the intermediate rounding of m_ij.
    m_ij = []
    for i in range(len(x[0])):
        m_ij.append([
            round(sum([x[r][i] * x[r][j] for r in range(len(x))]) / 15, 3)
            for j in range(len(x[i]))
        ])
    k_i = []
    for i in range(len(x[0])):
        a = sum(y_av[j] * x[j][i] for j in range(len(x))) / 15
        k_i.append(a)
    det = np.linalg.det(m_ij)
    det_i = [
        np.linalg.det(replace_column(m_ij, i, k_i)) for i in range(len(k_i))
    ]
    b_i = [round(i / det, 3) for i in det_i]
    if n == 15:
        print(
            f"\nThe naturalized regression equation: "
            f"y = {b_i[0]:.5f} + {b_i[1]:.5f} * x1 + {b_i[2]:.5f} * x2 + "
            f"{b_i[3]:.5f} * x3 + {b_i[4]:.5f} * x1 * x2 + "
            f"{b_i[5]:.5f} * x1 * x3 + {b_i[6]:.5f} * x2 * x3 + {b_i[7]:.5f} * x1 * x2 * x3 + {b_i[8]:.5f} * x1 * x1 + "
            f"{b_i[9]:.5f} * x2 * x2 + {b_i[10]:.5f} * x3 * x3")
    if n == 8:
        print(
            f"\nThe naturalized regression equation: "
            f"y = {b_i[0]:.5f} + {b_i[1]:.5f} * x1 + {b_i[2]:.5f} * x2 + "
            f"{b_i[3]:.5f} * x3 + {b_i[4]:.5f} * x1 * x2 + "
            f"{b_i[5]:.5f} * x1 * x3 + {b_i[6]:.5f} * x2 * x3 + {b_i[7]:.5f} * x1 * x2 * x3"
        )
    if n == 4:
        print(
            f"\nThe naturalized regression equation: "
            f"y = {b_i[0]:.5f} + {b_i[1]:.5f} * x1 + {b_i[2]:.5f} * x2 + {b_i[3]:.5f} * x3\n"
        )
    # sanity check: fitted values vs the measured row averages
    check_i = [
        round(sum(b_i[j] * i[j] for j in range(len(b_i))), 3) for i in x
    ]
    for i in range(len(check_i)):
        print(f'ŷ{i + 1} = {check_i[i]}, y_av{i + 1} = {y_av[i]}')
    print("\n[ Kohren's test ]")  # Cochran's homogeneity-of-variance test
    f_1 = m - 1
    f_2 = n
    s_i = [sum([(i - y_av[j])**2 for i in y[j]]) / m for j in range(len(y))]
    g_p = max(s_i) / sum(s_i)
    # critical Cochran values keyed by m (range keys resolved by get_value)
    table = {
        3: 0.6841,
        4: 0.6287,
        5: 0.5892,
        6: 0.5598,
        7: 0.5365,
        8: 0.5175,
        9: 0.5017,
        10: 0.4884,
        range(11, 17): 0.4366,
        range(17, 37): 0.3720,
        range(37, 145): 0.3093
    }
    g_t = get_value(table, m)
    if g_p < g_t:
        print(f"The variance is homogeneous: Gp = {g_p:.5} < Gt = {g_t}")
    else:
        print(
            f"The variance is not homogeneous Gp = {g_p:.5} < Gt = {g_t}\nStart again with m = m + 1 = {m + 1}"
        )
        return main(m=m + 1, n=n, setk=setk)
    print("\n[ Student's test ]")
    s2_b = sum(s_i) / n
    s2_beta_s = s2_b / (n * m)
    s_beta_s = sqrt(s2_beta_s)
    if n == 15:
        beta_i = b_i
    else:
        beta_i = [
            sum([norm_x[i][j] * y_av[i] for i in range(len(norm_x))]) / n
            for j in range(len(norm_x[0]))
        ]
        beta_i = [round(i, 3) for i in beta_i]
    t = [abs(i) / s_beta_s for i in beta_i]
    f_3 = f_1 * f_2
    # two-sided Student critical values, keyed by f_3
    t_table = {
        8: 2.306,
        9: 2.262,
        10: 2.228,
        11: 2.201,
        12: 2.179,
        13: 2.160,
        14: 2.145,
        15: 2.131,
        16: 2.120,
        17: 2.110,
        18: 2.101,
        19: 2.093,
        20: 2.086,
        21: 2.08,
        22: 2.074,
        23: 2.069,
        24: 2.064,
        range(25, 30): 2.06,
        range(30, 40): 2.042,
        range(40, 60): 2.021,
        range(60, 100): 2,
        range(100, 2**100): 1.96
    }
    # d = number of coefficients still considered significant
    # (was `deepcopy(n)` -- unnecessary for an int)
    d = n
    for i in range(len(t)):
        if get_value(t_table, f_3) < t[i]:
            k2[i] += 1  # coefficient significant this pass
        if get_value(t_table, f_3) > t[i]:
            # accumulate counts of insignificant t-coefficients
            k[i] += 1
            beta_i[i] = 0
            d -= 1
    if n == d:
        # every coefficient significant -> f_4 = n - d would be 0 in
        # Fisher's test below; escalate the design instead
        n = 8 if n == 4 else 15
        print(f"n=d\nStart again with n = {n}")
        return main(m=m + 1, n=n, setk=setk)
    if n == 15:
        print(
            f"\nThe naturalized simplified regression equation: "
            f"y = {beta_i[0]:.5f} + {beta_i[1]:.5f} * x1 + "
            f"{beta_i[2]:.5f} * x2 + {beta_i[3]:.5f} * x3 + {beta_i[4]:.5f} * x1 * x2 + "
            f"{beta_i[5]:.5f} * x1 * x3 + {beta_i[6]:.5f} * x2 * x3 + {beta_i[7]:.5f} * x1 * x2 * x3 + "
            f"{beta_i[8]:.5f} * x1 * x1 + {beta_i[9]:.5f} * x2 * x2 + {beta_i[10]:.5f} * x3 * x3"
        )
        check_i = [
            round(sum(beta_i[j] * i[j] for j in range(len(beta_i))), 3)
            for i in x
        ]
    if n == 8:
        print(
            f"\nThe normalized regression equation: "
            f"y = {beta_i[0]:.5f} + {beta_i[1]:.5f} * x1 + {beta_i[2]:.5f} * x2 + "
            f"{beta_i[3]:.5f} * x3 + {beta_i[4]:.5f} * x1 * x2 + "
            f"{beta_i[5]:.5f} * x1 * x3 + {beta_i[6]:.5f} * x2 * x3 + {beta_i[7]:.5f} * x1 * x2 * x3"
        )
        check_i = [
            round(sum(beta_i[j] * i[j] for j in range(len(beta_i))), 3)
            for i in norm_x
        ]
    if n == 4:
        print(
            f"\nThe normalized regression equation: "
            f"y = {beta_i[0]:.5f} + {beta_i[1]:.5f} * x1 + {beta_i[2]:.5f} * x2 + "
            f"{beta_i[3]:.5f} * x3")
        check_i = [
            round(sum(beta_i[j] * i[j] for j in range(len(beta_i))), 3)
            for i in norm_x
        ]
    for i in range(len(check_i)):
        print(f'ŷ{i + 1} = {check_i[i]}, y_av{i + 1} = {y_av[i]}')
    print("\n[ Fisher's test ]")
    f_4 = n - d
    s2_ad = m / f_4 * sum([(check_i[i] - y_av[i])**2
                           for i in range(len(y_av))])
    f_p = s2_ad / s2_b
    # critical Fisher values: row keyed by f_3, column indexed by f_4
    f_t = {
        1: [164.4, 199.5, 215.7, 224.6, 230.2, 234, 235.8, 237.6],
        2: [18.5, 19.2, 19.2, 19.3, 19.3, 19.3, 19.4, 19.4],
        3: [10.1, 9.6, 9.3, 9.1, 9, 8.9, 8.8, 8.8],
        4: [7.7, 6.9, 6.6, 6.4, 6.3, 6.2, 6.1, 6.1],
        5: [6.6, 5.8, 5.4, 5.2, 5.1, 5, 4.9, 4.9],
        6: [6, 5.1, 4.8, 4.5, 4.4, 4.3, 4.2, 4.2],
        7: [5.5, 4.7, 4.4, 4.1, 4, 3.9, 3.8, 3.8],
        8: [5.3, 4.5, 4.1, 3.8, 3.7, 3.6, 3.5, 3.5],
        9: [5.1, 4.3, 3.9, 3.6, 3.5, 3.4, 3.3, 3.3],
        10: [5, 4.1, 3.7, 3.5, 3.3, 3.2, 3.1, 3.1],
        11: [4.8, 4, 3.6, 3.4, 3.2, 3.1, 3, 3],
        12: [4.8, 3.9, 3.5, 3.3, 3.1, 3, 2.9, 2.9],
        13: [4.7, 3.8, 3.4, 3.2, 3, 2.9, 2.8, 2.8],
        14: [4.6, 3.7, 3.3, 3.1, 3, 2.9, 2.8, 2.7],
        15: [4.5, 3.7, 3.3, 3.1, 2.9, 2.8, 2.7, 2.7],
        16: [4.5, 3.6, 3.2, 3, 2.9, 2.7, 2.6, 2.6],
        17: [4.5, 3.6, 3.2, 3, 2.8, 2.7, 2.5, 2.3],
        18: [4.4, 3.6, 3.2, 2.9, 2.8, 2.7, 2.5, 2.3],
        19: [4.4, 3.5, 3.1, 2.9, 2.7, 2.7, 2.4, 2.3],
        range(20, 22): [4.4, 3.5, 3.1, 2.8, 2.7, 2.7, 2.4, 2.3],
        range(22, 24): [4.3, 3.4, 3.1, 2.8, 2.7, 2.6, 2.4, 2.3],
        range(24, 26): [4.3, 3.4, 3, 2.8, 2.6, 2.5, 2.3, 2.2],
        range(26, 28): [4.2, 3.4, 3, 2.7, 2.6, 2.5, 2.3, 2.2],
        range(28, 30): [4.2, 3.3, 3, 2.7, 2.6, 2.4, 2.3, 2.1],
        range(30, 40): [4.2, 3.3, 3, 2.7, 2.6, 2.4, 2.3, 2.1, 2, 2, 2, 2],
        range(40, 60):
        [4.1, 3.2, 2.9, 2.6, 2.5, 2.3, 2.2, 2, 1.9, 1.9, 1.9, 1.9],
        range(60, 120): [
            4, 3.2, 2.8, 2.5, 2.4, 2.3, 2.1, 1.9, 1.8, 1.8, 1.8, 1.8, 1.8,
            1.8, 1.8, 1.8
        ],
        range(120, 2**100):
        [3.8, 3, 2.6, 2.4, 2.2, 2.1, 2, 2, 1.9, 1.9, 1.9, 1.8, 1.8]
    }
    if f_p > get_value(f_t, f_3)[f_4]:
        n = 8 if n == 4 else 15
        print(
            f"fp = {f_p} > ft = {get_value(f_t, f_3)[f_4]}.\n"
            f"The mathematical model is not adequate to the experimental data\n"
            f"Start again with m = m + 1 = {m + 1} and n = {n}")
        return main(m=m + 1, n=n, setk=setk)
    else:
        print(f"fP = {f_p} < fT = {get_value(f_t, f_3)[f_4]}.\n"
              f"The mathematical model is adequate to the experimental data\n")
    if setk == 2:
        tWord = [
            "b0", "b1", "b2", "b3", "b12", "b13", "b23", "b123", "b11", "b22",
            "b33"
        ]
        for i in range(11):
            print("за", k3, " разів коефіціент", tWord[i], " був значимим ",
                  k2[i])
            print("за ", k3, "разів коефіціент", tWord[i], " був незначимим ",
                  k[i])
def factory_styled_table_with_args( args: "CustomNamespace", output_fields=DEFAULT_OUTPUT_FIELDS): table = PrettyTable() table.field_names = output_fields table.align = 'l' table.border = args.format_ in (FormatArg.MARKDOWN, FormatArg.RST, FormatArg.CONFLUENCE, FormatArg.JSON) table.header = True if args.format_ == FormatArg.MARKDOWN: table.junction_char = '|' table.hrules = RULE_HEADER elif args.format_ == FormatArg.RST: table.junction_char = '+' table.hrules = RULE_ALL elif args.format_ == FormatArg.CONFLUENCE: table.junction_char = '|' table.hrules = RULE_NONE elif args.format_ == FormatArg.JSON: table = JsonPrettyTable(table.field_names) elif args.format_ == FormatArg.JSON_LICENSE_FINDER: table = JsonLicenseFinderTable(table.field_names) elif args.format_ == FormatArg.CSV: table = CSVPrettyTable(table.field_names) elif args.format_ == FormatArg.PLAIN_VERTICAL: table = PlainVerticalTable(table.field_names) return table
from cow import Cow from entero import entero from prettytable import PrettyTable from MaxProduction import MaxProduction x = PrettyTable() y = PrettyTable() if __name__ == '__main__': cows = [] cows1 = [] maxWeight = 0 maxProduction = 0 weightToCarry = 0 totalV = entero('Número de vacas que hay en Tolosa: \n') totalWeight = entero("Peso total que el camión puede llevar: \n") for i in range(1, totalV + 1): weight = entero("Introducir peso %d:\n" % i) milk = entero("Introducir cantidad de leche %d:\n" % i) nCow = Cow(i, weight, milk) cowsInMarket.append(nCow) [maxProduction, weightToCarry, cows1] = maxProduction(cows, maxWeight, 0, 0, 0, []) print("MAX: %d" % maxProduction) x.field_names = ["Número", "Peso", "Cantidad leche"] y.field_names = ["Número", "Peso", "Cantidad leche"] print("Produccion maxima de leche: %dl" % maxProduction) print("Peso a llevar: %dkg ; Peso maximo: %dkg" %
from prettytable import PrettyTable locale.setlocale(locale.LC_ALL, ('de', 'utf-8')) global_url = 'https://api.coinmarketcap.com/v2/global' ticker_url = 'https://api.coinmarketcap.com/v2/ticker/?structure=array' request = requests.get(global_url) results = request.json() data = results['data'] global_cap = int(data['quotes']['USD']['total_market_cap']) table = PrettyTable([ 'Name', 'Ticker', '% of total global cap', 'Current', '7.7T (Gold)', '36.8T (Narrow Money)', '73T (World Stock Money)', '90.4T (Broad Money)', '217T (Real Estate)', '544T (Derivatives)' ]) request = requests.get(ticker_url) results = request.json() data = results['data'] for currency in data: name = currency['name'] ticker = currency['symbol'] percentage_of_global_cap = float( currency['quotes']['USD']['market_cap']) / float(global_cap) current_price = round(float(currency['quotes']['USD']['price']), 2) available_supply = float(currency['total_supply'])
tablaLR0[estado[0] - 1][t.index(celda)] = str( estado[1][0][0]) + "->" + str(estado[1][0][1]) if reglaSaLambda == 1: tablaLR0[0][t.index("FDC")] = "aceptar" return (tablaLR0, t) #------------------------------------------------ #Ejecucion del programa automata = nuevoEstado(reglas[-1], 1)[0] automata.sort(key=lambda x: x[0]) lr0, encabezado = construyeTabla(automata) #Impresion bonita de la tabla: t = PrettyTable(encabezado) for row in lr0: t.add_row(row) print(t) #------------------------------------------------ #Lectura de una cadena segun algoritmo de Brookshear p. 128 cadena = sys.argv[2] #cadena="zazabzbz" stack = [] simboloEspecial = 1 stack.append(simboloEspecial) simbolo = cadena[0] cadena = cadena[1:] valorTabla = lr0[simboloEspecial - 1][encabezado.index(simbolo)]
from prettytable import PrettyTable myTable = PrettyTable( ['Footballer', 'Position', 'Goals', 'Assists', 'Clean Sheets']) myTable.add_row(['Giroud', 'ST', '1', '0', '0']) myTable.add_row(['Werner', 'ST', '4', '2', '0']) myTable.add_row(['Ziyech', 'RW', '1', '3', '0']) myTable.add_row(['Havertz', 'CM', '1', '1', '1']) myTable.add_row(['Kante', 'CM', '0', '1', '3']) myTable.add_row(['Mount', 'CM', '1', '1', '3']) myTable.add_row(['Chilwell', 'LB', '2', '2', '4']) myTable.add_row(['Silva', 'CB', '1', '0', '5']) myTable.add_row(['Zouma', 'CB', '3', '0', '4']) myTable.add_row(['James', 'RB', '1', '2', '4']) myTable.add_row(['Mendy', 'GK', '0', '0', '7']) print(myTable)
# use nltk.corpus.brown.words () to find the word sequence and nltk.corpus.brown.sents () to find the sequence of sentences from nltk.corpus import brown from prettytable import PrettyTable def calcARI(category): avg_words = 0 avg_letters = 0 for sentence in brown.sents(categories=category): avg_words += len(sentence) avg_words = avg_words / len(brown.sents(categories=category)) for word in brown.words(categories=category): avg_letters += len(word) avg_letters = avg_letters / len(brown.words(categories=category)) return (4.71 * avg_letters) + (0.5 * avg_words) - 21.43 x = PrettyTable(["Genres", "ARI"]) for c in brown.categories(): x.add_row([c, calcARI(c)]) print(x) # access ABC Rural News and ABC Science News Corps texts (nltk.corpus.abc) # find a value for evaluating the readability of texts (similar to task # 12) # use Punkt to separate text into individual sentences # The difference in ARI value from the previous exercise is explained by Punkt's work in dividing the text into sentences, resulting in the sentence being too large from nltk.corpus import abc import nltk from prettytable import PrettyTable def calcARI(file): sent_tokenizer=nltk.data.load('tokenizers/punkt/english.pickle')
def show(stu=stu_list): pt = PrettyTable(['学号', '姓名', '年龄', '性别']) pt.padding_width = 5 for line in stu: pt.add_row(line.values()) print(pt)
query, (int(line[8]), int(line[3]), line[0], line[9], float(line[5]))) # #### Do a SELECT to verify that the data have been inserted into each table # In[10]: ## Verify the data was inserted into the table query1 = "SELECT artist,song,length FROM songplays WHERE sessionid=338 AND iteminsession=4" try: rows = session.execute(query1) except Exception as e: print(e) t = PrettyTable(['Artist', 'Song', 'Length']) for row in rows: t.add_row([row[0], row[1], row[2]]) print(t) # ### COPY AND REPEAT THE ABOVE THREE CELLS FOR EACH OF THE THREE QUESTIONS # In[11]: ## Model the table to answer Query 2: Give me only the following: name of artist, song (sorted by itemInSession) and user (first and last name)\ ## for userid = 10, sessionid = 182 try: query = "CREATE TABLE IF NOT EXISTS user_listened_songs (userid int, sessionid int, iteminsession int, artist text, song text, firstname text, lastname text, PRIMARY KEY((userid,sessionid),iteminsession));" session.execute(query) except Exception as e:
def scan(target_directory, a_sid=None, s_sid=None, special_rules=None,
         language=None, framework=None, file_count=0, extension_count=0):
    """Run every enabled vulnerability rule against a target, report results.

    :param target_directory: file or directory to scan.
    :param a_sid: scan-group id (unused in this body; presumably for callers).
    :param s_sid: single-scan id; when set, the final status is pushed via
        ``Running(s_sid).data``.
    :param special_rules: optional subset of rules to run; when empty/None the
        CVE pre-scan also runs.
    :param language / framework / file_count / extension_count: metadata that
        is passed through into the final status payload.
    :return: True on completion, False when no rules are available.
    """
    r = Rule()
    vulnerabilities = r.vulnerabilities
    languages = r.languages
    frameworks = r.frameworks
    rules = r.rules(special_rules)
    find_vulnerabilities = []
    try:
        # CVE pre-scan only applies when no specific rule subset was requested.
        if special_rules is None or len(special_rules) == 0:
            cve_vuls = scan_cve(target_directory)
            find_vulnerabilities += cve_vuls
    except Exception:
        logger.warning('[SCAN] [CVE] CVE rule is None')
    """ 2018.7.14 """

    def store(result):
        # Pool callback: strip the target prefix from each finding's path and
        # collect it into the shared find_vulnerabilities list.
        if result is not None and isinstance(result, list) is True:
            for res in result:
                if os.path.isdir(target_directory):
                    res.file_path = res.file_path.replace(target_directory, '')
                else:
                    res.file_path = res.file_path.replace(
                        os.path.dirname(target_directory), '')
                find_vulnerabilities.append(res)
        else:
            logger.debug(
                '[SCAN] [STORE] Not found vulnerabilities on this rule!')

    try:
        pool = multiprocessing.Pool()
        if len(rules) == 0:
            logger.critical('no rules!')
            return False
        logger.info('[PUSH] {rc} Rules'.format(rc=len(rules)))
        push_rules = []
        off_rules = 0
        for idx, single_rule in enumerate(rules):
            # Disabled rules are counted but skipped.
            if single_rule['status'] is False:
                off_rules += 1
                logger.debug('[CVI-{cvi}] [STATUS] OFF, CONTINUE...'.format(
                    cvi=single_rule['id']))
                continue
            # SR(Single Rule)
            logger.debug(
                """[PUSH] [CVI-{cvi}] {idx}.{name}({language})""".format(
                    cvi=single_rule['id'],
                    idx=idx,
                    name=single_rule['name'],
                    language=single_rule['language']))
            logger.debug(str(single_rule['language']))
            if single_rule['language'] in languages:
                # Attach the language's file extensions, then fan the rule out
                # to a worker process; `store` gathers the findings.
                single_rule['extensions'] = languages[
                    single_rule['language']]['extensions']
                push_rules.append(single_rule['id'])
                pool.apply_async(scan_single,
                                 args=(target_directory, single_rule),
                                 callback=store)
            else:
                logger.critical('unset language, continue...')
                continue
        pool.close()
        pool.join()
    except Exception:
        raise
    # print
    data = []
    table = PrettyTable(
        ['#', 'CVI', 'Rule', 'Level', 'Target', 'Source Code Content'])
    table.align = 'l'
    trigger_rules = []
    for idx, x in enumerate(find_vulnerabilities):
        trigger = '{fp}:{ln}'.format(fp=x.file_path, ln=x.line_number)
        # commit = u'{time}, @{author}'.format(author=x.commit_author, time=x.commit_time)
        # 2018.7.24-------------------------
        level = score2level(x.level)
        cvi = x.id[0:3]
        if cvi in vulnerabilities:
            cvn = vulnerabilities[cvi]
        else:
            cvn = 'Unknown'
        try:
            # Truncate the offending code for display; fall back to decoding
            # when the content is bytes rather than str.
            code_content = x.code_content[:50].strip()
        except AttributeError as e:
            code_content = x.code_content.decode('utf-8')[:100].strip()
        row = [idx + 1, x.id, x.rule_name, level, trigger, code_content]
        data.append(row)
        table.add_row(row)
        if x.id not in trigger_rules:
            logger.debug(' > trigger rule (CVI-{cvi})'.format(cvi=x.id))
            trigger_rules.append(x.id)
    # Rules that were pushed but produced no findings.
    diff_rules = list(set(push_rules) - set(trigger_rules))
    vn = len(find_vulnerabilities)
    if vn == 0:
        logger.info('[SCAN] Not found vulnerability!')
    else:
        logger.info(
            "[SCAN] Trigger Rules/Not Trigger Rules/Off Rules: {tr}/{ntr}/{fr} Vulnerabilities ({vn})\r\n{table}"
            .format(tr=len(trigger_rules),
                    ntr=len(diff_rules),
                    fr=off_rules,
                    vn=len(find_vulnerabilities),
                    table=table))
        if len(diff_rules) > 0:
            logger.info('[SCAN] Not Trigger Rules ({l}): {r}'.format(
                l=len(diff_rules), r=','.join(diff_rules)))
    # Report against the containing directory when a single file was scanned.
    if os.path.isfile(target_directory):
        target_directory = os.path.dirname(target_directory)
    # completed running data
    if s_sid is not None:
        Running(s_sid).data({
            'code': 1001,
            'msg': 'scan finished',
            'result': {
                'vulnerabilities':
                [x.__dict__ for x in find_vulnerabilities],
                'language': language,
                'framework': framework,
                'extension': extension_count,
                'file': file_count,
                'push_rules': len(rules),
                'trigger_rules': len(trigger_rules),
                'target_directory': target_directory,
            }
        })
    return True
data = results['data'] ticker_url_pairs = {} for currency in data: symbol = currency['symbol'] url = currency['id'] ticker_url_pairs[symbol] = url print() print('My Portfolio') print() portfolio_value = 0.00 last_updated = 0 table = PrettyTable( ['Asset', 'Amount Owned', convert + 'Value', 'Price', '1h', '24h', '7d']) with open("portfolio.txt") as inp: for line in inp: ticker, amount = line.split() ticker = ticker.upper() ticker_url = 'https://api.coinmarketcap.com/v2/ticker/' + str( ticker_url_pairs[ticker]) + '/' + url_end request = requests.get(ticker_url) results = request.json() currency = results['data'][0] rank = currency['rank'] name = currency['name'] last_updated = currency['last_updated']
def format_for_print(df): table = PrettyTable(list(df.columns)) for row in df.itertuples(): table.add_row(row[1:]) return str(table)
Base.print_info("Timeout: ", str(args.timeout) + " sec.") Base.print_info("Retry: ", str(args.retry)) # endregion # region Start scanner arp_scan = ArpScan() results = arp_scan.scan(current_network_interface, args.timeout, args.retry, args.target_ip, True) # endregion # region Print results if len(results) > 0: Base.print_success("Found devices:") pretty_table = PrettyTable([ Base.cINFO + 'IP address' + Base.cEND, Base.cINFO + 'MAC address' + Base.cEND, Base.cINFO + 'Vendor' + Base.cEND ]) for result in results: pretty_table.add_row([ result['ip-address'], result['mac-address'], result['vendor'] ]) print pretty_table else: Base.print_error( "Could not find devices in local network on interface: ", current_network_interface) # endregion # endregion
def cli():
    """Query 12306 left-ticket availability from the command line.

    Usage (positional, via sys.argv):
        ticket <from> <to> <date>            — all trains
        ticket <options> <from> <to> <date>  — filter by train-type initials

    Prints a table of matching trains with seat availability. Returns None.
    """
    argv_len = len(sys.argv)
    print(argv_len)
    if argv_len != 4 and argv_len != 5:
        print(__doc__)
        return None
    print('searching ticket.....')
    #arguments = docopt(__doc__,version='ticket 1.0')
    #print(arguments)
    if 4 == argv_len:
        # No filter options: argv is <from> <to> <date>.
        from_station = stations.get_telecode(sys.argv[1])
        to_station = stations.get_telecode(sys.argv[2])
        date = sys.argv[3]
        options = None
    else:
        # With options: argv is <options> <from> <to> <date>.
        from_station = stations.get_telecode(sys.argv[2])
        to_station = stations.get_telecode(sys.argv[3])
        date = sys.argv[4]
        options = sys.argv[1]  #list(sys.argv[1])[1:]
    #print(options, type(options))
    #print(not options or 'd' in options)
    # (translated) A list comprehension would build the set of requested
    # train-type options here.
    #options = None#''.join([key for key,value in arguments.items() if value is True])
    #print(options)
    url = ('https://kyfw.12306.cn/otn/leftTicket/query?'
           'leftTicketDTO.train_date={}&'
           'leftTicketDTO.from_station={}&'
           'leftTicketDTO.to_station={}&'
           'purpose_codes=ADULT').format(date,from_station,to_station)
    print(url)
    r = requests.get(url, verify=False)
    #print(r.json())
    # (translated) requests returns a JSON response; r.json() converts it to a
    # Python dict from which all train results are extracted.
    raw_trains = r.json()['data']['result']
    #print(raw_trains)
    pt = PrettyTable()
    pt._set_field_names("车次 车站 始终 时间 经历时 一等座 二等座 软卧 硬卧 硬座 无座".split())
    for raw_train in raw_trains:
        # (translated) split() yields a list of '|'-separated fields.
        #print(raw_train)
        data_list = raw_train.split("|")
        #print(data_list[3:])
        train_no = data_list[3]
        # Train-type initial (G/D/K/...) used for option filtering.
        initial = train_no[0].lower()
        #print(initial, type(initial),not options or initial in options)
        # (translated) Decide whether this train matches the requested types.
        if not options or initial in options:
            begin_station_code = data_list[4]
            end_station_code = data_list[5]
            from_station_code = data_list[6]
            to_station_code = data_list[7]
            from_station_name = ''
            to_station_name = ''
            begin_station_name = ''
            end_station_name = ''
            start_time = data_list[8]
            arrive_time = data_list[9]
            time_duration = data_list[10]
            # Empty availability fields display as "--".
            first_class_seat = data_list[31] or "--"
            second_class_seat = data_list[30] or "--"
            soft_sleep = data_list[23] or "--"
            hard_sleep = data_list[28] or "--"
            hard_seat = data_list[29] or "--"
            no_seat = data_list[33] or "--"
            pt.add_row([
                # (translated) color could be added to specific text here
                train_no,
                '\n'.join([stations.get_name(from_station_code),
                           stations.get_name(to_station_code)]),
                '\n'.join([stations.get_name(begin_station_code),
                           stations.get_name(end_station_code)]),
                '\n'.join([start_time, arrive_time]),
                #'\n'.join([Fore.GREEN + stations.get_name(from_station_code) + Fore.RESET, Fore.RED + stations.get_name(to_station_code) + Fore.RESET]),
                #'\n'.join([Fore.GREEN + start_time + Fore.RESET,Fore.RED + arrive_time + Fore.RESET]),
                time_duration,
                first_class_seat,
                second_class_seat,
                soft_sleep,
                hard_sleep,
                hard_seat,
                no_seat
            ])
    print(pt)