def find_commands(db, *filters): user_filter = '\s+'.join(filters) user_re = re.compile(user_filter) RE_CACHE[user_filter] = user_re query = ''' SELECT hostname, timestamp, duration, user_string FROM commands WHERE timestamp > ? AND user_string REGEXP ? ORDER BY timestamp ''' table = Texttable() table.set_deco(Texttable.HEADER) table.set_cols_align(('l', 'r', 'r', 'l')) table.header(('host', 'date', 'duration', 'command')) host_width = 6 max_command_width = 9 now = time.time() for row in db.execute(query, (TIMESTAMP, user_filter)): host_width = max(host_width, len(row[0])) max_command_width = max(max_command_width, len(row[3])) table.add_row(( row[0], format_time(row[1], now), format_duration(row[2]) if row[2] > 0 else '', highlight(row[3], user_re))) table.set_cols_width((host_width, 30, 10, max_command_width + 2)) print table.draw()
def get_aggregation_summary_text(self, matches):
    """Build a plain-text summary table for an aggregated alert.

    When the rule defines both 'aggregation' and 'summary_table_fields',
    counts how often each unique combination of the summary fields
    occurs across *matches* and renders the result as a text table.

    :param matches: list of match dicts from the aggregation period.
    :returns: unicode summary text (empty when no summary is configured).
    """
    text = ''
    if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
        summary_table_fields = self.rule['summary_table_fields']
        # Allow a single field name as shorthand for a one-element list.
        if not isinstance(summary_table_fields, list):
            summary_table_fields = [summary_table_fields]
        # Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
        summary_table_fields_with_count = summary_table_fields + ['count']
        text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(summary_table_fields_with_count)
        text_table = Texttable()
        text_table.header(summary_table_fields_with_count)
        match_aggregation = {}
        # Maintain an aggregate count for each unique key encountered in the aggregation period
        for match in matches:
            key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields])
            if key_tuple not in match_aggregation:
                match_aggregation[key_tuple] = 1
            else:
                match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1
        for keys, count in match_aggregation.iteritems():
            text_table.add_row([key for key in keys] + [count])
        text += text_table.draw() + '\n\n'
    return unicode(text)
def get_hard_drive_list_table(self): """Return a table of hard drives""" # Manually set permissions asserted, as this function can # run high privilege calls, but doesn't not require # permission checking self._get_registered_object('auth').set_permission_asserted() # Create table and set headings table = Texttable() table.set_deco(Texttable.HEADER | Texttable.VLINES) table.header(('ID', 'Size', 'Type', 'Storage Backend', 'Virtual Machine')) table.set_cols_width((50, 15, 15, 50, 20)) # Obtain hard ives and add to table for hard_drive_obj in self.get_all(): vm_object = hard_drive_obj.get_virtual_machine() hdd_type = '' storage_backend_id = 'Storage backend does not exist' try: storage_backend_id = hard_drive_obj.storage_backend.id_ hdd_type = hard_drive_obj.get_type() hdd_size = SizeConverter(hard_drive_obj.get_size()).to_string() except (VolumeDoesNotExistError, HardDriveDoesNotExistException, StorageBackendDoesNotExist), exc: hdd_size = str(exc) table.add_row((hard_drive_obj.id_, hdd_size, hdd_type, storage_backend_id, vm_object.get_name() if vm_object else 'Not attached'))
def do_info_draw_publication(self, info_image):
    """Print the publication status table for *info_image*.

    Fetches all published images for the logged-in user and lists the
    status and cloud id of the ones matching the image's URI.
    """
    printer.out("Information about publications:")
    pimages = self.api.Users(self.login).Pimages.Getall()
    table = Texttable(0)  # 0 => no maximum table width
    table.set_cols_align(["l", "l"])
    has_pimage = False
    for pimage in pimages.publishImages.publishImage:
        if pimage.imageUri == info_image.uri:
            has_pimage = True
            cloud_id = None
            publish_status = image_utils.get_message_from_status(pimage.status)
            # No mapped message means the publication is still in progress.
            if not publish_status:
                publish_status = "Publishing"

            if publish_status == "Done":
                cloud_id = pimage.cloudId
                format_name = info_image.targetFormat.format.name
                # Docker-style targets identify the image by repository tag
                # rather than by cloud id.
                if format_name == "docker" or format_name == "openshift":
                    cloud_id = pimage.namespace + "/" + pimage.repositoryName + ":" + pimage.tagName

            table.add_row([publish_status, cloud_id])

    if has_pimage:
        table.header(["Status", "Cloud Id"])
        print table.draw() + "\n"
    else:
        printer.out("No publication")
def do_promote(self, args): try: doParser = self.arg_promote() doArgs = doParser.parse_args(shlex.split(args)) orgSpecified = org_utils.org_get(api=self.api, name=doArgs.org) adminUser = self.api.Users(doArgs.account).Get() if adminUser == None: printer.out("User [" + doArgs.account + "] doesn't exist.", printer.ERROR) else: self.api.Orgs(orgSpecified.dbId).Members(adminUser.loginName).Change(Admin=True, body=adminUser) printer.out("User [" + doArgs.account + "] has been promoted in [" + orgSpecified.name + "] :", printer.OK) if adminUser.active == True: active = "X" else: active = "" printer.out("Informations about [" + adminUser.loginName + "] :") table = Texttable(200) table.set_cols_align(["c", "l", "c", "c", "c", "c", "c", "c"]) table.header( ["Login", "Email", "Lastname", "Firstname", "Created", "Active", "Promo Code", "Creation Code"]) table.add_row([adminUser.loginName, adminUser.email, adminUser.surname, adminUser.firstName, adminUser.created.strftime("%Y-%m-%d %H:%M:%S"), active, adminUser.promoCode, adminUser.creationCode]) print table.draw() + "\n" return 0 except ArgumentParserError as e: printer.out("In Arguments: " + str(e), printer.ERROR) self.help_promote() except Exception as e: return marketplace_utils.handle_uforge_exception(e)
def do_search(self, args):
    """Search packages by name within a distribution (--id, --pkg)."""
    try:
        #add arguments
        doParser = self.arg_search()
        doArgs = doParser.parse_args(shlex.split(args))

        #if the help command is called, parse_args returns None object
        if not doArgs:
            return 2

        #call UForge API
        printer.out("Search package '"+doArgs.pkg+"' ...")
        distribution = self.api.Distributions(doArgs.id).Get()
        printer.out("for OS '"+distribution.name+"', version "+distribution.version)
        pkgs = self.api.Distributions(distribution.dbId).Pkgs.Getall(Query="name=="+doArgs.pkg)
        pkgs = pkgs.pkgs.pkg
        if pkgs is None or len(pkgs) == 0:
            printer.out("No package found")
        else:
            table = Texttable(800)
            table.set_cols_dtype(["t","t","t","t","t","t","t"])
            table.header(["Name", "Version", "Arch", "Release", "Build date", "Size", "FullName"])
            pkgs = generics_utils.order_list_object_by(pkgs, "name")
            for pkg in pkgs:
                table.add_row([pkg.name, pkg.version, pkg.arch, pkg.release,
                               pkg.pkgBuildDate.strftime("%Y-%m-%d %H:%M:%S"),
                               size(pkg.size), pkg.fullName])
            print table.draw() + "\n"
            printer.out("Found "+str(len(pkgs))+" packages")
    except ArgumentParserError as e:
        printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
        self.help_search()
    except Exception as e:
        return handle_uforge_exception(e)
def schedule(self, label="default", *argvs, **kwargs):
    """Schedule job templates, list periods, or schedule by period label.

    Recognised keyword options: ``list`` (print all TaskPeriods),
    ``template_id`` (schedule the given template ids) and
    ``schedule_label`` (schedule all enabled templates for a period
    label). ``simulate``, ``reserve`` and ``fullInfo`` are passed
    through to scheduleByJobTemplates.
    """
    simulate = kwargs.get("simulate")
    reserver = kwargs.get("reserve")
    fullInfo = kwargs.get("fullInfo")
    if kwargs.get("list"):
        tp = TaskPeriod.objects.all()
        table = Texttable()
        table.set_deco(Texttable.HEADER)
        table.header(["Id", "Title", "Label", "Schedule"])
        for it in tp:
            table.add_row([it.id, it.title, it.label, it.cron])
        print(table.draw())
    if kwargs.get("template_id"):
        template_ids = kwargs.get("template_id")
        logger.debug("Schedule template id %s" % template_ids)
        # Renamed from 'filter' to avoid shadowing the builtin.
        template_filter = {"id__in": template_ids}
        self.scheduleByJobTemplates(
            template_filter, label, fullInfo, simulate, reserver)
    if kwargs.get("schedule_label"):
        period_label = kwargs.get("schedule_label")
        template_filter = {"schedule__label__in": period_label, "is_enable": True}
        if not label:
            label = period_label
        self.scheduleByJobTemplates(
            template_filter, "".join(label), fullInfo, simulate, reserver)
def do_list(self, args):
    """List all scanned instances and their scans for the current user."""
    try:
        #call UForge API
        printer.out("Getting scans for ["+self.login+"] ...")
        myScannedInstances = self.api.Users(self.login).Scannedinstances.Get(None, Includescans="true")
        if myScannedInstances is None or not hasattr(myScannedInstances, 'get_scannedInstance'):
            printer.out("No scans available")
            return
        else:
            table = Texttable(800)
            table.set_cols_dtype(["t","t","t","t"])
            table.header(["Id", "Name", "Status", "Distribution"])
            # NOTE(review): helper is spelled 'oder_list_object_by' here while
            # sibling commands use 'order_list_object_by' — confirm which one
            # generics_utils actually defines before "fixing".
            myScannedInstances = generics_utils.oder_list_object_by(myScannedInstances.get_scannedInstance(), "name")
            for myScannedInstance in myScannedInstances:
                # Parent row: the scanned instance itself (no status column).
                table.add_row([myScannedInstance.dbId, myScannedInstance.name, "",
                               myScannedInstance.distribution.name + " "
                               + myScannedInstance.distribution.version + " "
                               + myScannedInstance.distribution.arch])
                scans = generics_utils.oder_list_object_by(myScannedInstance.get_scans().get_scan(), "name")
                for scan in scans:
                    # Derive a display status from the scan's state flags.
                    if (scan.status.complete and not scan.status.error and not scan.status.cancelled):
                        status = "Done"
                    elif(not scan.status.complete and not scan.status.error and not scan.status.cancelled):
                        status = str(scan.status.percentage)+"%"
                    else:
                        status = "Error"
                    # Child rows are indented under their instance with a tab.
                    table.add_row([scan.dbId, "\t"+scan.name, status, "" ])
            print table.draw() + "\n"
            printer.out("Found "+str(len(myScannedInstances))+" scans")
    except ArgumentParserError as e:
        printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
        self.help_list()
    except Exception as e:
        return generics_utils.handle_uforge_exception(e)
def do_list(self, args): try: #call UForge API printer.out("Getting distributions for ["+self.login+"] ...") distributions = self.api.Users(self.login).Distros.Getall() distributions = distributions.distributions if distributions is None or not hasattr(distributions, "distribution"): printer.out("No distributions available") else: table = Texttable(800) table.set_cols_dtype(["t","t","t","t","t", "t"]) table.header(["Id", "Name", "Version", "Architecture", "Release Date", "Profiles"]) distributions = generics_utils.order_list_object_by(distributions.distribution, "name") for distribution in distributions: profiles = self.api.Distributions(distribution.dbId).Profiles.Getall() profiles = profiles.distribProfiles.distribProfile if len(profiles) > 0: profile_text="" for profile in profiles: profile_text+=profile.name+"\n" table.add_row([distribution.dbId, distribution.name, distribution.version, distribution.arch, distribution.releaseDate.strftime("%Y-%m-%d %H:%M:%S") if distribution.releaseDate is not None else "", profile_text]) else: table.add_row([distribution.dbId, distribution.name, distribution.version, distribution.arch, distribution.releaseDate.strftime("%Y-%m-%d %H:%M:%S") if distribution.releaseDate is not None else "", "-"]) print table.draw() + "\n" printer.out("Found "+str(len(distributions))+" distributions") return 0 except ArgumentParserError as e: printer.out("ERROR: In Arguments: "+str(e), printer.ERROR) self.help_list() except Exception as e: return handle_uforge_exception(e)
def print_deploy_header():
    """Create and return the Texttable used for deployment listings."""
    columns = ["Deployment name", "Deployment ID", "Cloud provider", "Region",
               "Hostname", "Source type", "Source ID", "Source name", "Status"]
    table = Texttable(200)
    # All nine columns hold plain text.
    table.set_cols_dtype(["t"] * len(columns))
    table.header(columns)
    return table
def list(self): """List the Drbd volumes and statuses""" # Create table and add headers table = Texttable() table.set_deco(Texttable.HEADER | Texttable.VLINES) table.header(('Volume Name', 'VM', 'Minor', 'Port', 'Role', 'Connection State', 'Disk State', 'Sync Status')) # Set column alignment and widths table.set_cols_width((30, 20, 5, 5, 20, 20, 20, 13)) table.set_cols_align(('l', 'l', 'c', 'c', 'l', 'c', 'l', 'c')) # Iterate over Drbd objects, adding to the table for drbd_object in self.get_all_drbd_hard_drive_object(True): table.add_row((drbd_object.resource_name, drbd_object.vm_object.get_name(), drbd_object.drbd_minor, drbd_object.drbd_port, 'Local: %s, Remote: %s' % (drbd_object._drbdGetRole()[0].name, drbd_object._drbdGetRole()[1].name), drbd_object._drbdGetConnectionState().name, 'Local: %s, Remote: %s' % (drbd_object._drbdGetDiskState()[0].name, drbd_object._drbdGetDiskState()[1].name), 'In Sync' if drbd_object._isInSync() else 'Out of Sync')) return table.draw()
def do_delete(self, args):
    """Delete a cloud account by id (confirmation unless --no-confirm)."""
    try:
        # add arguments
        doParser = self.arg_delete()
        doArgs = doParser.parse_args(shlex.split(args))

        #if the help command is called, parse_args returns None object
        if not doArgs:
            return 2

        # call UForge API
        printer.out("Searching account with id [" + doArgs.id + "] ...")
        account = self.api.Users(self.login).Accounts(doArgs.id).Get()
        if account is None:
            printer.out("No Account available", printer.WARNING)
        else:
            # Show the account that is about to be deleted.
            table = Texttable(800)
            table.set_cols_dtype(["t", "t", "t", "t"])
            table.header(["Id", "Name", "Type", "Created"])
            table.add_row(
                [account.dbId, account.name, account.targetPlatform.name,
                 account.created.strftime("%Y-%m-%d %H:%M:%S")])
            print table.draw() + "\n"
            if doArgs.no_confirm:
                self.api.Users(self.login).Accounts(doArgs.id).Delete()
                printer.out("Account deleted", printer.OK)
            elif generics_utils.query_yes_no("Do you really want to delete account with id " + str(account.dbId)):
                self.api.Users(self.login).Accounts(doArgs.id).Delete()
                printer.out("Account deleted", printer.OK)
        return 0
    except ArgumentParserError as e:
        printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
        self.help_delete()
    except Exception as e:
        return handle_uforge_exception(e)
def do_delete(self, args):
    """Delete a software bundle by id after interactive confirmation."""
    try:
        #add arguments
        doParser = self.arg_delete()
        doArgs = doParser.parse_args(shlex.split(args))

        #if the help command is called, parse_args returns None object
        if not doArgs:
            return 2

        #call UForge API
        printer.out("Searching bundle with id ["+doArgs.id+"] ...")
        myBundle = self.api.Users(self.login).Mysoftware(doArgs.id).Get()
        if myBundle is None or type(myBundle) is not MySoftware:
            printer.out("Bundle not found", printer.WARNING)
        else:
            # Show the bundle that is about to be deleted.
            table = Texttable(800)
            table.set_cols_dtype(["t","t","t", "t","t", "t"])
            table.header(["Id", "Name", "Version", "Description", "Size", "Imported"])
            table.add_row([myBundle.dbId, myBundle.name, myBundle.version,
                           myBundle.description, size(myBundle.size),
                           "X" if myBundle.imported else ""])
            print table.draw() + "\n"
            if generics_utils.query_yes_no("Do you really want to delete bundle with id "+str(myBundle.dbId)):
                self.api.Users(self.login).Mysoftware(myBundle.dbId).Delete()
                printer.out("Bundle deleted", printer.OK)
    except ArgumentParserError as e:
        printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
        self.help_delete()
    except Exception as e:
        return handle_uforge_exception(e)
def print_mapping(prefix, key, items): table = Texttable(max_width=160) table.set_deco(Texttable.HEADER) table.header(['%s_%s' % (prefix, key), '%s_fk' % prefix]) for key, value in items.iteritems(): table.add_row([key, value]) print table.draw() + "\n"
def print_table(prefix, items): table = Texttable(max_width=160) table.set_deco(Texttable.HEADER) table.header(['%s_id' % prefix, '%s_updated' % prefix, '%s_fk' % prefix]) for key, values in items.iteritems(): table.add_row([key, values.get('updated'), values.get('opposite_id')]) print table.draw() + "\n"
def do_list(self, args):
    """List the user's appliance templates with image counts and flags."""
    try:
        #call UForge API
        printer.out("Getting templates for ["+self.login+"] ...")
        appliances = self.api.Users(self.login).Appliances().Getall()
        appliances = appliances.appliances
        if appliances is None or not hasattr(appliances, 'appliance'):
            printer.out("No template")
        else:
            images = self.api.Users(self.login).Images.Get()
            images = images.images
            table = Texttable(800)
            table.set_cols_dtype(["t","t","t","t","t","t","t","t","t","t"])
            table.header(["Id", "Name", "Version", "OS", "Created", "Last modified",
                          "# Imgs", "Updates", "Imp", "Shared"])
            appliances = generics_utils.order_list_object_by(appliances.appliance, "name")
            for appliance in appliances:
                # Count generated images that reference this appliance.
                nbImage=0
                if images is not None and hasattr(images, 'image'):
                    for image in images.image:
                        if hasattr(image, 'applianceUri') and image.applianceUri == appliance.uri:
                            nbImage+=1
                table.add_row([appliance.dbId, appliance.name, str(appliance.version),
                               appliance.distributionName+" "+appliance.archName,
                               appliance.created.strftime("%Y-%m-%d %H:%M:%S"),
                               appliance.lastModified.strftime("%Y-%m-%d %H:%M:%S"),
                               nbImage, appliance.nbUpdates,
                               "X" if appliance.imported else "",
                               "X" if appliance.shared else ""])
            print table.draw() + "\n"
            printer.out("Found "+str(len(appliances))+" templates")
        return 0
    except ArgumentParserError as e:
        printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
        self.help_list()
    except Exception as e:
        return handle_uforge_exception(e)
def do_delete(self, args):
    """Delete an appliance template by id (confirmation unless --no-confirm)."""
    try:
        #add arguments
        doParser = self.arg_delete()
        try:
            doArgs = doParser.parse_args(args.split())
        except SystemExit as e:
            # argparse exits on --help or bad arguments; swallow and return.
            return

        #call UForge API
        printer.out("Searching template with id ["+doArgs.id+"] ...")
        myAppliance = self.api.Users(self.login).Appliances(doArgs.id).Get()
        if myAppliance is None or type(myAppliance) is not Appliance:
            printer.out("Template not found")
        else:
            # Show the template that is about to be deleted.
            table = Texttable(800)
            table.set_cols_dtype(["t","t","t","t","t","t","t","t","t","t"])
            table.header(["Id", "Name", "Version", "OS", "Created", "Last modified",
                          "# Imgs", "Updates", "Imp", "Shared"])
            table.add_row([myAppliance.dbId, myAppliance.name, str(myAppliance.version),
                           myAppliance.distributionName+" "+myAppliance.archName,
                           myAppliance.created.strftime("%Y-%m-%d %H:%M:%S"),
                           myAppliance.lastModified.strftime("%Y-%m-%d %H:%M:%S"),
                           len(myAppliance.imageUris.uri),myAppliance.nbUpdates,
                           "X" if myAppliance.imported else "",
                           "X" if myAppliance.shared else ""])
            print table.draw() + "\n"
            if doArgs.no_confirm:
                self.api.Users(self.login).Appliances(myAppliance.dbId).Delete()
                printer.out("Template deleted", printer.OK)
            elif generics_utils.query_yes_no("Do you really want to delete template with id "+str(myAppliance.dbId)):
                self.api.Users(self.login).Appliances(myAppliance.dbId).Delete()
                printer.out("Template deleted", printer.OK)
        return 0
    except ArgumentParserError as e:
        printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
        self.help_delete()
    except Exception as e:
        return handle_uforge_exception(e)
def make_ssh_fingerprints_table(output): ('Generate a table of SSH fingerprints in reStructuredText for the EOS Lab' 'machines.') # Enable skip_bad_hosts -- we will list down hosts as such in the table. # # skip_bad_hosts and parallel are, unfortunately, mutually exclusive. Don't # see why they have to be, but that's the way it is. env.skip_bad_hosts = True # Generate table. results_dict = execute(_get_fingerprint) table = Texttable() # The default decoration produces the correct table. table.header(['Host', 'Fingerprint']) for host, command_output in sorted(six.iteritems(results_dict)): # Use the short host name. short_hostname = host.split('.')[0] fingerprint_text = ( # Indicate that the host is down in the table. 'down for maintenance' if # Fabric returns the exception if the task failed. isinstance(command_output, Exception) # Use a fixed-width font for the fingerprint itself. else '``{0}``'.format(command_output)) table.add_row((short_hostname, fingerprint_text)) with open(output, 'w') as output_file: six.print_(table.draw(), file=output_file)
def do_list(self, args):
    """List the generation (target) formats available to the user."""
    try:
        #call UForge API
        printer.out("Getting generation formats for ["+self.login+"] ...")
        targetFormatsUser = self.api.Users(self.login).Targetformats.Getall()
        if targetFormatsUser is None or len(targetFormatsUser.targetFormats.targetFormat) == 0:
            printer.out("No generation formats available")
            return 0
        else:
            targetFormatsUser = generics_utils.order_list_object_by(targetFormatsUser.targetFormats.targetFormat,"name")
            table = Texttable(200)
            table.set_cols_align(["l", "l", "l", "l", "l", "c"])
            table.header(["Name", "Format", "Category", "Type", "CredAccountType", "Access"])
            for item in targetFormatsUser:
                # 'X' marks formats this user is allowed to use.
                if item.access:
                    access = "X"
                else:
                    access = ""
                if item.credAccountType is None:
                    credAccountType = ""
                else:
                    credAccountType = item.credAccountType
                table.add_row(
                    [item.name, item.format.name, item.category.name,
                     item.type, credAccountType, access])
            print table.draw() + "\n"
        return 0
    except ArgumentParserError as e:
        printer.out("In Arguments: " + str(e), printer.ERROR)
        self.help_list()
    except Exception as e:
        return handle_uforge_exception(e)
def do_info(self, args):
    """Print detailed information about a user account."""
    try:
        doParser = self.arg_info()
        doArgs = doParser.parse_args(shlex.split(args))

        printer.out("Getting user ["+doArgs.account+"] ...")
        user = self.api.Users(doArgs.account).Get()
        if user is None:
            printer.out("user "+ doArgs.account +" does not exist", printer.ERROR)
        else:
            # Render the 'Active' flag as an X mark.
            if user.active:
                active = "X"
            else:
                active = ""
            printer.out("Informations about " + doArgs.account + ":",)
            table = Texttable(200)
            table.set_cols_align(["c", "l", "c", "c", "c", "c", "c", "c"])
            table.header(["Login", "Email", "Lastname", "Firstname", "Created",
                          "Active", "Promo Code", "Creation Code"])
            table.add_row([user.loginName, user.email, user.surname , user.firstName,
                           user.created.strftime("%Y-%m-%d %H:%M:%S"), active,
                           user.promoCode, user.creationCode])
            print table.draw() + "\n"
        return 0
    except ArgumentParserError as e:
        printer.out("In Arguments: "+str(e), printer.ERROR)
        self.help_info()
    except Exception as e:
        return handle_uforge_exception(e)
def do_disable(self, args): try: doParser = self.arg_disable() doArgs = doParser.parse_args(shlex.split(args)) printer.out("Disabling user [" + doArgs.account + "] ...") user = self.api.Users(doArgs.account).Get() if user is None: printer.out("user " + doArgs.account + "does not exist", printer.ERROR) else: if user.active == False: printer.out("User [" + doArgs.account + "] is already disabled", printer.ERROR) else: user.active = False self.api.Users(doArgs.account).Update(body=user) printer.out("User [" + doArgs.account + "] is now disabled", printer.OK) if user.active == True: actived = "X" else: actived = "" printer.out("Informations about [" + doArgs.account + "] :") table = Texttable(200) table.set_cols_align(["c", "l", "c", "c", "c", "c", "c", "c"]) table.header( ["Login", "Email", "Lastname", "Firstname", "Created", "Active", "Promo Code", "Creation Code"]) table.add_row([user.loginName, user.email, user.surname, user.firstName, user.created.strftime("%Y-%m-%d %H:%M:%S"), actived, user.promoCode, user.creationCode]) print table.draw() + "\n" return 0 except ArgumentParserError as e: printer.out("In Arguments: " + str(e), printer.ERROR) self.help_disable() except Exception as e: return marketplace_utils.handle_uforge_exception(e)
def top(db): count_query = ''' SELECT count(*) FROM commands WHERE timestamp > ? ''' percentage = 100 / float(execute_scalar(db, count_query, TIMESTAMP)) query = ''' SELECT count(*) AS counts, command FROM commands WHERE timestamp > ? GROUP BY command ORDER BY counts DESC LIMIT 20 ''' table = Texttable() table.set_deco(Texttable.HEADER) table.set_cols_align(('r', 'r', 'l')) table.header(('count', '%', 'command')) for row in db.execute(query, (TIMESTAMP,)): table.add_row((row[0], int(row[0]) * percentage, row[1])) print table.draw()
def render_datasets_as_table(datasets, display_heading=True):
    """
    Returns ASCII table view of datasets.

    :param datasets: The datasets to be rendered.
    :type datasets: :class:`mytardisclient.models.resultset.ResultSet`
    :param display_heading: When True, prepend a heading summarizing the
        query meta data (URL, total count, limit, offset), which can be
        used to tell whether results were truncated by pagination.
    """
    if display_heading:
        heading = ("\n"
                   "Model: Dataset\n"
                   "Query: %s\n"
                   "Total Count: %s\n"
                   "Limit: %s\n"
                   "Offset: %s\n\n"
                   % (datasets.url, datasets.total_count,
                      datasets.limit, datasets.offset))
    else:
        heading = ""

    grid = Texttable(max_width=0)
    grid.set_cols_align(["r", "l", "l", "l"])
    grid.set_cols_valign(["m"] * 4)
    grid.header(["Dataset ID", "Experiment(s)", "Description", "Instrument"])
    for dataset in datasets:
        grid.add_row([dataset.id,
                      "\n".join(dataset.experiments),
                      dataset.description,
                      dataset.instrument])
    return heading + grid.draw() + "\n"
def render_schemas_as_table(schemas, display_heading=True):
    """
    Returns ASCII table view of schemas.

    :param schemas: The schemas to be rendered.
    :type schemas: :class:`mytardisclient.models.resultset.ResultSet`
    :param display_heading: When True, prepend a heading summarizing the
        query meta data (URL, total count, limit, offset), which can be
        used to tell whether results were truncated by pagination.
    """
    if display_heading:
        heading = ("\n"
                   "Model: Schema\n"
                   "Query: %s\n"
                   "Total Count: %s\n"
                   "Limit: %s\n"
                   "Offset: %s\n\n"
                   % (schemas.url, schemas.total_count,
                      schemas.limit, schemas.offset))
    else:
        heading = ""

    grid = Texttable(max_width=0)
    grid.set_cols_align(["r", "l", "l", "l", "l", "l", "l"])
    grid.set_cols_valign(["m"] * 7)
    grid.header(["ID", "Name", "Namespace", "Type", "Subtype",
                 "Immutable", "Hidden"])
    for schema in schemas:
        grid.add_row([schema.id, schema.name, schema.namespace, schema.type,
                      schema.subtype or '',
                      str(bool(schema.immutable)), str(bool(schema.hidden))])
    return heading + grid.draw() + "\n"
def render_instruments_as_table(instruments, display_heading=True):
    """
    Returns ASCII table view of instruments.

    :param instruments: The instruments to be rendered.
    :type instruments: :class:`mytardisclient.models.resultset.ResultSet`
    :param display_heading: When True, prepend a heading summarizing the
        query meta data (URL, total count, limit, offset), which can be
        used to tell whether results were truncated by pagination.
    """
    if display_heading:
        heading = ("\n"
                   "Model: Instrument\n"
                   "Query: %s\n"
                   "Total Count: %s\n"
                   "Limit: %s\n"
                   "Offset: %s\n\n"
                   % (instruments.url, instruments.total_count,
                      instruments.limit, instruments.offset))
    else:
        heading = ""

    grid = Texttable(max_width=0)
    grid.set_cols_align(["r", "l", "l"])
    grid.set_cols_valign(["m"] * 3)
    grid.header(["ID", "Name", "Facility"])
    for instrument in instruments:
        grid.add_row([instrument.id, instrument.name, instrument.facility])
    return heading + grid.draw() + "\n"
def do_search(self, args):
    """Search packages by name within a distribution (Search/Version API)."""
    try:
        #add arguments
        doParser = self.arg_search()
        try:
            doArgs = doParser.parse_args(args.split())
        except SystemExit as e:
            # argparse exits on --help or bad arguments; swallow and return.
            return

        #call UForge API
        printer.out("Search package '"+doArgs.pkg+"' ...")
        distribution = self.api.Distributions(doArgs.id).Get()
        printer.out("for OS '"+distribution.name+"', version "+distribution.version)
        pkgs = self.api.Distributions(distribution.dbId).Pkgs.Getall(Search=doArgs.pkg, Version=distribution.version)
        if pkgs is None or not hasattr(pkgs, 'pkgs'):
            printer.out("No package found")
        else:
            table = Texttable(800)
            table.set_cols_dtype(["t","t","t","t","t","t"])
            table.header(["Name", "Version", "Arch", "Release", "Build date", "Size"])
            # NOTE(review): helper is spelled 'oder_list_object_by' here while
            # sibling commands use 'order_list_object_by' — confirm which one
            # generics_utils actually defines before "fixing".
            pkgs = generics_utils.oder_list_object_by(pkgs.get_pkgs().get_pkg(), "name")
            for pkg in pkgs:
                table.add_row([pkg.name, pkg.version, pkg.arch, pkg.release,
                               pkg.pkgBuildDate.strftime("%Y-%m-%d %H:%M:%S"),
                               size(pkg.size)])
            print table.draw() + "\n"
            printer.out("Found "+str(len(pkgs))+" packages")
    except ArgumentParserError as e:
        printer.out("ERROR: In Arguments: "+str(e), printer.ERROR)
        self.help_search()
    except Exception as e:
        generics_utils.print_uforge_exception(e)
def do_list(self, args):
    """List roles and their entitlements for a user account."""
    try:
        doParser = self.arg_list()
        doArgs = doParser.parse_args(shlex.split(args))

        printer.out("Getting roles and their entitlements for user [" + doArgs.account + "]:\n")
        roles = self.api.Users(doArgs.account).Roles.Getall()
        table = Texttable(200)
        table.set_cols_align(["l", "l"])
        table.header(["Name", "Description"])
        table.set_cols_width([30,60])
        for role in roles.roles.role:
            table.add_row([role.name.upper(), role.description])
            # Entitlement rows are nested under their role, marked with an arrow.
            for entitlement in role.entitlements.entitlement:
                table.add_row(["===> " + entitlement.name, entitlement.description])
        printer.out("Role entitlements are represented with \"===>\".", printer.INFO)
        print table.draw() + "\n"
        return 0
    except ArgumentParserError as e:
        printer.out("In Arguments: "+str(e), printer.ERROR)
        self.help_list()
    except Exception as e:
        return handle_uforge_exception(e)
def list(self): """List the Drbd volumes and statuses""" # Set permissions as having been checked, as listing VMs # does not require permissions self._get_registered_object('auth').set_permission_asserted() # Create table and add headers table = Texttable() table.set_deco(Texttable.HEADER | Texttable.VLINES) table.header(('Name', 'Type', 'Location', 'Nodes', 'Shared', 'Free Space', 'ID')) # Set column alignment and widths table.set_cols_width((15, 5, 30, 70, 6, 15, 50)) table.set_cols_align(('l', 'l', 'l', 'l', 'l', 'l', 'l')) for storage_backend in self.get_all(): table.add_row(( storage_backend.name, storage_backend.storage_type, storage_backend.get_location(), ', '.join(storage_backend.nodes), str(storage_backend.shared), SizeConverter(storage_backend.get_free_space()).to_string(), storage_backend.id_ )) return table.draw()
def dump(relation):
    """Interactively page *relation* to the terminal as an ASCII table.

    Samples up to 1000 rows to pre-compute column widths, then streams
    the rows one screenful at a time, prompting between pages.
    """
    width,height = term_size()
    table = Texttable(width)
    # Duplicate the iterator: one branch for width sampling, one for output.
    sample, iterator = tee(relation)
    table.add_rows(take(1000,sample))
    # NOTE(review): relies on Texttable's private _compute_cols_width();
    # may break on texttable upgrades.
    table._compute_cols_width()
    del sample
    table.reset()
    table.set_deco(Texttable.HEADER)
    table.header([f.name for f in relation.schema.fields])
    # height-3 leaves room for the header and the prompt line.
    rows = take(height-3, iterator)
    try:
        while rows:
            table.add_rows(rows, header=False)
            print table.draw()
            rows = take(height-3, iterator)
            if rows:
                raw_input("-- enter for more ^c to quit --")
    except KeyboardInterrupt:
        # ^c at the prompt simply stops paging.
        print
def sub(db, command, *filters):
    """Print the top-20 sub-commands of *command* matching *filters*.

    Counts normalized user strings that start with 'command filters...'
    and shows each with its share of the matching total.
    """
    counts = collections.defaultdict(int)
    user_filter = ' '.join(itertools.chain([command], filters))
    total = 0
    query = '''
        SELECT user_string
        FROM commands
        WHERE timestamp > ?
        AND command = ?
    '''
    for row in db.execute(query, (TIMESTAMP, command)):
        command = normalize_user_string(row[0])
        if command.startswith(user_filter):
            counts[command] += 1
            total += 1
    percentage = 100 / float(total)
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_align(('r', 'r', 'l'))
    table.set_cols_width((5, 6, 75))
    table.header(('count', '%', 'command'))
    # Sort by (count, key) descending; Python 2 tuple-parameter lambda.
    for key, value in sorted(counts.iteritems(),
                             key=lambda (k, v): (v, k),
                             reverse=True)[:20]:
        table.add_row((value, value * percentage, key))
    print table.draw()
def do_modify(self, args):
    """Modify a quota (nb / limit / unlimited) of a given type for a user."""
    try:
        doParser = self.arg_modify()
        doArgs = doParser.parse_args(shlex.split(args))

        # At least one modification option is required.
        if not doArgs.unlimited and doArgs.limit is None and doArgs.nb is None:
            printer.out(
                "You must specify a modification (unlimited|limit|nb).", printer.ERROR)
            return 0

        printer.out("Getting quotas for [" + doArgs.account + "] ...")
        quotas = self.api.Users(doArgs.account).Quotas.Get()
        if quotas is None or len(quotas.quotas.quota) == 0:
            printer.out(
                "No quotas available for [" + doArgs.account + "].", printer.ERROR)
        else:
            typeExist = False
            for item in quotas.quotas.quota:
                if item.type == doArgs.type:
                    typeExist = True
                    if doArgs.nb is not None:
                        item.nb = doArgs.nb
                    # A limit of -1 encodes 'unlimited' server-side.
                    if doArgs.unlimited and doArgs.limit is None:
                        item.limit = -1
                    elif doArgs.limit is not None and not doArgs.unlimited:
                        item.limit = doArgs.limit
                    elif doArgs.limit is not None and doArgs.unlimited:
                        printer.out(
                            "You can't set a defined limit and on the other hand set an unlimited limit.",
                            printer.ERROR)
                        return 2
            if not typeExist:
                printer.out("Type is not defined or correct.", printer.ERROR)
                return 2
            else:
                quotas = self.api.Users(
                    doArgs.account).Quotas.Update(body=quotas)
                printer.out("Changes done.", printer.OK)
                quotas = generics_utils.order_list_object_by(
                    quotas.quotas.quota, "type")
                table = Texttable(200)
                table.set_cols_align(["c", "c", "c"])
                table.header(["Type", "Consumed", "Limit"])
                for item in quotas:
                    if item.limit == -1:
                        limit = "unlimited"
                    else:
                        limit = item.limit
                    # Pluralize the type name when more than one is consumed.
                    if item.nb > 1:
                        name = item.type + "s"
                    else:
                        name = item.type
                    table.add_row([name, item.nb, limit])
                print table.draw() + "\n"
        return 0
    except ArgumentParserError as e:
        printer.out("In Arguments: " + str(e), printer.ERROR)
        self.help_modify()
        return 0
    except Exception as e:
        return handle_uforge_exception(e)
def __init__(self):
    """Parse command-line options and dispatch to the selected NETCONF function.

    Side effects: assigns the module-level globals host, user, password,
    netconfport, vlan and function from the parsed arguments, then calls
    the query function named by -f/--function.  With no arguments (or with
    -f ALL) it prints the usage banner and function table.
    """
    parser = argparse.ArgumentParser(description="Description for my parser")
    parser.add_argument("-H", "--Host", help="HOST_NAME or IP_ADDRESS", required=False, default="")
    parser.add_argument("-u", "--user", help="NETCONF USERNAME", required=False, default="")
    parser.add_argument("-p", "--password", help="NETCONF PASSWORD", required=False, default="")
    parser.add_argument("-n", "--netconfport", help="NETCONF PORT", required=False, default="")
    parser.add_argument("-f", "--function", help="FUNCTION NAME -f ALL FOR ALL OPTIONS", required=False, default="")
    parser.add_argument("-v", "--vlan", help="VLAN-ID NUMBER", required=False, default="")
    argument = parser.parse_args()

    def print_usage_help():
        # Usage banner + function table.  This was previously duplicated
        # verbatim for both the '-f ALL' case and the no-arguments case.
        print("Usage ./netconf-juniper-api.py -H 200.200.200.200 -u root -p 1234 -n 2222 -f FUNCTION_NAME")
        printHelp = Texttable()
        printHelp.header(['FUNCTION', 'DESCRIBRE'])
        printHelp.add_row(['system', 'show system information'])
        printHelp.add_row(['interfaceterse', 'show all interface as terse'])
        printHelp.add_row(['subscriber', 'show subscribers connected numers as PPPOE, DHCP and all'])
        printHelp.add_row(['vlanpppoe', 'show all vlans used to connect for customers to conect pppoe'])
        printHelp.add_row(['subscriberforonevlan', 'show number of sbscriber using a specific VLAN'])
        # BUG FIX: the help used to advertise 'subscriverforvlan', but the
        # dispatcher below only accepts 'subscriberforvlan'.
        printHelp.add_row(['subscriberforvlan', 'show all vlans and how many subscribers is connected in witch vlan and total'])
        printHelp.add_row(['interfacepspppoe', 'show all interface PS and numer of subscribers is connected using witch interface'])
        print(printHelp.draw())

    status = False
    if argument.Host:
        global host
        host = argument.Host
        status = True
    if argument.user:
        global user
        user = argument.user
        status = True
    if argument.password:
        global password
        password = argument.password
        status = True
    if argument.netconfport:
        global netconfport
        netconfport = argument.netconfport
        status = True
    else:
        # The `global netconfport` declaration above is function-wide,
        # so this default assignment is also global.
        netconfport = defaultnetconfport
    if argument.vlan:
        global vlan
        vlan = argument.vlan
        status = True
    else:
        vlan = ''
    # FUNCTION SELECT
    if argument.function:
        global function
        function = argument.function
        status = True
        if function == 'system':
            system(host, netconfport, user, password)
        if function == 'interfaceterse':
            interfaceterse(host, netconfport, user, password)
        if function == 'subscriber':
            subscriber(host, netconfport, user, password)
        if function == 'vlanpppoe':
            vlanPppoe(host, netconfport, user, password)
        if function == 'subscriberforonevlan':
            # This query is per-VLAN, so -v is mandatory here.
            if vlan:
                numSubscriberForVlan(host, netconfport, user, password, vlan)
            else:
                print('You must declare vlan with -v VLAN_NUMBER')
        if function == 'subscriberforvlan':
            vlanPppoeCount(host, netconfport, user, password)
        if function == 'interfacepspppoe':
            interfacePsPppoe(host, netconfport, user, password)
        if function == 'ALL':
            print_usage_help()
    if not status:
        # No recognized option at all: show the help table.
        print_usage_help()
def check_depfile(env, verbose, component, depfile, toolset):
    '''Perform basic checks on dependency files.

    Parses *depfile* (defaulting to "<component>.dep") for the given
    (package, component) pair and *toolset*.  When *verbose*, prints the
    parsed commands and resolved packages/components; always reports any
    missing packages, components or files, and raises click.ClickException
    if the file cannot be parsed or references missing files.
    '''
    lPackage, lComponent = component
    if depfile is None:
        depfile = basename(lComponent) + ".dep"
    lPathMaker = Pathmaker(env.srcdir, env._verbosity)
    try:
        lParser = DepFileParser(toolset, lPathMaker)
        lParser.parse(lPackage, lComponent, depfile)
    except OSError as lExc:
        raise click.ClickException("Failed to parse dep file - '{}'".format(lExc))
    echo()
    # N.B. Rest of this function is heavily based on implementation of 'dep report' command; assuming
    # that output of these 2 commands does not significantly diverge, might make sense to implement
    # command output in a separate function, that's invoked by both commands
    lCmdHeaders = [
        'path',
        'flags',
        'map',
        'lib',
    ]
    lFilters = []
    # FIX: use raw strings for the regex replacement templates below.
    # '\g<1> ' in a plain literal contains the invalid escape sequence
    # '\g', which triggers a DeprecationWarning/SyntaxWarning on modern
    # Pythons even though it happens to evaluate to the intended text.
    lPrepend = re.compile('(^|\n)')
    if verbose:
        secho('Parsed commands', fg='blue')
        for k in lParser.commands:
            echo('  + {0} ({1})'.format(k, len(lParser.commands[k])))
            if not lParser.commands[k]:
                echo()
                continue
            lCmdTable = Texttable(max_width=0)
            lCmdTable.header(lCmdHeaders)
            lCmdTable.set_deco(Texttable.HEADER | Texttable.BORDER)
            lCmdTable.set_chars(['-', '|', '+', '-'])
            for lCmd in lParser.commands[k]:
                lRow = [
                    relpath(lCmd.FilePath, env.srcdir),
                    ','.join(lCmd.flags()),
                    lCmd.Map,
                    lCmd.Lib,
                ]
                # lFilters pairs a column index with a compiled regex; a row
                # is kept only if every filter matches its column.
                if lFilters and not all([rxp.match(lRow[i]) for i, rxp in lFilters]):
                    continue
                lCmdTable.add_row(lRow)
            # Indent every line of the rendered table by one space.
            echo(lPrepend.sub(r'\g<1> ', lCmdTable.draw()))
            echo()
        secho('Resolved packages & components', fg='blue')
        string = ''
        for pkg in sorted(lParser.components):
            string += ' + %s (%d)\n' % (pkg, len(lParser.components[pkg]))
            for cmp in sorted(lParser.components[pkg]):
                string += '   > ' + str(cmp) + '\n'
        echo(string)
    if lParser.missingPackages:
        secho('Missing packages:', fg='red')
        echo(str(list(lParser.missingPackages)))
    lCNF = lParser.missingComponents
    if lCNF:
        secho('Missing components:', fg='red')
        string = ''
        for pkg in sorted(lCNF):
            string += '+ %s (%d)\n' % (pkg, len(lCNF[pkg]))
            for cmp in sorted(lCNF[pkg]):
                string += '  > ' + str(cmp) + '\n'
        echo(string)
    lFNF = lParser.missingFiles
    if lFNF:
        secho('Missing files:', fg='red')
        lFNFTable = Texttable(max_width=0)
        lFNFTable.header(['path', 'included by'])
        lFNFTable.set_deco(Texttable.HEADER | Texttable.BORDER)
        for pkg in sorted(lFNF):
            lCmps = lFNF[pkg]
            for cmp in sorted(lCmps):
                lPathExps = lCmps[cmp]
                for pathexp in sorted(lPathExps):
                    lFNFTable.add_row([
                        relpath(pathexp, env.srcdir),
                        '\n'.join([relpath(src, env.srcdir) for src in lPathExps[pathexp]]),
                    ])
        echo(lPrepend.sub(r'\g<1> ', lFNFTable.draw()))
        echo()
    if lParser.missingPackages or lParser.missingComponents or lParser.missingFiles:
        raise click.ClickException(
            "Cannot find 1 or more files referenced by depfile {}".format(
                lPathMaker.getPath(lPackage, lComponent, 'include', depfile)))
    elif not verbose:
        echo("No errors found in depfile {}".format(
            lPathMaker.getPath(lPackage, lComponent, 'include', depfile)))
def fstat_iter(depot_path, to_changelist, from_changelist=0, cache_dir='.o4'):
    '''
    Return the needed fstat data by combining three possible sources:
    perforce, the fstat server, and local fstat cache files. Note that
    the local files and the fstat server are guaranteed to return lines
    in (descending) changelist order, while the Perforce data may not.

    The three sources are ordered [fstat server, perforce, fstat server,
    local]; each one may or may not be used, and the fstat server will
    not be used twice. In the order read, each subset will contain only
    changelist numbers less than all that have been read in previous
    subsets.

    The local cache file created should not have more than one entry for
    any filename. Such duplication may come about due to a file having
    been changed in more than one of the changelist subsets being
    queried; a row for a file that has been seen already (and thus, at a
    higher changelist) must be ignored.

    Beware: do not break out of the returned generator! This will
    prevent local cache files from being created, causing superfluous
    access to perforce and/or fstat server.
    '''
    from tempfile import mkstemp
    from o4_pyforce import P4TimeoutError, P4Error
    to_changelist, from_changelist = int(to_changelist), int(from_changelist)
    # cache_cl is the newest changelist covered by a local cache file (if any).
    cache_cl, cache_fname = get_fstat_cache(to_changelist, cache_dir)
    updated = []  # NOTE(review): appears unused in the visible body.
    # Filenames already yielded; later (older) sources must skip these.
    all_filenames = set()
    CLR = '%c[2K\r' % chr(27)  # ANSI "erase line + carriage return"
    # Per-source bookkeeping for the summary table printed in `finally`:
    # maps source name -> ((requested range), (first cl, last cl)) or None.
    summary = {'Perforce': None, 'Fstat server': None, 'Local cache': None}
    try:
        fout = temp_fname = None
        highest_written_cl = 0
        _first = _last = 0
        # These are local and re-used in various blocks below
        # All rows read from any source are also written to a gzipped temp
        # file, which becomes the new local cache on success.
        fh, temp_fname = mkstemp(dir=cache_dir)
        os.close(fh)
        fout = gzip.open(temp_fname, 'wt', encoding='utf8', compresslevel=9)
        print("# COLUMNS: F_CHANGELIST, F_PATH, F_REVISION, F_FILE_SIZE, F_CHECKSUM",
              file=fout)
        if cache_cl == to_changelist:
            # Fast path: cache already covers the target changelist.
            print(f'*** INFO: Satisfied from local cache {cache_fname}', file=sys.stderr)
            for cl, line in fstat_from_csv(cache_fname, fstat_cl):
                if not cl:
                    continue
                if cl < from_changelist:
                    break
                yield line
            return
        # Range still missing after consulting the cache (inclusive high,
        # exclusive-ish low bound expressed as cache_cl + 1).
        missing_range = (to_changelist, cache_cl + 1)
        o4server_range = (None, None)
        if o4_config.fstat_server():
            _first = _last = 0
            try:
                for line in fstat_from_server(depot_path, missing_range[0], missing_range[1],
                                              o4_config.fstat_server_nearby()):
                    cl, path, line = fstat_cl_path(line)
                    if not cl:
                        continue
                    _last = cl
                    _first = _first or cl  # first (highest) changelist seen
                    all_filenames.add(path)
                    print(line, file=fout)
                    if from_changelist < cl <= to_changelist:
                        yield line
                summary['Fstat server'] = (missing_range, (int(_first), int(_last)))
                missing_range = (None, None)
            except FstatRedirection as e:
                # Server only has data up to e.cl; fetch the newer part from
                # Perforce and come back to the server for the older part.
                print(f'*** INFO: Fstat server redirected to changelist {e.cl}',
                      file=sys.stderr)
                if e.cl > to_changelist:
                    print(f'*** WARNING: Fstat server redirected to {e.cl} which is greater',
                          f'than {to_changelist}.',
                          file=sys.stderr)
                    print(' Please contact [email protected].', file=sys.stderr)
                elif e.cl > cache_cl:
                    missing_range = (to_changelist, e.cl + 1)
                    o4server_range = (e.cl, cache_cl + 1)
            except FstatServerError as e:
                summary['Fstat server'] = (missing_range, (0, 0))
            highest_written_cl = max(highest_written_cl, int(_first))
        perforce_filenames = dict()
        if missing_range[0]:
            # Up to 3 attempts against Perforce; P4TimeoutError restarts,
            # certain P4Errors adjust the range or abort.
            retry = 3
            while retry:
                retry -= 1
                try:
                    for f in fstat_from_perforce(depot_path, missing_range[0],
                                                 missing_range[1]):
                        if f[F_PATH] and f[F_PATH] not in all_filenames:
                            if from_changelist < int(f[F_CHANGELIST]) <= to_changelist:
                                yield fstat_join(f)
                            f[0] = int(f[0])  # changelist as int, for sorting below
                            perforce_filenames[f[F_PATH]] = f
                    break
                except P4Error as e:
                    done = False
                    for a in e.args:
                        fix = False
                        if 'Too many rows scanned' in a.get('data', ''):
                            if cache_cl:
                                # Retry without the cache lower bound.
                                print(f"{CLR}*** WARNING: Maxrowscan occurred, ignoring cache {cache_fname}",
                                      file=sys.stderr)
                                fix = True
                                missing_range = (to_changelist, None)
                                retry += 1
                        elif 'Request too large' in a.get('data', ''):
                            sys.exit(
                                f"{CLR}*** ERROR: 'Request too large'. {depot_path} may be too broad."
                            )
                        elif 'no such file' in a.get('data', ''):
                            print(f"{CLR}*** INFO: Empty changelist range ({missing_range}).",
                                  file=sys.stderr)
                            # Just an empty range of changelists, we are done
                            done = True
                            break
                        if not fix:
                            raise
                    if done:
                        break
                except P4TimeoutError:
                    # Partial results are unusable; start over.
                    perforce_filenames.clear()
                    print(f"{CLR}*** WARNING: ({retry+1}/3) P4 Timeout while getting fstat",
                          file=sys.stderr)
            else:
                # while-else: all retries exhausted without a `break`.
                sys.exit(f"{CLR}*** ERROR: "
                         f"Too many P4 Timeouts for p4 fstat"
                         f"{depot_path}@{from_changelist},@{to_changelist}")
            all_filenames.update(perforce_filenames.keys())
            if perforce_filenames:
                # Perforce rows are not ordered; sort descending by changelist
                # before writing so the cache file stays ordered.
                perforce_rows = sorted(perforce_filenames.values(), reverse=True)
                summary['Perforce'] = (missing_range,
                                       (int(perforce_rows[0][F_CHANGELIST]),
                                        int(perforce_rows[-1][F_CHANGELIST])))
                highest_written_cl = max(highest_written_cl,
                                         int(perforce_rows[0][F_CHANGELIST]))
                for f in perforce_rows:
                    print(fstat_join(f), file=fout)
            del perforce_filenames
        if o4server_range[0]:
            # Second (older) fstat-server segment, set up by FstatRedirection.
            _first = _last = 0
            for line in fstat_from_server(depot_path, o4server_range[0], o4server_range[1]):
                cl, path, line = fstat_cl_path(line)
                if not cl:
                    continue
                _last = cl
                _first = _first or cl
                if path not in all_filenames:
                    all_filenames.add(path)
                    print(line, file=fout)
                    if from_changelist < cl <= to_changelist:
                        yield line
            summary['Fstat server'] = (o4server_range, (int(_first), int(_last)))
            highest_written_cl = max(highest_written_cl, int(_first))
        if cache_cl:
            # Oldest source: the local cache file. Rows for files already
            # seen at a higher changelist are skipped (see docstring).
            _first = _last = 0
            for cl, path, line in fstat_from_csv(cache_fname, fstat_cl_path):
                if not cl:
                    continue
                _last = cl
                _first = _first or cl
                if path not in all_filenames:
                    print(line, file=fout)
                    if from_changelist < cl <= to_changelist:
                        yield line
                else:
                    all_filenames.remove(path)
            summary['Local cache'] = ((cache_cl, 1), (int(_first), int(_last)))
            highest_written_cl = max(highest_written_cl, int(_first))
        fout.close()
        fout = None  # signal to `finally` that the temp file is complete
        if highest_written_cl:
            # Atomically publish the merged rows as the new cache file.
            os.chmod(temp_fname, 0o444)
            os.rename(temp_fname, f'{cache_dir}/{highest_written_cl}.fstat.gz')
    finally:
        # Clean up a partially-written temp file, then print a summary of
        # which sources were consulted and what they provided.
        if fout:
            fout.close()
            try:
                if temp_fname:
                    os.unlink(temp_fname)
            except FileNotFoundError:
                pass
        from texttable import Texttable
        table = Texttable()
        table.set_cols_align(['l', 'l', 'l'])
        table.set_header_align(['l', 'l', 'l'])
        table.header(['Fstat source', 'Requested', 'Provided'])
        table.set_chars(['-', '|', '+', '-'])
        table.set_deco(table.HEADER)
        for k in 'Perforce', 'Fstat server', 'Local cache':
            # NOTE(review): this first assignment is immediately overwritten
            # by the if/else below; kept as-is (redundant but harmless).
            data = summary[k] if summary[k] else ('Not used', '')
            if summary[k]:
                v = summary[k]
                data = ('{:10,} - {:10,}'.format((v[0][0] or 0), (v[0][1] or 0)),
                        '{:10,} - {:10,}'.format((v[1][0] or 0), (v[1][1] or 0)))
            else:
                data = ('Not used', '')
            table.add_row([k, data[0], data[1]])
        table = '\n'.join('*** INFO: ' + row for row in table.draw().split('\n'))
        print(table, file=sys.stderr)
def generate_report(proj_conf, single_end, stranded):
    """Collect RNA-seq QC results into a dict of report fields (Python 2).

    Reads tool output files from the current working directory (tophat
    logs, RSeQC outputs, rRNA/strandness quantifications, generated PDFs),
    renders several texttables, and returns a dict `d` whose keys are
    consumed by the downstream report template.  `stranded` is accepted
    but not referenced in the visible body.
    """
    # Template fields, initialised empty so the report renders even when a
    # section below fails (each section is wrapped in a permissive try).
    d = {
        'project_id': proj_conf['id'],
        'samplenames': ' '.join(proj_conf['samples']),
        'latex_opt': "",
        'uppnex': "",
        'mapping': "",
        'dup_rem': "",
        'read_count': "",
        'quantifyer': "",
        'gene_body_cov': "",
        'FPKM_heatmap': "",
        'Mapping_statistics': "",
        'Read_Distribution': "",
        'rRNA_table': "",
        'GBC': "",
        'strandness_table': "",
        'complexity_plot': "",
        'species': "",
        'genombuild': "",
        'rseqc_version': '',
        'Preseq': '',
        'date': date.today(),
        'anotation_version': ''
    }

    ## Latex option (no of floats per page)
    floats_per_page = '.. raw:: latex\n\n \setcounter{totalnumber}{8}'
    d['latex_opt'] = floats_per_page

    ## Metadata fetched from the 'Genomics project list' on Google Docs
    try:
        proj_data = ProjectMetaData(proj_conf['id'], proj_conf['config'])
        uppnex_proj = proj_data.uppnex_id
        if proj_data.ref_genome == "hg19":
            d['species'] = 'Human'
        elif proj_data.ref_genome == "mm9":
            d['species'] = 'Mouse'
        else:
            d['species'] = proj_data.ref_genome
    except:
        # Fall back to a placeholder uppnex id if metadata lookup fails.
        uppnex_proj = "b201YYXX"
        print "No uppnex ID fetched"
        pass
    d['uppnex'] = uppnex_proj

    ## RNA-seq tools fetched from config file post_process.yaml
    try:
        tools = proj_conf['config']['custom_algorithms']['RNA-seq analysis']
        d['mapping'] = os.path.join(tools['aligner'], tools['aligner_version'])
        d['dup_rem'] = os.path.join(tools['dup_remover'], tools['dup_remover_version'])
        d['read_count'] = os.path.join(tools['counts'], tools['counts_version'])
        d['quantifyer'] = os.path.join(tools['quantifyer'], tools['quantifyer_version'])
        d['genombuild'] = tools[proj_data.ref_genome]['name']
        d['rseqc_version'] = tools['rseqc_version']
        d['Preseq'] = tools['preseq']
        d['anotation_version'] = tools[proj_data.ref_genome]['annotation_release']
    except:
        print "Could not fetched RNA-seq tools from config file post_process.yaml"
        pass

    ## Mapping Statistics
    tab = Texttable()
    tab.set_cols_dtype(['t', 't', 't', 't'])
    tab.header(['Sample', 'Tot NO Reads', 'UniqMapped', 'UniqMapped DuplRem'])
    statistics = {}
    try:
        for sample_name in proj_conf['samples']:
            try:
                # Total read (pair) count from the tophat prep_reads log.
                f = open('tophat_out_' + sample_name + '/logs/prep_reads.log', 'r')
                tot_NO_read_pairs = f.readlines()[2].split()[3]
                f.close()
                f = open('tophat_out_' + sample_name + '/stat' + sample_name, 'r')
                # NOTE: `dict` shadows the builtin here; kept as in original.
                dict = make_stat(f, tot_NO_read_pairs, single_end)
                tab.add_row([
                    sample_name, tot_NO_read_pairs,
                    str(dict['bef_dup_rem']['%uniq_mapped']) + '%',
                    str(dict['aft_dup_rem']['%uniq_mapped']) + '%'
                ])
                statistics[sample_name] = dict
            except:
                print 'Could not make mapping statistics for sample ' + sample_name
        d['Mapping_statistics'] = indent_texttable_for_rst(tab)
        # Dump the raw statistics as a python-repr "json" file.
        stat_json = open('stat.json', 'w')
        print >> stat_json, statistics
        stat_json.close()
    except:
        print "Could not make Mapping Statistics table"
        pass

    ## Read Distribution
    try:
        tab = Texttable()
        tab.set_cols_dtype(['t', 't', 't', 't', 't', 't', 't', 't'])
        tab.header([
            "Sample", "CDS", "5'UTR", "3'UTR", "Intron", "TSS", "TES", "mRNA"
        ])
        read_dist = {}
        for i in range(len(proj_conf['samples'])):
            sample_name = proj_conf['samples'][i]
            dict = {}
            try:
                f = open('RSeQC_rd_' + sample_name + '.out', 'r')
                dict = read_RSeQC_rd233(f)
                row = [
                    sample_name,
                    dict['CDS_Exons']['Tags/Kb'],
                    dict["5'UTR_Exons"]['Tags/Kb'],
                    dict["3'UTR_Exons"]['Tags/Kb'],
                    dict['Introns']['Tags/Kb'],
                    dict['TSS_up_1kb']['Tags/Kb'],
                    dict['TES_down_1kb']['Tags/Kb'],
                    dict['mRNA_frac']
                ]
                tab.add_row(row)
                read_dist[sample_name] = dict
            except:
                print "Could not make read distribution for sample " + sample_name
                pass
        RSeQC_rd_json = open('RSeQC_rd.json', 'w')
        print >> RSeQC_rd_json, read_dist
        RSeQC_rd_json.close()
        d['Read_Distribution'] = indent_texttable_for_rst(tab)
    except:
        print "Could not make Read Distribution table"
        pass

    ## Gene Body Coverage
    try:
        # One line per sample over gene-body percentiles 0..100.
        figure()
        x = range(0, 101)
        for i in range(len(proj_conf['samples'])):
            y = zeros(101)
            sample_name = proj_conf['samples'][i]
            f = open(sample_name + '.geneBodyCoverage.txt', 'r')
            for line in f.readlines():
                try:
                    key = int(line.split()[0])
                    val = int(line.split()[1])
                    y[key] = val
                except:
                    pass
            plot(x, y)  #,label=proj_conf['samples'][i])
        #legend(loc='upper left',fontsize='xx-small')
        ylabel("read number")
        xlabel("percentile of gene body (5'->3')")
        savefig('gbc.pdf')
        d['GBC'] = image("gbc.pdf", width="100%")
    except:
        print "could not make GBC plot"

    ## FPKM_heatmap
    if os.path.exists("FPKM_heatmap.pdf"):
        d['FPKM_heatmap'] = image("FPKM_heatmap.pdf", width="100%")
    else:
        print "could not make FPKM heatmap"

    ## complexity plot
    if os.path.exists("complexity_curves.pdf"):
        d['complexity_plot'] = image("complexity_curves.pdf", width="100%")
    else:
        # NOTE(review): `complexity` is assigned but never read in the
        # visible body.
        complexity = False
        print "could not make complexity plot"

    ## rRNA_table
    try:
        tab = Texttable()
        tab.set_cols_dtype(['t', 't'])
        tab.header(["Sample", "rRNA"])
        # File format: "<sample>\t<rRNA fraction>" per line.
        f = open('rRNA.quantification', 'r')
        D = {}
        for line in f:
            D[str(line.split('\t')[0].strip())] = str(line.split('\t')[1].strip())
        for sample_name in proj_conf['samples']:
            if D.has_key(sample_name):
                tab.add_row([sample_name, D[sample_name]])
        d['rRNA_table'] = indent_texttable_for_rst(tab)
        f.close()
    except:
        print "could not generate rRNA table"
        pass

    ## strandness_table
    try:
        tab = Texttable()
        tab.set_cols_dtype(['t', 't'])
        tab.header(["Sample", "strand-specific reads"])
        try:
            f = open('infer_experiment.json', 'rb')
            data = json.load(f)
        except:
            print "can't open infer_experiment.json\n"
        D = data
        for sample_name in proj_conf['samples']:
            if D.has_key(sample_name):
                # Stored as a fraction; displayed as a percentage.
                tab.add_row([sample_name, str(float(D[sample_name]) * 100) + '%'])
        d['strandness_table'] = indent_texttable_for_rst(tab)
        f.close()
    except:
        print "could not generate strandness_table"
        pass

    return d
# Query a single-IP threat-intel report and print its connected IOCs,
# then run a batch lookup over two IPs.  `client` and `attributes` are
# assumed to be defined earlier in the file — TODO confirm.
ip = '218.255.67.239'
report = client.ip(ip, attributes=attributes)
if report.has_threat_intel:
    print("This IP has threat intel")
if report.predicted:
    print("This IP was Predicted to be Malicious")
# Who reported this?
print("Reported By", report.reported_by)
# What IOCs are connected
print("\n\nConnected IOCs\n\n")
table = Texttable(max_width=0)  # max_width=0 disables wrapping
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', 't', 't', 't'])
table.header(["ioc_type", "ioc", "categories", "identifiers"])
for connection in report.connections:
    table.add_row([
        connection.ioc_type, connection.ioc_id,
        ','.join(connection.categories),
        ','.join(connection.identifiers)
    ])
print(table.draw())
# The raw JSON reponse is available via the intel attribute
# print(report.intel)
# Batch lookup: one report object per requested IP.
ips = ['218.255.67.239', '141.255.151.79']
reports = client.ips(ips)
for r in reports:
    if r.predicted:
        print("%s was preditected to be malicious" % r.ioc_id)
#Iterate through the regions #for region in regions: for region in regions: print("checking region: " + region['RegionName'] + "\n") ec2 = boto3.client(service_name='ec2', region_name=region['RegionName']) response = ec2.describe_instances() table = Texttable() table.set_cols_align(["c", "c", "c", "c", "c"]) table.set_cols_valign(["m", "m", "m", "m", "m"]) #tablearray = [] #tablearray.append(["Instance ID", "Name", "State", "VolumeId"]) if response['Reservations']: table.header( ["Instance ID", "Name", "State", "VolumeId", "DeviceName"]) for r in response['Reservations']: for i in r['Instances']: instanceid = i['InstanceId'] ec2 = boto3.resource('ec2', region['RegionName']) Instance = ec2.Instance(i['InstanceId']) if Instance.tags: for tag in Instance.tags: if tag['Key'] == 'Name': name = tag['Value'] #There may be another condition here else: name = " " state = i['State']['Name'] vs = '' dn = ''
def do_enable(self, args):
    """Enable the named target formats for a user account.

    Looks up the organization given by --org, fetches its target formats,
    keeps only those matching the requested names, marks each as active
    and accessible, pushes the update via the user API, and prints the
    resulting target-format table.

    Returns 0 on success/no-op; delegates API errors to
    handle_uforge_exception.
    """
    try:
        doParser = self.arg_enable()
        doArgs = doParser.parse_args(shlex.split(args))
        org = org_utils.org_get(self.api, doArgs.org)
        if org is None:
            printer.out("There is no organization matching ["+doArgs.org+"].", printer.OK)
            return 0
        targetFormatsOrg = self.api.Orgs(org.dbId).Targetformats.Getall()
        if targetFormatsOrg is None or len(targetFormatsOrg.targetFormats.targetFormat) == 0:
            printer.out("There is no target format for the user \""+doArgs.account+"\" in [" + org.name + "].")
            return 0
        else:
            targetFormatsOrg = targetFormatsOrg.targetFormats.targetFormat
            targetFormatsList = targetFormats()
            targetFormatsList.targetFormats = pyxb.BIND()
            # Keep only the org formats whose name was requested on the CLI.
            targetFormatsOrg = compare(targetFormatsOrg, doArgs.targetFormats, "name")
            if len(targetFormatsOrg) == 0:
                listName = ""
                for tfname in doArgs.targetFormats:
                    listName = listName + tfname + " "
                printer.out("There is no target formats matching ["+listName+"].")
                return 0
            for item in targetFormatsOrg:
                # NOTE(review): targetFormat() is constructed then immediately
                # replaced by `item`; the fresh object is discarded.
                targetFormatToEnable = targetFormat()
                targetFormatToEnable = item
                targetFormatToEnable.active = True
                targetFormatToEnable.access = True
                printer.out("Enabling ["+item.name+"].")
                targetFormatsList.targetFormats.append(targetFormatToEnable)
            result = self.api.Users(doArgs.account).Targetformats.Update(Org=org.name, body=targetFormatsList)
            result = generics_utils.order_list_object_by(result.targetFormats.targetFormat, "name")
            table = Texttable(200)
            table.set_cols_align(["c", "l", "l", "l", "l", "l", "c"])
            table.header(["Id", "Name", "Format", "Category", "Type", "CredAccountType", "Access"])
            for item in result:
                # "X" marks formats the user can access.
                if item.access:
                    access = "X"
                else:
                    access = ""
                if item.credAccountType is None:
                    credAccountType = ""
                else:
                    credAccountType = item.credAccountType
                table.add_row([item.dbId, item.name, item.format.name, item.category.name, item.type, credAccountType, access])
            printer.out("Target Format list for user \""+doArgs.account+"\" :")
            print table.draw() + "\n"
            return 0
    except ArgumentParserError as e:
        printer.out("In Arguments: "+str(e), printer.ERROR)
        self.help_enable()
    except Exception as e:
        return handle_uforge_exception(e)
def getUser( login ):
    """Fetch a GitHub user profile via the REST API.

    Uses a randomly chosen token from the module-level t_tokens pool for
    authorization.  Returns the parsed JSON dict, or False on any request
    error.
    """
    headers = {"Authorization":"token "+random.choice(t_tokens)}
    try:
        r = requests.get( 'https://api.github.com/users/'+login, headers=headers )
        return r.json()
    except Exception as e:
        print( colored("[-] error occurred: %s" % e, 'red') )
        return False

# Search for users matching `keyword`, derive the page count (30 results
# per page on this endpoint), and prepare the output table.
max_page = 0
total_found = 0
tab = Texttable( 300 )
tab.header( ['Login','Profile','Name','Email','Company','Public repos'] )
r_json = searchUser( keyword, 1 )
# A 'documentation_url' key in the response signals an API error payload.
if len(r_json) and 'documentation_url' in r_json:
    print( colored("[-] error occurred!", 'red') )
    exit()
total_found = r_json['total_count']
max_page = math.ceil( r_json['total_count'] / 30)
sys.stdout.write( colored('[+] %d users found, %d pages.\n' % (total_found,max_page), 'green') )
sys.stdout.write( '[+] retrieving user list...\n' )

def doGetUserList( page ):
    # Throttle requests (~200ms) and report progress via t_stats.
    # NOTE(review): the rest of this function's body is not visible in
    # this chunk — it presumably fetches and records the page's users.
    time.sleep( 200/1000 )
    sys.stdout.write( 'progress: %d/%d\r' % (t_stats['n_current'],t_stats['n_max_page']) )
sys.stdout.write("%s (%d)\n" % (collab['login'], collab['contributions'])) if not collab['login'] in t_collab: t_collab[collab['login']] = 0 t_collab[collab['login']] = t_collab[ collab['login']] + collab['contributions'] else: sys.stdout.write("-\n") sys.stdout.write("\n") print( colored('[+] %d contributors found, reading profiles...\n' % len(t_collab), 'green')) tab = Texttable(300) tab.header( ['Contributions', 'Profile', 'Name', 'Email', 'Company', 'Public repos']) # tab.set_max_width( 100 ) def grabUserHtmlLight(ghaccount, login): url = 'https://github.com/' + login try: r = requests.get(url, timeout=5) except Exception as e: print(colored("[-] error occurred: %s" % e, 'red')) return False if not 'Not Found' in r.text: r_org = re.search('data-hovercard-url="/orgs/([^/]*)/hovercard"', r.text, re.MULTILINE | re.IGNORECASE)
def output_list(data, label, vt=ValueType.STRING):
    """Render *data* as a single-column table headed by *label*.

    The table is sized to the terminal width and drawn with border,
    vertical lines and a header rule.  *vt* is accepted for interface
    compatibility but not used in the body.
    """
    term_width = get_terminal_size()[1]
    deco_flags = Texttable.BORDER | Texttable.VLINES | Texttable.HEADER
    table = Texttable(max_width=term_width)
    table.set_deco(deco_flags)
    table.header([label])
    rows = [[item] for item in data]
    table.add_rows(rows, False)
    six.print_(table.draw())
def getUser(login):
    """Fetch a GitHub user profile via the REST API.

    Authenticates with a random token from the module-level t_tokens pool.
    Returns the parsed JSON dict, or False on any request error.
    """
    headers = {"Authorization": "token " + random.choice(t_tokens)}
    try:
        r = requests.get('https://api.github.com/users/' + login, headers=headers)
        return r.json()
    except Exception as e:
        print(colored("[-] error occurred: %s" % e, 'red'))
        return False

# Search users matching `keyword`, compute the page count (30 per page),
# and prepare the result table.
max_page = 0
total_found = 0
tab = Texttable(300)
tab.header(['login', 'html_url', 'name', 'email', 'company', 'public_repos'])
r_json = searchUser(keyword, 1)
# A 'documentation_url' key in the response signals an API error payload.
if len(r_json) and 'documentation_url' in r_json:
    print(colored("[-] error occurred!", 'red'))
    exit()
total_found = r_json['total_count']
max_page = math.ceil(r_json['total_count'] / 30)
sys.stdout.write(
    colored('[+] %d users found, %d pages.\n' % (total_found, max_page),
            'green'))
sys.stdout.write('[+] retrieving user list...\n')

# NOTE(review): the body of doGetUserList is not visible in this chunk;
# only the header survives here.
def doGetUserList(page):
def GetMovieSummary(exp_type):
    """ Print a table & write a file summarising the properties of all movies available per experiment.

    Args:
        exp_type (string) -> Type of experiment.
            Options: "MDCK_WT_Pure", "MDCK_Sc_Tet-_Pure", "MDCK_Sc_Tet+_Pure"

    Return:
        None.
        Prints a table into a console.
        Writes a txt_file into the directory given by cell_type.
    """
    name = "C E L L T Y P E : '{}'\n".format(exp_type)
    _, txt_file_list = GetMovieFilesPaths(exp_type=exp_type)

    # Parallel per-movie lists, all indexed by movie order.
    movie_number = []
    movie_date = []
    movie_pos = []
    total_frames = []
    total_divisions = []
    mean_div_time = []
    std_div_time = []
    num_lines_raw = []
    num_lines_trimmed = []
    num_lines_sorted = []
    num_lines_filtered = []

    # How many movies do you have available?
    total_movies = 0
    if exp_type == "MDCK_WT_Pure":
        total_movies = 18
    if exp_type == "MDCK_Sc_Tet-_Pure":
        total_movies = 13
    if exp_type == "MDCK_Sc_Tet+_Pure":
        total_movies = 13
    if exp_type == "MDCK_90WT_10Sc_NoComp":
        total_movies = 3  # out of 16

    for order, raw_file in enumerate(sorted(txt_file_list)):
        print("\nProcessing txt_file: {}".format(raw_file))

        # Initialise 3 types of files:
        trimmed_file = raw_file.replace("raw", "trimmed")
        sorted_file = raw_file.replace("raw", "sorted")
        filtered_file = raw_file.replace("raw", "filtered")

        # Order the movies, get their date & pos:
        # (relies on the fixed .../<date>/<pos>/... directory layout)
        movie_number.append(order + 1)
        movie_date.append(sorted_file.split("/")[-4])
        movie_pos.append(sorted_file.split("/")[-3])

        # Get total number of frames per movie:
        path = "/Volumes/lowegrp/Data/Kristina/{}/{}/{}/segmented/".format(
            exp_type, movie_date[order], movie_pos[order])
        frame_count = os.listdir(path)
        frame_count = [
            item for item in frame_count
            if str(item).startswith("s_") and str(item).endswith(".tif")
        ]
        total_frames.append(len(frame_count))

        # Get number of divisions (every non-leaf cell):
        # column 7 == "False" marks a non-leaf row in the sorted file.
        div_counter = 0
        for line in open(sorted_file, 'r'):
            line = line.rstrip().split('\t')
            if line[0] != 'Cell_ID' and line[7] == "False":
                div_counter += 1
        total_divisions.append(div_counter)

        # Extract cell cycle duration according to given limit:
        cct_hrs = []
        for line in open(filtered_file, "r"):
            line = line.rstrip().split("\t")
            if line[0] == 'Cell_ID':
                continue
            # Include only non-root & non-leaf cell_IDs:
            if line[6] == "False" and line[7] == "False":
                cct_hrs.append(float(line[4]))

        # Get mean division time per movie (condition for movies which are too short to output usable data):
        if len(cct_hrs) >= 2:
            mean_div_time.append(round(stats.mean(cct_hrs), 2))
            std_div_time.append(round(stats.stdev(cct_hrs), 2))
        else:
            mean_div_time.append(0.0)
            std_div_time.append(0.0)

        # Count number of lines per file: (-1 to exclude header OR -2 to take the weird line out from 'raw_file')
        num_lines_raw.append(sum(1 for line in open(raw_file)) - 2)
        num_lines_trimmed.append(sum(1 for line in open(trimmed_file)) - 1)
        num_lines_sorted.append(sum(1 for line in open(sorted_file)) - 1)
        num_lines_filtered.append(sum(1 for line in open(filtered_file)) - 1)

    # Write the table & file name & header:
    table = Texttable(max_width=0)
    print(name)
    header = [
        "Movie", "DataDate", "Pos", "Frms", "Div#", "MeanDT", "StdDT",
        "Raw-l", "Trim-l", "Sort-l", "Filt-l"
    ]
    table.header(header)

    # Write into a new file (tab-separated mirror of the printed table):
    summary_file = "/Volumes/lowegrp/Data/Kristina/{}/summary_movies.txt".format(exp_type)
    summary_file = open(summary_file, "w")
    summary_file.write(name)
    header_string = ''
    for item in header:
        header_string += str(item) + "\t"
    header_string = header_string[:-1]
    header_string += "\n"
    summary_file.write(header_string)

    # NOTE(review): iterates over the hard-coded total_movies, which
    # assumes txt_file_list yielded at least that many movies — confirm.
    for i in list(range(0, total_movies)):
        my_list = [
            movie_number[i], movie_date[i], movie_pos[i], total_frames[i],
            total_divisions[i], mean_div_time[i], std_div_time[i],
            num_lines_raw[i], num_lines_trimmed[i], num_lines_sorted[i],
            num_lines_filtered[i]
        ]
        table.add_row(my_list)
        string = ''
        for item in my_list:
            string += str(item) + "\t"
        string = string[:-1]
        string += "\n"
        summary_file.write(string)

    # Print the table & close the newly-written file:
    print(table.draw())
    summary_file.close()
def makeTable(self, data, filter=None, sort=None, tail=None, reverse=False, list_details=False, fast_list_mode=False, status=False, start_config=False):
    """Render service data either as a detail list or a texttable (Python 2).

    Applies optional sorting, tailing, filtering and state-based ordering
    to *data*, then prints one of two layouts: a per-entry detail listing
    (narrow terminals or list_details/fast_list_mode) or a 7-column
    texttable sized from the module-level MAX_LENGTH.

    Returns (data, number): the post-processed data and the 1-based count
    of the next entry.  NOTE: `filter` shadows the builtin; kept for
    interface compatibility.
    """
    number = 1
    if fast_list_mode:
        # fast mode implies the detail-list layout
        list_details = True
    debug(sort=sort)
    if sort:
        data = self.sort_dict(data, sort, reverse)
        if tail:
            # sorted data is a sequence; take the last `tail` entries
            data = data[-int(tail):]
    else:
        if tail:
            # unsorted data is a dict; rebuild from the last `tail` keys
            # (py2: dict.keys() returns a sliceable list)
            data1 = {}
            data_keys = data.keys()[-int(tail):]
            for i in data_keys:
                data1.update({i: data.get(i)})
            data = data1
    # Narrow terminal or explicit detail mode: print entries one by one.
    if MAX_LENGTH <= (220 / 2) or list_details:
        debug(list_details=list_details)
        debug(fast_list_mode=fast_list_mode)
        if filter:
            #MAX_LENGTH <= (220 / 2)
            data = self.setFilter(filter, data, sort)
        if status:
            data = self.sortState(data, sort, status)
        if start_config:
            data = self.sortState(data, sort, start_config)
        debug(data_220=data)
        for i in data:
            name, displayname, pid, status, description, start_config = self.getData(i, sort, data)
            self.printList(name, displayname, pid, status, description, start_config, number, fast_list_mode)
            if not fast_list_mode:
                # visual separator between entries
                print "-" * MAX_LENGTH
            number += 1
        #END ~ MAX_LENGTH <= (220 / 2)
    else:
        # Wide terminal: one texttable with proportional column widths.
        table = Texttable()
        table.header([
            'No', 'Name', 'Display Name', 'PID', 'STATUS', 'DESCRIPTIONS',
            'START'
        ])
        table.set_cols_align(["l", 'l', "l", "l", "l", "l", "c"])
        table.set_cols_valign(["t", "m", "m", "m", "m", "t", "m"])
        table.set_cols_width([
            int(MAX_LENGTH * 0.03),  #No
            int(MAX_LENGTH * 0.15),  #Name
            int(MAX_LENGTH * 0.20),  #DisplayName
            int(MAX_LENGTH * 0.05),  #Pid
            int(MAX_LENGTH * 0.06),  #STATUS
            int(MAX_LENGTH * 0.33),  #Descriptions
            int(MAX_LENGTH * 0.07),  #Start Config
        ])
        sys.dont_write_bytecode = True
        number = 1
        debug(status=status)
        debug(start_config=start_config)
        if filter:
            data = self.setFilter(filter, data, sort)
        if status:
            data = self.sortState(data, sort, status)
        if start_config:
            data = self.sortState(data, sort, start_config=start_config)
        debug(data=data)
        for i in data:
            name, displayname, pid, status, descriptions, start_config = self.getData(i, sort, data)
            table = self.makeTableAdd(table, number, name, displayname, pid, status, descriptions, start_config)
            number += 1
        print table.draw()
    return data, number
def handle(self, *args, **options):
    """Django management command: list Elasticsearch index definitions.

    With --es-only, prints just index names and document counts straight
    from Elasticsearch.  Otherwise lists each DEM index (optionally
    restricted by the 'index' option) with its versions, created/active
    flags, doc counts and tag.
    """
    log.info("Available Index Definitions:")
    indexes, _, apply_all, _, _ = self.get_index_specifying_options(
        options, require_one_include_list=['es_only'])
    es_only = options.get('es_only', False)
    table = Texttable(max_width=85)
    if es_only:
        table.header(["Name", "Count"])
        # side-effect list comprehension, kept as in original
        [
            table.add_row(row)
            for row in DEMIndexManager.list_es_doc_counts().items()
        ]
    else:
        indexes = DEMIndexManager.get_indexes()
        if indexes and not apply_all:
            # Restrict to the base names passed via the 'index' option.
            new_indexes = []
            for index in indexes:
                if index.get_base_name() in options['index']:
                    new_indexes.append(index)
            indexes = new_indexes
        rows = []
        try:
            for dem_index in indexes:
                dem_index_model = dem_index.get_index_model()
                index_versions = dem_index_model.get_available_versions_with_prefix()
                row = None
                if index_versions:
                    # NOTE(review): `row` is overwritten each iteration and
                    # appended only once after the loop, so only the last
                    # version of each index is recorded — confirm intended.
                    for index_version in index_versions:
                        num_docs = DEMIndexManager.get_es_index_doc_count(
                            index_version.name)
                        row = EsListRow(
                            dem_index_model.name, index_version.name,
                            not (index_version.is_deleted is None),
                            index_version.is_active or 0, num_docs,
                            index_version.tag)
                else:
                    # Index declared in code but never created in ES.
                    row = EsListRow(dem_index.get_base_name(), "", False,
                                    False, 0, "Current (not created)")
                if row:
                    rows.append(row)
        except AttributeError:
            # Model table missing: migrations have not been run yet.
            raise FirstMigrationNotRunError()
        table.header([
            "Index Base Name", "Index Version Name", "Created", "Active",
            "Docs", "Tag"
        ])
        table.set_cols_width([20, 35, 7, 6, 5, 9])
        # sort the rows so it's a consistent ordering; these are tuples so they sort nicely
        [table.add_row(r) for r in sorted(rows)]
    log.info(table.draw())
    log.info(
        "An index version name is: \n"
        "{environment prefix}{index name}-{version primary key id}. \n"
        "Most Django Elastic Migrations management commands take the \n"
        "base name (in which case the activated version is used) or \n"
        "the specific index version name.")
def output_table(tab): table = Texttable(max_width=get_terminal_size()[1]) table.set_deco(0) table.header([i.label for i in tab.columns]) table.add_rows([[AsciiOutputFormatter.format_value(resolve_cell(row, i.accessor), i.vt) for i in tab.columns] for row in tab.data], False) print table.draw()
def format_table(tab, conv2ascii=False):
    # Build (but do not print) a Texttable for *tab*, distributing the
    # terminal width across columns: every column first gets an equal share;
    # columns whose content is narrower than their share give the surplus
    # back, and a second pass hands leftover space to columns that wanted
    # more than their share.  Returns the configured Texttable.
    def _try_conv2ascii(s):
        # Escape non-ASCII strings; leave everything else untouched.
        return ascii(s) if not _is_ascii(s) and isinstance(s, str) else s

    max_width = get_terminal_size()[1]
    table = Texttable(max_width=max_width)
    table.set_deco(0)
    table.header([i.label for i in tab.columns])
    widths = []        # widths actually granted, per column
    ideal_widths = []  # width each column would need to avoid wrapping
    number_columns = len(tab.columns)
    remaining_space = max_width
    # set maximum column width based on the amount of terminal space minus the 3 pixel borders
    max_col_width = (remaining_space - number_columns * 3) / number_columns
    for i in range(0, number_columns):
        current_width = len(tab.columns[i].label)
        tab_cols_acc = tab.columns[i].accessor
        if len(tab.data) > 0:
            # Widest rendered cell in this column.
            max_row_width = max([
                len(str(resolve_cell(row, tab_cols_acc))) for row in tab.data
            ])
            ideal_widths.insert(i, max_row_width)
            current_width = max_row_width if max_row_width > current_width else current_width
        if current_width < max_col_width:
            widths.insert(i, current_width)
            # reclaim space not used and re-share it among remaining columns
            remaining_columns = number_columns - i - 1
            remaining_space = remaining_space - current_width - 3
            if remaining_columns != 0:
                max_col_width = (remaining_space -
                                 remaining_columns * 3) / remaining_columns
        else:
            # Column wants more than its share: cap it at the current share.
            widths.insert(i, max_col_width)
            remaining_space = remaining_space - max_col_width - 3
    # Second pass: give any leftover space to capped columns, in order.
    if remaining_space > 0 and len(ideal_widths) > 0:
        for i in range(0, number_columns):
            if remaining_space == 0:
                break
            if ideal_widths[i] > widths[i]:
                needed_space = ideal_widths[i] - widths[i]
                if needed_space <= remaining_space:
                    widths[i] = ideal_widths[i]
                    remaining_space = remaining_space - needed_space
                elif needed_space > remaining_space:
                    widths[i] = widths[i] + remaining_space
                    remaining_space = 0
    table.set_cols_width(widths)
    table.set_cols_dtype(['t'] * len(tab.columns))
    if conv2ascii:
        table.add_rows([[
            AsciiOutputFormatter.format_value(
                _try_conv2ascii(resolve_cell(row, i.accessor)), i.vt)
            for i in tab.columns
        ] for row in tab.data], False)
    else:
        table.add_rows([[
            AsciiOutputFormatter.format_value(
                resolve_cell(row, i.accessor), i.vt)
            for i in tab.columns
        ] for row in tab.data], False)
    return table
class FunctionSearchResults(list):
    """Wrapper class to have better display of results.

    Behaves as a list of result dicts while also holding pagination
    metadata and a pre-built Texttable used by __str__.
    """

    # Result keys hidden from the tabular display (bulky / internal fields).
    FILTER_COLUMNS = {
        'function_code', 'entry_id', 'group', 'public', 'container_uuid',
        'function_source'
    }

    def __init__(self, gsearchresult):
        """
        Parameters
        ----------
        gsearchresult : dict
            Search response; must contain 'results', 'has_next_page',
            'offset' and 'total' keys.
        """
        # wrapper for an array of results
        results = gsearchresult['results']
        super().__init__(results)
        # track data about where we are in total results
        self.has_next_page = gsearchresult['has_next_page']
        self.offset = gsearchresult['offset']
        self.total = gsearchresult['total']
        # we can use this to load functions and run them
        self.serializer = FuncXSerializer()
        # Reformat for pretty printing and easy viewing
        self._init_columns()
        self.table = Texttable(max_width=120)
        self.table.header(self.columns)
        for res in self:
            self.table.add_row([res[col] for col in self.columns])

    def _init_columns(self):
        # Derive display columns from the first result's keys, minus the
        # filtered ones; empty result sets get no columns.
        self.columns = []
        if len(self):
            assert isinstance(self[0], dict)
            self.columns = [
                k for k in self[0].keys()
                if k not in FunctionSearchResults.FILTER_COLUMNS
            ]

    def __str__(self):
        # Draw the pre-built table, or "[]" when there are no results.
        if len(self):
            return self.table.draw()
        return "[]"

    def load_result(self, ix: int):
        """Get the code for a function.

        If in an ipython environment, this creates a new input and places
        the source in it. Otherwise, the source code is printed.

        Parameters
        ----------
        ix : int
            index into the current list of results

        Returns
        -------
        None
        """
        res = self[ix]
        func_source = res['function_source']
        # func = self.serializer.unpack_and_deserialize(packed_func)[0]
        # return func
        # if we also saved the source code of the function, we could interactively
        # generate a cell to edit the searched function
        ipython = get_ipython()
        if ipython:
            ipython.set_next_input(func_source)
        else:
            print(func_source)
def PopupReceipt(self, item):
    """Open a Toplevel window showing the printable receipt for order *item*.

    Builds a Texttable of the order's line items from the order_invoice
    table, appends delivery/total lines and the shop footer from the
    settings table, and wires a Print button.

    Fix: the order_invoice query previously interpolated *item* directly
    into the SQL string (SQL injection / breakage on quotes); it now uses
    a bound parameter.  NOTE(review): assumes a DB-API driver with '?'
    paramstyle (e.g. sqlite3) — confirm against the Database class.
    """
    self.option.destroy()
    self.top = Toplevel()
    self.top.iconbitmap(r'favicon.ico')
    self.top.title('Order ID: ' + item)
    text2 = Text(self.top, width=80, undo=True)
    scroll = Scrollbar(self.top, command=text2.yview)
    text2.configure(yscrollcommand=scroll.set)
    # Text styling tags (only some are used below; kept for callers/history).
    text2.tag_configure('bold_italics',
                        font=('Arial', 12, 'bold', 'italic'),
                        justify='center')
    text2.tag_configure('big', font=('Verdana', 20, 'bold'), justify='center')
    text2.tag_configure('lines',
                        font=('Times', 12, 'bold'),
                        justify='left',
                        foreground='black')
    text2.tag_configure('color',
                        foreground='#476042',
                        font=('Tempus Sans ITC', 12, 'bold'))
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype([
        'i',  # integer
        't',  # text
        'i'
    ])  # integer
    table.set_cols_align(["c", "l", "l"])
    header = ['Qty', 'Product', 'Total']
    table.header(header)
    x = 1
    db = Database()
    cursor = db.conn.cursor()
    # Parameterized query — never build SQL by concatenating user input.
    sql = """ SELECT id, order_detail_id, user_id, quantity, price, name,
              currency, delivery_charges, price_all FROM order_invoice
              WHERE order_detail_id=? """
    cursor.execute(sql, (item,))
    rows = cursor.fetchall()
    # NOTE(review): if *rows* is empty, `currency`/`totals` below are
    # undefined and the inserts raise NameError — confirm orders always
    # have at least one invoice line.
    for i, row in enumerate(rows):
        currency = row[6]
        total_single = float(row[4]) * float(row[3])  # price * quantity
        receiptdata = [row[3], row[5], currency + str(total_single)]
        totals = row[8]
        x = x + 1
        dev_charge = row[7]
        table.add_row(receiptdata)
    table.set_cols_width([10, 22, 12])
    text2.insert(END, table.draw() + '\n')
    text2.insert(
        END, '=================================================== \n')
    # NOTE(review): this overwrites the delivery charge read from the DB
    # above, so the receipt always shows 0 — confirm whether intentional.
    dev_charge = 0
    text2.insert(
        END, 'Delivery-Charges: ' + currency + str(dev_charge) + ' \n')
    text2.insert(
        END, 'Net-Total: ' + currency + str(totals) + ' \n')
    text2.insert(
        END, '=================================================== \n')
    # Footer lines (e.g. shop address) from the settings table.
    sql_setting = """ SELECT * FROM settings """
    cursor.execute(sql_setting)
    rows_setting = cursor.fetchall()
    for setting in rows_setting:
        text2.insert(END, setting[2] + ' \n')
    self.button = ttk.Button(
        text2,
        text="Print",
        command=lambda: self.Print(item, text2.get('1.0', END)))
    self.button.pack()
    text2.window_create(END, window=self.button)
    text2.pack(side=LEFT, expand=True, fill=BOTH)
    scroll.pack(side=RIGHT, fill=Y)
def main():
    # Aggregate experiment results into one Texttable: one row per
    # experiment folder, columns = union of hyperparams / status / results
    # keys.  Optionally sorts by column IDs (1-based, negative = descending)
    # and writes CSV to args.out instead of printing.
    parser = buildArgsParser()
    args = parser.parse_args()
    sort_by = args.sort
    names = []
    results_files = []
    hyperparams_files = []
    status_files = []
    # Collect (name, result.json, hyperparams.json, status.json) per
    # experiment folder; skip folders missing any of the three files.
    for f in args.results:
        exp_folder = f
        if os.path.isfile(f):
            exp_folder = os.path.dirname(f)
        result_file = pjoin(exp_folder, "result.json")
        hyperparams_file = pjoin(exp_folder, "hyperparams.json")
        status_file = pjoin(exp_folder, "status.json")
        if not os.path.isfile(result_file):
            print 'Skip: {0} is not a file!'.format(result_file)
            continue
        if not os.path.isfile(hyperparams_file):
            print 'Skip: {0} is not a file!'.format(hyperparams_file)
            continue
        if not os.path.isfile(status_file):
            print 'Skip: {0} is not a file!'.format(status_file)
            continue
        # Walk up while the parent also contains hyperparams.json, so the
        # display name is the top-most experiment directory.
        name = os.path.abspath(exp_folder)
        while 'hyperparams.json' in os.listdir(
                os.path.abspath(pjoin(name, os.path.pardir))):
            name = os.path.abspath(pjoin(name, os.path.pardir))
        name = os.path.basename(name)
        names.append(name)
        results_files.append(result_file)
        hyperparams_files.append(hyperparams_file)
        status_files.append(status_file)
    if len([no for no in sort_by if no == 0]) > 0:
        parser.error('Column ID are starting at 1!')
    # Retrieve headers from hyperparams
    headers_hyperparams = set()
    headers_results = set()
    headers_status = set()
    for hyperparams_file, status_file, results_file in zip(
            hyperparams_files, status_files, results_files):
        hyperparams = load_dict_from_json_file(hyperparams_file)
        results = load_dict_from_json_file(results_file)
        status = load_dict_from_json_file(status_file)
        headers_hyperparams |= set(hyperparams.keys())
        headers_results |= set(results.keys())
        headers_status |= set(status.keys())
    headers_hyperparams = sorted(list(headers_hyperparams))
    headers_status = sorted(list(headers_status))
    # TODO: when generating result.json split 'trainset' scores in two key:
    # 'trainset' and 'trainset_std' (same goes for validset and testset).
    headers_results |= set(["trainset_std", "validset_std", "testset_std"])
    headers_results = sorted(list(headers_results))
    headers = headers_hyperparams + headers_status + ["name"] + headers_results

    # Build results table
    table = Texttable(max_width=0)
    table.set_deco(Texttable.HEADER)
    table.set_precision(8)
    table.set_cols_dtype(['a'] * len(headers))
    table.set_cols_align(['c'] * len(headers))

    # Headers (prefixed with 1-based column IDs for --sort).
    table.header([str(i) + "\n" + h for i, h in enumerate(headers, start=1)])
    if args.only_header:
        print table.draw()
        return

    # Results
    for name, hyperparams_file, status_file, results_file in zip(
            names, hyperparams_files, status_files, results_files):
        hyperparams = load_dict_from_json_file(hyperparams_file)
        results = load_dict_from_json_file(results_file)
        status = load_dict_from_json_file(status_file)
        # Build results table row (hyperparams columns)
        row = []
        for h in headers_hyperparams:
            value = hyperparams.get(h, '')
            row.append(value)
        for h in headers_status:
            value = status.get(h, '')
            row.append(value)
        row.append(name)
        for h in headers_results:
            # *set scores are (mean, std) pairs — presumably; TODO confirm
            # result.json schema.
            if h in ["trainset", "validset", "testset"]:
                value = results.get(h, '')[0]
            elif h in ["trainset_std", "validset_std", "testset_std"]:
                value = results.get(h[:-4], '')[1]
            else:
                value = results.get(h, '')
            row.append(value)
        table.add_row(row)

    # Sort (stable, applied right-to-left so the first ID dominates).
    for col in reversed(sort_by):
        table._rows = sorted(table._rows,
                             key=sort_nicely(abs(col) - 1),
                             reverse=col < 0)
    if args.out is not None:
        import csv
        results = []
        results.append(headers)
        results.extend(table._rows)
        with open(args.out, 'wb') as csvfile:
            w = csv.writer(csvfile)
            w.writerows(results)
    else:
        print table.draw()
def _str_val(self):
    """Render self._value (a mapping) as a two-column Key/Value table."""
    layout = Texttable()
    layout.add_rows(self._value.items(), header=False)
    layout.header(['Key', 'Value'])
    return layout.draw()
def do_list(self, args):
    # List the current user's generated images and their cloud publications
    # via the UForge API, each as a Texttable.  Returns 0 on success; API
    # failures are delegated to handle_uforge_exception.
    try:
        # call UForge API
        # get images
        printer.out("Getting all images and publications for [" +
                    self.login + "] ...")
        images = self.api.Users(self.login).Images.Getall()
        images = images.images.image
        # get publications
        pimages = self.api.Users(self.login).Pimages.Getall()
        pimages = pimages.publishImages.publishImage
        if images is None or len(images) == 0:
            printer.out("No images available")
        else:
            printer.out("Images:")
            table = Texttable(800)
            table.set_cols_dtype(
                ["t", "t", "t", "t", "t", "t", "t", "c", "t"])
            table.header([
                "Id", "Name", "Version", "Rev.", "Format", "Created",
                "Size", "Compressed", "Generation Status"
            ])
            images = generics_utils.order_list_object_by(images, "name")
            for image in images:
                imgStatus = self.get_image_status(image.status)
                table.add_row([
                    image.dbId, image.name, image.version, image.revision,
                    image.targetFormat.name,
                    image.created.strftime("%Y-%m-%d %H:%M:%S"),
                    size(image.fileSize),
                    "X" if image.compress else "", imgStatus
                ])
            print table.draw() + "\n"
            printer.out("Found " + str(len(images)) + " images")
        if pimages is None or len(pimages) == 0:
            printer.out("No publication available")
        else:
            printer.out("Publications:")
            table = Texttable(800)
            table.set_cols_dtype(["t", "t", "t", "t", "t", "t", "t"])
            table.header([
                "Template name", "Image ID", "Publish ID", "Account name",
                "Format", "Cloud ID", "Status"
            ])
            pimages = generics_utils.order_list_object_by(pimages, "name")
            for pimage in pimages:
                pubStatus = self.get_publish_status(pimage.status)
                # NOTE(review): credAccount.targetPlatform.name is read even
                # when credAccount may be None (guarded only for .name above)
                # — confirm publications always carry a credAccount here.
                table.add_row([
                    pimage.name,
                    generics_utils.extract_id(pimage.imageUri),
                    pimage.dbId,
                    pimage.credAccount.name
                    if pimage.credAccount is not None else "-",
                    pimage.credAccount.targetPlatform.name,
                    pimage.cloudId if pimage.cloudId is not None else "-",
                    pubStatus
                ])
            print table.draw() + "\n"
            printer.out("Found " + str(len(pimages)) + " publications")
        return 0
    except ArgumentParserError as e:
        printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
        self.help_list()
    except Exception as e:
        return handle_uforge_exception(e)
def srcstat(env):
    # Print a table of source packages in the ipbb work area with their
    # VCS kind (git/svn) and current version (branch/revision), marking
    # dirty working copies with '*' and staged git changes with '+'.
    if not env.workPath:
        secho('ERROR: No ipbb work area detected', fg='red')
        return
    secho("Packages", fg='blue')
    lSrcs = env.getSources()
    if not lSrcs:
        return
    lSrcTable = Texttable(max_width=0)
    lSrcTable.set_deco(Texttable.HEADER | Texttable.BORDER)
    lSrcTable.set_chars(['-', '|', '+', '-'])
    lSrcTable.header(['name', 'kind', 'version'])
    for lSrc in lSrcs:
        lSrcDir = join(env.src, lSrc)
        lKind, lBranch = "unknown", None

        # Check if a git repository
        if exists(join(lSrcDir, '.git')):
            with DirSentry(lSrcDir) as _:
                lKind = 'git'
                # Sanity check: rev-parse fails (128) on a broken repo.
                try:
                    sh.git('rev-parse', '--git-dir')
                except sh.ErrorReturnCode_128:
                    lKind += ' (broken)'
                    lBranch = '(unknown)'
                if lKind == 'git':
                    try:
                        # lBranch = sh.git('symbolic-ref','--short', 'HEAD').strip()
                        lBranch = sh.git('symbolic-ref',
                                         'HEAD').split('/')[-1].strip()
                    except sh.ErrorReturnCode_128:
                        # Detached HEAD: fall back to the short commit hash.
                        lBranch = sh.git('rev-parse', '--short',
                                         'HEAD').strip() + '...'
                    # '*' = unstaged changes (git diff exits 1 when dirty).
                    try:
                        sh.git('diff', '--no-ext-diff', '--quiet').strip()
                    except sh.ErrorReturnCode_1:
                        lBranch += '*'
                    # '+' = staged (cached) changes.
                    try:
                        sh.git('diff', '--no-ext-diff', '--cached',
                               '--quiet').strip()
                    except sh.ErrorReturnCode_1:
                        lBranch += '+'
        elif exists(join(lSrcDir, '.svn')):
            with DirSentry(lSrcDir) as _:
                lKind = 'svn'
                # Parse `svn info` into a dict of "Key: value" entries.
                lSVNInfoRaw = sh.svn('info')
                lSVNInfo = {
                    lEntry[0]: lEntry[1].strip()
                    for lEntry in (lLine.split(':', 1)
                                   for lLine in lSVNInfoRaw.split('\n')
                                   if lLine)
                }
                # Branch = URL path relative to the repository root.
                lBranch = lSVNInfo['URL'].replace(
                    lSVNInfo['Repository Root'] + '/', '')
                lSVNStatus = sh.svn('status', '-q')
                if len(lSVNStatus):
                    lBranch += '*'
        lSrcTable.add_row([lSrc, lKind, lBranch])
    echo(lSrcTable.draw())
def do_disable(self, args):
    # Disable (active=False, access=False) the named target platforms for a
    # user via the UForge API, then print the user's resulting platform
    # list.  Returns 0 on handled outcomes; API errors go through
    # handle_uforge_exception.
    try:
        doParser = self.arg_disable()
        doArgs = doParser.parse_args(shlex.split(args))
        org = org_utils.org_get(self.api, doArgs.org)
        if org is None:
            # NOTE(review): an error condition reported with printer.OK —
            # confirm whether printer.ERROR was intended.
            printer.out(
                "There is no organization matching [" + doArgs.org + "].",
                printer.OK)
            return 0
        if doArgs.org is not None:
            targetPlatformsUser = self.api.Users(
                doArgs.account).Targetplatforms.Getall(org=org.dbId)
        else:
            targetPlatformsUser = self.api.Users(
                doArgs.account).Targetplatforms.Getall()
        if targetPlatformsUser is None or len(
                targetPlatformsUser.targetPlatforms.targetPlatform) == 0:
            printer.out("There is no target platform for the user \"" +
                        doArgs.account + "\" in [" + org.name + "].")
            return 0
        else:
            targetPlatformsUser = targetPlatformsUser.targetPlatforms.targetPlatform
            targetPlatformsList = targetPlatforms()
            targetPlatformsList.targetPlatforms = pyxb.BIND()
            # Keep only the platforms whose name matches the CLI arguments.
            targetPlatformsUser = compare(targetPlatformsUser,
                                          doArgs.targetPlatforms, "name")
            if len(targetPlatformsUser) == 0:
                listName = ""
                for tpname in doArgs.targetPlatforms:
                    listName = listName + tpname + " "
                printer.out("There is no target platforms matching [" +
                            listName + "].")
                return 0
            for item in targetPlatformsUser:
                # NOTE(review): the fresh targetPlatform() is immediately
                # replaced by `item`; the constructor call is dead.
                targetPlatformToDisable = targetPlatform()
                targetPlatformToDisable = item
                targetPlatformToDisable.active = False
                targetPlatformToDisable.access = False
                printer.out("Disabling [" + item.name + "].")
                targetPlatformsList.targetPlatforms.append(
                    targetPlatformToDisable)
            result = self.api.Users(doArgs.account).Targetplatforms.Update(
                Org=org.name, body=targetPlatformsList)
            result = generics_utils.order_list_object_by(
                result.targetPlatforms.targetPlatform, "name")
            table = Texttable(200)
            table.set_cols_align(["c", "c", "c", "c"])
            table.header(["Id", "Name", "Type", "Access"])
            for item in result:
                if item.access:
                    access = "X"
                else:
                    access = ""
                table.add_row([item.dbId, item.name, item.type, access])
            printer.out("Target Platform list for user \"" + doArgs.account +
                        "\" :")
            print table.draw() + "\n"
            return 0
    except ArgumentParserError as e:
        printer.out("In Arguments: " + str(e), printer.ERROR)
        self.help_disable()
    except Exception as e:
        return handle_uforge_exception(e)
# Log the model graph to TensorBoard using one sample (unsqueezed to add a
# batch dimension of 1).
writer.add_graph(model=model,
                 input_to_model=(inputs[0].unsqueeze(0),
                                 y_inputs[0].unsqueeze(0)))
# Train; fit() returns per-epoch loss and validation metric histories.
(train_losses, val_losses, val_f1s_macro, val_f1s_micro, val_aurocs_macro,
 val_aurocs_micro) = model.fit(train_loader, val_loader, device)
# One row per epoch: epoch number followed by the six tracked metrics.
stats = np.column_stack([
    range(1, len(train_losses) + 1),
    train_losses,
    val_losses,
    val_aurocs_macro,
    val_aurocs_micro,
    val_f1s_macro,
    val_f1s_micro,
])
# Pretty-print the per-epoch training summary (max_width=0 disables wrapping).
table = Texttable(max_width=0)
table.set_cols_dtype(['i', 'f', 'f', 'f', 'f', 'f', 'f'])
table.header([
    'Epoch', 'Training loss', 'Validation loss', 'Validation Macro-AUROC',
    'Validation Micro-AUROC', 'Validation Macro-F1', 'Validation Micro-F1'
])
table.add_rows(stats, header=False)
print('Training summary')
print(table.draw())
print()
def _str_val(self):
    """Render self._value (rows) as a Texttable, at most 120 chars wide.

    When self.header is None the first row of self._value acts as the
    header; otherwise self.header supplies it explicitly.
    """
    use_first_row_as_header = self.header is None
    layout = Texttable(max_width=120)
    layout.add_rows(self._value, header=use_first_row_as_header)
    if not use_first_row_as_header:
        layout.header(self.header)
    return layout.draw()
#connection stricly to get regions client = boto3.client(service_name='ec2', region_name='us-east-1') regions = client.describe_regions()['Regions'] #Iterate through the regions for region in regions: print('[*] Checking region ' + region['RegionName'] + '\n') ec2 = boto3.resource(service_name='ec2', region_name=region['RegionName']) # Start building the table table = Texttable() table.set_cols_align(["c", "l"]) table.set_cols_valign(["m", "m"]) table.set_cols_width([20, 80]) table.header(["InstanceID", "UserData"]) # this is used to not print the table if there is no data in this region isData = False # iterate through instances to look for userData for instance in ec2.instances.all(): response = instance.describe_attribute(Attribute='userData') instanceid = ec2.Instance(instance.id).id # Probably a better way to do this PRs accepted! try: if response['UserData']['Value']: # add to the table if we have data table.add_row([ instanceid,
) plaintext = private_key.decrypt( ciphertext,padding.PKCS1v15() ) return plaintext ##You may need to adjust the region? #This loads the aws profile called 'default' session = boto3.session.Session(profile_name='default', region_name=args.region) client = session.client('ec2', args.region) #write the top of the csv table = Texttable() table.set_cols_align(["c", "c", "c", "c", "c"]) table.set_cols_valign(["m", "m", "m", "m", "m"]) table.set_cols_width([20,15,15,15, 32]) table.header(["Instance ID", "Name", "PrivateIpAddress", "PublicIpAddress", "Password"]) def get_ec2_info(): #load all the reservations reservations = client.describe_instances() #iterate through all of the reservations and instances. for reservation in reservations['Reservations']: for instance in reservation["Instances"]: # Get the encrypted password and decrypt data = client.get_password_data(InstanceId=instance['InstanceId']) if client.get_password_data(InstanceId=instance['InstanceId'])['PasswordData']: password = decrypt(base64.b64decode(data['PasswordData'])).decode('ascii') #print the instance ID, private IP and password as the for loops iterate #TODO Not sure if this works well with multiple tags. if instance['Tags'][0]['Value']: name = instance['Tags'][0]['Value'] else:
def cross_validation(all_db_list):
    """
    Generated cross-validation results for decision tree learning
    This is a single loop cross-validation process
    :param all_db_list: input data set

    Fix: ``np.int`` was removed in NumPy >= 1.24; the fold-number
    computation now uses the builtin ``int`` (identical value).
    """
    header_list = ["index", "accuracy", "precision", "recall", "f1",
                   "maximal depth"]  # set up heading for evaluation result table
    class_list = ["room1", "room2", "room3", "room4"]  # set up heading for the confusion matrix
    macro_table = Texttable()
    macro_table.header(header_list)
    # total accuracy, precision, recall, f1 scores and confusion matrix for all 10 folds of validation
    total_accuracy = [0] * CLASS_NUM
    total_precision = [0] * CLASS_NUM
    total_recall = [0] * CLASS_NUM
    total_f1 = [0] * CLASS_NUM
    total_matrix = np.zeros((CLASS_NUM, CLASS_NUM))
    # maximum depth of all decision trees generated
    max_depth = 0
    # calculate step size
    db_size = len(all_db_list)
    step = db_size // FOLD_NUM
    # initialise 4 charts for result output (one per room/class)
    metric_charts_display = []
    for i in range(CLASS_NUM):
        t = Texttable()
        t.add_row(header_list)
        metric_charts_display.append(t)
    for start in range(0, db_size, step):
        # permute training data set and test data set
        # separate data into training data and test data
        end = start + step
        test_db, training_db = separate_data(all_db_list, start, end, db_size)
        # training
        d_tree, depth = dt.decision_tree_learning(training_db, 0)
        # update maximum depth
        if depth > max_depth:
            max_depth = depth
        # get confusion matrix for this fold and accumulate the running total
        confusion_matrix = get_confusion_matrix(test_db, d_tree)
        total_matrix = np.array(confusion_matrix) + np.array(total_matrix)
        # display confusion matrix
        matrix_display = Texttable()
        matrix_display.header(class_list)
        for i in range(CLASS_NUM):
            matrix_display.add_row(confusion_matrix[i])
        # 1-based fold number (np.int -> int: same value, future-proof)
        fold_num = int(np.ceil(start / step)) + 1
        print('Confusion matrix of fold ' + str(fold_num) + ' is: ')
        print(matrix_display.draw())  # print this fold's confusion matrix
        print()
        for roomi in range(CLASS_NUM):
            # validate for each class (room)
            # calculate metrics
            precision = get_precision(roomi, confusion_matrix)
            recall = get_recall(roomi, confusion_matrix)
            f1 = get_f1(roomi, confusion_matrix)
            accuracy = get_accuracy(roomi, confusion_matrix)
            total_precision[roomi] += precision
            total_recall[roomi] += recall
            total_f1[roomi] += f1
            total_accuracy[roomi] += accuracy
            # add result of each fold to the text-table of each room
            col = [str(fold_num), str(accuracy), str(precision), str(recall),
                   str(f1), str(depth)]
            metric_charts_display[roomi].add_row(col)
    for roomi in range(CLASS_NUM):
        # display results for each room
        print('Evaluation result for room ' + str(roomi + 1) + ' is: ')
        average_result = ["average of room " + str(roomi + 1),
                          str(total_accuracy[roomi] / FOLD_NUM),
                          str(total_precision[roomi] / FOLD_NUM),
                          str(total_recall[roomi] / FOLD_NUM),
                          str(total_f1[roomi] / FOLD_NUM),
                          str(max_depth) +
                          ' (Note: this is max depth rather than avg depth)']
        macro_table.add_row(average_result)
        metric_charts_display[roomi].add_row(average_result)
        # print "index", "accuracy", "precision", "recall", "f1" of each fold for each room
        print(metric_charts_display[roomi].draw())
        print()
    # display average confusion matrix over all folds
    average_matrix = np.array(total_matrix) / FOLD_NUM
    matrix_display = Texttable()
    matrix_display.header(class_list)
    for i in range(CLASS_NUM):
        matrix_display.add_row(average_matrix[i])
    print('Average confusion matrix is: ')
    print(matrix_display.draw())  # print average confusion matrix
    print()
    # display average results in all folds for each room
    print('Average metrics for each room is:')
    print(macro_table.draw())
    print()
def info(env):
    # Print a table of source packages in the ipbb work area with their VCS
    # kind (git/svn), a human-readable version (tag > branch > hash for git,
    # branch path for svn) and the revision hash.  Dirty working copies get
    # '*', staged git changes '+'.
    if not env.work.path:
        secho('ERROR: No ipbb work area detected', fg='red')
        return
    echo()
    secho("Packages", fg='blue')
    lSrcs = env.sources
    if not lSrcs:
        return
    lSrcTable = Texttable(max_width=0)
    lSrcTable.set_deco(Texttable.HEADER | Texttable.BORDER)
    lSrcTable.set_chars(['-', '|', '+', '-'])
    lSrcTable.header(['name', 'kind', 'version', 'hash'])
    for lSrc in lSrcs:
        lSrcDir = join(env.srcdir, lSrc)
        lKind, lHEADId, lHash = "unknown", None, None

        # Check if a git repository
        if exists(join(lSrcDir, '.git')):
            with DirSentry(lSrcDir) as _:
                lKind = 'git'
                # Sanity check: rev-parse fails (128) on a broken repo.
                try:
                    sh.git('rev-parse', '--git-dir')
                except sh.ErrorReturnCode_128:
                    lKind += ' (broken)'
                    lHEADId = '(unknown)'
                if lKind == 'git':
                    # Current branch name, if HEAD is symbolic.
                    try:
                        lBranch = '/'.join(
                            sh.git('symbolic-ref',
                                   'HEAD').split('/')[2:]).strip()
                    except sh.ErrorReturnCode_128:
                        lBranch = None
                    # Exact tag on HEAD, if any.
                    try:
                        lTag = sh.git('describe', '--tags', '--exact-match',
                                      'HEAD').strip()
                    except sh.ErrorReturnCode_128:
                        lTag = None
                    lHash = sh.git('rev-parse', '--short=8',
                                   'HEAD').strip() + '...'
                    # Preference order for the displayed id: tag > branch > hash.
                    if lTag is not None:
                        lHEADId = lTag
                    elif lBranch is not None:
                        lHEADId = lBranch
                    else:
                        lHEADId = lHash
                    # '*' = unstaged changes (git diff exits 1 when dirty).
                    try:
                        sh.git('diff', '--no-ext-diff', '--quiet').strip()
                    except sh.ErrorReturnCode_1:
                        lHEADId += '*'
                    # '+' = staged (cached) changes.
                    try:
                        sh.git('diff', '--no-ext-diff', '--cached',
                               '--quiet').strip()
                    except sh.ErrorReturnCode_1:
                        lHEADId += '+'
        elif exists(join(lSrcDir, '.svn')):
            with DirSentry(lSrcDir) as _:
                lKind = 'svn'
                # Parse `svn info` into a dict of "Key: value" entries.
                lSVNInfoRaw = sh.svn('info')
                lSVNInfo = {
                    lEntry[0]: lEntry[1].strip()
                    for lEntry in (lLine.split(':', 1)
                                   for lLine in lSVNInfoRaw.split('\n')
                                   if lLine)
                }
                # Branch = URL path relative to the repository root.
                lHEADId = lSVNInfo['URL'].replace(
                    lSVNInfo['Repository Root'] + '/', '')
                lSVNStatus = sh.svn('status', '-q')
                if len(lSVNStatus):
                    lHEADId += '*'
                lHash = lSVNInfo['Revision']
        lSrcTable.add_row([lSrc, lKind, lHEADId, lHash])
    echo(lSrcTable.draw())