def xmlHandlingOptions(data=0):
    from managers import session_manager
    from natsort import humansorted
    import bs4
    from bs4 import BeautifulSoup

    fileManager = managers.utility.loadFileManager()
    # Concatenate the active documents so BeautifulSoup can collect all tags.
    text = ""
    for file in fileManager.getActiveFiles():
        text = text + " " + file.loadContents()
    soup = BeautifulSoup(text, 'html.parser')
    # Drop processing instructions such as <?xml ... ?>.
    for e in soup:
        if isinstance(e, bs4.element.ProcessingInstruction):
            e.extract()
    # Collect the unique tag names in human-sorted order.
    tags = humansorted(set(tag.name for tag in soup.find_all()))
    for tag in tags:
        if tag not in session_manager.session['xmlhandlingoptions']:
            session_manager.session['xmlhandlingoptions'][tag] = {
                "action": 'remove-tag', "attribute": ''}
    if data:
        # If they have saved, data is passed. This block updates any previous
        # entries in the dict that have been saved.
        for key in data.keys():
            if key in tags:
                dataValues = data[key].split(',')
                session_manager.session['xmlhandlingoptions'][key] = {
                    "action": dataValues[0],
                    "attribute": data["attributeValue" + key]}
    # Make sure that all current tags are in the active docs. Iterate over a
    # copy of the keys because entries may be deleted along the way.
    for key in list(session_manager.session['xmlhandlingoptions'].keys()):
        if key not in tags:
            del session_manager.session['xmlhandlingoptions'][key]
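# A minimal standalone sketch of the tag-collection step above, assuming only
# bs4 and natsort are installed; the sample markup is made up.
import bs4
from bs4 import BeautifulSoup
from natsort import humansorted

soup = BeautifulSoup("<text><p n='1'>one</p><p n='2'>two</p></text>", "html.parser")
tags = humansorted({tag.name for tag in soup.find_all()})
print(tags)  # ['p', 'text']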
def find_new(alias=None):
    """Return a human-sorted list of new chapters as Chapter objects.

    Accepts an optional 'alias' argument, which filters the query.
    """
    query = session.query(Chapter).join(Series).filter(Series.following)
    if alias:
        query = query.filter(Series.alias == alias)
    query = query.filter(Chapter.downloaded == 0).all()
    return humansorted([x.to_object() for x in query], key=lambda x: x.chapter)
def sortentries(text):
    """Helper function to sort lists.

    Uses the natsort module's humansorted function if it is available;
    otherwise falls back to the built-in sorted (which is not quite as good).
    Splits strings into lists if that's what's been given to sort.

    text is the text to be sorted.
    """
    try:
        if isinstance(text, str):
            return humansorted(text.split())
        elif isinstance(text, (list, collections.deque)):
            return humansorted(text)
        else:
            return list(humansorted(text))
    except NameError:
        # humansorted is undefined when natsort could not be imported.
        if isinstance(text, str):
            return sorted(text.split(), key=str.lower)
        elif isinstance(text, (list, collections.deque)):
            return sorted(text, key=str.lower)
        else:
            return sorted(text, key=str.lower)
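# A sketch of sortentries' two code paths, assuming natsort is installed for
# the first call; the item names are made up.
from natsort import humansorted

entries = "step10 step2 Step1"
print(humansorted(entries.split()))
# e.g. ['Step1', 'step2', 'step10'] under an English locale
print(sorted(entries.split(), key=str.lower))
# ['Step1', 'step10', 'step2'] -- the fallback puts 10 before 2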
def backends(self):
    """Get the virtual machines that are backends.

    Returns:
        List of server objects.
    """
    nodes = self.nova.servers.list()
    backends = []
    for node in nodes:
        if "node" in node.name:
            backends.append(node)
    obj = humansorted(backends, key=lambda x: x.name, reverse=True)
    return obj
def execute(self, *args):
    datamodel_ids = []
    # Load all data models.
    datamodels_path = openmediavault.getenv(
        "OMV_DATAMODELS_DIR", "/usr/share/openmediavault/datamodels")
    for f in glob.glob(os.path.join(datamodels_path, "conf.*.json")):
        datamodel_id = os.path.splitext(os.path.basename(f))[0]
        # Note, currently the filename is the data model id, but
        # this may change someday, so we load the data model and
        # ask for its identifier to be on the safe side.
        datamodel = openmediavault.config.Datamodel(datamodel_id)
        datamodel_ids.append(datamodel.id)
    # Print the data model identifiers.
    for datamodel_id in natsort.humansorted(datamodel_ids):
        print(datamodel_id)
    return 0
def _prepare_tag_cloud(self, lang, config):
    """Create tag cloud task."""
    # Collect information
    fn = os.path.join(self.site.config['OUTPUT_FOLDER'], config['filename'])
    css_fn = os.path.join(self.site.config['OUTPUT_FOLDER'], config['style_filename'])
    taxonomy_type = config['taxonomy_type']
    posts_per_tag = self.site.posts_per_classification[taxonomy_type][lang]
    taxonomy = self.site.taxonomy_plugins[taxonomy_type]
    # Compose list of tags, their post count and links
    tag_count_url_list = []
    for tag in natsort.humansorted(list(posts_per_tag.keys())):
        tag_count_url_list.append((
            taxonomy.get_classification_friendly_name(tag, lang),
            len([post for post in posts_per_tag[tag]
                 if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]),
            self.site.link(taxonomy_type, tag, lang)
        ))
    # Get tag cloud data
    tags, level_weights = engine.create_tag_cloud_data(
        tag_count_url_list,
        max_number_of_levels=config['max_number_of_levels'],
        max_tags=config['max_tags'],
        minimal_number_of_appearances=config['minimal_number_of_appearances'])
    # Determine url type for rewriting. Must not be relative.
    url_type = self.site.config['URL_TYPE']
    if url_type == 'rel_path':
        url_type = 'full_path'
    # Create task for HTML fragment
    task = {
        'basename': self.name,
        'name': fn,
        'targets': [fn],
        'actions': [(self._render_tag_cloud_html,
                     [fn, tags, level_weights, config, lang, url_type])],
        'clean': True,
        'uptodate': [utils.config_changed({1: tags, 2: level_weights},
                                          'nikola.plugins.render_tag_cloud:tags'),
                     utils.config_changed(config,
                                          'nikola.plugins.render_tag_cloud:config')]
    }
    yield utils.apply_filters(task, self.site.config["FILTERS"])
    # Create task for CSS
    task = {
        'basename': self.name,
        'name': css_fn,
        'targets': [css_fn],
        'actions': [(self._render_tag_cloud_css,
                     [css_fn, tags, level_weights, config])],
        'clean': True,
        'uptodate': [utils.config_changed({1: tags, 2: level_weights},
                                          'nikola.plugins.render_tag_cloud:tags'),
                     utils.config_changed(config,
                                          'nikola.plugins.render_tag_cloud:config')]
    }
    yield utils.apply_filters(task, self.site.config["FILTERS"])
def ordered_chapters(self):
    return humansorted(self.chapters, key=lambda x: x.chapter)
def execute(self):
    # Default values.
    address = ""
    netmask = ""
    gateway = ""
    method = "dhcp"
    address6 = ""
    method6 = "manual"
    netmask6 = 64
    gateway6 = ""
    wol = False
    dns_nameservers = ""
    wpa_ssid = None
    wpa_psk = None
    rpc_method = "setEthernetIface"
    rpc_params = {}
    # Get the network interface device.
    devices = []
    context = pyudev.Context()
    for device in context.list_devices(subsystem="net"):
        # Skip unwanted network interface devices.
        if device.sys_name in ("lo",):
            continue
        if device.device_type and device.device_type in ("bond",):
            continue
        # Append the network interface name for later use.
        devices.append(device.sys_name)
    devices = natsort.humansorted(devices)
    choices = []
    for idx, sys_name in enumerate(devices):
        device = pyudev.Device.from_name(context, "net", sys_name)
        for id in ["ID_MODEL_FROM_DATABASE", "ID_VENDOR_FROM_DATABASE"]:
            if id not in device:
                continue
            choices.append([sys_name,
                openmediavault.string.truncate(device[id], 50)])
            break
    d = dialog.Dialog(dialog="dialog")
    (code, tag) = d.menu("Please select a network interface. Note, the "
        "existing network interface configuration will be deleted.",
        backtitle=self.description, clear=True, height=14, width=70,
        menu_height=6, choices=choices)
    if code in (d.CANCEL, d.ESC):
        return 0
    device_name = tag
    # Use DHCP?
    code = d.yesno("Do you want to use DHCPv4 for this interface?",
        backtitle=self.description, height=5, width=49)
    if code == d.ESC:
        return 0
    if code != d.OK:
        address = None
        netmask = None
        gateway = None
        method = "static"
        # Get the static IPv4 address.
        while not address:
            (code, address) = d.inputbox("Please enter the IPv4 address.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not address:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
                continue
            try:
                ipaddress.ip_address(address)
            except Exception:
                address = None
                d.msgbox("Please enter a valid IPv4 address.",
                    backtitle=self.description, height=5, width=38)
                continue
        # Get the IPv4 netmask.
        while not netmask:
            (code, netmask) = d.inputbox("Please enter the IPv4 netmask.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not netmask:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
                continue
            try:
                ipaddress.ip_address(netmask)
            except Exception:
                netmask = None
                d.msgbox("Please enter a valid netmask.",
                    backtitle=self.description, height=5, width=33)
                continue
        # Get default IPv4 gateway.
        while not gateway:
            (code, gateway) = d.inputbox("Please enter the default IPv4 gateway.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            try:
                ipaddress.ip_address(gateway)
            except Exception:
                gateway = None
                d.msgbox("Please enter a valid gateway.",
                    backtitle=self.description, height=5, width=33)
                continue
    # Use IPv6?
    code = d.yesno("Do you want to configure IPv6 for this interface?",
        backtitle=self.description, height=5, width=53, defaultno=True)
    if code == d.ESC:
        return 0
    if code == d.OK:
        # Use stateful address autoconfiguration (DHCPv6)?
        code = d.yesno("Do you want to enable stateful address "
            "autoconfiguration (DHCPv6)?",
            backtitle=self.description, height=6, width=42)
        if code == d.ESC:
            return 0
        if code == d.OK:
            method6 = "dhcp"
        else:
            # Use stateless address autoconfiguration (SLAAC)?
            code = d.yesno("Do you want to enable stateless address "
                "autoconfiguration (SLAAC)?",
                backtitle=self.description, height=6, width=42)
            if code == d.ESC:
                return 0
            if code == d.OK:
                method6 = "auto"
            else:
                method6 = "static"
                # Get static IPv6 address.
                address6 = None
                while not address6:
                    (code, address6) = d.inputbox("Please enter the IPv6 address.",
                        backtitle=self.description, clear=True, height=8, width=60, init="")
                    if code != d.OK:
                        return 0
                    if not address6:
                        d.msgbox("The field must not be empty.",
                            backtitle=self.description, height=5, width=32)
                        continue
                    try:
                        ipaddress.ip_address(address6)
                    except Exception:
                        address6 = None
                        d.msgbox("Please enter a valid IPv6 address.",
                            backtitle=self.description, height=5, width=38)
                        continue
                # Get the prefix length.
                netmask6 = None
                while not netmask6:
                    (code, netmask6) = d.inputbox("Please enter the IPv6 prefix length.",
                        backtitle=self.description, clear=True, height=8, width=64, init="64")
                    if code != d.OK:
                        return 0
                    if not netmask6:
                        d.msgbox("The field must not be empty.",
                            backtitle=self.description, height=5, width=32)
                        continue
                    if int(netmask6) < 0 or int(netmask6) > 128:
                        netmask6 = None
                        d.msgbox("Please enter a valid netmask.",
                            backtitle=self.description, height=5, width=33)
                        continue
                # Get default IPv6 gateway.
                gateway6 = None
                while not gateway6:
                    (code, gateway6) = d.inputbox("Please enter the default IPv6 gateway.",
                        backtitle=self.description, clear=True, height=8, width=60, init="")
                    if code != d.OK:
                        return 0
                    try:
                        ipaddress.ip_address(gateway6)
                    except Exception:
                        gateway6 = None
                        d.msgbox("Please enter a valid gateway.",
                            backtitle=self.description, height=5, width=33)
                        continue
    # Get the DNS name servers. Note, only one IP address is
    # supported here.
    if method == "static" or method6 == "static":
        while True:
            (code, dns_nameservers) = d.inputbox(
                "Please enter the DNS name server. If you don't want "
                "to use any name server, just leave this field blank.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not dns_nameservers:
                break
            try:
                ipaddress.ip_address(dns_nameservers)
                break
            except Exception:
                dns_nameservers = ""
                d.msgbox("Please enter a valid IP address.",
                    backtitle=self.description, height=5, width=30)
    # Enable WOL?
    code = d.yesno("Do you want to enable WOL for this interface?",
        backtitle=self.description, height=5, width=50, defaultno=True)
    if code == d.ESC:
        return 0
    if code == d.OK:
        wol = True
    # Set the default RPC parameters.
    rpc_params.update({
        "uuid": openmediavault.getenv("OMV_CONFIGOBJECT_NEW_UUID"),
        "devicename": device_name,
        "method": method,
        "address": address,
        "netmask": netmask,
        "gateway": gateway,
        "method6": method6,
        "address6": address6,
        "netmask6": netmask6,
        "gateway6": gateway6,
        "dnsnameservers": dns_nameservers,
        "dnssearch": "",
        "mtu": 0,
        "wol": wol,
        "options": "",
        "comment": "",
    })
    # Do we process a wireless network interface?
    if re.match(r"^wlan[0-9]+$", device_name):
        rpc_method = "setWirelessIface"
        # Get the SSID.
        while not wpa_ssid:
            (code, wpa_ssid) = d.inputbox(
                "Please enter the name of the wireless network (SSID).",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not wpa_ssid:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
        rpc_params["wpassid"] = wpa_ssid
        # Get the pre-shared key.
        while not wpa_psk:
            (code, wpa_psk) = d.inputbox(
                "Please enter the pre-shared key (PSK).",
                backtitle=self.description, clear=True, height=8, width=45, init="")
            if code != d.OK:
                return 0
            if not wpa_psk:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
        rpc_params["wpapsk"] = wpa_psk
    # Update the interface configuration.
    print("Configuring network interface. Please wait ...")
    # Delete all existing network interface configuration objects.
    interfaces = openmediavault.rpc.call("Network", "enumerateConfiguredDevices")
    for interface in interfaces:
        openmediavault.rpc.call("Network", "deleteInterface",
            {"uuid": interface["uuid"]})
    # Insert a new network interface configuration object.
    openmediavault.rpc.call("Network", rpc_method, rpc_params)
    openmediavault.rpc.call("Config", "applyChanges",
        {"modules": [], "force": False})
    print("The network interface configuration was successfully changed.")
    return 0
def execute(self):
    # Default values.
    address = ""
    netmask = ""
    gateway = ""
    method = "manual"
    address6 = ""
    method6 = "manual"
    netmask6 = 64
    gateway6 = ""
    wol = False
    dns_nameservers = ""
    wpa_ssid = None
    wpa_psk = None
    rpc_method = "setEthernetIface"
    rpc_params = {}
    # Get the network interface device.
    devices = []
    context = pyudev.Context()
    for device in context.list_devices(subsystem="net"):
        # Skip unwanted network interface devices.
        if device.sys_name in ["lo"]:
            continue
        if device.device_type and device.device_type in ["bond"]:
            continue
        # Append the network interface name for later use.
        devices.append(device.sys_name)
    devices = natsort.humansorted(devices)
    choices = []
    # Get a description for each network interface to help the user to
    # choose the correct one.
    for sys_name in devices:
        device = pyudev.Devices.from_name(context, "net", sys_name)
        description = ""
        # Use the following properties as description in the specified order:
        for prop in [
            "ID_MODEL_FROM_DATABASE",
            "ID_VENDOR_FROM_DATABASE",
            "ID_NET_NAME_MAC",
        ]:
            if prop not in device.properties:
                continue
            description = device.properties.get(prop)
            break
        choices.append(
            [sys_name, openmediavault.string.truncate(description, 50)])
    if not choices:
        raise Exception("No network interfaces found.")
    d = dialog.Dialog(dialog="dialog")
    (code, tag) = d.menu(
        "Please select a network interface. Note, the existing network "
        "interface configuration will be deleted.",
        backtitle=self.description, clear=True, height=14, width=70,
        menu_height=6, choices=choices)
    if code in (d.CANCEL, d.ESC):
        return 0
    device_name = tag
    # Use IPv4?
    code = d.yesno("Do you want to configure IPv4 for this interface?",
        backtitle=self.description, height=5, width=53, defaultno=True)
    if code == d.ESC:
        return 0
    if code == d.OK:
        # Use DHCPv4?
        code = d.yesno("Do you want to use DHCPv4 for this interface?",
            backtitle=self.description, height=5, width=49)
        if code == d.ESC:
            return 0
        if code == d.OK:
            method = "dhcp"
        if code != d.OK:
            address = None
            netmask = None
            gateway = None
            method = "static"
            # Get the static IPv4 address.
            while not address:
                (code, address) = d.inputbox("Please enter the IPv4 address.",
                    backtitle=self.description, clear=True, height=8, width=60, init="")
                if code != d.OK:
                    return 0
                if not address:
                    d.msgbox("The field must not be empty.",
                        backtitle=self.description, height=5, width=32)
                    continue
                try:
                    ipaddress.ip_address(address)
                except Exception:  # pylint: disable=broad-except
                    address = None
                    d.msgbox("Please enter a valid IPv4 address.",
                        backtitle=self.description, height=5, width=38)
                    continue
            # Get the IPv4 netmask.
            while not netmask:
                (code, netmask) = d.inputbox("Please enter the IPv4 netmask.",
                    backtitle=self.description, clear=True, height=8, width=60, init="")
                if code != d.OK:
                    return 0
                if not netmask:
                    d.msgbox("The field must not be empty.",
                        backtitle=self.description, height=5, width=32)
                    continue
                try:
                    ipaddress.ip_address(netmask)
                except Exception:  # pylint: disable=broad-except
                    netmask = None
                    d.msgbox("Please enter a valid netmask.",
                        backtitle=self.description, height=5, width=33)
                    continue
            # Get default IPv4 gateway.
            while not gateway:
                (code, gateway) = d.inputbox("Please enter the default IPv4 gateway.",
                    backtitle=self.description, clear=True, height=8, width=60, init="")
                if code != d.OK:
                    return 0
                try:
                    ipaddress.ip_address(gateway)
                except Exception:  # pylint: disable=broad-except
                    gateway = None
                    d.msgbox("Please enter a valid gateway.",
                        backtitle=self.description, height=5, width=33)
                    continue
    # Use IPv6?
    code = d.yesno("Do you want to configure IPv6 for this interface?",
        backtitle=self.description, height=5, width=53,
        defaultno=True if method != "manual" else False)
    if code == d.ESC:
        return 0
    if code == d.OK:
        # Use stateful address autoconfiguration (DHCPv6)?
        code = d.yesno(
            "Do you want to enable stateful address autoconfiguration (DHCPv6)?",
            backtitle=self.description, height=6, width=42)
        if code == d.ESC:
            return 0
        if code == d.OK:
            method6 = "dhcp"
        else:
            # Use stateless address autoconfiguration (SLAAC)?
            code = d.yesno(
                "Do you want to enable stateless address autoconfiguration (SLAAC)?",
                backtitle=self.description, height=6, width=42)
            if code == d.ESC:
                return 0
            if code == d.OK:
                method6 = "auto"
            else:
                method6 = "static"
        if method6 == "static":
            # Get static IPv6 address.
            address6 = None
            while not address6:
                (code, address6) = d.inputbox("Please enter the IPv6 address.",
                    backtitle=self.description, clear=True, height=8, width=60, init="")
                if code != d.OK:
                    return 0
                if not address6:
                    d.msgbox("The field must not be empty.",
                        backtitle=self.description, height=5, width=32)
                    continue
                try:
                    ipaddress.ip_address(address6)
                except Exception:  # pylint: disable=broad-except
                    address6 = None
                    d.msgbox("Please enter a valid IPv6 address.",
                        backtitle=self.description, height=5, width=38)
                    continue
            # Get the prefix length.
            netmask6 = None
            while not netmask6:
                (code, netmask6) = d.inputbox("Please enter the IPv6 prefix length.",
                    backtitle=self.description, clear=True, height=8, width=64, init="64")
                if code != d.OK:
                    return 0
                if not netmask6:
                    d.msgbox("The field must not be empty.",
                        backtitle=self.description, height=5, width=32)
                    continue
                if int(netmask6) < 0 or int(netmask6) > 128:
                    netmask6 = None
                    d.msgbox("Please enter a valid netmask.",
                        backtitle=self.description, height=5, width=33)
                    continue
            # Get default IPv6 gateway.
            gateway6 = None
            while not gateway6:
                (code, gateway6) = d.inputbox("Please enter the default IPv6 gateway.",
                    backtitle=self.description, clear=True, height=8, width=60, init="")
                if code != d.OK:
                    return 0
                try:
                    ipaddress.ip_address(gateway6)
                except Exception:  # pylint: disable=broad-except
                    gateway6 = None
                    d.msgbox("Please enter a valid gateway.",
                        backtitle=self.description, height=5, width=33)
                    continue
    # Get the DNS name servers. Note, only one IP address is
    # supported here.
    if method == "static" or method6 == "static":
        while True:
            (code, dns_nameservers) = d.inputbox(
                "Please enter the DNS name server. If you don't want to use "
                "any name server, just leave this field blank.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not dns_nameservers:
                break
            try:
                ipaddress.ip_address(dns_nameservers)
                break
            except Exception:  # pylint: disable=broad-except
                dns_nameservers = ""
                d.msgbox("Please enter a valid IP address.",
                    backtitle=self.description, height=5, width=30)
    # Enable WOL?
    code = d.yesno("Do you want to enable WOL for this interface?",
        backtitle=self.description, height=5, width=50, defaultno=True)
    if code == d.ESC:
        return 0
    if code == d.OK:
        wol = True
    # Set the default RPC parameters.
    rpc_params.update({
        "uuid": openmediavault.getenv("OMV_CONFIGOBJECT_NEW_UUID"),
        "devicename": device_name,
        "method": method,
        "address": address,
        "netmask": netmask,
        "gateway": gateway,
        "method6": method6,
        "address6": address6,
        "netmask6": netmask6,
        "gateway6": gateway6,
        "dnsnameservers": dns_nameservers,
        "dnssearch": "",
        "mtu": 0,
        "wol": wol,
        "comment": "",
    })
    # Do we process a wireless network interface?
    if openmediavault.net.is_wifi(device_name):
        rpc_method = "setWirelessIface"
        # Get the SSID.
        while not wpa_ssid:
            (code, wpa_ssid) = d.inputbox(
                "Please enter the name of the wireless network (SSID).",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not wpa_ssid:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
        rpc_params["wpassid"] = wpa_ssid
        # Get the pre-shared key.
        while not wpa_psk:
            (code, wpa_psk) = d.inputbox(
                "Please enter the pre-shared key (PSK).",
                backtitle=self.description, clear=True, height=8, width=45, init="")
            if code != d.OK:
                return 0
            if not wpa_psk:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
        rpc_params["wpapsk"] = wpa_psk
    # Update the interface configuration.
    print("Configuring network interface. Please wait ...")
    # Delete all existing network interface configuration objects.
    interfaces = openmediavault.rpc.call("Network", "enumerateConfiguredDevices")
    for interface in interfaces:
        openmediavault.rpc.call("Network", "deleteInterface",
            {"uuid": interface["uuid"]})
    # Insert a new network interface configuration object.
    openmediavault.rpc.call("Network", rpc_method, rpc_params)
    openmediavault.rpc.call("Config", "applyChanges",
        {"modules": [], "force": False})
    print("The network interface configuration was successfully changed.")
    return 0
from natsort import natsorted, humansorted, realsorted, ns
import locale

# 'a' is rebound before each group of calls; the first two calls assume an
# existing list of strings from earlier in the session.
natsorted(a, key=lambda x: x.replace('.', '~'))
natsorted(a, key=lambda x: x.replace('.', '~') + 'z')

a = [
    './folder/file (1).txt',
    './folder/file.txt',
    './folder (1)/file.txt',
    './folder (10)/file.txt',
]
natsorted(a)
natsorted(a, alg=ns.PATH)

locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
a = ['Apple', 'corn', 'Corn', 'Banana', 'apple', 'banana']
natsorted(a, alg=ns.LOCALE)
humansorted(a)

a = ['Apple', 'corn', 'Corn', 'Banana', 'apple', 'banana']
natsorted(a)
natsorted(a, alg=ns.IGNORECASE)
natsorted(a, alg=ns.LOWERCASEFIRST)
natsorted(a, alg=ns.GROUPLETTERS)
natsorted(a, alg=ns.G | ns.LF)

a = ['a50', 'a51.', 'a+50.4', 'a5.034e1', 'a+50.300']
natsorted(a, alg=ns.FLOAT)
natsorted(a, alg=ns.FLOAT | ns.SIGNED)
natsorted(a, alg=ns.FLOAT | ns.SIGNED | ns.NOEXP)
natsorted(a, alg=ns.REAL)
realsorted(a)
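# Worked example for the ns.REAL shortcut above (output as shown in the
# natsort documentation's examples):
from natsort import natsorted, realsorted, ns

a = ['a50', 'a51.', 'a+50.4', 'a5.034e1', 'a+50.300']
print(natsorted(a, alg=ns.REAL))
# ['a50', 'a+50.300', 'a5.034e1', 'a+50.4', 'a51.']
print(realsorted(a) == natsorted(a, alg=ns.REAL))  # True: realsorted is shorthand for ns.REAL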
"""Read and combine files from the same experiment but from different fast regimes."""
import pandas as pd
import os
import glob
import natsort

Experiment = 'SM_1_03042019_FR'
os.chdir(
    '/Volumes/mpistaff/Diaz_Pichugina_Pseudomona/Data/Mosaik_tracking_1_TIMELAPSES_2019_1-1/'
    + 'SM_1_03042019_FR/')
path = os.getcwd()
all_files = glob.glob(path + "/*Trajectories.txt")
all_files = natsort.humansorted(all_files)
#=============================================#
# Read the files from one experiment into one data frame.
df = pd.DataFrame()
for filename in all_files:
    fast_regime = (filename.split('_')[-1])[0:2]
    fast_regime = int(fast_regime)
    print(fast_regime)
    df_temp = pd.read_table(filename)
    df_temp["Experiment"] = Experiment
    df_temp["Fast_regime"] = fast_regime
    df = df.append(df_temp)  # DataFrame.append is removed in pandas >= 2.0; use pd.concat there
def test_humansorted():
    a = ['Apple', 'corn', 'Corn', 'Banana', 'apple', 'banana']
    assert humansorted(a) == ['apple', 'Apple', 'banana', 'Banana', 'corn', 'Corn']
    assert humansorted(a) == natsorted(a, alg=ns.LOCALE)
    assert humansorted(a, reverse=True) == humansorted(a)[::-1]
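# The literal expected list in the first assertion is locale-dependent; a
# sketch of pinning the locale (assuming en_US.UTF-8 is available) so the
# case-insensitive ordering holds:
import locale
from natsort import humansorted

locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
a = ['Apple', 'corn', 'Corn', 'Banana', 'apple', 'banana']
print(humansorted(a))  # ['apple', 'Apple', 'banana', 'Banana', 'corn', 'Corn']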
def save():
    with open(DEFAULT_FILE, 'w') as fd:
        for key, value in natsort.humansorted(Environment.as_dict().items()):
            if key.startswith('OMV_'):
                fd.write('{}="{}"\n'.format(key, value))
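# Illustration of the resulting file ordering with a hypothetical stand-in for
# Environment.as_dict(): humansorted compares embedded numbers numerically, so
# OMV_2 typically precedes OMV_10.
from natsort import humansorted

env = {"OMV_10": "a", "OMV_2": "b", "OMV_CACHE_DIR": "/var/cache"}
for key, value in humansorted(env.items()):
    print('{}="{}"'.format(key, value))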
def main():
    plt.rcParams.update({
        "font.family": "sans-serif",
        "font.sans-serif": ["Helvetica"],
        "font.size": 20
    })
    parser = argparse.ArgumentParser()
    parser.add_argument('csvs', nargs="+", type=str, help='Full path to CSV file')
    parser.add_argument('-o', required=True, type=str, default="", help='output filename')
    args = parser.parse_args()
    m = "*"
    #input_re = re.compile(".*quant-.+\.csv")
    matplotlib.rcParams['agg.path.chunksize'] = 10000
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax12 = ax.twinx()
    fig2 = plt.figure()
    ax2 = fig2.add_subplot(111)
    fig3 = plt.figure()
    ax3 = fig3.add_subplot(111)
    bsizes = set()
    bsize_xs = []
    bsize_ys = []
    bsize_ys2 = []
    bsize_ys3 = []
    bsize_dicts = dict()
    for csv in humansorted(args.csvs):
        #label = basename(splitext(csv)[0])[6:]
        size = None
        print("csv:", csv)
        if "pf" in basename(dirname(csv)):
            size = int(basename(dirname(csv)).split("_")[2])
        else:
            size = int(basename(dirname(csv)).split("_")[1])
        bsize = int(basename(dirname(csv)).split("_")[-1])
        if bsize > 1024:
            continue
        bsizes.add(bsize)
        if bsize not in bsize_dicts:
            bsize_dicts[bsize] = dict()
        e = Experiment(csv)
        batches = e.len_batches()
        total = sum(e.get_total_faults())
        dups = sum(e.get_duplicates())
        #dups = sum([c - a for c, a in zip(e.get_total_faults(), e.get_no_duplicates())])
        assert (size not in bsize_dicts[bsize])
        #bsize_dicts[bsize][size] = (dups, batches, total)
        bsize_dicts[bsize][size] = (dups / total, batches, total)
        print(f"{args.o} faults/batch:", dups / batches)
    for size, d in sorted(bsize_dicts.items(), key=lambda t: t[0]):
        print(d)
        xs, ys = zip(*humansorted(d.items(), key=lambda t: t[0]))
        ys1, ys2, ys3 = zip(*ys)
        bsize_xs.append(xs)
        bsize_ys.append(ys1)
        bsize_ys2.append(ys2)
        bsize_ys3.append(ys3)
    evenly_spaced_interval = np.linspace(0, 1, len(bsize_xs))
    colors = [cm.rainbow(x) for x in evenly_spaced_interval]
    #ax2 = ax.twinx()
    for x, y, y2, y3, l, c in zip(bsize_xs, bsize_ys, bsize_ys2, bsize_ys3,
                                  sorted(bsizes), colors):
        ax.plot(x, y, "b+", label=l, marker=".", color=c, linestyle="-")
        ax2.plot(x, y2, "b+", label=l, marker="*", color=c, linestyle="--")
        ax3.plot(x, y3, "b+", label=l, marker="*", color=c, linestyle="--")
    ax.set_xlabel("Problem Size")
    ax.set_ylabel("% of Duplicates/Batch")
    ax.set_ylim(0, 1)
    ax.legend()
    figname = args.o
    figname2 = "batches-" + args.o
    figname3 = "faults-" + args.o
    if ".png" not in figname:
        figname += ".png"
        figname2 += "-dup.png"
    #plt.tight_layout()
    fig.tight_layout()
    print('saving figure:', figname)
    fig.savefig(figname, dpi=500)
    plt.close(fig)
    ax2.set_xlabel("Problem Size")
    ax2.legend()
    ax2.set_ylabel("# Batches")
    fig2.tight_layout()
    fig2.savefig(figname2, dpi=500)
    plt.close(fig2)
    ax3.set_xlabel("Problem Size")
    ax3.legend()
    ax3.set_ylabel("Total Faults")
    fig3.tight_layout()
    fig3.savefig(figname3, dpi=500)
    plt.close(fig3)
            RoundedfTotalProfit = round(fTotalProfit)
            strTotalProfit = str(RoundedfTotalProfit)
            AllProfit = AllProfit + RoundedfTotalProfit
            #print(AllProfit)
            toplist.append(srProfit + "$ by selling " + readableName +
                           " to the bazaar, or " + strTotalProfit +
                           "$ if you flip 640, after you bought it from the " +
                           Merchant + "merchant")
            # print("--------------------------------------")
        else:
            print("--------------------------------------")
#print("========================================")

# Sort the results, then reverse so the biggest profit comes first.
sortedtoplist = humansorted(toplist)
sortedtoplist.reverse()

window = Tk()
window.eval("tk::PlaceWindow %s center" % window.winfo_toplevel())
window.title("Here are the results:")


def quit():
    window.destroy()


text = Text(window, height=30, width=120)
text.insert(
    INSERT,
    "Top 1: you can make " + sortedtoplist[0] + ". Nice!\n\n"
    "Top 2: you can make " + sortedtoplist[1] +
def image_grid(inp: Union[P, Sequence[P], Sequence[ImageGetter]],
               sort=True,
               shuffle=False,
               n: int = None,
               aspect_ratio: float = None,
               rows: int = None,
               columns: int = None,
               width: int = None,
               height: int = None,
               fill=False,
               interpolation='auto',
               border_size=2,
               border_size_around=0,
               border_color: Sequence[int] = (0, 0, 0, 0)):
    img_getters = None
    if isinstance(inp, np.ndarray):
        img_getters = image_getters_from_ndarray(inp)
    if isinstance(inp, (str, Path)):
        img_getters = image_getters_from_folder(inp)
    elif isinstance(inp, Sequence):
        if isinstance(inp[0], Tuple):
            img_getters = inp
        elif isinstance(inp[0], (str, Path)):
            img_getters = image_getters_from_img_paths(inp)
    assert img_getters, "incorrect type of input to image_grid"
    assert not (shuffle and sort), "shuffle and sort are mutually exclusive, only provide one"
    if shuffle:
        img_getters = list(img_getters)
        random.shuffle(img_getters)
    elif sort:
        img_getters = humansorted(img_getters)
    n = n or len(img_getters)
    assert 0 < n <= len(img_getters)
    if rows and not columns:
        columns = math.ceil(n / rows)
    elif columns and not rows:
        rows = math.ceil(n / columns)
    elif not columns and not rows:
        columns = math.ceil(math.sqrt(n))
        rows = math.ceil(n / columns)
    assert columns * rows >= n, "not enough rows and columns to include all images"
    assert 0 <= border_size
    assert 0 <= border_size_around

    def get_color_arg(inp: Sequence[int]):
        inp = list(inp)
        if len(inp) == 1:
            inp = inp * 3
        if len(inp) == 3:
            inp.append(255)
        assert len(inp) == 4
        inp = np.array(inp)
        assert np.all(0 <= inp) and np.all(inp <= 255), 'color range [0-255]'
        return inp

    border_color = get_color_arg(border_color)
    border_total_w = (columns - 1) * border_size + 2 * border_size_around
    border_total_h = (rows - 1) * border_size + 2 * border_size_around
    first_img = np.array(img_getters[0][1]())
    if not aspect_ratio:
        aspect_ratio = first_img.shape[1] / first_img.shape[0]
    assert not (height and width), "height and width are mutually exclusive, only provide one"
    if height:
        img_h = round((height - border_total_h) / rows)
        img_w = round(img_h * aspect_ratio)
    elif width:
        img_w = round((width - border_total_w) / columns)
        img_h = round(img_w / aspect_ratio)
    else:
        img_h = first_img.shape[0]
        img_w = round(img_h * aspect_ratio)
    height = img_h * rows + border_total_h
    width = img_w * columns + border_total_w
    img_grid = np.empty((height, width, 4), dtype=np.uint8)
    img_grid[:, :, :] = border_color

    def minmax(*inp):
        return min(*inp), max(*inp)

    for row in range(rows):
        for col in range(columns):
            i = row * columns + col
            if i >= n:
                break
            img = img_getters[i][1]()
            if img.mode == "I":
                # support 16bit .png single channel
                img = (np.array(img) // 256).astype(np.uint8)
                img = Image.fromarray(img)
            h, w = img.height, img.width
            ar = w / h
            rmi, rma = minmax(img_h / h, img_w / w)
            scale = rma if fill else rmi
            _h, _w = round(scale * h), round(scale * w)
            inter = interpolation
            if inter == Interpolation.AUTO:
                if _h < img_h:
                    inter = Interpolation.LANCZOS
                else:
                    inter = Interpolation.BICUBIC
            img = img.resize((_w, _h), resample=getattr(Image, inter.upper()))
            img = np.array(img)
            # crop image (fill)
            if _h > img_h:
                h_start = (_h - img_h) // 2
                img = img[h_start:h_start + img_h]
                _h = img_h
            if _w > img_w:
                w_start = (_w - img_w) // 2
                img = img[:, w_start:w_start + img_w]
                _w = img_w
            # define offsets (fit)
            off_h = (img_h - _h) // 2
            off_w = (img_w - _w) // 2
            # insert image in img_grid
            x = border_size_around + col * (border_size + img_w) + off_w
            y = border_size_around + row * (border_size + img_h) + off_h
            img = img.reshape((*img.shape[:2], -1))
            if img.shape[2] == 1:
                img = np.tile(img, (1, 1, 3))
            if img.shape[2] == 3:
                img = np.concatenate(
                    (img, np.ones((_h, _w, 1), dtype=np.uint8) * 255), axis=2)
            assert img.shape[2] == 4
            img_grid[y:y + _h, x:x + _w] = img
    return img_grid
def test_humansorted_returns_results_identical_to_natsorted_with_LOCALE():
    a = ["Apple", "corn", "Corn", "Banana", "apple", "banana"]
    assert humansorted(a) == natsorted(a, alg=ns.LOCALE)
def test_humansorted_is_identical_to_natsorted_with_locale_alg(fruit_list):
    assert humansorted(fruit_list) == natsorted(fruit_list, alg=ns.LOCALE)
def _build_taxonomy_list_and_hierarchy(self, taxonomy_name, lang):
    """Build taxonomy list and hierarchy for the given taxonomy name and language."""
    if taxonomy_name not in self.site.posts_per_classification or taxonomy_name not in self.site.taxonomy_plugins:
        return None, None
    posts_per_tag = self.site.posts_per_classification[taxonomy_name][lang]
    taxonomy = self.site.taxonomy_plugins[taxonomy_name]

    def acceptor(post):
        return True if self.site.config['SHOW_UNTRANSLATED_POSTS'] else post.is_translation_available(lang)

    # Build classification list
    classifications = [(taxonomy.get_classification_friendly_name(tag, lang, only_last_component=False), tag)
                       for tag in posts_per_tag.keys()]
    if classifications:
        # Sort classifications
        classifications = natsort.humansorted(classifications)
        # Build items list
        result = list()
        for classification_name, classification in classifications:
            count = len([post for post in posts_per_tag[classification] if acceptor(post)])
            result.append((classification_name, count,
                           self.site.link(taxonomy_name, classification, lang)))
        # Build hierarchy
        if taxonomy.has_hierarchy:
            # Special post-processing for archives: get rid of root and cut off tree at month level
            if taxonomy_name == 'archive':
                root_list = self.site.hierarchy_per_classification[taxonomy_name][lang]
                root_list = utils.clone_treenode(root_list[0]).children

                def cut_depth(node, cutoff):
                    if cutoff <= 1:
                        node.children = []
                    else:
                        for node in node.children:
                            cut_depth(node, cutoff - 1)

                def invert_order(node):
                    node.children.reverse()
                    for node in node.children:
                        invert_order(node)

                # Make sure that days don't creep in
                for node in root_list:
                    cut_depth(node, 2)
                    invert_order(node)
                root_list.reverse()
                flat_hierarchy = utils.flatten_tree_structure(root_list)
            else:
                flat_hierarchy = self.site.flat_hierarchy_per_classification[taxonomy_name][lang]
        else:
            root_list = []
            for classification_name, classification in classifications:
                node = utils.TreeNode(classification_name)
                node.classification_name = classification
                node.classification_path = taxonomy.extract_hierarchy(classification)
                root_list.append(node)
            flat_hierarchy = utils.flatten_tree_structure(root_list)
        # Build flattened hierarchy list
        hierarchy = [(taxonomy.get_classification_friendly_name(node.classification_name, lang, only_last_component=False),
                      node.classification_name,
                      node.classification_path,
                      self.site.link(taxonomy_name, node.classification_name, lang),
                      node.indent_levels,
                      node.indent_change_before,
                      node.indent_change_after,
                      len(node.children),
                      len([post for post in posts_per_tag[node.classification_name] if acceptor(post)]))
                     for node in flat_hierarchy]
        return result, hierarchy
    else:
        return None, None
def execute(self):
    # Default values.
    address = None
    netmask = None
    gateway = None
    method = "dhcp"
    address6 = None
    method6 = "manual"
    netmask6 = 64
    gateway6 = None
    wol = False
    dns_nameservers = None
    wpa_ssid = None
    wpa_psk = None
    rpc_method = "setEthernetIface"
    rpc_params = {}
    # Get the network interface device.
    devices = []
    context = pyudev.Context()
    for device in context.list_devices(subsystem="net"):
        # Skip unwanted network interface devices.
        if device.sys_name in ("lo",):
            continue
        if device.device_type and device.device_type in ("bond",):
            continue
        # Append the network interface name for later use.
        devices.append(device.sys_name)
    devices = natsort.humansorted(devices)
    choices = []
    for idx, sys_name in enumerate(devices):
        device = pyudev.Device.from_name(context, "net", sys_name)
        choices.append([sys_name,
            omv.string.truncate(device["ID_MODEL_FROM_DATABASE"], 50)])
    d = dialog.Dialog(dialog="dialog")
    (code, tag) = d.menu("Please select a network interface. Note, the "
        "existing network interface configuration will be deleted.",
        backtitle=self.description, clear=True, height=14, width=70,
        menu_height=6, choices=choices)
    if code in (d.CANCEL, d.ESC):
        return 0
    device_name = tag
    # Use DHCP?
    code = d.yesno("Do you want to use DHCPv4 for this interface?",
        backtitle=self.description, height=5, width=49)
    if code == d.ESC:
        return 0
    if code != d.OK:
        method = "static"
        # Get the static IPv4 address.
        while not address:
            (code, address) = d.inputbox("Please enter the IPv4 address.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not address:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
                continue
            try:
                ipaddress.ip_address(address)
            except Exception:
                address = None
                d.msgbox("Please enter a valid IPv4 address.",
                    backtitle=self.description, height=5, width=38)
                continue
        # Get the IPv4 netmask.
        while not netmask:
            (code, netmask) = d.inputbox("Please enter the IPv4 netmask.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not netmask:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
                continue
            try:
                ipaddress.ip_address(netmask)
            except Exception:
                netmask = None
                d.msgbox("Please enter a valid netmask.",
                    backtitle=self.description, height=5, width=33)
                continue
        # Get default IPv4 gateway.
        while not gateway:
            (code, gateway) = d.inputbox("Please enter the default IPv4 gateway.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            try:
                ipaddress.ip_address(gateway)
            except Exception:
                gateway = None
                d.msgbox("Please enter a valid gateway.",
                    backtitle=self.description, height=5, width=33)
                continue
    # Use IPv6?
    code = d.yesno("Do you want to configure IPv6 for this interface?",
        backtitle=self.description, height=5, width=53, defaultno=True)
    if code == d.ESC:
        return 0
    if code == d.OK:
        # Use stateful address autoconfiguration (DHCPv6)?
        code = d.yesno("Do you want to enable stateful address "
            "autoconfiguration (DHCPv6)?",
            backtitle=self.description, height=6, width=42)
        if code == d.ESC:
            return 0
        if code == d.OK:
            method6 = "dhcp"
        else:
            # Use stateless address autoconfiguration (SLAAC)?
            code = d.yesno("Do you want to enable stateless address "
                "autoconfiguration (SLAAC)?",
                backtitle=self.description, height=6, width=42)
            if code == d.ESC:
                return 0
            if code == d.OK:
                method6 = "auto"
            else:
                method6 = "static"
                # Get static IPv6 address.
                while not address6:
                    (code, address6) = d.inputbox("Please enter the IPv6 address.",
                        backtitle=self.description, clear=True, height=8, width=60, init="")
                    if code != d.OK:
                        return 0
                    if not address6:
                        d.msgbox("The field must not be empty.",
                            backtitle=self.description, height=5, width=32)
                        continue
                    try:
                        ipaddress.ip_address(address6)
                    except Exception:
                        address6 = None
                        d.msgbox("Please enter a valid IPv6 address.",
                            backtitle=self.description, height=5, width=38)
                        continue
                # Get the prefix length.
                while not netmask6:
                    (code, netmask6) = d.inputbox("Please enter the IPv6 prefix length.",
                        backtitle=self.description, clear=True, height=8, width=64, init="")
                    if code != d.OK:
                        return 0
                    if not netmask6:
                        d.msgbox("The field must not be empty.",
                            backtitle=self.description, height=5, width=32)
                        continue
                    # A prefix length is a number in [0, 128], not an IP address.
                    if int(netmask6) < 0 or int(netmask6) > 128:
                        netmask6 = None
                        d.msgbox("Please enter a valid netmask.",
                            backtitle=self.description, height=5, width=33)
                        continue
                # Get default IPv6 gateway.
                while not gateway6:
                    (code, gateway6) = d.inputbox("Please enter the default IPv6 gateway.",
                        backtitle=self.description, clear=True, height=8, width=60, init="")
                    if code != d.OK:
                        return 0
                    try:
                        ipaddress.ip_address(gateway6)
                    except Exception:
                        gateway6 = None
                        d.msgbox("Please enter a valid gateway.",
                            backtitle=self.description, height=5, width=33)
                        continue
    # Get the DNS name servers. Note, only one IP address is
    # supported here.
    if method == "static" or method6 == "static":
        while True:
            (code, dns_nameservers) = d.inputbox(
                "Please enter the DNS name server. If you don't want "
                "to use any name server, just leave this field blank.",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not dns_nameservers:
                break
            try:
                ipaddress.ip_address(dns_nameservers)
                break
            except Exception:
                dns_nameservers = None
                d.msgbox("Please enter a valid IP address.",
                    backtitle=self.description, height=5, width=30)
    # Enable WOL?
    code = d.yesno("Do you want to enable WOL for this interface?",
        backtitle=self.description, height=5, width=50, defaultno=True)
    if code == d.ESC:
        return 0
    if code == d.OK:
        wol = True
    # Set the default RPC parameters.
    rpc_params.update({
        "uuid": omv.getenv("OMV_CONFIGOBJECT_NEW_UUID"),
        "devicename": device_name,
        "method": method,
        "address": address,
        "netmask": netmask,
        "gateway": gateway,
        "method6": method6,
        "address6": address6,
        "netmask6": netmask6,
        "gateway6": gateway6,
        "dnsnameservers": dns_nameservers,
        "dnssearch": "",
        "mtu": 0,
        "wol": wol,
        "options": "",
        "comment": "",
    })
    # Do we process a wireless network interface?
    if re.match(r"^wlan[0-9]+$", device_name):
        rpc_method = "setWirelessIface"
        # Get the SSID.
        while not wpa_ssid:
            (code, wpa_ssid) = d.inputbox(
                "Please enter the name of the wireless network (SSID).",
                backtitle=self.description, clear=True, height=8, width=60, init="")
            if code != d.OK:
                return 0
            if not wpa_ssid:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
        rpc_params["wpassid"] = wpa_ssid
        # Get the pre-shared key.
        while not wpa_psk:
            (code, wpa_psk) = d.inputbox(
                "Please enter the pre-shared key (PSK).",
                backtitle=self.description, clear=True, height=8, width=45, init="")
            if code != d.OK:
                return 0
            if not wpa_psk:
                d.msgbox("The field must not be empty.",
                    backtitle=self.description, height=5, width=32)
        rpc_params["wpapsk"] = wpa_psk
    # Update the interface configuration.
    print("Configuring network interface. Please wait ...")
    # Delete all existing network interface configuration objects.
    interfaces = omv.rpc.call("Network", "enumerateConfiguredDevices")
    for interface in interfaces:
        omv.rpc.call("Network", "deleteInterface", {"uuid": interface["uuid"]})
    # Insert a new network interface configuration object.
    omv.rpc.call("Network", rpc_method, rpc_params)
    omv.rpc.call("Config", "applyChanges", {"modules": [], "force": False})
    print("The network interface configuration was successfully changed.")
    return 0
def test_humansorted_returns_results_identical_to_natsorted_with_LOCALE():
    a = ['Apple', 'corn', 'Corn', 'Banana', 'apple', 'banana']
    assert humansorted(a) == natsorted(a, alg=ns.LOCALE)
def write_reads_on_contig(net):
    '''Write the number of mapped reads on each contig to a file.'''
    with open("stat_reads_contigs", 'w') as out:
        for contig in humansorted(ref_reads_counts.keys()):
            out.write(contig + "\t" + str(ref_reads_counts[contig]) + "\n")
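# Why humansorted matters for contig names: plain lexicographic sorting
# interleaves multi-digit numbers. The contig names here are made up.
from natsort import humansorted

contigs = ["contig10", "contig2", "contig1"]
print(sorted(contigs))       # ['contig1', 'contig10', 'contig2']
print(humansorted(contigs))  # ['contig1', 'contig2', 'contig10']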
def main():
    global appname
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', nargs="+", type=str, help='Full path to CSV file')
    parser.add_argument('-f', nargs="+")
    parser.add_argument('--app', type=str, help="app name for output")
    args = parser.parse_args()
    m = "*"
    expstats = []
    expstats_pf = []
    bsize_dicts = []
    perfs = []
    for csv in humansorted(args.p):
        bsize = int(splitext(basename(csv))[0].split("-")[-1])
        appname = basename(dirname(csv))
        pf = "pf" in csv
        with open(csv, "r") as c:
            while True:
                size = c.readline().split(",")[1]
                alloced = c.readline().split(",")[1]
                perf = float(c.readline().split(",")[1])
                perfs.append(ExperimentPerf(perf, alloced, size, bsize, pf, appname))
                if c.tell() == os.fstat(c.fileno()).st_size:
                    break
    bsize_dict = dict()
    for csv in humansorted(args.f):
        bsize = int(basename(dirname(csv)).split("_")[-1])
        if bsize not in bsize_dict:
            bsize_dict[bsize] = []
        bsize_dict[bsize].append(csv)
    if "tealeaf" in args.app:
        with Pool(processes=2) as pool:
            #x, y, bsize = pool.map(parse_bsize, bsize_dict.items())
            ret = pool.map(parse_bsize, bsize_dict.items())
        for e, bsize in ret:
            if e.pf:
                expstats_pf += e
            else:
                expstats += e
    else:
        all_pairs = [(bsize, i) for bsize, lis in bsize_dict.items() for i in lis]
        with Pool() as pool:
            #ret = map(parse_bsize_para, bsize_dict.items())
            #ret = pool.map(parse_bsize_para, bsize_dict.items())
            ret = pool.map(parse_bsize_sub2,
                           sorted(all_pairs, key=lambda f: os.stat(f[1]).st_size,
                                  reverse=True))
        for e in ret:
            if e.pf:
                expstats_pf.append(e)
            else:
                expstats.append(e)
    # normalize the app performance
    apps = {perf.app for perf in perfs}
    for app in apps:
        perf_set = [perf for perf in perfs if perf.app == app]
        #perf_set = [perf for perf in perfs if perf.app == app and perf.pf]
        m = max(perf_set, key=lambda p: p.perf).perf
        for perf in perf_set:
            perf.perf = perf.perf / m
    # for app in apps:
    #     perf_set = [perf for perf in perfs if perf.app == app and not perf.pf]
    #     m = max(perf_set, key=lambda p: p.perf).perf
    #     for perf in perf_set:
    #         perf.perf = perf.perf / m
    csv_name = "csvs/" + args.app + ".csv"
    print("saving to", csv_name)
    with open(csv_name, "w+") as csv:
        csv.write("perf, pf, bsize, size, faults, batches, avg_batch, "
                  "4k_dups, 64k_dups, alloced, avg_vablock\n")
        for exp in expstats:
            ep = None
            for e in perfs:
                if exp.bsize == e.bsize and exp.pf == e.pf and exp.size == e.size:
                    ep = e
                    break
            if ep is None:
                print(f"no perf match: {exp.bsize}, {exp.pf}, {exp.size}")
                print(f"no perf match: {type(exp.bsize)}, {type(exp.pf)}, {type(exp.size)}")
            csv.write(f"{ep.perf}, {1 if e.pf else 0}, {exp.bsize}, {exp.size}, "
                      f"{exp.num_faults}, {exp.num_batches}, {exp.avg_batches}, "
                      f"{exp.num_4k_dups}, {exp.num_64k_dups}, {ep.alloced}, "
                      f"{exp.avg_vablock}\n")
    csv_name = "csvs/" + args.app + "-pf.csv"
    print("saving to", csv_name)
    with open(csv_name, "w+") as csv:
        csv.write("perf, pf, bsize, size, faults, batches, avg_batch, "
                  "4k_dups, 64k_dups, alloced, avg_vablock\n")
        for exp in expstats_pf:
            ep = None
            for e in perfs:
                if exp.bsize == e.bsize and exp.pf == e.pf and exp.size == e.size:
                    ep = e
                    break
            if ep is None:
                print(f"no perf match: {exp.bsize}, {exp.pf}, {exp.size}")
                print(f"no perf match: {type(exp.bsize)}, {type(exp.pf)}, {type(exp.size)}")
            csv.write(f"{ep.perf}, {1 if e.pf else 0}, {exp.bsize}, {exp.size}, "
                      f"{exp.num_faults}, {exp.num_batches}, {exp.avg_batches}, "
                      f"{exp.num_4k_dups}, {exp.num_64k_dups}, {ep.alloced}, "
                      f"{exp.avg_vablock}\n")
    csv_name = "csvs/" + args.app + "-all.csv"
    print("saving to", csv_name)
    with open(csv_name, "w+") as csv:
        csv.write("perf, pf, bsize, size, faults, batches, avg_batch, "
                  "4k_dups, 64k_dups, alloced, avg_vablock\n")
        for exp in expstats_pf + expstats:
            ep = None
            for e in perfs:
                if exp.bsize == e.bsize and exp.pf == e.pf and exp.size == e.size:
                    ep = e
                    break
            if ep is None:
                print(f"no perf match: {exp.bsize}, {exp.pf}, {exp.size}")
                print(f"no perf match: {type(exp.bsize)}, {type(exp.pf)}, {type(exp.size)}")
            csv.write(f"{ep.perf}, {1 if e.pf else 0}, {exp.bsize}, {exp.size}, "
                      f"{exp.num_faults}, {exp.num_batches}, {exp.avg_batches}, "
                      f"{exp.num_4k_dups}, {exp.num_64k_dups}, {ep.alloced}, "
                      f"{exp.avg_vablock}\n")
    print("done")
def plot_logs(experiments: List[Summary],
              smooth_factor: float = 0,
              share_legend: bool = True,
              ignore_metrics: Optional[Set[str]] = None,
              pretty_names: bool = False,
              include_metrics: Optional[Set[str]] = None) -> plt.Figure:
    """A function which will plot experiment histories for comparison viewing / analysis.

    Args:
        experiments: Experiment(s) to plot.
        smooth_factor: A non-negative float representing the magnitude of gaussian smoothing to apply (zero for none).
        share_legend: Whether to have one legend across all graphs (True) or one legend per graph (False).
        pretty_names: Whether to modify the metric names in graph titles (True) or leave them alone (False).
        ignore_metrics: Any keys to ignore during plotting.
        include_metrics: A whitelist of keys to include during plotting. If None then all will be included.

    Returns:
        The handle of the pyplot figure.
    """
    # Sort to keep same colors between multiple runs of visualization
    experiments = humansorted(to_list(experiments), lambda exp: exp.name)
    n_experiments = len(experiments)
    if n_experiments == 0:
        return plt.subplots(111)[0]
    ignore_keys = ignore_metrics or set()
    ignore_keys = to_set(ignore_keys)
    ignore_keys |= {'epoch'}
    include_keys = to_set(include_metrics)
    # TODO: epoch should be indicated on the axis (top x axis?). Problem - different epochs per experiment.
    # TODO: figure out how ignore_metrics should interact with mode
    metric_histories = defaultdict(_MetricGroup)  # metric: MetricGroup
    for idx, experiment in enumerate(experiments):
        history = experiment.history
        # Since python dicts remember insertion order, sort the history so that train mode is always plotted on bottom
        for mode, metrics in sorted(history.items(),
                                    key=lambda x: 0 if x[0] == 'train' else 1 if x[0] == 'eval' else 2
                                    if x[0] == 'test' else 3 if x[0] == 'infer' else 4):
            for metric, step_val in metrics.items():
                if len(step_val) == 0:
                    continue  # Ignore empty metrics
                if metric in ignore_keys:
                    continue
                if include_keys and metric not in include_keys:
                    continue
                metric_histories[metric].add(idx, mode, step_val)
    metric_list = list(sorted(metric_histories.keys()))
    if len(metric_list) == 0:
        return plt.subplots(111)[0]
    # If sharing legend and there is more than 1 plot, then dedicate 1 subplot for the legend
    share_legend = share_legend and (len(metric_list) > 1)
    n_legends = math.ceil(n_experiments / 4)
    n_plots = len(metric_list) + (share_legend * n_legends)
    # map the metrics into an n x n grid, then remove any extra columns. Final grid will be n x m with m <= n
    n_rows = math.ceil(math.sqrt(n_plots))
    n_cols = math.ceil(n_plots / n_rows)
    metric_grid_location = {}
    nd1_metrics = []
    idx = 0
    for metric in metric_list:
        if metric_histories[metric].ndim() == 1:
            # Delay placement of the 1D plots until the end
            nd1_metrics.append(metric)
        else:
            metric_grid_location[metric] = (idx // n_cols, idx % n_cols)
            idx += 1
    for metric in nd1_metrics:
        metric_grid_location[metric] = (idx // n_cols, idx % n_cols)
        idx += 1
    sns.set_context('paper')
    fig, axs = plt.subplots(n_rows, n_cols, sharex='all', figsize=(4 * n_cols, 2.8 * n_rows))
    # If only one row, need to re-format the axs object for consistency. Likewise for columns
    if n_rows == 1:
        axs = [axs]
        if n_cols == 1:
            axs = [axs]
    for metric in metric_grid_location.keys():
        axis = axs[metric_grid_location[metric][0]][metric_grid_location[metric][1]]
        if metric_histories[metric].ndim() == 1:
            axis.grid(linestyle='')
        else:
            axis.grid(linestyle='--')
            axis.ticklabel_format(axis='y', style='sci', scilimits=(-2, 3))
        axis.set_title(metric if not pretty_names else prettify_metric_name(metric), fontweight='bold')
        axis.spines['top'].set_visible(False)
        axis.spines['right'].set_visible(False)
        axis.spines['bottom'].set_visible(False)
        axis.spines['left'].set_visible(False)
        axis.tick_params(bottom=False, left=False)
    # some of the later rows/columns might be unused or reserved for legends, so disable them
    last_row_idx = math.ceil(len(metric_list) / n_cols) - 1
    last_column_idx = len(metric_list) - last_row_idx * n_cols - 1
    for c in range(n_cols):
        if c <= last_column_idx:
            axs[last_row_idx][c].set_xlabel('Steps')
            axs[last_row_idx][c].xaxis.set_tick_params(which='both', labelbottom=True)
        else:
            axs[last_row_idx][c].axis('off')
            axs[last_row_idx - 1][c].set_xlabel('Steps')
            axs[last_row_idx - 1][c].xaxis.set_tick_params(which='both', labelbottom=True)
        for r in range(last_row_idx + 1, n_rows):
            axs[r][c].axis('off')
    # the 1D metrics don't need x axis, so move them up, starting with the last in case multiple rows of them
    for metric in reversed(nd1_metrics):
        row = metric_grid_location[metric][0]
        col = metric_grid_location[metric][1]
        axs[row][col].axis('off')
        if row > 0:
            axs[row - 1][col].set_xlabel('Steps')
            axs[row - 1][col].xaxis.set_tick_params(which='both', labelbottom=True)
    colors = sns.hls_palette(n_colors=n_experiments,
                             s=0.95) if n_experiments > 10 else sns.color_palette("colorblind")
    color_offset = defaultdict(lambda: 0)
    # If there is only 1 experiment, we will use alternate colors based on mode
    if n_experiments == 1:
        color_offset['eval'] = 1
        color_offset['test'] = 2
        color_offset['infer'] = 3
    handles = []
    labels = []
    has_label = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: False)))  # exp_id : {mode: {type: True}}
    ax_text = defaultdict(lambda: (0.0, 0.9))  # Where to put the text on a given axis
    for exp_idx, experiment in enumerate(experiments):
        for metric, group in metric_histories.items():
            axis = axs[metric_grid_location[metric][0]][metric_grid_location[metric][1]]
            if group.ndim() == 1:
                # Single value
                for mode in group.modes(exp_idx):
                    ax_id = id(axis)
                    prefix = f"{experiment.name} ({mode})" if n_experiments > 1 else f"{mode}"
                    axis.text(ax_text[ax_id][0],
                              ax_text[ax_id][1],
                              f"{prefix}: {group.get_val(exp_idx, mode)}",
                              color=colors[exp_idx + color_offset[mode]],
                              transform=axis.transAxes)
                    ax_text[ax_id] = (ax_text[ax_id][0], ax_text[ax_id][1] - 0.1)
                    if ax_text[ax_id][1] < 0:
                        ax_text[ax_id] = (ax_text[ax_id][0] + 0.5, 0.9)
            elif group.ndim() == 2:
                for mode, data in group[exp_idx].items():
                    title = f"{experiment.name} ({mode})" if n_experiments > 1 else f"{mode}"
                    if data.shape[0] < 2:
                        # This particular mode only has a single data point, so need to draw a shape instead of a line
                        xy = [data[0][0], data[0][1]]
                        if mode == 'train':
                            style = MarkerStyle(marker='o', fillstyle='full')
                        elif mode == 'eval':
                            style = MarkerStyle(marker='v', fillstyle='full')
                        elif mode == 'test':
                            style = MarkerStyle(marker='*', fillstyle='full')
                        else:
                            style = MarkerStyle(marker='s', fillstyle='full')
                        if isinstance(xy[1], ValWithError):
                            # We've got error bars
                            x = xy[0]
                            y = xy[1]
                            # Plotting requires positive values for error
                            y_err = [[max(1e-9, y.y - y.y_min)], [max(1e-9, y.y_max - y.y)]]
                            axis.errorbar(x=x, y=y.y, yerr=y_err,
                                          ecolor=colors[exp_idx + color_offset[mode]],
                                          elinewidth=1.5, capsize=4.0, capthick=1.5,
                                          zorder=3)  # zorder to put markers on top of line segments
                            xy[1] = y.y
                        s = axis.scatter(xy[0], xy[1], s=40,
                                         c=[colors[exp_idx + color_offset[mode]]],
                                         marker=style, linewidth=1.0, edgecolors='black',
                                         zorder=4)  # zorder to put markers on top of line segments
                        if not has_label[exp_idx][mode]['patch']:
                            labels.append(title)
                            handles.append(s)
                            has_label[exp_idx][mode]['patch'] = True
                    else:
                        # We can draw a line
                        y = data[:, 1]
                        y_min = None
                        y_max = None
                        if isinstance(y[0], ValWithError):
                            y = np.stack(y)
                            y_min = y[:, 0]
                            y_max = y[:, 2]
                            y = y[:, 1]
                            if smooth_factor != 0:
                                y_min = gaussian_filter1d(y_min, sigma=smooth_factor)
                                y_max = gaussian_filter1d(y_max, sigma=smooth_factor)
                        if smooth_factor != 0:
                            y = gaussian_filter1d(y, sigma=smooth_factor)
                        x = data[:, 0]
                        ln = axis.plot(x, y,
                                       color=colors[exp_idx + color_offset[mode]],
                                       label=title,
                                       linewidth=1.5,
                                       linestyle='solid' if mode == 'train' else
                                       'dashed' if mode == 'eval' else
                                       'dotted' if mode == 'test' else 'dashdot')
                        if not has_label[exp_idx][mode]['line']:
                            labels.append(title)
                            handles.append(ln[0])
                            has_label[exp_idx][mode]['line'] = True
                        if y_max is not None and y_min is not None:
                            axis.fill_between(x.astype(np.float32), y_max, y_min,
                                              facecolor=colors[exp_idx + color_offset[mode]],
                                              alpha=0.3, zorder=-1)
            else:
                # Some kind of image or matrix. Not implemented yet.
                pass
    plt.tight_layout()
    if labels:
        if share_legend:
            # Sort the labels
            handles = [h for _, h in sorted(zip(labels, handles), key=lambda pair: pair[0])]
            labels = sorted(labels)
            # Split the labels over multiple legends if there are too many to fit in one axis
            elems_per_legend = math.ceil(len(labels) / n_legends)
            i = 0
            for r in range(last_row_idx, n_rows):
                for c in range(last_column_idx + 1 if r == last_row_idx else 0, n_cols):
                    if len(handles) <= i:
                        break
                    axs[r][c].legend(handles[i:i + elems_per_legend],
                                     labels[i:i + elems_per_legend],
                                     loc='center',
                                     fontsize='large' if elems_per_legend <= 6 else
                                     'medium' if elems_per_legend <= 8 else 'small')
                    i += elems_per_legend
        else:
            for i in range(n_rows):
                for j in range(n_cols):
                    if i == last_row_idx and j > last_column_idx:
                        break
                    axs[i][j].legend(loc='best', fontsize='small')
    return fig
def update(self, keymap, context, active_keychains, keys_given):
    active_keychains = list(active_keychains)
    self._pile.contents[:] = []
    if not active_keychains:
        return

    # Find number of keychain columns
    key_col_num = max(len(kc) for kc, action in active_keychains)

    # Find widest key for each column (each key is in its own cell)
    key_col_widths = [0] * key_col_num
    for kc, action in active_keychains:
        for colnum in range(key_col_num):
            try:
                width = len(kc[colnum])
            except IndexError:
                width = 0
            key_col_widths[colnum] = max(key_col_widths[colnum], width)

    # Total key chain column width is:
    # max(len(key) for each key) + (1 space between keys)
    key_col_width = sum(key_col_widths) + key_col_num - 1

    # Create list of rows
    keychain_col_width = max(len(self._headers[0]), key_col_width)
    spacer = ('pack', urwid.Text(' '))
    rows = [
        # First row is the headers
        urwid.AttrMap(urwid.Columns([
            (keychain_col_width, urwid.Text(self._headers[0])),
            spacer,
            urwid.Text(self._headers[1]),
            spacer,
            urwid.Text(self._headers[2]),
        ]), 'keychains.header')
    ]
    next_key_index = len(keys_given)
    if len(active_keychains) > 10:
        active_keychains = self._compress_keychains(active_keychains, next_key_index)
    for kc, action in humansorted(active_keychains, key=lambda x: str(x[0])):
        row = []
        for colnum in range(key_col_num):
            colwidth = key_col_widths[colnum]
            try:
                keytext = kc[colnum].ljust(colwidth)
            except IndexError:
                # This keychain is shorter than the longest one
                row.append(('pack', urwid.Text(('keychains.keys', ''.ljust(colwidth)))))
            else:
                # Highlight the key the user needs to press to advance keychain
                attrs = ('keychains.keys.next' if colnum == next_key_index
                         else 'keychains.keys')
                row.append(('pack', urwid.Text((attrs, keytext))))
            # Add space between this key cell and the next unless this is the last column
            if colnum < key_col_num - 1:
                row.append(('pack', urwid.Text(('keychains.keys', ' '))))
        # Fill remaining space if 'Key Chain' header is longer than all key chains
        remaining_width = keychain_col_width - key_col_width
        row.append(('pack', urwid.Text(('keychains.keys', ''.ljust(remaining_width)))))
        row.append(spacer)
        row.append(urwid.AttrMap(urwid.Text(str(action)), 'keychains.action'))
        row.append(spacer)
        row.append(urwid.AttrMap(urwid.Text(keymap.get_description(kc, context)),
                                 'keychains.description'))
        rows.append(urwid.Columns(row))
    options = self._pile.options('pack')
    for row in rows:
        self._pile.contents.append((row, options))
    Otherwise: s(t) = alpha * x(t) + (1 - alpha) * s(t-1).
    :return: Mean frame.
    """
    mean_frame_new = alpha * current_frame + (1 - alpha) * mean_frame
    return mean_frame_new


# %% MEAN FRAME COMPUTATION
# Calculate the mean frame for each video set and save it in the directory
# that it belongs to.
for sequence in range(39, 40):
    file_dir = "../experiment_data/imageset_" + str(sequence + 1)
    file_list = glob.glob(file_dir + "/*.jpg")
    file_list = humansorted(file_list)
    it = 0
    mean_frame = 0.0
    with tqdm.tqdm(total=len(file_list)) as pbar:
        for file in file_list:
            x = cv2.imread(file).astype(np.float64)  # np.float was removed in NumPy >= 1.24
            if it == 0:
                mean_frame = x
            else:
                # mean_frame = mean_frame_computation_exp_smoothing(x, mean_frame, alpha=0.075)
                mean_frame = mean_frame + x
            it += 1
            pbar.update(1)
    mean_frame = mean_frame.astype(np.float64) / len(file_list)
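# A minimal illustration of why the globbed frame paths above are passed
# through humansorted(): glob returns paths in arbitrary order, and the
# builtin sort places 'frame_10' before 'frame_2'. File names here are
# hypothetical.
from natsort import humansorted

frames = ["frame_10.jpg", "frame_2.jpg", "frame_1.jpg"]
print(sorted(frames))       # ['frame_1.jpg', 'frame_10.jpg', 'frame_2.jpg']
print(humansorted(frames))  # ['frame_1.jpg', 'frame_2.jpg', 'frame_10.jpg']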
model_types = ["V", "S", "VS"]
color_dict = {"V": (14, 127, 255), "S": (44, 160, 44), "VS": (40, 39, 214)}

# load the predictions
pred_dict = {}
for model_type in model_types:
    with open("../../clean_code/preds_" + model_type + "_test.preds", "rb") as f:
        pred_dict[model_type] = pickle.load(f)

# get the reference forces
labels = np.loadtxt('../labels_' + str(dataset_num) + '.txt', delimiter=",")
force = labels[:, 1:4]
time = labels[:, 0]

# glob the images
heatmap_VS = glob.glob("imageset_captum_VS_" + str(dataset_num) + "/*.jpg")
heatmap_VS = humansorted(heatmap_VS)
heatmap_V = glob.glob("imageset_captum_V_" + str(dataset_num) + "/*.jpg")
heatmap_V = humansorted(heatmap_V)
original = glob.glob("../imageset_" + str(dataset_num) + "/*.jpg")
original = humansorted(original)

out = cv2.VideoWriter('summary_video_' + str(dataset_num) + '.avi',
                      cv2.VideoWriter_fourcc(*'MJPG'), 30, (672, 972), True)

with tqdm.tqdm(total=len(heatmap_VS), desc="dataset " + str(dataset_num)) as pbar:
    for frame_num in range(len(heatmap_VS)):
        # frame_num = np.random.randint(0, 3000)
        # for the given frame:
        frame_force = force[frame_num, :]
        frame_pred = {}
def get_tags_table():
    """:return: an html table of the xml handling options"""
    from natsort import humansorted

    utility.xml_handling_options()
    s = ''
    keys = list(session['xmlhandlingoptions'].keys())
    keys = humansorted(keys)

    for key in keys:
        b = '<select name="' + key + '">'
        if session['xmlhandlingoptions'][key]['action'] == 'remove-element':
            b += '<option value="remove-tag,' + key + \
                 '">Remove Tag Only</option>'
            b += '<option value="remove-element,' + key + \
                 '" selected="selected">' \
                 'Remove Element and All Its Contents</option>'
            b += '<option value="replace-element,' + key + \
                 '">Replace Element and Its Contents with Attribute Value' \
                 '</option>'
            b += '<option value="leave-alone,' + key + \
                 '">Leave Tag Alone</option>'
        elif session['xmlhandlingoptions'][key]["action"] == 'replace-element':
            b += '<option value="remove-tag,' + key + \
                 '">Remove Tag Only</option>'
            b += '<option value="remove-element,' + key + \
                 '">Remove Element and All Its Contents</option>'
            b += '<option value="replace-element,' + key + \
                 '" selected="selected">Replace Element and Its ' \
                 'Contents with Attribute Value</option>'
            b += '<option value="leave-alone,' + key + \
                 '">Leave Tag Alone</option>'
        elif session['xmlhandlingoptions'][key]["action"] == 'leave-alone':
            b += '<option value="remove-tag,' + key + \
                 '">Remove Tag Only</option>'
            b += '<option value="remove-element,' + key + \
                 '">Remove Element and All Its Contents</option>'
            b += '<option value="replace-element,' + key + \
                 '">Replace Element and Its Contents ' \
                 'with Attribute Value</option>'
            b += '<option value="leave-alone,' + key + \
                 '" selected="selected">Leave Tag Alone</option>'
        else:
            b += '<option value="remove-tag,' + key + \
                 '" selected="selected">Remove Tag Only</option>'
            b += '<option value="remove-element,' + key + \
                 '">Remove Element and All Its Contents</option>'
            b += '<option value="replace-element,' + key + \
                 '">Replace Element and Its Contents with Attribute Value' \
                 '</option>'
            b += '<option value="leave-alone,' + key + \
                 '">Leave Tag Alone</option>'
        b += '</select>'
        c = 'Attribute: <input type="text" name="attributeValue' + key + \
            '" value="' + session['xmlhandlingoptions'][key]["attribute"] + \
            '"/>'
        s += "<tr><td>" + key + "</td><td>" + b + "</td><td>" + c + \
             "</td></tr>"

    response = {"menu": s, "selected-options": "multiple"}

    # Count the number of actions and change selected-options to
    # the selected option if they are all the same.
    num_actions = []
    for item in session['xmlhandlingoptions'].items():
        num_actions.append(item[1]["action"])
    num_actions = list(set(num_actions))
    if len(num_actions) == 1:
        response["selected-options"] = num_actions[0] + ",allTags"

    return json.dumps(response)
def _document_models(self) -> None:
    """Add model summaries to the traceability document."""
    with self.doc.create(Section("Models")):
        for model in humansorted(self.system.network.models, key=lambda m: m.model_name):
            if not isinstance(model, (tf.keras.Model, torch.nn.Module)):
                continue
            self.doc.append(NoEscape(r'\FloatBarrier'))
            with self.doc.create(Subsection(f"{model.model_name.capitalize()}")):
                if isinstance(model, tf.keras.Model):
                    # Text Summary
                    summary = []
                    model.summary(line_length=92, print_fn=lambda x: summary.append(x))
                    summary = "\n".join(summary)
                    self.doc.append(Verbatim(summary))
                    with self.doc.create(Center()):
                        self.doc.append(HrefFEID(FEID(id(model)), model.model_name))
                    # Visual Summary
                    # noinspection PyBroadException
                    try:
                        file_path = os.path.join(self.resource_dir,
                                                 "{}_{}.pdf".format(self.report_name, model.model_name))
                        dot = tf.keras.utils.model_to_dot(model, show_shapes=True, expand_nested=True)
                        # LaTeX \maxdim is around 575cm (226 inches), so the image must have max
                        # dimension less than 226 inches. However, the 'size' parameter doesn't
                        # account for the whole node height, so set the limit lower (100 inches)
                        # to leave some wiggle room.
                        dot.set('size', '100')
                        dot.write(file_path, format='pdf')
                    except Exception:
                        file_path = None
                        print(f"FastEstimator-Warn: Model {model.model_name} could not be visualized by Traceability")
                elif isinstance(model, torch.nn.Module):
                    if hasattr(model, 'fe_input_spec'):
                        # Text Summary
                        # noinspection PyUnresolvedReferences
                        inputs = model.fe_input_spec.get_dummy_input()
                        self.doc.append(
                            Verbatim(
                                pms.summary(model.module if self.system.num_devices > 1 else model,
                                            inputs,
                                            print_summary=False)))
                        with self.doc.create(Center()):
                            self.doc.append(HrefFEID(FEID(id(model)), model.model_name))
                        # Visual Summary
                        # Import has to be done while matplotlib is using the Agg backend
                        old_backend = matplotlib.get_backend() or 'Agg'
                        matplotlib.use('Agg')
                        # noinspection PyBroadException
                        try:
                            # Fake the IPython import when user isn't running from Jupyter
                            sys.modules.setdefault('IPython', MagicMock())
                            sys.modules.setdefault('IPython.display', MagicMock())
                            import hiddenlayer as hl
                            with Suppressor():
                                graph = hl.build_graph(model.module if self.system.num_devices > 1 else model,
                                                       inputs)
                            graph = graph.build_dot()
                            graph.attr(rankdir='TB')  # Switch it to Top-to-Bottom instead of Left-to-Right
                            # LaTeX \maxdim is around 575cm (226 inches), so the image must have max
                            # dimension less than 226 inches. However, the 'size' parameter doesn't
                            # account for the whole node height, so set the limit lower (100 inches)
                            # to leave some wiggle room.
                            graph.attr(size="100,100")
                            graph.attr(margin='0')
                            file_path = graph.render(filename="{}_{}".format(self.report_name, model.model_name),
                                                     directory=self.resource_dir,
                                                     format='pdf',
                                                     cleanup=True)
                        except Exception:
                            file_path = None
                            print("FastEstimator-Warn: Model {} could not be visualized by Traceability".format(
                                model.model_name))
                        finally:
                            matplotlib.use(old_backend)
                    else:
                        file_path = None
                        self.doc.append("This model was not used by the Network during training.")
                if file_path:
                    with self.doc.create(Figure(position='ht!')) as fig:
                        fig.append(Label(Marker(name=str(FEID(id(model))), prefix="model")))
                        fig.add_image(os.path.relpath(file_path, start=self.save_dir),
                                      width=NoEscape(r'1.0\textwidth,height=0.95\textheight,keepaspectratio'))
                        fig.add_caption(NoEscape(HrefFEID(FEID(id(model)), model.model_name).dumps()))
def do(self, *args, exp_cands, **kwargs):
    cands = candidates.fs_path(*args, **kwargs)
    self.assertEqual(tuple(cands), tuple(humansorted(exp_cands)))
    self.assertEqual(cands.curarg_seps, ('/',))
    n_files = len(all_files)  # default is scanning all files
else:
    n_files = args.n_files

if args.test:
    test_calc_total_work_time()
    test_get_nth_prev_month()
    print("If nothing was printed above, all tests succeeded")
    exit()

# all_files.remove('view.py')
# all_files.remove('this week')
# all_files.remove('worth-readings')
# all_files.remove

count = 0
ordered_files = natsort.humansorted(all_files, reverse=True)
today = datetime.date.today()
one_day = timedelta(days=1)

if args.count:
    n_days = args.count
    dt = today - n_days * one_day + one_day  # get datetime object for (n_days - 1) days ago
    hours = []
    off_days = 0
    for _ in range(n_days):
        day = dt.strftime(DATE_FORMAT_YMD)
        worked_time = get_worked_time_for_strdate(day)
        print(dt, dt.strftime('%a'), worked_time)  # YYYY-MM-DD Mon/Tue/... H:MM
        if is_day_off(dt):
            off_days += 1
def parse_xml(self, path, module):
    logger.debug("parsing xml: %s", path)

    # lookup tables
    lookup = {}
    lookup["encounter"] = {}
    lookup["page"] = {}
    lookup["map"] = {}
    lookup["image"] = {}
    lookup["npc"] = {}
    lookup["quest"] = {}

    # arrays
    pages = []
    maps = []
    groups = []
    encounters = []

    # xml tree
    tree = ElementTree.parse(path)
    root = tree.getroot()

    # NPCS
    logger.info("parsing npcs")
    for category in root.findall("./npc/category"):
        for node in category.findall("*"):
            tag = node.tag
            name = node.find("name").text
            npc = NPC()
            npc.name = name
            lookup["npc"][tag] = npc

    # PAGES
    logger.info("parsing pages")
    parent = Group()
    parent.name = "Story"
    parent.slug = slugify(parent.name)
    groups.append(parent)
    for category in root.findall("./encounter/category"):
        group = Group()
        group.name = category.get("name")
        group.slug = slugify(group.name)
        group.parent = parent
        if group.name is None or group.name == "":
            group = parent
        else:
            groups.append(group)

        # get all pages
        for node in category.findall("*"):
            # tag
            tag = node.tag

            # create page
            page = Page()
            page.meta["tag"] = tag
            page.name = node.find("name").text
            page.slug = slugify(page.name)
            page.content = ElementTree.tostring(
                node.find("text"), encoding='utf-8', method='xml').decode('utf-8')
            page.parent = group
            pages.append(page)
            lookup["page"][tag] = page

    # QUESTS
    logger.info("parsing quests")
    parent = Group()
    parent.name = "Quests"
    parent.slug = slugify(parent.name)
    groups.append(parent)
    # some modules nest quests one level deeper, so use this instead
    for node in root.findall("./quest/*/*"):
        # for node in root.findall("./quest/*"):
        # tag
        tag = node.tag

        # create quest
        page = Page()
        page.meta["tag"] = tag  # was `id` (the Python builtin), clearly meant to be the tag
        page.name = node.find("name").text
        page.slug = slugify(page.name)
        page.content = ElementTree.tostring(node.find("description"),
                                            encoding='utf-8',
                                            method='xml').decode('utf-8')
        # find() results must be compared against None: an Element with no
        # children is falsy, so a bare `if node.find("cr")` would skip it
        cr = node.find("cr").text if node.find("cr") is not None else ""
        xp = node.find("xp").text if node.find("xp") is not None else ""
        page.content += '<p><strong>CR:</strong> ' + cr + ' <strong>XP:</strong> ' + xp + '</p>'
        page.parent = parent
        pages.append(page)
        lookup["quest"][tag] = page

    # sort
    pages_sorted = humansorted(pages, key=lambda x: x.name)

    # MAPS & IMAGES
    logger.info("parsing images and maps")
    parent = Group()
    parent.name = "Maps & Images"
    parent.slug = slugify(parent.name)
    groups.append(parent)
    for category in root.findall("./image/category"):
        group = Group()
        group.name = category.get("name")
        group.slug = slugify(group.name)
        group.parent = parent
        if group.name is None or group.name == "":
            group = parent
        else:
            groups.append(group)

        for node in category.findall("*"):
            # tag
            tag = node.tag

            # create image
            image = Image()
            image.tag = tag
            image.bitmap = node.find("./image/bitmap").text.replace("\\", "/")
            image.name = node.find("name").text
            lookup["image"][tag] = image

            markers = []
            # get shortcuts (markers)
            for shortcut in node.findall("./image/shortcuts/shortcut"):
                # create marker
                marker = Marker()
                marker.x = shortcut.find("x").text
                marker.y = shortcut.find("y").text
                shortcut_ref = shortcut.find("recordname").text.replace(
                    "encounter.", "").replace("@*", "")
                page = None
                if shortcut_ref in lookup["page"]:
                    page = lookup["page"][shortcut_ref]
                    # remove chapter numbers from page name
                    # maybe use a regex?
                    name = page.name
                    if " " in page.name:
                        first, second = page.name.split(' ', 1)
                        if "." in first:
                            name = second
                    marker.name = name
                    marker.contentRef = "/page/" + page.slug
                    markers.append(marker)

            if markers:
                # if markers is not empty, it's a map
                map = Map()
                map.parent = group
                map.meta["tag"] = tag
                map.name = image.name
                map.slug = slugify(map.name)
                map.image = image.bitmap
                if node.find("./image/gridsize") is not None:
                    map.gridSize = node.find("./image/gridsize").text
                if node.find("./image/gridoffset") is not None:
                    gridOffset = node.find("./image/gridoffset").text
                    map.gridOffsetX = gridOffset.split(",")[0]
                    map.gridOffsetY = gridOffset.split(",")[1]
                map.markers = markers
                maps.append(map)
                lookup["map"][tag] = map
            else:
                # otherwise, it's an image
                page = Page()
                page.parent = group
                page.meta["tag"] = tag
                page.name = image.name
                page.slug = slugify(page.name)
                page.content = '<p><img class="size-full" src="' + image.bitmap + '" /></p>'
                pages_sorted.append(page)
                # do not add to lookup tables

    # sort
    maps_sorted = humansorted(maps, key=lambda x: x.name)

    # ENCOUNTERS
    logger.info("parsing encounters")
    parent = Group()
    parent.name = "Encounters"
    parent.slug = slugify(parent.name)
    groups.append(parent)
    for category in root.findall("./battle/category"):
        group = Group()
        group.name = category.get("name")
        group.slug = slugify(group.name)
        group.parent = parent
        if group.name is None or group.name == "":
            group = parent
        else:
            groups.append(group)

        for node in category.findall("*"):
            # tag
            tag = node.tag

            # create encounter
            encounter = Encounter()
            encounter.meta["tag"] = tag
            encounter.parent = group
            encounter.name = node.find("name").text
            encounter.slug = slugify(encounter.name)
            encounters.append(encounter)
            lookup["encounter"][tag] = encounter

            # get combatants
            for npcnode in node.find("npclist").findall("*"):
                # get positions
                maplinks = npcnode.findall("./maplink/*")
                # combatants count
                count = int(npcnode.find("count").text)
                # iterate
                for x in range(count):
                    combatant = Combatant()
                    combatant.name = npcnode.find("name").text
                    encounter.combatants.append(combatant)
                    # if position on map
                    if len(maplinks) == count:
                        maplinknode = maplinks[x]
                        if maplinknode.find("./imagex") is not None:
                            combatant.x = maplinknode.find("./imagex").text
                        if maplinknode.find("./imagey") is not None:
                            combatant.y = maplinknode.find("./imagey").text

    encounters_sorted = humansorted(encounters, key=lambda x: x.name)

    # custom regex for processing links
    def href_replace(match):
        key = str(match.group(2)).split("@")[0]
        type = match.group(1)
        if type == "image" and key in lookup["map"]:
            return 'href="/map/' + lookup["map"][key].slug
        elif type == "image" and key in lookup["image"]:
            return 'href="' + lookup["image"][key].bitmap
        elif type == "encounter" and key in lookup["page"]:
            return 'href="' + lookup["page"][key].slug
        elif type == "battle" and key in lookup["encounter"]:
            return 'href="/encounter/' + lookup["encounter"][key].slug
        elif type == "quest" and key in lookup["quest"]:
            return 'href="' + lookup["quest"][key].slug
        else:
            return key

    # fix content tags in pages
    for page in pages_sorted:
        content = page.content
        # maybe regex
        content = content.replace('<text type="formattedtext">', '').replace('<text>', '').replace('</text>', '')
        content = content.replace('<description type="formattedtext">', '').replace('<description>', '').replace(
            '</description>', '')
        content = content.replace('<frame>', '<blockquote class="read">').replace('</frame>', '</blockquote>')
        content = content.replace('<frameid>DM</frameid>', '')
        content = content.replace('\r', '<br />')
        content = content.replace('<h>', '<h3>').replace('</h>', '</h3>')
        content = content.replace('<list>', '<ul>').replace('</list>', '</ul>')
        # content = content.replace("<linklist>", "<ul>").replace("</linklist>", "</ul>")
        content = content.replace('<linklist>', '').replace('</linklist>', '')
        content = content.replace('<link', '<p><a').replace('</link>', '</a></p>')
        content = content.replace(' recordname', ' href')
        content = content.strip()

        # fix links
        content = re.sub(r'href=[\'"]?(encounter|battle|image|quest)\.([^\'">]+)', href_replace, content)

        # add title
        if content.startswith('<h3>'):
            page.content = content.replace('<h3>', '<h2>', 1).replace('</h3>', '</h2>', 1)
        else:
            page.content = '<h2>' + page.name + '</h2>' + content

    # assign data to module
    module.groups = groups
    module.pages = pages_sorted
    module.maps = maps_sorted
    module.encounters = encounters_sorted

    return module
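# The humansorted() calls in parse_xml all follow the same pattern: sort
# objects by a display-name attribute via `key`, so that 'Chapter 2' precedes
# 'Chapter 10'. A small self-contained sketch (this Page stand-in is
# hypothetical, not the class used above):
from natsort import humansorted

class Page:
    def __init__(self, name):
        self.name = name

pages = [Page("Chapter 10"), Page("Chapter 2"), Page("Appendix A")]
print([p.name for p in humansorted(pages, key=lambda x: x.name)])
# ['Appendix A', 'Chapter 2', 'Chapter 10']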
def _build_taxonomy_list_and_hierarchy(self, taxonomy_name, lang):
    """Build taxonomy list and hierarchy for the given taxonomy name and language."""
    if taxonomy_name not in self.site.posts_per_classification or taxonomy_name not in self.site.taxonomy_plugins:
        return None, None
    posts_per_tag = self.site.posts_per_classification[taxonomy_name][lang]
    taxonomy = self.site.taxonomy_plugins[taxonomy_name]

    def acceptor(post):
        return True if self.site.config['SHOW_UNTRANSLATED_POSTS'] else post.is_translation_available(lang)

    # Build classification list
    classifications = [(taxonomy.get_classification_friendly_name(tag, lang, only_last_component=False), tag)
                       for tag in posts_per_tag.keys()]
    if classifications:
        # Sort classifications
        classifications = natsort.humansorted(classifications)
        # Build items list
        result = list()
        for classification_name, classification in classifications:
            count = len([post for post in posts_per_tag[classification] if acceptor(post)])
            result.append((classification_name, count, self.site.link(taxonomy_name, classification, lang)))
        # Build hierarchy
        if taxonomy.has_hierarchy:
            # Special post-processing for archives: get rid of root and cut off tree at month level
            if taxonomy_name == 'archive':
                root_list = self.site.hierarchy_per_classification[taxonomy_name][lang]
                root_list = utils.clone_treenode(root_list[0]).children

                def cut_depth(node, cutoff):
                    if cutoff <= 1:
                        node.children = []
                    else:
                        for node in node.children:
                            cut_depth(node, cutoff - 1)

                def invert_order(node):
                    node.children.reverse()
                    for node in node.children:
                        invert_order(node)

                # Make sure that days don't creep in
                for node in root_list:
                    cut_depth(node, 2)
                    invert_order(node)
                root_list.reverse()
                flat_hierarchy = utils.flatten_tree_structure(root_list)
            else:
                flat_hierarchy = self.site.flat_hierarchy_per_classification[taxonomy_name][lang]
        else:
            root_list = []
            for classification_name, classification in classifications:
                node = utils.TreeNode(classification_name)
                node.classification_name = classification
                node.classification_path = taxonomy.extract_hierarchy(classification)
                root_list.append(node)
            flat_hierarchy = utils.flatten_tree_structure(root_list)
        # Build flattened hierarchy list
        hierarchy = [(taxonomy.get_classification_friendly_name(node.classification_name, lang,
                                                                only_last_component=False),
                      node.classification_name,
                      node.classification_path,
                      self.site.link(taxonomy_name, node.classification_name, lang),
                      node.indent_levels,
                      node.indent_change_before,
                      node.indent_change_after,
                      len(node.children),
                      len([post for post in posts_per_tag[node.classification_name] if acceptor(post)]))
                     for node in flat_hierarchy]
        return result, hierarchy
    else:
        return None, None
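# natsort compares sequences element-wise, so the (friendly_name, tag) pairs
# above sort naturally by friendly name first, falling back to the tag on
# ties. Illustrative data, not from a real Nikola site:
import natsort

classifications = [("Part 10", "part-10"), ("Part 2", "part-2")]
print(natsort.humansorted(classifications))
# [('Part 2', 'part-2'), ('Part 10', 'part-10')]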
async def file_priority(self, torrents, files, priority):
    """
    Change download priority of individual torrent files

    torrents: See `torrents` method
    files: FileFilter object (or its string representation), sequence of
           (torrent ID, file ID) tuples or None for all files
    priority: One of the strings 'off', 'low', 'normal' or 'high'

    Return Response with the following properties:
        torrents: Tuple of matching Torrents with matching files with the
                  keys 'id', 'name' and 'files'
        success: True if any file priorities were changed, False otherwise
        msgs: List of info messages
        errors: List of error messages
    """
    response = await self.torrents(torrents, keys=('name', 'files'))
    if not response.success:
        return Response(success=False, torrents=(), errors=response.errors)
    else:
        if isinstance(files, str):
            files = FileFilter(files)

        # Set filter_files to a function that takes a TorrentFileTree and
        # returns a tuple of TorrentFiles.
        if files is None:
            def filter_files(ftree):
                return tuple(ftree.files)
        elif isinstance(files, FileFilter):
            def filter_files(ftree):
                return tuple(files.apply(ftree.files))
        elif isinstance(files, abc.Sequence):
            def filter_files(ftree):
                return tuple(f for f in ftree.files if f['id'] in files)
        else:
            raise ValueError("Invalid 'files' argument: %r" % (files,))

        torrent_ids = []
        msgs = []
        errors = []
        success = False  # guard against `success` being unbound if no torrents matched
        for t in humansorted(response.torrents, key=lambda t: t['name']):
            # Filter torrent's files
            flist = filter_files(t['files'])
            if files is None:
                msgs.append('%d file%s: %s'
                            % (len(flist), '' if len(flist) == 1 else 's', t['name']))
            else:
                if not flist:
                    errors.append('No matching files: %s' % (t['name'],))
                else:
                    msgs.append('%d matching file%s: %s'
                                % (len(flist), '' if len(flist) == 1 else 's', t['name']))
            success = len(flist) > 0

            # Transmission wants a list of file indexes. For aiotransmission,
            # the 'id' field of a TorrentFile is a tuple:
            # (<torrent ID>, <file index>)
            # (See aiotransmission.torrent._create_TorrentFileTree())
            findexes = tuple(f['id'][1] for f in flist)
            if findexes:
                response = await self._set_files_priority(priority, t['id'], findexes)
                if response.success:
                    torrent_ids.append(t['id'])
                msgs.extend(response.msgs)
                errors.extend(response.errors)

        if torrent_ids:
            response = await self.torrents(torrent_ids, keys=('id', 'name', 'files'))
            if not response.success:
                return Response(success=False, torrents=(), errors=response.errors)
            else:
                torrents = response.torrents

        return Response(success=success, torrents=torrents, msgs=msgs, errors=errors)
def main():
    plt.rcParams.update({"font.family": "sans-serif",
                         "font.sans-serif": ["Helvetica"],
                         "font.size": 16})
    parser = argparse.ArgumentParser()
    parser.add_argument('csv', nargs="+", type=str, help='Full path to CSV file')
    parser.add_argument('-o', required=True, type=str, default="", help='output filename')
    args = parser.parse_args()
    c = args.csv
    m = "*"

    input_re = re.compile(r".*quant-.+\.csv")
    for csv in args.csv:
        if input_re.match(csv) is None:
            print("Suspicious input file:", csv)
            exit(1)

    matplotlib.rcParams['agg.path.chunksize'] = 10000
    fig = plt.figure()

    bsize_labels = []
    bsize_xs = []
    bsize_ys = []
    bsize_dicts = []
    for csv in humansorted(args.csv):
        # label = basename(splitext(csv)[0])[6:]
        label = basename(splitext(csv)[0]).split("-")[-1]
        if int(label) > 1024:
            continue
        bsize_labels.append(label)
        bsize_dict = dict()
        with open(csv, 'r') as c:
            while True:
                print(csv)
                # FIXME won't work with string-based prob sizes
                size = int(c.readline().split(",")[1])
                alloced = c.readline().split(",")[1]
                perf = float(c.readline().split(",")[1])
                bsize_dict[size] = perf
                # bs_x.append(size)
                # bs_y.append(perf)
                if c.tell() == os.fstat(c.fileno()).st_size:
                    break
        bsize_dicts.append(bsize_dict)
        # bsize_xs.append(bs_x)
        # bsize_ys.append(bs_y)

    for d in bsize_dicts:
        xs, ys = zip(*humansorted(d.items(), key=lambda t: t[0]))
        bsize_xs.append(xs)
        bsize_ys.append(ys)

    evenly_spaced_interval = np.linspace(0, 1, len(bsize_xs))
    colors = [cm.rainbow(x) for x in evenly_spaced_interval]
    for x, y, l, c in zip(bsize_xs, bsize_ys, bsize_labels, colors):
        plt.plot(x, y, "b+", label=l, marker=".", color=c, linestyle="-")

    plt.xlabel("Problem Size")
    plt.ylabel("GFLOPS")
    plt.legend()

    figname = args.o
    if ".png" not in figname:
        figname += ".png"
    plt.tight_layout()
    print('saving figure:', figname)
    fig.savefig(figname, dpi=500)
    plt.close(fig)
def _document_models(self) -> None:
    """Add model summaries to the traceability document."""
    with self.doc.create(Section("Models")):
        for model in humansorted(self.system.network.models, key=lambda m: m.model_name):
            if not isinstance(model, (tf.keras.Model, torch.nn.Module)):
                continue
            self.doc.append(NoEscape(r'\FloatBarrier'))
            with self.doc.create(Subsection(f"{model.model_name}")):
                if isinstance(model, tf.keras.Model):
                    # Text Summary
                    summary = []
                    model.summary(line_length=92, print_fn=lambda x: summary.append(x))
                    summary = "\n".join(summary)
                    self.doc.append(Verbatim(summary))
                    with self.doc.create(Center()):
                        self.doc.append(HrefFEID(FEID(id(model)), model.model_name))
                    # Visual Summary
                    # noinspection PyBroadException
                    try:
                        file_path = os.path.join(self.figure_dir, f"FE_Model_{model.model_name}.pdf")
                        tf.keras.utils.plot_model(model, to_file=file_path, show_shapes=True, expand_nested=True)
                        # TODO - cap output image size like in the pytorch implementation in case of huge network
                        # TODO - save raw .dot file in case system lacks graphviz
                    except Exception:
                        file_path = None
                        print(f"FastEstimator-Warn: Model {model.model_name} could not be visualized by Traceability")
                elif isinstance(model, torch.nn.Module):
                    if hasattr(model, 'fe_input_spec'):
                        # Text Summary
                        # noinspection PyUnresolvedReferences
                        inputs = model.fe_input_spec.get_dummy_input()
                        self.doc.append(Verbatim(pms.summary(model, inputs)))
                        with self.doc.create(Center()):
                            self.doc.append(HrefFEID(FEID(id(model)), model.model_name))
                        # Visual Summary
                        # Import has to be done while matplotlib is using the Agg backend
                        old_backend = matplotlib.get_backend() or 'Agg'
                        matplotlib.use('Agg')
                        # noinspection PyBroadException
                        try:
                            # Fake the IPython import when user isn't running from Jupyter
                            sys.modules.setdefault('IPython', MagicMock())
                            sys.modules.setdefault('IPython.display', MagicMock())
                            import hiddenlayer as hl
                            with Suppressor():
                                graph = hl.build_graph(model, inputs)
                            graph = graph.build_dot()
                            graph.attr(rankdir='TB')  # Switch it to Top-to-Bottom instead of Left-to-Right
                            graph.attr(size="200,200")  # LaTeX \maxdim is around 575cm (226 inches)
                            graph.attr(margin='0')
                            # TODO - save raw .dot file in case system lacks graphviz
                            file_path = graph.render(filename=f"FE_Model_{model.model_name}",
                                                     directory=self.figure_dir,
                                                     format='pdf',
                                                     cleanup=True)
                        except Exception:
                            file_path = None
                            print("FastEstimator-Warn: Model {} could not be visualized by Traceability".format(
                                model.model_name))
                        finally:
                            matplotlib.use(old_backend)
                    else:
                        file_path = None  # was missing, causing a NameError at the `if file_path` check below
                        self.doc.append("This model was not used by the Network during training.")
                if file_path:
                    with self.doc.create(Figure(position='ht!')) as fig:
                        fig.append(Label(Marker(name=str(FEID(id(model))), prefix="model")))
                        fig.add_image(os.path.relpath(file_path, start=self.save_dir),
                                      width=NoEscape(r'1.0\textwidth,height=0.95\textheight,keepaspectratio'))
                        fig.add_caption(NoEscape(HrefFEID(FEID(id(model)), model.model_name).dumps()))
def execute(self):
    d = dialog.Dialog(dialog="dialog")
    # Find the revision files, which should look like
    # '<filename>.<revision>'.
    pathname = "%s.*" % openmediavault.getenv("OMV_CONFIG_FILE")
    configbaks = natsort.humansorted(glob.glob(pathname))
    # Does an auto-generated configuration backup exist?
    if not configbaks:
        d.msgbox("No configuration backup found!",
                 backtitle=self.description,
                 height=5,
                 width=34)
        return 0
    # Get the latest configuration backup file.
    configbak = configbaks.pop()
    # Only show a diff, if there's a difference.
    rc = openmediavault.subprocess.call(
        ["diff", "--brief", openmediavault.getenv("OMV_CONFIG_FILE"), configbak],
        stdout=subprocess.PIPE)
    if rc == 0:
        d.msgbox("There's no difference between the configuration "
                 "files. Nothing to restore.",
                 backtitle=self.description,
                 height=6,
                 width=58)
        return 0
    # Display the differences?
    code = d.yesno("Do you want to see the differences between the "
                   "current configuration and the backup?",
                   backtitle=self.description,
                   height=6,
                   width=46)
    if code == d.ESC:
        return 0
    if code == d.OK:
        output = "===================================================================\n" \
                 "All lines with '-' will be changed to the lines with '+'\n" \
                 "===================================================================\n"
        p = openmediavault.subprocess.Popen(
            ["diff", "--unified=1", openmediavault.getenv("OMV_CONFIG_FILE"), configbak],
            stdout=subprocess.PIPE,
            shell=False)
        stdout, stderr = p.communicate()
        output += stdout.decode()
        d.scrollbox(output,
                    backtitle=self.description,
                    height=18,
                    width=72,
                    clear=True)
    # Restore configuration backup?
    code = d.yesno("Do you want to restore the configuration backup? "
                   "This will overwrite the current configuration.",
                   backtitle=self.description,
                   height=6,
                   width=57,
                   defaultno=True)
    if code != d.OK:
        return 0
    openmediavault.rpc.call("Config", "revertChanges", {"filename": configbak})
    print("Configuration backup successfully restored.")
    return 0
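# Why humansorted() matters for '<filename>.<revision>' backups: with the
# builtin sort, revision 10 would sort before revision 9, and pop() would
# return the wrong "latest" file. Paths here are illustrative.
import natsort

configbaks = ["config.xml.9", "config.xml.10", "config.xml.2"]
print(sorted(configbaks)[-1])               # config.xml.9  (wrong "latest")
print(natsort.humansorted(configbaks)[-1])  # config.xml.10 (latest revision)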
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('csv', nargs="+", type=str, help='Full path to CSV file')
    parser.add_argument('-o', required=True, type=str, default="", help='output filename')
    args = parser.parse_args()
    c = args.csv
    m = "*"

    for csv in args.csv:
        if ".txt" not in csv:
            print("Suspicious input file:", csv)
            exit(1)

    matplotlib.rcParams['agg.path.chunksize'] = 10000
    fig = plt.figure()

    bsize_labels = []
    bsize_xs = []
    bsize_ys = []
    bsize_dicts = []

    csvs = humansorted(args.csv)
    bsize_dict = dict()
    for csv in csvs:
        bsize = int(basename(dirname(csv)).split("_")[-1])
        if bsize not in bsize_dict:
            bsize_dict[bsize] = []
        bsize_dict[bsize].append(csv)

    with Pool(processes=NUM_THREADS) as pool:
        # x, y, bsize = pool.map(parse_bsize, bsize_dict.items())
        ret = pool.map(parse_bsize, bsize_dict.items())
        for x, y, bsize in ret:
            bsize_xs.append(x)
            bsize_ys.append(y)
            bsize_labels.append(bsize)

    # for bsize, lis in bsize_dict.items():
    #     x = []
    #     y = []
    #     bsize_labels.append(bsize)
    #     for csv in lis:
    #         e = Experiment(csv)
    #         size = None
    #         total_dups = None
    #         if "pf" in csv:
    #             size = basename(dirname(csv)).split("_")[2]
    #             total_dups = e.print_duplicate_faults_64k()
    #         else:
    #             size = basename(dirname(csv)).split("_")[1]
    #             total_dups = e.print_duplicate_faults_4k()
    #         size = int(size)
    #         x.append(size)
    #         y.append(total_dups)
    #     bsize_xs.append(x)
    #     bsize_ys.append(y)

    evenly_spaced_interval = np.linspace(0, 1, len(bsize_xs))
    colors = [cm.rainbow(x) for x in evenly_spaced_interval]
    for x, y, l, c in zip(bsize_xs, bsize_ys, bsize_labels, colors):
        plt.plot(x, y, "b+", label=l, marker=".", color=c, linestyle="-")

    plt.xlabel("Problem Size")
    plt.ylabel("Duplicates")
    plt.legend()

    figname = args.o
    if ".png" not in figname:
        figname += ".png"
    plt.tight_layout()
    print('saving figure:', figname)
    fig.savefig(figname, dpi=500)
    plt.close(fig)
def sort(self):
    for key, item in natsort.humansorted(self.items, key=lambda s: s):
        # print '{0}\t{1}'.format( key, item )
        yield item
    return
def MerchantFlip():
    JSON = JSONData()
    # print("working!")
    AllProfit = 0
    toplist = []
    print("replied to a message!")

    # LOOP THROUGH ALL PRODUCTS
    for x in NPCPrices["productIds"]:
        # print(x)
        Product = x
        NormalPName = Product

        # npc data, local
        try:
            NPCSellPrice = NPCPrices["productIds"][Product]["MerchantSellPrice"]
            NPCBuyPrice = NPCPrices["productIds"][Product]["MerchantBuyPrice"]
            Merchant = NPCPrices["productIds"][Product]["Merchant"]
            Product = NPCPrices["productIds"][Product]["NormalName"]
            # print(Product)
            # print(NPCPrices)
            # print(NPCPrices["productIds"][Product]["MerchantSellPrice"])
        except KeyError:
            # skip products without complete merchant data (the original
            # `a = 0` no-op would have carried stale values into this pass)
            continue

        # bazaar data
        sellPrice = JSON[Product]['quick_status']['sellPrice']
        buyPrice = JSON[Product]['quick_status']['buyPrice']

        # round the prices
        rSellPrice = round(sellPrice, 2)
        rBuyPrice = round(buyPrice, 2)

        # convert the prices to strings
        frSellPrice = str(rSellPrice)  # float -> str
        frBuyPrice = str(rBuyPrice)  # float -> str

        # Can you even buy that item?
        if NPCBuyPrice != "CantBuyThat":
            # Do you make a profit?
            fffNPCBuyPrice = float(NPCBuyPrice)
            Profit = rSellPrice - fffNPCBuyPrice
            rProfit = round(Profit, 2)
            srProfit = str(rProfit)
            frProfit = float(rProfit)
            # would you make a profit?
            if frProfit > 0:
                # add the profit value to the list and calculate total profit
                fTotalProfit = frProfit * 640
                RoundedfTotalProfit = round(fTotalProfit)
                strTotalProfit = str(RoundedfTotalProfit)
                AllProfit = AllProfit + RoundedfTotalProfit
                toplist.append("You can make " + srProfit + "$ by selling **" +
                               NormalPName + "** to the bazaar, or " +
                               strTotalProfit + "$ if you flip 640, after you "
                               "bought it from the " + Merchant + "-merchant.")

    strAllProfit = str(AllProfit)

    # print the stuff
    sortedtoplist = humansorted(toplist)
    sortedtoplist.reverse()
    return sortedtoplist
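# The sort-then-reverse above is equivalent to a single call with
# reverse=True, since humansorted() returns a new list. Messages below are
# illustrative:
from natsort import humansorted

toplist = ["You can make 9.5$ ...", "You can make 12.25$ ..."]
print(humansorted(toplist, reverse=True))
# ['You can make 12.25$ ...', 'You can make 9.5$ ...']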