def get_the_big_dict(data, values=""):
    # Recursively flatten a nested dict into a single-level dict whose keys
    # are the space-joined paths of nested keys (Python 2 code: iteritems()).
    #
    # :param data:   nested dict to flatten.
    # :param values: key-path prefix accumulated during recursion.
    # :returns: flat dict; keys are normalized (lowercased, "symbol" and
    #           parentheses stripped, words de-duplicated and sorted).
    result = {}
    for key, value in data.iteritems():
        if key == "links":
            # rename "links" entries to "ref" and take a shallow dict copy
            value = dict(value)
            key = "ref"
        str_key = values + " " + key
        result[str_key] = value
        if not isinstance(value, dict):
            try:
                # for integer leaves, additionally index the parent dict by
                # "<path> <value>" (lookup-by-value support)
                # NOTE(review): the bare except silently skips every
                # non-integer leaf; the assert only holds for whole numbers.
                assert int(str(value)) == float(str(value))
                result[str_key + " " + str(value)] = data
            except:
                pass
        else:
            # recurse into sub-dicts with this path as the new prefix
            res = get_the_big_dict(value, values=str_key)
            res = dict(sorted(res.items()))
            # re-insert "isotopes" so it lands last (only meaningful with
            # insertion-ordered dicts; a no-op under Python 2's dict)
            if "isotopes" in res:
                res["isotopes"] = res.pop("isotopes")
            result.update(res)
    # normalize keys: strip/lower, drop "symbol" and parentheses
    # NOTE(review): .replace(" ", " ") is a no-op as written -- possibly a
    # collapsed double-space -> single-space replacement; confirm upstream.
    result = dict((key.strip().lower().replace(" ", " ").replace("symbol", "").replace("(", "").replace(")", ""), value) for key, value in result.iteritems())
    # de-duplicate and sort the words of every key
    return dict((" ".join(sorted(set(key.split(" ")))), value) for key, value in result.iteritems())
def compare_plot(var, plot_range, weight, cut, **kwargs):
    """Draw, merge and stack MC vs. data histograms for a variable and save
    the canvas as a PDF.

    :param var:        variable/expression to histogram.
    :param plot_range: histogram binning/range.
    :param weight:     per-event weight (applied to MC only).
    :param cut:        selection cut (stringified before drawing).
    :param kwargs:     forwarded to the stack plotter / legend; 'filename'
                       selects the output file (default "plot").
    :returns: (stack, canvas, plot, legend, lumi-box) -- keep references
              alive so ROOT does not garbage-collect the drawn objects.
    """
    histsD = dict()
    # draw and style the MC samples
    for samp in samples_mc:
        histsD[samp.name] = samp.drawHistogram(var, str(cut), weight=weight, plot_range=plot_range)
        Styling.mc_style(histsD[samp.name].hist, samp.name)
    # scale MC to the total integrated luminosity
    for name, hist in histsD.items():
        hist.normalize_lumi(lumi_total)
    # draw and style the data samples (no weight, no lumi scaling)
    for samp in samples_data:
        histsD[samp.name] = samp.drawHistogram(var, str(cut), plot_range=plot_range)
        Styling.data_style(histsD[samp.name].hist)
    # unwrap to the underlying TH1 objects
    hists_thD = dict()
    for (k,v) in histsD.items():
        hists_thD[k] = v.hist
    # merge per-sample histograms into named groups; QCD gets its own group
    merge_cmd = copy.deepcopy(merge_cmds)
    merge_cmd["QCD (MC)"] = ["QCDMu"]
    merged = merge_hists(hists_thD, merge_cmd)
    # "single #mu" is the data group; everything else goes on the MC stack
    stack = dict()
    stack["mc"] = [merged[name] for name in merged.keys() if name!="single #mu"]
    stack["data"] = [merged["single #mu"]]
    canv = ROOT.TCanvas()
    pl = plot_hists_stacked(canv, stack, **kwargs)
    # legend lists data first, then MC in reverse stack order
    leg = legend(stack["data"] + stack["mc"][::-1], **kwargs)
    lb = lumi_textbox(lumi_total)
    canv.SaveAs(kwargs.get("filename", "plot") + ".pdf")
    return stack, canv, pl, leg, lb
def _get_config_files(self, paths=None):
    """
    Scan the given directories for config files.

    Config files must be named ``<style>-<type>.ini|.cfg``; improperly
    named files are skipped with a warning.

    :param list paths: list of paths to scan.
    :returns: dictionary of config names/filenames.
    :rtype: dict
    """
    cfg_files = dict()
    if not paths:
        # bugfix: previously returned a list here, contradicting the
        # documented dict return type.
        return cfg_files
    for path in paths:
        for fn in os.listdir(path):
            bn, fext = os.path.splitext(fn)
            if fext.lower() not in ['.ini', '.cfg']:
                continue
            cfg_file = os.path.join(path, fn)
            names = bn.split('-')
            # bugfix: names with more than one '-' previously crashed the
            # tuple unpacking below; require exactly "<style>-<type>".
            if len(names) != 2:
                log.warning('improperly named config file: "%s"' % cfg_file)
                continue
            style_name, cfg_type = names
            if style_name not in cfg_files:
                cfg_files[style_name] = dict(fonts=None, palette=None)
            log.debug('adding %s config "%s" from "%s".' % (cfg_type, style_name, cfg_file))
            cfg_files[style_name][cfg_type] = cfg_file
    return cfg_files
def scanNodeTypes(self, path):
    """
    Scan the given directory for node types.

    A node type is a "<name>.py" module paired with a "<name>.mtd"
    metadata file in the same directory; both files must exist.

    :param str path: path to scan.
    :returns: dict mapping node name -> {'module': ..., 'metadata': ...}.
    """
    found = dict()
    if not os.path.exists(path):
        log.warning('node path "%s" does not exist.' % path)
        return found
    for entry in os.listdir(path):
        if entry in ['README']:
            continue
        base = os.path.splitext(os.path.basename(entry))[0]
        module_file = os.path.join(path, '%s.py' % base)
        metadata_file = os.path.join(path, '%s.mtd' % base)
        has_module = os.path.exists(module_file)
        has_metadata = os.path.exists(metadata_file)
        if has_module and has_metadata:
            found[base] = dict(module=module_file, metadata=metadata_file)
        else:
            # report whichever half of the pair is missing
            if not has_module:
                log.warning('cannot find "%s" module %s' % (base, module_file))
            if not has_metadata:
                log.warning('cannot find "%s" metadata %s' % (base, metadata_file))
    return found
def generate(self):
    """Rebuild the app-indicator menu for the current timer state.

    Python 2 / PyGTK code (dict.iteritems, gtk module).
    Each menu entry maps a label to the name of a bound method on the
    timer, which is connected to the item's "activate" signal.
    """
    # Check if there are something close to switch in python
    # NOTE(review): state meanings inferred from the labels (0 idle,
    # 1 working, 2 break pending, 3 on break) -- confirm against the timer.
    # NOTE(review): menu_items is unbound for states outside 0-3.
    if self.timer.state == 0:
        menu_items = dict([('Start Working', 'start_counting'), ('Options', 'options'), ('Exit', 'quit')])
    elif self.timer.state == 1:
        menu_items = dict([('Pause', 'pause_counting'), ('Stop Working', 'stop_counting'), ('Options', 'options'), ('Exit', 'quit')])
    elif self.timer.state == 2:
        menu_items = dict([('Start Break', 'start_counting'), ('Options', 'options'), ('Exit', 'quit')])
    elif self.timer.state == 3:
        menu_items = dict([('Pause', 'pause_counting'), ('Stop Break', 'stop_counting'), ('Options', 'options'), ('Exit', 'quit')])
    # build one MenuItem per (label, timer-method-name) pair
    for key, value in menu_items.iteritems():
        item = gtk.MenuItem()
        item.set_label(key)
        item.connect("activate", getattr(self.timer, value))
        self.menu.append(item)
    self.menu.show_all()
    self.timer.ind.set_menu(self.menu)
def processar_entrada(tabela):
    """
    Build the numeric matrix and the mappings between schedule times and
    row ranges, and between teachers and column ranges.

    :param tabela: raw table (list of rows). Row 0 holds the teacher
        names; column 0 holds time-slot labels (cells that are empty or
        start with '!' do not open a new slot).
    :returns: (matrix, horarios, professores) where the dicts map a label
        to the tuple of row/column indices it spans.
    """
    # bugfix: compare strings with '!=' instead of 'is not' -- identity
    # comparison against '' is implementation-dependent and raises a
    # SyntaxWarning on modern Python.
    horarios_t = [[horario[0].split('!')[0], indice - 1]
                  for indice, horario in enumerate(tabela)
                  if horario[0] != '' and not horario[0].startswith('!')]
    horarios = dict()
    for i, hora_t in enumerate(horarios_t):
        # a slot spans until the next slot starts (or the last data row)
        if i + 1 != len(horarios_t):
            final = horarios_t[i + 1][1]
        else:
            final = len(tabela) - 1
        horarios[hora_t[0]] = tuple(range(hora_t[1], final))
    professores_t = [[nome, indice - 1]
                     for indice, nome in enumerate(tabela[0])
                     if nome != '']
    professores = dict()
    for i, nome_t in enumerate(professores_t):
        if i + 1 != len(professores_t):
            final = professores_t[i + 1][1]
        else:
            final = len(tabela) - 1
        professores[nome_t[0]] = tuple(range(nome_t[1], final))
    # numeric body: skip the header row and the label column
    tabela = np.array([[float(v) for v in valor[1:] if v != '']
                       for valor in tabela[1:]])
    return tabela, horarios, professores
def setUp(self):
    """Create the fixture data and dump each item to its own temp file.

    Item 0 is always pickled; items 1 and 2 are written as JSON/YAML when
    the corresponding loader is supported, otherwise pickled as well.
    """
    # (removed an unused local `dataset = []` from the original)
    self.data = [
        dict(a=dict(b=1, c=2, d="aaa")),
        dict(e=[1, 2, 3]),
        dict(f="ggg"),
    ]
    # bugfix: the dump files were opened without ever being closed; use
    # context managers so handles are released immediately.
    self.files = [tempfile.mkstemp(suffix=".pickle")[1]]
    with open(self.files[0], "wb") as f:
        pickle.dump(self.data[0], f)
    if self.is_loader_supported("json"):
        self.files.append(tempfile.mkstemp(suffix=".json")[1])
        with open(self.files[1], "w") as f:
            json.dump(self.data[1], f)
    else:
        self.files.append(tempfile.mkstemp(suffix=".pickle")[1])
        with open(self.files[1], "wb") as f:
            pickle.dump(self.data[1], f)
    if self.is_loader_supported("yaml"):
        self.files.append(tempfile.mkstemp(suffix=".yaml")[1])
        with open(self.files[2], "w") as f:
            yaml.dump(self.data[2], f)
    else:
        self.files.append(tempfile.mkstemp(suffix=".pickle")[1])
        with open(self.files[2], "wb") as f:
            pickle.dump(self.data[2], f)
def node_snapshot(self, nodes=None):
    """
    Returns a snapshot of just the graph.

    :param nodes: node name or list of node names to filter on; all nodes
        are returned when empty.
    :returns: dictionary of nodes & connected edges
        ({'nodes': [...], 'links': [...]}).
    :rtype: dict
    """
    # bugfix: avoid the mutable default argument; also use isinstance
    # instead of exact type() comparison so subclasses of list/tuple work.
    if nodes:
        if not isinstance(nodes, (list, tuple)):
            nodes = [nodes]
    self.evaluate()
    data = self.snapshot()
    result = dict()
    node_data = data.get('nodes', dict())
    # filter just the nodes we're querying.
    node_data_filtered = []
    link_data_filtered = []
    for node in node_data:
        # filter nodes
        if nodes and node.get('name') not in nodes:
            continue
        dagnode = self.get_node(node.get('name'))
        node_data_filtered.append(node)
        link_data_filtered.extend(self.connectedEdges(dagnode))
    result.update(nodes=node_data_filtered)
    result.update(links=link_data_filtered)
    return result
def merge_dicts(ionization, isotopes): element_data = dict() # Processes Ionization Levels for elem, ion in ionization.iteritems(): symbol = SYMBOL_MAPPING.get(elem.lower(), elem.lower()) ion = lower_keys(ion) ion['protons'] = ion['atomic number'] ion = dict(sorted(ion.items())) element_data[symbol] = ion # Process Isotopes for elem, isotope in isotopes.iteritems(): symbol = SYMBOL_MAPPING.get(elem.lower(), elem.lower()) isotope = lower_keys(isotope) isotope = dict(sorted(isotope.items())) isotope["isotopes"] = isotope.pop("isotopes") if "standard atomic weight" in isotope: set_value( element_data[symbol], ["atomic weight", "atomic mass", "mass", "amu", "weight"], isotope["standard atomic weight"]) for each in isotope["isotopes"]: each = lower_keys(each) set_value( each, ["atomic weight", "atomic mass", "mass", "amu", "weight"], each["relative atomic mass"]) mass_number = each["mass number"] if symbol in element_data: element_data[symbol][mass_number] = dict(sorted(each.items())) return element_data
def setUp(self):
    """Create a temp file path (with the class suffix) and fixture data."""
    _fd, self.path = tempfile.mkstemp(suffix=self.path_suffix)
    self.data = {
        "a": {"b": 1, "c": 2, "d": "aaa"},
        "e": [1, 2, 3],
        "f": "ggg",
    }
def __init__(self, parent=None, **kwargs):
    """Initialize the template node.

    :param parent: optional parent object.
    Extra keyword arguments are stored in the internal data dictionary.
    """
    self._parent = parent
    self._data = {}
    self._default_xform = "Node Transform"
    self._default_attrs = "Node Attributes"
    # dictionary to hold parsed data
    self._template_data = {}
    self._data.update(**kwargs)
def create_objects(cls):
    '''Create the class-level fixtures: the HR virtual network and its
    web/logic/db VMs, stored on cls.vns / cls.vms (cls.policys is left
    empty here).
    '''
    cls.vns = dict()
    cls.vms = dict()
    cls.policys = dict()
    for vn_name in ['hr']:
        cls.vns[vn_name] = cls.create_only_vn()
        # one VM of each role inside the network
        for vm_role in ['web', 'logic', 'db']:
            cls.vms['%s_%s' % (vn_name, vm_role)] = cls.create_only_vm(vn_fixture=cls.vns[vn_name])
def defaults(self):
    """
    Returns default node attributes.

    Empty dict when no defaults key is configured or the key is missing
    from the node data.

    returns: (dict) - attributes dictionary.
    """
    if self._default_attrs is None:
        return dict()
    # idiom: test membership on the dict directly instead of .keys()
    if self._default_attrs in self._data:
        return self._data.get(self._default_attrs)
    return dict()
def create_common_objects(cls):
    '''Create class specific objects
       1) Create VNs HR and ENG
       2) Create VMs Web, Logic, DB in each VN
       3) Create Network-Policy to interconnect VNs (for route leaking)
    (Python 2 code: dict.itervalues().)
    '''
    cls.vns = dict(); cls.vms = dict(); cls.policys = dict()
    # both virtual networks, each with a web/logic/db VM
    for vn in ['hr', 'eng']:
    # for vn in ['hr']:
        cls.vns[vn] = cls.create_only_vn()
        for vm in ['web', 'logic', 'db']:
            cls.vms[vn+'_'+vm] = cls.create_only_vm(vn_fixture=cls.vns[vn])
    # policy that interconnects HR and ENG (route leaking)
    cls.policys['hr_eng'] = cls.setup_only_policy_between_vns(cls.vns['hr'], cls.vns['eng'])
    # NOTE(review): `assert` is stripped under python -O; raise explicitly
    # if this liveness check must always run.
    assert cls.check_vms_active(cls.vms.itervalues(), do_assert=False)
def dj():
    """Use the local database to create a record for each DJ."""
    def extract(directory):
        # collect (djname, djicon) pairs into the enclosing all_dj set and
        # return the record names found in the directory
        names = dict()
        for record in ls(directory):
            with open(directory + record, "rb") as f:
                disc = json.loads(f.read().decode())
            all_dj.update((dj["djname"], dj["djicon"]) for dj in disc["ranking"])
            names[disc["name"]] = (9999, 0)  # default rank and score
        return names

    def fill(mode, directory):
        # write each DJ's (rank, score) for every record of this mode
        for record in ls(directory):
            with open(directory + record, "rb") as f:
                disc = json.loads(f.read().decode())
            for dj in disc["ranking"]:
                all_dj[dj["djname"]][mode][disc["name"]] = (dj["rank"], dj["score"])

    # parallel tuples: mode/chart label and the database path that holds it
    all_type = (game.mode.star, game.chart.nm["str"], game.chart.hd["str"], game.chart.mx["str"], game.chart.ex["str"], game.mode.club, game.mode.mission)
    all_db_path = (path.db.star, path.db.nm, path.db.hd, path.db.mx, path.db.ex, path.db.club, path.db.mission)
    # extract all djnames and disc/club/mission names
    all_dj = set()
    all_name = dict()
    for mode, directory in zip(all_type, all_db_path):
        all_name[mode] = extract(directory)
    # convert the set into a dictionary indexable by djname
    all_dj = {dj[0]: dict(zip(("name", "icon"), (dj[0], dj[1]))) for dj in all_dj}
    # insert extracted names for each djname the dictionary (fresh copy per
    # DJ so fill() can overwrite per-DJ entries independently)
    for mode in all_name:
        for dj in all_dj:
            all_dj[dj][mode] = dict(all_name[mode])
    del all_name  # no longer needed
    # fill scores
    for mode, directory in zip(all_type, all_db_path):
        fill(mode, directory)
    # write dj index
    with open(path.index.dj, "wb") as f:
        f.write(json.dumps([{"id": zlib.crc32(dj.encode()), "name": dj} for dj in sorted(all_dj)], indent=1).encode())
    # write dj records (popitem drains the dict, freeing memory as we go)
    while all_dj:
        key, value = all_dj.popitem()
        with open("{}{}.json".format(path.db.dj, zlib.crc32(key.encode())), "wb") as f:
            f.write(json.dumps(value, indent=1).encode())
def create():
    """Create the index.

    Walks every page of every game mode, and records each entry's name
    with its page number and a zeroed timestamp, then writes the whole
    index to path.index.db as JSON.
    """
    # parallel tuples: mode label, URL template, page count, and the JSON
    # key holding the record name for that mode
    all_modes = (game.mode.star, game.mode.pop, game.mode.club, game.mode.mission)
    all_ids = (url.id.star, url.id.pop, url.id.club, url.id.mission)
    all_pages = (site.pages.star, site.pages.pop, site.pages.club, site.pages.mission)
    # generator is fine: consumed exactly once by the zip below
    all_keys = (key["name"] for key in (site.key.star, site.key.pop, site.key.club, site.key.mission))
    index = dict()
    for mode, address, end, key in zip(all_modes, all_ids, all_pages, all_keys):
        index[mode] = dict()
        for page in range(1, end + 1):
            # NOTE(review): timestamp 0 presumably means "never fetched";
            # confirm against the consumer of this index.
            for record in urlopen_json(address.format(page), "Create index"):
                index[mode][record[key]] = dict(zip(("timestamp", "page"), (0, page)))
    with open(path.index.db, "wb") as f:
        f.write(json.dumps(index, indent=1).encode())
def __init__(self, name=None, **kwargs):
    """Initialize a dag node.

    :param str name: node name (falls back to ``self.default_name``).
    Remaining keyword arguments seed color, geometry, style, metadata and
    attribute properties. (Python 2 code: print statement, iteritems.)
    """
    self._attributes = dict()
    self._metadata = Metadata(self)
    # event handlers
    self.nodeNameChanged = EventHandler(self)
    self.nodePositionChanged = EventHandler(self)
    self.nodeAttributeUpdated = EventHandler(self)
    # basic node attributes
    self.name = name if name else self.default_name
    self.color = kwargs.pop('color', self.default_color)
    self.docstring = ""
    self._graph = kwargs.pop('_graph', None)
    self.width = kwargs.pop('width', 100.0)
    self.base_height = kwargs.pop('base_height', 15.0)
    self.force_expand = kwargs.pop('force_expand', False)
    self.pos = kwargs.pop('pos', (0.0, 0.0))
    self.enabled = kwargs.pop('enabled', True)
    self.orientation = kwargs.pop('orientation', 'horizontal')
    self.style = kwargs.pop('style', 'default')
    # metadata
    metadata = kwargs.pop('metadata', dict())
    attributes = kwargs.pop('attributes', dict())
    # if the node metadata isn't passed from another class,
    # read it from disk
    if not metadata:
        metadata = self.read_metadata()
    # ui
    self._widget = None
    # keep an existing id when restoring from a scene, else mint a new one
    UUID = kwargs.pop('id', None)
    self.id = UUID if UUID else str(uuid.uuid4())
    self._metadata.update(metadata)
    # update attributes (if reading from scene)
    if attributes:
        print '# DEBUG: %s attributes: ' % self.Class(), attributes
        for attr_name, properties in attributes.iteritems():
            # merge into an existing attribute or create a new one
            if attr_name in self._attributes:
                self._attributes.get(attr_name).update(**properties)
            else:
                self.add_attr(attr_name, **properties)
def __init__(self, *args, **kwargs):
    """Initialize the graph manager.

    :param args: scene file path(s); any existing path is read on startup.
    Recognized kwargs: ``width``/``height`` (grid size), ``debug``.
    """
    default_width = kwargs.pop('width', 150.0)
    default_height = kwargs.pop('height', 150.0)
    # events
    self.nodesAdded = EventHandler(self)
    self.edgesAdded = EventHandler(self)
    self.graphUpdated = EventHandler(self)
    # events - TESTING
    self.graphAboutToBeSaved = EventHandler(self)
    self.graphSaved = EventHandler(self)
    self.graphAboutToBeRead = EventHandler(self)
    self.graphRead = EventHandler(self)
    self.graphRefreshed = EventHandler(self)
    #self.network = nx.DiGraph()
    self.network = nx.MultiDiGraph()  # mutliple edges between nodes
    self.mode = 'standalone'
    self.grid = Grid(5, 5, width=default_width, height=default_height)
    self.handler = None
    self.plug_mgr = PluginManager()
    self._initialized = 0
    # attributes for current nodes/dynamically loaded nodes
    self._node_types = dict()
    self.dagnodes = dict()
    # bugfix: $TMPDIR is not set on every platform; os.path.join(None, ...)
    # would raise TypeError. Fall back to the system temp directory.
    import tempfile
    self.autosave_path = os.path.join(os.getenv('TMPDIR') or tempfile.gettempdir(), 'sg_autosave.json')
    self._autosave_file = None
    # testing mode only
    self.debug = kwargs.pop('debug', False)
    # initialize the NetworkX graph attributes
    self.initializeNetworkAttributes()
    # if scene file is passed as an argument, read it
    for arg in args:
        if os.path.exists(arg):
            self.read(arg)
            continue
    if self.debug:
        self.read(os.path.join(os.getenv('HOME'), 'graphs', 'connections.json'))
def main(argv=sys.argv):
    """CLI entry point: render TEMPLATE_FILE (with optional instantiation
    data from --idata) and write the result to --output or stdout.

    Python 2 code (`print >>` syntax).
    :param argv: unused directly; optparse reads sys.argv itself.
    """
    defaults = {
        "output": "",
        "idata": "",
    }
    p = optparse.OptionParser("%prog [OPTION ...] TEMPLATE_FILE")
    p.set_defaults(**defaults)
    p.add_option("-o", "--output", help="Output file")
    p.add_option("", "--idata", help="Data path [and format] to find params passed when instantiating templates, e.g. data.pkl:pickle, ../data.json.")
    (opts, args) = p.parse_args()
    if not args:
        p.print_usage()
        sys.exit(1)
    template = args[0]
    params = dict()
    # NOTE(review): output handle deliberately left open; process exit
    # closes it.
    output = opts.output and open(opts.output, "w") or sys.stdout
    if opts.idata:
        # "--idata path[:format]" -> load template parameters from a file
        path_and_formats = parse_idata_option(opts.idata)
        params = loads_idata(path_and_formats)
    res = compile_template(template, params, is_file=True)
    print >> output, res
    sys.exit()
def parse_arguments(arguments):
    """
    Parse the argument string from the action call template, where arguments
    are surrounded in parenthesis. Make sure to parse and store default
    values as well; arguments without a default map to "".

    :param arguments: the argument string from the action call templates
    :return: the dict from argument name to default value
    """
    args = dict()
    if len(arguments) > 2:
        # remove parentheses from arguments
        arguments = arguments[1:len(arguments) - 1]
        # split args up
        for arg in arguments.split(","):
            arg = arg.strip()
            # bugfix: split on the FIRST '=' only so defaults that
            # themselves contain '=' are kept intact; also strip whitespace
            # around the name and value ("a = 1" previously kept spaces).
            name, sep, value = arg.partition("=")
            args[name.strip()] = value.strip() if sep else ""
    return args
def get_action_keys_and_values(template_path):
    """
    Read an actions template file and build a map from action token to its
    value (a function name, or the contents of a [bracketed] list).

    Lines are "token: value"; '#'-comment lines and lines of 3 characters
    or fewer are ignored.

    :param template_path: path to the actions template file.
    :return: dict mapping action token -> action value string.
    """
    # read actions file
    with open(template_path, 'r') as f:
        actions_string = f.read()
    # determine each of the actions from string
    actions = actions_string.split('\n')
    # filter out comments from actions
    filtered_actions = []
    for action in actions:
        action = action.strip()
        if not action.startswith("#") and len(action) > 3:
            filtered_actions.append(action)
    action_map = dict()
    # create a map from token to action list sequence
    for action in filtered_actions:
        # bugfix: split on the FIRST ':' only, so values containing ':'
        # (e.g. URLs) are not truncated; also skip colon-less lines that
        # previously raised IndexError.
        toke_and_func = action.split(':', 1)
        if len(toke_and_func) != 2:
            continue
        action_key = toke_and_func[0].strip()
        action_value = toke_and_func[1].strip()
        if len(action_key) > 0 and len(action_value) > 0:
            # strip [] and trailing whitespace from list
            action_value = action_value.lstrip('[').rstrip(']').strip()
            if len(action_value) > 0:
                action_map[action_key] = action_value
    return action_map
def filter_results(action_dict, curr_context):
    """Return only the entries of ``action_dict`` whose keys are valid for
    the current context."""
    relevant_keys = get_possible_action_text_mapping_keys(curr_context, action_dict)
    return {key: action_dict[key] for key in relevant_keys}
def get(mode, name, chart=game.chart.nm):
    """The complete ranking of the specified mode and name.

    Arguments:
    mode -- One of the four game modes.
    name -- The full name of a disc, disc set, or mission.
    chart -- One of the four game charts. Only relevant for Pop mode.
    """
    # pick the ranking URL template for the requested mode
    if mode == game.mode.star:
        address = url.ranking.star
    elif mode == game.mode.pop:
        address = url.ranking.pop
    elif mode == game.mode.club:
        address = url.ranking.club
    elif mode == game.mode.mission:
        address = url.ranking.mission
    else:
        raise ValueError("Invalid game mode")
    identifier = _id(mode, name)
    fields = ("rank", "djicon", "djname", "score")
    results = []
    # pages hold up to 20 records; a short page signals the end
    for page in itertools.count(1):
        addr = address.format(identifier, page)
        if mode == game.mode.pop:
            addr += "&pt={}".format(chart["int"])
        records = urlopen_json(addr, "Ranking retrieval")
        results.extend(
            [dict(zip(fields, (r["RANK"], r["DJICON"], r["DJNAME"], r["SCORE"])))
             for r in records])
        if len(records) < 20:
            break
    return results
def _get_field_doc(self, field):
    """
    Return documentation for a field in the representation.
    """
    validator_specs = [{v.__class__.__name__: v.__dict__} for v in field.validators]
    return {
        'type': field.__class__.__name__,
        'required': field.required,
        'validators': validator_specs,
    }
def full(params, network=None):
    """Build a fully-connected topology: every agent is linked to every
    other agent.

    :param params: object exposing ``agent_ids``.
    :param network: optional dict to fill in place; created when None.
    :return: dict mapping agent id -> list of all other agent ids.
    """
    agents = params.agent_ids
    if network is None:
        network = dict()
    for idx, agent in enumerate(agents):
        network[agent] = [other for j, other in enumerate(agents) if j != idx]
    return network
def _get_qss_files(self, paths=None):
    """
    Get qss files.

    :param list paths: list of paths to scan.
    :returns: dictionary of stylesheet names/filenames.
    :rtype: dict
    """
    qss_files = dict()
    if not paths:
        # bugfix: previously returned a list here, contradicting the
        # documented dict return type.
        return qss_files
    for path in paths:
        for fn in os.listdir(path):
            bn, fext = os.path.splitext(fn)
            if fext.lower() not in ['.qss', '.css']:
                continue
            qss_file = os.path.join(path, fn)
            # skip files we've already registered under another style name
            if qss_file in qss_files.values():
                continue
            style_name = self._parse_stylesheet_name(qss_file)
            if style_name is None:
                log.warning('cannot parse style name from "%s".' % qss_file)
                style_name = 'no-style'
            log.debug('adding stylesheet "%s" from "%s".' % (style_name, qss_file))
            # first stylesheet found for a style wins
            if style_name not in qss_files:
                qss_files[style_name] = qss_file
    return qss_files
def create(mode, name):
    """Create a local record of the specified mode and name.

    Arguments:
    mode -- One of the four game modes.
    name -- The full name of a disc, disc set, or mission.

    One JSON record is written per chart (pop mode has four charts; the
    other modes use the "nm" chart only). Modes with no ranking results
    produce no file.
    """
    all_charts = (game.chart.nm, game.chart.hd, game.chart.mx, game.chart.ex)
    all_pop_paths = (path.db.nm, path.db.hd, path.db.mx, path.db.ex)
    if mode == game.mode.star:
        level = [(game.chart.nm, path.db.star)]
    elif mode == game.mode.pop:
        level = zip(all_charts, all_pop_paths)
    elif mode == game.mode.club:
        level = [(game.chart.nm, path.db.club)]
    elif mode == game.mode.mission:
        level = [(game.chart.nm, path.db.mission)]
    else:
        raise ValueError("Invalid game mode")
    for chart, directory in level:
        results = ranking.get(mode, name, chart)
        if results:
            # image filenames are derived from the cleaned record name
            clean_name = clean(name)
            record = dict()
            record["name"] = name
            record["eyecatch"] = "{}.png".format(clean_name)
            record["icon"] = "{}_{}.png".format(clean_name, chart["int"])
            record["ranking"] = results
            with open(directory + clean_name + ".json", "wb") as f:
                f.write(json.dumps(record, indent=1).encode())
            print('Wrote: "{}{}.json"'.format(directory, clean_name))
def get_test_collection(self):
    """Fetch the test-time tensors from the TF graph collections.

    :returns: dict with the first entry of the 'cls_score', 'cls_prob',
        'bbox_pred' and 'rois' collections.
    """
    names = ('cls_score', 'cls_prob', 'bbox_pred', 'rois')
    return {name: tf.get_collection(name)[0] for name in names}
def font_defaults(self, platform=None, style='default'):
    """
    Builds a dictionary of font & size defaults by platform.
    (Python 2 code: dict.iteritems().)

    :param str platform: os type (defaults to the current platform).
    :param str style: style name whose font config file is read.
    :returns: font and font size defaults dictionary.
    :rtype: dict
    """
    if platform is None:
        platform = options.PLATFORM
    defaults = dict()
    def_font_config = self.config_files(style).get('fonts', None)
    # NOTE(review): os.path.exists(None) raises TypeError when the style
    # has no 'fonts' entry -- confirm configs always provide one.
    if not os.path.exists(def_font_config):
        log.error('config "%s" does not exist.' % def_font_config)
        return defaults
    parser = StyleParser(self)
    data = parser._parse_configs(def_font_config)
    data = parser._parse_platform_data(data)
    # substitute attribute names: css-style "font-size" -> "font_size"
    for attr, val in data.iteritems():
        attr = re.sub('-', '_', attr)
        defaults[attr] = val
    return defaults
def xml2Dict(self,xmlLst):
    """Recursively convert a list of XML source lines into a nested dict
    with 'name' (tag), 'attribute' (dict) and 'data' (children) keys.

    Hand-rolled parser (Python 2: `print e`); assumes one tag per line.
    NOTE(review): attribute values containing '=' are truncated by the
    single split('=') below -- confirm inputs never contain them.

    :param xmlLst: list of raw XML lines describing one element.
    :returns: dict with keys 'name', 'attribute', 'data'.
    """
    xmlDict = {}
    xmlDict["attribute"] = dict()
    xmlDict["data"] = []
    xmlDict["name"] = ""
    # XML declaration line: parse its attributes, then parse the remainder
    # of the document as the single child
    if xmlLst[0].strip().startswith("<?xml"):
        firLine = xmlLst[0].strip().split()
        for item in firLine:
            item = item.strip()
            if item.startswith("<"):
                xmlDict["name"] = item.split()[0].replace('<','').replace('>','')
            elif '=' in item:
                key,value = item.split('=')
                xmlDict["attribute"][key]=value.replace('"','')
        xmlDict["data"].append(self.xml2Dict(xmlLst[1:]))
        return xmlDict
    # skip a leading comment block (terminated by "/-->")
    # NOTE(review): the unconditional pop after the while raises IndexError
    # when the terminator is missing and the list drains -- confirm inputs.
    if xmlLst[0].startswith("<!--"):
        while xmlLst and not xmlLst[0].strip().endswith("/-->"):
            xmlLst.pop(0)
        xmlLst.pop(0)
        if not xmlLst:
            return xmlDict
    # parse this element's tag name and attributes from its opening line
    firLine = xmlLst[0].strip().split()
    xmlDict["name"] = firLine[0].split()[0].replace('<','').replace('>','')
    for item in firLine[1:]:
        item = item.strip()
        if item.endswith('>'):
            item = item[:-1].strip()
        if '=' in item:
            key,value = item.split('=')
            xmlDict["attribute"][key] = value.replace('"','')
    # leaf element: opening line only, no children
    if len(xmlLst) == 1:
        return xmlDict
    # walk the child lines (excluding this element's closing tag),
    # buffering nested elements until their closing tag is seen
    nodeName = ""
    childNode = []
    for line in xmlLst[1:-1]:
        try:
            line = line.strip()
            if nodeName:
                childNode.append(line)
                if line == "</%s>" % nodeName:
                    # nested element complete: recurse on its buffered lines
                    xmlDict["data"].append(self.xml2Dict(childNode))
                    childNode = []
                    nodeName = ""
            elif line.endswith("/>"):
                # self-closing child element
                xmlDict["data"].append(self.xml2Dict([line]))
            elif line.startswith("<") and not line.startswith("<!--"):
                # opening tag of a nested child: start buffering
                lineLst = line.split()
                nodeName = lineLst[0].split()[0].replace('<','').replace('>','')
                childNode.append(line)
        except Exception as e:
            print e
    return xmlDict
class InputComponent(Component):
    """
    Input Component class.

    Attaching this component to an Entity enables it to perform actions
    driven by user input.
    """

    # Default type/name the component will have
    DEFAULT_TYPE = "input"

    # idiom: plain dict literal instead of the redundant dict({...}) copy
    defaults = {"actions": None}

    def __init__(self, *args, **kwargs):
        """Input initialization."""
        super(InputComponent, self).__init__(*args, **kwargs)
def next_feed(self, batch_data=None):
    """Build the feed dict for the next step.

    :param batch_data: optional explicit batch (list/tuple with one array
        per placeholder of a single tower); when None, data is pulled from
        the internal iterator.
    :returns: (feed_dict, batch_size) where batch_size is per-GPU.
        NOTE(review): in the iterator path (batch_data is None) batch_size
        is never assigned, so the final return raises NameError -- confirm
        whether that path is ever used with this return.
    """
    if self._data_iter is None and batch_data is None:
        raise ValueError('No input data.')
    feed_dict = dict()
    if batch_data is None:
        # pull one blob tuple per tower's input list from the iterator
        for inputs in self._input_list:
            blobs = next(self._data_iter)
            for i, inp in enumerate(inputs):
                inp_shape = inp.get_shape().as_list()
                if None in inp_shape:
                    # dynamic shape: feed as-is
                    feed_dict[inp] = blobs[i]
                else:
                    feed_dict[inp] = blobs[i].reshape(*inp_shape)
    else:
        assert isinstance(batch_data, list) or isinstance(
            batch_data, tuple), "Input data should be list-type."
        assert len(batch_data) == len(
            self._input_list[0]), "Input data is incomplete."
        batch_size = self.cfg.batch_size
        if self._input_list[0][0].get_shape().as_list()[0] is None:
            # dynamic batch dimension: zero-pad so it splits evenly
            # across the GPUs
            # fill batch
            for i in range(len(batch_data)):
                batch_size = (len(batch_data[i]) + self.cfg.num_gpus - 1) // self.cfg.num_gpus
                total_batches = batch_size * self.cfg.num_gpus
                left_batches = total_batches - len(batch_data[i])
                if left_batches > 0:
                    batch_data[i] = np.append(
                        batch_data[i],
                        np.zeros((left_batches, *batch_data[i].shape[1:])),
                        axis=0)
                    self.logger.warning(
                        "Fill some blanks to fit batch_size which wastes %d%% computation" % (
                            left_batches * 100. / total_batches))
        else:
            assert self.cfg.batch_size * self.cfg.num_gpus == len(batch_data[0]), \
                "Input batch doesn't fit placeholder batch."
        # slice the (padded) batch into one per-GPU chunk per tower
        for j, inputs in enumerate(self._input_list):
            for i, inp in enumerate(inputs):
                feed_dict[inp] = batch_data[i][j * batch_size:(j + 1) * batch_size]
        #@TODO(delete)
        assert (j + 1) * batch_size == len(batch_data[0]), 'check batch'
    return feed_dict, batch_size
def _get_items(self, values, default_key="id", format_key=None):
    """
    Return a dictionary with the parsed base objects.

    The function detects whether ``values`` is a collection or a single
    value (list, objects, ids, etc.). The returned dictionary maps a key
    derived from each item -- via ``default_key``, typically "id", "name"
    or "type" -- to the resolved item.

    ``format_key`` optionally post-processes keys through an eval'd
    expression, e.g. format_key="{}.lower()" lowercases them.
    """
    result = dict()
    if values is None:
        return result

    def _store(raw_value):
        # resolve the raw value and index it in `result`
        # (refactor: this logic was duplicated verbatim for the collection
        # and single-value branches)
        value = self._get_item(raw_value)
        # support multiple items when not keyed by id
        if isinstance(value, (Base)) and default_key != "id":
            key = str(getattr(value, value.key))
        else:
            key = str(getattr(value, default_key))
        if format_key is None:
            result[key] = value
        else:
            # SECURITY: eval() of a caller-supplied expression -- only pass
            # trusted format_key values.
            result[eval(format_key.format("key"))] = value

    if is_collection(values):
        for value in values:
            if value is None:
                continue
            _store(value)
    else:
        _store(values)
    return result
def __init__( self, figure=None, ax=None, **kwargs):
    # "=None" added by Ronny; to use it just as text field
    """Tk window wrapping a matplotlib figure plus dynamic entry widgets.

    :param figure: matplotlib figure to embed (None = text-field use only).
    :param ax: axes within the figure.
    Recognized kwargs: 'title', 'func', 'kwargs' (args for func),
    'widgets', 'widgetprops' (orientation/position of the widget bar).
    """
    self.master = Tk.Tk()
    # extract keyword arguments
    title = kwargs.get('title', 'Matplotlib window')
    self.master.title(title)
    # get figure
    self.figure = figure
    self.ax = ax
    self.func = kwargs.get('func', None)
    self.funkwargs = kwargs.get('kwargs', dict())
    self.widgets = kwargs.get('widgets', dict())
    self.wprops = kwargs.get('widgetprops', dict(orientation='h', position='bottom'))
    self.bardir = self.wprops['orientation']
    # default validation command args (Tk %-substitution codes)
    self.vcmdargs = ['%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W']
    # validation variables
    self.current_problem = ""
    # label variables
    self.statusmessage = Tk.StringVar(value="Ready")
    # empty data
    self.data = dict()
    self.entries = dict()
    self.funcs = dict()
    self.validation = dict()
    self.fmts = dict()
    self.onclickvalue = dict()
    # populate matplotlib drawing area and buttons
    self.populate()
def mesh_rect(params, network=None):
    """Build a rectangular-mesh topology with side length floor(sqrt(N)).

    Agents are laid out row-major on a grid of width floor(sqrt(N));
    each agent links to its vertical and horizontal grid neighbours.

    :param params: object exposing ``agent_ids``.
    :param network: optional dict to fill in place; created when None.
    :return: dict mapping agent id -> list of neighbouring agent ids.
    """
    from math import floor, sqrt
    agents = params.agent_ids
    if network is None:
        network = dict()
    total = len(agents)
    side = int(floor(sqrt(total)))
    for i in range(total):
        neighbours = list()
        if i - side >= 0:
            neighbours.append(agents[i - side])  # node above i
        if i + side < total:
            neighbours.append(agents[i + side])  # node below i
        if i % side > 0 and i > 0:
            neighbours.append(agents[i - 1])     # node left from i
        if (i + 1) % side > 0 and i + 1 < total:
            neighbours.append(agents[i + 1])     # node right from i
        network[agents[i]] = neighbours
    return network
def map(self, name, properties, connection_type='input', verbose=False): """ Maps data dictionary to an Attribute. :param str name: attribute name. :param dict properties: attribute dictionary. :param str connection_type: connection type (input or output). :returns: attribute node. :rtype: Attribute """ # connection properties max_connections = properties.pop('max_connections', 1) attr_type = None #print '- Mapping: "%s.%s": ' % (self.name, name) #print json.dumps(properties, indent=5) pdict = dict() # attribute properties (ie 'label', 'desc', 'connection_type') for property_name in properties: #print ' - updating property: "%s.%s:%s' % (self.name, name, property_name) pattrs = properties.get(property_name) #print '# DEBUG: pattrs: ', pattrs if not util.is_dict(pattrs): continue property_value = pattrs.get('value') property_type = pattrs.get('type') if property_name == 'default': attr_type = property_type.lower() #print '# DEBUG: "%s.%s" default: "%s"' % (self.name, name, attr_type.upper()) # {'label': 'Name'} pdict[property_name] = property_value pdict['attr_type'] = attr_type return self.add_attr(name, connectable=True, connection_type=connection_type, max_connections=max_connections, user=False, **pdict)
def __new__(cls, title, **attributes): '''Ties name of component to a specific instance, which is shared by all components of the same template''' # new_class = type(type_name, type_class_bases, type_attributes) defaults = dict((k, v) for k, v in attributes.items()) if title not in cls.Catalog: Component.ComponentTypes[title] = cls template_cls = type(title, (Component, ), attributes) template_cls.Catalog = {} template_cls.defaults = defaults cls.Catalog[title] = template_cls else: template_cls = cls.Catalog[title] for attr in attributes: if not hasattr(template_cls, attr): msg = 'Component attribute mismatch: {} vs {}' msg = msg.format(attributes, template_cls.defaults) raise RuntimeError(msg) return template_cls
class Liquidcontainer(Container):
    # Container specialized for liquids: holds a single liquid type and a
    # numeric fill level (capacity/filled/unit come from `defaults`).
    defaults = dict([('capacity', 100), ('filled', 0), ('unit', 'litre')])

    def __init__(self, e: Entity, *args, **kwargs) -> None:
        super().__init__(e, *args, **kwargs)
        # expose fill() directly on the owning entity
        e.__setattr__('fill', self.fill)

    @property  # only one content per liquid container
    def content(self):
        # NOTE(review): self.__content is never initialized in this class;
        # first access depends on state set elsewhere (setter with a list,
        # or the Container base) -- confirm.
        try:
            return self.__content[0]
        except IndexError:
            # empty list: return the (empty) backing list itself
            return self.__content

    @content.setter
    def content(self, value):
        # a list replaces the backing store; a single value replaces the
        # current (single) content in place
        if isinstance(value, list):
            self.__content = value
        else:
            self.__content.clear()
            self.__content.append(value)

    def fill(self, liquid: Entity, volume=None):
        """Add `volume` of `liquid` (fill to full capacity when volume is
        None or 0); raises on position/type/liquid mismatch."""
        if not self.same_position(liquid):
            raise DifferentPositionException(self.entity, liquid)
        if 'liquid' not in liquid.components.keys():
            raise NoSuchComponentException('liquid', liquid)
        elif self.is_empty():
            self.content = liquid
        elif self.content.name != liquid.name:
            raise MixedLiquidsException(self, liquid)
        # `volume or self.capacity` treats 0/None as "fill completely"
        self.filled += volume or self.capacity
        self.update_status()

    def pour(self, volume: int, recipient: Entity):
        """Transfer `volume` into `recipient` as a new Liquid entity."""
        self.filled -= volume
        liquid = Liquid(self.content.type)
        recipient.receive(
            liquid
        )  # todo drink action with Character drinking Liquid from Liquidcontainer
        self.update_status()
def toJson(dbName):
    """
    Serialize every table of a SQLite database to a JSON string.

    :param dbName: path to the SQLite database file.
    :return: JSON string mapping table name -> list of row dicts.
    """
    db = sqlite3.connect(dbName)
    try:
        # Row factory lets each record convert cleanly to a dict
        db.row_factory = sqlite3.Row
        cursor = db.cursor()
        data = {}
        tables = [
            table[0] for table in cursor.execute(
                'SELECT name FROM sqlite_master WHERE type="table";').fetchall()
        ]
        for table in tables:
            # quote the identifier (escaping embedded quotes) so table
            # names with spaces or special characters work
            quoted = '"%s"' % str(table).replace('"', '""')
            data[table] = [
                dict(rec) for rec in cursor.execute('SELECT * FROM ' + quoted)
            ]
        return json.dumps(data, indent=4)
    finally:
        # bugfix: the connection was never closed
        db.close()
def grid_search_tabular(self, learning_agent, agent_name="", alphas=(0.1, 0.2, 0.3, 0.5, 0.8), epsilons=(20,), num_e=100, trail=1, path="Result/", decay=True, more=""):
    """Run ``learning_agent`` over a grid of (alpha, epsilon) settings,
    pickling and plotting the trails for each setting.

    :param learning_agent: callable(num_e, alpha, epsilon, decay) -> result.
    :param agent_name: subdirectory name used in the output path.
    :param alphas/epsilons: grid values to sweep.
    :param num_e: episodes per run; trail: repetitions per setting.
    :param path: output root; more: extra suffix for the file name.
    :param decay: forwarded to the agent.
    :returns: dict mapping (alpha, epsilon) -> result of the last trail.
    """
    # (fixes vs. original: the epsilon==0 branch was identical to its else
    # branch and has been merged; an unused local q_table allocation was
    # removed; mutable list defaults became tuples; the pickle file handle
    # is now closed.)
    results = dict()
    for alpha in alphas:
        for epsilon in epsilons:
            trails = []
            for i in range(trail):
                result = learning_agent(num_e=num_e, alpha=alpha, epsilon=epsilon, decay=decay)
                trails.append(result)
                if i % 20 == 0:
                    print("Now at: ", i)
            print("Done with ", (alpha, epsilon))
            name = path + self.env_name + "/" + self.policy_type + "/" + "/" + agent_name + "/" + str(
                alpha) + "_" + str(epsilon) + more
            # drop dots so "0.1" becomes "01" in the file name
            name = name.replace(".", "")
            with open(name, "wb") as fh:
                pickle.dump(trails, fh)
            plot_trails(trails, name)
            results.update({(alpha, epsilon): result})
    return results
def read(self, filename, force=False): """ Read a graph from a saved scene. :param str filename: file to read :param bool force: force scenes not meeting API_MINIMUM to be read. :returns: current scene. :rtype: str """ # callbacks self.graphAboutToBeRead() graph_data = self.read_file(filename) if not graph_data: log.error('scene "%s" appears to be invalid.' % filename) return False file_data = graph_data.get('graph', []) if len(file_data) > 1: api_ver = [x[1] for x in file_data if x[0] == 'api_version'] if api_ver: if not self.version_check(graph_data): if not force: log.error('scene "%s" requires api version %s ( %s )' % (filename, options.API_MINIMUM, api_ver[0])) return False # restore from state. self.restore(graph_data) # callbacks prefs = dict() for data in graph_data.get('graph').items(): if len(data) > 1: dname, attrs = data if dname == 'preferences': prefs = attrs self.graphRead(**prefs) return self.setScene(filename)
class Display(Defaults):
    """Abstract base class for a display window.

    Subclasses implement the window life-cycle hooks (init / update /
    close / dispose); defaults are applied by the Defaults machinery.
    """

    # Display mode used when creating the window: OpenGL plus double
    # buffering is required for OpenGL rendering.
    defaultmode = [DisplayMode.opengl, DisplayMode.doublebuf]

    # Ordered default attribute values consumed by Defaults.
    defaults = dict([("title", "Display Window"),
                     ("width", 800),
                     ("height", 600),
                     ("bpp", 16),
                     ("mode", DisplayMode.resizable)])

    def __init__(self, *args, **kwargs):
        """Apply defaults, then map positional args onto them in order."""
        super().__init__(*args, **kwargs)
        ordered_keys = list(Display.defaults.keys())
        for position, value in enumerate(args):
            setattr(self, ordered_keys[position], value)

    def init(self):
        """Create the window. Must be overridden."""
        raise NotImplementedError

    def update(self):
        """Refresh the window contents. Must be overridden."""
        raise NotImplementedError

    def close(self, dispose=False):
        """Close the window, optionally disposing it. Must be overridden."""
        raise NotImplementedError

    def dispose(self):
        """Release the window's resources manually. Must be overridden."""
        raise NotImplementedError
def extractProfile(fname):
    """Extract a flat feature dict from a saved Weibo user-profile JSON file.

    :param str fname: path to a UTF-8 JSON file holding one user record.
    :returns: mapping of (Chinese) feature names to string feature values.
    :rtype: dict
    """
    f = dict()
    #f['Group'] = re.findall(re_grp,fname)[0]
    with codecs.open(fname, 'r', encoding='utf-8') as uf:
        u = json.load(uf)
    f['UID'] = str(u['id'])
    f['Nick'] = u['screen_name']
    f['性别'] = u['gender']
    f['所在地'] = u['location']
    f['允许所有人发送私信'] = '1' if u['allow_all_act_msg'] else '0'
    f['允许所有人评论'] = '1' if u['allow_all_comment'] else '0'
    # '180/0/' in the avatar URL is taken to indicate a custom avatar
    # — TODO confirm against the avatar URL scheme.
    f['有自定义头像'] = '1' if '180/0/' in u['avatar_large'] else '0'
    # Ratio features guard against division by zero with a sentinel value.
    f['互粉数/粉丝数'] = str(
        1.0 * u['bi_followers_count'] /
        u['followers_count']) if u['followers_count'] > 0 else 'DIV#0'
    f['互粉数/关注数'] = str(
        1.0 * u['bi_followers_count'] /
        u['friends_count']) if u['friends_count'] > 0 else 'DIV#0'
    f['互粉数'] = str(u['bi_followers_count'])
    f['开博日期'] = str(parser.parse(u['created_at'], fuzzy=True).date())
    f['自我描述长度'] = str(len(u['description']))
    f['自我描述中含“我”'] = '1' if u'我' in u['description'] else '0'
    f['个性域名'] = u['domain']
    f['域名长度'] = str(len(u['domain']))
    f['域名中含数字'] = '1' if len(
        [val for val in u['domain'] if val in '0123456789']) > 0 else '0'
    f['微博数'] = str(u['statuses_count'])
    f['收藏数'] = str(u['favourites_count'])
    f['粉丝数'] = str(u['followers_count'])
    f['关注数'] = str(u['friends_count'])
    f['开启地理定位'] = '1' if u['geo_enabled'] else '0'
    # BUGFIX: u['url'] may be None (the '用户有个人网站URL' feature below
    # explicitly checks for that); the bare `in` test raised TypeError.
    f['用户个人网站URL含域名'] = '1' if u['url'] is not None and u[
        'domain'] in u['url'] else '0'
    f['昵称长度'] = str(len(u['screen_name']))
    f['用户有个人网站URL'] = '1' if u['url'] is not None else '0'
    f['是否认证'] = '1' if u['verified'] else '0'
    f['认证原因长度'] = str(len(u['verified_reason']))
    f['认证类别'] = str(u['verified_type'])
    f['省/市ID'] = u['province']
    f['市/区ID'] = u['city']
    return f
def data(self):
    """
    Output data for writing.

    :returns: attribute data.
    :rtype: dict
    """
    # Attributes considered for serialization, in output order.
    candidates = ('label', 'value', 'desc', '_edges', 'attr_type',
                  'private', 'hidden', 'connectable', 'connection_type',
                  'locked', 'required', 'user')
    result = dict()
    for name in candidates:
        if not hasattr(self, name):
            continue
        current = getattr(self, name)
        # Keep truthy values, plus required attributes even when falsy.
        if current or name in self.REQUIRED:
            result[name] = current
    return result
def grid_search_continue(self,
                         learning_agent,
                         agent_name="",
                         alphas=None,
                         epsilons=None,
                         degrees=None,
                         num_e=100,
                         path="Result/",
                         decay=True,
                         more=""):
    """Grid-search (alpha, epsilon, degree) for a function-approximation agent.

    Each configuration is run ``self.trail_n`` times with a fresh zero
    weight matrix; per-configuration results are pickled and plotted, and
    the last result of every configuration is collected in the returned
    dict.

    :param learning_agent: callable(weight, num_e=..., alpha=..., epsilon=..., decay=...).
    :param str agent_name: subdirectory name for saved results.
    :param list alphas: learning rates to sweep.
    :param list epsilons: exploration settings to sweep.
    :param list degrees: expansion degrees to sweep.
    :param int num_e: unused; the agent is run with ``self.num_e``
        (kept for interface compatibility).
    :param str path: base output directory.
    :param bool decay: forwarded to the agent.
    :param str more: extra suffix for the output file name.
    :returns: {(alpha, epsilon, self.degree): last result} per configuration.
    :rtype: dict
    """
    # BUGFIX: mutable default arguments replaced by None sentinels.
    if alphas is None:
        alphas = [0.001, 0.005, 0.01, 0.05]
    if epsilons is None:
        epsilons = [0.01, 0.05, 0.08, 0.1, 0.3, 0.5, 0.8, 1]
    if degrees is None:
        degrees = [5]
    a = dict()
    for alpha in alphas:
        for epsilon in epsilons:
            for degree in degrees:
                # BUGFIX: reset per configuration (matching
                # grid_search_tabular); previously results accumulated
                # across all configurations into one ever-growing list.
                trails = []
                result = None
                for i in range(self.trail_n):
                    n_out_features = self.n_featurized
                    weight = np.zeros((self.action_n, n_out_features))
                    result = learning_agent(weight,
                                            num_e=self.num_e,
                                            alpha=alpha,
                                            epsilon=epsilon,
                                            decay=decay)
                    trails.append(result)
                    if i % 20 == 0:
                        print("Step: ", i)
                print("Done with ", (alpha, epsilon, self.degree))
                # NOTE(review): the loop variable `degree` is unused;
                # names and result keys use self.degree as in the
                # original — confirm intent.
                name = path + self.env_name + "/" + self.policy_type + "/" \
                    + self.expansion + "/" + agent_name + "/" + str(alpha) \
                    + "_" + str(epsilon) + "_" + str(self.degree) + more
                name = name.replace(".", "")
                # BUGFIX: close the pickle file handle deterministically.
                with open(name, "wb") as fh:
                    pickle.dump(trails, fh)
                plot_trails(trails, name)
                a.update({(alpha, epsilon, self.degree): result})
    return a
class Health(Component):
    """Component tracking an entity's hit points, clamped to [min, max]."""

    defaults = dict([('current', 100), ('max', 100), ('min', 0)])

    def __init__(self, e: Entity, *args, current=100, max=100, min=0, **kwargs) -> None:
        """Store the health bounds before delegating to Component."""
        self.current = current
        self.max = max
        self.min = min
        super().__init__(e, *args, **kwargs)

    def change(self, value):
        """Apply a health delta, clamped to [min, max]; ignored when dead."""
        if not self.alive:
            return
        lower, upper = self.min, self.max
        updated = self.current + value
        if updated < lower:
            updated = lower
        elif updated > upper:
            updated = upper
        self.current = updated

    @property
    def alive(self) -> bool:
        """True while current health is above zero."""
        return self.current > 0
def __init__(self, states=None, active_state=0, **kwargs):
    """Initialize the module and (optionally) create its named states.

    :param states: either a dict mapping state id -> state-definition
        kwargs, or a list/tuple/set of state-definition kwargs (ids are
        their positional indices). Each definition is passed to
        ``self.create_state`` as keyword arguments.
    :param active_state: index (or np.ndarray) selecting the active state.
    :raises Exception: if ``states`` has an unrecognized type.
    """
    super().__init__(**kwargs)

    # Property-style attributes
    self._states = dict()

    # Create module states
    if states is not None:
        if isinstance(states, dict):
            # BUGFIX: the original used states.items() directly, but the
            # loop below does zip(*state_defs), which transposes a list of
            # pairs into (all-keys, all-values) tuples. Build the
            # (ids, definitions) pair explicitly so zip(*...) yields
            # (id, definition) pairs.
            state_defs = tuple(states.keys()), tuple(states.values())
        elif isinstance(states, (list, tuple, set)):
            state_defs = range(len(states)), states
        else:
            raise Exception('Unrecognized format for module states')

        # If not already, state terms will be converted to <tf.Variable>
        for state_id, state in zip(*state_defs):
            self.create_state(state_id, **state)

    # Indicator for active state
    if not isinstance(active_state, np.ndarray):
        active_state = np.array([active_state], dtype='int')
    self._active_state = active_state
def read_subsection(start_line, config):
    """Parse one ``edit "<name>"`` subsection from a firewall-style config.

    :param str start_line: the line that opened the subsection; everything
        after the first space (quotes stripped) is the subsection name.
    :param config: line iterator exposing ``pushback(line)`` and a
        ``line_number`` attribute.
    :returns: (name, settings-dict) tuple.
    :raises SyntaxError: on a line that is none of config/set/unset/next/end.
    """
    _, name = start_line.lstrip().split(' ', 1)
    name = name.strip('"')

    def settings():
        # Generator yielding (key, value) pairs until the subsection ends.
        for line in config:
            if line.startswith('config'):
                yield read_section(line, config)
            elif line.startswith('set'):
                yield read_set(line, config)
            elif line.startswith('unset'):
                yield read_unset(line, config)
            elif line.startswith('next'):
                # Normal subsection terminator.
                break
            elif line.startswith('end'):
                warnings.warn("Potential corruption: Subsection `%s` starting "
                              "with `edit` ends with `end`" % (name,))
                # Add an extra `end` due to inconsistency in VDOM configs ...
                config.pushback('end')
                break
            else:
                raise SyntaxError("Corrupt config?: [%d] %s" %
                                  (config.line_number, line))

    return name, dict(settings())
def print_diff_section(left, right, prefix="", output=sys.stdout, header=None):
    """Print differences between two mapping sections; return the diff count."""
    total = 0
    # Keys present on the left: compare against the right-hand value
    # (Undefined when the right side is missing or lacks the key).
    for key, lval in left.items():
        missing = right is Undefined or key not in right
        rval = Undefined if missing else right[key]
        total += print_diff_value(key, lval, rval, prefix, output, header)
    # Keys present only on the right (i.e. added in the right).
    if type(right) is dict:
        added = set(right.keys()) - set(left.keys())
        for key in added:
            rval = right[key]
            # For nested dicts, mirror the structure with Undefined leaves.
            lval = dict((j, Undefined) for j in rval) if type(rval) is dict else None
            total += print_diff_value(key, lval, rval, prefix, output, header)
    return total
class Geo(Component):
    """Component giving an entity a position and an optional containing area."""

    defaults = dict([('pos', (0, 0))])

    def __init__(self, e: Entity, area: Entity = None, *args, **kwargs) -> None:
        """Attach geometry to entity ``e`` and mirror its attributes onto it."""
        super().__init__(e, *args, **kwargs)
        self.area = area
        # Expose the geometry attributes directly on the owning entity.
        for attr_name in ('area', 'pos', 'x', 'y'):
            setattr(e, attr_name, getattr(self, attr_name))

    @property
    def x(self) -> int:
        """Horizontal coordinate (pos[0])."""
        return self.pos[0]

    @property
    def y(self) -> int:
        """Vertical coordinate (pos[1])."""
        return self.pos[1]
def _optimize_inputs_random(self, sess, inputs_new, losses_op,
                            feed_dict=None, config=None, time_limit=np.inf,
                            options=None, **kwargs):
    '''
    Optimize a loss function w.r.t. a set of inputs using Random Search.
    Batched evaluations are used in order to allow for a runtime limit.

    :param sess: TensorFlow session used for all evaluations.
    :param inputs_new: <tf.Variable> holding a batch of candidate inputs;
        its first dimension is the evaluation batch ("shard") size.
    :param losses_op: op producing one loss value per candidate input.
    :param dict feed_dict: extra feeds merged into every evaluation.
    :param dict config: overrides for self.configs['random']
        (must provide 'eval_limit').
    :param float time_limit: wall-clock budget in seconds.
    :returns: np.ndarray of all evaluated losses.
    '''
    # Work on copies so the caller's dicts are never mutated.
    feed_dict = dict() if (feed_dict is None) else feed_dict.copy()
    config = self.update_dict(self.configs['random'], config, as_copy=True)

    # Settings for Random Search
    eval_limit = config['eval_limit']
    shard_shape = inputs_new.get_shape().as_list()
    shard_size = shard_shape[0]
    start_time = self.timer()

    # Iterate Random Search until evaluation/runtime budget is exhausted
    inputs_seq, loss_seq, counter = [], [], 0
    while counter + shard_size <= eval_limit:
        if (self.timer() - start_time > time_limit):
            break
        # Candidates are drawn uniformly in [0, 1) — assumes inputs live
        # in the unit hypercube; TODO confirm.
        inputs_seq.append(self.rng.rand(*shard_shape))
        feed_per_step = {**feed_dict, inputs_new: inputs_seq[-1]}
        losses_per_step = sess.run(losses_op, feed_per_step)
        loss_seq.append(np.ravel(losses_per_step))
        counter += shard_size

    # Choose top-k loss minimizers (k = shard_size); argpartition avoids a
    # full sort.
    loss_seq = np.hstack(loss_seq)
    argmins = np.argpartition(loss_seq, shard_size - 1)[:shard_size]

    # Assign optimized values to <tf.Variable> via a cached assign op.
    ref = self.get_or_create_ref('assignment', dtype=inputs_new.dtype)
    assign_op = self.get_or_create_node('assign', tf.assign, (inputs_new, ref))
    sess.run(assign_op, {ref: np.vstack(inputs_seq)[argmins]})

    logger.info('Random Search evaluated {:d} losses in {:.3e}s'
                .format(len(loss_seq), self.timer() - start_time))
    return loss_seq
def initializeFontsList(self, valid=None):
    """
    Builds the manager fonts list.

    :param list valid: list of valid font names; when omitted, all fonts
        from options.SCENEGRAPH_VALID_FONTS are accepted.
    :returns: dictionary of fonts keyed 'ui' (proportional) and 'mono'
        (fixed pitch).
    :rtype: dict
    """
    # BUGFIX: mutable default argument replaced by a None sentinel;
    # a falsy value still falls back to the configured font list.
    if not valid:
        valid = [
            x for fontlist in options.SCENEGRAPH_VALID_FONTS.values()
            for x in fontlist
        ]
    result = dict(ui=[], mono=[])
    for font_name in self._font_db.families():
        if font_name not in valid:
            continue
        # Fixed-pitch families are treated as monospace fonts.
        bucket = 'mono' if self._font_db.isFixedPitch(font_name) else 'ui'
        result[bucket].append(font_name)
    return result
def find_a_kind(self):
    """Detect pair/trips/quads-based poker rankings in ``self.cards``.

    Reorders ``self.cards`` so the ranking cards come first followed by
    the kickers, and returns the detected Ranking (None when no pair or
    better is present).
    """
    self.cards.sort(reverse=True)
    ranked = self.classify_by_rank()
    # Group card sets by multiplicity (2 = pair, 3 = trips, 4 = quads).
    kind = dict()
    for cards in ranked.values():
        count = len(cards)
        if count >= 2:
            kind.setdefault(count, []).append(cards)
    ranking = None
    hand_cards = []
    if 4 in kind:
        hand_cards.extend(kind[4][0])
        ranking = Ranking.FOUR_OF_A_KIND
    elif 3 in kind:
        hand_cards.extend(kind[3][0])
        if 2 in kind:
            # BUGFIX: a full house is trips + the pair; the original
            # extended kind[3][0] a second time instead of the pair.
            hand_cards.extend(kind[2][0])
            ranking = Ranking.FULL_HOUSE
        else:
            ranking = Ranking.THREE_OF_A_KIND
    elif 2 in kind:
        hand_cards.extend(kind[2][0])
        if len(kind[2]) > 1:
            hand_cards.extend(kind[2][1])
            ranking = Ranking.TWO_PAIRS
        else:
            ranking = Ranking.ONE_PAIR
    if ranking:
        # Remaining cards become kickers, preserving sorted order.
        kickers = [c for c in self.cards if c not in hand_cards]
        self.cards = hand_cards + kickers
    return ranking
def main():
    """Simulate drivers exchanging gossip when they share a stop.

    Reads one cyclic route per input line and reports after how many
    minutes (within an 8-hour day) every driver knows all gossip.
    """
    with open('inputs/264_intermediate.in', 'r') as f:
        inp = f.read()
    # One route per line: the sequence of stop ids the driver cycles through.
    inp = list(map(lambda line: list(map(int, line.strip().split())), inp.splitlines()))
    # driver index -> set of drivers whose gossip they have heard (incl. self).
    drivers = dict([(d, set([d])) for d in range(len(inp))])
    # Simulate an 8-hour day, one minute per step.
    for t in range(8 * 60):
        # Current stop of every driver (routes repeat cyclically).
        stops = [inp[d][t % len(inp[d])] for d in drivers.keys()]
        if any_dupes(stops):
            # NOTE(review): any_dupes() is evaluated twice; presumably it
            # is pure and returns groups of driver indices sharing a stop
            # — verify against its definition before refactoring.
            for dupe in any_dupes(stops):
                # Merge the gossip sets of all drivers at the shared stop.
                pool = set()
                for d in dupe:
                    pool |= drivers[d]
                for d in dupe:
                    drivers[d] |= pool
        # is_all presumably checks every driver knows everyone's gossip —
        # confirm against its definition.
        if is_all(drivers):
            print(t, 'minutes')
            break
    else:
        # Loop ran to completion without full propagation.
        print('never')
def gen_graph(self, reduced_vectors, clustered_labels, description_vectors):
    """Render a 3-D scatter plot of clustered vectors and save it as graph.png.

    :param reduced_vectors: iterable of 3-component vectors (x, y, z).
    :param clustered_labels: cluster index per vector; only indices
        0, 1, 2 have mapped colors (others raise KeyError, as before).
    :param description_vectors: label text per vector.
    """
    # Cluster index -> marker color.
    colors = {0: 'red', 1: 'blue', 2: 'green'}
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(111, projection='3d')
    for vec, label, name in zip(reduced_vectors, clustered_labels,
                                description_vectors):
        x, y, z = vec
        ax.scatter(x, y, z, color=colors[label], s=100, label=name, cmap='RdPu')
        # Offset the annotation slightly away from the point.
        ax.text(x + 0.1 * x, y + 0.1 * y, z + 0.1 * z, name, fontsize=16)
    plt.savefig('graph.png')
    # BUGFIX: release the figure so repeated calls do not accumulate
    # open matplotlib figures.
    plt.close(fig)
def style_data(self, **kwargs):
    """
    Return the stylesheet data.

    :returns: parsed stylesheet data.
    :rtype: str
    """
    style = kwargs.pop('stylesheet_name', 'default')
    palette = kwargs.pop('palette_style', 'default')
    font = kwargs.pop('font_style', 'default')
    self._data = dict()
    # Same style selection is used both to build and to run the parser.
    selection = dict(style=style, palette_style=palette, font_style=font)
    sheet_parser = StyleParser(self, **selection)
    sheet_parser.run(**selection)
    result = sheet_parser.data(**kwargs)
    # keep the raw parser data around for debugging
    self._data = sheet_parser._data
    return result
def train(self):
    """Run cross-validated training: one full TF training session per
    validation split, saving periodic and best-loss checkpoints, then
    (unless training on CrowdPose) validating the split.

    Relies on self.cfg (configuration), self.d (dataset) and the
    graph-building helpers defined on this trainer.
    """
    from gen_batch import generate_batch
    from tfflat.data_provider import DataFromList, MultiProcessMapDataZMQ, BatchData, MapData
    from test import test  # NOTE(review): imported but unused here — confirm.
    # Resume from a given validation split when cnt_val_itr is set.
    start_val_itr = self.cfg.cnt_val_itr if self.cfg.cnt_val_itr >= 0 else 0
    for out_itr in range(start_val_itr, self.d.num_val_split):
        # reset input and output lists
        self._input_list = []
        self._output_list = []
        self._outputs = []
        self.graph_ops = None
        # reset current epoch
        self.cur_epoch = 0
        #reset summary dict
        self.summary_dict = {}
        # timer
        self.tot_timer = Timer()
        self.gpu_timer = Timer()
        self.read_timer = Timer()
        run_pref = "run_{}".format(out_itr + 1)
        lr_eval = self.cfg.lr
        save_summary_steps = self.cfg.save_summary_steps
        summary_dir = os.path.join(self.cfg.summary_dir, run_pref)
        # train_data, val_data = self.d.load_train_data(out_itr)
        train_data, val_data = self.d.load_train_data()
        with tf.Session(config=self.tfconfig) as sess:
            lr = tf.Variable(self.cfg.lr, trainable=False)
            self._optimizer = get_optimizer(lr, self.cfg.optimizer)
            if self.cfg.equal_random_seed:
                # set random seed for the python pseudo random number generator in order to obtain comparable results
                tf.set_random_seed(2223)
                random.seed(2223)
            # build_graph
            self.build_graph()
            # Data pipeline: optional multi-process batching via ZMQ.
            data_load_thread = DataFromList(train_data)
            if self.cfg.multi_thread_enable:
                data_thread = MultiProcessMapDataZMQ(data_load_thread,
                                                     self.cfg.num_thread,
                                                     generate_batch,
                                                     strict=True)
            else:
                data_thread = MapData(data_load_thread, generate_batch)
            data_load_thread = BatchData(data_thread, self.cfg.batch_size)
            if self.cfg.equal_random_seed:
                data_load_thread.reset_state()
            dataiter = data_load_thread.get_data()
            itr_per_epoch = math.ceil(
                len(train_data) / self.cfg.batch_size / self.cfg.num_gpus)
            # summaries
            # merge all summaries, run this operation later in order to retain the added summaries
            merged_sums = tf.summary.merge_all()
            writer = tf.summary.FileWriter(summary_dir, sess.graph)
            # saver
            self.logger.info('Initialize saver ...')
            model_dump_dir = os.path.join(self.cfg.model_dump_dir, run_pref)
            train_saver = Saver(sess, tf.global_variables(), model_dump_dir)
            best_model_dir = os.path.join(model_dump_dir, "best_model")
            val_dir = os.path.join(self.cfg.val_dir, run_pref)
            if not os.path.isdir(best_model_dir):
                os.makedirs(best_model_dir)
            if not os.path.isdir(val_dir):
                os.makedirs(val_dir)
            best_saver = Saver(sess, tf.global_variables(), best_model_dir,
                               max_to_keep=1)
            # initialize weights
            self.logger.info('Initialize all variables ...')
            sess.run(
                tf.variables_initializer(tf.global_variables(), name='init'))
            self.load_weights('last_epoch' if self.cfg.continue_train else
                              self.cfg.init_model, model_dump_dir, sess=sess)
            # self.cfg.continue_train = False
            self.logger.info(
                'Start training; validation iteration #{}...'.format(out_itr))
            start_itr = self.cur_epoch * itr_per_epoch + 1
            end_itr = itr_per_epoch * self.cfg.end_epoch + 1
            best_loss = self.cfg.min_save_loss
            for itr in range(start_itr, end_itr):
                self.tot_timer.tic()
                self.cur_epoch = itr // itr_per_epoch
                setproctitle.setproctitle('val_it {};train epoch{}:'.format(
                    out_itr, self.cur_epoch))
                # apply current learning policy
                cur_lr = self.cfg.get_lr(self.cur_epoch)
                if not approx_equal(cur_lr, lr_eval):
                    print(lr_eval, cur_lr)
                    sess.run(tf.assign(lr, cur_lr))
                # input data
                self.read_timer.tic()
                feed_dict = self.next_feed(dataiter)
                self.read_timer.toc()
                # train one step
                self.gpu_timer.tic()
                _, lr_eval, *summary_res, tb_summaries = sess.run(
                    [
                        self.graph_ops[0], lr, *self.summary_dict.values(),
                        merged_sums
                    ],
                    feed_dict=feed_dict)
                self.gpu_timer.toc()
                # write summary values to event file at disk
                if itr % save_summary_steps == 0:
                    writer.add_summary(tb_summaries, itr)
                # Pair the fetched summary values back with their names.
                itr_summary = dict()
                for i, k in enumerate(self.summary_dict.keys()):
                    itr_summary[k] = summary_res[i]
                screen = [
                    'Validation itr %d' % (out_itr),
                    'Epoch %d itr %d/%d:' %
                    (self.cur_epoch, itr, itr_per_epoch),
                    'lr: %g' % (lr_eval),
                    'speed: %.2f(%.2fs r%.2f)s/itr' %
                    (self.tot_timer.average_time, self.gpu_timer.average_time,
                     self.read_timer.average_time),
                    '%.2fh/epoch' %
                    (self.tot_timer.average_time / 3600. * itr_per_epoch),
                    ' '.join(
                        map(lambda x: '%s: %.4f' % (x[0], x[1]),
                            itr_summary.items())),
                ]
                #TODO(display stall?)
                if itr % self.cfg.display == 0:
                    self.logger.info(' '.join(screen))
                # save best model
                loss = itr_summary['loss']
                # print('current loss is:', loss, 'best loss is:', best_loss)
                if loss < best_loss:
                    best_loss = loss
                    print(
                        "Saving model because best loss was undergone; Value is {}."
                        .format(loss))
                    best_saver.save_model(self.cfg.end_epoch + 1)
                if itr % itr_per_epoch == 0:
                    train_saver.save_model(self.cur_epoch)
                self.tot_timer.toc()
            #clean up
            sess.close()
            tf.reset_default_graph()
            if self.cfg.multi_thread_enable:
                # NOTE(review): explicit __del__ call to tear down the ZMQ
                # worker processes — confirm the provider offers no
                # cleaner shutdown API.
                data_thread.__del__()
        print("Finish training for val run #{}; Apply validation".format(
            out_itr + 1))
        if self.cfg.additional_name == "CrowdPose":
            print("Training on CrowdPose, no additional validation required!")
        else:
            self.cross_val(val_data, self.cfg.end_epoch + 1, val_dir,
                           best_model_dir)
class DBase(Base, Defaults):
    """ DBase Class

    This class is a subclass of Base and Defaults. This provides
    multi-inheritance functionality to allow setting more attributes on
    Base and also setting default values for those attributes.

    Example

        class Flexible(DBase):
            defaults = dict( {"nick":None})
            def __init__(self, *args, **kwargs):
                # Create default objects in order
                super().__init__(*args, **kwargs)

        (Or simply)

        class Flexible(DBase):
            defaults = dict( [("nick","Awesome")])

        flexible = Flexible()
        flexible = Flexible("Javier", 12)
        flexible = Flexible("Javier", type="JavierType", nick="jsa000")

        # Following is allowed by Base class, but you always need to
        #  include nick in the creation since it can not be created inside
        #  the class. See the init function at the top for CustomBase
        #flexible = CustomBase("Javier", type="JavierType", nick="jsa000")
        #flexible = Flexible(id=23)

        print(flexible.name)
        print(flexible.nick)
        print(str(flexible))
        print(repr(flexible))

        <FlexibleDB>
        FlexibleDB(OrderedDict([('name', 'Javier'), ('id', '5f212236-8214-4ae7-bc86-eca5aae8b3a5'), ('type', 'JavierType')]))
        FlexibleDB(['nick:jsa000'])
        FlexibleDB(FlexibleDB(OrderedDict([('name', 'Javier'), ('id', '5f212236-8214-4ae7-bc86-eca5aae8b3a5'), ('type', 'JavierType')])) FlexibleDB(['nick:jsa000']))
    """
    # Per-class default attribute values consumed by Defaults.
    defaults = dict()

    def __init__(self, *args, **kwargs):
        """Initialize both bases explicitly, Base first then Defaults."""
        # Create default DefaultBase object
        # Note: "self" is required for the explicit base-class calls.
        Base.__init__(self, *args, **kwargs)
        Defaults.__init__(self, *args, **kwargs)

    def __str__(self):
        """Returns the string representation of this instance,
        combining both base representations."""
        return "<{}> \n {} \n {}".format(self.__class__.__name__,
                                         Base.__repr__(self),
                                         Defaults.__repr__(self))

    def __repr__(self):
        """Returns the repr of this instance,
        combining both base representations."""
        return "{}({} {})".format(self.__class__.__name__,
                                  Base.__repr__(self),
                                  Defaults.__repr__(self))
class Defaults(object):
    """ Defaults Class

    This is the base Defaults class that any object should inherit from
    if it's required to have more parameters to use without adding them
    to __slots__. Also this provides a way to set a default value for
    the parameters if they are not given at creation.

    In classes created from this base class, properties and default
    parameters can be given in the "defaults" variable shared by all
    instances created from the current class.

    If a parameter is not in defaults, it won't be updated or added.
    If you then access that attribute, Python raises an error.

        # Suppose "name" is not in "defaults"
        custom = Custom(name="Alex") # This will be ignored
        custom.name # error !!

    Example:

        class Custom(Defaults):
            defaults = dict([("name","Michael Jordan"), ("age", 20)])
            def __init__(self, *args, **kwargs):
                # Create default Database object
                super(Custom, self).__init__(*args, **kwargs)

        flexible = Custom(age = 34)
        print(flexible.name)
        print(flexible.age)
        print(str(flexible))
        print(repr(flexible))

        Output:
        Michael Jordan
        34
        <Custom> ['name:Michael Jordan', 'age:34']
        Custom(['name:Michael Jordan', 'age:34'])

    Example with multiple inheritance from Base and Defaults
    -> Order is important with multiple inheritance

        class Flexible(Base, Defaults):
            defaults = dict( [("my_name",None)])
            def __init__(self, *args, **kwargs):
                # Create default objects in order
                Base.__init__(self,*args, **kwargs)
                Defaults.__init__(self,*args, **kwargs)

        flexible = Flexible(name = "Javier", my_name = "Javier")
        print(flexible.name)  # From __slots__
        print(flexible.my_name)  # From defaults
        >Javier
        >Javier

        flexible = Flexible()
        print(flexible.name)  # From __slots__
        print(flexible.my_name)  # From defaults
        >Flexible
        >None
    """
    # Default dictionary with properties.
    # An ordered dict is created by [("key1",value2),("key2",value2)]
    # => This won't be considered ordered: {"key1":value1","key2":value2}
    defaults = dict()

    def __init__(self, *args, **kwargs):
        """Main constructor of the class.

        Initially sets the default values (forcing keyword overrides).
        """
        # Update the default properties if None
        self._update_defaults(kwargs, True)

    def _update_defaults(self, properties, force=True):
        """ Update current properties given in the parameters.

        Attributes listed in ``defaults`` are first seeded with their
        default values, then overridden from ``properties`` (always when
        ``force`` is True, otherwise only when currently None).
        """
        for param in self.defaults:
            if param not in self.__dict__:
                setattr(self, param, self.defaults[param])
            if param in properties:
                # Update the value if required
                if force or self.__dict__[param] is None:
                    setattr(self, param, properties[param])

    def __str__(self):
        """Returns the string representation of this instance,
        listing only the attributes declared in ``defaults``."""
        attribs = self.__dict__
        parameters = [
            "{}:{}".format(key, attribs[key]) for key in attribs
            if key in self.defaults
        ]
        return "<{}> {}".format(self.__class__.__name__, parameters)

    def __repr__(self):
        """Returns the repr of this instance,
        listing only the attributes declared in ``defaults``."""
        attribs = self.__dict__
        parameters = [
            "{}:{}".format(key, attribs[key]) for key in attribs
            if key in self.defaults
        ]
        return "{}({})".format(self.__class__.__name__, parameters)
parser.add_argument('-i', '--infile', type=str, required=True)
parser.add_argument('--normalize_lumi', action="store_true", default=True)
args = parser.parse_args()

sample = Sample.fromFile(args.infile)

# Cut flow, applied cumulatively in insertion order.
cuts = dict()
cuts["MT_mu"] = Cuts.mt_mu
cuts["2J"] = Cuts.n_jets(2)
cuts["1T"] = Cuts.n_tags(1)
#cuts["eta_lj"] = Cuts.eta_lj
cuts["rms_lj"] = Cuts.rms_lj
cuts["eta_jet"] = Cuts.eta_jet
cuts["top_mass_sig"] = Cuts.top_mass_sig


def makeSequences(cutsD):
    """Build cumulative cut sequences from a cut dictionary.

    Element i of the returned list is the product (``*``) of the first
    i + 1 cuts in ``cutsD``'s iteration order.

    :param dict cutsD: mapping of cut name -> cut object supporting ``*``.
    :returns: list of cumulative cut products ([] for an empty dict).
    :rtype: list
    """
    # BUGFIX: dict.values() is a non-indexable view in Python 3;
    # materialize it before subscripting.
    cut_list = list(cutsD.values())
    if not cut_list:
        return []
    cut_sequences = [cut_list[0]]
    for cut in cut_list[1:]:
        cut_sequences.append(cut_sequences[-1] * cut)
    return cut_sequences