def parse_args(struct_engine_mode: namedtuple) -> namedtuple:
    '''
    @function parse_args
    @date Sun, 10 May 2020 12:21:57 +0530
    @brief function to parse the arguments provided for the engine
    @param [IN] struct_engine_mode - namedtuple containing the fields to be
           parsed from the arguments
    @return a copy of struct_engine_mode with build_mode and gameconf populated
    '''
    if struct_engine_mode is None:
        raise ValueError(ERR_VALUE.format("Engine mode structure"))
    argsp = ArgumentParser()
    # elements handled in engine_mode_t -> struct_engine_mode:
    # gameconf, build_mode

    # optional arguments
    build_mode_info = "Engine mode has to be in debug(1)"
    argsp.add_argument("--build-mode", help=build_mode_info, type=int)
    gameconf_info = "Custom configuration filepath [absolute]"
    argsp.add_argument("--config", help=gameconf_info)
    pr = argsp.parse_args()
    # namedtuples are immutable, so assigning to the fields in place would
    # raise AttributeError; return an updated copy instead.
    return struct_engine_mode._replace(build_mode=(pr.build_mode == 1),
                                       gameconf=pr.config)
def move(row: int, column: int, game_state: namedtuple) -> namedtuple:
    """
    This function performs the given move for the current player.

    :rtype : othello namedtuple
    :param row: int
    :param column: int
    :param game_state: othello namedtuple
    """
    if row > len(game_state.game_board) or row < 1:
        raise InvalidMove
    if column > len(game_state.game_board[0]) or column < 1:
        raise InvalidMove
    if not _check_move(row, column, game_state):
        raise InvalidMove
    # Flip the captured disks in each of the eight directions. Each finder
    # returns how many disks to flip; (dr, dc) is that direction's step.
    directions = [
        (_find_north, -1, 0), (_find_south, 1, 0),
        (_find_west, 0, -1), (_find_east, 0, 1),
        (_find_northwest, -1, -1), (_find_northeast, -1, 1),
        (_find_southwest, 1, -1), (_find_southeast, 1, 1),
    ]
    for finder, dr, dc in directions:
        for number in range(finder(row, column, game_state)):
            r = (row - 1) + dr * (number + 1)
            c = (column - 1) + dc * (number + 1)
            game_state.game_board[r][c] = game_state.current_player
    game_state.game_board[row - 1][column - 1] = game_state.current_player
    game_state = count_disk(game_state)
    game_state = _change_player(game_state)
    if not _check_player(game_state):
        game_state = _change_player(game_state)
    if _check_win(game_state):
        game_state = game_state._replace(win_result=_find_winner(game_state))
    return game_state
def __init__(self, conf: namedtuple):
    self.size = conf.size
    # inhibitory neurons
    isi_size = int((1 - conf.splitSize) * self.size)
    self.neurons = [conf.neuron(conf.isiConfig) for _ in range(isi_size)]
    # excitatory neurons
    self.neurons.extend(
        [conf.neuron(conf.iseConfig) for _ in range(self.size - isi_size)])
    self.α = conf.traceAlpha
    self.activities = []
def _change_player(game_state: namedtuple) -> namedtuple:
    """
    This function changes the current player in the game state and returns it.

    :rtype : othello namedtuple
    :param game_state: othello namedtuple
    """
    if game_state.current_player == BLACK:
        game_state = game_state._replace(current_player=WHITE)
    else:
        game_state = game_state._replace(current_player=BLACK)
    return game_state
def set_variable(var: namedtuple) -> namedtuple:
    """Sets the variable from the user input

    Args:
        var (namedtuple): namedtuple("Variable", ["name", "string", "content"])

    Returns:
        namedtuple: namedtuple with input set correctly.
    """
    return var._replace(content=input(f"Set variable {var.name}: "))
def FillNC(root_grp_ptr, scene_location):
    retGps = NT("returnGroups",
                "calGrp, productsGrp, navGrp, slaGrp, periodGrp")
    root_grp_ptr.createDimension('samples', 512)
    root_grp_ptr.createDimension('scan_lines', 2000)
    root_grp_ptr.createDimension('bands', 128)
    root_grp_ptr.instrument = 'HICO'
    root_grp_ptr.institution = 'NASA Goddard Space Flight Center'
    root_grp_ptr.resolution = '100m'
    root_grp_ptr.location_description = scene_location
    root_grp_ptr.license = 'http://science.nasa.gov/earth-science/earth-science-data/data-information-policy/'
    root_grp_ptr.naming_authority = 'gov.nasa.gsfc.sci.oceandata'
    root_grp_ptr.date_created = DT.strftime(DT.utcnow(), '%Y-%m-%dT%H:%M:%SZ')
    root_grp_ptr.creator_name = 'NASA/GSFC'
    root_grp_ptr.creator_email = '*****@*****.**'
    root_grp_ptr.publisher_name = 'NASA/GSFC'
    root_grp_ptr.publisher_url = 'http_oceancolor.gsfc.nasa.gov'
    root_grp_ptr.publisher_email = '*****@*****.**'
    root_grp_ptr.processing_level = 'L1B'
    nav_grp = root_grp_ptr.createGroup('navigation')
    nav_vars = list()
    for var_name in ('sensor_zenith', 'solar_zenith', 'sensor_azimuth',
                     'solar_azimuth', 'longitudes', 'latitudes'):
        nav_vars.append(nav_grp.createVariable(var_name, 'f4',
                                               ('scan_lines', 'samples')))
    for var in nav_vars:
        var.units = 'degrees'
        var.valid_min = -180
        var.valid_max = 180
        var.long_name = var.name.replace('_', ' ').rstrip('s')
    retGps.navGrp = nav_grp
    retGps.productsGrp = root_grp_ptr.createGroup('products')
    lt = retGps.productsGrp.createVariable('Lt', 'u2',
                                           ('scan_lines', 'samples', 'bands'))
    lt.scale_factor = float32([0.02])
    lt.add_offset = float32(0)
    lt.units = "W/m^2/micrometer/sr"
    # lt.valid_range = nparray([0, 16384], dtype='u2')
    lt.long_name = "HICO Top of Atmosphere"
    lt.wavelength_units = "nanometers"
    # lt.createVariable('fwhm', 'f4', ('bands',))
    lt.fwhm = npones((128,), dtype='f4') * -1
    # wv = lt.createVariable('wavelengths', 'f4', ('bands',))
    lt.wavelengths = npones((128,), dtype='f4')
    retGps.slaGrp = root_grp_ptr.createGroup('scan_line_attributes')
    retGps.slaGrp.createVariable('scan_quality_flags', 'u1',
                                 ('scan_lines', 'samples'))
    # Create metadata group and sub-groups
    meta_grp = root_grp_ptr.createGroup('metadata')
    pl_info_grp = meta_grp.createGroup("FGDC/Identification_Information/Platform_and_Instrument_Identification")
    pl_info_grp.Instrument_Short_Name = "hico"
    prc_lvl_grp = meta_grp.createGroup("FGDC/Identification_Information/Processing_Level")
    prc_lvl_grp.Processing_Level_Identifier = "Level-1B"
    retGps.periodGrp = meta_grp.createGroup("FGDC/Identification_Information/Time_Period_of_Content")
    # fill HICO group
    retGps.calGrp = meta_grp.createGroup("HICO/Calibration")
    return retGps
def create_dict_named(obj: namedtuple) -> dict:
    """
    Create a dict out of a namedtuple.

    Args:
        obj: Namedtuple.

    Returns:
        A dict containing the data.
    """
    data = {
        '__type__': type(obj).__name__,
        '__data__': create_dict(obj._asdict())
    }
    return data
def genotype_to_dict(genotype: namedtuple):
    """Converts the given genotype to a dictionary that can be serialized.

    Inverse operation to dict_to_genotype().

    Args:
        genotype (namedtuple): The genotype that should be converted.

    Returns:
        dict: The converted genotype.
    """
    genotype_dict = genotype._asdict()
    for key, val in genotype_dict.items():
        if isinstance(val, range):
            genotype_dict[key] = list(val)
    return genotype_dict
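def _demo_genotype_to_dict():
    # Illustrative round-trip sketch for genotype_to_dict; the Genotype
    # fields below are hypothetical, not part of the original module.
    from collections import namedtuple
    Genotype = namedtuple('Genotype', ['layers', 'nodes'])
    g = Genotype(layers=3, nodes=range(4))
    # range objects are expanded to plain lists so the result is serializable
    assert genotype_to_dict(g) == {'layers': 3, 'nodes': [0, 1, 2, 3]}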
def prepare_log(log: namedtuple):
    try:
        LogModel = log_model_map[log.__class__.__name__]
    except KeyError:
        # Unknown log type: re-raise so the caller can handle it.
        raise
    sensitive_fields = sorted(LogModel.sensitive_fields())
    log_dic_python = log._asdict()
    log_dic_python['hmac1'] = encrypt_log(log_dic_python, sensitive_fields)
    table = LogModel._meta.table_name
    log_dic_db = {
        field.column_name: field.db_value(value)
        for field, value in LogModel._normalize_data(None, log_dic_python).items()
    }
    fields = log._fields
    return log_dic_db, table, fields
def results_to_csv(config: namedtuple, val_metrics: dict, attn_metrics: dict,
                   output_csv=None):
    keys = set(val_metrics.keys()) | set(attn_metrics.keys())
    keys.add('epoch')
    overlapping_keys = val_metrics.keys() & attn_metrics.keys()
    if len(overlapping_keys) > 0:
        raise ValueError("Found overlapping keys {} in training and attention "
                         "metrics".format(overlapping_keys))

    def find_keys_with_epoch(metrics_dict: dict) -> set:
        # Per-epoch metrics are stored as lists, one entry per epoch.
        return set(k for k in metrics_dict if isinstance(metrics_dict[k], list))

    val_keys_with_epoch = find_keys_with_epoch(val_metrics)
    val_keys_without_epoch = val_metrics.keys() - val_keys_with_epoch
    attn_keys_with_epoch = find_keys_with_epoch(attn_metrics)
    attn_keys_without_epoch = attn_metrics.keys() - attn_keys_with_epoch
    keys_without_epochs = val_keys_without_epoch | attn_keys_without_epoch
    model_parameters = dict(config._asdict())
    columns = list(model_parameters.keys()) + list(keys)
    rows = []

    def add_all_keys_with_epoch(keys_with_epoch, undef_keys, metrics):
        if not keys_with_epoch:
            return  # nothing tracked per epoch (the unpack below would fail)
        # All per-epoch metrics must agree on the number of epochs.
        (max_epoch,) = set(len(metrics[k]) for k in keys_with_epoch)
        for epoch in range(max_epoch):
            row = dict(model_parameters)
            row['epoch'] = epoch
            for k in keys_with_epoch:
                row[k] = metrics[k][epoch]
            for k in undef_keys:
                row[k] = ""
            rows.append(row)

    add_all_keys_with_epoch(val_keys_with_epoch,
                            keys_without_epochs | attn_keys_with_epoch,
                            val_metrics)
    add_all_keys_with_epoch(attn_keys_with_epoch,
                            keys_without_epochs | val_keys_with_epoch,
                            attn_metrics)
    # One final row holds the metrics that are not tracked per epoch.
    update_dict = dict(model_parameters)
    for k in attn_keys_without_epoch:
        update_dict[k] = attn_metrics[k]
    for k in val_keys_without_epoch:
        update_dict[k] = val_metrics[k]
    for k in val_keys_with_epoch | attn_keys_with_epoch:
        update_dict[k] = ""
    update_dict['epoch'] = ""
    rows.append(update_dict)
    # Build the frame in one go; DataFrame.append was removed in pandas 2.0.
    df = pd.DataFrame(rows, columns=columns)
    if output_csv:
        os.makedirs(os.path.dirname(output_csv), exist_ok=True)
        df.to_csv(output_csv, index=False, compression=None)
    return df
def namedtuple_to_xml(item: namedtuple):
    elem = Element(type(item).__name__)
    for key, val in item._asdict().items():
        if val is None:
            continue
        # namedtuple is a factory function, not a type, so the original
        # `type(val) is namedtuple` could never be true; detect namedtuple
        # instances by their _fields attribute instead.
        if isinstance(val, tuple) and hasattr(val, '_fields'):
            child = namedtuple_to_xml(val)
        elif isinstance(val, list):
            child = Element(key)
            for sub_item in val:  # renamed to avoid shadowing the parameter
                child.append(namedtuple_to_xml(sub_item))
        else:
            child = Element(key)
            child.text = str(val)
        elem.append(child)
    return elem
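def _demo_namedtuple_to_xml():
    # Illustrative sketch for namedtuple_to_xml; Person and Address are
    # hypothetical test types. Note that a nested namedtuple is emitted
    # under its type name, not under its field name.
    from collections import namedtuple
    from xml.etree.ElementTree import tostring
    Address = namedtuple('Address', ['city'])
    Person = namedtuple('Person', ['name', 'home'])
    xml = namedtuple_to_xml(Person('Ada', Address('London')))
    print(tostring(xml))
    # b'<Person><name>Ada</name><Address><city>London</city></Address></Person>'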
def search_variable(var: namedtuple, vars: List[namedtuple], i: int,
                    session: sessionmaker):
    while True:
        show_variable(None)
        print("Enter variable name:")
        inp = input()
        if inp == "exit":
            break
        db_var = session.query(Variable).filter(Variable.name == inp).first()
        if db_var:
            print()
            print(f"Set {var.name} to {db_var.content}")
            vars[i] = var._replace(content=db_var.content)
            break
        else:
            print()
            print("Variable name not found")
            print("Enter new name or enter exit to leave")
            print()
    return vars
def count_disk(game_state: namedtuple) -> namedtuple:
    """
    This function counts the disks on the game board, updates the scores in
    the game state, and returns it.

    :rtype : othello namedtuple
    :param game_state: othello namedtuple
    """
    black_count = 0
    white_count = 0
    for row in game_state.game_board:
        for cell in row:
            if cell == BLACK:
                black_count += 1
            if cell == WHITE:
                white_count += 1
    return game_state._replace(black_score=black_count,
                               white_score=white_count)
def _get_by_entity(self, entity: namedtuple) -> namedtuple:
    # Restart the browser once enough queries have accumulated.
    if self.n >= 5:
        self.stop()
        self.start()
    self.n += 1
    # noinspection PyProtectedMember
    result = entity._asdict()
    self.browser.get(entity.url)
    table = self.browser.find_element_by_xpath(
        "//div[@class='portlet']//table[@align='center'][2]")
    if table:
        result["content"] = table.text
        if '不开展' not in table.text:  # '不开展' = "no operation conducted"
            span_tag = table.find_elements_by_xpath("//table[@border>'0']//tr[1]//td")
            data_tag = table.find_elements_by_xpath("//table[@border>'0']//tr[2]//td")
            span = [tag.text for tag in span_tag]
            data = [tag.text for tag in data_tag]
            try:
                # Pick the data cell whose header contains the keyword:
                # '期限' (term), '量' (volume), '利率' (interest rate).
                result["days"] = data[["期限" in w for w in span].index(True)]
                result["amount"] = data[["量" in w for w in span].index(True)]
                result["rate"] = data[["利率" in w for w in span].index(True)]
            except ValueError:
                pass
    return self.entity(**result)
def save_link(self, link: namedtuple) -> None:
    # noinspection PyProtectedMember
    self.update("link", {
        "head": link.head,
        "tail": link.tail
    }, link._asdict(), True)
class v2c(object):
    """Build an SNMPv2c manager object"""

    def __init__(self, ipaddr=None, device=None, community='Public',
                 retries=3, timeout=9):
        self.device = device
        self.ipaddr = ipaddr
        self.community = community
        self.SNMPObject = NT('SNMPObject',
                             ['modName', 'datetime', 'symName', 'index', 'value'])
        self.SNMPIndexed = NT('SNMPIndexed',
                              ['modName', 'datetime', 'symName', 'index', 'value'])
        self.query_timeout = float(timeout) / int(retries)
        self.query_retries = int(retries)
        self._index = None
        self.cmdGen = cmdgen.CommandGenerator()
        #mibBuilder = builder.MibBuilder()
        #mibPath = mibBuilder.getMibPath()+('/opt/python/Models/Network/MIBs',)
        #mibBuilder.setMibPath(*mibPath)
        #mibBuilder.loadModules(
        #    'RFC-1213',
        #    )
        #mibView = view.MibViewController(mibBuilder)

    def index(self, oid=None):
        """Build an SNMP Manager index to reference in get or walk operations.
        First v2c.index('ifName').  Then, v2c.get_index('ifHCInOctets', 'eth0')
        or v2c.walk_index('ifHCInOctets').  Instead of referencing a numerical
        index, the index will refer to the value that was indexed."""
        self._index = dict()
        self._intfobj = dict()
        snmpidx = self.walk(oid=oid)
        for ii in snmpidx:
            ## the dicts below are keyed by the SNMP index number
            # value below is the text string of the intf name
            self._index[ii.index] = ii.value
            # value below is the intf object
            if not (self.device is None):
                self._intfobj[ii.index] = self.device.find_match_intf(
                    ii.value, enforce_format=False)

    def walk_index(self, oid=None):
        """Example usage, first index with v2c.index('ifName'), then
        v2c.walk_index('ifHCInOctets')"""
        if not (self._index is None):
            tmp = list()
            snmpvals = self.walk(oid=oid)
            for idx, ii in enumerate(snmpvals):
                tmp.append([ii.modName, datetime.now(), ii.symName,
                            self._index[ii.index], ii.value])
            return [self.SNMPIndexed._make(vals) for vals in tmp]
        else:
            raise ValueError("Must populate with SNMP.v2c.index() first")

    def walk(self, oid=None):
        if isinstance(self._format(oid), tuple):
            errorIndication, errorStatus, errorIndex, \
                varBindTable = self.cmdGen.nextCmd(
                    cmdgen.CommunityData('test-agent', self.community),
                    cmdgen.UdpTransportTarget((self.ipaddr, 161),
                                              retries=self.query_retries,
                                              timeout=self.query_timeout),
                    self._format(oid),
                    )
            # Parsing only for now... no return value...
            self._parse(errorIndication, errorStatus, errorIndex, varBindTable)
        elif isinstance(oid, str):
            errorIndication, errorStatus, errorIndex, \
                varBindTable = self.cmdGen.nextCmd(
                    # SNMP v2
                    cmdgen.CommunityData('test-agent', self.community),
                    # Transport
                    cmdgen.UdpTransportTarget((self.ipaddr, 161)),
                    (('', oid),),
                    #cmdgen.MibVariable(oid).loadMibs(),
                    )
            return self._parse_resolve(errorIndication, errorStatus,
                                       errorIndex, varBindTable)
        else:
            raise ValueError("Unknown oid format: %s" % oid)

    def get_index(self, oid=None, index=None):
        """In this case, index should be similar to the values you indexed
        from... i.e. if you index with ifName, get_index('ifHCInOctets',
        'eth0')"""
        if not (self._index is None) and isinstance(index, str):
            # Map the interface name provided in index to an ifName index...
            snmpvals = None
            for idx, value in self._index.items():
                if index == value:
                    # if there is an exact match between the text index and
                    # the snmp index value...
                    snmpvals = self.get(oid=oid, index=idx)
                    break
            else:
                # TRY mapping the provided text index into an interface obj
                _intfobj = self.device.find_match_intf(index)
                if not (_intfobj is None):
                    for key, val in self._intfobj.items():
                        if (val == _intfobj):
                            snmpvals = self.get(oid=oid, index=key)
                            break
            # Ensure we only parse a valid response...
            if not (snmpvals is None):
                tmp = [snmpvals.modName, datetime.now(), snmpvals.symName,
                       self._index[snmpvals.index], snmpvals.value]
                return self.SNMPIndexed._make(tmp)
        elif not isinstance(index, str):
            raise ValueError("index must be a string value")
        else:
            raise ValueError("Must populate with SNMP.v2c.index() first")

    def get(self, oid=None, index=None):
        if isinstance(self._format(oid), tuple):
            errorIndication, errorStatus, errorIndex, \
                varBindTable = self.cmdGen.getCmd(
                    cmdgen.CommunityData('test-agent', self.community),
                    cmdgen.UdpTransportTarget((self.ipaddr, 161),
                                              retries=self.query_retries,
                                              timeout=self.query_timeout),
                    self._format(oid),
                    )
            # Parsing only for now... no return value...
            self._parse(errorIndication, errorStatus, errorIndex, varBindTable)
        elif isinstance(oid, str) and isinstance(index, int):
            errorIndication, errorStatus, errorIndex, \
                varBindTable = self.cmdGen.getCmd(
                    # SNMP v2
                    cmdgen.CommunityData('test-agent', self.community),
                    # Transport
                    cmdgen.UdpTransportTarget((self.ipaddr, 161)),
                    (('', oid), index),
                    #cmdgen.MibVariable(oid).loadMibs(),
                    )
            return self._parse_resolve(errorIndication, errorStatus,
                                       errorIndex, [varBindTable])[0]
        else:
            raise ValueError("Unknown oid format: %s" % oid)

    def bulkwalk(self, oid=None):
        """SNMP bulkwalk a device.  NOTE: This often is faster, but does not
        work as well as a simple SNMP walk"""
        if isinstance(self._format(oid), tuple):
            errorIndication, errorStatus, errorIndex, varBindTable = \
                self.cmdGen.bulkCmd(
                    cmdgen.CommunityData('test-agent', self.community),
                    cmdgen.UdpTransportTarget((self.ipaddr, 161),
                                              retries=self.query_retries,
                                              timeout=self.query_timeout),
                    0, 25,
                    self._format(oid),
                    )
            return self._parse(errorIndication, errorStatus, errorIndex,
                               varBindTable)
        elif isinstance(oid, str):
            errorIndication, errorStatus, errorIndex, varBindTable = \
                self.cmdGen.bulkCmd(
                    cmdgen.CommunityData('test-agent', self.community),
                    cmdgen.UdpTransportTarget((self.ipaddr, 161),
                                              retries=self.query_retries,
                                              timeout=self.query_timeout),
                    0, 25,
                    (('', oid),),
                    #cmdgen.MibVariable(oid).loadMibs(),
                    )
            return self._parse_resolve(errorIndication, errorStatus,
                                       errorIndex, varBindTable)
        else:
            raise ValueError("Unknown oid format: %s" % oid)

    def _parse_resolve(self, errorIndication=None, errorStatus=None,
                       errorIndex=None, varBindTable=None):
        """Parse MIB walks and resolve into MIB names"""
        retval = list()
        if errorIndication:
            print(errorIndication)
        else:
            if errorStatus:
                print('%s at %s\n' % (
                    errorStatus.prettyPrint(),
                    varBindTable[-1][int(errorIndex) - 1]
                    ))
            else:
                for varBindTableRow in varBindTable:
                    for oid, val in varBindTableRow:
                        (symName, modName), indices = cmdgen.mibvar.oidToMibName(
                            self.cmdGen.mibViewController, oid
                            )
                        val = cmdgen.mibvar.cloneFromMibValue(
                            self.cmdGen.mibViewController, modName, symName,
                            val)
                        # Try to parse the index as an int first,
                        # then as a string
                        joined = '.'.join(v.prettyPrint() for v in indices)
                        try:
                            index = int(joined)
                        except ValueError:
                            index = str(joined)
                        # Re-format values as float or integer, if possible...
                        tmp = val.prettyPrint()
                        if re.search(r"""^\s*\d+\s*$""", tmp):
                            value = int64(tmp)
                        elif re.search(r"""^\s*\d+\.\d+\s*$""", tmp):
                            value = float64(tmp)
                        else:
                            value = tmp
                        retval.append(self.SNMPObject._make(
                            [modName, datetime.now(), symName, index, value]))
        return retval

    def _parse(self, errorIndication, errorStatus, errorIndex, varBindTable):
        if errorIndication:
            print(errorIndication)
        else:
            if errorStatus:
                print('%s at %s\n' % (
                    errorStatus.prettyPrint(),
                    errorIndex and varBindTable[-1][int(errorIndex) - 1] or '?'
                    ))
            else:
                for varBindTableRow in varBindTable:
                    for name, val in varBindTableRow:
                        print('%s = %s' % (name.prettyPrint(),
                                           val.prettyPrint()))

    def _format(self, oid):
        """Format a numerical OID in the form of 1.3.4.1.2.1 into a tuple"""
        if isinstance(oid, str):
            if re.search(r'(\d+\.)+\d+', oid):
                return tuple(int(ii) for ii in oid.split('.'))
            else:
                return oid
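# Illustrative usage sketch for the v2c manager above, following its own
# docstrings (the address and community string are placeholders; requires
# pysnmp's cmdgen):
#     mgr = v2c(ipaddr='192.0.2.1', community='public')
#     mgr.index('ifName')                      # build the interface-name index
#     for row in mgr.walk_index('ifHCInOctets'):
#         print(row.index, row.value)          # per-interface octet counters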
def __reader(struct_definition: Struct, output_tuple: namedtuple, data) -> namedtuple:
    """Helper function for building out struct reader functions"""
    return output_tuple._make(struct_definition.unpack(data))
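def _demo_reader():
    # Illustrative sketch for __reader: unpack two little-endian int32
    # fields into a namedtuple. Point and the '<ii' layout are hypothetical,
    # not part of the original module.
    from collections import namedtuple
    from struct import Struct
    Point = namedtuple('Point', ['x', 'y'])
    p = __reader(Struct('<ii'), Point, b'\x01\x00\x00\x00\x02\x00\x00\x00')
    assert p == Point(x=1, y=2)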
def data(self, cls: namedtuple):
    self._data = cls
    for name, value in cls._asdict().items():
        setattr(self, name, value)
def avg_profit(company: namedtuple) -> namedtuple:
    summ = (company.quarter_profit_1 + company.quarter_profit_2 +
            company.quarter_profit_3 + company.quarter_profit_4)
    # Average over the four quarters (the original divided by 10, which is
    # not the mean of four values).
    company = company._replace(avg=summ / 4)
    return company
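def _demo_avg_profit():
    # Illustrative check for avg_profit; the Company tuple is hypothetical.
    from collections import namedtuple
    Company = namedtuple('Company', ['quarter_profit_1', 'quarter_profit_2',
                                     'quarter_profit_3', 'quarter_profit_4',
                                     'avg'])
    c = Company(10, 20, 30, 40, None)
    assert avg_profit(c).avg == 25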
def _compute_meaning_atom(self, atom: namedtuple, debug=False):
    '''
    Computes the meaning of an atom for all models.

    The meaning of an integer constant is model-invariant: its meaning is
    the same in each model, namely that integer itself. The meaning of a
    set variable is model-dependent: given an enumeration of models, it
    provides, for each model in that enumeration, the information of which
    elements of that model are present in that set.

    Args:
        atom: A namedtuple created by "Atom". Either set variable A or B,
            or an integer constant from 0 to max_model_size.

    Returns:
        For integer constants: an immutable np.array of shape
        (1, N_OF_MODELS). For set variables: a list of max_model_size many
        immutable np.arrays of shape
        (model_size, number_of_subsets ** model_size) for model_size from
        1 to max_model_size.
    '''
    # Model-invariant atom case: integer constants.
    # An array with that same integer for each model in the universe.
    if atom.is_constant:
        atom_meaning = np.array([atom.func()] * self.N_OF_MODELS)
        atom_meaning.flags.writeable = False
    # Model-dependent atom case: set variables.
    # A set representation shows which elements are in a set
    # across the whole universe of models.
    else:
        atom_meaning = []
        cur_model_size = 1
        # Make matrix template of the correct shape.
        # Columns represent models, rows represent objects.
        # Entry (i,j) represents whether element i in model j
        # is in A (1) or not in A (0).
        # For model_size 1 to max_model_size there will be a matrix
        # of shape (model_size, number_of_subsets ** model_size)
        # in set_repr.
        number_of_models_of_cur_model_size = (
            self.number_of_subsets ** cur_model_size)
        set_repr = np.zeros(
            (cur_model_size, number_of_models_of_cur_model_size),
            dtype=np.uint8)
        # Offset = number of models of previous model sizes.
        offset = 0
        # Fill in the matrix template, per model.
        start_of_new_model_block = (self.number_of_subsets ** cur_model_size)
        for model_idx, model in enumerate(self.generate_universe()):
            if debug:
                print()
                print("start_of_new_model_block = ", end="")
                print(start_of_new_model_block)
                print("model_idx, offset, model = ", end="")
                print(model_idx, offset, utils.tuple_format(model))
            # When all models of cur_model_size have been treated,
            # go to the next block of models: the models of size
            # cur_model_size + 1. Store the results of the current
            # block of models.
            if model_idx == start_of_new_model_block:
                if debug:
                    print()
                    print()
                cur_model_size += 1
                offset = start_of_new_model_block
                start_of_new_model_block += (
                    self.number_of_subsets ** cur_model_size)
                set_repr.flags.writeable = False
                atom_meaning.append(set_repr)
                # Reset matrix template: make new matrix template
                # of the correct shape.
                set_repr = np.zeros(
                    (cur_model_size,
                     self.number_of_subsets ** cur_model_size),
                    dtype=np.uint8,
                )
            # For object 1 to object cur_model_size in the current model,
            # fill in set-inclusion for this set variable. Fill in column
            # i = model_idx - offset (the i'th model of cur_model_size).
            if debug:
                print("model =", model)
                print("atom.func(model) =", atom, atom.func(model))
                print("set_repr =", set_repr)
            set_repr[:, model_idx - offset] = atom.func(model)
            if debug:
                print("set_repr =", set_repr)
        # Finish.
        set_repr.flags.writeable = False
        atom_meaning.append(set_repr)
    return atom_meaning
def unpack_named(data: bytes, packing: str, nt: namedtuple):
    return [nt._make(x) for x in struct.iter_unpack(packing, data)]
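def _demo_unpack_named():
    # Illustrative sketch: iterate fixed-size records out of a byte buffer.
    # The Sample namedtuple and '<hh' layout are hypothetical examples.
    import struct
    from collections import namedtuple
    Sample = namedtuple('Sample', ['left', 'right'])
    data = struct.pack('<hh', 1, 2) + struct.pack('<hh', 3, 4)
    assert unpack_named(data, '<hh', Sample) == [Sample(1, 2), Sample(3, 4)]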
def nt2json(nt: namedtuple):
    return json.dumps(nt._asdict(), default=str)
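def _demo_nt2json():
    # Illustrative sketch: default=str lets non-JSON-native field types
    # (e.g. datetime) serialize. The Event tuple is hypothetical.
    from collections import namedtuple
    from datetime import datetime
    Event = namedtuple('Event', ['name', 'when'])
    print(nt2json(Event('start', datetime(2020, 5, 10))))
    # {"name": "start", "when": "2020-05-10 00:00:00"}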
def __init__(self, Prime=0, RunID=0):
    TH.Thread.__init__(self)
    # The lock guards the shared AllThreads registry; `with` releases it
    # even if an exception is raised (the original acquire/release pair
    # would leak the lock on error).
    with PrimeBase.THL:
        self._PR = Prime    # current prime
        self._ID = RunID    # 0 : dispatcher --- 1..p : worker threads
        # print('init thread: ', Prime, RunID, flush=True)
        tn = self._MakeName(Prime, RunID)
        self.setName(tn)    # easy identifier over all threads
        nt = NT('Info', ['Prime', 'RunID', 'TObject', 'Alive', 'I_AmReady',
                         'PauseCnt', 'WhereItIs', 'InitTime', 'StartTime',
                         'ReadyTime', 'FinitTime'])
        # Note: attributes are set on the namedtuple *class* itself, which
        # shadows its field descriptors; the record is used as a plain
        # attribute container here.
        nt.Prime = Prime    # current prime
        nt.RunID = RunID    # 0 : dispatcher --- 1..p : worker threads
        nt.TObject = self
        nt.Alive = self.is_alive()  # was nt.alive, mismatching the 'Alive' field
        nt.I_AmReady = False
        nt.InitTime = DT.datetime.now()
        nt.StartTime = None
        nt.ReadyTime = None
        nt.FinitTime = None
        nt.PauseCnt = 0     # counter for thread sleep phases
        nt.WhereItIs = 0
        self.AllThreads.setdefault(tn, nt)
def parse_namedtuple(losses: namedtuple, prefix: str):
    log = {'{}/{}'.format(prefix, k): v for k, v in losses._asdict().items()}
    return log
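def _demo_parse_namedtuple():
    # Illustrative sketch: prefix loss components for a metric logger.
    # The Losses tuple and 'val' prefix are hypothetical.
    from collections import namedtuple
    Losses = namedtuple('Losses', ['recon', 'kl'])
    assert parse_namedtuple(Losses(0.5, 0.1), 'val') == {'val/recon': 0.5,
                                                         'val/kl': 0.1}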
def save_node(self, node: namedtuple) -> None:
    # noinspection PyProtectedMember
    self.update("node", {
        "name": node.name,
        "source": node.source
    }, node._asdict(), True)
def message_from_tuple(payload_tuple: namedtuple, attributes: dict = None):
    tuple_as_json = json.dumps(payload_tuple._asdict())
    return mock_message(tuple_as_json, attributes)
def _prefixed(nt: namedtuple, prefix):
    """Convert a named tuple into a dict with prefixed names."""
    result = {}
    for key, value in nt._asdict().items():
        result[prefix + key] = value
    return result
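def _demo_prefixed():
    # Illustrative sketch: flatten a namedtuple into prefixed dict keys,
    # e.g. for logging or DataFrame columns. The Config tuple is hypothetical.
    from collections import namedtuple
    Config = namedtuple('Config', ['lr', 'batch'])
    assert _prefixed(Config(0.1, 32), 'train_') == {'train_lr': 0.1,
                                                    'train_batch': 32}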
def train_DQN(env: WrapIt, Q: DQN, Q_target: DQN, optimizer: namedtuple,
              replay_buffer: ReplayBuffer, exploration: Schedule):
    """
    @parameters
        Q:             online Q-network
        Q_target:      target Q-network, synced every ARGS.dqn_updatefreq updates
        optimizer:     namedtuple with a torch optimizer constructor and kwargs
        replay_buffer: stores the frames
        exploration:   epsilon schedule
    @return None
    """
    assert type(env.observation_space) == gym.spaces.Box
    assert type(env.action_space) == gym.spaces.Discrete
    optimizer = optimizer.constructor(Q.parameters(), **optimizer.kwargs)
    num_actions = env.action_space.n
    num_param_updates = 0
    mean_episode_reward = -float('nan')
    best_mean_episode_reward = -float('inf')
    LOG_EVERY_N_STEPS = 10000
    last_obs = env.reset(passit=True)
    # Q.getSummary()
    out_count = 0
    bar = tqdm(range(ARGS.timesteps))
    for t in bar:
        last_idx = replay_buffer.store_frame(last_obs)
        recent_observations = replay_buffer.encode_recent_observation()
        if t > ARGS.startepoch:
            value = select_epsilon_greedy_action(Q, recent_observations,
                                                 exploration, t, num_actions)
            action = value[0, 0]
        else:
            action = random.randrange(num_actions)
        obs, reward, done, _ = env.step(action)
        reward = max(-1.0, min(reward, 1.0))
        replay_buffer.store_effect(last_idx, action, reward, done)
        if done:
            obs = env.reset()
        last_obs = obs
        # bar.set_description(f"{obs.shape} {obs.dtype}")
        if (t > ARGS.startepoch and t % ARGS.dqn_freq == 0
                and replay_buffer.can_sample(ARGS.batchsize)):
            bar.set_description("backward")
            (obs_batch, act_batch, rew_batch, next_obs_batch,
             done_mask) = replay_buffer.sample(ARGS.batchsize)
            (obs_batch, act_batch, rew_batch, next_obs_batch,
             not_done_mask) = TENSOR(obs_batch, act_batch, rew_batch,
                                     next_obs_batch, 1 - done_mask)
            (obs_batch, act_batch, rew_batch, next_obs_batch,
             not_done_mask) = TO(obs_batch, act_batch, rew_batch,
                                 next_obs_batch, not_done_mask)
            values = Q(obs_batch)
            current_Q_values = values.gather(
                1, act_batch.unsqueeze(1).long()).squeeze()
            # Compute next Q value based on which action gives max Q values.
            # Detach from the current graph since we don't want gradients
            # for next Q to be propagated.
            next_max_q = Q_target(next_obs_batch).detach().max(1)[0]
            next_Q_values = not_done_mask * next_max_q
            # Compute the target of the current Q values
            Q_target_values = rew_batch + (ARGS.gamma * next_Q_values)
            # Compute Bellman error
            bellman_error = Q_target_values - current_Q_values
            # clip the bellman error between [-1, 1]
            clipped_bellman_error = bellman_error.clamp(-1, 1)
            # Note: clipped_bellman_error * -1 is the correct gradient
            d_error = clipped_bellman_error * -1.0
            # Clear previous gradients before backward pass
            optimizer.zero_grad()
            # run backward pass
            current_Q_values.backward(d_error.data)
            # Perform the update
            optimizer.step()
            num_param_updates += 1
            if num_param_updates % ARGS.dqn_updatefreq == 0:
                bar.set_description("update")
                Q_target.load_state_dict(Q.state_dict())
def _asSortedList(scores: namedtuple, places: int = 4) -> List[str]:
    """Converts namedtuple of scores to a list of rounded values in
    name-sorted order"""
    tf = truncatedFloat(places)
    return [tf % v for _, v in sorted(scores._asdict().items())]
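def _demo_asSortedList():
    # Illustrative sketch; assumes truncatedFloat(4) yields a '%'-style
    # format string such as '%.4f' (that helper is defined elsewhere, so
    # this is an assumption, not the confirmed behavior).
    from collections import namedtuple
    Scores = namedtuple('Scores', ['recall', 'precision'])
    print(_asSortedList(Scores(recall=0.123456, precision=0.9)))
    # -> ['0.9000', '0.1235']  (fields sorted by name: precision, recall)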