def __init__():
    if '__file__' in globals():
        cdir = os.path.dirname(os.path.realpath(__file__))
    else:
        cdir = os.getcwd()
    _dir = nt('DirObject', 'cur data static')(
        *[joinp(cdir, d) for d in ['', 'database/', 'static_files/']])
    # pylint: disable=W0106
    [os.makedirs(p) for p in _dir if not path.exists(p)]
    main_url = 'http://tkm.ibb.gov.tr/'
    # File names for static files
    fl_road = ['r{0:d}.txt'.format(x) for x in range(5)]
    fl_other = ['d{0:02d}.txt'.format(x) for x in range(1, 10)]
    static_files_url = 'YHarita/res/'
    # Create the list of URLs used in this module.
    _url = nt('UrlList', 'trafficindex trafficdata parkingdata '
              'announcements weatherdata road other')(
        *(tuple([joinp(main_url, url) for url in [
            'data/IntensityMap/' + url + '.aspx' for url in [
                'TrafficIndex', 'TrafficDataNew', 'ParkingLotData',
                'AnnouncementData', 'WeatherData']]])
          + ([joinp(main_url, static_files_url, fn) for fn in fl_road],
             [joinp(main_url, static_files_url, fn) for fn in fl_other])))
    return _url, _dir
def __init__(self, name='default_game', number_of_players=2,
             player_list=[['default_player_0', 'default_strategy', []],
                          ['default_player_1', 'default_strategy', []]],
             sim_info_list=[import_module('networkx.classes.graph').Graph(), [], []]):
    """
    :type sim_info_list: object
    """
    self.name = name
    self.log = print
    self._number_of_players = number_of_players
    # Build a Players namedtuple holding one Player record per participant.
    players_nt = nt("Players", ["player_%s" % player_number
                                for player_number in range(self._number_of_players)])
    _base_player = nt("Player", ['name', 'strategy', 'moves'])
    self.player_info = players_nt(*[_base_player(*p) for p in player_list])
    # Simulation info: test graph, simulation range and number of runs.
    _sim_info = nt("Sim_Info", ['test_graph', 'sim_range', 'sim_runs'])
    self.sim_info = _sim_info(*sim_info_list)

    class obj(object):
        pass

    self.exp_info = obj
    self.output_loc = _filehandling.create_output_locs()
def to_struct(self, override_fields=[]):
    # logging.info("self.fields: {0}".format(self.fields))
    # logging.info("override_fields: {0}".format(override_fields))
    if not override_fields:
        sample = nt("sample", self.fields)
        return {
            "fields": self.fields,
            "results": [sample(*row) for row in self.rows]
        }
    else:
        sample = nt("sample", override_fields)
        return {
            "fields": override_fields,
            "results": [sample(*row) for row in self.rows]
        }
def cria(self, mapa=""): Fab = nt("Fab", "objeto imagem") fabrica = { "&": Fab(self.maloc, f"{IMGUR}dZQ8liT.jpg"), # OCA "^": Fab(self.indio, f"{IMGUR}UCWGCKR.png"), # INDIO ".": Fab(self.vazio, f"{IMGUR}npb9Oej.png"), # VAZIO "_": Fab(self.coisa, f"{IMGUR}sGoKfvs.jpg"), # SOLO "#": Fab(self.coisa, f"{IMGUR}ldI7IbK.png"), # TORA "@": Fab(self.barra, f"{IMGUR}tLLVjfN.png"), # PICHE "~": Fab(self.coisa, f"{IMGUR}UAETaiP.gif"), # CEU "*": Fab(self.coisa, f"{IMGUR}PfodQmT.gif"), # SOL "|": Fab(self.coisa, f"{IMGUR}uwYPNlz.png") # CERCA } mapa = mapa if mapa != "" else self.mapa mapa = self.mapa lado = self.lado cena = self.v.c(fabrica["_"].imagem) self.ceu = self.v.a(fabrica["~"].imagem, w=lado*self.col, h=lado-10, x=0, y=0, cena=cena, vai=self.executa, style={"padding-top": "10px", "text-align": "center"}) sol = self.v.a(fabrica["*"].imagem, w=60, h=60, x=0, y=40, cena=cena, vai=self.esquerda) self.taba = {(i, j): fabrica[imagem].objeto(fabrica[imagem].imagem, x=i*lado, y=j*lado+lado, cena=cena) for j, linha in enumerate(mapa) for i, imagem in enumerate(linha)} cena.vai() return cena
class PastMetrics(nt('PastMetrics', 'time, abst, abstime, nhyp')):
    """
    Tuple storing the information needed to evaluate an interpretation up to
    a specific time, so that older observations can be discarded.
    """
    __slots__ = ()

    def diff(self, other):
        """
        Obtains the difference between two PastMetrics tuples, returned as a
        numpy array with three components. The *time* attribute is excluded
        from the difference.
        """
        return np.array((self.abst - other.abst,
                         self.abstime - other.abstime,
                         self.nhyp - other.nhyp))

    def patch(self, patch):
        """
        Obtains a new PastMetrics object by applying a difference array,
        as obtained from the *diff* method.

        Parameters
        ----------
        patch:
            Array, list or tuple with exactly three numerical values.
        """
        return PastMetrics(self.time, *np.array(self[1:] + patch))
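# A minimal usage sketch for PastMetrics above (illustrative values, not from
# the original module): `diff` excludes *time*, and `patch` re-applies the
# resulting array to recover the newer metrics.
m_new = PastMetrics(time=10.0, abst=5.0, abstime=4.0, nhyp=3)
m_old = PastMetrics(time=8.0, abst=2.0, abstime=1.0, nhyp=1)
delta = m_new.diff(m_old)        # array([3., 3., 2.])
restored = m_old.patch(delta)    # PastMetrics(time=8.0, abst=5.0, abstime=4.0, nhyp=3)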
def _eval_formattedvalue(self, node):
    if node.format_spec:
        # from https://stackoverflow.com/a/44553570/260366
        format_spec = self._eval(node.format_spec)
        r = r"(([\s\S])?([<>=\^]))?([\+\- ])?([#])?([0])?(\d*)([,])?((\.)(\d*))?([sbcdoxXneEfFgGn%])?"
        FormatSpec = nt(
            "FormatSpec",
            "fill align sign alt zero_padding width comma decimal precision type",
        )
        match = re.fullmatch(r, format_spec)
        if match:
            # skip groups we are not interested in
            parsed_spec = FormatSpec(
                *match.group(2, 3, 4, 5, 6, 7, 8, 10, 11, 12))
            if int(parsed_spec.width or 0) > 100:
                raise MemoryError("Sorry, this format width is too long.")
            if int(parsed_spec.precision or 0) > 100:
                raise MemoryError(
                    "Sorry, this format precision is too long.")
        fmt = "{:" + format_spec + "}"
        return fmt.format(self._eval(node.value))
    return self._eval(node.value)
class Node(nt("Node", ["kind", "heu", "grid", "move", "parent"])):
    """Search-tree node: a namedtuple extended with a depth attribute."""

    def __new__(cls, kind, heu, grid, move, parent=None):
        return super().__new__(cls, kind, heu, grid, move, parent)

    def __init__(self, kind, heu, grid, move, parent=None):
        super().__init__()
        if parent is None:
            self.depth = 0
        else:
            self.depth = parent.depth + 1

    def __hash__(self):
        return hash(self.heu)

    def path(self):
        # Walk up the parent chain to the root node.
        move = self
        while move.parent:
            move = move.parent
        return move
def GetVASubstations(path, sub_file='Electric_Substations.shp',
                     state_file='states.shp'):
    """
    Gets the list of substations within the Virginia state polygon.

    Parameters
    ----------
    path : str
        Base directory containing the 'eia/' and 'census/' shapefiles.
    sub_file : str, optional
        Substation shapefile name. The default is 'Electric_Substations.shp'.
    state_file : str, optional
        State shapefile name. The default is 'states.shp'.

    Returns
    -------
    namedtuple
        A "substation" namedtuple whose *cord* field maps substation IDs to
        (longitude, latitude) pairs.
    """
    subs = nt("substation", field_names=["cord"])
    data_substations = gpd.read_file(path + 'eia/' + sub_file)
    data_states = gpd.read_file(path + 'census/' + state_file)
    state_polygon = list(data_states[data_states.STATE_ABBR ==
                                     'VA'].geometry.items())[0][1]
    df_subs = data_substations.loc[data_substations.geometry.within(state_polygon)]
    cord = dict([(t.ID, (t.LONGITUDE, t.LATITUDE))
                 for t in df_subs.itertuples()])
    cord = {int(k): cord[k] for k in cord}
    return subs(cord=cord)
def cria(self, mapa=""): IMGUR = "https://i.imgur.com/" """ Gera uma global interna usada na formatação do dicionário fabrica""" Fab = nt("Fab", "objeto url") """ Resgate do colections.nametuple. Criado uma nova coleção de dados, do tipo fab que acolhe informações quanto ao objeto e a url deste """ fabrica = { "#": Fab(self.coisa, f"{IMGUR}uwYPNlz.png"), # CERCA "^": Fab(self.indio, f"{IMGUR}UCWGCKR.png"), # INDIO ".": Fab(self.vazio, f"{IMGUR}npb9Oej.png"), #VAZIO "_": Fab(self.coisa, f"{IMGUR}sGoKfvs.jpg"), #SOLO "&": Fab(self.maloc, f"{IMGUR}dZQ8liT.jpg"), #OCA "@": Fab(self.barra, f"{IMGUR}tLLVjfN.png"), #PICHE "*": Fab(self.coisa, f"{IMGUR}PfodQmT.gif"), #SOL "~": Fab(self.coisa, f"{IMGUR}UAETaiP.gif"), #CEU "|": Fab(self.coisa, f"{IMGUR}ldI7IbK.png") # TORA } """Dicionário que define o tipo e a imagem do objeto para cada elemento""" mapa = mapa if mapa != "" else self.mapa #descobrir o que isso faz """Cria um cenáriocom imagem de terra de chão batido, ceu e sol""" mapa = self.mapa lado = self.lado cena = self.v.c(fabrica["_"].url) """Chama elemento da fábrica [solo] agregando ao seu atributo url para criar a cena""" self.ceu = self.v.a(fabrica["~"].url, w=lado * self.coluna, h=lado - 10, x=0, y=0, cena=cena, vai=self.executa, style={ "padding-top": "10px", "text-align": "center" }) """No argumento *vai*, associamos o clique no céu com o método **executa ()** desta classe. O *ceu* agora é um argumento de instância e por isso é referenciado como **self.ceu**. """ sol = self.v.a(fabrica["*"].url, w=60, h=60, x=0, y=20, cena=cena, vai=self.esquerda) """No argumento *vai*, associamos o clique no sol com o método **esquerda ()** desta classe.""" """Gera o elemento sol""" self.taba = {(i, j): fabrica[caracter].objeto(fabrica[caracter].url, x=i * lado, y=j * lado + lado, cena=cena) for j, linha in enumerate(mapa) for i, caracter in enumerate(linha)} """Posiciona os elementos segundo suas posições i, j na matriz mapa""" cena.vai() return cena
def depenencyGraph(sentence):
    leaf = nt('leaf', [
        'id', 'form', 'lemma', 'ctag', 'tag', 'features', 'parent',
        'pparent', 'drel', 'pdrel', 'left', 'right'
    ])
    yield leaf._make([
        0, 'ROOT_F', 'ROOT_L', 'ROOT_C', 'ROOT_P', dfd(str), -1, -1, '_',
        '_', [], []
    ])
    for node in sentence.split("\n"):
        id_, form, lemma, ctag, tag, features, parent, drel = node.split(
            "\t")[:-2]
        node = leaf._make([
            int(id_), form, lemma, ctag, tag, features, int(parent), -1,
            drel, '__PAD__', [], []
        ])
        features = dfd(str)
        for feature in node.features.split("|"):
            try:
                features[feature.split("-")[0]] = feature.split("-")[1]
            except IndexError:
                features[feature.split("-")[0]] = ''
        yield node._replace(features=features)
    yield leaf._make([
        0, 'ROOT_F', 'ROOT_L', 'ROOT_C', 'ROOT_P', dfd(str), -1, -1, '_',
        '_', [], []
    ])
class Mutation(nt('Mutation', ['pos', 'op', 'functional', 'aux'])):
    """
    Class representing a mutation.
    pos = position in the reference genome
    op = mutation operation, one of the following:
        SNP.AB: SNP from A to B (A, B are in [ACGT])
        INS.xx: insertion of xx (xx is [acgt]+)
        DEL.xx: deletion of xx (xx is [ACGT]+)
    functional = does it impact the protein?
    aux = auxiliary information dict (currently the dbSNP rsID and old notation)
    """

    def __new__(self, pos, op, functional=0, aux=None, **kwargs):
        if not aux:
            aux = dict()
        return super(Mutation, self).__new__(self, pos, op, functional, aux)

    def __repr__(self):
        return '{}:{}'.format(self.pos, self.op)

    def __str__(self):
        return self.__repr__()

    def __eq__(self, other):
        return (self.pos, self.op) == (other.pos, other.op)

    def __cmp__(self, other):
        return (self.pos, self.op).__cmp__((other.pos, other.op))

    def __hash__(self):
        return (self.pos, self.op).__hash__()

    def __ne__(self, other):
        return not self.__eq__(other)
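# A minimal usage sketch for Mutation above (illustrative position and rsID):
# equality and hashing consider only (pos, op), so the same variant with
# different functional/aux data collapses to a single set entry.
m1 = Mutation(1042, 'SNP.CT', functional=1, aux={'dbsnp': 'rs0000'})
m2 = Mutation(1042, 'SNP.CT')
assert m1 == m2
assert len({m1, m2}) == 1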
def cria(self, mapa=""): from collections import namedtuple as nt Fab = nt("Fab", "objeto imagem") fabrica = { "&": Fab(self.coisa, f"{IMGUR}dZQ8liT.jpg"), # OCA "^": Fab(self.indio, f"{IMGUR}8jMuupz.png"), # INDIO ".": Fab(self.vazio, f"{IMGUR}npb9Oej.png"), # VAZIO "_": Fab(self.coisa, f"{IMGUR}sGoKfvs.jpg"), # SOLO "#": Fab(self.coisa, f"{IMGUR}ldI7IbK.png"), # TORA "@": Fab(self.coisa, f"{IMGUR}tLLVjfN.png"), # PICHE "~": Fab(self.coisa, f"{IMGUR}UAETaiP.gif"), # CEU "*": Fab(self.coisa, f"{IMGUR}PfodQmT.gif"), # SOL "|": Fab(self.coisa, f"{IMGUR}uwYPNlz.png") # CERCA } mapa = mapa if mapa != "" else self.mapa mapa = self.mapa lado = self.lado cena = self.v.c(fabrica["_"].imagem) ceu = self.v.a(fabrica["~"].imagem, w=lado*self.col, h=lado, x=0, y=0, cena=cena) sol = self.v.a(fabrica["*"].imagem, w=60, h=60, x=0, y=40, cena=cena) self.taba = {(i, j): fabrica[imagem].objeto( fabrica[imagem].imagem, x=i*lado, y=j*lado+lado, cena=cena) for j, linha in enumerate(mapa) for i, imagem in enumerate(linha)} cena.vai() return cena
def cria(self, mapa=" "): from collections import namedtuple as nt Fab = nt("Fab", "objeto imagem") IMGUR = "https://imgur.com/" IIMGUR = "https://i.imgur.com/" fabrica = { "&": Fab(self.coisa, f"{IIMGUR}dZQ8liT.jpg"), # OCA "^": Fab(self.indio, f"{IMGUR}8jMuupz.png"), # INDIO ".": Fab(self.vazio, f"{IIMGUR}npb9Oej.png"), # VAZIO XXX[]XXX estava coisa, tinha que ser vazio "_": Fab(self.coisa, f"{IIMGUR}sGoKfvs.jpg"), # SOLO "#": Fab(self.coisa, f"{IMGUR}ldI7IbK.png"), # TORA "@": Fab(self.coisa, f"{IIMGUR}tLLVjfN.png"), # PICHE "~": Fab(self.coisa, f"{IMGUR}UAETaiP.gif"), # CEU "*": Fab(self.coisa, f"{IMGUR}PfodQmT.gif"), # SOL "+": Fab(self.coisa, f"{IMGUR}uwYPNlz.png"), # CERCA "%": Fab(self.coisa, f"{IMGUR}Ry3Vmsn.png") #ROCHA } mapa = mapa if mapa != "" else self.mapa mapa = self.mapa lado = self.lado cena = self.v.c(fabrica["_"].imagem) ceu = self.v.a(fabrica["~"].imagem, w=lado*self.col, h=lado, x=0, y=0, cena=cena, vai= self.executa) """No argumento *vai*, associamos o clique no céu com o método **executa ()** desta classe""" sol = self.v.a(fabrica["*"].imagem, w=60, h=60, x=0, y=40, cena=cena) self.taba = {(i, j): fabrica[imagem].objeto( fabrica[imagem].imagem, x=i*lado, y=j*lado+lado, cena=cena) for j, linha in enumerate(mapa) for i, imagem in enumerate(linha)} cena.vai() return cena
def cria(self, mapa=""): """ Fábrica de componentes. :param mapa: Um texto representando o mapa do desafio. """ Fab = nt("Fab", "objeto imagem") """Esta tupla nomeada serve para definir o objeto construido e sua imagem.""" fabrica = { "&": Fab(self.coisa, f"{IMGUR}dZQ8liT.jpg"), # OCA "^": Fab(self.indio, f"{IMGUR}8jMuupz.png"), # INDIO ".": Fab(self.vazio, f"{IMGUR}npb9Oej.png"), # VAZIO "_": Fab(self.coisa, f"{IMGUR}sGoKfvs.jpg"), # SOLO "#": Fab(self.coisa, f"{IMGUR}ldI7IbK.png"), # TORA "@": Fab(self.coisa, f"{IMGUR}tLLVjfN.png"), # PICHE "~": Fab(self.coisa, f"{IMGUR}UAETaiP.gif"), # CEU "*": Fab(self.coisa, f"{IMGUR}PfodQmT.gif"), # SOL "|": Fab(self.coisa, f"{IMGUR}uwYPNlz.png") # CERCA } """Dicionário que define o tipo e a imagem do objeto para cada elemento.""" mapa = mapa if mapa != "" else self.mapa """Cria um cenário com imagem de terra de chão batido, céu e sol""" mapa = self.mapa lado = self.lado print(f"cria(self, mapa={mapa}, col={len(self.mapa[0])}") cena = self.v.c(fabrica["_"].imagem) ceu = self.v.a(fabrica["~"].imagem, w=lado*self.col, h=lado, x=0, y=0, cena=cena) sol = self.v.a(fabrica["*"].imagem, w=60, h=60, x=0, y=40, cena=cena) self.taba = {(i, j): fabrica[imagem].objeto(fabrica[imagem].imagem, x=i*lado, y=j*lado+lado, cena=cena) for j, linha in enumerate(mapa) for i, imagem in enumerate(linha)} """Posiciona os elementos segundo suas posições i, j na matriz mapa""" cena.vai() return cena
def text_setup_for_feature_representation(dataset, embedding_case):
    dataset = get_equal_for_each_cat(dataset, 1000000)
    # dataset = clean_text_language(dataset)
    x = [k for k in dataset['text']]
    if embedding_case == 1:
        print("text to word sequence...")
        x_list = [text_to_word_sequence(k) for k in dataset['text']]
        print('text prepared for word2vec...')
        print(len(x_list))
        model = Word2Vec(x_list, size=800, window=5, min_count=3, workers=3)
        print("saving model...")
        model.save("word2vec_yelp_800")
        del model
    elif embedding_case == 2:
        x_1 = [i.split() for i in x]
        print('text prepared for FastText...')
        model = FastText(x_1, size=300, window=5, min_count=3, workers=3)
        print("saving model...")
        model.save("FastText_yelp_all")
        del model
    elif embedding_case == 3:
        taggedDocs = nt('taggedDocs', 'words tags')
        docs = []
        for i in range(len(x)):
            words = x[i].split()
            tag = [i]
            docs.append(taggedDocs(words, tag))
        print('Text prepared for doc2vec...')
        model = Doc2Vec(docs, size=300, window=8, min_count=3, workers=3)
        print("saving model...")
        model.save('doc2vec_yelp_all_latest')
        del model
def cria(self, mapa = " "): """Este método define uma fábrica de componentes.""" Fab = nt("Fab", "objeto imagem") fabrica = { "&": Fab(self.coisa, f"{IMGUR}dZQ8liT.jpg"), # OCA "^": Fab(self.indio, f"{IMGUR}8jMuupz.png"), # INDIO ".": Fab(self.vazio, f"{IMGUR}npb9Oej.png"), # VAZIO "_": Fab(self.coisa, f"{IMGUR}sGoKfvs.jpg"), # SOLO "#": Fab(self.coisa, f"{IMGUR}ldI7IbK.png"), # TORA "@": Fab(self.coisa, f"{IMGUR}tLLVjfN.png"), # PICHE "~": Fab(self.coisa, f"{IMGUR}UAETaiP.gif"), # CEU "*": Fab(self.coisa, f"{IMGUR}PfodQmT.gif"), # SOL "+": Fab(self.coisa, f"{IMGUR}uwYPNlz.png")} # CERCA mapa = mapa if mapa != "" else self.mapa mapa = self.mapa lado = self.lado cena = self.v.c(fabrica["_"].imagem) """No argumento *vai*, associamos o clique no céu com o método **executa ()** desta classe""" ceu = self.v.a(fabrica["~"].imagem, w=lado*self.col, h=lado, x=0, y=0, cena=cena, vai= self.executa) sol = self.v.a(fabrica["*"].imagem, w=60, h=60, x=0, y=40, cena=cena) self.taba = {(i, j): fabrica[imagem].objeto(fabrica[imagem].imagem, x=i*lado, y=j*lado+lado, cena=cena) for j, linha in enumerate(mapa) for i, imagem in enumerate(linha)} cena.vai() return cena
def unformat(cls, string, evaluate_result=True):
    '''Inverse of format. Match my format group to the string exactly.

    Return a parse.Result or parse.Match instance or None if there's no match.
    '''
    fmat_str = (cls._sep if cls._sep else ' ').join(member._format_str
                                                    for member in cls)
    # try to get extra type from precompiled parser set at initialization
    try:
        extra_types = cls._extra_types
    # parser wasn't precompiled so just assume the default
    except AttributeError:
        extra_types = dict(s=str)
    print('fmat_str:\n', fmat_str, 'string:\n', string[len(cls._prefix):],
          sep='\n')
    result = parse.parse(fmat_str, string[len(cls._prefix):], extra_types,
                         evaluate_result=evaluate_result)
    # replace default output tuple with namedtuple
    if result is not None and result.fixed:
        result.fixed = list(result.fixed)

        def is_positional_field(member_parse):
            return (member_parse[1:3] != (None, None)
                    and (member_parse[1] == ''
                         or parse.parse('{:d}', member_parse[1]) is not None
                         or parse.parse('{:d}{}', member_parse[1]) is not None))

        fixed_counts = [len([member_parse
                             for member_parse in member.parse(member._format_str)
                             if is_positional_field(member_parse)])
                        for member in cls]
        results = []
        for count in fixed_counts:
            r = []
            for _ in range(count):
                r.append(result.fixed.pop(0))
            results.append(r)
        NT = nt(cls.__name__ + 'Data', ' '.join(cls._formatters))
        result.fixed = NT(*(r if len(r) > 1 else r[0] for r in results))
    return result
def test_class_format_mixture(ALineDefClass, ALineDefMembers, ABCD_namedtuple):
    a, b, c, d = ALineDefMembers
    a_nt = nt('A', 'a')(1)
    assert ALineDefClass.format(a_nt, dict(d='d')) == ' 1 0.000000 d'
    with pytest.raises(IndexError):
        ALineDefClass.format(a_nt, dict(b=3))
    assert ALineDefClass.format(a_nt, dict(c='xx', d='bar')) == ' 1 0.000000xxbar'
def cria(self, mapa=""): """ Fábrica de componentes. :param mapa: Um texto representando o mapa do desafio. """ Fab = nt("Fab", "objeto imagem") """Esta tupla nomeada serve para definir o objeto construido e sua imagem.""" fabrica = { "&": Fab(self.maloc, f"{IMGUR}dZQ8liT.jpg"), # OCA "^": Fab(self.indio, f"{IMGUR}UCWGCKR.png"), # INDIO "`": Fab(self.indio, f"{IMGUR}nvrwu0r.png"), # INDIA "p": Fab(self.indio, f"{IMGUR}HeiupbP.png"), # PAJE ".": Fab(self.vazio, f"{IMGUR}npb9Oej.png"), # VAZIO "_": Fab(self.coisa, f"{IMGUR}sGoKfvs.jpg"), # SOLO "#": Fab(self.atora, f"{IMGUR}0jSB27g.png"), # TORA "@": Fab(self.barra, f"{IMGUR}tLLVjfN.png"), # PICHE "~": Fab(self.coisa, f"{IMGUR}UAETaiP.gif"), # CEU "*": Fab(self.coisa, f"{IMGUR}PfodQmT.gif"), # SOL "|": Fab(self.coisa, f"{IMGUR}uwYPNlz.png") # CERCA } """Dicionário que define o tipo e a imagem do objeto para cada elemento.""" mapa = mapa if mapa != "" else self.mapa """Cria um cenário com imagem de terra de chão batido, céu e sol""" mapa = self.mapa lado = self.lado cena = self.v.c(fabrica["_"].imagem) self.ceu = self.v.a(fabrica["~"].imagem, w=lado * self.col, h=lado - 10, x=0, y=0, cena=cena, vai=self.passo, style={ "padding-top": "10px", "text-align": "center" }) """No argumento *vai*, associamos o clique no céu com o método **executa ()** desta classe. O *ceu* agora é um argumento de instância e por isso é referenciado como **self.ceu**. """ sol = self.v.a(fabrica["*"].imagem, w=60, h=60, x=0, y=40, cena=cena, vai=self.executa) """No argumento *vai*, associamos o clique no sol com o método **esquerda ()** desta classe.""" self.taba = {(i, j): fabrica[imagem].objeto(fabrica[imagem].imagem, x=i * lado, y=j * lado + lado, cena=cena) for j, linha in enumerate(mapa) for i, imagem in enumerate(linha)} """Posiciona os elementos segundo suas posições i, j na matriz mapa""" cena.vai() return cena
def _ellipsify_experiment(w, nonbreaking_chunks, ellipsis): def classifed_chunks(): for chunk in nonbreaking_chunks: chunk_w = sum(len(s) for s in chunk) yield cchunk_via(chunk, chunk_w) from collections import namedtuple as nt cchunk_via = nt('CC', ('chunk', 'width')) scn = _scanner_via_iterator(classifed_chunks()) cchunks, ww = [], 0 # Keep adding more chunks while you can while scn.more: hypothetical_next_width = ww + scn.peek.width # If this additional width would put it over, stop if w < hypothetical_next_width: break cchunks.append(scn.next()) ww = hypothetical_next_width # If this additional width landed right on the money, stop if w == ww: break # If you managed to add all the chunks, you are done and happy def result(): final_w = sum(cchunk.width for cchunk in cchunks) final_chunks = [cc.chunk for cc in cchunks] return final_chunks, final_w if scn.empty: return result() # Since there were some chunks you couldn't add, you've got to ellipsify ellicchunk = cchunk_via((ellipsis, ), len(ellipsis)) # Wouldn't it be nice if it was enough just to add the ellipsis and be done while True: # If adding the ellipse landed you right on the money or under, done hypothetical_next_width = ww + ellicchunk.width if hypothetical_next_width <= w: cchunks.append(ellicchunk) return result() # The current raster roster, even though under, is too long once we # add the ellipsis. So now we backtrack. Crazy if this loops > once # If you had to backtrack so far that we are out of content, nothing. # (it's tempting to display an ellipsis, but ours is an infix operator) if 0 == len(cchunks): return result() # you get NOTHING removing = cchunks.pop() ww -= removing.width
class FrozenVec3(nt('FrozenVec3', 'x y z'), Arithvector):

    def __getattr__(self, name):
        if name[0] in 'xyz':
            swz = 'xyz'
        elif name[0] in 'rgb':
            swz = 'rgb'
        else:
            raise AttributeError(
                "FrozenVec3 has no '{}' attribute.".format(name))
        if len(name) == 1:
            attr = {'x': 'x', 'y': 'y', 'z': 'z', 'r': 'x', 'g': 'y', 'b': 'z'}
            return getattr(self, attr[name])
        elif len(name) not in (2, 3, 4):
            raise AttributeError(
                "Attribute swizzling is too long ({}).".format(len(name)))
        else:
            v = {2: Vec2, 3: FrozenVec3, 4: FrozenVec4}[len(name)]
            i = [self.x, self.y, self.z]
            try:
                return v(*(i[swz.index(ch)] for ch in name))
            except ValueError:
                raise AttributeError(
                    "FrozenVec3 '{}' swizzled with invalid attribute(s).".format(
                        name))

    # region - - -- ----==<[ OTHER ]>==---- -- - -
    def hypot(self):
        # type: () -> float
        return self.x**2 + self.y**2 + self.z**2

    def dot(self, other):
        # type: (Union[Vec3, FrozenVec3, Vec4, FrozenVec4]) -> float
        return ((self.x * other.x) + (self.y * other.y) + (self.z * other.z))

    def cross(self, other):
        # type: (Union[Vec3, FrozenVec3, Vec4, FrozenVec4]) -> Vec4
        # Standard right-handed cross product, returned as a FrozenVec4 with w=1.
        return FrozenVec4(self.y * other.z - self.z * other.y,
                          self.z * other.x - self.x * other.z,
                          self.x * other.y - self.y * other.x,
                          1.)

    def length(self):
        # type: () -> float
        return math.sqrt(self.hypot())

    def normalized(self):
        # type: () -> FrozenVec3
        magnitude = self.length()
        if magnitude != 0.:
            return FrozenVec3(
                self.x / magnitude,
                self.y / magnitude,
                self.z / magnitude,
            )
        return FrozenVec3(0., 0., 0.)
class Allele(nt('Allele', ['name', 'cnv_configuration',
                           'functional_mutations', 'suballeles'])):
    """
    cnv_configuration = key for CNV configuration
    functional_mutations = unordered set of Mutation
    suballeles = dict of Suballele
    """
    pass
def these():
    def case_via_sx(sx):
        return fake_case(*sx[1:])
    dct = {}
    dct['case'] = case_via_sx
    from collections import namedtuple as nt
    fake_case = nt('FakeCase', ('case_num', 'case_key'))
    fake_case.type = 'test_case'
    fake_case.is_plain = False
    return dct
def clean_text_for_doc2vec(data_set):
    taggedDocs = nt('taggedDocs', 'words tags')
    dataset = get_equal_for_each_cat(data_set, 30000)
    x = [k for k in dataset['text']]
    docs = []
    for i in range(len(x)):
        words = x[i].split()
        tag = [i]
        docs.append(taggedDocs(words, tag))
    create_doc2vec(docs)
def cria(self, mapa=""): IMGUR = "https://i.imgur.com/" """ Gera uma global interna usada na formatação do dicionário fabrica""" Fab = nt("Fab", "objeto url") """ Resgate do colections.nametuple. Criado uma nova coleção de dados, do tipo fab que acolhe informações quanto ao objeto e a url deste """ fabrica = { "#": Fab(self.coisa, f"{IMGUR}uwYPNlz.png"), # CERCA "^": Fab(self.indio, f"{IMGUR}8jMuupz.png"), # INDIO ".": Fab(self.vazio, f"{IMGUR}npb9Oej.png"), #VAZIO "_": Fab(self.coisa, f"{IMGUR}sGoKfvs.jpg"), #SOLO "&": Fab(self.coisa, f"{IMGUR}dZQ8liT.jpg"), #OCA "@": Fab(self.coisa, f"{IMGUR}tLLVjfN.png"), #PICHE "*": Fab(self.coisa, f"{IMGUR}PfodQmT.gif"), #SOL "~": Fab(self.coisa, f"{IMGUR}UAETaiP.gif"), #CEU "|": Fab(self.coisa, f"{IMGUR}ldI7IbK.png") # TORA } mapa = mapa if mapa != "" else self.mapa #descobrir o que isso faz mapa = self.mapa #uguala ao mapa do init lado = self.lado #iguala ao lado do init cena = self.v.c(fabrica["_"].url) """Chama elemento da fábrica [solo] agregando ao seu atributo url para criar a cena""" ceu = self.v.a(fabrica["~"].url, w=lado * self.coluna, h=lado, x=0, y=0, cena=cena, vai=self.executa) """ Chama elemento da fábrica [ceu] agregando ao seu atributo url para gerar um elemento na cena""" sol = self.v.a(fabrica["*"].url, w=60, h=60, x=0, y=40, cena=cena) """Gera o elemento sol""" self.taba = {(i, j): fabrica[caracter].objeto(fabrica[caracter].url, x=i * lado, y=j * lado + lado, cena=cena) for j, linha in enumerate(mapa) for i, caracter in enumerate(linha)} """Compreensão de Dicionário. Sintaxe: {key:value for key, value in iterable if condition} Sintaxe: {key:value for (key,value) in interable} leitura: Para cada coluna (i), linha (j) : chama caracter específico (acessa atributo) objeto(chama caracter específico (acessa atributo) url específica, posição x, posição y e cena=solo) para cada segmento de caracteres e número respectivo, para cada caracter específico e número específico """ cena.vai() return cena
class Suballele(nt('Suballele', ['name', 'alt_names', 'neutral_mutations'])):
    """
    alt_names = list of other names for this suballele
    neutral_mutations = unordered set of Mutation
    """

    def __repr__(self):
        return '{}: [{}]'.format('|'.join([self.name] + self.alt_names),
                                 ', '.join(map(str, self.neutral_mutations)))

    def __str__(self):
        return self.__repr__()
def parse_pdb(pdb_file):
    '''
    DESCRIPTION

    Parse a .pdb file for system and atomic information.

    Arguments:
        pdb_file (str): path to a formatted .pdb file.

    Returns:
        parsed_pdb (pandas.DataFrame): dataframe with the parsed information in
            columns: Record, AtomNum, AtomName, AltLoc, ResName, ChainID,
            ResNum, Insert, x, y, z, Occup, BFactor, SegID, Symbol and Charge.
    '''
    # ---- open file ----------------------------------------------------------
    with open(pdb_file, 'rt') as psf:
        lines = psf.readlines()
    # ---- parse atomic info --------------------------------------------------
    info = []
    info_tup = nt('Atom', [
        'Record', 'AtomNum', 'AtomName', 'AltLoc', 'ResName', 'ChainID',
        'ResNum', 'Insert', 'x', 'y', 'z', 'Occup', 'BFactor', 'SegID',
        'Symbol', 'Charge'
    ])
    for line in lines:
        if ('ATOM' in line) or ('HETATM' in line):
            line = line.strip()
            # Fixed-width PDB columns for each field.
            splitted = [
                line[0:6], line[6:11], line[11:16], line[16:17], line[17:21],
                line[21:22], line[22:26], line[26:30], line[30:38],
                line[38:46], line[46:54], line[54:60], line[60:66],
                line[66:76], line[76:78], line[78:80]
            ]
            splitted = [x.strip() for x in splitted]
            info.append(info_tup(*splitted))
    # ---- build dataframe ----------------------------------------------------
    parsed_pdb = pd.DataFrame(info)
    parsed_pdb = parsed_pdb.astype({
        'Record': 'str', 'AtomNum': 'int32', 'AtomName': 'str',
        'ResName': 'str', 'x': 'float32', 'y': 'float32', 'z': 'float32',
        'Occup': 'float32', 'BFactor': 'float32', 'SegID': 'str',
        'Symbol': 'str'
    })
    return parsed_pdb
def avg_function():
    n = int(input())          # number of students
    fields = input().split()  # column names, used as namedtuple field names
    total = 0
    for i in range(n):        # read one row of values per student
        data = nt('data', fields)
        field1, field2, field3, field4 = input().split()
        data = data(field1, field2, field3, field4)
        total += int(data.MARKS)  # accumulate the MARKS column
    print(total / n)
class FrozenVec2(nt('FrozenVec2', 'x y'), Arithvector):

    def __getattr__(self, name):
        if name[0] in 'xy':
            swz = 'xy'
        elif name[0] in 'uv':
            swz = 'uv'
        else:
            raise AttributeError(
                "FrozenVec2 has no '{}' attribute.".format(name))
        if len(name) == 1:
            attr = {'x': 'x', 'y': 'y', 'u': 'x', 'v': 'y'}
            return getattr(self, attr[name])
        elif len(name) not in (2, 3, 4):
            raise AttributeError(
                "Attribute swizzling is too long ({}).".format(len(name)))
        else:
            v = {2: FrozenVec2, 3: FrozenVec3, 4: FrozenVec4}[len(name)]
            i = [self.x, self.y]
            try:
                return v(*(i[swz.index(ch)] for ch in name))
            except ValueError:
                raise AttributeError(
                    "FrozenVec2 '{}' swizzled with invalid attribute(s).".format(
                        name))

    # region - - -- ----==<[ OTHER ]>==---- -- - -
    def hypot(self):
        # type: () -> float
        return self.x**2 + self.y**2

    def dot(self, other):
        # type: (Vec2) -> float
        return ((self.x * other.x) + (self.y * other.y))

    def cross(self, other):
        # type: (Vec2) -> float
        return self.x * other.y - self.y * other.x

    def length(self):
        # type: () -> float
        return math.sqrt(self.hypot())

    def normalized(self):
        # type: () -> FrozenVec2
        magnitude = self.length()
        if magnitude != 0.:
            return FrozenVec2(
                self.x / magnitude,
                self.y / magnitude,
            )
        return FrozenVec2(0., 0.)
def __init__(self): self.token_table = { re.compile('fun').search:'lambda', re.compile('->').search:'lambda_dot', re.compile('\+').search:'add', re.compile('-^>').search:'sub', re.compile('\/').search:'div', re.compile('\%').search:'mod', re.compile('\*').search:'mul', re.compile('\|').search:'head', re.compile('\(').search:'open_bracket', re.compile('\)').search:'closed_bracket', re.compile('\[').search:'open_sq_bracket', re.compile('\]').search:'closed_sq_bracket', re.compile('not').search:'not', re.compile('==').search:'equality', re.compile('and').search:'and', re.compile('or').search:'or', re.compile(';').search:'expr_sep', re.compile('^"([^"]*)"').search:'string', re.compile('^\d+$').search:'integer' } self.token = nt('node', 't_type contents') self.grammar_table = { ('add',): 'binary_op', ('sub',): 'binary_op', ('div',): 'binary_op', ('mod',): 'binary_op', ('mul',): 'binary_op', ('head',): 'binary_op', ('word',): 'expr', ('string',): 'expr', ('integer',): 'expr', ('binary_operation',): 'expr', ('func_decl',): 'expr', ('func_appl',): 'expr', ('bool',): 'expr', ('open_bracket', 'expr', 'closed_bracket'): 'expr', ('expr', 'expr_sep', 'expr'): 'expr', ('expr', 'binary_op', 'expr'): 'binary_operation', ('lambda', 'func_name', 'arg_name', 'lambda_dot', 'expr'): 'func_decl', ('expr', 'expr'): 'func_appl', ('not', 'expr'): 'bool', ('expr', 'equality', 'expr'): 'bool', ('expr', 'or', 'expr'): 'bool', ('expr', 'and', 'expr'): 'bool' }
def GetTransformers(path, fis):
    """
    Gets the network of local transformers in the county/city.
    """
    df_tsfr = pd.read_csv(path + fis + '-tsfr-data.csv',
                          header=None, names=['tid', 'long', 'lat', 'load'])
    tsfr = nt("Transformers", field_names=["cord", "load", "graph"])
    dict_cord = dict([(t.tid, (t.long, t.lat)) for t in df_tsfr.itertuples()])
    dict_load = dict([(t.tid, t.load) for t in df_tsfr.itertuples()])
    df_tsfr_edges = pd.read_csv(path + fis + '-tsfr-net.csv',
                                header=None, names=['source', 'target'])
    g = nx.from_pandas_edgelist(df_tsfr_edges)
    return tsfr(cord=dict_cord, load=dict_load, graph=g)
def test_viewer(): # Test viewer plt = optional_package('matplotlib.pyplot')[0] a = np.sin(np.linspace(0, np.pi, 20)) b = np.sin(np.linspace(0, np.pi*5, 30)) data = (np.outer(a, b)[..., np.newaxis] * a)[:, :, :, np.newaxis] data = data * np.array([1., 2.]) # give it a # of volumes > 1 v = OrthoSlicer3D(data) assert_array_equal(v.position, (0, 0, 0)) assert_true('OrthoSlicer3D' in repr(v)) # fake some events, inside and outside axes v._on_scroll(nt('event', 'button inaxes key')('up', None, None)) for ax in (v._axes[0], v._axes[3]): v._on_scroll(nt('event', 'button inaxes key')('up', ax, None)) v._on_scroll(nt('event', 'button inaxes key')('up', ax, 'shift')) # "click" outside axes, then once in each axis, then move without click v._on_mouse(nt('event', 'xdata ydata inaxes button')(0.5, 0.5, None, 1)) for ax in v._axes: v._on_mouse(nt('event', 'xdata ydata inaxes button')(0.5, 0.5, ax, 1)) v._on_mouse(nt('event', 'xdata ydata inaxes button')(0.5, 0.5, None, None)) v.set_volume_idx(1) v.set_volume_idx(1) # should just pass v.close() v._draw() # should be safe # non-multi-volume v = OrthoSlicer3D(data[:, :, :, 0]) v._on_scroll(nt('event', 'button inaxes key')('up', v._axes[0], 'shift')) v._on_keypress(nt('event', 'key')('escape')) # other cases fig, axes = plt.subplots(1, 4) plt.close(fig) v1 = OrthoSlicer3D(data, pcnt_range=[0.1, 0.9], axes=axes) aff = np.array([[0, 1, 0, 3], [-1, 0, 0, 2], [0, 0, 2, 1], [0, 0, 0, 1]], float) v2 = OrthoSlicer3D(data, affine=aff, axes=axes[:3]) assert_raises(ValueError, OrthoSlicer3D, data[:, :, 0, 0]) assert_raises(ValueError, OrthoSlicer3D, data, affine=np.eye(3)) assert_raises(TypeError, v2.link_to, 1) v2.link_to(v1) v2.link_to(v1) # shouldn't do anything v1.close() v2.close()
def __init__(self):
    self.lambda_node = nt('fun', 'func_name arg_name body')

    def apply_node_c(func_name, arg_name):
        apply_node_type = nt(func_name, 'arg_name')
        return apply_node_type(arg_name)
    self.apply_node = apply_node_c

    def bool_node_c(lhs, rel, rhs):
        bool_node_type = nt(rel, 'lhs rhs')
        return bool_node_type(lhs, rhs)
    self.bool_node = bool_node_c

    def bin_op_node_c(lhs, op, rhs):
        bin_op_node_type = nt(op, 'lhs rhs')
        return bin_op_node_type(lhs, rhs)
    self.bin_op_node = bin_op_node_c
def get_tree(self, walk=None, folder=None):
    '''
    TODO: must return a clean list of objects (name & path) regardless of the
    source of the walk, which could only be from the `backup`, `library`,
    `code` or `xml` folders.
    '''
    folder = folder.split(os.sep)[-1]
    tree = []
    library_part = nt('library_part', 'path name')
    for i in walk:
        for f in i[2]:
            if f[0] != '.':
                tree.append(library_part(path=clip_path(i[0], folder), name=f))
    # if self.tree is None:  # TODO make tree available in JSON file, as reference
    #     self.tree = tree
    return tree
def readData(trainFile, testFile):
    trainData = []
    testData = []
    titles = trainFile.readline().strip().split(",")
    Columns = nt("Columns", titles)
    csvReader = csv.reader(trainFile)
    for line in csvReader:
        columns = Columns(*line)
        trainData.append((getattr(columns, "text"), getattr(columns, "gender")))
    testFile.readline()
    csvReader = csv.reader(testFile)
    for line in csvReader:
        columns = Columns(*line)
        testData.append((getattr(columns, "text"), getattr(columns, "gender")))
    return [trainData, testData]
def next(self):
    if self.has_header and not self.header_found:
        # assume the first row is the header
        r = self.reader.next()
        # keep only alphabetic characters and lowercase the names
        r = map(lambda item: ''.join(ch for ch in item if ch.isalpha()).lower(), r)
        # generate a random name
        tuplename = hex(random.getrandbits(24))[1:]
        # make a named tuple with the header as parameter names
        self.tuplecreator = nt(tuplename, r)
        self.header_found = True
    row = self.reader.next()
    # run our type converter on it
    row = map(self.converter, row)
    if self.has_header:
        # for some reason namedtuple and tuple don't have the same
        # constructor...? how annoying
        return self.tuplecreator(*row)
    else:
        return tuple(row)
#!/usr/bin/env python
# encoding: utf-8
import codecs
import numpy as np
from collections import namedtuple as nt

Term = nt('Term', ['freq', 'left', 'right', 'aggreg', 'inner', 'score'])


def load_data(fname='candidates_statistics.csv'):
    terms = {}
    with codecs.open(fname, 'r', 'utf-8') as fp:
        for line in fp:
            term, freq, left, right, aggreg, inner, score = line.strip().split('\t')
            terms[term] = Term._make([float(freq), float(left), float(right),
                                      float(aggreg), float(inner), float(score)])
    return terms


def filter_column(terms, column_name):
    result = {}
    for text, term in terms.iteritems():
        result[text] = getattr(term, column_name)
    return result


def statistics(values):
    print "min: {}".format(np.amin(values))
    print "max: {}".format(np.amax(values))
    print "diff: {}".format(np.ptp(values))
    print "median: {}".format(np.median(values))
# python port of microkanren (µkanren)
from collections import namedtuple as nt

# lisp-like cons cells. most of the time, we'll
# actually use nts with the same shape but different names.
cons = Cell = nt('Cell', ['car', 'cdr'])
def car(cell): return cell[0]
def cdr(cell): return cell[1]
def ispair(x): return isinstance(x, tuple) and len(x) == 2

# null value as a special cell
null = ()
def isnull(cell): return cell == ()

# logic variables
Var = nt('Var', ['num'])
def eqvars(x: Var, y: Var) -> bool: return x[0] == y[0]
def isvar(x: any) -> bool: return isinstance(x, Var)

# assp: search an association list for the first key to match a predicate.
# we could use a dict in python, but that would prevent backtracking.
# probably using a javascript-style chained dict would make more sense.
def assp(c: Cell, p: (lambda Cell: bool)) -> Cell or ():
    """find value in an association list"""
    if isnull(c):
        return null
    elif p(c[0][0]):
        return c[0]
    else:
        return assp(c[1], p)  # recurse on the tail with the same predicate

# substitution lists
Subs = nt('Subs', ['head', 'tail'])
subs0 = ()
# second pass interprets the code to process the text
interp(code, labels, txt, emit, debug)

def read(path):
    return sys.stdin.read() if path == '-' else open(path).read()

# -- test suite ---

def prep(s):                         # test case syntax:
    return '\n'.join(                # ---------------------
        line[1:] if line[0] == ':'   # ':' to signal a label
        else ' ' + line              # no need to indent
        for line in s.split(';'))    # use ';' for '\n'

T = TestCase = nt('TestCase', 'asm inp out')
cases = [  # each test case is (src, txt, goal)
    T(asm="CL 'hello';OUT", inp='', out='hello'),
    T(asm="CL 'X';B SKIP;CL 'Y';:SKIP;OUT", inp='', out='X'),
    T(asm=':bye;END', inp='', out='')]

def test():
    for src, txt, goal in cases:
        out = []
        main(prep(src), txt, out.append)
        actual = '\n'.join(out)
        if actual != goal:
            print('wanted:', goal)
            print('actual:', actual)
            print(' src:', src)
            print(' txt:', txt)
""" Tools for logical inference. """ from collections import namedtuple as nt from itertools import repeat, chain from types import GeneratorType Var = nt('Var',['name']) Not = nt('Not',['x']) Par = nt('Par',['x',]) Imp = nt('Imp',['x','y']) Rfn = nt('Rfn',['x','y']) And = nt('And',['x','y']) Vel = nt('Vel',['x','y']) Xor = nt('Xor',['x','y']) Mux = nt('Mux',['x','y','z']) Sym = nt('Sym',['value']) Int = nt('Int',['value']) # Inference rule. (entailment) Rule = nt('Rule',['name','args','ants','cons']) # Modus Ponens: P→Q, P ⊢ Q MP = Rule("MP",["P","Q"], [Imp(Var('P'), Var('Q')), Var('P')], [Var('Q')]) # Modus Tollens: P→Q, ¬Q ⊢ ¬P MT = Rule("MT",["P","Q"], [Imp(Var('P'),Var('Q')), Not(Var('Q'))],
def test_viewer(): # Test viewer plt = optional_package('matplotlib.pyplot')[0] a = np.sin(np.linspace(0, np.pi, 20)) b = np.sin(np.linspace(0, np.pi*5, 30)) data = (np.outer(a, b)[..., np.newaxis] * a)[:, :, :, np.newaxis] data = data * np.array([1., 2.]) # give it a # of volumes > 1 v = OrthoSlicer3D(data) assert_array_equal(v.position, (0, 0, 0)) assert_true('OrthoSlicer3D' in repr(v)) # fake some events, inside and outside axes v._on_scroll(nt('event', 'button inaxes key')('up', None, None)) for ax in (v._axes[0], v._axes[3]): v._on_scroll(nt('event', 'button inaxes key')('up', ax, None)) v._on_scroll(nt('event', 'button inaxes key')('up', ax, 'shift')) # "click" outside axes, then once in each axis, then move without click v._on_mouse(nt('event', 'xdata ydata inaxes button')(0.5, 0.5, None, 1)) for ax in v._axes: v._on_mouse(nt('event', 'xdata ydata inaxes button')(0.5, 0.5, ax, 1)) v._on_mouse(nt('event', 'xdata ydata inaxes button')(0.5, 0.5, None, None)) v.set_volume_idx(1) v.cmap = 'hot' v.clim = (0, 3) assert_raises(ValueError, OrthoSlicer3D.clim.fset, v, (0.,)) # bad limits assert_raises(ValueError, OrthoSlicer3D.cmap.fset, v, 'foo') # wrong cmap # decrement/increment volume numbers via keypress v.set_volume_idx(1) # should just pass v._on_keypress(nt('event', 'key')('-')) # decrement assert_equal(v._data_idx[3], 0) v._on_keypress(nt('event', 'key')('+')) # increment assert_equal(v._data_idx[3], 1) v._on_keypress(nt('event', 'key')('-')) v._on_keypress(nt('event', 'key')('=')) # alternative increment key assert_equal(v._data_idx[3], 1) v.close() v._draw() # should be safe # non-multi-volume v = OrthoSlicer3D(data[:, :, :, 0]) v._on_scroll(nt('event', 'button inaxes key')('up', v._axes[0], 'shift')) v._on_keypress(nt('event', 'key')('escape')) v.close() # complex input should raise a TypeError prior to figure creation assert_raises(TypeError, OrthoSlicer3D, data[:, :, :, 0].astype(np.complex64)) # other cases fig, axes = plt.subplots(1, 4) plt.close(fig) v1 = OrthoSlicer3D(data, axes=axes) aff = np.array([[0, 1, 0, 3], [-1, 0, 0, 2], [0, 0, 2, 1], [0, 0, 0, 1]], float) v2 = OrthoSlicer3D(data, affine=aff, axes=axes[:3]) # bad data (not 3+ dim) assert_raises(ValueError, OrthoSlicer3D, data[:, :, 0, 0]) # bad affine (not 4x4) assert_raises(ValueError, OrthoSlicer3D, data, affine=np.eye(3)) assert_raises(TypeError, v2.link_to, 1) v2.link_to(v1) v2.link_to(v1) # shouldn't do anything v1.close() v2.close()
def nstruct(name):
    return struct(
        of=lambda **things: nt(name, things.keys())(**things)
    )
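# A minimal usage sketch for nstruct above. It assumes the `struct` helper
# defined elsewhere in this collection; `nstruct` only controls the name of
# the generated record type.
point = nstruct('Point').of(x=1, y=2)
assert point.x == 1 and type(point).__name__ == 'Point'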
from collections import namedtuple as nt

# Define types for Inference
# Extra stores the address for .new and .address and the class name for object
# types. Const stores the value for int constants and the length for exact
# arrays. This isn't needed for normal verification but is useful for
# optimizing the code later.
fullinfo_t = nt("fullinfo_t", ["tag", "dim", "extra", "const"])

valid_tags = ["." + _x for _x in
              "int float double long obj new init address byte short char boolean".split()]
valid_tags = frozenset([None] + valid_tags)

def _makeinfo(tag, dim=0, extra=None, const=None):
    assert tag in valid_tags
    return fullinfo_t(tag, dim, extra, const)

T_INVALID = _makeinfo(None)
T_INT = _makeinfo(".int")
T_FLOAT = _makeinfo(".float")
T_DOUBLE = _makeinfo(".double")
T_LONG = _makeinfo(".long")
T_NULL = _makeinfo(".obj")
T_UNINIT_THIS = _makeinfo(".init")
T_BYTE = _makeinfo(".byte")
T_SHORT = _makeinfo(".short")
T_CHAR = _makeinfo(".char")
T_BOOL = _makeinfo(".boolean")  # Hotspot doesn't have a bool type, but we can use this elsewhere
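# A minimal sketch of how _makeinfo above might be used (assumed, illustrative
# values: dim counts array dimensions and extra names a class for object types,
# as the comments above describe).
T_INT_ARRAY = _makeinfo(".int", dim=1)                  # int[]
T_STRING = _makeinfo(".obj", extra="java/lang/String")  # a specific object type
assert T_INT_ARRAY.dim == 1 and T_STRING.tag == ".obj"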
from collections import namedtuple as nt
from collections import deque as dq
import time
import datetime

Months = nt('month', 'year')

month_q = dq('Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split())
curr_mo = month_q.pop()
month_q.appendleft(curr_mo)

month_l = list('Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split())
print month_l

today = datetime.datetime.now()
curr_mo = today.strftime('%b')
mo = ''
while mo != curr_mo:
    mo = month_l.pop()
    month_l.insert(0, mo)
print month_l

LastThree = month_l[9:12]
LastOne = month_l[-1:]
print 'LastThree are ', LastThree
print 'LastOne is ', LastOne

t = (2000, 1, 21, 1, 2, 3, 4, 5, 6)
t = time.mktime(t)
print time.strftime("%b %d %Y %H:%M:%S", time.gmtime(t))
t = (2000, 2, 21, 1, 2, 3, 4, 5, 6)
#! /usr/bin/env python

# System imports
from __future__ import print_function, division, absolute_import, unicode_literals
from threading import Thread, Event
from datetime import datetime
from numpy.random.mtrand import RandomState
from collections import namedtuple as nt

MeasurementSpec = nt('measurement', ['ID', 'coords', 'time'])


class Sensor(Thread):

    def __init__(self, athlete, queue, ID, rate=20, noise=0.3,
                 verbose=False, seed=None):
        """
        Sensor class which gets position measurements from the athlete, adds
        noise and collects them in a queue.

        :param athlete: object yielding position data when called
        :param queue: queue to which the measurements are added
        :param ID: sensor ID
        :param rate (optional): sampling rate of the sensor in Hz, default: 20
        :param noise (optional): standard deviation of noise on a measurement in meters, default: 0.3
        :param verbose (optional): verbosity of the sensor, default: False
        :param seed (optional): seed for noise generation, default: None
        """
        super(Sensor, self).__init__()
        self.queue = queue
#! /usr/bin/env python
# Copyright (C) 2015 ETH Zurich, Institute for Astronomy

# System imports
from __future__ import print_function, division, absolute_import, unicode_literals

# External modules
from numpy import sum, matrix, diag, sqrt, corrcoef, isclose, trace, identity,\
    atleast_2d, zeros, allclose, log, ndarray
from collections import namedtuple as nt
from numpy.linalg.linalg import eig, det

MomentsSpec = nt('moments', ['mean', 'cov', 'icov'])


class Surprise(object):
    """
    Module for estimating relative entropy and surprise from samples or
    moments of the distributions, assuming they are Gaussian.

    For a reference see arXiv:1402.3593.
    """

    def __init__(self, atol=1e-08, rtol=1e-05):
        """
        Constructor of surprise module.

        :param atol: absolute tolerance for covariance matrix inversion
        :param rtol: relative tolerance for covariance matrix inversion
        """
        self.atol = atol
some evidence shows that named tuples are "slow".
"""
__all__ = ['PositionPrecept', 'TimePrecept', 'DatumPrecept', 'QueryPrecept',
           'ActionPrecept', 'PropositionPrecept', 'SpeechPrecept',
           'MoodPrecept']

from collections import namedtuple as nt

# used to remember where entities are
PositionPrecept = nt('PositionPrecept', 'entity, position')

# used to remember what the time is
TimePrecept = nt('TimePrecept', 'time')

# used to remember a single piece of data
DatumPrecept = nt('DatumPrecept', 'entity, name, value')

# used to query another agent
QueryPrecept = nt('QueryPrecept', 'entity, name, value')

# used to remember "seeing" (or hearing) an action performed
ActionPrecept = nt('ActionPrecept', 'entity, action, object')

# used by one agent to suggest an action take place
# this is used to coordinate actions between agents
from collections import namedtuple as nt

from .. import floatutil as fu
from ..verifier import verifier_types as vtypes

slots_t = nt('slots_t', ('locals', 'stack'))

def _localsAsList(self):
    return [t[1] for t in sorted(self.locals.items())]
slots_t.localsAsList = property(_localsAsList)

# types
SSA_INT = 'int', 32
SSA_LONG = 'int', 64
SSA_FLOAT = 'float', fu.FLOAT_SIZE
SSA_DOUBLE = 'float', fu.DOUBLE_SIZE
SSA_OBJECT = 'obj',

def verifierToSSAType(vtype):
    vtype_dict = {vtypes.T_INT: SSA_INT,
                  vtypes.T_LONG: SSA_LONG,
                  vtypes.T_FLOAT: SSA_FLOAT,
                  vtypes.T_DOUBLE: SSA_DOUBLE}
    # These should never be passed in here
    assert vtype.tag not in ('.new', '.init')
    vtype = vtypes.withNoConst(vtype)
    if vtypes.objOrArray(vtype):
        return SSA_OBJECT
    elif vtype in vtype_dict:
        return vtype_dict[vtype]
    return None
# ax = fig.gca(projection='3d')
#
# ax.plot_trisurf(data[0], data[1], data[2], cmap=cm.jet, linewidth=0.2)
#
# plt.show()


class MYO(object):
    def __init__(self):
        self.a = (1, 2, 3)
        self.b = (56, -8.4)

    def __eq__(self, other):
        return False


from collections import namedtuple as nt

N1 = nt('N1', ['x', 'y'])


def _test_tuple_equal():
    myo1 = MYO()
    t1 = (myo1, myo1)
    myo2 = MYO()
    t2 = (myo2, myo2)


def f(x):
    return x**2
"""Miscellaneous small classes""" import numpy as np from collections import namedtuple as nt from . import units try: isinstance("", basestring) Strings = (str, unicode) except NameError: Strings = (str,) Quantity = units.Quantity Numbers = (int, float, np.number, Quantity) PosyTuple = nt('PosyTuple', ['exps', 'cs', 'varlocs', 'substitutions']) CootMatrixTuple = nt('CootMatrix', ['row', 'col', 'data']) class CootMatrix(CootMatrixTuple): "A very simple sparse matrix representation." shape = (None, None) def append(self, row, col, data): if row < 0 or col < 0: raise ValueError("Only positive indices allowed") self.row.append(row) self.col.append(col) self.data.append(data) def update_shape(self): self.shape = (max(self.row)+1, max(self.col)+1)
# metagrammar parser for python's Grammar file
import re, sys
from collections import namedtuple as nt
from contextlib import contextmanager
from itertools import chain
from warnings import warn

# namedtuple types to model the grammar rules
Tok = nt('Tok', ['text'])          # special token (NAME, NUMBER, INDENT, etc)
Lit = nt('Lit', ['text'])          # string literal
Seq = nt('Seq', ['args'])          # sequence of patterns
Alt = nt('Alt', ['args'])          # set of alternative patterns
Rep = nt('Rep', ['args'])          # 1..n repetitions ('repeat')
Opt = nt('Opt', ['args'])          # 0..1 repetitions ('optional')
Orp = nt('Orp', ['args'])          # 0..n repetitions ('optional repeat')
Def = nt('Def', ['name', 'args'])  # rule definition. works like named Seq
Ref = nt('Ref', ['text'])          # reference a known rule
Grammar = nt('Grammar', ['rules'])
Token = nt('Token', ['kind', 'text'])


class MetaScanner:
    lexer = re.compile(r"""
        (?P<NEWLINE> \n )        # this comes first so SPACE doesn't match it.
      | (?P<SPACE>   \s+ )
      | (?P<COMMENT> [#].*$ )
      | (?P<SPECIAL> [A-Z]+ )    # special tokens (in upper case)
      | (?P<RULENAME> [a-z_]+ )
      | (?P<STRING>  '[^']*' )   # luckily, none contain escapes or quotes
      | (?P<COLON>   : )
def struct(**things):
    return nt('Struct', things.keys())(**things)
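# A minimal usage sketch for the struct helper above: it builds an ad-hoc
# immutable record whose field order follows keyword order (Python 3.7+ dicts).
point = struct(x=1, y=2)
assert point.x == 1 and point.y == 2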
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import os, os.path as p
from collections import namedtuple as nt
import re
import datetime as dt

Activity = nt('Activity', 'name on_time fin_time tags notes')


def get_sheet_content(sheet_file):
    lines = []
    with open(sheet_file) as f:
        for line in f:
            line = line[:-1]
            lines.append('' if line.isspace() else line)
    return '\n'.join(lines)


def parse_time(timestr):
    return dt.datetime.strptime(timestr, '%a %b %d %H:%M:%S %Z %Y')


def parse_activity(content):
    lines = content.splitlines()
    project_line = lines[0].strip()
# Vanilla
def tt():
    def rt():
        print "inside"
    # writing just rt instead of rt() will not call the rt function, unlike a decorator
    return rt

tt()
# to do: prime number

from collections import namedtuple as nt
Emp = nt('Emp', 'id,no,vote')
t1 = Emp(1, 2, 3)
print t1.id
print t1.no
print t1.vote

import collections
a = [1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 3, 4, 5, 6, 7]
print collections.Counter(a)

Jan = Feb = Mar = range(1, 32)
print Jan
print Feb
print Mar
from datetime import datetime as dt
import datetime
from dateutil import tz
import tkmdecrypt as td
import compression as c

# region initial definitions
log.basicConfig(format='%(asctime)s %(levelname)s %(name)s %(message)s',
                filename='tkm.log', level=log.INFO)
logger = log.getLogger(__name__)
logger.root.name = 'tkm.py'

SENSOR_DATA = nt('SensorData', 'id speed color')
TKM_DATA = nt('TkmData', 'date e_tag filename data')

_stop_events = []
_terminate = False
_run_time = -1
# _file_pid = ""


def __init__():
    if '__file__' in globals():
        cdir = os.path.dirname(os.path.realpath(__file__))
    else:
        cdir = os.getcwd()
    _dir = nt('DirObject', 'cur data static')(
        *[joinp(cdir, d) for d in ['', 'database/', 'static_files/']])
""" Contains a set of useful precept types for use with an environment. """ from collections import namedtuple as nt # used to remember where entities are PositionPrecept = nt('PositionPrecept', 'entity, position') # used to remember what the time is TimePrecept = nt('TimePrecept', 'time') # used to remember a single piece of data DatumPrecept = nt('DatumPrecept', 'name, value')
def __init__(self, **kwds):
    self._attrs = nt(self.__class__.__name__, self._fields)(**kwds)
def bad():
    # Currently don't support aliased imports of namedtuple
    from collections import namedtuple as nt
    Thing = nt('Thing', 'a b c')
#! /usr/bin/env python

# System imports
from __future__ import print_function, division, absolute_import, unicode_literals
from threading import Thread
from Queue import Empty
from numpy import sqrt, zeros, matrix, eye, diag, log
from streamanalysis.utils import get_norm
from collections import namedtuple as nt

ResultSpec = nt('result', ['pos', 'pos_err', 'vel', 'vel_err', 'tot_vel',
                           'dist', 'stationary', 'time'])
FilterSpec = nt('filter', ['X', 'P'])

POS_IDX = [0, 2]
VEL_IDX = [1, 3]


class Analyser(Thread):

    def __init__(self, queue, pos0=[50.0, 50.0], vel0=[0.0, 0.0], noise=0.3,
                 dt0=1./20, acc_noise=4.0, wait=1.0):
        """
        Analysis thread for position data from sensors using a Kalman Filter.
        Stores results of the individual sensors in a dictionary at self.sensors.

        :param queue: queue from which the sensor data is processed
        :param pos0 (optional): list of initial positions for the Kalman