def collectData(info):
    i, location, ID = info
    print('Start', ID)
    disablePrint()
    agent = Agent(memory=i)
    env = Environment(render=False).fruitbot
    while i > 0:
        obs = clean(env.reset())
        hn = torch.zeros(2, 1, hidden_size, device=device)
        cn = torch.zeros(2, 1, hidden_size, device=device)
        while i > 0:
            i -= 1
            # hn, cn = hn.detach(), cn.detach()
            act, obs_old, h0, c0, hn, cn = agent.choose(obs, hn, cn)
            obs, rew, done, _ = env.step(act)
            obs = agent.remember(obs_old.detach(), act, clean(obs).detach(), rew,
                                 h0.detach(), c0.detach(), hn.detach(), cn.detach(),
                                 int(not done))
            env.render()
            if done:
                break
    env.close()
    saveData(agent, location, ID)
    enablePrint()
    print('Done', ID)
    return os.getpid()
def startup_end(self):
    name = self.levelgain['Character Name?']
    cl = self.levelgain['Class to gain a level in?']
    path = 'character/' + h.clean(name) + '.character'
    if (os.path.exists(iface.JSONInterface.OBJECTSPATH + path)):
        self.record = iface.JSONInterface(path)
    else:
        gui.ErrorMessage('A character with that name was not found.')
    clpath = 'class/' + h.clean(cl) + '.class'
    if (not os.path.exists(iface.JSONInterface.OBJECTSPATH + clpath)):
        gui.ErrorMessage('A class with that name was not found.')
    self.character = c.Character(self.record)
    pattern = r'\s*([a-zA-Z\']+)\s*(\(([a-zA-Z\'\s]+)\))?'
    desc_ = re.match(pattern, cl).groups()
    desc = [str(item) for item in desc_ if item is not None]
    # desc should be a class and possibly a subclass name
    (rec, level) = self.character.classes.level_up(*desc)
    self.core = FeaturesAtLevel(self.f, rec.record, level)
    # Set new number of hit dice
    size = rec.hit_dice
    hdpath = '/HP/HD/' + size + '/maxnumber'
    hdn = self.character.get(hdpath)
    self.character.set(hdpath, hdn + 1)
    # Set new number of hit points
    conmod = h.modifier(self.character.get('/abilities/Constitution'))
    if (self.levelgain['Average or roll for HP?'] == 'average'):
        gain = r.roll(size, 'average') + .5
    elif (self.levelgain['Average or roll for HP?'] == 'roll'):
        gain = r.roll(size)
    current = self.character.get('/HP/max')
    self.character.set('/HP/max', current + gain + conmod)
    self.draw_static()
    self.container.deiconify()
def collectData(agent):
    print('Start', agent.memory.size)
    disablePrint()
    i = agent.memory.size
    env = Environment(render=False).fruitbot
    while i > 0:
        obs = clean(env.reset())
        hn = torch.zeros(2, 1, hidden_size, device=device)
        cn = torch.zeros(2, 1, hidden_size, device=device)
        while i > 0:
            i -= 1
            # hn, cn = hn.detach(), cn.detach()
            act, obs_old, h0, c0, hn, cn = agent.choose(obs, hn, cn)
            obs, rew, done, _ = env.step(act)
            obs = agent.remember(obs_old.detach(), act, clean(obs).detach(), rew,
                                 h0.detach(), c0.detach(), hn.detach(), cn.detach(),
                                 int(not done))
            env.render()
            if done:
                break
    env.close()
    enablePrint()
    print('Done')
    return agent.memory.memory
def run(command=None):
    global quoted
    err = "Macro error:\n\n"
    if command is None:
        command = re.sub(r'@[@\w]+', '', message.text.split()[0])
    if command in MACROS:
        variety = MACROS[command].variety
        content = MACROS[command].content
        if MACROS[command].nsfw and name in bot_globals['SFW'].keys():
            if bot_globals['SFW'][name]:
                known("{}{} is NSFW, this chat has been marked as SFW by the admins!"
                      .format(err, command))
                return
        if variety == Macro.EVAL:
            symbols = {'INPUT': clean(message.text),
                       'HIDDEN': MACROS[command].hidden,
                       'PROTECTED': MACROS[command].protected}
            evaluate(bot, update, bot_globals, cmd=content, symbols=symbols)
        elif variety == Macro.TEXT:
            known(content)
        elif variety == Macro.PHOTO:
            photo(content)
        elif variety == Macro.E621:
            if 'e621 command' in bot_globals['PLUGINS'].keys():
                bot.sendChatAction(chat_id=message.chat_id, action=Ca.UPLOAD_PHOTO)
                bot_globals['PLUGINS']['e621 command'].e621(
                    bot, update, bot_globals,
                    tags='{} {}'.format(content, clean(message.text)))
            else:
                update.message.reply_text(err + "e621 plugin isn't installed.")
        elif variety == Macro.MARKOV:
            if 'markov generator' in bot_globals['PLUGINS'].keys():
                bot.sendChatAction(chat_id=message.chat_id, action=Ca.TYPING)
                bot_globals['PLUGINS']['markov generator'].markov(
                    bot, update, bot_globals,
                    seed='{}{}'.format(content,
                                       (bool(clean(message.text)) * ' ') + clean(message.text)))
            else:
                update.message.reply_text(err + "Markov generator plugin isn't installed.")
        elif variety == Macro.INLINE:
            quoted = None
            known(err + "That's an inline macro! Try @yosho_bot " + command)
        elif variety == Macro.ALIAS:
            run(content)
    else:
        invalid('Error:\n\nUnknown command: ' + command)
def _add_or_update_addresses(addresses, user_id=None, is_updating=None,
                             existing_data=None, candidate_id=None):
    # Remove duplicates
    addresses = remove_duplicates(addresses)

    # Aggregate formatted & validated address data
    validated_addresses_data = []

    # Check if any of the addresses is set as the default address
    addresses_have_default = [isinstance(address.get('is_default'), bool)
                              for address in addresses]

    for index, address in enumerate(addresses):
        zip_code = sanitize_zip_code(address['zip_code']) if address.get('zip_code') else None
        city = clean(address.get('city'))
        iso3166_subdivision = address.get('iso3166_subdivision')

        address_dict = dict(
            address_line_1=address.get('address_line_1'),
            address_line_2=address.get('address_line_2'),
            zip_code=zip_code,
            city=city,
            iso3166_subdivision=iso3166_subdivision,
            iso3166_country=address.get('iso3166_country'),
            po_box=clean(address.get('po_box')),
            # First address is the default unless any address carries an explicit flag
            is_default=index == 0 if not any(addresses_have_default) else address.get('is_default'),
            coordinates=get_coordinates(zipcode=zip_code, city=city,
                                        state=iso3166_subdivision))

        # Remove empty data from address dict
        address_dict = purge_dict(address_dict)

        # Track updates
        if is_updating:
            track_updates(user_id=user_id,
                          candidate_id=candidate_id,
                          existing_data=existing_data.get('addresses')[index],
                          new_data=address_dict,
                          attribute='address')

        # Prevent adding empty records
        if not address_dict:
            continue

        validated_addresses_data.append(address_dict)

    return validated_addresses_data
def get_graph_spec(source):
    q = ''
    if helpers.is_bad(source[1]):
        print(source[1])
        return q
    if source[1].find('.') == -1:
        return q
    url = '<http://' + urllib.parse.quote(source[1]) + '>'
    url_item = '<http://' + urllib.parse.quote(source[1]) + '/item>'
    graph = ' GRAPH ' + url
    # `match` must be defined before it is interpolated into the first query
    match = '{' + graph + '{ ?item wdt:P17 ?country}}'

    # url
    q += ('DELETE WHERE' + graph + ' {?item wdt:P17 ?country.}}; '
          'INSERT DATA { ' + graph + '{' + url_item + ' wdt:P17 ' +
          urllib.parse.quote(source[1]) + "' }} WHERE " + match + ';')

    # country
    if not helpers.is_bad(source[0]):
        country_code = get_country_code(source[0])
        if not helpers.is_bad(country_code):
            c = country_code
        else:
            c = helpers.clean(source[0])
        q += ('DELETE' + match + ' INSERT { ' + graph + ' {' + url_item +
              ' wdt:P17 ' + c + ' }} WHERE ' + match + ';')
    return q
def merge(bot, update):
    """<state> <state>: merge first state into second"""
    global TRANSITIONS, STATES
    expr = clean(update.message.text)
    states = expr.split()

    if len(states) != 2:
        update.message.reply_text(text='Proper syntax is /merge <state> <state>')
        return

    if any(s not in STATES for s in states):
        update.message.reply_text(
            text='One or both input states not found in markov states.')
        return

    ind = STATES.index(states[0])
    del STATES[ind]

    keep = [c for c in range(TRANSITIONS.shape[0]) if c != ind]
    row = TRANSITIONS.getrow(ind)[:, keep]
    col = TRANSITIONS.getcol(ind)[keep, :]
    loop = TRANSITIONS[ind, ind]
    TRANSITIONS = TRANSITIONS[keep, :][:, keep]

    ind = STATES.index(states[1])
    TRANSITIONS[ind, :] += row
    TRANSITIONS[:, ind] += col
    TRANSITIONS[ind, ind] += loop

    update.message.reply_text(
        text='Merged state "{}" into state "{}".'.format(*states))
def convergence(bot, update):
    """
    /converge <state> <steps>: number of states a starting state converges to
    /diverge <state> <steps>: displays if a starting state diverges at least once
    """
    expr = clean(update.message.text).split()
    if not expr:
        update.message.reply_text(
            text='Syntax is /converge <state> <optional steps int>')
        return

    state = process_token(expr[0])
    if state not in set(STATES):
        update.message.reply_text(
            text='"{}" is not in markov states.'.format(state))
        return
    else:
        state_index = STATES.index(state)

    if len(expr) > 1 and expr[1].isnumeric() and int(expr[1]) < 10:
        steps = int(expr[1])
    else:
        steps = 10

    # create and populate boolean transition matrix
    shape = TRANSITIONS.shape
    transitions = lil_matrix(shape, dtype=bool)
    x, y = find(TRANSITIONS)[:2]
    for i, v in enumerate(x):
        transitions[v, y[i]] = True
    transitions = transitions.asformat('csr')

    if update.message.text.startswith('/converge'):
        transitions **= steps
        converge = len(find(transitions.getrow(state_index))[1])
        update.message.reply_text(
            text='State "{}" converges to {} possible final state{} after {} step{}.'
            .format(state, converge, add_s(converge), steps, add_s(steps)))
    else:
        copy = transitions.copy()
        for s in range(steps + 1):
            branches = find(transitions.getrow(state_index))[1]
            if len(branches) > 1:
                update.message.reply_text(
                    text='State "{}" diverges at {} step{} where it has {} possible branches.'
                    .format(state, s, add_s(s), len(branches)))
                return
            if s != steps:
                transitions *= copy
        update.message.reply_text(
            text="""State "{}" doesn't diverge within {} step{}.""".format(
                state, steps, add_s(steps)))
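# Illustration only (not part of the bot): why the boolean matrix power above
# gives k-step reachability. Row i of T ** k is nonzero at column j exactly when
# state j is reachable from state i in k steps. The tiny matrix here is hypothetical.
from scipy.sparse import lil_matrix, find

T = lil_matrix((3, 3), dtype=bool)
T[0, 1] = T[1, 2] = True             # tiny chain: 0 -> 1 -> 2
T = T.asformat('csr')
print(find((T ** 2).getrow(0))[1])   # [2]: after two steps, state 0 reaches only state 2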
def insert(bot, update):
    """insert new state in markov states"""
    expr = clean(update.message.text)
    state = expr.split()[0]
    accumulator(bot, update, insert=state)
    update.message.reply_text(
        text='Inserted "{}" into markov states.'.format(state))
def register_change(self, var):
    Chooser.register_change(self, var)
    self.subclassname = var.get()
    if (self.subclassname):
        # Add the subclass and display extra options
        clm = re.search(r'(\w+)\.class', self.fullpath)
        self.classname = clm.group(1)
        name = 'class/{}.{}.sub.class'.format(h.clean(self.classname),
                                              h.clean(self.subclassname))
        if (os.path.isfile(iface.JSONInterface.OBJECTSPATH + name)):
            self.clear_subframe()
            rec = iface.JSONInterface(name)
            lvm = re.search(r'/(\d+)/.*$', self.fullpath)
            lv = int(lvm.group(1))
            self.subclassfeatures = FeaturesAtLevel(self.subfeatures, rec, lv)
            self.draw_dynamic()
        else:
            raise FileNotFoundError(name)
def startup_end(self):
    name = self.charactername['Character Name?']
    path = 'character/' + h.clean(name) + '.character'
    if (os.path.exists(iface.JSONInterface.OBJECTSPATH + path)):
        self.record = iface.JSONInterface(path)
    else:
        gui.ErrorMessage('A character with that name was not found.')
    self.character = c.Character(self.record)
    self.core = ConditionsDisplay(self.f, self.character)
    self.draw_static()
    self.container.deiconify()
def remove_dups():
    domains = []
    cc_urls = read_cc()
    lines = read_all()
    old_urls = [line[1] for line in lines]

    # make list of old Private Domains (without TLD);
    # note: subdomain and domain are joined without a separator
    for item in old_urls:
        url = helpers.clean(item)
        o = tldextract.extract(url)
        domain = o.subdomain + o.domain
        if domain not in domains:
            domains.append(domain)

    for item in cc_urls:
        url = helpers.clean(item)
        o = tldextract.extract(url)
        domain = o.subdomain + o.domain
        # append URL if Private Domain is unique
        if domain not in domains:
            lines.append(["", url, "", "", "", "", "", "commoncrawl",
                          "", "", "", "", ""])
    return lines
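# Illustration only: how tldextract splits a URL. Because remove_dups() above
# concatenates subdomain and domain without a dot, 'www.example.com' and a
# hypothetical 'wwwexample.com' would collide under this de-duplication key.
import tldextract

o = tldextract.extract('http://www.example.com/page')
print(o.subdomain, o.domain, o.suffix)  # www example com
print(o.subdomain + o.domain)           # wwwexample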
def _add_or_update_emails(emails, is_updating, user_id=None,
                          existing_data=None, candidate_id=None):
    # Remove duplicates
    emails = remove_duplicates(emails)

    # Aggregate formatted & validated email data
    validated_emails_data = []

    # Check if any of the emails is set as the default email
    emails_have_default = [isinstance(email.get('is_default'), bool)
                           for email in emails]

    for i, email in enumerate(emails):
        # Label
        label = (email.get('label') or '').title()
        if not label or label not in CandidateEmail.labels_mapping.keys():
            label = 'Other'

        # First email will be set as default if no other email is set as default
        default = i == 0 if not any(emails_have_default) else email.get('is_default')

        email_address = clean(email.get('address'))
        email_dict = dict(label=label, address=email_address, is_default=default)

        # Email address must be valid
        if email_address and not is_valid_email(email_address):
            raise InvalidUsage(error_message='Invalid email address')

        # Remove empty data
        email_dict = purge_dict(email_dict, strip=False)

        # Track updates
        if is_updating:
            track_updates(user_id=user_id,
                          candidate_id=candidate_id,
                          existing_data=existing_data.get('emails')[i],
                          new_data=email_dict,
                          attribute='email')

        # Prevent adding empty records
        if not email_dict:
            continue

        validated_emails_data.append(email_dict)

    return validated_emails_data
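# Illustration only: the default-selection rule shared by the address and email
# helpers above, shown on plain dicts. The first entry wins only when no entry
# carries an explicit boolean 'is_default' flag. Sample data is hypothetical.
emails = [{'address': 'a@example.com'},
          {'address': 'b@example.com', 'is_default': True}]
have_default = [isinstance(e.get('is_default'), bool) for e in emails]
for i, e in enumerate(emails):
    default = i == 0 if not any(have_default) else e.get('is_default')
    print(e['address'], bool(default))  # a@example.com False / b@example.com True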
def _add_or_update_preferred_locations(preferred_locations, is_updating=False,
                                       user_id=None, candidate_id=None,
                                       existing_data=None):
    # Remove duplicates
    preferred_locations = remove_duplicates(preferred_locations)

    # Aggregate formatted & validated preferred locations' data
    validated_preferred_locations_data = []

    for index, preferred_location in enumerate(preferred_locations):
        preferred_location_dict = dict(
            iso3166_country=clean(preferred_location.get('iso3166_country')).upper(),
            iso3166_subdivision=clean(preferred_location.get('iso3166_subdivision')).upper(),
            city=clean(preferred_location.get('city')),
            zip_code=sanitize_zip_code(preferred_location.get('zip_code')))

        # Remove empty data
        preferred_location_dict = purge_dict(preferred_location_dict, strip=False)

        # Track updates
        if is_updating:
            track_updates(user_id=user_id,
                          candidate_id=candidate_id,
                          existing_data=existing_data.get('preferred_location')[index],
                          new_data=preferred_location_dict,
                          attribute='preferred_location')

        # Prevent adding empty records
        if not preferred_location_dict:
            continue

        validated_preferred_locations_data.append(preferred_location_dict)

    return validated_preferred_locations_data
def leave(bot, update):
    """leaves specified chat/chatid"""
    chat = clean(update.message.text)
    try:
        if chat.replace('-', '').isnumeric():
            bot.leave_chat(chat_id=int(chat))
        else:
            bot.leave_chat(chat_id=chat)
        update.message.reply_text(text='Left chat {}.'.format(chat))
    except TelegramError:
        update.message.reply_text(
            text='Error leaving chat {}.\nMake sure chat name/id is valid!'.format(chat))
def write(self):
    data = self.export()
    name = h.clean(data['name'])
    if (not name):
        raise FileNotFoundError
    path = iface.JSONInterface.OBJECTSPATH + self.basepath.format(name)
    if (isfile(path)):
        proceed = tk.messagebox.askyesno(message='You are overwriting an'
                                                 ' existing file.\nContinue'
                                                 ' anyway?')
        if (not proceed):
            raise FileExistsError(path)
    with open(path, 'w') as outfile:
        json.dump(data, outfile, indent=2)
def hook(self):
    main = 'class/{}.class'
    sub = 'class/{}.{}.sub.class'
    super_ = 'class/{}.super.class'
    for (C, S, L) in zip(self._classes, self._subclasses, self.levels):
        C = h.clean(C)
        S = h.clean(S)
        file_ = main.format(C)
        subfile_ = sub.format(C, S)
        mainclass = iface.JSONInterface(file_)
        try:
            subclass = iface.JSONInterface(subfile_)
            subclassfound = True
        except FileNotFoundError:
            subclassfound = False
        superclasses = [iface.JSONInterface(super_.format(name))
                        for name in mainclass.get('/superclass')]
        if (subclassfound and subclass.get('/superclass')):
            superclasses.extend([iface.JSONInterface(super_.format(name))
                                 for name in subclass.get('/superclass')])
        if (subclassfound):
            jf = iface.LinkedInterface(*superclasses, mainclass, subclass)
        else:
            jf = iface.LinkedInterface(*superclasses, mainclass)
        self.classes.update({C: c.Class(jf, L)})
def begin_end(self):
    name = self.charactername['Character name?']
    filename = 'character/{}.character'.format(h.clean(name))
    self.record = iface.JSONInterface(filename)
    self.character = c.Character(self.record)
    self.numbers = NumberDisplay(self.f, self.character)
    self.handler = SpellSection(self.f, self.record, self.character,
                                self.numbers, self.effects)
    self.prepare = tk.Button(self.excessblock, text='Prepare a spell',
                             command=self.detail.prepare_start)
    self.unprepare = tk.Button(self.excessblock, text='Unprepare a spell',
                               command=self.detail.unprepare_start)
    self.container.deiconify()
    self.draw_static()
def rank_sentences(doc, tfidf_dict, include_words=False):
    """Given a document and its tfidf dict {word: word_tfidf_score},
    return a list of ranked sentences with their scores:
    [(sent1, score1), (sent2, score2), ...]"""
    ranked_sentences = []
    for sentence in helpers.sentences(doc):
        word_list = helpers.clean(sentence)
        if len(word_list) < 5:
            continue  # if fewer than 5 words in sentence, skip it
        score = sum([tfidf_dict[word[0]] for word in word_list])
        ranked_sentences.append((sentence, score))
    return ranked_sentences
def __init__(self, name=None, above=None, left_of=None, overlaps=None,
             inside=None, shape=Shape.unknown, alignment=Alignment.center,
             fill=Fill.no, angle=Angle._0, size=Size.small,
             width=Width.small, height=Height.small):
    if above is None:
        self.above = frozenset()
    else:
        self.above = frozenset(above.split(','))
    if left_of is None:
        self.left_of = frozenset()
    else:
        self.left_of = frozenset(left_of.split(','))
    if overlaps is None:
        self.overlaps = frozenset()
    else:
        self.overlaps = frozenset(overlaps.split(','))
    if inside is None:
        self.inside = frozenset()
    else:
        self.inside = frozenset(inside.split(','))
    self.name = name
    self.shape = shape
    self.alignment = alignment
    self.fill = fill
    self.angle = angle
    self.size = size
    self.width = width
    self.height = height
    # Coerce string attribute values into their enum equivalents
    if isinstance(shape, str):
        self.shape = getattr(Shape, clean(shape))
    if isinstance(alignment, str):
        self.alignment = getattr(Alignment, clean(alignment))
    if isinstance(fill, str):
        self.fill = getattr(Fill, clean(fill))
    if isinstance(angle, str):
        self.angle = getattr(Angle, '_' + str(angle))
    if isinstance(size, str):
        self.size = getattr(Size, clean(size))
    if isinstance(width, str):
        self.width = getattr(Width, clean(width))
    if isinstance(height, str):
        self.height = getattr(Height, clean(height))
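# Illustration only: the string-to-enum coercion pattern used in __init__ above,
# shown with a hypothetical minimal Shape enum. Numeric members such as Angle._0
# need the underscore prefix because Python identifiers cannot start with a digit.
import enum

class Shape(enum.Enum):
    unknown = 0
    square = 1

value = 'square'
shape = getattr(Shape, value) if isinstance(value, str) else value
assert shape is Shape.square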
def top_words(doc, tfidf_dict, n_words=settings.TOP_WORDS_NUMBER):
    """Given a document and its tfidf dict {word: tfidf_score},
    return a list of the top n_words words"""
    word_list = helpers.clean(doc)
    # set() to avoid scoring the same word twice
    words_score = [(x, tfidf_dict[x]) for x in set([word[0] for word in word_list])]
    top_words = sorted(words_score, key=lambda kv: kv[1], reverse=True)[0:n_words]
    # return raw words in order
    result = [word[1] for top in top_words
              for word in set(word_list) if top[0] in word]
    return result
def summariseBasic(document, word_limit=100, freq="tfidf", debug=False):
    summary = ""
    raw_base = "".join(document['raw'].copy())
    sentences = {}
    counter = 0

    # replace linebreaks with spaces, then split the article into sentences
    raw_base = " ".join(raw_base.split("\n"))
    raw_base = helpers.splitBySentence(raw_base)

    # add them to the dictionary so we can analyse them
    for i in raw_base:
        # clean sentence of junk
        cleaned = helpers.clean(i)
        # remove stopwords
        cleaned = helpers.removeStopwords("".join(cleaned))
        # add to dictionary
        sentences[counter] = {"raw": i, "clean": cleaned, "worth": 0}
        counter += 1

    # look for freq header and decide on ranking term
    if freq != "tfidf":
        freq = "tf"

    # now we rank each sentence based on the use of words
    rankings = {}
    for sen in sentences:
        for word in sentences[sen]['clean']:
            if word in document[freq]:
                sentences[sen]['worth'] += document[freq][word]
        rankings[sen] = sentences[sen]['worth']

    # now we sort the sentences based on how relevant they are
    rankings = sorted(rankings.items(), key=lambda x: x[1], reverse=True)
    if debug:
        for i in rankings:
            print(i[0], sentences[i[0]]['raw'])

    # now we add sentences based on their score, up to the word limit
    for i in rankings:
        if len(summary.split(" ") + sentences[i[0]]['raw'].split(" ")) < word_limit:
            summary += sentences[i[0]]['raw'] + " "
    return summary
def delete(bot, update):
    """delete a state"""
    global TRANSITIONS, STATES
    expr = clean(update.message.text)
    state = expr.split()[0]
    if state not in STATES:
        update.message.reply_text(text='Input state not found in markov states.')
        return
    ind = STATES.index(state)
    del STATES[ind]
    keep = [c for c in range(TRANSITIONS.shape[0]) if c != ind]
    TRANSITIONS = TRANSITIONS[keep, :][:, keep]
    update.message.reply_text(text='Deleted {}.'.format(state))
def preprocess_text(self, text):
    use_own_stopwords = False
    # Clean text: remove double spaces, special chars and copyright marks
    text = helper.clean(text)
    doc = self.nlp(text)
    sentences = []
    for sentence in doc.sents:  # Split text into sentences
        chunks = []
        for chunk in sentence.noun_chunks:  # Get noun_chunks out of sentence
            if use_own_stopwords:
                # Remove stopwords using our own list
                tokens = chunk.text.lower().split(' ')
                tokens = list(filter(lambda token: token not in stopwords, tokens))
            else:
                # chunk = self.nlp(chunk.text.lower())
                # tokens = [token.text for token in chunk if not token.is_stop]
                # Splitting on spaces is much faster than re-parsing the chunk
                tokens = chunk.text.lower().split(' ')
                tokens = list(filter(lambda token: not self.nlp.vocab[token].is_stop,
                                     tokens))
            tokens = list(filter(lambda token: token != '', tokens))
            tokens = list(map(lambda token: lemmatize_word(token), tokens))  # Lemmatize
            chunk = ' '.join(tokens).lower()
            if chunk != '':
                chunks.append(chunk)
        if len(chunks) > 0:  # Only append non-empty chunk lists
            sentences.append(chunks)
    # Merge the sentences' words together into one document array
    return helper.flatten(sentences)
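# Hypothetical usage sketch: preprocess_text assumes self.nlp holds a loaded
# spaCy pipeline. The model name and sample text here are illustrative only.
import spacy

nlp = spacy.load('en_core_web_sm')
doc = nlp('Quick brown foxes jump over the lazy dog.')
print([chunk.text for sent in doc.sents for chunk in sent.noun_chunks])
# e.g. ['Quick brown foxes', 'the lazy dog']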
def rename(bot, update):
    """rename markov state"""
    expr = clean(update.message.text)
    states = expr.split()
    if len(states) != 2:
        update.message.reply_text(text='Proper syntax is /rename <state> <state>')
        return
    states = [process_token(states[0]), states[1]]
    if states[0] not in STATES:
        update.message.reply_text(text='State not found in markov states.')
        return
    ind = STATES.index(states[0])
    STATES[ind] = states[1]
    update.message.reply_text(text='Renamed "{}" to "{}".'.format(*states))
def startup_end(self):
    self.name = self.charactername['Character Name?']
    self.container.title(self.name)
    path = 'character/' + h.clean(self.name) + '.character'
    self.filename = iface.JSONInterface.OBJECTSPATH + path
    if (os.path.exists(self.filename)):
        ok = messagebox.askokcancel(message='You are overwriting an '
                                            'existing file. Continue?')
        if (not ok):
            self.container.destroy()
    # Create the (empty) character file
    f = open(self.filename, 'w')
    f.close()
    ######
    self.basic = BasicInfoSelector(self.f, self.race_features,
                                   self.class_features)
    self.abils = AbilitySelector(self.f)
    self.skills = SkillSelector(self.f)
    ######
    self.draw_static()
    self.container.deiconify()
def set_global(bot, update, bot_globals):
    """sets or displays int/bool bot globals"""
    args = [a.strip() for a in clean(update.message.text).split('=')]
    names = (k for k, v in bot_globals.items() if isinstance(v, (int, bool)))
    listed = ('{} = {}'.format(k, v) for k, v in bot_globals.items()
              if isinstance(v, (int, bool)))

    if len(args) > 1:
        if args[0] in names:
            if str(args[1]).isnumeric():
                bot_globals['GLOBALS'][args[0]] = int(args[1])
                for k, g in bot_globals.items():
                    if isinstance(g, (int, bool)):
                        if k in bot_globals['GLOBALS'].keys():
                            bot_globals[k] = bot_globals['GLOBALS'][k]
                bot_globals['logger'].level = bot_globals['LOGGING_LEVEL']
                pickle.dump(bot_globals['GLOBALS'],
                            open(bot_globals['GLOBALS_PATH'], 'wb+'))
                db_push(bot_globals['GLOBALS_PATH'])
                update.message.reply_text(text='Global {} updated.'.format(args[0]))
            else:
                update.message.reply_text(
                    text='Globals type error.\n\nValue must be int.\nUse 1 or 0 for booleans.')
        else:
            update.message.reply_text(
                text='Globals key error.\n\nThat global does not exist.')
    elif args[0] == '':
        update.message.reply_text(text='Globals:\n\n' + '\n'.join(listed))
    else:
        update.message.reply_text(
            text='Globals syntax error.\n\nProper usage is /global <global> = <value>')
def calculate_tf(doc):
    """Given a text document, return a dict of {stemmed_word: term_freq} and a
    list of (stem_word, raw_word) tuples for later use.
    term_freq = word_frequency_in_doc / total_number_of_words
    """
    # Separate stemming from cleaning to make it easier to control raw and stem words
    word_list = helpers.clean(doc)  # list of processed (clean) words of doc
    doc_len = len(word_list)  # how many words in doc
    freq_dict = collections.Counter([word[0] for word in word_list])  # word: frequency
    # normalise tf by dividing each frequency by doc_len
    tf_dict = {key: val / doc_len for key, val in freq_dict.items()}
    # update word_repo word frequency to calculate IDF: log(total_docs / docs_with_word)
    # [update_word_repo(word, WORD_REPO) for word in set(word_list)]  # set so we don't count words twice per doc
    return tf_dict, word_list
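# Illustration only: the term-frequency normalisation above on a toy input,
# assuming helpers.clean() yields (stem, raw) pairs as the docstring describes.
import collections

words = [('cat', 'cats'), ('sat', 'sat'), ('cat', 'cat')]
freqs = collections.Counter(w[0] for w in words)     # Counter({'cat': 2, 'sat': 1})
tf = {k: v / len(words) for k, v in freqs.items()}
print(tf)  # {'cat': 0.666..., 'sat': 0.333...}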
def startup_end(self):
    name = self.charactername['Character Name?']
    path = 'character/' + h.clean(name) + '.character'
    if (os.path.exists(iface.JSONInterface.OBJECTSPATH + path)):
        self.record = iface.JSONInterface(path)
    else:
        gui.ErrorMessage('A character with that name was not found.')
        print(iface.JSONInterface.OBJECTSPATH + path)
        raise FileNotFoundError
    self.character = c.Character(self.record)
    self.container.title(str(self.character))
    self.settingmenu = Settings(self.container, self.character)
    self.container.config(menu=self.settingmenu.core)
    ######
    # Front page
    self.info = Information(self.frontpage, self.character)
    self.HP = hp.module(self.frontpage, self.character)
    self.roller = dice.module(self.frontpage, self.character)
    self.abils = abilities.module(self.frontpage, self.character)
    ######
    # Attacks
    self.attacktop = tk.Frame(self.attackpage)
    self.attacks = attacks.module(self.attacktop, self.character)
    self.conditions = conditions.module(self.attacktop, self.character)
    self.equipment = equipment.module(self.attackpage, self.character)
    ######
    # Features
    self.features = features.module(self.featurespage, self.character)
    self.resources = resources.module(self.featurespage, self.character)
    self.featureroller = dice.module(self.featurespage, self.character)
    ######
    # Inventory
    self.inventory = inventory.module(self.inventorypage, self.character)
    ######
    # Spells
    self.spells = spells.module(self.spellspage, self.character)
    ######
    self.container.deiconify()
    self.draw_static()
def tokenize(X):
    # Note: this reads the module-level `df` rather than the X argument
    corpus_text = '\n'.join(df[:50000]['comment_text'])
    sentences = corpus_text.split('\n')
    sentences = [line.lower().split(' ') for line in sentences]
    sentences = [clean(s) for s in sentences if len(s) > 0]
    return sentences
def do(self):
    helpers.clean(self.paths)