def main():
    """CLI entry point: check words for palindromes and/or Portuguese spelling.

    --palin WORD...   : report whether each word is a palindrome.
    --correct WORD... : report the corrected spelling of each word.
    --version         : print the program version.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--palin', action='store', dest='words', nargs='+',
                        help='Store a word to be checked as palindrome')
    # Fixed: a stray '\' line-continuation had leaked into the help string.
    parser.add_argument('--correct', action='store', dest='frase', nargs='+',
                        help='Store a(n) word(s) to be checked if it is '
                             'correct in portuguese')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    results = parser.parse_args()

    words_treat = Words()
    if results.words is not None:
        for word in results.words:
            # Python 3 print() (the original mixed py2 print statements
            # with print() calls).
            print('The word "{0}" is palindrome? {1}\n'.format(
                word, words_treat.isPalindrome(word)))
    if results.frase is not None:
        print("The phrase spelled correct: ")
        for word in results.frase:
            # end=' ' emulates the py2 trailing-comma (same-line) output.
            print('{0}'.format(words_treat.correct(word)), end=' ')
def jumble_solver(clue):
    """Return dictionary words that are anagrams of *clue*.

    Candidates come from the Words word list, restricted to words of the
    same length; a candidate matches when it uses exactly the clue's
    letters with the same multiplicities.
    """
    from collections import Counter
    from words import Words

    clue = clue.lower()
    word_source = Words()
    word_source.import_words()
    # Only words of equal length can be anagrams of the clue.
    candidates = word_source.lengths[len(clue)]
    target = Counter(clue)
    # Equal length + identical letter counts <=> anagram. (The leftover
    # debug print of the result list has been removed.)
    return [word for word in candidates if Counter(word) == target]
def main():
    """CLI entry point: check words for palindromes and/or Portuguese spelling.

    --palin WORD...   : report whether each word is a palindrome.
    --correct WORD... : report the corrected spelling of each word.
    --version         : print the program version.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--palin', action='store', dest='words', nargs='+',
                        help='Store a word to be checked as palindrome')
    # Fixed: a stray '\' line-continuation had leaked into the help string.
    parser.add_argument(
        '--correct', action='store', dest='frase', nargs='+',
        help='Store a(n) word(s) to be checked if it is correct in portuguese')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    results = parser.parse_args()

    words_treat = Words()
    if results.words is not None:
        for word in results.words:
            # Python 3 print() (the original mixed py2 print statements
            # with print() calls).
            print('The word "{0}" is palindrome? {1}\n'.format(
                word, words_treat.isPalindrome(word)))
    if results.frase is not None:
        print("The phrase spelled correct: ")
        for word in results.frase:
            # end=' ' emulates the py2 trailing-comma (same-line) output.
            print('{0}'.format(words_treat.correct(word)), end=' ')
def cross_solver(clue):
    """Solve a crossword-style pattern such as 'c?t', where '?' is a wildcard.

    Returns every dictionary word of the same length whose letters agree
    with the clue at each non-'?' position.
    """
    clue = clue.lower().strip('\n')
    w = Words()
    w.import_words()
    candidates = w.lengths[len(clue)]
    # Indices where the clue gives a concrete letter. Fixed: the original
    # compared with `is not '?'` (identity, not equality) — works only by
    # CPython string-interning accident.
    fixed = [i for i, ch in enumerate(clue) if ch != '?']
    # A word matches when every fixed position agrees. Fixed: an all-'?'
    # clue now matches every candidate instead of returning nothing.
    return [word for word in candidates
            if all(word[i] == clue[i] for i in fixed)]
def get_words(message):
    """Send the chat ten new vocabulary words under the standard header."""
    user = User(message.chat.id)
    vocab = Words(user)
    # Header, a blank separator line, then one word per line.
    lines = [Template.get_words(), '']
    lines.extend(vocab.new_words(10))
    try_send(message.chat.id, '\n'.join(lines))
def setUp(self): self.wa = Words('test.txt') # input args self.inp = [ ' test this program ', 'testthisprogram', '@ hello, this is a word list !', '_ - # ? ! . * / ^ *', 'HI, ThIs IS normalized text !', 'Hi ! this is. SPARTA. What ?', 'This is not a regular sentence', ' ? ! .', 'Hi. This is the longest ! Goodbye.', 'Hi. This is the shortest ! Goodbye.', 'Short. First sentence. This is the longest one !', 'TUTU is tutu for life. Test of tutu !', ' Total of chars without spaces ' ] # expected args self.exp = [ 7, 0, ['hello', 'this', 'is', 'a', 'word', 'list'], [], 'hi, this is normalized text !', ['Hi', 'this is', 'SPARTA', 'What'], ['This is not a regular sentence'], [], 19, 2, 2.666, {'tutu', 'is', 'for', 'life', 'test', 'of'}, 25 ]
def test_preprocess_dict(self):
    """preprocess_dict groups anagrams under their sorted-letter key."""
    word_obj = Words(dictionary_url="https://fakurl.com/")
    word_obj.words = ["bad", "dab"]
    mapping = word_obj.preprocess_dict()
    # Both words sort to the same key, "abd".
    self.assertIn("abd", mapping)
    for anagram in ("bad", "dab"):
        self.assertIn(anagram, mapping["abd"])
def main():
    """Play a full game of hangman until the word is guessed or guesses run out."""
    words = Words(WORDLIST_FILENAME)
    hangman = Hangman(words)
    hangman.start()
    # Keep playing rounds while the word is unguessed and guesses remain.
    # (Replaces the `== False` comparison and the no-op while/else.)
    while (not words.isWordGuessed(words.secretWord, hangman.lettersGuessed)
           and hangman.guesses > 0):
        hangman.game(words)
    hangman.end()
def test_load_words(self, mock_requests):
    """load_words fetches the dictionary URL once and splits the body into lines."""
    word_obj = Words(dictionary_url="https://fakurl.com/")
    fake_response = MagicMock()
    fake_response.status_code = 200
    fake_response.text = "a\naye"
    mock_requests.get.return_value = fake_response
    loaded = word_obj.load_words()
    mock_requests.get.assert_called_once_with("https://fakurl.com/")
    self.assertEqual(loaded, ["a", "aye"])
def generate(event, context):
    """Lambda-style handler: generate words from the posted corpus.

    Reads the corpus, generation parameters and requested word count from
    the API-Gateway-shaped *event*; returns a JSON array of generated words.
    """
    text = event['body-json']['corpus']
    state_size, min_word_len = check_parameters(event)
    generator = Words(text, state_size, min_word_len)
    count = int(event['body-json']['noOfWords'])
    # Comprehension replaces the `+= [word]` loop; the leftover Python 2
    # debug `print word` (a syntax error under py3) has been removed.
    wordlist = [generator.generate_word() for _ in range(count)]
    return json.dumps(wordlist)
def __init__(self, wordFileName, filter=None):
    """Build the Qt application and all collaborators, then start running.

    Args:
        wordFileName: path of the word file handed to Words.
        filter: optional filter forwarded to RandomLinearStrategy.
    """
    QObject.__init__(self)
    # Qt core application; consumes the process argv.
    self.app = QCoreApplication(sys.argv)
    # Quit the Qt event loop when this object signals completion.
    self.finished.connect(self.app.exit)
    self.words = Words(wordFileName)
    self.strategy = RandomLinearStrategy(self.words, filter)
    mp = QMediaPlayer()
    self.audioManager = AudioManager(mp)
    self.ui = CmdUI()
    self.recorder = Recorder(self.words, self.strategy)
    # NOTE(review): the constructor immediately enters run(); callers do
    # not regain control until it returns — confirm this is intended.
    self.run()
def main():
    """Play hangman with the Portuguese word list, then clean up and exit."""
    __WORDLIST_FILENAME = "palavras.txt"
    words = Words(__WORDLIST_FILENAME)
    hangman = Hangman(words)
    hangman.start()
    # Keep playing rounds while the word is unguessed and guesses remain.
    # (Replaces the `== False` comparison and the no-op while/else.)
    while (not words.isWordGuessed(words.secretWord, hangman.lettersGuessed)
           and hangman.guesses > 0):
        hangman.game()
    hangman.end()
    gc.collect()
    sys.exit()
def load(self, file):
    """Load the NLP tokenized string from storage.

    Reads the token list from *file* (JSON) and the raw text from the
    companion ``.txt`` file with the same base name.
    """
    with open(file, 'r', encoding='utf-8') as f:
        self._words = Words()
        self._words._words = json.load(f)
    # Swap only the trailing extension. The original used
    # file.replace('.json', '.txt'), which corrupts any path containing
    # '.json' elsewhere in the name; fall back to the old behavior only
    # when the suffix is absent.
    if file.endswith('.json'):
        text_path = file[:-len('.json')] + '.txt'
    else:
        text_path = file.replace('.json', '.txt')
    with open(text_path, 'r', encoding='utf-8') as f:
        self._text = f.read()
def prehandler(ws):
    r"""Pre-scan a word stream, collapsing comments and typing blocks.

    Walks the words of *ws* and builds a new Words sequence in which a
    '%' starts a comment running to end-of-line and '\starttyping' starts
    a verbatim region running through '\stoptyping'; each such region is
    replaced by a single synthetic Word carrying its total length. All
    other words pass through unchanged.
    """
    logging.info('************ prehandler ************')
    _ws = Words(ws.source)
    while True:
        w = ws.getword()
        if not w:
            # End of stream.
            break
        if w.name() == '%':
            # TeX-style comment: swallow everything up to the newline.
            end = '\n'
            # Length = offset of the end marker minus offset of '%', plus
            # the marker itself. pos[2] appears to be an absolute stream
            # offset — NOTE(review): confirm the pos tuple layout.
            l = ws.find(end).getword_byindex(-1).pos[2] - w.pos[2] + len(end)
            w = Word(Word.TYPE_COMMENT, l, 'comment', w.pos)
            _ws.append(w)
        elif w.name() == '\starttyping':
            # Verbatim region: swallow through the matching \stoptyping.
            end = '\stoptyping'
            l = ws.find(end).getword_byindex(-1).pos[2] - w.pos[2] + len(end)
            w = Word(Word.TYPE_TYPING, l, 'typing', w.pos)
            _ws.append(w)
        else:
            # Ordinary word: pass through untouched.
            _ws.append(w)
    ws.update()
    return _ws
def words(self):
    """Getter for the page words (tokenized).

    Returns None when there is neither raw text nor tokens; otherwise
    tokenizes the text lazily on first access and returns the token list.
    """
    if self._words is None:
        if self._text is None:
            # Nothing to tokenize and nothing cached.
            return None
        # Lazy tokenization on first access.
        self._words = Words(self._text, bare=self.BARE, stem=self.STEM,
                            pos=self.POS, roman=self.ROMAN)
    return self._words.words
def __len__(self): """ Override the len() operator - get the number of tokenized words """ if self._text is None: return 0 # If text has not been tokenized yet, then tokenize it if self._words is None: self._words = Words(self._text, bare=self.BARE, stem=self.STEM, pos=self.POS, roman=self.ROMAN) return len(self._words.words)
async def handle_request(reader, writer):
    """Serve one JSON request on the stream: the 'search' or 'add_word' API.

    Reads up to 512 bytes of JSON, dispatches on the 'api' field, and
    writes back a JSON object of shape {'code', 'error', 'data'} before
    closing the connection.
    """
    data = await reader.read(512)
    if not data:
        # Empty read: client sent nothing (or closed) — report a 400.
        response = {'code': 400, 'error': 'no data sent', 'data': None}
    else:
        print("request: " + data.decode())
        request = json.loads(data.decode())
        api = request['api']
        # NOTE(review): an unrecognized 'api' value leaves `response`
        # unbound and raises NameError at json.dumps below — confirm
        # whether a default error response should be added.
        if api == 'search':
            j = Jung()
            response = {
                'code': 200,
                'error': None,
                'data': j.search(request['search_text'], request['user_id'],
                                 request['limit'], request['page'])
            }
        elif api == 'add_word':
            sentence = request['word']
            f = Freud()
            sentence = f.preprocess_dream_text(sentence)
            tokens = f.process_sentence(sentence)
            if len(tokens) > 0:
                # Only the first token is stored; token[0]/token[1] look
                # like (word, lemma-ish data) — TODO confirm the tuple
                # layout against Freud.process_sentence.
                token = tokens[0]
                words = Words()
                if words.get_lemmatized_word(token[0]) is None:
                    words.add_lemmatized_word(token[0], token[1])
                # Responds 200 even when the word already existed.
                response = {'code': 200, 'error': None, 'data': token[0]}
            else:
                response = {
                    'code': 400,
                    'error': 'failed to add word',
                    'data': None
                }
    json_response = json.dumps(response)
    print('sending: ' + json_response)
    writer.write(json_response.encode())
    await writer.drain()
    writer.close()
def run(self):
    """Stream new submissions and crosspost those whose ID spells an English word."""
    english_words = Words.load_dictionary()
    print('Successfully loaded {} English words.'.format(len(english_words)))
    stream = self.reddit.subreddit(STREAM_SUBREDDIT).stream.submissions(
        skip_existing=True)
    for submission in stream:
        # Reddit IDs occasionally spell a real word; skip everything else.
        if submission.id not in english_words:
            continue
        print('Submission with ID "{}" found!'.format(submission.id))
        crosspost_title = '{}: {}'.format(submission.id, submission.title)
        submission.crosspost(TARGET_SUBREDDIT, title=crosspost_title)
def generate():
    """Generate 50 words from a small built-in sample corpus.

    Returns the list of words produced by the Words generator configured
    with state size 2 and minimum word length 4.
    """
    text = '''car cab arc bar bark arb carb pulp <span>test</span>'''
    state_size = 2
    min_word_len = 4
    generator = Words(text, state_size, min_word_len)
    # Comprehension replaces the `wordlist += [word]` accumulation loop.
    return [generator.generate_word() for _ in range(50)]
def send_messages(users):
    """Push the daily word message to every user whose send window is open.

    Sending is throttled because the Telegram API caps message throughput
    and other threads also send messages.
    """
    i = 0
    for u in users:
        user = User(u)
        # Send only when the user's configured hour matches and they have
        # not already been messaged today (strftime('%x') = current date).
        if user.get_time() == time_zone(
        ) and user.get_last_day() != strftime('%x'):
            # message limitation #
            # this is necessary because telegram api
            # limits us to 30 messages per second
            # in addition, we have other threads that
            # also need to respond to user requests
            # so I set a limit of 10 messages per second
            # from this thread
            # NOTE(review): the counter resets at 5, not 10 — the comment
            # above and the code disagree; confirm which is intended.
            if i == 5:
                i = 0
                sleep(1)
            user.set_last_day()
            # NOTE(review): check_data() is called up to three times per
            # user; if it is expensive or non-idempotent, cache the result.
            if user.check_data() == 'data error':
                # data error: tell the user their settings are broken
                i += 1
                print('problem_data')
                print(u)
                try_send(u, Template.data_error())
            elif user.check_data() == 'no await':
                # no message waiting for this user
                print('no message waiting')
                print(u)
            elif user.check_data() == 'good':
                # all right: build and send today's words
                i += 1
                w = Words(user)
                words = w.get_words()
                print('ok')
                try_send(u, words)
        else:
            # NOTE(review): this sleeps five minutes for EACH user whose
            # window is closed, stalling delivery to everyone after them
            # in the list — verify this is intended.
            sleep(5 * 60)
def get_words_lists():
    """Scan ./lists for .txt word files; register each and return their base names."""
    files = os.listdir('./lists')
    names_list = []
    for file_name in files:
        # Fixed: the original tested `if file.find('.txt'):` — str.find
        # returns -1 (truthy!) when absent and 0 (falsy) at a prefix, so
        # non-.txt files slipped through. Match the suffix explicitly.
        if file_name.endswith('.txt'):
            print(file_name)
            base_name = file_name.split('.')[0]
            names_list.append(base_name)
            WORDS.append(Words(base_name))
    return names_list
def get_words_within_path(path, type_of_node='function', type_of_word='verb'):
    """Collect words of the requested kind from node names found under *path*."""
    node_names = fetch_list_of_node_names(path, type_of_node=type_of_node)
    # Extract per-name word lists, then flatten into one list.
    extracted = [extract_words_from_text(name, type_of_word=type_of_word)
                 for name in node_names]
    words = flatter_list(extracted)
    logger.info(f'total {len(words)} words, {len(set(words))} unique')
    return Words(words)
def test_decompose(self):
    """decompose('bade') yields the contained anagrams but not 'cab'."""
    word_obj = Words(dictionary_url="https://fakurl.com/")
    word_obj.words = ["bad", "dab", "cab"]
    word_obj.preprocess_dict()
    result = word_obj.decompose("bade")
    for expected in ("bad", "dab"):
        self.assertIn(expected, result)
    self.assertNotIn("cab", result)
def main(location):
    """Build a word-search grid from the word file at *location* and print it."""
    # Load the words and force them to upper case.
    word_object = Words()
    word_object.set_case("UPPER")
    word_object.read_words_file(location)
    # Lay the words out on a 16x16 grid and render it.
    engine = RenderEngine(word_object)
    engine.generate_grid(16)
    engine.print_letter_matrix()
    # Separator sized to the longest word, then the placed-word listing.
    print("-" * word_object.get_max_word_len())
    engine.print_words_in_grid()
def __iadd__(self, text): """ Override the += operator - add text to the page """ if text is None: return self if isinstance(text,str) == False: raise TypeError("String expected for text") self._text = text else: # Extend the text if self._text is None: self._text = text else: self._text += " " + text # Was already tokenized if self._words is not None: # Tokenize new text words = Words(text, bare=self.BARE, stem=self.STEM, pos=self.POS, roman=self.ROMAN) # Add tokens to existing list self._words += words.words return self
def main():
    """Interactively generate poems until the user declines another."""
    keep_going = True
    while keep_going:
        lines = int(
            input(">>> Enter the number of lines you'd like in a stanza: "))
        stanzas = int(
            input(">>> Enter the number of stanzas you'd like in your poem: "))
        words = Words()
        print("Generating poem...\n")
        time.sleep(1)
        poem_generator(lines, stanzas, words)
        repeat = input("\n>>> Would you like another poem? (yes/no) ").lower()
        if repeat in ("y", "yes"):
            continue
        if repeat in ("n", "no"):
            keep_going = False
        else:
            # Any other answer: warn, then stop as well.
            print("Sorry, '%s' is not recognized. \n" % (repeat))
            keep_going = False
from trie import Trie from measure import MeasureTime, MeasureMemory from words import Words from nltk.corpus import wordnet import sys # 辞書読み込み t = Words(sys.argv[1]) # ./data/origin/wordnet_words.csv words_dict = t.words_dict words = t.words # Trie木作成 print("=========================== 使用メモリ ===========================") trie = Trie(words) m = MeasureMemory() # bit配列のメモリ使用量、ラベル配列のメモリ使用量 print("bit_array:", m.convert_bytes(m.compute_object_size(trie.bit_array)), "labels:", m.convert_bytes(m.compute_object_size(trie.labels))) # selectのメモリ使用量 print("select:", m.convert_bytes(m.compute_object_size(trie.zero_pos))) # rankのメモリ使用量 print("rank:", m.convert_bytes(m.compute_object_size(trie.split_list))) x = input("Input search word:") # 単語検索 print("=========================== 単語検索 ===========================") while True:
def Word(data=None, alphabet=None, length=None, datatype=None, caching=True,
         RSK_data=None):
    r"""
    Construct a word.

    INPUT:

    -  ``data`` -- (default: ``None``) list, string, tuple, iterator, free
       monoid element, ``None`` (shorthand for ``[]``), or a callable
       defined on ``[0,1,...,length]``.

    -  ``alphabet`` -- any argument accepted by Words

    -  ``length`` -- (default: ``None``) This is dependent on the type of
       data. It is ignored for words defined by lists, strings, tuples,
       etc., because they have a naturally defined length. For callables,
       this defines the domain of definition, which is assumed to be
       ``[0, 1, 2, ..., length-1]``. For iterators: Infinity if you know
       the iterator will not terminate (default); ``"unknown"`` if you do
       not know whether the iterator terminates; ``"finite"`` if you know
       that the iterator terminates, but do know know the length.

    -  ``datatype`` -- (default: ``None``) ``None``, ``"list"``,
       ``"str"``, ``"tuple"``, ``"iter"``, ``"callable"``. If ``None``,
       then the function tries to guess this from the data.

    -  ``caching`` -- (default: ``True``) ``True`` or ``False``. Whether
       to keep a cache of the letters computed by an iterator or callable.

    -  ``RSK_data`` -- (Optional. Default: ``None``) A semistandard and a
       standard Young tableau to run the inverse RSK bijection on.

    .. NOTE::

       Be careful when defining words using callables and iterators. It
       appears that islice does not pickle correctly causing various
       errors when reloading. Also, most iterators do not support copying
       and should not support pickling by extension.

    EXAMPLES:

    Empty word::

        sage: Word()
        word:

    Word with string::

        sage: Word("abbabaab")
        word: abbabaab

    Word with string constructed from other types::

        sage: Word([0,1,1,0,1,0,0,1], datatype="str")
        word: 01101001
        sage: Word((0,1,1,0,1,0,0,1), datatype="str")
        word: 01101001

    Word with list::

        sage: Word([0,1,1,0,1,0,0,1])
        word: 01101001

    Word with list constructed from other types::

        sage: Word("01101001", datatype="list")
        word: 01101001
        sage: Word((0,1,1,0,1,0,0,1), datatype="list")
        word: 01101001

    Word with tuple::

        sage: Word((0,1,1,0,1,0,0,1))
        word: 01101001

    Word with tuple constructed from other types::

        sage: Word([0,1,1,0,1,0,0,1], datatype="tuple")
        word: 01101001
        sage: Word("01101001", datatype="str")
        word: 01101001

    Word with iterator::

        sage: from itertools import count
        sage: Word(count())
        word: 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,...
        sage: Word(iter("abbabaab")) # iterators default to infinite words
        word: abbabaab
        sage: Word(iter("abbabaab"), length="unknown")
        word: abbabaab
        sage: Word(iter("abbabaab"), length="finite")
        word: abbabaab

    Word with function (a 'callable')::

        sage: f = lambda n : add(Integer(n).digits(2)) % 2
        sage: Word(f)
        word: 0110100110010110100101100110100110010110...
        sage: Word(f, length=8)
        word: 01101001

    Word over a string with a parent::

        sage: w = Word("abbabaab", alphabet="abc"); w
        word: abbabaab
        sage: w.parent()
        Words over {'a', 'b', 'c'}

    Word from a free monoid element::

        sage: M.<x,y,z> = FreeMonoid(3)
        sage: Word(x^3*y*x*z^2*x)
        word: xxxyxzzx

    The default parent is the combinatorial class of all words::

        sage: w = Word("abbabaab"); w
        word: abbabaab
        sage: w.parent()
        Words

    We can also input a semistandard tableau and a standard tableau to
    obtain a word from the inverse RSK algorithm using the ``RSK_data``
    option::

        sage: p = Tableau([[1,2,2],[3]]); q = Tableau([[1,2,4],[3]])
        sage: Word(RSK_data=[p, q])
        word: 1322

    TESTS::

        sage: Word(5)
        Traceback (most recent call last):
        ...
        ValueError: Cannot guess a datatype from data (=5); please specify one

    ::

        sage: W = Words()
        sage: w = W('abc')
        sage: w is W(w)
        True
        sage: w is Word(w, alphabet='abc')
        False
    """
    if isinstance(data, FreeMonoidElement):
        return data.to_word(alphabet)

    if RSK_data is not None:
        # RSK_data is a pair: either two tableaux or two list/tuple rows.
        from sage.combinat.tableau import Tableau
        if isinstance(RSK_data, (tuple, list)) and len(RSK_data) == 2:
            from sage.combinat.rsk import RSK_inverse
            # Generator expressions replace the all(map(lambda ...)) calls;
            # the shared shape check is hoisted out of the two branches.
            if all(isinstance(x, Tableau) for x in RSK_data):
                return RSK_inverse(*RSK_data, output='word')
            if all(isinstance(x, (list, tuple)) for x in RSK_data):
                P, Q = map(Tableau, RSK_data)
                return RSK_inverse(P, Q, 'word')
        raise ValueError("Invalid input. Must be a pair of tableaux")

    # Create the parent object (all words, or words over the given alphabet).
    from words import Words
    parent = Words() if alphabet is None else Words(alphabet)
    return parent(data=data, length=length, datatype=datatype,
                  caching=caching)
import config
import threading
import re
from flask import Flask, render_template, request, url_for, session, redirect
from flask_socketio import SocketIO, emit
from players import Players
from words import Words
from game_state import GameState
from leaderboard import LeaderBoard
from authentication import player_required

app = Flask(__name__)
config.load(app)

# Shared game services, each bound to the Flask app.
words = Words(app)
players = Players(app)
game_state = GameState(app)
leaderboard = LeaderBoard(app)

socketio = SocketIO(app, async_mode=None)
# Background game-loop thread handle (started elsewhere).
thread = None
stop_game = go_next = False  # TODO: look into using thread events instead of these flags


@app.route('/')
def index():
    # Landing page: current leaderboard plus the signed-in player, if any.
    return render_template('index.html', leaderboard=leaderboard.rankings(),
                           player=session.get("player"))


@app.route("/signout")
def signout():
    # NOTE(review): this chunk appears truncated — signout presumably
    # redirects after clearing the session; only the clear() is visible.
    session.clear()
def __init__(self):
    """Start a fresh game: full word list, no language chosen, empty index."""
    # Positions already revealed/used during guessing.
    self.index = []
    # The language the player must guess; chosen later.
    self.language_to_guess = ''
    # Full pool of candidate words from the Words source.
    self.all_words = Words().words_list
def Word(data=None, alphabet=None, length=None, datatype=None, caching=True): r""" Construct a word. INPUT: - ``data`` - (default: None) list, string, tuple, iterator, None (shorthand for []), or a callable defined on [0,1,...,length]. - ``alphabet`` - any argument accepted by Words - ``length`` - (default: None) This is dependent on the type of data. It is ignored for words defined by lists, strings, tuples, etc., because they have a naturally defined length. For callables, this defines the domain of definition, which is assumed to be [0, 1, 2, ..., length-1]. For iterators: Infinity if you know the iterator will not terminate (default); "unknown" if you do not know whether the iterator terminates; "finite" if you know that the iterator terminates, but do know know the length. - ``datatype`` - (default: None) None, "list", "str", "tuple", "iter", "callable". If None, then the function tries to guess this from the data. - ``caching`` - (default: True) True or False. Whether to keep a cache of the letters computed by an iterator or callable. .. note:: Be careful when defining words using callables and iterators. It appears that islice does not pickle correctly causing various errors when reloading. Also, most iterators do not support copying and should not support pickling by extension. 
EXAMPLES: Empty word:: sage: Word() word: Word with string:: sage: Word("abbabaab") word: abbabaab Word with string constructed from other types:: sage: Word([0,1,1,0,1,0,0,1], datatype="str") word: 01101001 sage: Word((0,1,1,0,1,0,0,1), datatype="str") word: 01101001 Word with list:: sage: Word([0,1,1,0,1,0,0,1]) word: 01101001 Word with list constructed from other types:: sage: Word("01101001", datatype="list") word: 01101001 sage: Word((0,1,1,0,1,0,0,1), datatype="list") word: 01101001 Word with tuple:: sage: Word((0,1,1,0,1,0,0,1)) word: 01101001 Word with tuple constructed from other types:: sage: Word([0,1,1,0,1,0,0,1], datatype="tuple") word: 01101001 sage: Word("01101001", datatype="str") word: 01101001 Word with iterator:: sage: from itertools import count sage: Word(count()) word: 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,... sage: Word(iter("abbabaab")) # iterators default to infinite words word: abbabaab sage: Word(iter("abbabaab"), length="unknown") word: abbabaab sage: Word(iter("abbabaab"), length="finite") word: abbabaab Word with function (a 'callable'):: sage: f = lambda n : add(Integer(n).digits(2)) % 2 sage: Word(f) word: 0110100110010110100101100110100110010110... sage: Word(f, length=8) word: 01101001 Word over a string with a parent:: sage: w = Word("abbabaab", alphabet="abc"); w word: abbabaab sage: w.parent() Words over Ordered Alphabet ['a', 'b', 'c'] The default parent is the combinatorial class of all words:: sage: w = Word("abbabaab"); w word: abbabaab sage: w.parent() Words TESTS:: sage: Word(5) Traceback (most recent call last): ... 
ValueError: Cannot guess a datatype from data (=5); please specify one :: sage: W = Words() sage: w = W('abc') sage: w is W(w) True sage: w is Word(w, alphabet='abc') False """ # Create the parent object from words import Words parent = Words() if alphabet is None else Words(alphabet) return parent(data=data, length=length, datatype=datatype, caching=caching)
def run(self):
    """Main worker loop: pick images/phrases and distribute them to hosts.

    Discovers which hosts want images and which want phrases, then loops
    forever: fetches content (from the archive or a Google search),
    splits the images across the image hosts, and pushes phrase data to
    the phrase hosts. Exits when "__stop__" arrives on the queue.
    """
    print("%s in run loop" % self.name)
    hosts = Hosts().getHosts()
    imageHosts = []
    phraseHosts = []
    lastCacheId = 0
    # Classify each host by its iAltar settings and clear stale caches.
    for h in hosts:
        ip = h['ip']
        if Hosts().isLocalHost(ip):
            ImageHandler.clearCache(None)
        else:
            Hosts().sendToHost(ip, {'cmd': 'ClearCache', 'args': None})
        iAltar = Hosts().getAttr(ip, 'iAltar')
        if iAltar['enabled']:
            if iAltar['image']:
                Debug().p("%s: display type for %s: image" % (self.name, ip))
                imageHosts.append(ip)
            if iAltar['phrase']:
                Debug().p("%s: wants phrase for %s: " % (self.name, ip))
                phraseHosts.append(ip)
    sleepTime = .001
    while True:
        try:
            Debug().p("%s: sleeping %d" % (self.name, sleepTime))
            msg = self.queue.get(timeout=sleepTime)
            if msg == "__stop__":
                print("%s: stopping" % self.name)
                break
        except Queue.Empty:
            pass
        Watchdog().feed(self)
        cacheId = random.randint(10000, 20000)
        images = []
        choices = []
        urls = []
        if self.searchType == 'Archive':
            [images, choices] = Archive().getArchive()
            for i in images:
                Debug().p("%s: image %s" % (self.name, i))
            for c in choices:
                Debug().p("%s: choice %s" % (self.name, c))
        elif self.searchType == 'Google':
            choices = []
            msg = doGetRecog('')
            if msg != "":
                test = json.loads(msg)
                Debug().p("%s got %s" % (self.name, test))
                if test['recog'] != ["", ""]:
                    choices = test['recog']
                    # BUG FIX: was Debug().p[...] (indexing the method —
                    # a TypeError at runtime); call it instead.
                    Debug().p("%s choices from recog %s" % (self.name, choices))
            if len(choices) == 0:
                choices = Words().getWords()
            urls = Search().getUrls(choices)
            # Idiom fix: identity test against None.
            if urls is None:
                Debug().p("%s Google Error switching to Archive" % self.name)
                self.searchType = "Archive"
                continue
            if len(urls) == 0:
                Debug().p("%s Nothing found try again" % self.name)
                continue
            images = self.urlsToImages(Search().getUrls(choices))
        else:
            # BUG FIX: 'searchType' was an undefined bare name here
            # (NameError); use the attribute.
            Debug().p("%s unimplemented type %s switching to archive" %
                      (self.name, self.searchType))
            self.searchType = 'Archive'
        if self.searchType != 'Archive':
            Archive().putArchive(choices)
        phraseArgs = {}
        if len(phraseHosts) != 0:
            phraseArgs['phrase'] = choices
            phraseArgs['phraseData'] = ""
            # NOTE(review): 'ip' here is left over from the discovery loop
            # above, so this log line reports a stale address.
            Debug().p("%s sending %s to %s" % (self.name, choices, ip))
            for ip in phraseHosts:
                phr = Hosts().getAttr(ip, 'phrase')
                if phr['voice']:
                    lang = random.choice(Specs().s['langList'])
                    phraseArgs['phraseData'] = makeSpeakData(
                        "%s %s" % (choices[0], choices[1]), lang)
                    #os.unlink(file.replace("mp3","wav"));
        if len(imageHosts) != 0:
            numImages = len(images)
            # divmod keeps this integer arithmetic on both py2 and py3
            # (the original '/' would yield a float under py3 and break
            # range() below).
            imagesPerHost, extraImages = divmod(numImages, len(imageHosts))
            extra = 0
            count = 0
            Debug().p("%s numImages:%d imagesPerHost:%d extraImages:%d" %
                      (self.name, numImages, imagesPerHost, extraImages))
            # Deal images out: each host gets imagesPerHost, plus one of
            # the remainder images while any remain.
            for ip in imageHosts:
                args = {}
                args['id'] = cacheId
                args['imgData'] = []
                for i in range(0, imagesPerHost):
                    fname = images[i + count]
                    args['imgData'].append(self.setImgData(fname))
                count += imagesPerHost
                if extra < extraImages:
                    fname = images[count + extra]
                    args['imgData'].append(self.setImgData(fname))
                    extra += 1
                cmd = {'cmd': "AddImage", 'args': args}
                if Hosts().isLocalHost(ip):
                    ImageHandler().addImage(args)
                else:
                    Hosts().sendToHost(ip, cmd)
            # Activate the new cache on every image host.
            for ip in imageHosts:
                args = [cacheId]
                if Hosts().isLocalHost(ip):
                    ImageHandler().setImageDir(args)
                else:
                    Hosts().sendToHost(ip, {'cmd': 'SetImageDir',
                                            'args': args})
            # Drop the previous round's cache.
            if lastCacheId != 0:
                for ip in imageHosts:
                    args = [lastCacheId]
                    if Hosts().isLocalHost(ip):
                        ImageHandler.rmCacheDir(args)
                    else:
                        Hosts().sendToHost(ip, {'cmd': 'RmCacheDir',
                                                'args': args})
            lastCacheId = cacheId
        if len(phraseHosts) != 0:
            for ip in phraseHosts:
                if Hosts().isLocalHost(ip):
                    PhraseHandler().setPhrase(phraseArgs)
                else:
                    Hosts().sendToHost(ip, {'cmd': 'Phrase',
                                            'args': phraseArgs})
        sleepTime = Specs().s['iAltarSleepInterval']
def get_list(self, index):
    """Fetch word list *index* via a Words helper; cache it on self and return it."""
    source = Words()
    source.get_list(index)
    self.list = source.get_list_content()
    return self.list
from game import Game
from player import Player
from words import Words

# Wire up a round: the player roster from Player, the secret word from Words.
player = Player()
word = Words()
game = Game(player.get_players(), word.get_word())
# Run the main game loop.
game.play()