Example #1
class CoapLFUCache(Cache):
    def __init__(self, mode, max_dim):
        """
        Initialise an LFU cache for CoAP.

        :param max_dim: max number of elements in the cache
        :param mode: used to differentiate between a cache used in a forward-proxy or in a reverse-proxy
        """

        Cache.__init__(self, mode, max_dim)
        self.cache = LFUCache(maxsize=max_dim)

    def __str__(self):
        msg = []
        for e in list(self.cache.values()):
            msg.append(str(e))
        return ("Cache Size: {sz}\n" + "\n".join(msg))

    def debug_print(self):
        """
        :return: a debug printout for the current cache.
        """
        return ("size = %s\n%s" % (
            self.cache.currsize,
            '\n'.join([
                (   "element.max age %s\n"\
                    "element.uri %s\n"\
                    "element.freshness %s"  ) % (
                        element.max_age,
                        element.uri,
                        element.freshness )
                for key, element
                in list(self.cache.items())
            ])))
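
A minimal standalone sketch (not from the project above) of the cachetools LFUCache accessors that debug_print relies on, namely currsize, items() and values(); the keys and values are illustrative:

from cachetools import LFUCache

cache = LFUCache(maxsize=3)
cache["a"] = 1
cache["b"] = 2
print(cache.currsize)        # 2: number of stored entries (the default getsizeof counts 1 per item)
print(list(cache.items()))   # [('a', 1), ('b', 2)] (iteration order is not guaranteed)
print(list(cache.values()))  # [1, 2]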
Example #2
    def __init__(self, max_dim):
        """

        :param max_dim: max number of elements in the cache
        """
        print "Using LFU Cache with dimension : " + str(max_dim)
        self.cache = LFUCache(maxsize=max_dim)
Example #3
 def __init__(self,
              name,
              dname,
              pclass,
              mappath=None,
              currency_code=BASE_CURRENCY,
              currency_symbol=BASE_CURRENCY_SYMBOL,
              vendorlogo=None,
              sname=None,
              is_manufacturer=None,
              vtype=None):
     self._name = name
     self._dname = dname
     self._sname = sname
     self._instance_vendorlogo = vendorlogo
     self._is_manufacturer = is_manufacturer
     self._currency = currency.CurrencyDefinition(currency_code,
                                                  currency_symbol)
     self._vtype = vtype
     self._pclass = pclass
     self._order = None
     self._orderbasecosts = []
     self._orderadditionalcosts = []
     self._partcache = LFUCache(1000)
     if mappath is not None:
         self._mappath = mappath
     else:
         self._mappath = self._name + '-' + self._pclass + '.csv'
     self._map = VendorMapFileDB(self)
Example #4
	def __init__(self):
		self.dictionaryCache = LFUCache(maxsize = 200)
		self.urbandictionaryCache = LFUCache(maxsize = 200)
		self.wordOfTheDayCache = {}
		self.bot = telebot.TeleBot(bot_token)
		self.session = requests.Session()
		self.session.mount('http://', requests.adapters.HTTPAdapter(max_retries=5))
		self.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=5))
Example #5
	def __init__(self):
		self.offset = ''
		self.URL = 'https://api.telegram.org/bot' + bot_token
		self.session = requests.Session()
		self.session.mount("http://", requests.adapters.HTTPAdapter(max_retries=2))
		self.session.mount("https://", requests.adapters.HTTPAdapter(max_retries=2))
		self.dictionaryCache = LFUCache(maxsize = 200)
		self.urbandictionaryCache = LFUCache(maxsize = 200)
		self.startMessage = start_message
Example #6
 def __init__(self, food_size, history_size):
     self.reaction_graph = ReactionGraph()
     self.food_size = food_size
     self.history = Counter()
     self.substrate_count = Counter()
     self.recent_history = LFUCache(history_size)
     self.pool_str_hist = deque(maxlen=1000)
     self.n_reactions = Counter()
     self.emergent_substrates = set()
Example #7
    def __init__(self, mode, max_dim):
        """
        Initialise an LFU cache for CoAP.

        :param max_dim: max number of elements in the cache
        :param mode: used to differentiate between a cache used in a forward-proxy or in a reverse-proxy
        """

        Cache.__init__(self, mode, max_dim)
        self.cache = LFUCache(maxsize=max_dim)
Example #8
    def __init__(self, config: Dict[str, any], result_dir: str,
                 cache_stats: CacheInformation):
        super().__init__(config, result_dir, cache_stats)
        self.lfu = LFUCache(100000)
        self.logger = logging.getLogger(__name__)
        name = 'lfu_eviction_strategy'
        self.performance_logger = create_file_logger(
            name=f'{name}_performance_logger', result_dir=result_dir)

        self._incomplete_experiences = TTLCache(InMemoryStorage())
        self._incomplete_experiences.expired_entry_callback(
            self._observe_expired_incomplete_experience)
Example #9
 def __init__(self):
     self.word_api = WordApi.WordApi(
         swagger.ApiClient(wordnik_api_key, wordnik_api))
     self.wordoftheday_api = WordsApi.WordsApi(
         swagger.ApiClient(wordnik_api_key, wordnik_api))
     self.urbandictionary_api = urbandictionary_api
     self.dictionaryCache = LFUCache(maxsize=1000)
     self.urbandictionaryCache = LFUCache(maxsize=1000)
     self.wordOfTheDayCache = {}
     self.session = requests.Session()
     self.session.mount('http://',
                        requests.adapters.HTTPAdapter(max_retries=5))
     self.session.mount('https://',
                        requests.adapters.HTTPAdapter(max_retries=5))
Example #10
def run(
    sequence, datasets, name, outdir, tempdir, mode, batch_opts, ncores,
    nblocks_per_dataset, nblocks_per_process, nfiles_per_dataset,
    nfiles_per_process, blocksize, cachesize, quiet, dryrun, sample,
    predetermined_nevents_in_file,
):
    process = AtUproot(
        outdir,
        quiet = quiet,
        max_blocks_per_dataset = nblocks_per_dataset,
        max_blocks_per_process = nblocks_per_process,
        max_files_per_dataset = nfiles_per_dataset,
        max_files_per_process = nfiles_per_process,
        nevents_per_block = blocksize,
        predetermined_nevents_in_file=predetermined_nevents_in_file,
        branch_cache = LFUCache(int(cachesize*1024**3), get_size),
    )
    tasks = process.run(datasets, sequence)

    if mode=="multiprocessing" and ncores==0:
        results = pysge.local_submit(tasks)
    elif mode=="multiprocessing":
        results = pysge.mp_submit(tasks, ncores=ncores)
    elif mode=="sge":
        results = pysge.sge_submit(
            tasks, name, tempdir, options=batch_opts, dryrun=dryrun,
            sleep=5, request_resubmission_options=True,
            return_files=True,
        )
    return results
Example #11
    def __init__(self, analyzer, mle_path=None, top=1, cache_size=100000):
        if not isinstance(analyzer, Analyzer):
            raise ValueError('Invalid analyzer instance.')
        if not isinstance(top, int):
            raise ValueError('Invalid value for top.')
        if not isinstance(cache_size, int):
            raise ValueError('Invalid value for cache_size.')

        if mle_path is not None:
            with open(mle_path, 'r', encoding='utf-8') as mle_fp:
                self._mle = json.load(mle_fp)

                # TODO: Remove this when MLE files are fixed
                for analysis in self._mle.values():
                    analysis['lex'] = strip_lex(analysis['lex'])
        else:
            self._mle = None

        self._analyzer = analyzer

        if top < 1:
            top = 1
        self._top = top

        if cache_size < 0:
            cache_size = 0

        self._cache = LFUCache(cache_size)
        self._scored_analyses = cached(self._cache)(self._scored_analyses)
Example #12
    def lfu(self, tags: Optional[Union[list, str]] = None, name: Optional[str] = None,
            maxsize: Optional[int]=None) -> LFUCache:
        """
        Create a new LFU (Least Frequently Used) based cache. This counts how
        often the cache items are used, and when maxsize is reached, it
        discards the least frequently used item. Use this with caution: new
        items tend to be pushed out quickly, while older items that were used
        heavily in the past linger even if they are no longer needed. See the
        LRU cache as an alternative.

        Default is a maxsize of 512 entries.

        :param tags: Associate tags with this cache for purging.
        :param name: Name of the cache.
        :param maxsize: Max number of entries.
        :return:
        """
        if maxsize is None:
            maxsize = 512

        if isinstance(tags, str):
            tags = (tags,)
        elif tags is None:
            tags = ()

        if name is None:
            name = caller_string() + random_string(length=10)
        if name not in self.caches:
            self.caches[name] = {
                "cache": LFUCache(maxsize),
                "tags": tags,
                "type": "LFUCache",
                "lock": RLock(),
            }
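
The docstring above describes the LFU eviction rule: once maxsize is reached, the least frequently used entry is discarded. A minimal sketch of that behaviour with cachetools (the keys are illustrative):

from cachetools import LFUCache

cache = LFUCache(maxsize=2)
cache["a"] = 1
cache["a"]            # one extra access raises the use count of "a"
cache["b"] = 2
cache["c"] = 3        # cache is full, so the least frequently used key ("b") is evicted
assert "a" in cache and "c" in cache and "b" not in cache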
Example #13
File: text.py  Project: Kyrmy/prototyyppi
    def nearest_sentence(self, topics: List[float], texts: List[str]) -> str:
        """
        Find sentence closest to topic.

        TODO: When joining multiple sentences, it should be checked that they are from the same paragraph.
        """
        @cached(LFUCache(maxsize=128))
        def lda(sentences):
            count_data = self._count_vector.transform(sentences)
            _lda = self._lda.transform(count_data)
            return _lda

        # Tokenize into sentences.
        sentences = chain(*[re.findall(r"\s*(.+?[\.!?])+", b, re.MULTILINE + re.DOTALL) for b in texts if b.strip() != ""])

        # cleanup sentences.
        sentences = tuple(set(filter(lambda x: len(x) > self.min_sentence_length, map(str.strip, sentences))))
        if len(sentences) == 0:
            return None

        # Find most topical sentence.
        tl_dr = []
        distance = 1.
        prev_sentence = ""
        for current_sentence, m in zip(sentences, lda(sentences)):
            _distance = np.abs(np.mean(topics - m))
            if _distance < distance:
                tl_dr, distance = ([prev_sentence, current_sentence], _distance)
            
            # Previous sentence is to provide context to most suitable sentence.
            prev_sentence = current_sentence

        return " ".join(filter(None, tl_dr))
Example #14
    def __init__(self, db, backoff='NONE',
                 norm_map=DEFAULT_NORMALIZE_MAP,
                 strict_digit=False,
                 cache_size=0):
        if not isinstance(db, CalimaStarDB):
            raise AnalyzerError('DB is not an instance of CalimaStarDB')
        if not db.flags.analysis:
            raise AnalyzerError('DB does not support analysis')

        self._db = db

        self._backoff = backoff
        self._norm_map = DEFAULT_NORMALIZE_MAP
        self._strict_digit = strict_digit

        if backoff in _BACKOFF_TYPES:
            if backoff == 'NONE':
                self._backoff_condition = None
                self._backoff_action = None
            else:
                backoff_toks = backoff.split('_')
                self._backoff_condition = backoff_toks[0]
                self._backoff_action = backoff_toks[1]
        else:
            raise AnalyzerError('Invalid backoff mode {}'.format(
                repr(backoff)))

        if isinstance(cache_size, int):
            if cache_size > 0:
                cache = LFUCache(cache_size)
                self.analyze = cached(cache, lock=RLock())(self.analyze)

        else:
            raise AnalyzerError('Invalid cache size {}'.format(
                                repr(cache_size)))
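
Examples #11, #14 and #16 all wrap a bound method with cachetools.cached at construction time, so each instance gets its own LFU cache. A minimal sketch of that pattern, using a hypothetical Worker class:

from threading import RLock
from cachetools import LFUCache, cached

class Worker:
    def __init__(self, cache_size=1024):
        if cache_size > 0:
            cache = LFUCache(cache_size)
            # Rebind the already-bound method; self is not part of the cache key.
            self.compute = cached(cache, lock=RLock())(self.compute)

    def compute(self, word):
        return word.upper()   # stand-in for an expensive analysis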
Example #15
 def __init__(self):
     self._cache = LFUCache(maxsize=128)
     self._url = 'https://duckduckgo.com/'
     self._requestUrl = self._url + "i.js"
     self._headers = {
         'authority':
         'duckduckgo.com',
         'accept':
         'application/json, text/javascript, */*; q=0.01',
         'sec-fetch-dest':
         'empty',
         'x-requested-with':
         'XMLHttpRequest',
         'user-agent':
         'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) '
         'Chrome/80.0.3987.163 Safari/537.36',
         'sec-fetch-site':
         'same-origin',
         'sec-fetch-mode':
         'cors',
         'referer':
         'https://duckduckgo.com/',
         'accept-language':
         'en-US,en;q=0.9',
     }
Example #16
    def __init__(self, analyzer, mle_path=None, top=1, cache_size=100000):
        if not isinstance(analyzer, Analyzer):
            raise ValueError('Invalid analyzer instance.')
        if not isinstance(top, int):
            raise ValueError('Invalid value for top.')
        if not isinstance(cache_size, int):
            raise ValueError('Invalid value for cache_size.')

        if mle_path is not None:
            with open(mle_path, 'r', encoding='utf-8') as mle_fp:
                self._mle = json.load(mle_fp)
        else:
            self._mle = None

        self._analyzer = analyzer

        if top < 1:
            top = 1
        self._top = top

        if cache_size < 0:
            cache_size = 0

        self._cache = LFUCache(cache_size)
        self._scored_analyses = cached(self._cache)(self._scored_analyses)
Example #17
    def __init__(self, url, timeout=30, utxo_cache=False, debug=False):
        self.sessionmaker = sessionmaker(bind=create_engine(url, connect_args={'connect_timeout': timeout}, encoding='utf8', echo=debug))

        self.address_cache = LFUCache(maxsize=16384)
        self.txid_cache = RRCache(maxsize=131072)
        self.utxo_cache = RRCache(maxsize=262144) if utxo_cache else None

        super(DatabaseIO, self).__init__(self.sessionmaker(), address_cache=self.address_cache, txid_cache=self.txid_cache, utxo_cache=self.utxo_cache)
Example #18
 def __init__(self, *args: Any, **kwargs: Any) -> None:
     settings = get_settings()
     self._raster_cache = LFUCache(
         settings.RASTER_CACHE_SIZE,
         getsizeof=operator.attrgetter('nbytes')
     )
     self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
     super().__init__(*args, **kwargs)
Example #19
	def __init__(self):
		self.offset = ''
		self.URL = 'https://api.telegram.org/bot' + bot_token
		self.session = requests.Session()
		self.session.mount("http://", requests.adapters.HTTPAdapter(max_retries=2))
		self.session.mount("https://", requests.adapters.HTTPAdapter(max_retries=2))
		self.cache = LFUCache(maxsize = 200)
		self.startMessage = start_message
		self.keyboard = {"keyboard":[["/define"],["/synonyms"],["/antonyms"],["/examples"],["/all"]]}
Example #20
def get_cache(cache_type, cache_size):
    caches = {
        'lfu': LFUCache(cache_size),
        'lru': LRUCache(cache_size),
        #'rl' : RLCache(cache_size),
        'rr': RRCache(cache_size)
    }

    try:
        return caches[cache_type]
    except KeyError:
        return default()
Example #21
class FileCache(APICache):
    def __init__(self, path):
        self._cache = LFUCache(maxsize=512)
        self.path = path
        if not os.path.isdir(self.path):
            os.mkdir(self.path, 0o700)

    def _getpath(self, key):
        return os.path.join(self.path, str(hash(key)) + '.cache')

    def put(self, key, value):
        with open(self._getpath(key), 'wb') as f:
            f.write(zlib.compress(pickle.dumps(value, -1)))
        self._cache[key] = value

    def get(self, key):
        if key in self._cache:
            return self._cache[key]

        try:
            with open(self._getpath(key), 'rb') as f:
                return pickle.loads(zlib.decompress(f.read()))
        except IOError as ex:
            if ex.errno == 2:  # file does not exist (yet)
                return None
            else:
                raise

    def invalidate(self, key):
        self._cache.pop(key, None)

        try:
            os.unlink(self._getpath(key))
        except OSError as ex:
            if ex.errno == 2:  # does not exist
                pass
            else:
                raise
Example #22
    def __init__(self, master_path):
        self.version = os.path.basename(master_path.rstrip("/"))
        self.connection = sqlite3.connect(
            "file:{0}?mode=ro".format(os.path.join(master_path, "masterdata.db")), uri=True
        )
        self.connection.row_factory = sqlite3.Row
        self.card_id_cache = LFUCache(256)
        self.member_cache = {}

        self.ordinal_to_cid = {
            ord: id for ord, id in self.connection.execute("SELECT school_idol_no, id FROM m_card")
        }
        self.tt_stat_increases = self.distill_tt_stat_increases()
        print(f"MasterData: alloc with {len(self.ordinal_to_cid)} cards")
Example #23
    def __init__(self, master_path):
        super().__init__(master_path)
        self.card_id_cache = LFUCache(256)
        self.member_cache = {}
        self.card_brief_cache = {}

        self.ordinal_to_cid = {
            ord: id
            for ord, id in self.connection.execute(
                "SELECT school_idol_no, id FROM m_card")
        }
        self.tt_stat_increases = self.distill_tt_stat_increases()
        self.constants = self.fetch_required_constants()
        print(f"MasterData: alloc with {len(self.ordinal_to_cid)} cards")
Example #24
    def __init__(self, bot):
        self.bot = bot
        self.spam_detection = defaultdict(list)
        self.repetitive_message = defaultdict(Counter)
        self.INVITE_REGEX = re.compile(
            r'((http(s|):\/\/|)(discord)(\.(gg|io|me)\/|app\.com\/invite\/)([0-z]+))'
        )
        self.ENGLISH_REGEX = re.compile(
            r'[ -~]|(?:' + UNICODE_EMOJI +
            r')|(?:\U00002018|\U00002019|\s)|[.!?\\\-\(\)]|ツ|(?:\(╯°□°\)╯︵ ┻━┻)|(?:┬─┬ ノ\( ゜-゜ノ\))'
        )  # U2018 and U2019 are iOS quotes

        self.nude_detector = NudeDetector()
        self.nude_graph = tf.get_default_graph()

        self.nude_image_cache = LFUCache(50)
Example #25
    def __init__(self, bot: rainbot) -> None:
        self.bot = bot
        self.spam_detection: DefaultDict[str, List[int]] = defaultdict(list)
        self.repetitive_message: DefaultDict[str, Counter] = defaultdict(Counter)
        self.INVITE_REGEX = re.compile(r'((http(s|):\/\/|)(discord)(\.(gg|io|me)\/|app\.com\/invite\/)([0-z]+))')
        self.ENGLISH_REGEX = re.compile(r'(?:\(╯°□°\)╯︵ ┻━┻)|[ -~]|(?:' + UNICODE_EMOJI + r')|(?:\U00002018|\U00002019|\s)|[.!?\\\-\(\)]|ツ|¯|(?:┬─┬ ノ\( ゜-゜ノ\))')  # U2018 and U2019 are iOS quotes

        self.nude_detector = NudeDetector()

        self.nude_image_cache: LFUCache[str, List[str]] = LFUCache(50)

        self.detections = []

        for func in self.__class__.__dict__.values():
            if isinstance(func, Detection):
                self.detections.append(func)
Example #26
    def test_lfu(self):
        cache = LFUCache(maxsize=2)

        cache[1] = 1
        cache[1]
        cache[2] = 2
        cache[3] = 3

        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[1], 1)
        self.assertTrue(2 in cache or 3 in cache)
        self.assertTrue(2 not in cache or 3 not in cache)

        cache[4] = 4
        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[4], 4)
        self.assertEqual(cache[1], 1)
Example #27
class TwitterListener(StreamListener):
    def __init__(self):
        self.cache = LFUCache(maxsize=50)
        self.cache2 = LFUCache(maxsize=50)
        Timer(interval=60, function=self.print_keys)
        Timer(interval=30, function=self.check_cached_words)

    def on_data(self, data):
        data_lst = json.loads(data)
        data_lst = data_lst.get('text', '').split()

        if self.cache.currsize == self.cache.maxsize:
            for key in list(self.cache.keys()):
                if self.cache[key] == 0:
                    del self.cache[key]

        for word in data_lst:
            if word in self.cache.keys():
                self.cache[word] += 1
            else:
                self.cache[word] = 1
            if self.cache[word] < 0:
                del self.cache[word]

        return True

    def print_keys(self):
        """
        print recent words and update the second cache every 60 seconds
        """
        print(list(self.cache.items()))
        self.cache2.update(self.cache)
        return True

    def check_cached_words(self):
        """
        Decrease score of word by 1 if the score does not change within
        60 seconds
        """
        for word in list(self.cache.keys()):
            if self.cache.get(word) == self.cache2.get(word):
                self.cache[word] -= 1
        return True
Example #28
    def test_lfu_getsizeof(self):
        cache = LFUCache(maxsize=3, getsizeof=lambda x: x)

        cache[1] = 1
        cache[2] = 2

        self.assertEqual(len(cache), 2)
        self.assertEqual(cache[1], 1)
        self.assertEqual(cache[2], 2)

        cache[3] = 3

        self.assertEqual(len(cache), 1)
        self.assertEqual(cache[3], 3)
        self.assertNotIn(1, cache)
        self.assertNotIn(2, cache)

        with self.assertRaises(ValueError):
            cache[4] = 4
        self.assertEqual(len(cache), 1)
        self.assertEqual(cache[3], 3)
Example #29
File: cli.py  Project: ridhoq/ditto
def main():
    cache_repo = CacheRepository()

    lfu_cache = LFUCache(maxsize=5)
    cache_repo.register_cache("lfu", lfu_cache)

    blob_cache = BlobCache()
    cache_repo.register_cache("blob", blob_cache)

    for user_name in sys.argv[1:]:
        user = team_ayy_lmao_users[user_name]
        model = get_model(user, cache_repo)
        start = timer()
        for i in range(20):
            print(
                model.make_sentence(max_overlap_ratio=MAX_OVERLAP_RATIO,
                                    max_overlap_total=MAX_OVERLAP_TOTAL,
                                    tries=1000))

        end = timer()
        print(f"{(end - start) * 1000} ms elapsed for sentence creation ")
Example #30
class WordBot():
	def __init__(self):
		self.dictionaryCache = LFUCache(maxsize = 200)
		self.urbandictionaryCache = LFUCache(maxsize = 200)
		self.wordOfTheDayCache = {}
		self.bot = telebot.TeleBot(bot_token)
		self.session = requests.Session()
		self.session.mount('http://', requests.adapters.HTTPAdapter(max_retries=5))
		self.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=5))

	def handle_inline(self, query):
		try:
			query_word = query.get('query')
			default_word = self.getWordOfTheDay()
			inline_answers = []
			if default_word:
				default_result = types.InlineQueryResultArticle(
					'1', 
					'Word of the day', 
					types.InputTextMessageContent(
						'*' + default_word['word'] + '*\n' + default_word['definitions'],
						parse_mode='markdown'
					),
					description=default_word['word']
				)
				inline_answers = [default_result]
			if query_word or query_word != '':
				reply = self.make_reply('/define ' + query_word)
				desc = reply if reply == 'Word not found.' else None
				query_result = types.InlineQueryResultArticle('2', 
					query_word, 
					types.InputTextMessageContent(
						reply,
						parse_mode='markdown'
					),
					description=desc
				)
				inline_answers = [query_result]
			self.bot.answer_inline_query(query.get('id'), inline_answers)
		except Exception as e:
			pass
			#sprint(e)

	def handle_message(self, message):
		if 'new_chat_participant' in message:
			return
		query = message.get('text')
		if not query:
			return
		if '@LexicoBot' in query:
			query = query.replace('@LexicoBot', '')
		reply = self.make_reply(query)
		if reply != '':
			self.bot.send_chat_action(message['chat']['id'], 'typing')
			self.bot.send_message(message['chat']['id'], reply, parse_mode='markdown')
	   
	def make_reply(self, query):
		if query in ['/start', '/help']:
			return start_message		
		reply_message = ''
		if query == '/today':
			wordData = self.getWordOfTheDay()
			if wordData is None:
				return 'Server error.'
			query = '/define ' + wordData['word']
		query = query.split()
		if len(query) > 1:
			if query[0] in ['/define', '/synonyms', '/antonyms', '/use', '/all', '/ud']:
				word = ' '.join(query[1::])
				reply_message = '*' +  word + '*\n'
				if query[0] != '/ud':
					wordData = self.dictionaryCache.get(word)
					if wordData is None:
						wordData = self.getWord(word)
					else:
						pass
						#print 'Cache hit ' + word 
					if wordData is None:
						return 'Word not found.'
					if query[0] in ['/define','/all']:
						reply_message += wordData['definitions'] + '\n'
					if query[0] in ['/synonyms','/all']:
						reply_message += wordData['synonyms'] + '\n'
					if query[0] in ['/antonyms','/all']:
						reply_message += wordData['antonyms'] + '\n'
					if query[0] in ['/use','/all']:
						reply_message += wordData['examples'] + '\n'
				else:
					wordData = self.urbandictionaryCache.get(word)
					if wordData is None:
						wordData = self.getUrbandictionaryWord(word)
					if wordData is None:
						return 'Word not found'
					reply_message += wordData['definition'] + '\n'
					reply_message += wordData['example']	
		return reply_message
	
	def updateCache(self, word, wordData):
		dataDict = {}
		definitionText = '*Definitions*' +'\n'
		synonymsText = '*Synonyms*' + '\n'
		antonymsText = '*Antonyms*' + '\n'
		examplesText = '*Examples*' + '\n'
		definitions = self.getDefinitions(wordData)
		synonyms = self.getSynonyms(wordData)
		antonyms = self.getAntonyms(wordData)
		examples = self.getExamples(wordData)
		if not definitions:
			definitionText += 'No definitions found.\n'
		if not synonyms:
			synonymsText += 'No synonyms found.\n'
		if not antonyms:
			antonymsText += 'No antonyms found.\n'
		if not examples:
			examplesText += 'No examples found.\n'		
		for definition in self.getDefinitions(wordData):
			if definition[0]:
				definitionText += definition[0] + ': '
			definitionText += definition[1] + '\n\n'	
		definitionText = definitionText[:-1]	
		for synonym in synonyms[:3]:
			synonymsText += synonym + '\n'
		for antonym in antonyms[:3]:
			antonymsText += antonym + '\n'
		for index, example in enumerate(examples[:3]):
			examplesText += str(index+1) + '. ' + example + '\n\n'
		examplesText = examplesText[:-1]
		dataDict['word'] = word
		dataDict['definitions'] = definitionText
		dataDict['synonyms'] = synonymsText
		dataDict['antonyms'] = antonymsText
		dataDict['examples'] = examplesText
		self.dictionaryCache.update({word:dataDict})
		return dataDict
			
	def getDefinitions(self, wordData):
		partCounter = Counter()
		definitions = []
		for definition in wordData:
			if 'partOfSpeech' in definition.keys() and partCounter[definition['partOfSpeech']] < 2:
				definitions.append( 
					('_' + definition['partOfSpeech'] + '_ ', 
					definition['text'])
				)
				partCounter[definition['partOfSpeech']] += 1
			else:
				definitions.append(('',definition['text']))
		return definitions

	def getSynonyms(self, wordData):
		synonyms = []
		for relatedWords in wordData[0]['relatedWords']:
			if relatedWords['relationshipType'] == 'synonym':
				for synonym in relatedWords['words']:
					synonyms.append(synonym)
		
		for relatedWords in wordData[0]['relatedWords']:
			if relatedWords['relationshipType']	 == 'cross-reference':
				for synonym in relatedWords['words']:
					synonyms.append(synonym)	
		return synonyms

	def getAntonyms(self, wordData):
		antonyms = []
		for relatedWords in wordData[0]['relatedWords']:
			if relatedWords['relationshipType']	 == 'antonym':
				for antonym in relatedWords['words']:
					antonyms.append(antonym)
		return antonyms

	def getExamples(self, wordData):
		examples = []
		for index,example in enumerate(wordData[0]['examples']):
			examples.append(example['text'].replace('\n',''))
		return examples
		
	def getEtymology(self, wordData):
		etymologies = []
		for etymology in wordData[0]['etymologies']:
			etymologies.append(etymology)
		return etymologies

	def getWord(self, word):
		def_url = wordnik_url + word + '/definitions?limit=15&api_key=' + wordnik_api_key
		example_url = wordnik_url + word + '/examples?api_key=' + wordnik_api_key
		related_url = wordnik_url + word + '/relatedWords?api_key=' + wordnik_api_key
		urls = [def_url, example_url, related_url]
		data = []
		for url in urls:
			try:
				response = self.session.get(url, verify=False)
				if response.status_code != 200:
					return None
				data.append(json.loads(response.text.encode('utf-8')))
			except ValueError:
				return None
		if not data[0]:
			return None
		wordData = data[0]
		try:
			wordData[0]['examples'] = data[1]['examples']
		except KeyError:
			wordData[0]['examples'] = []
		try:
			wordData[0]['relatedWords'] = data[2]
		except KeyError:
			wordData[0]['relatedWords'] = []
		return self.updateCache(word,wordData)
	
	def getUrbandictionaryWord(self, word):
		api_url = 'http://api.urbandictionary.com/v0/define?term='
		#response = requests.get(api_url+word, verify=False)
		response = urllib2.urlopen(api_url + word)
		data = json.loads(response.read().decode('utf-8'))
		if data['result_type'] == 'no_results' or not data['list']:
			return None
		wordData = {}
		wordData['definition'] = '*Definition*' + '\n'
		wordData['example'] = '*Example*'  + '\n'
		try:
			if data['list'][0]['definition']:
				wordData['definition'] += data['list'][0]['definition'].strip() + '\n'
			else:
				return None
		except KeyError:
			return None
		try:
			if data['list'][0]['example']:
				wordData['example'] += data['list'][0]['example'].strip() + '\n'
			else:
				wordData['example'] += 'No example found.'
		except KeyError:
			wordData['example'] += 'No example found.'			
		self.urbandictionaryCache.update({word:wordData})
		return wordData
	
	def getWordOfTheDay(self):
		wordOfTheDay = self.wordOfTheDayCache.get(datetime.now().day, None)
		if wordOfTheDay is None:
			today = datetime.strftime(datetime.now(), '%Y-%m-%d')
			url = wordnik_url[:-10] + 'words.json/wordOfTheDay?api_key=' + wordnik_api_key + '&date=' + today
			data = []
			response = self.session.get(url, verify=False)
			try:
				data.append(json.loads(response.text.encode('utf-8')))
			except ValueError:
				return None
			wordOfTheDay = data[0]['word']
			self.wordOfTheDayCache.clear()
			self.wordOfTheDayCache[datetime.now().day] = wordOfTheDay
		else:
			pass
			#print 'Today Cache Hit ' + wordOfTheDay
		wordData = self.dictionaryCache.get(wordOfTheDay)
		if not wordData:
			url = wordnik_url + wordOfTheDay + '/relatedWords?api_key=' + wordnik_api_key
			wordData = [definition for definition in data[0]['definitions']]
			for definition in wordData:
				definition['attributionText'] = ''
			wordData[0]['examples'] = data[0]['examples']
			response = self.session.get(url, verify = False)
			relatedWords = json.loads(response.text)
			wordData[0]['relatedWords'] = relatedWords
			wordData[0]['word'] = wordOfTheDay
			return self.updateCache(wordOfTheDay, wordData)	
		else:
			#print 'Cache hit ' + wordOfTheDay
			return wordData	
Example #31
    DATABASE["actorKeys"] = {
        "publicKey": pubkey.exportKey('PEM').decode('utf-8'),
        "privateKey": privkey.exportKey('PEM').decode('utf-8')
    }

PRIVKEY = RSA.importKey(DATABASE["actorKeys"]["privateKey"])
PUBKEY = PRIVKEY.publickey()

from . import app, CONFIG
from .remote_actor import fetch_actor

AP_CONFIG = CONFIG.get('ap', {'host': 'localhost', 'blocked_instances': []})
CACHE_SIZE = CONFIG.get('cache-size', 16384)

CACHE = LFUCache(CACHE_SIZE)


async def actor(request):
    data = {
        "@context": "https://www.w3.org/ns/activitystreams",
        "endpoints": {
            "sharedInbox": "https://{}/inbox".format(request.host)
        },
        "followers": "https://{}/followers".format(request.host),
        "following": "https://{}/following".format(request.host),
        "inbox": "https://{}/inbox".format(request.host),
        "name": "ActivityRelay",
        "type": "Application",
        "id": "https://{}/actor".format(request.host),
        "publicKey": {
Example #32
File: optimizer.py  Project: equinor/lcm
class Optimizer:
    POPULATION_SIZE = 20
    NUMBER_OF_CHILDREN = 18  # must be a multiple of 2
    NUMBER_OF_PARENTS = 2
    MUTATION_PROBABILITY = 50  # percent
    NUMBER_OF_MUTATIONS = 30
    MIN_MUTATOR_VALUE = 5
    ENVIRONMENTAL_SCORE = {
        "GREEN": 100,
        "YELLOW": 66,
        "RED": 33,
        "BLACK": 0,
    }
    MASS_IMPORTANCE = 10
    NUMBER_OF_PRODUCTS_IMPORTANCE = 40

    def __init__(
        self,
        bridge: List[float],
        products: List[dict] = None,
        density_goal: int = 350,
        volume: int = 10,
        max_iterations: int = 500,
        max_products: int = 999,
        particle_range=None,
        weights: Dict = None,
    ):
        self.products = products
        self.bridge = bridge
        self.mass_goal = density_goal * volume
        self.density_goal = density_goal
        self.volume = volume
        self.max_iterations = max_iterations
        self.max_products = max_products
        if particle_range is None:
            particle_range = [1.0, 100]
        if particle_range[1] <= 0:
            particle_range[1] = 10000
        self.particle_range = particle_range
        self.weights = weights if weights else {"bridge": 10, "mass": 1, "products": 1}
        self.sum_weights = sum(self.weights.values())

    def optimize(self):
        start = datetime.now()
        max_initial_density = self.density_goal // min(self.max_products, len(self.products))

        score_progress = []
        population, children = self.initialize_population(max_initial_density)
        iterations = 0
        fittest_combo, score, experimental_bridge = {}, 100, []
        for _ in range(self.max_iterations):
            parents = self.select_parents(population)
            children = self.crossover(parents, children)
            children = self.execute_mutation(children)
            population = parents + children
            score, fittest_combo, experimental_bridge = self.optimal(population)
            # for i, _ in enumerate(products):
            #     combination_progress[i].append(list(fittest_combo.values())[i])
            score_progress.append(score)

        bridge_score = self.bridge_score(experimental_bridge)  # standard deviation
        return {
            "combination": {k: v for k, v in fittest_combo.items() if v > 0},
            "cumulative_bridge": experimental_bridge,
            "curve": score_progress,
            "execution_time": (datetime.now() - start),
            "iterations": iterations,
            "score": score,
            "bridge_score": bridge_score,
        }

    def calculate_performance(self, experimental_bridge: list, products_result: List[Product]) -> dict:
        bridge_fitness = 100 - self.bridge_score(experimental_bridge)
        mass_score = 100 - self.mass_score(products_result, squash=False)
        products_score = 100 - (self.n_products_score(products_result, squash=False))

        return {"bridge": bridge_fitness, "mass": mass_score, "products": products_score}

    def initialize_population(self, max_initial_density):
        population = []
        children = []

        if self.max_products < len(self.products):
            id_list = [p["id"] for p in self.products]

            for _ in range(self.POPULATION_SIZE):
                combo_dict = {id: 0 for id in id_list}
                used_id_list = []

                for _ in range(self.max_products):
                    id = random.choice(id_list)
                    while id in used_id_list:
                        id = random.choice(id_list)
                    used_id_list.append(id)

                for id in used_id_list:
                    combo_dict[id] = round(random.uniform(0, max_initial_density), 2)

                population.append(combo_dict)

        else:
            for i in range(self.POPULATION_SIZE):
                combo_dict = {}
                for j in range(len(self.products)):
                    combo_dict[self.products[j]["id"]] = round(random.uniform(0, max_initial_density), 2)
                population.append(combo_dict)

        for i in range(self.NUMBER_OF_CHILDREN):
            children.append([])

        return population, children

    def select_parents(self, population):
        fitness = []
        fit_dict = {}

        for combination in population:
            score, _ = self.fitness_score(pickle.dumps(combination))
            fitness.append(score)
            fit_dict[score] = combination

        fitness.sort()

        parents = [fit_dict[fitness[0]], fit_dict[fitness[1]]]

        return parents

    def bridge_score(self, experimental_bridge):
        if len(self.bridge) != len(experimental_bridge):
            raise ValueError("The experimental bridge has a different size than the theoretical")
        diff_list = []
        i = 0
        for theo, blend in zip(self.bridge, experimental_bridge):
            if self.particle_range[0] < SIZE_STEPS[i] < self.particle_range[1]:
                diff_list.append((theo - blend) ** 2)
            i += 1

        _mean = np.mean(diff_list)
        score = np.sqrt(_mean)
        return score

    @cached(cache=LFUCache(8192))
    def fitness_score(self, pickled_combination: bytes):  # nosec
        combination = pickle.loads(pickled_combination)

        products: List[Product] = []
        for p in self.products:
            if combination[p["id"]] > 0:
                products.append(
                    Product(
                        product_id=p["id"],
                        share=combination[p["id"]] / sum(combination.values()),
                        cumulative=p["cumulative"],
                        sacks=combination[p["id"]],
                        mass=(combination[p["id"]] * self.volume),
                    )
                )

        experimental_bridge = calculate_blend_cumulative(products)
        _bridge_score = self.bridge_score(experimental_bridge) * (self.weights["bridge"] / self.sum_weights)
        mass_score = self.mass_score(products) * (self.weights["mass"] / self.sum_weights)
        number_of_products_score = self.n_products_score(products) * (self.weights["products"] / self.sum_weights)
        return _bridge_score + mass_score + number_of_products_score, experimental_bridge

    def optimal(self, population):
        results = []
        for combination in population:
            score, exp_bridge = self.fitness_score(pickle.dumps(combination))
            results.append({"score": score, "combination": combination, "bridge": exp_bridge})

        results.sort(key=lambda r: (r["score"]))
        return results[0]["score"], results[0]["combination"], results[0]["bridge"]

    def crossover(self, parents, children):
        number_of_products = len(self.products)

        first_parent_ids = list(parents[0].keys())
        second_parent_ids = list(parents[1].keys())
        first_parent_densities = list(parents[0].values())
        second_parent_densities = list(parents[1].values())

        for i in range(self.NUMBER_OF_CHILDREN // 2):
            cross_point = random.randint(1, number_of_products - 1)
            first_child_id_list = first_parent_ids[:cross_point] + second_parent_ids[cross_point:]
            second_child_id_list = second_parent_ids[:cross_point] + first_parent_ids[cross_point:]
            first_child_sacks_list = first_parent_densities[:cross_point] + second_parent_densities[cross_point:]
            second_child_sacks_list = second_parent_densities[:cross_point] + first_parent_densities[cross_point:]

            first_child_dict = {}
            second_child_dict = {}
            for j in range(len(first_child_id_list)):
                first_child_dict[first_child_id_list[j]] = first_child_sacks_list[j]
                second_child_dict[second_child_id_list[j]] = second_child_sacks_list[j]

            children[2 * i] = first_child_dict
            children[2 * i + 1] = second_child_dict

        return children

    @staticmethod
    def swap_bit_mutation(child):
        if len(child) > 1:
            x = random.choice(list(child.keys()))
            y = random.choice(list(child.keys()))

            attempts = 0
            while (x == y) and (child[x] == child[y]) and (attempts < 50):
                y = random.choice(list(child.keys()))
                attempts += 1

            child[x], child[y] = child[y], child[x]

        return child

    @staticmethod
    def inverse_mutation(child):
        child_ids = list(child.keys())
        child_densities = list(child.values())

        x = random.randint(0, len(child_densities))
        y = random.randint(0, len(child_densities))

        while x == y:
            y = random.randint(0, len(child_densities))

        if abs(x - y) <= 1:
            if (x == len(child_densities)) or (y == len(child_densities)):
                x -= 1
                y -= 1
            child_densities[x], child_densities[y] = child_densities[y], child_densities[x]
        elif x < y:
            toReverse = child_densities[x:y]
            toReverse.reverse()
            child_densities[x:y] = toReverse
        else:
            toReverse = child_densities[y:x]
            toReverse.reverse()
            child_densities[y:x] = toReverse

        new_child = {}
        for i in range(len(child_ids)):
            new_child[child_ids[i]] = child_densities[i]
        return new_child

    def flip_bit_mutation(self, child):
        key = random.choice(list(child.keys()))
        max_mutator = child[key] // 2
        if max_mutator < self.MIN_MUTATOR_VALUE:
            max_mutator = self.MIN_MUTATOR_VALUE

        mutator = random.randint(-max_mutator, max_mutator)
        child[key] = child[key] + mutator
        return child

    def execute_mutation(self, children):
        for child in children:
            mute = random.randint(0, int((self.NUMBER_OF_MUTATIONS / self.MUTATION_PROBABILITY) * 100))
            if mute == 1:
                child = self.swap_bit_mutation(child)
            elif mute == 2:
                child = self.inverse_mutation(child)
            elif mute == 3:
                child = self.flip_bit_mutation(child)

            for id in child:
                if child[id] < 0:
                    child[id] = 0

        return children

    def mass_score(self, products: List[Product], squash: bool = True) -> float:
        combination_mass = sum([p.mass for p in products])
        diff = abs(self.mass_goal - combination_mass)
        percentage_diff = (100 / self.mass_goal) * diff
        if squash:
            return percentage_diff / self.MASS_IMPORTANCE
        return percentage_diff

    def n_products_score(self, products: List[Product], squash: bool = True) -> float:
        if len(products) <= self.max_products:
            return 0
        diff = len(products) - self.max_products
        percentage_diff = (100 / self.max_products) * diff
        if squash:
            return percentage_diff / self.NUMBER_OF_PRODUCTS_IMPORTANCE
        return percentage_diff
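
fitness_score above is memoised with cached(cache=LFUCache(8192)) even though each combination is a dict, which is not hashable; the callers therefore pickle the dict and the cache keys on the resulting bytes. A minimal sketch of that trick, with illustrative names:

import pickle
from cachetools import cached, LFUCache

class Evaluator:
    @cached(cache=LFUCache(maxsize=8192))
    def score(self, pickled_combination: bytes) -> float:
        # bytes are hashable, so they can stand in for the dict as the cache key
        combination = pickle.loads(pickled_combination)
        return float(sum(combination.values()))

ev = Evaluator()
assert ev.score(pickle.dumps({"a": 1.0, "b": 2.5})) == 3.5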
Example #33
# Copyright (c) 2018-2019, Eduardo Rodrigues and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/decaylanguage for details.

from __future__ import print_function

from collections import Counter
from copy import deepcopy

try:
    from functools import lru_cache
    cacher = lru_cache(maxsize=64)
except ImportError:
    from cachetools import cached, LFUCache
    cacher = cached(cache=LFUCache(maxsize=64))

from particle import Particle, ParticleNotFound


@cacher
def charge_conjugate(pname):
    """
    Return the charge-conjugate particle name matching the given PDG name.
    If no matching is found, return "ChargeConj(pname)".
    """
    try:
        return Particle.from_string(pname).invert().name
    except:
        return 'ChargeConj({0})'.format(pname)
Example #34
def main():
    # Patch threading to make exceptions catchable.
    install_thread_excepthook()

    # Make sure exceptions get logged.
    sys.excepthook = handle_exception

    args = get_args()

    # Add file logging if enabled.
    if args.verbose and args.verbose != 'nofile':
        filelog = logging.FileHandler(args.verbose)
        filelog.setFormatter(
            logging.Formatter(
                '%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] '
                + '%(message)s'))
        logging.getLogger('').addHandler(filelog)
    if args.very_verbose and args.very_verbose != 'nofile':
        filelog = logging.FileHandler(args.very_verbose)
        filelog.setFormatter(
            logging.Formatter(
                '%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] '
                + '%(message)s'))
        logging.getLogger('').addHandler(filelog)

    if args.verbose or args.very_verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    # Let's not forget to run Grunt / Only needed when running with webserver.
    if not args.no_server:
        if not os.path.exists(
                os.path.join(os.path.dirname(__file__), 'static/dist')):
            log.critical(
                'Missing front-end assets (static/dist) -- please run ' +
                '"npm install && npm run build" before starting the server.')
            sys.exit()

        # You need custom image files now.
        if not os.path.isfile(
                os.path.join(os.path.dirname(__file__),
                             'static/icons-sprite.png')):
            log.critical('Missing sprite files.')
            sys.exit()

    # These are very noisy, let's shush them up a bit.
    logging.getLogger('peewee').setLevel(logging.INFO)
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('pgoapi.pgoapi').setLevel(logging.WARNING)
    logging.getLogger('pgoapi.rpc_api').setLevel(logging.INFO)
    logging.getLogger('werkzeug').setLevel(logging.ERROR)

    config['parse_pokemon'] = not args.no_pokemon
    config['parse_pokestops'] = not args.no_pokestops
    config['parse_gyms'] = not args.no_gyms

    # Turn these back up if debugging.
    if args.verbose or args.very_verbose:
        logging.getLogger('pgoapi').setLevel(logging.DEBUG)
    if args.very_verbose:
        logging.getLogger('peewee').setLevel(logging.DEBUG)
        logging.getLogger('requests').setLevel(logging.DEBUG)
        logging.getLogger('pgoapi.pgoapi').setLevel(logging.DEBUG)
        logging.getLogger('pgoapi.rpc_api').setLevel(logging.DEBUG)
        logging.getLogger('rpc_api').setLevel(logging.DEBUG)
        logging.getLogger('werkzeug').setLevel(logging.DEBUG)

    # Use lat/lng directly if matches such a pattern.
    prog = re.compile(r"^(\-?\d+\.\d+),?\s?(\-?\d+\.\d+)$")
    res = prog.match(args.location)
    if res:
        log.debug('Using coordinates from CLI directly')
        position = (float(res.group(1)), float(res.group(2)), 0)
    else:
        log.debug('Looking up coordinates in API')
        position = util.get_pos_by_name(args.location)

    if position is None or not any(position):
        log.error("Location not found: '{}'".format(args.location))
        sys.exit()

    # Use the latitude and longitude to get the local altitude from Google.
    (altitude, status) = get_gmaps_altitude(position[0], position[1],
                                            args.gmaps_key)
    if altitude is not None:
        log.debug('Local altitude is: %sm', altitude)
        position = (position[0], position[1], altitude)
    else:
        if status == 'REQUEST_DENIED':
            log.error(
                'Google API Elevation request was denied. You probably ' +
                'forgot to enable elevation api in https://console.' +
                'developers.google.com/apis/api/elevation_backend/')
            sys.exit()
        else:
            log.error('Unable to retrieve altitude from Google APIs, ' +
                      'setting to 0')

    log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt)', position[0],
             position[1], position[2])

    if args.no_pokemon:
        log.info('Parsing of Pokemon disabled.')
    if args.no_pokestops:
        log.info('Parsing of Pokestops disabled.')
    if args.no_gyms:
        log.info('Parsing of Gyms disabled.')
    if args.encounter:
        log.info('Encountering pokemon enabled.')

    config['LOCALE'] = args.locale
    config['CHINA'] = args.china

    app = Pogom(__name__)
    app.before_request(app.validate_request)

    db = init_database(app)
    if args.clear_db:
        log.info('Clearing database')
        if args.db_type == 'mysql':
            drop_tables(db)
        elif os.path.isfile(args.db):
            os.remove(args.db)
    create_tables(db)

    app.set_current_location(position)

    # Control the search status (running or not) across threads.
    pause_bit = Event()
    pause_bit.clear()
    if args.on_demand_timeout > 0:
        pause_bit.set()

    heartbeat = [now()]

    # Setup the location tracking queue and push the first location on.
    new_location_queue = Queue()
    new_location_queue.put(position)

    # DB Updates
    db_updates_queue = Queue()

    # Thread(s) to process database updates.
    for i in range(args.db_threads):
        log.debug('Starting db-updater worker thread %d', i)
        t = Thread(target=db_updater,
                   name='db-updater-{}'.format(i),
                   args=(args, db_updates_queue, db))
        t.daemon = True
        t.start()

    # db cleaner; really only need one ever.
    if not args.disable_clean:
        t = Thread(target=clean_db_loop, name='db-cleaner', args=(args, ))
        t.daemon = True
        t.start()

    # WH updates queue & WH gym/pokéstop unique key LFU cache.
    # The LFU cache will stop the server from resending the same data an
    # infinite number of times.
    # TODO: Rework webhooks entirely so a LFU cache isn't necessary.
    wh_updates_queue = Queue()
    wh_key_cache = LFUCache(maxsize=args.wh_lfu_size)

    # Thread to process webhook updates.
    for i in range(args.wh_threads):
        log.debug('Starting wh-updater worker thread %d', i)
        t = Thread(target=wh_updater,
                   name='wh-updater-{}'.format(i),
                   args=(args, wh_updates_queue, wh_key_cache))
        t.daemon = True
        t.start()

    if not args.only_server:

        # Processing proxies if set (load from file, check and overwrite old
        # args.proxy with new working list)
        args.proxy = check_proxies(args)

        # Run periodical proxy refresh thread
        if (args.proxy_file is not None) and (args.proxy_refresh > 0):
            t = Thread(target=proxies_refresher,
                       name='proxy-refresh',
                       args=(args, ))
            t.daemon = True
            t.start()
        else:
            log.info('Periodical proxies refresh disabled.')

        # Gather the Pokemon!

        # Attempt to dump the spawn points (do this before starting threads of
        # endure the woe).
        if (args.spawnpoint_scanning and args.spawnpoint_scanning != 'nofile'
                and args.dump_spawnpoints):
            with open(args.spawnpoint_scanning, 'w+') as file:
                log.info('Saving spawn points to %s', args.spawnpoint_scanning)
                spawns = Pokemon.get_spawnpoints_in_hex(
                    position, args.step_limit)
                file.write(json.dumps(spawns))
                log.info('Finished exporting spawn points')

        argset = (args, new_location_queue, pause_bit, heartbeat,
                  db_updates_queue, wh_updates_queue)

        log.debug('Starting a %s search thread', args.scheduler)
        search_thread = Thread(target=search_overseer_thread,
                               name='search-overseer',
                               args=argset)
        search_thread.daemon = True
        search_thread.start()

    if args.cors:
        CORS(app)

    # No more stale JS.
    init_cache_busting(app)

    app.set_search_control(pause_bit)
    app.set_heartbeat_control(heartbeat)
    app.set_location_queue(new_location_queue)

    config['ROOT_PATH'] = app.root_path
    config['GMAPS_KEY'] = args.gmaps_key

    if args.no_server:
        # This loop allows for ctrl-c interrupts to work since flask won't be
        # holding the program open.
        while search_thread.is_alive():
            time.sleep(60)
    else:
        ssl_context = None
        if (args.ssl_certificate and args.ssl_privatekey
                and os.path.exists(args.ssl_certificate)
                and os.path.exists(args.ssl_privatekey)):
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            ssl_context.load_cert_chain(args.ssl_certificate,
                                        args.ssl_privatekey)
            log.info('Web server in SSL mode.')
        if args.verbose or args.very_verbose:
            app.run(threaded=True,
                    use_reloader=False,
                    debug=True,
                    host=args.host,
                    port=args.port,
                    ssl_context=ssl_context)
        else:
            app.run(threaded=True,
                    use_reloader=False,
                    debug=False,
                    host=args.host,
                    port=args.port,
                    ssl_context=ssl_context)
Example #35
class WordBot():

	def __init__(self):
		self.offset = ''
		self.URL = 'https://api.telegram.org/bot' + bot_token
		self.session = requests.Session()
		self.session.mount("http://", requests.adapters.HTTPAdapter(max_retries=2))
		self.session.mount("https://", requests.adapters.HTTPAdapter(max_retries=2))
		self.cache = LFUCache(maxsize = 200)
		self.startMessage = start_message
		self.keyboard = {"keyboard":[["/define"],["/synonyms"],["/antonyms"],["/examples"],["/all"]]}

	def processUpdates(self):
		response = self.session.get(self.URL + '/getUpdates?offset=' + str(self.offset),verify=False)
		print "Got update"
		status = False
		updates = json.loads(response.text)
		if updates['result']:
			self.offset = updates['result'][0]['update_id'] + 1
			query = updates['result'][0]['message']['text']
			chat_id = updates['result'][0]['message']['chat']['id']
			self.session.get(self.URL + '/sendChatAction?chat_id=' + str(chat_id) +'&action=typing',verify=False)
			message = self.makeMessage(query)			
			status = self.sendMessage(message,chat_id)
		return status
	
	def makeMessage(self,query):
		message = self.startMessage
		if query == '/stop':
			message = 'Bot disabled.'
		elif query == '/today':
			wordData = self.getWordOfTheDay()
			query = '/define ' + wordData['word']
		query = query.split()
		if len(query) > 1:
			if query[0] not in ['/define','/synonyms','/antonyms','/use','/all']:
				return self.startMessage
			word = ' '.join(query[1::]).lower()
			message = 'Word: ' +  word + '\n'
			message += '=' * (len(word) + 7) + '\n'
			if self.cache.get(word):
				print "Fetching from cache"
				wordData = self.cache.get(word)
			else:
				wordData = self.getWord(word)
				if wordData is None:
					return 'Word not found.'
			if query[0] in ['/define','/all']:
				message += wordData['definitions'] + '\n'
			if query[0] in ['/synonyms','/all']:
				message += wordData['synonyms'] + '\n'
			if query[0] in ['/antonyms','/all']:
				message += wordData['antonyms'] + '\n'
			if query[0] in ['/use','/all']:
				message += wordData['examples'] + '\n'	
		return message
	
	def updateCache(self,word,wordData):
		dataDict = {}
		definitionText = 'Definitions :-' + '\n'
		definitionText += '-' * 20 + '\n'
		synonymsText = 'Synonyms :-' + '\n'
		synonymsText += '-' * 20 + '\n'
		antonymsText = 'Antonyms :-' + '\n'
		antonymsText += '-' * 20 + '\n'
		examplesText = 'Examples :-' + '\n'
		examplesText += '-' * 20 + '\n'
		definitions = self.getDefinitions(wordData)
		synonyms = self.getSynonyms(wordData)
		antonyms = self.getAntonyms(wordData)
		examples = self.getExamples(wordData)
		if not definitions:
			definitionText += 'No definitions found.\n'
		if not synonyms:
			synonymsText += 'No synonyms found.\n'
		if not antonyms:
			antonymsText += 'No antonyms found.\n'
		if not examples:
			examplesText += 'No examples found.\n'
			
		for definition in self.getDefinitions(wordData):
			definitionText += definition[0] + '\n' + definition[1] + ': ' +  definition[2] + '\n\n'
		for synonym in synonyms[:5]:
			synonymsText += synonym + '\n'
		for antonym in antonyms[:5]:
			antonymsText += antonym + '\n'
		for index,example in enumerate(examples[:5]):
			examplesText += str(index+1) + ". " + example + '\n\n'
		
		dataDict['word'] = word
		dataDict['definitions'] = definitionText
		dataDict['synonyms'] = synonymsText
		dataDict['antonyms'] = antonymsText
		dataDict['examples'] = examplesText
		self.cache.update({word:dataDict})
		return dataDict 
			
	def getDefinitions(self,wordData):
		partCounter = Counter()
		definitions = []
		for definition in wordData:
			if partCounter[definition['partOfSpeech']] < 2:
				definitions.append((definition['attributionText'],definition['partOfSpeech'],definition['text']))
				partCounter[definition['partOfSpeech']] += 1
		return definitions


	def getSynonyms(self,wordData):
		synonyms = []
		for relatedWords in wordData[0]['relatedWords']:
			if relatedWords['relationshipType'] == 'synonym':
				for synonym in relatedWords['words']:
					synonyms.append(synonym)
		
		for relatedWords in wordData[0]['relatedWords']:
			if relatedWords['relationshipType']  == 'cross-reference':
				for synonym in relatedWords['words']:
					synonyms.append(synonym)
		
		return synonyms

	def getAntonyms(self,wordData):
		antonyms = []
		for relatedWords in wordData[0]['relatedWords']:
			if relatedWords['relationshipType']  == 'antonym':
				for antonym in relatedWords['words']:
					antonyms.append(antonym)
		return antonyms

	def getExamples(self,wordData):
		examples = []
		for index,example in enumerate(wordData[0]['examples']):
			examples.append(example['text'].replace('\n',''))
		return examples
		
	def getEtymology(self,wordData):
		etymologies = []
		for etymology in wordData[0]['etymologies']:
			etymologies.append(etymology)
		return etymologies

	def getWord(self,word):
		url1 = wordnik_url + word + '/definitions?api_key=' + wordnik_api_key
		url2 = wordnik_url + word + '/examples?api_key=' + wordnik_api_key
		url3 = wordnik_url + word + '/relatedWords?api_key=' + wordnik_api_key
		urls = [url1,url2,url3]
		data = []
		for url in urls:
			try:
				response = self.session.get(url,verify=False)
				data.append(json.loads(response.text.encode('utf-8')))
			except ValueError:
				return None
		if not data[0]:
			return None
		wordData = data[0]
		try:
			wordData[0]['examples'] = data[1]['examples']
		except KeyError:
			wordData[0]['examples'] = []
		try:
			wordData[0]['relatedWords'] = data[2]
		except KeyError:
			wordData[0]['relatedWords'] = []
		return self.updateCache(word,wordData)
	
	def getWordOfTheDay(self):
		today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
		url = wordnik_url[:-10] + 'words.json/wordOfTheDay?api_key=' + wordnik_api_key + '&date=' + today
		wordData = []
		data = []
		response = self.session.get(url,verify = False)
		data.append(json.loads(response.text.encode('utf-8')))
		word = data[0]['word']
		if self.cache.get(word):
			wordData = self.cache.get(word)
			return wordData
		url = wordnik_url + word + '/relatedWords?api_key=' + wordnik_api_key
		wordData = [definition for definition in data[0]['definitions']]
		for definition in wordData:
			definition['attributionText'] = ''
		wordData[0]['examples'] = data[0]['examples']
		response = self.session.get(url,verify = False)
		relatedWords = json.loads(response.text)
		wordData[0]['relatedWords'] = relatedWords
		wordData[0]['word'] = word
		return self.updateCache(word,wordData)		
			
	def sendMessage(self,message,chat_id):
		dataDict = {'chat_id':str(chat_id),
				'text':message.encode('utf-8'),
				'reply_markup':self.keyboard}
		response = self.session.post(self.URL + '/sendMessage',data = dataDict)
		if response.status_code == 200:
			return True
		else:
			return False
Example #36
 def __init__(self, path):
     self._cache = LFUCache(maxsize=512)
     self.path = path
     if not os.path.isdir(self.path):
         os.mkdir(self.path, 0o700)