Example 1
    def __init__(self, bot):
        self.bot = GenericEvent.bot = bot
        self.bot_command = ['/bot']

        self.pluggables = {
            "allmessages": [],
            "call": [],
            "membership": [],
            "message": [],
            "rename": [],
            "history": [],
            "sending": [],
            "typing": [],
            "watermark": [],
        }

        # timeout for messages to be received for reprocessing: 6 hours
        receive_timeout = 60 * 60 * 6

        self._reprocessors = Cache(receive_timeout, increase_on_access=False)

        self._contexts = Cache(receive_timeout, increase_on_access=False)

        self._image_ids = Cache(receive_timeout, increase_on_access=False)

        self._executables = Cache(receive_timeout, increase_on_access=False)
Example 2
    def __init__(self):
        super().__init__(command_prefix=_prefix_callable, owner_id=394859035209498626, case_insensitive=True)
        self.restrictions = ["create", "edit", ""]
        self.reactions = {"arrows": ["◀", "▶"],
                          "toggle": ["⏏"],
                          "ticks": ["<:greenTick:600735269993578496>", "<:redTick:600735269792120977>"],
                          "boolean": ["<:greenTick:600735269993578496>", "<:redTick:600735269792120977>"],
                          "thumbs": ["👍","👎"],
                          "cancel": ["<:redTick:600735269792120977>"],
                          "pager": ["⏪", "⏩"],
                          "superlike": "🔥"
        }

        self.translate = {"greenTick": 1, "redTick": 0}

        # Simple "cache" for some data
        self.data = {}

        self.cache = Cache()
        # Prefix class for fetching cached prefixes
        self.prefixes = Prefix(self)

        # Loads the images into memory
        self.images = ImageCache("images")

        # Removes the help command
        self.remove_command("help")
        # Loads the extensions (cogs)
        for ext in extensions:
            try:
                self.load_extension(ext)
            except Exception:
                print(f'Failed to load extension {ext}.', file=sys.stderr)
                traceback.print_exc()
            else:
                print(f'Successfully loaded {ext}')
        # Can be used to measure statistics
        self.stats = {}

        # Can be used to prevent people from using commands multiple times
        self.restricted = {}

        # Loads the fonts into memory
        self.fonts = FontCache("fonts")

        # Loads data from json files as a directory
        self.json = JsonCache()

        # Creates session for API calls.
        self.session = aiohttp.ClientSession(loop=self.loop)

        # Starts background tasks ::

        # Starts the latency timer
        self.loop.create_task(self.latency_timer())

        # Creates redis database class
        #self.db = Rdb(config.redis_path, self.loop)

        print("Tinker successfully initialized.")
Example 3
    async def get_from_backend(address_components):
        cache = Cache(TAMULookup.name)
        result = cache.get(address_components)
        is_cached = bool(result)
        if is_cached:
            request_count_cached.labels(TAMULookup.name).inc()
        else:
            result = tamu_geocode_address(address_components)
            cache.save(address_components,
                       result)  # TODO do this in the background
        request_count.labels(TAMULookup.name).inc()

        point = Point(
            (Decimal(result["Longitude"]), Decimal(result["Latitude"])))
        feature = Feature(
            geometry=point,
            properties={
                "service": TAMULookup.name,
                "quality": result["NAACCRGISCoordinateQualityCode"],
                "timestamp":
                datetime.datetime.utcnow().isoformat() + "Z",  # poop
                "cached": is_cached,  # should this be a timestamp?
            },
        )

        return feature
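
This lookup (and the OSM variant in Example 16 below) follows the classic cache-aside pattern: check the cache, fall back to the geocoder on a miss, and save the fresh result for the next caller. Here is a minimal sketch of that pattern factored into a reusable helper, assuming only the get()/save() methods used above; the helper name and the generic backend callable are illustrative, not part of the original code:

def cached_lookup(cache, key, backend):
    # Cache-aside: serve a hit if present, otherwise recompute and store.
    # `backend` is any callable that can rebuild the value for `key`,
    # e.g. a geocoder request.
    result = cache.get(key)
    is_cached = bool(result)
    if not is_cached:
        result = backend(key)
        cache.save(key, result)
    return result, is_cached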
Example 4
def crawl(test):
    print "subversion"
    cache = Cache()
    last_crawl = upstream.last_crawl(source_id)
    rels = get_releases(last_crawl)
    count, max_date = upstream.add_releases(source_id, rels, test, cache)
    print "\t" + str(count), "new releases"
    upstream.set_last_crawl(source_id, max_date, test)
Example 5
 def __init__(self):
     self.config = Configuration()
     self.cache = Cache()
     self.cards = []
     self.name = ""
     self.combos = []
     self.api = GameApi(self.config)
     self.savePath = self.config.paths.savePath
Example 6
def crawl(test):
    cache = Cache()
    sources = explore_module.get_explore_targets()
    for target in sources:
        print(target[1])
        rels = explore(*target[2:])
        count, max_date = explore_module.add_releases(source_id, target[0],
                                                      rels, test, cache)
        print "\t" + str(count), "new releases"
        explore_module.set_last_crawl(target[0], max_date, test)
Example 7
    def __init__(self, adapter, **kwargs):
        self.adapter = adapter
        "The dataset adapter serving the raw data."

        self.queue = []
        "A queue for the elements still to be served this epoch."

        self.split_limits = {'train': None, 'test': None, 'val': None}
        "Optional limits for the number of elements in train, test, and val sets."

        self.noshuffle = False
        "If set, epoch elements are not shuffled."

        self.current_minibatch = None
        "The current minibatch index in the epoch."

        self.current_phase = None
        "The current phase (train, test or val)."

        self.minibatch_size = None
        "The amount of elements per minibatch."

        self.cache = Cache(enabled=kwargs.pop('caching', True),
                           gpu=kwargs.pop('gpu_caching', True))
        "The cache used by the data function calls. By default, caches everything in GPU memory."

        self.data_function = None
        """
        Function that serves the input and target data for a given minibatch element from a given adapter.
        The minibatch dimension should already be added - they are concatenated along the first dimension.
        
        This function should handle any desired caching itself, using the passed cache.
        Input: adapter, element [, cache]
        Output: (input, target) tuple
        Both input and target should be a tuple
        """

        self._logger = None
        "Logger to handle output."

        self.center_crop_size = None
        "Used by the patch-based data servers to crop the center view."

        self.refinement_experiment = None
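        "Optional refinement experiment; if set to a file path, it is loaded via ExperimentHandler at the end of __init__."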

        self.nr_neighbours = 4

        self.restricted_nr_views = 1
        "Used by some data loader functions"

        self.__dict__.update(kwargs)

        if self.refinement_experiment is not None:
            self.refinement_experiment = experiment_handler.ExperimentHandler.load_experiment_from_file(
                self.refinement_experiment)
Example 8
	def __init__(self,package=None,distro=None):
		c = Cache()
		self.releases = []
		
		con = db.connect(host=HOST,user=USER,password=PASSWORD,database=DB)
		cur = con.cursor()
		
		if package is not None:
			cur.execute("SELECT id FROM packages WHERE name = %s",(package,))
			package_id = cur.fetchone()[0]
		else:
			package_id = None
		
		if distro is not None:
			cur.execute("SELECT id FROM distros WHERE name = %s",(distro,))
			row = cur.fetchone()
			if row is None:
				print("Unknown distro: " + distro)
				raise UnknownDistroError(distro)
			distro_id = row[0]
		else:
			distro_id = None
		
		cached = False
		if package is None and distro is None:
			key = "/upstream/latest"
			query = "SELECT packages.name, ureleases.version, MIN(ureleases.released) FROM packages, ureleases WHERE packages.id = ureleases.package_id GROUP BY packages.name, ureleases.version HAVING MIN(ureleases.released) >= current_timestamp - interval '1 day' ORDER BY MIN(ureleases.released) DESC, packages.name ASC"
			query_args = []
		elif package is None and distro is not None:
			key = "/distro/%s/latest"%distro
			query = "SELECT packages.name, dreleases.version, dreleases.revision, MIN(dreleases.released) FROM packages, dreleases, repos, distros WHERE packages.id = dreleases.package_id AND repos.id = dreleases.repo_id AND distros.id = repos.distro_id AND distros.name = %s GROUP BY packages.name, dreleases.version, dreleases.revision HAVING MIN(dreleases.released) >= current_timestamp - interval '1 day' ORDER BY MIN(dreleases.released) DESC, packages.name ASC"
			query_args = (distro,)
		elif package is not None and distro is None:
			key = "/pkg/%s/latest"%package
			query = "SELECT packages.name, ureleases.version, MIN(ureleases.released) FROM packages, ureleases WHERE packages.id = ureleases.package_id AND packages.name = %s GROUP BY packages.name, ureleases.version HAVING MIN(ureleases.released) >= current_timestamp - interval '1 day'ORDER BY MIN(ureleases.released) DESC"
			query_args = (package,)
		else:
			key = "/distro/%s/pkg/%s/latest"%(distro,package)
			query = "SELECT packages.name, dreleases.version, dreleases.revision, MIN(dreleases.released) FROM packages, dreleases, repos, distros WHERE packages.id = dreleases.package_id AND repos.id = dreleases.repo_id AND distros.id = repos.distro_id AND distros.name = %s AND packages.name = %s GROUP BY packages.name, dreleases.version, dreleases.revision HAVING MIN(dreleases.released) >= current_timestamp - interval '1 day' ORDER BY MIN(dreleases.released) DESC"
			query_args = (distro,package)
		
		now = datetime.datetime.now()
		day = datetime.timedelta(1)
		
		status = c.key_status(key)
		if status is not None:
			self.releases = c.get(key)
			if status == Cache.STALE:
				t = threading.Thread(target=self.update, args=(key, query, query_args, package_id, distro_id))
				t.start()
		else:
			self.update(key, query, query_args, package_id, distro_id)
			
		self.today = len(self.releases)
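
The read path above is a small stale-while-revalidate scheme: whatever the cache holds is returned immediately, and an entry flagged as Cache.STALE is refreshed on a background thread so the caller is never blocked. A compact sketch of that flow, assuming the same key_status()/get() interface, with a refresh callable standing in for self.update(); refresh is assumed to recompute, cache, and return the value:

import threading

def read_with_refresh(cache, key, refresh):
    # Serve the cached value when one exists; refresh stale entries in the
    # background instead of making the caller wait.
    status = cache.key_status(key)
    if status is not None:
        value = cache.get(key)
        if status == Cache.STALE:
            threading.Thread(target=refresh, args=(key,)).start()
        return value
    # Cold cache: compute synchronously so the caller gets real data.
    return refresh(key)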
Example 9
    def __init__(self, tables, cache_key='', is_bootstrap=True, is_resume=True):
        """从 DB 读取数据
        :param tables: list, 要读取的库表
        :param cache_key: 缓存key
        :param is_bootstrap: 是否全量查询
        :param is_resume: 是与否启用断点续传
        """
        self.tables = tables
        self.is_bootstrap = is_bootstrap
        self.is_resume = is_resume

        self._cache_key = cache_key
        # resume_token records the position reached in the real-time incremental log
        self._cache_resume_token = None
        self.resume_token = {}
        self._rt = {}

        # Records the timestamp each table has been read up to
        self._cache_timestamps = None
        self.timestamps = {}
        self._ts = {}

        if self.is_resume:
            key_rt = f'{cache_key}:{self.suffix_rt}'
            # Read from the external cache
            self._cache_resume_token = Cache(key_rt, **REDIS_CONFIG)
            self.resume_token = self._rt = self.get_resume_token()

            key_ts = f'{cache_key}:{self.suffix_ts}'
            # Read from the external cache
            self._cache_timestamps = Cache(key_ts, **REDIS_CONFIG)
            self.timestamps = self._ts = self.get_timestamps()

        # Newly added tables
        self.new_tables = set(self.tables) - set(self.timestamps.keys())
        # Tables retained from the previous run
        self.old_tables = set(self.tables) & set(self.timestamps.keys())
        # Tables whose incremental changes need to be tracked
        self.inc_tables = self.new_tables | self.old_tables
Example 10
 def __init__(self, command_prefix: str, intents: discord.Intents,
              **kwargs):
     super().__init__(command_prefix=command_prefix,
                      intents=intents,
                      **kwargs)
     self.logger = set_logger()
     self.verification_queue = dict()
     self.event_queue = Queue()
     self.obj_cache = Cache()
     self.running = True
     self.default_invite = \
         "https://discord.com/api/oauth2/authorize?client_id=767842408758771742&permissions=51200&scope=bot"
     self.reddit = self.create_reddit_connection()
     self.load_data()
Example 11
def crawl(test=False):
	cache = Cache()
	sources = sf_module.get_sf_targets()
	all_rels = []
	total_new = 0
	for target in sources:
		print(target[1])
		rels = get_releases(*target[2:])
		all_rels += rels
		
		count, max_date = sf_module.add_releases(source_id, target[0], rels, test, cache)
		total_new += count
		print "\t"+str(count),"new releases"
		sf_module.set_last_crawl(target[0], max_date, test)
	return (total_new, all_rels)
Example 12
	def update(self, key, query, query_args, package_id, distro_id, cache=None):
		if cache is None:
			cache = Cache()
		
		con = db.connect(host=HOST,user=USER,password=PASSWORD,database=DB)
		cur = con.cursor()
		
		cur.execute(query, query_args)
		
		tmp = []
		for row in cur:
			tmp.append(row)
		
		self.releases = tmp
		print(tmp)
		cache.put(key,tmp,[(package_id,distro_id)])
Example 13
def crawl(mod):
    cache = Cache()
    repos = mod.get_repos(test)
    i = 0
    for repo in repos:
        print(str(i) + "/" + str(len(repos)), repo)
        s = time.perf_counter()  # time.clock() was removed in Python 3.8
        if not last:
            repo.last_crawl = None
        last_crawl, rels = mod.crawl_repo(repo)
        total_new = downstream.add_releases(repo, rels, test, cache)
        if total_new > 0:
            cache.evict([(None, repo.distro_id)])
        downstream.set_last_crawl(repo, last_crawl, test)
        print "\t" + str(
            total_new), "new releases", "\t\t", time.clock() - s, "secs"
        i += 1
Example 14
    def __init__(self, cache_key='', is_bootstrap=True, is_resume=True):
        """从 MQ 读取数据
        :param cache_key: 缓存key
        :param is_bootstrap: 是否全量读取
        :param is_resume: 是否断点续传
        """
        self.is_bootstrap = is_bootstrap
        self.is_resume = is_resume

        self._cache_key = cache_key
        self._cache_resume_token = None
        self.resume_token = {}

        if self.is_resume:
            key = f'{cache_key}:{self.suffix}'
            # Read from the external cache
            self._cache_resume_token = Cache(key, **REDIS_CONFIG)
            self.resume_token = self.get_resume_token()
Example 15
 async def setup_hook(self) -> None:
     self.session: ClientSession = ClientSession()
     self.cache: Cache = Cache(self)
     self.pool: asyncpg.Pool = await asyncpg.create_pool(
         **self.settings["postgresql"])
     self.topgg: DBLClient = DBLClient(self,
                                       self.api["TopGG"],
                                       autopost_interval=None,
                                       session=self.session)
     self.topgg_webhook: WebhookManager = WebhookManager(self).dbl_webhook(
         "/dbl", self.api["TopGGWH"])
     self.gist: asyncgist.Client = asyncgist.Client(self.api["GitHub"],
                                                    self.session)
     self.sr: sr_api.Client = sr_api.Client()
     self.dagpi: asyncdagpi.Client = asyncdagpi.Client(self.api["DagpiAPI"],
                                                       session=self.session)
     self.myst: mystbin.Client = mystbin.Client(session=self.session)
     self.loop.create_task(self.cache.populate_cache())
     self.loop.create_task(self.load_extensions())
     self.loop.create_task(self.start_nodes())
     self.loop.create_task(self.find_restart_message())
     self.topgg_webhook.run(8025)
Example 16
    async def get_from_backend(address_components):
        cache = Cache(OSMLookup.name)
        result = cache.get(address_components)
        is_cached = bool(result)
        if is_cached:
            request_count_cached.labels(OSMLookup.name).inc()
        else:
            result = osm_geocode_address(address_components)
            cache.save(address_components,
                       result)  # TODO do this in the background
        request_count.labels(OSMLookup.name).inc()

        point = Point((Decimal(result["lon"]), Decimal(result["lat"])))
        feature = Feature(
            geometry=point,
            properties={
                "service": OSMLookup.name,
                "timestamp":
                datetime.datetime.utcnow().isoformat() + "Z",  # poop
                "cached": is_cached,  # should this be a timestamp?
            },
        )

        return feature
Example 17
 def __init__(self, verbose: bool = False) -> None:
     self.verbose = verbose
     self.cache = Cache()
Example 18
    def test_save_can_overwrite_data(self):
        cache = Cache("test", data_dir=self.tempdir)
        cache.save(self.address, {"foo": "bar"})
        cache.save(self.address, {"foo": "baz"})

        assert cache.get(self.address)["foo"] == "baz"
Example 19
 def test_get_returns_nothing_for_cold_cache(self):
     cache = Cache("test", data_dir=self.tempdir)
     assert not cache.get(self.address)
Example 20
 def test_init_accepts_nonexistent_dir(self):
     Cache("test", data_dir="/tmp/non-existent-dir")
Example 21
file_handler = RotatingFileHandler(conf.settings['logfile'],
                                   maxBytes=1024 * 1024 * 5,
                                   backupCount=5,
                                   encoding='utf-8')
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)

# Set chosen logging level
root_logger.setLevel(conf.settings['loglevel'])
log = root_logger.getChild('cloudplow')

# Load config from disk
conf.load()

# Init Cache class
cache = Cache(conf.settings['cachefile'])

# Init Notifications class
notify = Notifications()

# Init Syncer class
syncer = Syncer(conf.configs)

# Ensure lock folder exists
lock.ensure_lock_folder()

# Init thread class
thread = Thread()

# Logic vars
uploader_delay = cache.get_cache('uploader_bans')
Example 22
from utils.environment import get_process_number
from FAdo.reex import str2regexp as fado_str2regexp
from FAdo.reex import ParseReg1
from rule import get_context_regex
from segment_table import LEFT_APPLICATION_BRACKET, LEFT_CENTER_BRACKET, LEFT_IDENTITY_BRACKET
from segment_table import RIGHT_APPLICATION_BRACKET, RIGHT_CENTER_BRACKET, RIGHT_IDENTITY_BRACKET

LEFT_BRACKETS = [
    LEFT_APPLICATION_BRACKET, LEFT_CENTER_BRACKET, LEFT_IDENTITY_BRACKET
]
RIGHT_BRACKETS = [
    RIGHT_APPLICATION_BRACKET, RIGHT_CENTER_BRACKET, RIGHT_IDENTITY_BRACKET
]
BRACKETS = RIGHT_BRACKETS + LEFT_BRACKETS

cache = Cache()
right_context_dfas = dict()
left_context_dfas = dict()
rule_transducers = dict()


# Wrappers for FAdo.reex.ParseReg1 and str2regexp().
# Needed when running multiple processes of simulations since dbm writes to the same shelve file by default.
class ParseReg1MultiProcess(ParseReg1):
    def __init__(self, no_table=0, table=None):
        super(ParseReg1MultiProcess,
              self).__init__(no_table=no_table,
                             table=BracketRuleTransducer.get_table_name())


def str2regexp(s,
Example 23
import socket
from typing import Union

from PyQt5 import QtCore, QtWidgets

from serverside import query
from serverside.enotify import Emailer
from user import User
from utils import fmt
from utils.cache import Cache
from utils.fmt import update_msg_list

PORT = 65501
HOST = socket.gethostbyname(socket.gethostname())

NULL_BYTE = "\x00"

msg_cache = Cache(max_size=2**15)
room_cache = Cache()
user_cache = Cache()

msg_query = query.MessageQuery(cache=msg_cache)
room_query = query.RoomQuery(cache=room_cache)
user_query = query.UserQuery(cache=user_cache)

msg_query.room_query, msg_query.user_query = room_query, user_query
room_query.msg_query, room_query.user_query = msg_query, user_query
user_query.msg_query, user_query.room_query = msg_query, room_query


class Server(QtWidgets.QMainWindow):

    SCKT_TYPE = Union[int, "HasFileno"]