Example #1
0
    def bail_out(self, message, from_error=False):
        """
        In case the transport pipes are closed and the sanic app encounters
        an error while writing data to the transport pipe, log the error
        with proper details.

        :param message: Error message to display
        :param from_error: If the bail out was invoked while handling an
            exception scenario.

        :type message: str
        :type from_error: bool

        :return: None
        """
        if from_error or self.transport is None or self.transport.is_closing():
            logger.error(
                "Transport closed @ %s and exception "
                "experienced during error handling",
                (
                    self.transport.get_extra_info("peername")
                    if self.transport is not None
                    else "N/A"
                ),
            )
            logger.debug("Exception:", exc_info=True)
        else:
            self.write_error(ServerError(message))
            logger.error(message)
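
For reference, exc_info=True attaches the traceback of the exception currently being handled to the log record. A minimal standalone sketch (the logger name and setup here are hypothetical, not part of the example above):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("bail_out_demo")  # hypothetical logger name

try:
    raise ConnectionResetError("simulated transport failure")
except ConnectionResetError:
    # exc_info=True makes the handler render the active traceback
    logger.debug("Exception:", exc_info=True)
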
Example #2
0
 def write_error(self, exception):
     # An error _is_ a response.
     # Don't throw a response timeout when a response _is_ given.
     if self._response_timeout_handler:
         self._response_timeout_handler.cancel()
         self._response_timeout_handler = None
     response = None
     try:
         response = self.error_handler.response(self.request, exception)
         version = self.request.version if self.request else "1.1"
         self.transport.write(response.output(version))
     except RuntimeError:
         if self._debug:
             logger.error(
                 "Connection lost before error written @ %s",
                 self.request.ip if self.request else "Unknown",
             )
     except Exception as e:
         self.bail_out(
             "Writing error failed, connection closed {}".format(repr(e)),
             from_error=True,
         )
     finally:
         if self.parser and (
             self.keep_alive or getattr(response, "status", 0) == 408
         ):
             self.log_response(response)
         try:
             self.transport.close()
         except AttributeError:
             logger.debug("Connection lost before server could close it.")
Example #3
0
 def bail_out(self, message, from_error=False):
     if from_error or self.transport.is_closing():
         logger.error("Transport closed @ %s and exception "
                      "experienced during error handling",
                      self.transport.get_extra_info('peername'))
         logger.debug('Exception:\n%s', traceback.format_exc())
     else:
         exception = ServerError(message)
         self.write_error(exception)
         logger.error(message)
Example #4
0
    def keep_alive_timeout_callback(self):
        """
        Check if elapsed time since last response exceeds our configured
        maximum keep alive timeout value and if so, close the transport
        pipe and let the response writer handle the error.

        :return: None
        """
        time_elapsed = time() - self._last_response_time
        if time_elapsed < self.keep_alive_timeout:
            time_left = self.keep_alive_timeout - time_elapsed
            self._keep_alive_timeout_handler = self.loop.call_later(
                time_left, self.keep_alive_timeout_callback)
        else:
            logger.debug("KeepAlive Timeout. Closing connection.")
            self.transport.close()
            self.transport = None
Example #5
0
    async def get_data(self, chr, start, end, bins, bin=True):
        """Get data for a genomic region from file

        Args:
            chr (str): chromosome
            start (int): genomic start
            end (int): genomic end
            bins (int): number of bins to summarize the region into
            bin (bool): True to bin the results, defaults to True

        Returns:
            a dataframe with results
        """ 
        result = None
        err = None
        
        logging.debug("File Measurement: %s\t%s\t%s" %(self.mid, self.name, "file_get_data"))

        try:
            if self.fileHandler is None:
                file = self.create_parser_object(self.mtype, self.source, self.columns)
                result, err = file.getRange(chr, start, end, bins=bins)
            else:
                result, err = await self.fileHandler.handleFile(self.source, self.mtype, chr, start, end, bins=bins)

            # rename columns from score to mid for BigWigs
            if self.mtype in ["BigWig", "bigwig", "bw", "bigWig"]:
                result = result.rename(columns={'score': self.mid})
            elif self.mtype in ['Tabix', 'tabix', 'tbx'] and not self.isGenes:
                result.columns = ["chr", "start", "end"] + self.columns
                cols = ["chr", "start", "end"]
                cols.append(self.mid)
                result = result[cols]   
            elif self.mtype == "tiledb":
                cols = ["chr", "start", "end", self.mid]
                cols.extend(self.metadata)
                result = result[cols]
                result = result.fillna(0)

            if bin and not self.isGenes and self.mtype not in ["tiledb", "interaction_bigbed"]: 
                result, err = await self.fileHandler.binFileData(self.source, self.mtype, result, chr, start, end, 
                                bins, columns=self.get_columns(), metadata=self.metadata)
 
            return result, str(err)
        except Exception as e:
            logging.error("File Measurement: %s\t%s\t%s" %(self.mid, self.name, "file_get_data"), exc_info=True)
            return {}, str(e)
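
Side note on the Tabix branch above: list.extend() mutates in place and returns None, so the column labels have to be built by concatenation. A small pandas sketch with made-up column names:

import pandas as pd

df = pd.DataFrame([[1, 100, 200, 0.5]])
extra_columns = ["score"]  # hypothetical extra column

# Wrong: extend() returns None, which would wipe out the column labels
# df.columns = ["chr", "start", "end"].extend(extra_columns)

# Right: concatenate the lists before assigning
df.columns = ["chr", "start", "end"] + extra_columns
print(df.columns.tolist())  # ['chr', 'start', 'end', 'score']
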
Example #6
0
    async def handleError(self,
                          trans_num,
                          command,
                          error_msg,
                          user_id='',
                          stock_symbol='',
                          amount=0,
                          filename=''):
        event = {
            **self.baseEvent(trans_num, command), 'error_msg': error_msg,
            **addKeyValuePairs(user_id, stock_symbol, amount, filename)
        }

        logger.info(f'Auditing error event - {trans_num} - {user_id}')
        resp = await self.client.postRequest(f'{self.url}/event/error', event)
        logger.debug(f'Audit response - {resp}')
        return
Example #7
0
    def sync_collections(self):
        req_url = self.emd_endpoint + "/collections/ids"
        logging.debug("Syncing collections from emd")
        r = requests.get(req_url)
        if r.status_code != 200:
            raise Exception(
                "Error getting collection ids to sync from emd {}: {}".format(
                    req_url, r.text))

        emd_ids = r.json()
        new_ids = list(set(emd_ids) - set(self.collections.values()))
        del_ids = [k for k, v in self.collections.items() if v not in emd_ids]

        for id in del_ids:
            del self.collections[id]

        return new_ids
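
The sync boils down to a set difference between the remote ids and the locally known ones. A self-contained sketch with hypothetical data:

# local key -> remote collection id (hypothetical values)
collections = {"coll-a": "id1", "coll-b": "id2"}
emd_ids = ["id2", "id3"]  # ids currently known to emd

new_ids = list(set(emd_ids) - set(collections.values()))           # ['id3']
del_ids = [k for k, v in collections.items() if v not in emd_ids]  # ['coll-a']

for key in del_ids:
    del collections[key]
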
Example #8
0
async def get_single_ticker(request):
    """Get a single market ticker. The market parameter must be given."""
    logger.info('Received a single ticker request.')
    request_args = request.get_args()
    if 'market' not in request_args:
        return json(
            {'error': 'must give "market" parameter!'},
            status=400,
        )
    logger.debug(request_args)
    market = request_args['market'][0]
    if market not in coinblockpro.markets:
        return json(
            {'error': 'market code not recognised!'},
            status=400,
        )
    return json({'result': load_single_ticker(market)})
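
A minimal, self-contained sketch of the same query-parameter validation as a standalone Sanic app; the app name and market list are hypothetical:

from sanic import Sanic
from sanic.response import json

app = Sanic("ticker_example")
MARKETS = {"btc_usd", "eth_usd"}  # hypothetical market codes

@app.route("/ticker", methods=["GET"])
async def single_ticker(request):
    market = request.args.get("market")
    if market is None:
        return json({'error': 'must give "market" parameter!'}, status=400)
    if market not in MARKETS:
        return json({'error': 'market code not recognised!'}, status=400)
    return json({"result": {"market": market, "last_price": 0.0}})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
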
Example #9
0
    async def pickleFileObject(self, fileName):
        """automated task to load a pickled file object

        Args:
            fileName: file name to load
        """
        logging.debug("Handler: %s\t%s" %(fileName,  "pickleFileObject"))
        record = self.records.get(fileName)
        record["pickling"] = True
        record["pickled"] = True
        # record["fileObj"].clearLock()
        filehandler = open(os.getcwd() + "/cache/"+ str(record["ID"]) + ".cache", "wb")
        cache = await record["fileObj"].get_cache()
        # pickle.dump(record["fileObj"], filehandler)
        pickle.dump(cache, filehandler)
        filehandler.close()
        record["pickling"] = False
        record["fileObj"] = None
Example #10
0
 def reload(self):
     """
     Reload the configuration
     :return: None
     """
     if not os.path.exists(NEWS) or not os.path.isdir(NEWS):
         logger.error('News directory not found, quitting...')
         exit(1)
     for source_type in os.listdir(NEWS):
         for source in os.listdir(NEWS + '/' + source_type):
             with open(NEWS + '/' + source_type + '/' + source) as source_f:
                 source_content = yaml.load(source_f.read(), Loader=yaml.FullLoader)
             source_parsed = source_content
             source_parsed['type'] = source_type
             self.news.append(source_parsed)
             logger.debug('Loaded %s' % NEWS + '/' + source_type + '/' + source)
     logger.info('Configuration (re)loaded')
     return
Example #11
0
    def visit_Expr(self, node: Expr) -> Any:
        call = node.value
        if isinstance(call, Await):
            call = call.value

        func = getattr(call, "func", None)
        args = getattr(call, "args", None)
        if not func or not args:
            return node

        if isinstance(func, Attribute) and func.attr == "dispatch":
            event = args[0]
            if hasattr(event, "s"):
                event_name = getattr(event, "value", event.s)
                if self._not_registered(event_name):
                    logger.debug(f"Disabling event: {event_name}")
                    return None
        return node
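
A self-contained sketch of the same idea: an ast.NodeTransformer that removes dispatch("<event>") statements whose event is not in a registered set (the registered set and sample source are hypothetical, and ast.unparse needs Python 3.9+):

import ast

REGISTERED = {"server.init.after"}  # hypothetical set of events that stay enabled

class DisableEvents(ast.NodeTransformer):
    def visit_Expr(self, node):
        call = node.value
        if isinstance(call, ast.Await):
            call = call.value
        func = getattr(call, "func", None)
        args = getattr(call, "args", None)
        if not func or not args:
            return node
        if isinstance(func, ast.Attribute) and func.attr == "dispatch":
            event = args[0]
            if isinstance(event, ast.Constant) and event.value not in REGISTERED:
                return None  # returning None drops the statement
        return node

source = 'app.dispatch("http.lifecycle.begin")\napp.dispatch("server.init.after")\n'
tree = DisableEvents().visit(ast.parse(source))
print(ast.unparse(tree))  # only the registered dispatch survives
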
Example #12
0
async def fetch_data(
    keyname: str, fallback_func, recache: bool = False
) -> dict:
    logger.debug("Trying to fetch data...")
    if recache:
        logger.debug("Recaching data as requested by user...")
        data = await fallback_func()
        return data
    try:
        data = await cache.get(keyname)
        if not data:
            logger.debug("No cache found, fetching from remote DB.")
            data = await fallback_func()
        else:
            logger.debug("Cache found, using cache...")
    except Exception:
        logger.debug("Failed fetching cache...")
        data = await fallback_func()
    return data
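
A hypothetical call site for the helper above, assuming an async fallback that queries the real store:

async def load_channels_from_db() -> dict:
    # stand-in for the real database query
    return {"channels": ["news", "sports"]}

async def refresh_channels() -> dict:
    # recache=True skips the cache lookup and goes straight to the fallback
    return await fetch_data("channels", load_channels_from_db, recache=True)
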
Example #13
0
    def multisign(self, envvars: dict) -> bool:
        env = self._prepare_path(envvars)
        self._prepare_scripts()
        cmd = self._prepare_cmd(env, None)
        for spec in env['BUPGENSPECS'].split():
            localenv = copy.deepcopy(env)
            for setting in spec.split(';'):
                var, val = setting.split('=')
                logger.debug('Setting: {}={}'.format(var.upper(), val))
                localenv[var.upper()] = val
            if not self.run_command(cmd, cleanup=False, env=localenv):
                self.keys.cleanup()
                return False

        self.keys.cleanup()
        self._remove_files([fname for fname in os.listdir(self.workdir) if not fname.startswith('payloads')])

        return True
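
The BUPGENSPECS handling splits the variable on whitespace into per-run specs, and each spec on ';' into KEY=value overrides applied to a copy of the environment. A standalone sketch with made-up values:

import copy

env = {"BUPGENSPECS": "variant=release;arch=x86 variant=debug;arch=arm"}  # hypothetical specs

for spec in env["BUPGENSPECS"].split():
    localenv = copy.deepcopy(env)
    for setting in spec.split(";"):
        var, val = setting.split("=")
        localenv[var.upper()] = val
    print(localenv["VARIANT"], localenv["ARCH"])
# release x86
# debug arm
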
Example #14
0
 async def __make_http_request(cls, method: str, url: str, headers: Optional[dict] = None,
                               params: Optional[str] = None, data: Optional[dict] = None) -> ResponseWrapper:
     request_url = f'{cls.api_url}/{url}?{params}'
     async with aiohttp.ClientSession() as session:
         logger.debug(f'Sending {method} request to {url}, headers: {headers}')
         async with session.request(method=method, url=request_url, data=data, headers=headers,
                                    timeout=cls.REQUEST_TIMEOUT) as response:
             logger.debug(f'Got response from {request_url}, status {response.status}')
             try:
                  resp_data = await response.json()  # type: ignore
             except aiohttp.ContentTypeError:
                 return ResponseWrapper(request_url=request_url, headers=dict(response.headers),
                                         status=response.status, data=dict(error=await response.text()))
             else:
                 return ResponseWrapper(request_url=request_url, headers=dict(response.headers),
                                        status=response.status,
                                        data=resp_data)
Example #15
0
 async def setup(cls, request: HitomiRequest) -> "CommonJS":
     logger.debug(f"Setting up {cls.__name__}")
     common_js_code = await request.get_common_js()
     gg_js_code = await request.get_gg_js()
     # This runs only once at startup, so it is safe to block here.
     with open("./heliotrope/interpreter/polyfill.js") as f:
         # Polyfill is used because there are functions that js2py does not have.
         polyfill = f.read()
     instance = cls(polyfill)
     # See ./heliotrope/interpreter/function.js
     with open("./heliotrope/interpreter/function.js") as f:
         function = f.read()
     instance.interpreter.execute(function)
     instance.update_js_code(common_js_code, gg_js_code)
     return instance
Example #16
0
async def verify_request(method: str, path: str, headers, body: str) -> bool:
    hsig = _parse_sig_header(headers.get("Signature"))
    if not hsig:
        logger.debug("no signature in header")
        return False
    logger.debug(f"hsig={hsig}")
    signed_string = _build_signed_string(hsig["headers"], method, path,
                                         headers, _body_digest(body))

    actor = await fetch(hsig["keyId"])
    if not actor: return False
    k = get_key(actor["id"])
    k.load_pub(actor["publicKey"]["publicKeyPem"])
    if k.key_id() != hsig["keyId"]:
        return False

    return _verify_h(signed_string, base64.b64decode(hsig["signature"]),
                     k.pubkey)
Example #17
0
    def add_new_measurements(self, new_ms_ids):
        logging.debug("Adding new ms from emd")
        all_records = []

        for ms_id in new_ms_ids:
            req_url = self.emd_endpoint + "/ms/" + ms_id
            r = requests.get(req_url)
            if r.status_code != 200:
                raise Exception(
                    "Error getting ms with id {} from {}: {}".format(
                        ms_id, req_url, r.text))

            rec = r.json()
            self.process_emd_record(rec)
            all_records.append(rec)

        logging.debug("Done adding new ms from emd")
        return all_records
Example #18
0
    def keep_alive_timeout_callback(self):
        """
        Check if elapsed time since last response exceeds our configured
        maximum keep alive timeout value and if so, close the transport
        pipe and let the response writer handle the error.

        :return: None
        """
        time_elapsed = time() - self._last_response_time
        if time_elapsed < self.keep_alive_timeout:
            time_left = self.keep_alive_timeout - time_elapsed
            self._keep_alive_timeout_handler = self.loop.call_later(
                time_left, self.keep_alive_timeout_callback
            )
        else:
            logger.debug("KeepAlive Timeout. Closing connection.")
            self.transport.close()
            self.transport = None
Example #19
0
async def member_into_group_callback(data):
    """群成员入群回调"""
    '''{
    "group_id": "20181111222222",
    "user_id": "20181111222222",
    "invite_user_id": "20181111222222",
    "nickname": "群成员",
    "avatar": "http://xxx/xxx.png",
    "user_wxid": "wxid_xxxxxxxx",
    "join_type": 0,
    "join_time": "1970-01-01T00:00:00"
 }'''
    # user ID, inviter ID, group ID, join time
    group_code = data.get('group_id')
    logger.debug(f'new member join group, data:{data}')

    async with db.conn.acquire() as con:
        select_stmt = await con.prepare('''
        select  "group".code,"group".id, "group".user_id, robot.code as robot_code,"group".welcome_msg from "group" 
        join robot_group_map  ON "group".id = robot_group_map.group_id join robot on robot_group_map.robot_id=robot.id
        where "group".code=$1 AND "group".status<>3 and "robot_group_map".status <>3 and "robot".status<>3 '''
                                        )
        group_info = await select_stmt.fetchrow(group_code)
    if not group_info:
        logger.error(
            f'member_into_group_callback: not match group: {group_code}')
        return
    user_id = group_info['user_id']
    group_id = group_info['id']
    robot_code = group_info['robot_code']
    welcome_msg = group_info['welcome_msg']
    await update_join_and_retreat_redis_record(user_id, group_code,
                                               'join_group')

    # Trigger the group welcome message
    logger.debug(f'{group_id} welcome_msg is {welcome_msg}')
    if welcome_msg is not None:
        welcome_key = GROUP_WELCOME_MSG_RECORD_REDIS_KEY + ":{}".format(
            group_id)
        has_record = await redis.conn.exists(welcome_key)
        if not has_record:
            await send_text_msg(robot_code, [data['user_id']], welcome_msg,
                                group_code)
            await redis.conn.setex(welcome_key, 300, 1)
Example #20
0
async def custom_aggregate(collection,
                           query,
                           function_name=None,
                           session=None,
                           to_list=False):
    if function_name:
        logger.debug('%s query %s', function_name, query)

    cursor = collection.aggregate(query, session=session)
    if to_list:
        output = await cursor.to_list(None)
    else:
        output = await cursor.to_list(length=1)
        output = output[0] if output else None

    if function_name:
        logger.debug('%s response %s', function_name, output)

    return output
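
A hypothetical call site, assuming a Motor AsyncIOMotorCollection passed in as orders and the helper above in scope:

async def totals_by_customer(orders):
    pipeline = [
        {"$match": {"status": "paid"}},
        {"$group": {"_id": "$customer_id", "total": {"$sum": "$amount"}}},
    ]
    # to_list=True returns every group; otherwise only the first document (or None)
    return await custom_aggregate(orders, pipeline,
                                  function_name="totals_by_customer", to_list=True)
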
Example #21
0
async def launch_history(request, **kwargs):
    user_id = kwargs['user']['user_id']
    current_page = int(request.raw_args.get('current_page'))
    page_size = int(request.raw_args.get('page_size'))
    logger.debug(f'launch history req, user_id:{user_id}')
    async with db.conn.acquire() as con:
        user_task_stmt = await con.prepare(
            '''select id as task_id, to_char(send_date, 'YYYY-MM-DD HH24:MI:SS') as send_date, jsonb_array_length(group_ids)::varchar as group_count, status as send_flag from "group_task"
                where user_id = $1 and type = 2 and status =1 and create_date >= (current_date  - 2)
                order by create_date desc limit $2 offset $3''')
        user_tasks = await user_task_stmt.fetch(user_id, page_size,
                                                current_page * page_size)
        task_list = records_to_list(user_tasks)
        history_count_stmt = await con.prepare(
            '''select count(1) from "group_task" where user_id = $1 and type = 2 and status = 1 and create_date >= (current_date - 2)'''
        )
        total_records = await history_count_stmt.fetchval(user_id)
        page_info = get_page_info(page_size, current_page, total_records)
        return response_json(task_list, page_info=page_info)
Example #22
0
async def feed(request, ws):
    """在线人数统计 uv"""
    connected.add(ws)
    ua = request.headers.get('user-agent', 'unkown')
    user_agents[ua] += 1
    log.debug('Open WebSockets: ', len(connected))
    try:
        while True:
            await ws.send(json.dumps({
                'user_agents': user_agents,
                'websockets': len(connected),
            }))
            await asyncio.sleep(0.1)
    finally:
        connected.remove(ws)
        user_agents[ua] -= 1
        if user_agents[ua] == 0:
            user_agents.pop(ua)
        log.debug('Open WebSockets: %s', len(connected))
Example #23
0
async def fetch_channels(keyname: str, fallback_func) -> dict:
    logger.debug("Trying to fetch channels data...")
    try:
        data = await cache.get(keyname)
        if not data:
            logger.debug("No cache found, fetching from remote DB.")
            data = await fallback_func()
        else:
            logger.debug("Cache found, using cache...")
    except Exception:
        logger.debug("Failed fetching cache...")
        data = await fallback_func()
    return data
Example #24
0
async def tts_view(request: Request, user_cache: dict):

    # Unpack the relevant info from the user cache for the logic below
    token, pivot = user_cache["token"], user_cache["pivot"]
    processed_id = user_cache["processed"]
    _Logger.debug(f"tts view token = {token} processed_id = {processed_id}")
    # Get the sentence ids currently cached for this pivot
    sent_ids = await get_pivot_cache(pivot)
    sent_ids = [int(i) for i in sent_ids]
    random.shuffle(sent_ids)
    ans_id = None
    for _id in sent_ids:
        if _id not in processed_id:
            ans_id = _id
            break

    if not ans_id:
        return response.json(
            {
                "status": False,
                "ret": Ret.FAIL_LOSE,
                "msg": "当前刷新的id已经全部在 processed id 中"
            }
        )

    _id, poetry_id, _ = await get_sentence_by_id(ans_id)

    user_cache["processed"].append(_id)
    await set_user_cache(token, user_cache)

    author, title, poetry_text = await get_poetry_by_id(poetry_id)
    _Logger.debug(f"get poetry from sql text = {poetry_text}, author = {author}, {title}")
    return response.json(
        {
            "status": True,
            "ret": Ret.SUCCESS_CODE,
            "data": {
                "author": author,
                "title": title,
                "text": poetry_text
            }
        }
    )
Example #25
0
    def bin_rows_legacy(self, data, chr, start, end, bins=2000):
        """Bin genome by bin length and summarize the bin

        Args:
            data: DataFrame from the file
            chr: chromosome
            start: genomic start
            end: genomic end
            bins: max rows to summarize the data frame into

        Returns:
            a binned data frame whose max rows is length
        """

        logging.debug("Measurement: %s\t%s\t%s" %
                      (self.mid, self.name, "bin_rows"))
        freq = round((end - start) / bins)
        if end - start < bins:
            freq = 1

        data = data.set_index(['start', 'end'])
        data.index = pd.IntervalIndex.from_tuples(data.index)

        bins = pd.interval_range(start=start, end=end, freq=freq)
        bins_df = pd.DataFrame(index=bins)
        bins_df["chr"] = chr
        if self.metadata:
            for meta in self.metadata:
                bins_df[meta] = data[meta]

        for col in self.get_columns():
            bins_df[col] = None

        # map data to bins
        for index, row in data.iterrows():
            for col in self.get_columns():
                bins_df.loc[index, col] = row[col]

        bins_df["start"] = bins_df.index.left
        bins_df["end"] = bins_df.index.right

        return pd.DataFrame(bins_df)
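
The core of the binning is pd.interval_range, which cuts the genomic range into fixed-width intervals that become the frame's index. A minimal sketch with hypothetical coordinates:

import pandas as pd

start, end, bins = 0, 10000, 4   # hypothetical region and bin count
freq = round((end - start) / bins)

intervals = pd.interval_range(start=start, end=end, freq=freq)
bins_df = pd.DataFrame(index=intervals)
bins_df["chr"] = "chr1"
bins_df["start"] = bins_df.index.left
bins_df["end"] = bins_df.index.right
print(bins_df)
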
Example #26
0
    async def handleSearch(self, fileName, fileType, query, maxResults):
        """submit tasks to the dask client

        Args:
            fileName: file location
            fileType: file type
            query: gene search query
            maxResults: maximum number of results to return
        """
        logging.debug("Handler: %s\t%s" %(fileName, "handleSearch"))
        fileObj = await self.get_file_object(fileName, fileType)
        try:
            data, err = await fileObj.search_gene(query, maxResults)
        except Exception as e:
            # assuming worker is no longer available, retry
            del self.records[fileName]
            fileObj = await self.get_file_object(fileName, fileType)
            data, err = await fileObj.search_gene(query, maxResults)
        return data, err
Example #27
0
    async def handleSearch(self, fileName, fileType, query, maxResults):
        """submit tasks to the dask client

        Args:
            fileName: file location
            fileType: file type
            query: gene search query
            maxResults: maximum number of results to return
        """
        logging.debug("Handler: %s\t%s" % (fileName, "handleSearch"))

        if self.records.get(fileName) is None:
            fileClass = create_parser_object(fileType, fileName)
            fileObj = fileClass(fileName)
            self.setRecord(fileName, fileObj, fileType)
        fileObj = await self.getRecord(fileName)
        fileFuture = self.client.submit(fileObj.search_gene, query, maxResults)
        data, err = await self.client.gather(fileFuture)
        return data, err
Example #28
0
    async def get_data(self, mMgr, handler=None):

        result = {
            "annotation": [],
            "datasourceGroup": [],
            "datasourceId": [],
            "defaultChartType": [],
            "id": [],
            "maxValue": [],
            "minValue": [],
            "name": [],
            "type": [],
            "metadata": []
        }        
        error = None

        try:
            measurements = mMgr.get_measurements()

            logging.debug("Formatting measurements to send response")
            for rec in measurements:
                result.get("annotation").append(rec.annotation)
                result.get("datasourceGroup").append(rec.source)
                result.get("datasourceId").append(rec.source)
                result.get("defaultChartType").append("track")
                result.get("id").append(rec.mid)
                result.get("maxValue").append(rec.maxValue)
                result.get("minValue").append(rec.minValue)
                result.get("name").append(rec.name)

                result_type = "feature"
                if rec.isGenes:
                    result_type = "range"
                result.get("type").append(result_type)

                result.get("metadata").append(rec.metadata)

        except Exception as e:
            error = e

        return result, error
Example #29
0
async def update_col(request):
    isUsingEmd = request.app.epivizMeasurementsManager.using_emd()
    psname = request.app.psname

    nothingResponse = response.json({
                "requestId": -1,
                "type": "response",
                "error": None,
                "version": 5,
                "data": {}
            }, status = 405)

    def errorResponse(e):
        return response.json({
                "requestId": -1,
                "type": "response",
                "error": str(e),
                "version": 5,
                "data": {}
            }, status = 500) 

    okResponse = response.json({
                "requestId": -1,
                "type": "response",
                "error": None,
                "version": 5,
                "data": {}
            }, status = 200)

    if isUsingEmd and psname is not None:
        try:
            pid = check_output(["pidof", psname])
            os.kill(int(pid), signal.SIGHUP)
            return okResponse

        except Exception as e:
            logging.debug(f"Tried to sighup to re-read measurements with psname {psname}: {e}")
            return errorResponse(e)

    else:
        return nothingResponse
Example #30
0
    async def create(cls, url: str) -> "Template":
        parts = urlparse(url)
        if parts.netloc == "api.memegen.link":
            logger.debug(f"Handling template URL: {url}")
            key = parts.path.split(".")[0].split("/")[2]
            if key == "custom":
                url = parts.query.removeprefix("background=")
            else:
                return cls.objects.get_or_none(key) or cls.objects.get(
                    "_error")

        key = "_custom-" + hashlib.sha1(url.encode()).hexdigest()
        template = cls.objects.get_or_create(key, url)
        if template.image.exists() and not settings.DEBUG:
            logger.info(f"Found background {url} at {template.image}")

        else:
            logger.info(f"Saving background {url} to {template.image}")
            async with aiohttp.ClientSession() as session:
                try:
                    async with session.get(url) as response:
                        if response.status == 200:
                            template.directory.mkdir(exist_ok=True)
                            f = await aiofiles.open(template.image,
                                                    mode="wb")  # type: ignore
                            await f.write(await response.read())
                            await f.close()
                        else:
                            logger.error(
                                f"{response.status} response from {url}")
                except (InvalidURL, ClientConnectionError):
                    logger.error(f"Invalid response from {url}")

        if template.image.exists():
            try:
                utils.images.load(template.image)
            except OSError as e:
                logger.error(e)
                template.image.unlink()

        return template
Example #31
0
    def sync_measurements(self, current_ms):
        req_url = self.emd_endpoint + "/ms/ids"
        logging.debug("Syncing measurements from emd")
        r = requests.get(req_url)
        if r.status_code != 200:
            raise Exception("Error getting ms ids to sync from emd {}: {}".format(req_url, r.text))

        ms_ids = r.json()
        new_ids = list(set(ms_ids) - set(self.measurement_map.values()))
        del_ids = [ k for k, v in self.measurement_map.items() if v not in ms_ids]

        for id in del_ids:
            ms_id = self.measurement_map[id]
            del current_ms[ms_id]

            if id in self.measurement_map:
                del self.measurement_map[id]
            else:
                logging.debug("Tried to del ms map {}: not found".format(id))

        return new_ids
Example #32
0
    async def handleFile(self, fileName, fileType, chr, start, end, bins=2000):
        """submit tasks to the dask client

        Args: 
            fileName: file location
            fileType: file type
            chr: chromosome
            start: genomic start
            end: genomic end
            bins: number of bins to group base pairs into
        """
        logging.debug("Handler: %s\t%s" % (fileName, "handleFile"))
        if self.records.get(fileName) is None:
            fileClass = create_parser_object(fileType, fileName)
            fileObj = fileClass(fileName)
            self.setRecord(fileName, fileObj, fileType)
        fileObj = await self.getRecord(fileName)
        fileFuture = self.client.submit(fileObj.getRange, chr, start, end,
                                        bins)
        data, err = await self.client.gather(fileFuture)
        return data, err
Example #33
0
async def status(request):
    """
    Get status of CoMeT (dataset-broker).

    Poke comet to see if it's alive. Is either dead or returns {"running": True}.

    curl -X GET http://localhost:12050/status
    """
    try:
        logger.debug("status: Received status request")
        return response.json({"running": True, "result": "success"})
    except Exception as e:
        logger.error(
            "status: threw exception {} while handling request from {}",
            str(e),
            request.ip,
        )
        traceback.print_exc()
        raise
    finally:
        logger.debug("status: finished")
Example #34
0
async def webhook_handler(request: Request) -> HTTPResponse:
    if APP_DEBUG:
        logger.debug('Request body below')
        print(request.body)

    text_body = request.body
    try:
        message = ujson.loads(text_body)
    except ValueError:
        logger.debug('Failed parsing the post body as JSON')
        message = None

    if not message or not is_valid_message(message=message):
        logger.debug('Not a valid message, ignoring..')
        return ok_response()

    logger.debug('Message is valid, starting to trade in the background')
    request.app.add_task(trade(message=message))

    logger.debug('Sending response to client and closing connection')
    return ok_response()
Example #35
0
    def _helper(self, host=None, port=None, debug=False,
                ssl=None, sock=None, workers=1, loop=None,
                protocol=HttpProtocol, backlog=100, stop_event=None,
                register_sys_signals=True, run_async=False, access_log=True):
        """Helper function used by `run` and `create_server`."""
        if isinstance(ssl, dict):
            # try common aliases
            cert = ssl.get('cert') or ssl.get('certificate')
            key = ssl.get('key') or ssl.get('keyfile')
            if cert is None or key is None:
                raise ValueError("SSLContext or certificate and key required.")
            context = create_default_context(purpose=Purpose.CLIENT_AUTH)
            context.load_cert_chain(cert, keyfile=key)
            ssl = context
        if stop_event is not None:
            if debug:
                warnings.simplefilter('default')
            warnings.warn("stop_event will be removed from future versions.",
                          DeprecationWarning)

        self.error_handler.debug = debug
        self.debug = debug

        server_settings = {
            'protocol': protocol,
            'request_class': self.request_class,
            'is_request_stream': self.is_request_stream,
            'router': self.router,
            'host': host,
            'port': port,
            'sock': sock,
            'ssl': ssl,
            'signal': Signal(),
            'debug': debug,
            'request_handler': self.handle_request,
            'error_handler': self.error_handler,
            'request_timeout': self.config.REQUEST_TIMEOUT,
            'response_timeout': self.config.RESPONSE_TIMEOUT,
            'keep_alive_timeout': self.config.KEEP_ALIVE_TIMEOUT,
            'request_max_size': self.config.REQUEST_MAX_SIZE,
            'keep_alive': self.config.KEEP_ALIVE,
            'loop': loop,
            'register_sys_signals': register_sys_signals,
            'backlog': backlog,
            'access_log': access_log,
            'websocket_max_size': self.config.WEBSOCKET_MAX_SIZE,
            'websocket_max_queue': self.config.WEBSOCKET_MAX_QUEUE,
            'graceful_shutdown_timeout': self.config.GRACEFUL_SHUTDOWN_TIMEOUT
        }

        # -------------------------------------------- #
        # Register start/stop events
        # -------------------------------------------- #

        for event_name, settings_name, reverse in (
                ("before_server_start", "before_start", False),
                ("after_server_start", "after_start", False),
                ("before_server_stop", "before_stop", True),
                ("after_server_stop", "after_stop", True),
        ):
            listeners = self.listeners[event_name].copy()
            if reverse:
                listeners.reverse()
            # Prepend sanic to the arguments when listeners are triggered
            listeners = [partial(listener, self) for listener in listeners]
            server_settings[settings_name] = listeners

        if self.configure_logging and debug:
            logger.setLevel(logging.DEBUG)
        if self.config.LOGO is not None:
            logger.debug(self.config.LOGO)

        if run_async:
            server_settings['run_async'] = True

        # Serve
        if host and port:
            proto = "http"
            if ssl is not None:
                proto = "https"
            logger.info('Goin\' Fast @ {}://{}:{}'.format(proto, host, port))

        return server_settings
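
The ssl-as-dict branch accepts 'cert'/'certificate' and 'key'/'keyfile' aliases and builds the SSLContext itself. A hypothetical call, assuming app is the Sanic application and the paths are placeholders:

app.run(host="0.0.0.0", port=8443,
        ssl={"cert": "/path/to/fullchain.pem", "key": "/path/to/privkey.pem"})
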
Example #36
0
def parse_multipart_form(body, boundary):
    """Parse a request body and returns fields and files

    :param body: bytes request body
    :param boundary: bytes multipart boundary
    :return: fields (RequestParameters), files (RequestParameters)
    """
    files = RequestParameters()
    fields = RequestParameters()

    form_parts = body.split(boundary)
    for form_part in form_parts[1:-1]:
        file_name = None
        content_type = "text/plain"
        content_charset = "utf-8"
        field_name = None
        line_index = 2
        line_end_index = 0
        while not line_end_index == -1:
            line_end_index = form_part.find(b"\r\n", line_index)
            form_line = form_part[line_index:line_end_index].decode("utf-8")
            line_index = line_end_index + 2

            if not form_line:
                break

            colon_index = form_line.index(":")
            form_header_field = form_line[0:colon_index].lower()
            form_header_value, form_parameters = parse_header(
                form_line[colon_index + 2 :]
            )

            if form_header_field == "content-disposition":
                field_name = form_parameters.get("name")
                file_name = form_parameters.get("filename")

                # non-ASCII filenames in RFC2231, "filename*" format
                if file_name is None and form_parameters.get("filename*"):
                    encoding, _, value = email.utils.decode_rfc2231(
                        form_parameters["filename*"]
                    )
                    file_name = unquote(value, encoding=encoding)
            elif form_header_field == "content-type":
                content_type = form_header_value
                content_charset = form_parameters.get("charset", "utf-8")

        if field_name:
            post_data = form_part[line_index:-4]
            if file_name is None:
                value = post_data.decode(content_charset)
                if field_name in fields:
                    fields[field_name].append(value)
                else:
                    fields[field_name] = [value]
            else:
                form_file = File(
                    type=content_type, name=file_name, body=post_data
                )
                if field_name in files:
                    files[field_name].append(form_file)
                else:
                    files[field_name] = [form_file]
        else:
            logger.debug(
                "Form-data field does not have a 'name' parameter "
                "in the Content-Disposition header"
            )

    return fields, files
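
A hypothetical round trip through the parser above, assuming RequestParameters and File from sanic.request are in scope; note the boundary is passed without the leading "--":

body = (
    b"--simpleboundary\r\n"
    b'Content-Disposition: form-data; name="title"\r\n'
    b"\r\n"
    b"hello\r\n"
    b"--simpleboundary\r\n"
    b'Content-Disposition: form-data; name="upload"; filename="notes.txt"\r\n'
    b"Content-Type: text/plain\r\n"
    b"\r\n"
    b"file contents\r\n"
    b"--simpleboundary--\r\n"
)

fields, files = parse_multipart_form(body, b"simpleboundary")
print(fields.get("title"))        # 'hello'
print(files.get("upload").name)   # 'notes.txt'
print(files.get("upload").body)   # b'file contents'
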