Esempio n. 1
0
    def RecvFromSever(self) -> Tuple[Dict, int]:
        """Receive one UDP datagram from the server and classify it.

        Returns a ``(state, code)`` pair: an empty dict plus a status code
        for control messages and errors, or the deserialized observation
        with ``SERVERCODE_OBSERVATION`` for regular data.
        """
        sockdata: str = ""

        try:
            sockdata = self.socket.recvfrom(UDP_MSGLEN)[0].decode("utf-8")
            if self.verbose:
                print("\nrecv", sockdata)
        except Exception as e:
            # Any socket failure (most likely a recv timeout) is reported
            # uniformly as a timeout to the caller.
            logger.exception(f"{e}")
            return dict(), SERVERCODE_TIMEOUT

        if "***restart***" in sockdata:
            logger.info(f"server restarted on port: {self.port}")
            return dict(), SERVERCODE_RESTART

        elif "***shutdown***" in sockdata:
            logger.info(f"server shutdown on port: {self.port}.")
            return dict(), SERVERCODE_SHUTDOWN

        elif not sockdata:
            logger.warning(f"server no response on port: {self.port}")
            return dict(), SERVERCODE_NORESPONSE

        elif "***identified***" in sockdata:
            logger.info(f"client identified connected on port: {self.port}")
            # * always delay few secs after starting round maybe ??
            time.sleep(self.delay)
            return dict(), SERVERCODE_IDENTIFIED
        else:
            if self.verbose:
                logger.info(f"recv: {sockdata}")
            return ServerState.Deserialize(sockdata), SERVERCODE_OBSERVATION
        # BUG FIX: the former trailing `return True` was unreachable (every
        # branch above returns) and violated the declared return type.
Esempio n. 2
0
def add_additional_background(region: SpliceRegion):
    """Draw per-site indicator lines and shaded focus intervals onto the
    current pylab axes for the given splice region."""
    if region.sites is None:
        return

    _, _, bottom, top = pylab.axis()

    # One dashed vertical indicator line per annotated site.
    for position, line_color in region.sites.items():
        try:
            pylab.vlines(x=position,
                         ymin=bottom,
                         ymax=top,
                         color=line_color,
                         linestyles="dashed",
                         lw=0.5)
        except IndexError as err:
            logger.warning("Indicator line is out of bound: " + str(err))

    if region.focus is None:
        return

    # Shade each focus interval with a translucent grey rectangle.
    for start, end in region.focus.items():
        try:
            xs = [start, end, end, start]

            _, _, bottom, top = pylab.axis()
            ys = [bottom, bottom, top, top]

            pylab.fill(xs, ys, alpha=0.1, color='grey')
        except IndexError as err:
            logger.warning("focus region is out of bound: " + str(err))
Esempio n. 3
0
    def prepare(self, region: SpliceRegion):
        """Load the coverage of ``region`` from every bigWig file into
        ``self.data``, optionally clustering the rows and z-scoring them.

        :param region: genomic region whose values are extracted
        """
        data = []

        for f in self.files:
            with pyBigWig.open(f) as r:
                try:
                    data.append(r.values(region.chromosome, region.start, region.end + 1))
                except RuntimeError as e:
                    logger.warning(e)

                    logger.info("may be caused by the mismatch of chromosome")
                    # Retry once with the alternative chromosome naming
                    # convention ("chr1" <-> "1").
                    if region.chromosome.startswith("chr"):
                        data.append(r.values(region.chromosome.replace("chr", ""), region.start, region.end + 1))
                    else:
                        data.append(r.values("chr" + region.chromosome, region.start, region.end + 1))

        self.data = np.array(data)

        if self.clustering and self.data.shape[0] > 1:
            # Reorder rows following the hierarchical-clustering dendrogram.
            data = linkage(self.data, method=self.clustering_method, metric=self.distance_metric)
            order = dendrogram(data, orientation='right')
            self.data = self.data[order["leaves"], :]

        if self.do_scale:
            # Standardize each row: y = (x - mean) / standard_deviation.
            self.data = zscore(self.data, axis=1)
Esempio n. 4
0
def search_movie_by_name(name: str, year: int) -> MovieByName:
    """Query the OMDb API for a movie by title and year.

    Returns a populated ``MovieByName`` on success, an error-flagged one on
    API errors, and an empty one when the service is unreachable.
    """
    params = {'apikey': OMDB_API_KEY, 't': name, 'y': year}
    response = handle_request(OMDB_API_URL, None, params)

    if response is None or response.status_code != 200:
        logger.warning(
            'Error connecting to OMDb API. The service may not be available at this time.'
        )

        return MovieByName()

    body = response.json()
    if body['Response'] != 'True':
        error = body['Error']
        logger.warning(f"Error from OMDb API: {error}")
        if error == OMDBAPI_ERROR_RESPONSE_MESSAGE:
            return MovieByName(error=True, error_message=MOVIE_NOT_FOUND)
        return MovieByName(error=True, error_message=error)

    content_type = body['Type']
    if content_type != 'movie':
        return MovieByName(content_type=content_type)
    return get_movie_data(body)
Esempio n. 5
0
async def notification(websocket: WebSocket,
                       user: Union[None, User] = Depends(get_current_user_ws)):
    """Stream notification events from Redis pub/sub to a client websocket.

    Subscribes to the per-user channel ``{user_id}_notify`` and forwards
    every JSON-decodable message verbatim until the client disconnects.
    """
    if not user:
        # Unauthenticated connection: drop it without accepting.
        return
    user_id = str(user.id)
    await websocket.accept()
    redis = await aioredis.create_redis_pool(settings.REDIS_URI)
    response = await redis.subscribe(channel=f"{user_id}_notify")
    channel = response[0]
    try:
        while await channel.wait_message():
            raw_event = await channel.get(encoding="utf8")
            try:
                # Parse only to validate; the raw payload is what is sent.
                event = json.loads(raw_event)
            except json.JSONDecodeError as e:
                logger.warning(
                    f"[{user_id}]Event '{raw_event}' was ignored. Decode failed"
                )
                continue
            else:
                await websocket.send_text(raw_event)
    except ConnectionClosed as e:
        logger.info(f"User {user_id} Disconnected")
    finally:
        # Always release the Redis connection, even on disconnect/errors.
        redis.close()
        await redis.wait_closed()
def handle_request(api_url: str, headers: Any, parameters: Any) -> Any:
    """Perform a GET request and return the response, or None on any error.

    :param api_url: URL to request
    :param headers: optional dict of HTTP headers (may be None)
    :param parameters: optional dict of query parameters (may be None)
    :return: the ``requests.Response`` on success (status < 400), else None
    """
    try:
        # ``requests.get`` treats ``headers=None`` / ``params=None`` exactly
        # like omitting them, so the former four-way branch was redundant.
        request = requests.get(api_url, headers=headers, params=parameters)
        request.raise_for_status()

        return request
    except requests.exceptions.HTTPError as http_error:
        logger.warning(f"Http Error: {http_error}")
    except requests.exceptions.ConnectionError as connection_error:
        logger.warning(f"Error Connecting: {connection_error}")
    except requests.exceptions.TooManyRedirects as redirects_error:
        logger.warning(f"Too Many Redirects: {redirects_error}")
    except requests.exceptions.Timeout as timeout_error:
        logger.warning(f"Timeout Error: {timeout_error}")
    except requests.exceptions.RequestException as request_exception:
        logger.warning(f"Error: {request_exception}")

    return None
Esempio n. 7
0
def requests_remaining_pages(movie_id: str, pages: int,
                             movies: List[dict]) -> List[dict]:
    """Fetch pages 2..``pages`` of YTS results for ``movie_id`` and append
    every movie found to ``movies`` (which is also returned).

    BUG FIX: the ``return`` was previously inside the ``for`` loop, so only
    the first extra page was ever fetched.
    """
    result = movies

    for page in range(INITIAL_PAGE + 1, pages + 1):
        # The User-Agent has to be specified to avoid Http Error: 403 Client Error
        headers = {'User-Agent': BROWSER_USER_AGENT}
        payload = {'limit': LIMIT, 'page': page, 'query_term': movie_id}
        request = handle_request(YTS_API, headers, payload)

        if (request is not None) and (request.status_code == 200):
            json_object = request.json()
            status = json_object['status']
            status_message = json_object['status_message']

            if status == 'ok':
                result.extend(json_object['data']['movies'])
            elif status == 'error':
                logger.warning(
                    f"The YTS API has returned an error fetching the page {page} for the movie with IMDb ID {movie_id}: "
                    f"{status_message}")
        else:
            logger.warning(
                f"Unable to retrieve page {page} of available movies for the movie with IMDb ID {movie_id}"
            )

    return result
def search_series_by_imdb(series_id: str) -> ByIMDb:
    """Look a series up on the EZTV API by IMDb ID.

    Returns the parsed torrents wrapped in ``ByIMDb``, or an empty
    ``ByIMDb`` when nothing is found or the service is unreachable.
    """
    params = {'limit': LIMIT, 'page': INITIAL_PAGE, 'imdb_id': series_id}
    response = handle_request(EZTV_API, None, params)

    if response is None or response.status_code != 200:
        logger.warning(
            'Error connecting to EZTV API. The service may not be available at this time.'
        )

        return ByIMDb()

    body = response.json()
    imdb_id = body['imdb_id']
    torrents_count = body['torrents_count']

    if torrents_count <= 0:
        logger.warning(
            f"No results for the series with IMDb ID {series_id}")

        return ByIMDb()

    torrents = body['torrents']
    pages = measure_number_pages(torrents_count)

    if pages > 1:
        total_torrents = requests_remaining_pages(series_id, pages, torrents)
    else:
        total_torrents = torrents

    parsed_torrents = parse_available_torrents(total_torrents)
    return ByIMDb(imdb_id, torrents_count, parsed_torrents)
Esempio n. 9
0
    def _copy(file: FileDescription):
        """Copy one file to its destination, creating parent directories.

        Failures are logged as warnings; the success message is now emitted
        only when the copy actually happened (previously it was logged even
        after an exception).
        """
        try:
            file.destination_path.parent.mkdir(exist_ok=True, parents=True)
            shutil.copy(file.source_path, file.destination_path)
        except Exception as e:
            logger.warning(f'Something wrong: {e}')
        else:
            logger.info(f'File `{file.source_path.resolve()}` is copied to `{file.destination_path.resolve()}`.')
Esempio n. 10
0
File: main.py Progetto: FoxOdd/FDGBM
def verifFile(filename):
    """Return True when ``filename`` may be written.

    An existing file blocks the write unless the force flag (-f) was set
    on the command line.
    """
    if not os.path.exists(filename):
        return True
    if args.force:
        log.debug(filename + " will be overwrite")
        return True
    log.warning(filename + " already exist, you can overwrite with -f option")
    return False
Esempio n. 11
0
def process_uploader_cell(row: HtmlElement, selector: CSSSelector) -> str:
    """Extract the uploader name from a table row.

    Returns '' when the expected single matching element is not found.
    """
    matches = selector(row)
    if len(matches) != 1:
        logger.warning(
            'The structure of the table has changed. There is more than one uploader value.'
        )
        return ''

    return matches[0].text_content()
Esempio n. 12
0
def process_language_cell(row: HtmlElement, selector: CSSSelector) -> str:
    """Extract the subtitle language from a table row.

    Returns '' when the expected single <span> element is not found.
    """
    matches = selector(row)
    if len(matches) != 1:
        logger.warning(
            'The structure of the table has changed. There is more than one <span> tag in the language cell.'
        )
        return ''

    return matches[0].text_content()
Esempio n. 13
0
    def get_contents(self):
        """Fetch and JSON-decode the cached payload for ``self.node_key``.

        Returns {} when the key is missing; logs and re-raises when the
        cached payload cannot be decoded.
        """
        raw = CACHE.get(self.node_key)

        if raw is None:
            logger.warning(f"Data not found for {self.node_key}")
            return {}

        try:
            return json.loads(raw)
        except Exception as err:
            logger.error(err)
            raise
Esempio n. 14
0
def search_movie_by_imdb(movie_id: str, quality_specified: str) -> ByIMDb:
    """Search the YTS API for torrents of a movie by IMDb ID.

    :param movie_id: IMDb identifier of the movie
    :param quality_specified: quality filter applied to the found torrents
    :return: parsed torrents, or an empty ``ByIMDb`` when nothing matches
        or the service is unavailable
    """
    # The User-Agent has to be specified to avoid Http Error: 403 Client Error
    headers = {
        'User-Agent': BROWSER_USER_AGENT,
        'Upgrade-Insecure-Requests': '1',
        'DNT': '1'
    }
    payload = {'limit': LIMIT, 'page': INITIAL_PAGE, 'query_term': movie_id}

    request = handle_request(YTS_API, headers, payload)

    if (request is not None) and (request.status_code == 200):
        json_object = request.json()
        status = json_object['status']
        status_message = json_object['status_message']

        if status == 'ok':
            data = json_object['data']
            movie_count = data['movie_count']
            if movie_count > 0:
                movies_available = data['movies']
                pages = measure_number_pages(movie_count)

                if pages > 1:
                    total_movies = requests_remaining_pages(
                        movie_id, pages, movies_available)
                else:
                    total_movies = movies_available

                parsed_movies = parse_available_movies(total_movies,
                                                       quality_specified)

                if parsed_movies.torrents:
                    return parsed_movies
                return ByIMDb()
            return ByIMDb()
        if status == 'error':
            logger.warning(
                f"The YTS API has returned an error for movie with IMDb ID {movie_id}: {status_message}"
            )
        # BUG FIX: an unexpected status previously fell through and
        # returned None, violating the declared return type.
        return ByIMDb()

    logger.warning(
        "Error connecting to YTS API. The service may not be available at this time."
    )
    return ByIMDb()
Esempio n. 15
0
    def copy(self):
        """Copy every configured file, honouring the skip/replace policies."""
        if not self.files:
            logger.warning('Nothing to copy...')
            return

        logger.info('Copying is started...')
        for file in self.files:
            destination_exists = file.destination_path.exists()
            if self._skip_existed_destination and destination_exists:
                continue

            should_process_file = True
            if destination_exists and not self._copy_all_with_replace:
                # Ask the conflict handler whether to overwrite this file.
                should_process_file = self._handle_file_destination(file)

            if should_process_file:
                self._copy(file)
        logger.info('Copying is finished!')
Esempio n. 16
0
    def simulate_sell(self, coin_pair, price, rsi=-1, day_volume=-1):
        """
        Used to place a simulated trade in the database

        :param coin_pair: String literal for the market (ex: BTC-LTC)
        :type coin_pair: str
        :param price: Market's current price
        :type price: float
        :param rsi: Market's current RSI
        :type rsi: float
        :param day_volume: Market's 24 hour volume
        :type day_volume: float
        """
        if coin_pair not in self.simulated_trades['trackedCoinPairs']:
            # BUG FIX: the market name was never interpolated into the
            # warning message ("{}" was logged literally).
            return logger.warning(
                "Trying to sell on the {} market, which is not a tracked coin pair"
                .format(coin_pair)
            )
        current_date = datetime.now().strftime('%Y/%m/%d %I:%M:%S')
        sell_object = {
            "date": current_date,
            "rsi": rsi,
            "24HrVolume": day_volume,
            "price": price
        }
        profit_margin = self.get_simulated_profit_margin(coin_pair, price)

        self.simulated_trades['trackedCoinPairs'].remove(coin_pair)

        simulated_trade = self.get_simulated_open_trade(coin_pair)
        simulated_trade['sell'] = sell_object
        simulated_trade['profit_margin'] = profit_margin

        write_json_to_file(self.file_string, self.simulated_trades)
Esempio n. 17
0
    def read_config(self):
        """Parse the XML config and collect existing source files.

        Each child tag must carry ``source_path``, ``destination_path`` and
        ``file_name`` attributes, otherwise a ``KeyError`` is raised.
        Missing source files are only logged, not collected.
        """
        root = ET.parse(self.config_path).getroot()
        for entry in root:
            attrs = entry.attrib
            source_path = attrs.get('source_path')
            destination_path = attrs.get('destination_path')
            file_name = attrs.get('file_name')
            if not all((source_path, destination_path, file_name)):
                raise KeyError(
                    f'Not expected attributes for tag `{entry.tag}`: {", ".join(attrs.keys())}!'
                )

            source = Path(source_path, file_name)
            if source.exists():
                self.files.append(
                    FileDescription(source, Path(destination_path, file_name)))
            else:
                logger.warning(f'File `{source}` not found!')
Esempio n. 18
0
 def seqUniprot(self):
     """Fetch this protein's sequence from the UniProt/EBI proteins API.

     Stores the FASTA body (header line stripped) in ``self.sequence``;
     on any HTTP failure the sequence is set to "".  Returns "" right
     away when no accession code is known.
     """
     if self.accession == "":
         log.info('no sequence because no accession code for ' + self.id)
         return ""
     queryURL = "https://www.ebi.ac.uk/proteins/api/proteins?offset=0&size=-1&accession=" + self.accession
     reply = requests.get(queryURL, headers={"Accept": "text/x-fasta"})
     if reply.ok:
         # Drop the FASTA header line, join the remaining sequence lines.
         self.sequence = "".join(reply.text.split("\n")[1:])
         return
     try:
         reply.raise_for_status()
     except requests.exceptions.HTTPError as e:
         log.critical(str(e) + " with id " + self.id)
     log.warning("error to download the sequence from uniprot")
     self.sequence = ""
Esempio n. 19
0
def process_compatible_torrents_cell(row: HtmlElement,
                                     selector: CSSSelector) -> tuple:
    """Extract ``(subtitles, download_link)`` from a compatibility cell.

    Both values default to '' when the expected single <a> tag is missing.
    """
    matches = selector(row)
    if len(matches) != 1:
        logger.warning(
            'The structure of the table has changed. There is more than one <a> tag in the subtitle name cell.'
        )
        return '', ''

    anchor = matches[0].getparent()
    download_link = get_download_link(anchor.get('href'))
    subtitles = anchor.text_content().split('subtitle ')[1]

    return subtitles, download_link
Esempio n. 20
0
def process_rating_cell(row: HtmlElement, selector: CSSSelector) -> int:
    """Extract the numeric rating from a table row.

    Returns 0 when the cell is missing, duplicated, or not numeric.
    BUG FIX: previously a non-numeric cell left ``rating`` as a string
    and ``int(rating)`` raised ValueError right after the warning.
    """
    rating = 0

    elements = selector(row)
    if len(elements) == 1:
        text = elements[0].text_content()
        if is_a_number(text):
            rating = int(text)
        else:
            logger.warning(
                'The structure of the table has changed. The score value is not a number.'
            )
    else:
        logger.warning(
            'The structure of the table has changed. There is more than one score value.'
        )

    return rating
Esempio n. 21
0
def requests_remaining_pages(series_id: str, pages: int,
                             torrents: List[dict]) -> List[dict]:
    """Fetch EZTV result pages 2..``pages`` for ``series_id`` and append
    every torrent found onto ``torrents`` (returned for convenience)."""
    result = torrents

    for page in range(INITIAL_PAGE + 1, pages + 1):
        payload = {'limit': LIMIT, 'page': page, 'imdb_id': series_id}
        response = handle_request(EZTV_API, None, payload)

        if response is None or response.status_code != 200:
            logger.warning(
                f"Unable to retrieve page {page} of Torrents for the show with ID {series_id}"
            )
            continue

        result.extend(response.json()['torrents'])

    return result
Esempio n. 22
0
    def add_events(self, events: Iterable):
        """Add events in batch by calling add_event.

        Submits one ``add_event`` task per event to the thread pool and
        gathers the results (in completion order).  Returns None when the
        pool has already been shut down.
        """
        start_time = timeit.default_timer()

        if not self.thread_pool:
            logger.warning('Add events called while misp is already shutdown')
            return  # close() has already been called

        # Use the thread_pool to perform several insertion in parallel
        futures = [self.thread_pool.submit(self.add_event, event=event) for event in events]
        results = [future.result() for future in concurrent.futures.as_completed(futures)]

        # BUG FIX: use len(futures) instead of len(events) — ``events`` is
        # only required to be an Iterable (e.g. a generator), which has no
        # len() and is already consumed at this point.
        logger.debug(
            'MISP: Done with the insertion of %s events, after %1.2fs',
            len(futures),
            timeit.default_timer() - start_time,
        )
        return results
Esempio n. 23
0
 def accEnsembl(self):
     """
     Get accession from Ensemble ID to Uniprot

     Queries the UniProt mapping service (ENSEMBL_ID -> ACC) with up to
     5 retries on transient HTTP errors and stores the result in
     ``self.accession`` ("" when nothing was found).
     """
     url = 'https://www.uniprot.org/uploadlists/'
     params = {'from': 'ENSEMBL_ID', 'to': 'ACC', 'format': 'tab', 'query': self.id}
     data = urllib.parse.urlencode(params)
     data = data.encode('utf-8')
     req = urllib.request.Request(url, data)
     essai = 1
     response = None  # stays None when every attempt fails
     # request API Uniprot, retrying on transient HTTP errors
     while True:
         try:
             response = urllib.request.urlopen(req).read()
             break
         except urllib.error.HTTPError as e:
             if e.code == 503 and essai < 5:
                 log.info('Error 503, renew in 10 secondes')
                 time.sleep(5)
                 essai += 1
             elif e.code == 400 and essai < 5:
                 log.critical('Error 400 with ' + url + str(urllib.parse.urlencode(params)))
                 time.sleep(5)
                 essai += 1
             elif essai > 5:
                 log.warning('Error ' + str(e.code))
                 break
             else:
                 log.debug(e.code)
                 essai += 1
     if response is None:
         # BUG FIX: ``response`` used to be unbound here, so exhausting the
         # retries raised NameError on the decode below instead of failing
         # gracefully.
         self.accession = ""
         log.warning("no accession uniprot found with id " + self.id)
         return
     accList = re.findall('[0-9A-Z]+', response.decode('utf-8'))
     # Verification of good request
     if len(accList[-1]) >= 5:
         self.accession = accList[-1]
     elif len(accList) == 2:
         # maybe no have accession found
         self.accession = ""
         log.warning("no accession uniprot found with id " + self.id)
     else:
         log.critical("unknow error with id " + self.id)
Esempio n. 24
0
    def __process_input_button(self, button):
        """Build the MessageData payload for an input-type button.

        :param button: dict-like button definition from the flow config
        :return: a trimmed ``MessageData`` mapping for the client
        """
        button_type = button.get("ButtonType")

        # "GetItemFromSource" buttons have a dedicated builder.
        if button_type == "GetItemFromSource":
            message_data = self.__process_getitem_button(button)
            return message_data

        message_type = ""
        input_type = ""
        input_attr = None
        content = None

        message_type = MessageType.get_value("INPUT")
        # Placeholder text may contain verbs substituted from current state.
        placeholder_text = button.get("PlaceholderText", "")
        placeholder_text = AnaHelper.verb_replacer(text=placeholder_text,
                                                   state=self.state)
        input_attr = TextInput(placeHolder=placeholder_text).trim()

        button_type_map = config["button_map"]

        elem_type = button_type_map.get(button_type)
        if elem_type is None:
            # NOTE(review): only a warning is emitted here, yet the
            # subscripts below will raise TypeError on ``None`` — confirm
            # whether unknown button types should abort instead.
            logger.warning(f"Undefined Input Type {button_type}")

        type_of_input = elem_type["input_type"]
        type_of_media = elem_type["media_type"]

        input_type = InputType.get_value(type_of_input)
        media_type = MediaType.get_value(type_of_media)

        content = MessageContent(
            inputType=input_type,
            mediaType=media_type,
            textInputAttr=input_attr,
            mandatory=1,
        ).trim()
        message_data = MessageData(type=message_type, content=content).trim()

        return message_data
Esempio n. 25
0
def load_module(name: str, path: str):
    """Load a module from the given file path without leaving it registered.

    Returns the loaded module, or None when the spec cannot be created or
    execution of the module fails.  ``sys.modules`` is restored afterwards.
    """
    spec = importlib.util.spec_from_file_location(name, path)
    if spec is None:
        return None

    module = importlib.util.module_from_spec(spec)
    previous = sys.modules.get(name)
    sys.modules[name] = module
    try:
        spec.loader.exec_module(module)
    except BaseException as e:
        logger.debug(
            f'A {e.__class__.__name__} occured when load module `{path}`!',
            True)
        logger.warning(f'Module `{name}` maybe have some error. Load failed!')
        module = None

    # Restore the previous registration (or remove the temporary one).
    if previous:
        sys.modules[name] = previous
    else:
        sys.modules.pop(name)
    return module
Esempio n. 26
0
def search_subtitles_by_imdb(movie_id: str, language: str) -> List[ByIMDb]:
    """Scrape the YIFY subtitles page for a movie and return the subtitles
    available in the requested language ([] when none or unreachable)."""
    # The User-Agent has to be specified to avoid the "requests.exceptions.TooManyRedirects" exception.
    headers = {
        'User-Agent': BROWSER_USER_AGENT,
        'Upgrade-Insecure-Requests': '1',
        'DNT': '1'
    }
    response = handle_request(YIFY_API + movie_id, headers, None)

    if response is None or response.status_code != 200:
        logger.warning(
            'Error connecting to YIFY API. The service may not be available at this time.'
        )
        return []

    language_rows = find_language_rows(response.text, language)
    if not language_rows:
        return []
    return get_subtitles_details(language_rows)
Esempio n. 27
0
    def hook_start(self, session: WebshellSession):
        """Gather basic host information right after the session starts and
        populate the session state and server_info from it."""
        super().hook_start(session)
        payload = CSharpPayload('csharp/base/baseinfo.asp_net.cs')
        ret = self.eval(payload)
        if not ret.is_success():
            logger.error("Basic info gather failed!")
            return

        info = json.loads(ret.data)
        session.state['name'] = info['host']
        pwd = info.get('pwd').strip()
        session.state['pwd'] = pwd
        # First line of the help text doubles as the session description.
        session.state['description'] = self.help.lstrip('\r\n ').split('\n')[0]

        server = session.server_info
        server.lang = self.ASP_NET_CS
        server.user = info.get('user').strip()
        server.webshell_root = pwd
        server.os_type = info.get('os_type').strip()
        server.tmpdir = info.get('tmpdir').strip()
        server.sep = info.get('sep').strip()
        server.domain = info.get('domain')
        server.group = info.get('group')
        server.os_bits = info.get('os_bits')

        if not server.isWindows():
            logger.warning("The target system is not windows. Some commands may fail.")
Esempio n. 28
0
    def __init__(self):
        """Set up the interactive console, wiring tab-completion and
        persistent command history via the optional ``readline`` module."""
        super().__init__()

        # Handle to the readline module, or None when it is unavailable.
        self._readline = None

        try:
            self._readline = importlib.import_module('readline')
            self._readline.set_completer(self.complete)
            self._readline.set_completer_delims('')
            self._readline.parse_and_bind('tab: complete')
            self._readline.clear_history()
            if os.path.exists(config.history_path):
                self._readline.read_history_file(config.history_path)
            # set_auto_history only exists on some readline variants, so
            # disable it defensively (history is managed manually).
            if hasattr(self._readline, 'set_auto_history'):
                self._readline.set_auto_history(False)
        except ModuleNotFoundError:
            logger.warning(
                "No module `readline`! You can type `pip install readline` to install it in Unix platform, or `pip install pyreadline` in windows platform."
            )
            logger.info(
                "You can ignore this warning, but command will not be auto-completed and command history is not available."
            )
            self._readline = None
Esempio n. 29
0
    def get_historical_data(self, market, period, unit):
        """
        Queries the historical data in the form of a list

        :param market: String literal for the market (ex: BTC-LTC)
        :type market: str
        :param period: Number of periods to query
        :type period: int
        :param unit: Ticker interval (one of: 'oneMin', 'fiveMin', 'thirtyMin', 'hour', 'week', 'day', and 'month')
        :type unit: str

        :return: List adapted from Bittrex JSON response
        :rtype : list
        """
        request_url = 'https://bittrex.com/Api/v2.0/pub/market/GetTicks?marketName={}&tickInterval={}'.format(
            market, unit)

        # Sign the URL with the account secret as Bittrex requires.
        signature = hmac.new(self.api_secret.encode(),
                             request_url.encode(),
                             hashlib.sha512).hexdigest()
        try:
            historical_data = requests.get(
                request_url, headers={"apisign": signature}).json()
        except json.decoder.JSONDecodeError as exception:
            logger.exception(exception)
            return []

        if not historical_data['success']:
            if historical_data['message'] == 'INVALID_MARKET':
                logger.warning(
                    'The {} market is currently not available on Bittrex'.
                    format(market))
            return []
        return historical_data['result'][-period:]
Esempio n. 30
0
    def __init__(self, id, name, scores, qSeq, hSeq, mSeq, hSeqLen):
        """Store the hit attributes and derive the RefSeq accession and the
        species from the hit name (empty/'unknow' when absent)."""
        self.id = id
        self.name = name
        self.scores = scores  # mapping of score values
        self.qSeq = qSeq
        self.hSeq = hSeq
        self.mSeq = mSeq
        self.lenght = hSeqLen

        # RefSeq accession embedded in the hit name, if any.
        if m := re.search('[XN][MPR]\_[0-9]+.?[0-9]?', name):
            self.refseq = m.group(0).rstrip()  #or regex [A-Z][A-Z]\_[0-9]+.?[0-9]?
        else:
            self.refseq = ''
            log.warning(f"Hit {self.id}, {self.name} not have refseq")

        # Predicted species embedded in the hit name, if any.
        if m := re.search('PREDICTED: ([A-z]+.[A-z]+)', name):
            self.specie = m.group(1).rstrip()
        else:
            self.specie = 'unknow'
            log.warning(f"Hit {self.id}, {self.name} not have specie")