Example #1
    def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):
        if ipc_path is None:
            self.ipc_path = get_default_ipc_path(testnet)
        else:
            self.ipc_path = ipc_path

        self._lock = Lock()
        super(IPCProvider, self).__init__(*args, **kwargs)
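Note: Example #1 shows only the constructor; the _lock created here is what later serializes access to the IPC socket in make_request (see Example #6). A minimal, self-contained sketch of the idiom, with illustrative names not taken from the project:

from gevent.threading import Lock
import gevent

lock = Lock()
counter = 0

def bump():
    global counter
    with lock:               # one greenlet at a time in the critical section
        value = counter
        gevent.sleep(0)      # deliberately yield while holding the lock
        counter = value + 1

gevent.joinall([gevent.spawn(bump) for _ in range(100)])
assert counter == 100        # without the lock, interleaving would lose updates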
Example #2
 def __init__(self,
              username,
              password,
              urls,
              use_lock_for_reseting_jwt=False,
              max_retries=5):
     self.username = username
     self.password = password
     self.urls = urls
     self.lock_for_reseting_jwt = Lock() if use_lock_for_reseting_jwt else None
     self.__init_request_session(max_retries)
     self.__set_token()
Example #3
    def __init__(self):
        """
        Constructor
        """

        # Gevent
        self._gevent_pool_max = 1024
        self._gevent_locker = Lock()
        self._gevent_pool = dict()

        # urllib3
        # Force underlying fifo queue to 1024 via maxsize
        self._u3_basic_pool = PoolManager(num_pools=1024, maxsize=1024)
        self._u3_proxy_pool_max = 1024
        self._u3_proxy_locker = Lock()
        self._u3_proxy_pool = dict()
Example #4
File: task.py Project: chengdg/weizoom
class Task(object):
    task_id = 0
    lock = Lock()

    def __init__(self, runner, *task_args, **task_kw):
        assert callable(runner)

        self.runner = runner
        self.task_args = task_args
        self.task_kw = task_kw

        self.name = self.__generate_name()

    def __generate_name(self):
        # Serialize access to the shared class-level task_id counter.
        Task.lock.acquire()
        Task.task_id += 1
        _task_id = Task.task_id
        Task.lock.release()

        return "task_{}".format(_task_id)

    def execute(self):
        self.runner(*self.task_args, **self.task_kw)

    def __str__(self):
        return u"task({}) runner:'{}', args:'{}', kw:'{}'"\
         .format(self.name, self.runner.__name__, self.task_args, self.task_kw)
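Note: __generate_name guards the shared class-level counter with explicit acquire()/release(). An exception between the two calls would leave the lock held forever; the context-manager form below is equivalent and exception-safe (a sketch, not the project's code):

def _next_task_id():
    with Task.lock:          # released even if the body raises
        Task.task_id += 1
        return Task.task_id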
Example #5
 def __init__(self, name, bind_address, *, master_address, driver_port: int):
     super().__init__(name, bind_address, master_address=master_address)
     self.driver = None
     self.driver_port = driver_port
     self.last_handle = None
     self.init_driver()
     self.disabled_until = time.time()
     self._lock = Lock()
     self.handle_to_url = {}  # type:Dict[str, str]
Example #6
File: ipc.py Project: 4gn3s/web3.py
class IPCProvider(BaseProvider):
    def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):
        if ipc_path is None:
            self.ipc_path = get_default_ipc_path(testnet)
        else:
            self.ipc_path = ipc_path

        self._lock = Lock()
        super(IPCProvider, self).__init__(*args, **kwargs)

    def make_request(self, method, params):
        request = self.encode_rpc_request(method, params)

        self._lock.acquire()

        try:
            with get_ipc_socket(self.ipc_path) as sock:
                sock.sendall(request)
                response_raw = b""

                while True:
                    try:
                        response_raw += sock.recv(4096)
                    except socket.timeout:
                        if response_raw != b"":
                            break

                    if response_raw == b"":
                        gevent.sleep(0)
                    else:
                        try:
                            json.loads(force_text(response_raw))
                        except JSONDecodeError:
                            continue
                        else:
                            break

                else:
                    raise ValueError("No JSON returned by socket")
        finally:
            self._lock.release()

        return response_raw
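Note: make_request holds the provider lock for the entire socket exchange, so concurrent greenlets cannot interleave requests on the single IPC socket, and the read loop treats JSONDecodeError as "response incomplete, keep reading". The same locking, condensed into the with-statement form (a sketch that drops the original's timeout handling):

def make_request(self, method, params):
    request = self.encode_rpc_request(method, params)
    with self._lock:                      # equivalent to acquire/try/finally/release
        with get_ipc_socket(self.ipc_path) as sock:
            sock.sendall(request)
            response_raw = b""
            while True:
                response_raw += sock.recv(4096)
                try:
                    json.loads(force_text(response_raw))
                    return response_raw   # a complete JSON document has arrived
                except JSONDecodeError:
                    continue              # partial response: keep reading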
Example #7
    def __init__(self):
        self.register_lock = Lock()
        self.spawn_signal = Event()
        self.__t_reg = {}

        # set the worker timeout (unit: second)
        self.main_timeout = 1
        self.worker_timeout = 1

        # record the mapping relationship between tx_id and channel_name
        self.tx_id_channel_map = {}

        #
        self.nodeb_api = NodeBApi()

        # update channel api
        self.deposit_action = {
            State.OPEN: depositin,
            State.SETTLED: depoistout
        }
Example #8
    def __init__(self,
                 url=None,
                 file_name=None,
                 path_to_store=None,
                 total_size=None,
                 thread_number=None,
                 logger=None,
                 print_progress=False):

        assert url or file_name

        file_name = file_name if file_name else url.split('?')[0].split(
            '/')[-1]
        file_name_split = file_name.split('.')
        if len(file_name_split[0]) > 64:
            file_name_split[0] = file_name_split[0][:64]
        file_name = '.'.join(file_name_split)
        self.path_to_store = path_to_store if path_to_store else f"{os.environ['HOME']}/Downloads"

        self.logger = logger or logging
        self.print_progress = print_progress
        self.url = url
        self.total_size = total_size
        self.file_name_with_path = self.path_to_store + '/' + file_name
        self.breakpoint_file_path = self.file_name_with_path + '.tmp'
        self.file_seeker = None
        self.thread_number = thread_number
        self.thread_buffer_size = None
        self.remaining_segments = []
        self.start_time = None
        self.segment_dispatch_task = None
        self.speed_calculation_task = None
        self.bytearray_of_threads = None
        self.last_progress_time = None
        self.last_downloaded_size = 0
        self.workers = []
        self.coworker_end_to_indexes_map = dict()
        self.to_dispatcher_queue = Queue()
        self.complete_notify_lock = Lock()

        self.download_info = self.get_download_info()
Example #9
    def test_run_coroutine_with_green_thread_with_decorator(self):
        reset_thread_running_flag()
        reset_thread_running_timer()

        from gevent.threading import Lock
        global _Thread_Lock, Sleep_Function
        _Thread_Lock = Lock()
        # Sleep_Function = gevent.sleep
        Sleep_Function = mr.sleep

        _target_with_green_threads()
        TestPackageInit._chk_thread_result()
Example #10
 def __init__(self, name, bind_address, *, title_re: str, instruments: List[str] = (),
              capture_rects: List[Dict[str, Tuple]], filter_mode: int = 0,
              chrome: bool = False,
              master_address):
     super().__init__(name, bind_address, master_address=master_address)
     self.service = name
     self.title_re = title_re
     self.instruments = instruments or ([''] * len(capture_rects))
     self.capture_rects = capture_rects
     self.last_handle = None
     self.chrome = chrome
     self.recognizer = Recognizer(name=name, in_width=16, in_height=16, n_units=100, n_out=128,
                                  filter_mode=filter_mode,
                                  chrome=chrome)
     self._lock = Lock()
Example #11
    def __init__(
        self,
        jsonrpc_client: JSONRPCClient,
        user_deposit_address: UserDepositAddress,
        contract_manager: ContractManager,
        proxy_manager: "ProxyManager",
        block_identifier: BlockIdentifier,
    ) -> None:
        if not is_binary_address(user_deposit_address):
            raise ValueError("Expected binary address format for token nework")

        check_address_has_code_handle_pruned_block(
            client=jsonrpc_client,
            address=Address(user_deposit_address),
            contract_name=CONTRACT_USER_DEPOSIT,
            expected_code=decode_hex(
                contract_manager.get_runtime_hexcode(CONTRACT_USER_DEPOSIT)),
            given_block_identifier=block_identifier,
        )

        self.client = jsonrpc_client

        self.address = user_deposit_address
        self.node_address = self.client.address
        self.contract_manager = contract_manager
        self.gas_measurements = gas_measurements(
            self.contract_manager.contracts_version)

        self.proxy_manager = proxy_manager

        self.proxy = jsonrpc_client.new_contract_proxy(
            abi=self.contract_manager.get_contract_abi(CONTRACT_USER_DEPOSIT),
            contract_address=Address(user_deposit_address),
        )

        # Keeps track of the current in-flight deposits, to avoid sending
        # unnecessary transactions.
        self._inflight_deposits: Dict[Address, InflightDeposit] = dict()

        # Don't allow concurrent withdraw_plan and withdraw calls.
        # This simplifies the precondition checks.
        self._withdraw_lock = Lock()
Example #12
    def __init__(self, host="http://trade", config=None, size=200):
        self._pool_size = size

        self._instance_list = list()

        ins = nge(host=host, config=config)

        self._instance_list.append(ins)

        for _ in range(self._pool_size-1):
            new_instance = SwaggerClient(
                swagger_spec=ins.swagger_spec,
                also_return_response=config.get("also_return_response",
                                                True) if config else True)
            self._instance_list.append(new_instance)

        self._semaphore = BoundedSemaphore(self._pool_size)

        self._lock = Lock()

        self._idx = 0
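Note: this pool pairs a BoundedSemaphore, which caps concurrent borrowers at _pool_size, with a Lock that protects the shared round-robin index. The example stops at the constructor; a plausible acquire/release pair might look like this (hypothetical methods, not shown in the source):

def acquire(self):
    self._semaphore.acquire()                 # block while all instances are out
    with self._lock:                          # _idx is shared mutable state
        ins = self._instance_list[self._idx]
        self._idx = (self._idx + 1) % self._pool_size
    return ins

def release(self):
    self._semaphore.release()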
Example #13
    def __init__(self, conf_dict):
        """
        Init
        :param conf_dict: dict
        :type conf_dict: dict
        """

        # Lock for acquire/release
        self.pool_lock = Lock()

        # Store
        self.conf_dict = conf_dict

        # Max size
        self.max_size = self.conf_dict.get("pool_max_size", 10)

        # Alloc
        self.pool = queue.Queue(maxsize=self.max_size)

        # Init
        self.size = 0
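Note: the constructor sets up a classic bounded pool: a Queue of idle connections, a Lock guarding the size counter, and max_size capping growth. The rest of the class is not shown; a sketch of how acquire/release could use these fields (method and factory names are assumptions):

def connection_acquire(self):
    try:
        return self.pool.get_nowait()         # fast path: reuse an idle connection
    except queue.Empty:
        pass
    with self.pool_lock:                      # slow path: grow under the lock
        if self.size < self.max_size:
            self.size += 1
            return self._create_connection(self.conf_dict)  # hypothetical factory
    return self.pool.get()                    # pool exhausted: block until released

def connection_release(self, conn):
    self.pool.put_nowait(conn)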
Example #14
 def __init__(self, proxy: PaywalledProxy, es: ElasticsearchBackend):
     self.proxy = proxy
     self.resource_cache = {}
     self.cache_lock = Lock()
     proxy.add_paywalled_resource(
         ExpensiveElasticsearch,
         '/_search',
         None,
         '/<string:_index>/_search',
         '/<string:_index>/<string:_type>/_search',
         '/_msearch',
         '/<string:_index>/_msearch',
         '/<string:_index>/<string:_type>/_msearch',
         '/_mapping',
         '/<string:_index>/_mapping',
         '/<string:_index>/<string:_type>/_mapping',
         resource_class_kwargs=dict(
             resource_cache=self.resource_cache,
             cache_lock=self.cache_lock,
             es=es,
         )
     )
Example #15
    def __init__(self, parent):
        super(Order, self).__init__(parent=parent)

        self._lock = Lock()
Example #16
class MysqlApi(object):
    """
    Mysql Api
    """

    # Lock
    POOL_LOCK = Lock()

    # Static pool instances (hash from config dict => MysqlConnectionPool)
    D_POOL_INSTANCES = dict()

    @classmethod
    def reset_pools(cls):
        """
        Reset all pools
        """

        with cls.POOL_LOCK:
            for s_hash, pool in cls.D_POOL_INSTANCES.items():
                logger.info("Closing pool, s_hash=%s", s_hash)
                pool.close_all()
            cls.D_POOL_INSTANCES = dict()

    @classmethod
    def _get_pool_hash(cls, conf_dict):
        """
        Get pool hash
        :param conf_dict: dict
        :type conf_dict: dict
        :return: str
        :rtype: str
        """

        s_hash = str(hash(ujson.dumps(conf_dict, sort_keys=True)))
        return s_hash

    @classmethod
    def _get_pool(cls, conf_dict):
        """
        Init static pool
        :param conf_dict: dict
        :type conf_dict: dict
        :return: pysolmysql.Pool.mysql_pool.MysqlConnectionPool
        :rtype: pysolmysql.Pool.mysql_pool.MysqlConnectionPool
        """

        # Hash
        s_hash = cls._get_pool_hash(conf_dict)

        # Alloc if needed
        if s_hash not in cls.D_POOL_INSTANCES:
            with cls.POOL_LOCK:
                if s_hash not in cls.D_POOL_INSTANCES:
                    cls.D_POOL_INSTANCES[s_hash] = MysqlConnectionPool(
                        conf_dict)
                    logger.info("Allocated pool, s_hash=%s, pool.len=%s",
                                s_hash, len(cls.D_POOL_INSTANCES))
                    Meters.aii("k.db_pool.hash.cur")

        # Over
        return cls.D_POOL_INSTANCES[s_hash]

    @classmethod
    def _fix_type(cls, data):
        """
        Fix type
        :param data: data
        """
        if isinstance(data, bytearray):
            return data.decode("utf-8")
        else:
            return data

    @classmethod
    def exec_0(cls, conf_dict, statement):
        """
        Execute a sql statement, returning rows affected.
        :param conf_dict: configuration dict
        :type conf_dict: dict
        :param statement: statement to execute
        :type statement: str
        :return: rows affected
        :rtype: int
        """

        cnx = None
        rows_affected = 0
        try:
            cnx = cls._get_pool(conf_dict).connection_acquire()
            with closing(cnx.cursor()) as cur:
                cur.execute(statement)
                rows_affected = cur.rowcount
        finally:
            cls._get_pool(conf_dict).connection_release(cnx)
        # Return outside the finally block so exceptions are not swallowed.
        return rows_affected

    @classmethod
    def exec_n(cls, conf_dict, statement, fix_types=True):
        """
        Execute a sql statement, returning 0..N rows
        :param conf_dict: configuration dict
        :type conf_dict: dict
        :param statement: statement to execute
        :type statement: str
        :param fix_types: If true, fix data type
        :type fix_types: bool
        :return: list of dict
        :rtype: list
        """

        cnx = None
        try:
            cnx = cls._get_pool(conf_dict).connection_acquire()
            with closing(cnx.cursor()) as cur:
                cur.execute(statement)
                rows = cur.fetchall()
                for row in rows:
                    logger.debug("row=%s", row)
                    for k, v in row.items():
                        logger.debug("k=%s, %s, %s", k, type(v), v)
                        if fix_types:
                            row[k] = MysqlApi._fix_type(v)
                return rows
        finally:
            cls._get_pool(conf_dict).connection_release(cnx)

    @classmethod
    def exec_1(cls, conf_dict, statement, fix_types=True):
        """
        Execute a sql statement, returning 1 row.
        Method will fail unless exactly 1 row is returned.
        :param conf_dict: configuration dict
        :type conf_dict: dict
        :param statement: statement to execute
        :type statement: str
        :param fix_types: If true, fix data type
        :type fix_types: bool
        :return: dict
        :rtype: dict
        """

        cnx = None
        try:
            cnx = cls._get_pool(conf_dict).connection_acquire()
            with closing(cnx.cursor()) as cur:
                cur.execute(statement)
                rows = cur.fetchall()
                for row in rows:
                    logger.debug("row=%s", row)
                    for k, v in row.items():
                        logger.debug("k=%s, %s, %s", k, type(v), v)
                        if fix_types:
                            row[k] = MysqlApi._fix_type(v)
                if len(rows) != 1:
                    raise Exception(
                        "Invalid row len, expecting 1, having={0}".format(
                            len(rows)))
                return rows[0]
        finally:
            cls._get_pool(conf_dict).connection_release(cnx)

    @classmethod
    def exec_01(cls, conf_dict, statement, fix_types=True):
        """
        Execute a sql statement, returning 0 or 1 row.
        Method will fail unless 0 or 1 row is returned.
        :param conf_dict: configuration dict
        :type conf_dict: dict
        :param statement: statement to execute
        :type statement: str
        :param fix_types: If true, fix data type
        :type fix_types: bool
        :return: dict, or None if no row matched
        :rtype: dict, None
        """

        cnx = None
        try:
            cnx = cls._get_pool(conf_dict).connection_acquire()
            with closing(cnx.cursor()) as cur:
                cur.execute(statement)
                rows = cur.fetchall()
                for row in rows:
                    logger.debug("row=%s", row)
                    for k, v in row.items():
                        logger.debug("k=%s, %s, %s", k, type(v), v)
                        if fix_types:
                            row[k] = MysqlApi._fix_type(v)
                if len(rows) == 0:
                    return None
                elif len(rows) != 1:
                    raise Exception(
                        "Invalid row len, expecting 1, having={0}".format(
                            len(rows)))
                else:
                    return rows[0]
        finally:
            cls._get_pool(conf_dict).connection_release(cnx)

    @classmethod
    def multi_n(cls, conf_dict, ar_statement):
        """
        Execute multiple sql statements, reading nothing from mysql.
        :param conf_dict: configuration dict
        :type conf_dict: dict
        :param ar_statement: list of statements to execute (for instance, batch of insert or whatever)
        :type ar_statement: list
        """

        cnx = None
        try:
            cnx = cls._get_pool(conf_dict).connection_acquire()
            with closing(cnx.cursor()) as cur:
                for s in ar_statement:
                    cur.execute(s)
        finally:
            cls._get_pool(conf_dict).connection_release(cnx)
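Note: _get_pool is double-checked locking: the dict membership test runs lock-free on the hot path and is repeated under POOL_LOCK, so two greenlets racing on a new config hash allocate only one pool. The idiom distilled (a sketch, not the project's API):

from gevent.threading import Lock

_instances = {}
_lock = Lock()

def get_instance(key, factory):
    if key not in _instances:             # cheap, lock-free fast path
        with _lock:
            if key not in _instances:     # re-check: another greenlet may have won
                _instances[key] = factory()
    return _instances[key]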
Example #17
class MonitorDaemon(object):
    """
    Monitor thread set to handle the transaction states from the Trinity network.
    """
    def __init__(self):
        self.register_lock = Lock()
        self.spawn_signal = Event()
        self.__t_reg = {}

        # set the worker timeout (unit: second)
        self.main_timeout = 1
        self.worker_timeout = 1

        # record the mapping relationship between tx_id and channel_name
        self.tx_id_channel_map = {}

        #
        self.nodeb_api = NodeBApi()

        # update channel api
        self.deposit_action = {
            State.OPEN: depositin,
            State.SETTLED: depoistout
        }

    def worker(self, callback, *args, **kwargs):
        """
        Worker to handle each response from NODE-B.
        :param callback: The real handler called by worker.
        :param args:
        :param kwargs:
        :return:
        """
        transport = self.__register_transport(callback.__name__)
        if not transport:
            logger.info(
                'No monitor worker thread is created. Channel state will never be updated.'
            )
            return

        while True:
            try:
                task = transport.get(timeout=self.worker_timeout)
                if task:
                    callback(task)
            except Exception as e:
                # Should never happen.
                logger.error(
                    'Exception occurred in the worker thread. exceptions: {}'.
                    format(e))

            gevent.sleep(0.5)

        return

    def daemon_monitor_timer_event(self, interval=10):
        self.spawn_signal.clear()
        self.spawn_signal.wait(5)
        if self.spawn_signal.is_set():
            logger.info('Start the Monitor timer event.')
        else:
            logger.info('No transport was registered; exiting.')
            return

        # timer event to handle the response from the NODE-B
        while True:
            try:
                start_time = datetime.now()
                self.confirm_channel_state(self.all_opening_channels)
                self.confirm_channel_state(self.all_settling_channels,
                                           'settled')

                # calculate the total seconds
                total_seconds = (datetime.now() - start_time).total_seconds()
                if interval > total_seconds:
                    gevent.sleep(interval - total_seconds)
                else:
                    # sleep 500 ms
                    gevent.sleep(0.5)
            except Exception as e:
                logger.error('exception info: {}'.format(e))
                gevent.sleep(interval)

        # clear the spawn signal. should never run here.
        self.spawn_signal.clear()

        # TODO: release resources here?

        return

    def confirm_channel_state(self,
                              channel_set,
                              expected_state='open',
                              count_per_second=100):
        """

        :param channel_set:
        :param count_per_second:
        :return:
        """
        if not channel_set:
            return

        # parse the all channels from the channel sets
        total_channels = len(channel_set)
        send_loop = total_channels // count_per_second
        left_chan = total_channels % count_per_second

        # handle the left channels
        if 0 != left_chan:
            tx_id_list = list()
            for ch in channel_set[send_loop * count_per_second::]:
                tx_id_list.append(ch.tx_id)
                self.tx_id_channel_map.update({ch.tx_id: ch.channel_name})

            # post data to the NODE-B
            response = self.nodeb_api.get_channel_state_from_nodeb(
                data=list(set(tx_id_list)))
            self.add_task(task={expected_state.lower(): response})

        # handle the request wrapped by count_per_second
        for loop_index in range(send_loop):
            start_index = loop_index * count_per_second
            tx_id_list = list()
            for ch in channel_set[start_index:start_index + count_per_second:]:
                tx_id_list.append(ch.tx_id)
                self.tx_id_channel_map.update({ch.tx_id: ch.channel_name})

            response = self.nodeb_api.get_channel_state_from_nodeb(
                data=list(set(tx_id_list)))
            self.add_task(task={expected_state.lower(): response})

        return

    def update_channel(self, task):
        if not task:
            return

        if task.get('open'):
            tx_id_dict = task.get('open')
            expected_state = State.OPEN
        elif task.get('settled'):
            tx_id_dict = task.get('settled')
            expected_state = State.SETTLED
        else:
            logger.info('unknown expected state of channels')
            return

        for tx_id, status in tx_id_dict.items():
            if status is True:
                channel_set = Session.query(ChannelDatabase).filter(
                    ChannelDatabase.channel_name ==
                    self.tx_id_channel_map[tx_id]).first()
                if channel_set:
                    self.deposit_action[expected_state](channel_set.sender, 0)
                    self.deposit_action[expected_state](channel_set.receiver,
                                                        0)

    def add_task(self, task):
        tid_list = list(self.__t_reg.keys())
        transport = self.__t_reg[tid_list[0]]
        transport.add_task(task)

    @property
    def all_opening_channels(self):
        # get all of the opening channels
        channel_set = Session.query(ChannelDatabase).filter(
            or_(ChannelDatabase.state == State.OPENING,
                ChannelDatabase.state == State.UPDATING)).all()
        return channel_set if channel_set else []

    @property
    def all_settling_channels(self):
        # get all of the settling channels
        channel_set = Session.query(ChannelDatabase).filter(
            ChannelDatabase.state == State.SETTLING).all()
        return channel_set if channel_set else []

    '''Private function'''

    class Transport(object):
        def __init__(self, tid, name):
            self.__tid = tid
            self.__name = name
            self.q_maxsize = 100
            self.__task_queue = Queue(maxsize=self.q_maxsize)

        def add_task(self, task):
            if self.__task_queue.qsize() < self.q_maxsize:
                self.__task_queue.put_nowait(task)
                return True
            else:
                # queue is full
                logger.debug('The task queue is full')
                return False

        def get(self, timeout=None):
            task = None
            try:
                task = self.__task_queue.get(block=True, timeout=timeout)
            except Exception:
                if not self.__task_queue.empty():
                    logger.error('Transport thread is still processing data')
            # Return outside the finally block so GreenletExit and other
            # unexpected errors are not silently swallowed.
            return task

        @property
        def name(self):
            return str(self.__name)

    def __register_transport(self, name):
        if not name:
            logger.error('Failed to register {}'.format(name))
            return None

        self.register_lock.acquire()
        tid = len(self.__t_reg.keys())
        t_name = '{}-{}'.format(name, tid)

        # if the name already exists, don't change it
        transport = self.Transport(tid, t_name)
        self.__t_reg.update({}.fromkeys([tid], transport))
        self.register_lock.release()
        self.spawn_signal.set()

        return transport
Example #18
    accs = db.session.query(Account).order_by(asc(Account.disabled)).order_by(
        Account.username).all()
    mails = {
        'resident': [sget_resident_mail(),
                     sget_resident_topic()],
        'tbadge': [sget_tbadge_mail(), sget_tbadge_topic()],
        'other': [sget_other_mail(), sget_other_topic()]
    }

    return render_template("settings/accounts.html",
                           roles=roles,
                           accounts=accs,
                           mails=mails)


alt_clean_lock: Lock = Lock()
last_alt_clean: datetime = datetime.min
min_time_between_checks: timedelta = timedelta(hours=12)


def should_clean_alts() -> bool:
    return (datetime.utcnow() - last_alt_clean) > min_time_between_checks


def async_clean_alt_list() -> None:
    """
    This function is asynchronous and returns instantly.
    Removes links between accounts and characters if
     - there is a token
       - but the token expired
     - or the owner_hash changed (this should expire the token!)
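Note: the example is cut off mid-docstring. The visible idiom is a module-level Lock plus a timestamp that rate-limits a background job to one run per min_time_between_checks. How the body presumably guards itself, as a sketch under those assumptions (not the project's code):

def async_clean_alt_list_sketch() -> None:
    global last_alt_clean
    if not should_clean_alts():
        return
    if not alt_clean_lock.acquire(blocking=False):
        return                            # another greenlet is already cleaning
    try:
        last_alt_clean = datetime.utcnow()
        # ... remove stale account/character links here ...
    finally:
        alt_clean_lock.release()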
Example #19
class HttpMultiThreadDownloader:

    TOTAL_BUFFER_SIZE = 1024 * 1024 * 16
    CHUNK_SIZE = 1024
    MIN_TASK_CHUNK_SIZE = 1 * CHUNK_SIZE
    DEFAULT_THREAD_NUMBER = 32

    headers = {
        "User-Agent":
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }

    def __init__(self,
                 url=None,
                 file_name=None,
                 path_to_store=None,
                 total_size=None,
                 thread_number=None,
                 logger=None,
                 print_progress=False):

        assert url or file_name

        file_name = file_name if file_name else url.split('?')[0].split(
            '/')[-1]
        file_name_split = file_name.split('.')
        if len(file_name_split[0]) > 64:
            file_name_split[0] = file_name_split[0][:64]
        file_name = '.'.join(file_name_split)
        self.path_to_store = path_to_store if path_to_store else f"{os.environ['HOME']}/Downloads"

        self.logger = logger or logging
        self.print_progress = print_progress
        self.url = url
        self.total_size = total_size
        self.file_name_with_path = self.path_to_store + '/' + file_name
        self.breakpoint_file_path = self.file_name_with_path + '.tmp'
        self.file_seeker = None
        self.thread_number = thread_number
        self.thread_buffer_size = None
        self.remaining_segments = []
        self.start_time = None
        self.segment_dispatch_task = None
        self.speed_calculation_task = None
        self.bytearray_of_threads = None
        self.last_progress_time = None
        self.last_downloaded_size = 0
        self.workers = []
        self.coworker_end_to_indexes_map = dict()
        self.to_dispatcher_queue = Queue()
        self.complete_notify_lock = Lock()

        self.download_info = self.get_download_info()

    def get_total_size(self):
        while True:
            try:
                with gevent.Timeout(10):
                    res = requests.get(self.url,
                                       stream=True,
                                       verify=False,
                                       headers=self.headers)
                break
            except KeyboardInterrupt:
                os.kill(os.getpid(), signal.SIGTERM)
            except (gevent.timeout.Timeout, requests.exceptions.ProxyError,
                    requests.exceptions.ConnectionError):
                self.logger.error(traceback.format_exc())
        if int(res.status_code / 100) == 2 and 'Content-Length' in res.headers:
            return int(res.headers['Content-Length'])
        else:
            raise RuntimeError(f'Multi-threaded download not supported: {self.url}')

    def get_download_info(self):
        if os.path.exists(self.file_name_with_path) and os.path.exists(
                self.breakpoint_file_path):
            with open(self.breakpoint_file_path, 'rb') as f:
                info_map = pickle.load(f)
            self.file_seeker = open(self.file_name_with_path, "r+b")
            thread_number = info_map['thread_number']
            self.remaining_segments = info_map['remaining_segments']
            if self.thread_number is not None and self.thread_number > thread_number:
                self.remaining_segments += [[0, 0]] * (self.thread_number -
                                                       thread_number)
                for index in range(thread_number, self.thread_number):
                    self.balance_remaining_segments(index)
            else:
                self.thread_number = thread_number

            self.total_size = info_map['total_size']
            self.url = info_map['url']
        else:
            if self.total_size is None:
                self.total_size = self.get_total_size()
            info_map = {
                'url': self.url,
                'thread_number': self.thread_number,
                'total_size': self.total_size
            }
            if self.thread_number is None:
                self.thread_number = self.DEFAULT_THREAD_NUMBER
            subprocess.Popen(f'mkdir -p {self.path_to_store}',
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
            self.file_seeker = open(self.file_name_with_path, "w+b")
            if math.floor(
                    self.total_size / self.CHUNK_SIZE) < self.thread_number:
                self.thread_number = math.floor(self.total_size /
                                                self.CHUNK_SIZE)
            divided_segment_size = math.floor(
                self.total_size / self.thread_number /
                self.CHUNK_SIZE) * self.CHUNK_SIZE
            for i in range(self.thread_number - 1):
                self.remaining_segments.append(
                    [divided_segment_size * i, divided_segment_size * (i + 1)])
            self.remaining_segments.append([
                divided_segment_size * (self.thread_number - 1),
                self.total_size
            ])

        self.thread_buffer_size = math.ceil(
            self.TOTAL_BUFFER_SIZE / self.thread_number /
            self.CHUNK_SIZE) * self.CHUNK_SIZE
        self.bytearray_of_threads = [None] * self.thread_number
        info_map['remaining_segments'] = self.remaining_segments
        return info_map

    def balance_remaining_segments(self, completed_index):
        copied_remaining_segments = copy.deepcopy(self.remaining_segments)
        max_remaining_index, max_remaining_task = sorted(
            enumerate(copied_remaining_segments),
            key=lambda x: x[1][1] - x[1][0],
            reverse=True)[0]
        remaining_bytes = max_remaining_task[1] - max_remaining_task[0]
        if remaining_bytes >= self.MIN_TASK_CHUNK_SIZE:
            new_end_of_max_remaining_task = max_remaining_task[1] - math.floor(
                remaining_bytes * 0.5 / self.CHUNK_SIZE) * self.CHUNK_SIZE
            new_start = new_end_of_max_remaining_task
            new_end = max_remaining_task[1]
            self.remaining_segments[max_remaining_index][
                1] = new_end_of_max_remaining_task
            self.remaining_segments[completed_index][0] = new_start
            self.remaining_segments[completed_index][1] = new_end
            return {'type': 'worker'}
        elif remaining_bytes > 0:
            new_start = max_remaining_task[0]
            new_end = max_remaining_task[1]
            self.remaining_segments[completed_index][0] = new_start
            self.remaining_segments[completed_index][1] = new_end
            self.coworker_end_to_indexes_map.get(
                new_end, (max_remaining_index, set()))[1].add(completed_index)
            return {
                'type': 'coworker',
                'new_start': new_start,
                'new_end': new_end
            }
        else:
            return {'type': 'stop'}

    def calculate_realtime_speed_once(self, do_init=False):
        tmp = copy.deepcopy(self.remaining_segments)
        new_tmp = []
        for [start, end] in tmp:
            if start < end:
                for index, [s, e] in enumerate(new_tmp):
                    if start < e and end > s:
                        new_tmp[index] = [max(start, s), min(end, e)]
                        break
                else:
                    new_tmp.append([start, end])
        remaining_size = sum([end - start for [start, end] in new_tmp])
        downloaded_size = self.total_size - remaining_size

        current_time = datetime.datetime.now()
        seconds = (current_time - self.start_time).seconds

        if do_init:
            self.last_progress_time = seconds + 0.001
            self.last_downloaded_size = downloaded_size
        if downloaded_size < self.last_downloaded_size:
            downloaded_size = self.last_downloaded_size
        downloaded_size_in_period = downloaded_size - self.last_downloaded_size
        self.last_downloaded_size = downloaded_size
        finish_percent = math.floor(
            downloaded_size / self.total_size * 10000) / 10000

        return {
            'total_size':
            self.total_size,
            'downloaded_size':
            downloaded_size,
            'downloaded_size_in_period':
            downloaded_size_in_period,
            'finish_percent':
            finish_percent,
            'realtime_speed':
            downloaded_size_in_period / (seconds - self.last_progress_time),
            'total_seconds':
            seconds
        }

    def calculate_realtime_speed(self):
        while True:
            sleep(1)
            progress = self.calculate_realtime_speed_once()
            finish_percent = progress['finish_percent']
            done = math.floor(50 * finish_percent)

            sys.stdout.write(
                "\r[%s%s] %.2f%% %.2fMB|%.2fMB %.3fMB/s %ds" %
                ('█' * done, ' ' * (50 - done), 100 * finish_percent,
                 math.floor(progress['downloaded_size'] / 1024 / 10.24) / 100,
                 math.floor(self.total_size / 1024 / 10.24) / 100,
                 math.floor(progress['downloaded_size_in_period'] / 1024 /
                            1.024) / 1000, progress['total_seconds']))
            sys.stdout.flush()

            if finish_percent == 1:
                break

    def store_segment(self, start, data):
        self.file_seeker.seek(start)
        if start + len(data) > self.total_size:
            data = data[:self.total_size - start]
        self.file_seeker.write(data)

    def store_remaining_segment(self):
        for index, segment in enumerate(self.bytearray_of_threads):
            if segment is not None:
                start, data, data_length = segment[0], segment[1], segment[2]
                if data_length > 0:
                    data = data[:data_length]
                    self.file_seeker.seek(start)
                    self.file_seeker.write(data)

    def start_download_task(self, task_index, task_type):

        (start, end) = self.remaining_segments[task_index]
        if start is None or start >= end:
            return task_index
        data = bytearray(self.thread_buffer_size)
        data_length = 0
        task_downloaded_size = 0

        self.bytearray_of_threads[task_index] = [start, data, data_length]

        while True:
            try:
                with gevent.Timeout(5):
                    headers = {
                        **self.headers, 'Range':
                        'bytes=%d-%d' % (start + data_length, end - 1)
                    }
                    r = requests.get(self.url,
                                     stream=True,
                                     verify=False,
                                     headers=headers)
                status_code = r.status_code
                assert status_code not in (200, 416)
                if status_code == 206:

                    chunk_size = min(self.CHUNK_SIZE,
                                     end - start - data_length)
                    chunks = r.iter_content(chunk_size=chunk_size)
                    while True:
                        with gevent.Timeout(5):
                            chunk = chunks.__next__()
                        get_chunk_size = len(chunk)
                        if chunk and (len(chunk) == self.CHUNK_SIZE
                                      or len(chunk) == chunk_size):
                            task_downloaded_size += get_chunk_size
                            data_length += get_chunk_size
                            end = self.remaining_segments[task_index][1]
                            self.remaining_segments[task_index][
                                0] = start + data_length
                            data[data_length -
                                 get_chunk_size:data_length] = chunk
                            self.bytearray_of_threads[task_index][
                                2] = data_length

                            if end - 1 <= start + data_length or data_length + self.CHUNK_SIZE > self.thread_buffer_size:
                                if end - 1 <= start + data_length:
                                    self.bytearray_of_threads[
                                        task_index] = None
                                    self.complete_notify_lock.acquire()
                                    self.to_dispatcher_queue.put({
                                        'type':
                                        task_type + '_finish_download',
                                        'index':
                                        task_index,
                                        'start':
                                        start,
                                        'data':
                                        data[:data_length],
                                        'end':
                                        end
                                    })
                                    self.complete_notify_lock.release()
                                    return
                                else:
                                    self.to_dispatcher_queue.put({
                                        'type':
                                        'part_downloaded',
                                        'start':
                                        start,
                                        'data':
                                        data[:data_length]
                                    })
                                    start += data_length
                                    data = bytearray(self.thread_buffer_size)
                                    data_length = 0
                                    self.bytearray_of_threads[task_index] = [
                                        start, data, data_length
                                    ]
                        else:
                            break
            except (gevent.timeout.Timeout, requests.exceptions.ProxyError,
                    requests.exceptions.ConnectionError, StopIteration,
                    KeyboardInterrupt):
                pass

    def store_breakpoint(self):
        self.segment_dispatch_task.kill()
        self.speed_calculation_task.kill()
        self.store_remaining_segment()
        self.calculate_realtime_speed_once()
        self.file_seeker.flush()
        self.file_seeker.close()
        with open(self.breakpoint_file_path, 'wb') as f:
            self.download_info['thread_number'] = self.thread_number
            pickle.dump(self.download_info, f)

    def remove_breakpoint_file(self):
        if os.path.exists(self.breakpoint_file_path):
            os.remove(self.breakpoint_file_path)

    def dispatch_segment(self):
        while True:
            request = self.to_dispatcher_queue.get()
            if request['type'] == 'part_downloaded':
                start, data = request['start'], request['data']
                self.store_segment(start, data)

            elif request['type'] == 'worker_finish_download':
                self.complete_notify_lock.acquire()
                completed_index, start, data, end = request['index'], request[
                    'start'], request['data'], request['end']
                self.store_segment(start, data)
                (worker_index,
                 coworker_indexes) = self.coworker_end_to_indexes_map.get(
                     end, (completed_index, set()))
                for index in coworker_indexes:
                    self.remaining_segments[index][1] = -1
                    self.workers[index].kill()
                res = self.balance_remaining_segments(worker_index)
                if res['type'] == 'stop':
                    return
                elif res['type'] == 'worker':
                    self.workers[worker_index] = gevent.spawn(
                        self.start_download_task, worker_index, res['type'])
                elif res['type'] == 'coworker':
                    for index in {worker_index} | coworker_indexes:
                        self.workers[index] = gevent.spawn(
                            self.start_download_task, index, res['type'])
                self.complete_notify_lock.release()

            elif request['type'] == 'coworker_finish_download':
                self.complete_notify_lock.acquire()
                completed_index, completed_start, completed_data, completed_end = request[
                    'index'], request['start'], request['data'], request['end']
                self.store_segment(completed_start, completed_data)
                (worker_index, coworker_indexes
                 ) = self.coworker_end_to_indexes_map.get(completed_end)
                [worker_start, worker_data,
                 _] = self.bytearray_of_threads[worker_index]
                self.store_segment(
                    worker_start, worker_data[:completed_start - worker_start])
                for index in coworker_indexes - {completed_index}:
                    self.remaining_segments[index][1] = -1
                    self.workers[index].kill()
                res = self.balance_remaining_segments(worker_index)
                if res['type'] == 'stop':
                    return
                elif res['type'] == 'coworker':
                    for index in {worker_index} | coworker_indexes:
                        self.remaining_segments[index][0] = res['new_start']
                        self.remaining_segments[index][1] = res['new_end']
                        self.workers[index] = gevent.spawn(
                            self.start_download_task, index, res['type'])
                self.complete_notify_lock.release()

    def download(self):

        self.start_time = datetime.datetime.now()
        self.calculate_realtime_speed_once(do_init=True)
        if self.print_progress:
            self.speed_calculation_task = gevent.spawn(
                self.calculate_realtime_speed)
        self.segment_dispatch_task = gevent.spawn(self.dispatch_segment)

        for index in range(self.thread_number):
            self.workers.append(
                gevent.spawn(self.start_download_task, index, 'worker'))
        try:
            if self.print_progress:
                self.speed_calculation_task.join()
            self.segment_dispatch_task.kill()
            self.file_seeker.flush()
            self.file_seeker.close()
            self.remove_breakpoint_file()
        except KeyboardInterrupt:
            self.store_breakpoint()
        sys.stdout.write('\n')
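Note: the downloader coordinates workers and the dispatcher through to_dispatcher_queue, and complete_notify_lock makes "publish a completion event, then rebalance" mutually exclusive between a finishing worker and the dispatcher. The handoff in miniature (a sketch of the idiom, not the downloader's logic):

from gevent.queue import Queue
from gevent.threading import Lock
import gevent

events = Queue()
notify = Lock()

def worker(i):
    with notify:                          # mirrors complete_notify_lock
        events.put({'type': 'finish', 'index': i})

def dispatcher():
    for _ in range(3):
        print('rebalancing after', events.get())   # blocks cooperatively

d = gevent.spawn(dispatcher)
gevent.joinall([gevent.spawn(worker, i) for i in range(3)])
d.join()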
Example #20
class MysqlConnectionPool(DatabaseConnectionPool):
    """

    """
    LOCK = Lock()
    WATCHDOG = None

    def __init__(self, maxsize):
        super(MysqlConnectionPool, self).__init__(maxsize)

    def create_connection(self, conn_params):
        """

        :param conn_params:
        :type: dict
        :return:
        """
        if len(HOSTS_STATUS) == 0:
            with MysqlConnectionPool.LOCK:
                if len(HOSTS_STATUS) == 0:
                    # populate
                    logger.debug("Populate host list")
                    if 'host' not in conn_params:
                        HOSTS_STATUS['localhost'] = 0
                    else:
                        for host in conn_params['host'].split(','):
                            HOSTS_STATUS[host] = 0

        # Deep copy param
        new_conn_params = copy.deepcopy(conn_params)

        db_connection = False
        ex = Exception('No Db Host available')

        while not db_connection:
            host = self._get_random_host()
            if not host:
                logger.error("No mysql host available... %s are down",
                             HOSTS_STATUS.keys())
                raise ex
            # override host in param
            new_conn_params['host'] = host

            try:
                db_connection = Database.connect(**new_conn_params)
            except Exception as e:
                ex = e
                logger.error("de-activate %s for 1 minute ex=%s", host, e)
                HOSTS_STATUS[host] = time.time() + 60

        return db_connection

    def is_usable(self, conn):
        try:
            conn.ping()
        except Database.Error:
            return False
        else:
            return True

    # noinspection PyMethodMayBeStatic
    def _get_random_host(self):
        """
        Return a host in HOSTS_STATUS where the host is up

        :return:
        """
        now = time.time()
        hosts_up = [
            host for host, prison in HOSTS_STATUS.items() if prison < now
        ]
        try:
            host = random.choice(hosts_up)
            return host
        except IndexError:
            return False
Example #21
class JWTAuth(requests.auth.AuthBase):

    # Half a day before the actual expiration.
    REAUTH_TIME_INTERVEL = 43200

    def __init__(self,
                 username,
                 password,
                 urls,
                 use_lock_for_reseting_jwt=False,
                 max_retries=5):
        self.username = username
        self.password = password
        self.urls = urls
        self.lock_for_reseting_jwt = Lock() if use_lock_for_reseting_jwt else None
        self.__init_request_session(max_retries)
        self.__set_token()

    def __init_request_session(self, max_retries):
        self.max_retries = max_retries
        self.session = requests.Session()
        http = requests.adapters.HTTPAdapter(max_retries=max_retries)
        https = requests.adapters.HTTPAdapter(max_retries=max_retries)
        self.session.mount('http://', http)
        self.session.mount('https://', https)

    def __parse_token(self):
        decoded_token = b64decode(self.token.split('.')[1].encode())
        return json_mod.loads(decoded_token.decode())

    def __get_auth_token(self):
        request_data = '{"username":"******","password":"******"}' % (self.username,
                                                              self.password)
        for connection_url in self.urls:
            try:
                response = self.session.post('%s/_open/auth' % connection_url,
                                             data=request_data)
                if response.ok:
                    json_data = response.content
                    if json_data:
                        data_dict = json_mod.loads(json_data.decode("utf-8"))
                        return data_dict.get('jwt')
            except requests_exceptions.ConnectionError:
                if connection_url is not self.urls[-1]:
                    logging.critical("Unable to connect to %s trying another",
                                     connection_url)
                else:
                    logging.critical(
                        "Unable to connect to any of the urls: %s", self.urls)
                    raise

    def __set_token(self):
        self.token = self.__get_auth_token()
        self.parsed_token = \
            self.__parse_token() if self.token is not None else {}
        self.token_last_updated = time.time()

    def reset_token(self):
        logging.warning("Reseting the token.")
        self.__set_token()

    def is_token_expired(self):
        return (self.parsed_token.get("exp", 0) - time.time() <
                JWTAuth.REAUTH_TIME_INTERVEL)

    def __call__(self, req):
        # Implement JWT authentication
        if self.is_token_expired():
            if self.lock_for_reseting_jwt is not None:
                self.lock_for_reseting_jwt.acquire()
            if self.is_token_expired():
                self.reset_token()
            if self.lock_for_reseting_jwt is not None:
                self.lock_for_reseting_jwt.release()
        req.headers['Authorization'] = 'Bearer %s' % self.token
        return req
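Note: __call__ uses the same double-checked pattern as Example #16: is_token_expired() is tested lock-free, then re-tested after acquiring lock_for_reseting_jwt, so only one caller refreshes the JWT. If reset_token() raised, the explicit acquire/release pair would leak the lock; a with-based variant is safer (a sketch reusing the same field names):

def refresh_if_expired(auth):
    if not auth.is_token_expired():
        return
    if auth.lock_for_reseting_jwt is None:
        auth.reset_token()
        return
    with auth.lock_for_reseting_jwt:      # released even if reset_token() raises
        if auth.is_token_expired():       # re-check under the lock
            auth.reset_token()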