Example #1
 def __init__( self ):
     self._canRead = Event()
     self._canWrite = Event()
     self._mutex = BoundedSemaphore( value = 1 )
     self._readers = 0
     self._isWriting = False
     self._canRead.set()
     self._canWrite.set()
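A hedged sketch of how these primitives could combine into reader entry points, in this example's own style; the method names acquireRead and releaseRead are illustrative assumptions, not part of the original class:

 def acquireRead( self ):
     # Hypothetical reader entry: wait until no writer holds the lock.
     self._canRead.wait()
     self._mutex.acquire()      # protect the reader count
     self._readers += 1
     self._canWrite.clear()     # writers wait until readers drain
     self._mutex.release()

 def releaseRead( self ):
     self._mutex.acquire()
     self._readers -= 1
     if self._readers == 0:
         self._canWrite.set()   # last reader out readmits writers
     self._mutex.release()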
Example #2
 def __init__(self, account_id, num_connections, readonly):
     log.info('Creating Crispin connection pool for account {} with {} '
              'connections'.format(account_id, num_connections))
     self.account_id = account_id
     self.readonly = readonly
     self._queue = Queue(num_connections, items=num_connections * [None])
     self._sem = BoundedSemaphore(num_connections)
     self._set_account_info()
Example #3
 def test_process_request(self):
     self.spider.settings.set("PROXY_LIST", ['124.88.67.54:80'])
     request = Request('http://httpbin.org/get')
     pm = ProxyMiddleware(self.spider.settings, self.spider.logger)
     dh = DownloadHandler(self.spider, None, BoundedSemaphore(1))
     pm.process_request(request)
     response = dh.fetch(request)
     assert response.body
Example #4
 def __setitem__(self, key, factory):
     if not callable(factory):
         raise ValueError('value %s should be callable' % factory)
     lock = self.data[key] = BoundedSemaphore()
     lock.acquire()
     self.data[key] = factory()
     assert not isinstance(self.data[key], BoundedSemaphore)
     lock.release()
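The placeholder semaphore above only pays off if readers wait on it; below is a minimal companion __getitem__, a sketch assuming the same data dict, not part of the original class. A reader that finds a semaphore blocks until __setitem__ releases it, then re-reads the finished value:

 def __getitem__(self, key):
     value = self.data[key]
     if isinstance(value, BoundedSemaphore):
         # The factory is still running in __setitem__; wait for its
         # release, then re-read the value stored over the placeholder.
         value.acquire()
         value.release()
         value = self.data[key]
     return value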
Example #5
 def __init__(self, media_environment):
     self.active_scripts = gevent.pool.Group()
     self.sleeping_scripts = []
     self.project = None
     self.media_environment = media_environment
     # Get the script_lock before adding or removing scripts
     self.script_lock = BoundedSemaphore(1)
     self.clients = []
Example #6
def configure_publish_lock(max_connections=0):
    global _publish_lock
    if _publish_lock is not None:
        raise RuntimeError("socket_lock already configured!")
    if max_connections < 1:
        _publish_lock = DummySemaphore()
    else:
        _publish_lock = BoundedSemaphore(max_connections)
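A hedged usage sketch for the module-level lock configured above; publish and sock are illustrative names. Both DummySemaphore and BoundedSemaphore support the context-manager protocol, so the caller does not care which one was configured:

def publish(sock, data):
    # A no-op gate when max_connections < 1 (DummySemaphore); otherwise
    # at most max_connections greenlets may publish at the same time.
    with _publish_lock:
        sock.send(data)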
Example #7
 def __init__(self, agent_addr, parallel_task_limit=None):
     self.agent_addr = agent_addr
     self.tasks = set()
     self.ptask_semaphore = None
     self.capacity = parallel_task_limit or self.cpu_count()
     self.ptask_semaphore = BoundedSemaphore(self.capacity)
     self.sync_tag = 0
     self.sync_flag = False
Example #8
def test_imap_message_deduplication(db, generic_account, inbox_folder,
                                    generic_trash_folder, mock_imapclient):
    uid = 22
    uid_values = uid_data.example()

    mock_imapclient.list_folders = lambda: [
        (('\\All', '\\HasNoChildren'), '/', u'/Inbox'),
        (('\\Trash', '\\HasNoChildren'), '/', u'/Trash'),
    ]
    mock_imapclient.idle = lambda: None
    mock_imapclient.add_folder_data(inbox_folder.name, {uid: uid_values})
    mock_imapclient.add_folder_data(generic_trash_folder.name,
                                    {uid: uid_values})

    folder_sync_engine = FolderSyncEngine(generic_account.id,
                                          generic_account.namespace.id,
                                          inbox_folder.name,
                                          generic_account.email_address,
                                          'custom', BoundedSemaphore(1),
                                          mock.Mock())
    folder_sync_engine.initial_sync()

    trash_folder_sync_engine = FolderSyncEngine(generic_account.id,
                                                generic_account.namespace.id,
                                                generic_trash_folder.name,
                                                generic_account.email_address,
                                                'custom', BoundedSemaphore(1),
                                                mock.Mock())
    trash_folder_sync_engine.initial_sync()

    # Check that we have two uids, but just one message.
    assert [(uid, )] == db.session.query(
        ImapUid.msg_uid).filter(ImapUid.folder_id == inbox_folder.id).all()

    assert [(uid, )] == db.session.query(ImapUid.msg_uid).filter(
        ImapUid.folder_id == generic_trash_folder.id).all()

    # used to uniquely ID messages
    body_sha = sha256(uid_values['BODY[]']).hexdigest()

    assert db.session.query(Message).filter(
        Message.namespace_id == generic_account.namespace.id,
        Message.data_sha256 == body_sha).count() == 1
Example #9
    def __init__(self, config_file):
        u"""Cluster 抽象"""
        self.config_file = config_file
        self.config = Config(config_file)
        self.context = Context(self.config.parse_refer(),
                               self.config.parse_registry())

        self.clients = {}
        self.sem = BoundedSemaphore(1)
Example #10
    def __init__(self, *args, **kwargs):
        BaseProcessor.__init__(self, *args, **kwargs)
        self.build_args()

        # Cached city information
        from gevent.lock import BoundedSemaphore

        self._city_cache = {}
        self._city_cache_lock = BoundedSemaphore(1)
Example #11
 def test_process_request(self):
     request = Request('http://httpbin.org/user-agent')
     self.assertIs(request.headers.get("User-Agent"), None)
     uam = UserAgentMiddleware(self.spider.settings, self.spider.logger)
     dh = DownloadHandler(self.spider, None, BoundedSemaphore(1))
     uam.process_request(request)
     response = dh.fetch(request)
     self.assertEqual(
         json.loads(response.body)['user-agent'],
         request.headers['User-Agent'])
Example #12
File: app.py Project: Montana/dirt
 def _get_call_semaphore(self, call):
     if call.name.startswith("debug."):  # XXX A bit of a hack
         return DummySemaphore()
     if self._call_semaphore is None:
         if self.max_concurrent_calls is None:
             semaphore = DummySemaphore()
         else:
             semaphore = BoundedSemaphore(self.max_concurrent_calls)
         self._call_semaphore = semaphore
     return self._call_semaphore
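A sketch of how the returned semaphore would typically gate execution; handle_call and execute_call are hypothetical names, not taken from the dirt project:

 def handle_call(self, call):
     # debug.* calls receive a fresh DummySemaphore and never block here;
     # all other calls share the single bounded semaphore.
     with self._get_call_semaphore(call):
         return self.execute_call(call)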
Example #13
    def __init__(self, account, heartbeat=1, refresh_frequency=30):
        self.refresh_frequency = refresh_frequency
        self.syncmanager_lock = BoundedSemaphore(1)
        self.saved_remote_folders = None
        self.sync_engine_class = FolderSyncEngine

        self.folder_monitors = Group()
        self.delete_handler = None

        BaseMailSyncMonitor.__init__(self, account, heartbeat)
Example #14
 def getGevent():
     from gevent import monkey
     monkey.patch_all(socket=True, select=True)
     from gevent.queue import Queue  # get, put
     from gevent.local import local
     try:
         from gevent.lock import BoundedSemaphore
     except ImportError:
         from gevent.coros import BoundedSemaphore  # older gevent releases
     sem = BoundedSemaphore(2)  # acquire, release
     return local, Queue, sem  # return thread-local, queue, and semaphore
Example #15
 def lock(self, key):
     """
     Synchronization primitive for client functions to ensure that multiple
     functions are not trying to act on the same contract at the same time.
     """
     try:
         sem = self._locks[key]
     except KeyError:
         sem = BoundedSemaphore()
         self._locks[key] = sem
     return sem
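Callers would typically hold the per-key semaphore for the whole mutating operation; a hypothetical usage sketch (client, contract_address, and update_contract are illustrative names):

 with client.lock(contract_address):
     update_contract(contract_address)  # no other greenlet acts on this contract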
Example #16
 def __init__(self, path: str, name: str, host: str, port: int):
     """ Constructor """
     self._name = name
     self.host = host
     self.port = port
     self.datadir = os.path.join(path, 'data')
     if not os.path.exists(self.datadir):
         os.mkdir(self.datadir)
     db_path = os.path.join(self.datadir, f'device_{self._name}.sqlite3')
     self.lock_db = BoundedSemaphore(1)
     self.db = sqlite3.connect(db_path)
Example #17
 def __init__(self, account_id, num_connections, readonly):
     log.info(
         "Creating Crispin connection pool",
         account_id=account_id,
         num_connections=num_connections,
     )
     self.account_id = account_id
     self.readonly = readonly
     self._queue = Queue(num_connections, items=num_connections * [None])
     self._sem = BoundedSemaphore(num_connections)
     self._set_account_info()
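The pool above pairs the semaphore with a queue preloaded with None placeholders. Below is a hedged sketch of how checkout could work (assuming contextlib.contextmanager is imported; get_connection and _new_connection are illustrative names, not the pool's actual API):

 @contextmanager
 def get_connection(self):
     self._sem.acquire()          # cap the number of concurrent checkouts
     client = self._queue.get()   # None until first use, then a live client
     try:
         if client is None:
             client = self._new_connection()  # hypothetical lazy constructor
         yield client
     finally:
         self._queue.put(client)
         self._sem.release()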
Example #18
    def __init__(self):
        BaseHardwareInterface.__init__(self)
        self.update_thread = None # Thread for running the main update loop
        self.pass_record_callback = None # Function added in server.py
        self.hardware_log_callback = None # Function added in server.py
        self.new_enter_or_exit_at_callback = None # Function added in server.py
        self.node_crossing_callback = None # Function added in server.py

        self.i2c = smbus.SMBus(1) # Start i2c bus
        self.semaphore = BoundedSemaphore(1) # Limits i2c to 1 read/write at a time
        self.i2c_timestamp = -1

        # Scans all i2c_addrs to populate nodes array
        self.nodes = [] # Array to hold each node object
        i2c_addrs = [8, 10, 12, 14, 16, 18, 20, 22] # Software limited to 8 nodes
        for index, addr in enumerate(i2c_addrs):
            try:
                self.i2c.read_i2c_block_data(addr, READ_ADDRESS, 1)
                print "Node {0} found at address {1}".format(index+1, addr)
                gevent.sleep(I2C_CHILL_TIME)
                node = Node() # New node instance
                node.i2c_addr = addr # Set current loop i2c_addr
                node.index = index
                self.nodes.append(node) # Add new node to RHInterface
            except IOError:
                print("No node at address {0}".format(addr))
            gevent.sleep(I2C_CHILL_TIME)

        for node in self.nodes:
            node.frequency = self.get_value_16(node, READ_FREQUENCY)
            # read NODE_API_LEVEL and verification value:
            rev_val = self.get_value_16(node, READ_REVISION_CODE)
            if (rev_val >> 8) == 0x25:  # if verify passed (fn defined) then set API level
                node.api_level = rev_val & 0xFF
            else:
                node.api_level = 0  # if verify failed (fn not defined) then set API level to 0
            if node.api_level >= 10:
                node.api_valid_flag = True  # set flag for newer API functions supported
                node.node_peak_rssi = self.get_value_16(node, READ_NODE_RSSI_PEAK)
                if node.api_level >= 13:
                    node.node_nadir_rssi = self.get_value_16(node, READ_NODE_RSSI_NADIR)
                node.enter_at_level = self.get_value_16(node, READ_ENTER_AT_LEVEL)
                node.exit_at_level = self.get_value_16(node, READ_EXIT_AT_LEVEL)
                print "Node {0}: API_level={1}, Freq={2}, EnterAt={3}, ExitAt={4}".format(node.index+1, node.api_level, node.frequency, node.enter_at_level, node.exit_at_level)
            else:
                print "Node {0}: API_level={1}".format(node.index+1, node.api_level)
            if node.index == 0:
                if node.api_valid_flag:
                    self.filter_ratio = self.get_value_16(node, READ_FILTER_RATIO)
                else:
                    self.filter_ratio = 10
            else:
                self.set_filter_ratio(node.index, self.filter_ratio)
Example #19
    def __init__(self, api: RaidenAPI, token_address: TokenAddress) -> None:
        assert isinstance(api, RaidenAPI)
        self.ready = Event()

        self.api = api
        self.token_address = token_address

        existing_channels = self.api.get_channel_list(
            api.raiden.default_registry.address, self.token_address
        )

        open_channels = [
            channel_state
            for channel_state in existing_channels
            if channel.get_status(channel_state) == ChannelState.STATE_OPENED
        ]

        if len(open_channels) == 0:
            token_proxy = self.api.raiden.proxy_manager.token(self.token_address)
            if not token_proxy.balance_of(self.api.raiden.address) > 0:
                raise ValueError(
                    f"Not enough funds for echo node "
                    f"{to_checksum_address(self.api.raiden.address)} for token "
                    f"{to_checksum_address(self.token_address)}"
                )

            # Using the balance of the node as funds
            funds = TokenAmount(token_proxy.balance_of(self.api.raiden.address))

            self.api.token_network_connect(
                registry_address=self.api.raiden.default_registry.address,
                token_address=self.token_address,
                funds=funds,
                initial_channel_target=10,
                joinable_funds_target=0.5,
            )

        self.num_seen_events = 0
        self.received_transfers: Queue[EventPaymentReceivedSuccess] = Queue()

        # This is used to signal REMOVE_CALLBACK and stop echo_workers
        self.stop_signal: Optional[bool] = None

        self.greenlets: Set[Greenlet] = set()
        self.lock = BoundedSemaphore()
        self.seen_transfers: Deque[EventPaymentReceivedSuccess] = deque(list(), TRANSFER_MEMORY)
        self.num_handled_transfers = 0
        self.lottery_pool = Queue()

        # register ourselves with the raiden alarm task
        self.api.raiden.alarm.register_callback(self.echo_node_alarm_callback)
        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        log.info("Echo node started")
Example #20
 def __init__(self, cfg_file):
     if os.path.isfile(cfg_file):
         self._cfg_file = cfg_file
         self._init_cfg()
     else:
         raise FileNotFoundError('ERROR: can not find {}'.format(cfg_file))
     self._setting = dict(self.config.items('setting'))
     if 'sem' in self._setting and self._setting['sem'].isdigit():
         self._sem = BoundedSemaphore(int(self._setting['sem']))
         self._pool = Pool()
     self._events = dict()
     self._node_events = {'begin': list(), 'final': list()}
Example #21
    def test_process_request_interval(self):
        self.spider.settings.set("PROXY_LIST", ['218.76.106.78:3128'])
        request = Request('http://httpbin.org/get')
        pm = ProxyMiddleware(self.spider.settings, self.spider.logger)
        dh = DownloadHandler(self.spider, None, BoundedSemaphore(1))
        pm.process_request(request)
        time1 = time.time()
        dh.fetch(request)

        request = Request('http://httpbin.org/get')
        pm.process_request(request)
        self.assertGreater(time.time() - time1, 3)
Example #22
 def _buildSocket( self ):
     if self.s is not None:
         self.s.close( linger = 0 )
         self.s = None
     self.s = self.ctx.socket( self._socketType )
     self.s.set( zmq.LINGER, 0 )
     if self._isBind:
         self.s.bind( self._url )
     else:
         self.s.connect( self._url )
     if self._isTransactionSocket:
         self._lock = BoundedSemaphore( 1 )
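When _isTransactionSocket is set, request/reply pairs would typically be serialized through that lock; a hedged sketch in this example's style (request is an illustrative method name, and send_json/recv_json are standard pyzmq calls):

 def request( self, msg ):
     # Hold the lock across send and recv so replies cannot interleave.
     with self._lock:
         self.s.send_json( msg )
         return self.s.recv_json()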
Example #23
    def __init__(self,
                 discServer,
                 zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181',
                 reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' % (ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler(
            '/var/log/contrail/discovery_zk.log',
            maxBytes=1024 * 1024,
            backupCount=10)
        log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
                                       datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
Example #24
def test_controller(test_urls, writer_buff):
    tasks_gevent = []
    error_cnt = mp.Manager().Value('i', 0)
    sem = BoundedSemaphore(1)
    num_urls = test_urls.qsize()
    for i in range(num_urls):
        tasks_gevent.append(
            gevent.spawn(test_scan,
                         url=test_urls.get(),
                         writer_buff=writer_buff,
                         error_cnt=error_cnt,
                         sem=sem))
    gevent.joinall(tasks_gevent)
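The body of test_scan is not shown; the skeleton below only illustrates, under stated assumptions, how the shared semaphore and error counter would be used (fetch is a hypothetical request helper):

def test_scan(url, writer_buff, error_cnt, sem):
    try:
        result = fetch(url)  # hypothetical request helper
        with sem:            # serialize access to the shared buffer
            writer_buff.put(result)
    except Exception:
        with sem:
            error_cnt.value += 1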
Example #25
def test_gmail_message_deduplication(db, default_account, all_mail_folder,
                                     trash_folder, mock_imapclient):
    uid = 22
    uid_values = uid_data.example()

    mock_imapclient.list_folders = lambda: [
        (('\\All', '\\HasNoChildren'), '/', u'[Gmail]/All Mail'),
        (('\\Trash', '\\HasNoChildren'), '/', u'[Gmail]/Trash'),
    ]
    mock_imapclient.idle = lambda: None
    mock_imapclient.add_folder_data(all_mail_folder.name, {uid: uid_values})
    mock_imapclient.add_folder_data(trash_folder.name, {uid: uid_values})

    all_folder_sync_engine = GmailFolderSyncEngine(
        default_account.id, default_account.namespace.id, all_mail_folder.name,
        all_mail_folder.id, default_account.email_address, 'gmail',
        BoundedSemaphore(1))
    all_folder_sync_engine.initial_sync()

    trash_folder_sync_engine = GmailFolderSyncEngine(
        default_account.id, default_account.namespace.id, trash_folder.name,
        trash_folder.id, default_account.email_address, 'gmail',
        BoundedSemaphore(1))
    trash_folder_sync_engine.initial_sync()

    # Check that we have two uids, but just one message.
    assert [(uid, )] == db.session.query(
        ImapUid.msg_uid).filter(ImapUid.folder_id == all_mail_folder.id).all()

    assert [(uid, )] == db.session.query(
        ImapUid.msg_uid).filter(ImapUid.folder_id == trash_folder.id).all()

    assert db.session.query(Message).filter(
        Message.namespace_id == default_account.namespace.id,
        Message.g_msgid == uid_values['X-GM-MSGID']).count() == 1
Example #26
    def __init__(self, log, mempool, params):
        self.log = log
        self.mempool = mempool
        self.params = params
        self.utxo_changes = 0
        self.cache = Cache()
        self.cache.clear()

        ## level DB 
        #    pg_block: block data to insert into PG database
        #    pg_tx:    transaction data to insert into PG database
        #    tx:*      transaction outputs
        #    misc:*    state
        #    height:*  list of blocks at height h
        #    blkmeta:* block metadata
        #    blocks:*  block seek point in stream
        datadir = '/data/explorer/blocks/'
        self.db = self.cache.db
        self.blk_write = io.BufferedWriter(io.FileIO(datadir + '/blocks.dat','ab'))
        self.blk_read = io.BufferedReader(io.FileIO(datadir + '/blocks.dat','rb'))

        if self.db.get(b'misc:height') is None:
            self.log.info('INITIALIZING EMPTY BLOCKCHAIN DATABASE')
            with self.db.write_batch(transaction=True) as batch:
                batch.put(b'misc:height', struct.pack('i', -1))
                batch.put(b'misc:msg_start', self.params.NETMAGIC)
                batch.put(b'misc:tophash', ser_uint256(0))
                batch.put(b'misc:total_work', b'0x0')

        start = self.db.get(b'misc:msg_start')
        if start != self.params.NETMAGIC:
            self.log.error("Database magic number mismatch. Data corruption or incorrect network?")
            raise RuntimeError

        self.block_lock = BoundedSemaphore()
        self.address_changes = {}
        self.address_change_count = 0
        self.transaction_change_count = 0
        self.utxo_cache = {}
        self.tx_lock = False
        self.initial_sync = True
        self.wallet_group = WalletGrouper('/data/explorer/wallets')
        self.checktransactions(True)
        self.checkaddresses(True)
        self.checkblocks(0, True)
        self.checkutxos(True)
        self.orphans = {}
        self.orphan_deps = {}
        if Block.select().count(None) == 0:
            self.log.info('Initialising genesis block')
            self.putblock(self.params.GENESIS_BLOCK)
Example #27
    def __init__(self, server_node, iface_cls, config):

        self._section_name = utils.get_module(__name__)
        self._logger = logging.getLogger(__name__)
        self._host = server_node.split(":")[0]
        self._port = int(server_node.split(":")[1])
        self._iface_cls = iface_cls

        self._get_conn_timeout = config.getint(
            self._section_name,
            "pool_timeout",
            default=settings.DEFAULT_POOL_TIMEOUT)
        self._socket_timeout = config.getint(
            self._section_name,
            "request_timeout",
            default=settings.DEFAULT_REQUEST_TIMEOUT) * 1000
        self._size = config.getint(self._section_name,
                                   "pool_size",
                                   default=settings.DEFAULT_POOL_SIZE)

        self._c_module_serialize = config.getboolean(
            self._section_name,
            "c_module_serialize",
            default=settings.USE_C_MODULE_SERIALIZE)

        self._closed = False
        if ASYNC_TAG:
            from gevent.lock import BoundedSemaphore
            from gevent import queue as Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty
        else:
            from threading import BoundedSemaphore
            import Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty
Example #28
 def __init__(self, f, block=True):
     if isinstance(f, io.IOBase):
         self.filename = f.name
         self.handle = f if not f.closed else open(f.name, "w")  # noqa: SIM115
     else:
         self.filename = f
         mkdirp(os.path.dirname(f))
         self.handle = open(f, "w")  # noqa: SIM115
     if block:
         self.lock_op = fcntl.LOCK_EX
     else:
         self.lock_op = fcntl.LOCK_EX | fcntl.LOCK_NB
     self.block = block
     self.gevent_lock = BoundedSemaphore(1)
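A plausible acquire/release pair for this class, combining the in-process gevent semaphore with the cross-process fcntl file lock; these method bodies are a sketch under that assumption, not the original implementation:

 def acquire(self):
     # Serialize greenlets within this process first, then take the
     # OS-level lock so other processes are excluded as well.
     self.gevent_lock.acquire()
     fcntl.flock(self.handle, self.lock_op)

 def release(self):
     fcntl.flock(self.handle, fcntl.LOCK_UN)
     self.gevent_lock.release()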
Example #29
    def __init__(self,
                 process_identifier,
                 process_number,
                 poll_interval=SYNC_POLL_INTERVAL):
        self.host = platform.node()
        self.process_number = process_number
        self.process_identifier = process_identifier
        self.monitor_cls_for = {
            mod.PROVIDER: getattr(mod, mod.SYNC_MONITOR_CLS)
            for mod in module_registry.values()
            if hasattr(mod, 'SYNC_MONITOR_CLS')
        }

        for p_name, p in providers.items():
            if p_name not in self.monitor_cls_for:
                self.monitor_cls_for[p_name] = self.monitor_cls_for["generic"]

        self.log = get_logger()
        self.log.bind(process_number=process_number)
        self.log.info('starting mail sync process',
                      supported_providers=module_registry.keys())

        self.syncing_accounts = set()
        self.email_sync_monitors = {}
        self.contact_sync_monitors = {}
        self.event_sync_monitors = {}
        # Randomize the poll_interval so we maintain at least a little fairness
        # when using a timeout while blocking on the redis queues.
        min_poll_interval = 5
        self.poll_interval = int((random.random() *
                                  (poll_interval - min_poll_interval)) +
                                 min_poll_interval)
        self.semaphore = BoundedSemaphore(1)
        self.zone = config.get('ZONE')

        # Note that we don't partition by zone for the private queues.
        # There's not really a reason to since there's one queue per machine
        # anyways. Also, if you really want to send an Account to a mailsync
        # machine in another zone you can do so.
        self.private_queue = EventQueue(
            SYNC_EVENT_QUEUE_NAME.format(self.process_identifier))
        self.queue_group = EventQueueGroup([
            shared_sync_event_queue_for_zone(self.zone),
            self.private_queue,
        ])

        self.stealing_enabled = config.get('SYNC_STEAL_ACCOUNTS', True)
        self._pending_avgs_provider = None
        self.last_unloaded_account = time.time()
Example #30
 def __init__(self, tasks, result, pool=None):
     self._tasks = tasks
     self._result = result
     self.timeout = 5
     self.headers = {
         'User-Agent':
         'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.321.132 Safari/537.36',
         'Accept': '*/*',
         'Pragma': 'no-cache',
         'Cache-control': 'no-cache',
         'Referer': 'https://www.google.com/'
     }
     self.pool = pool
     self.lock = BoundedSemaphore()
     self.prepare()  # must run after init