Example #1
    def _get_greenlet_lock(self):
        if not hasattr(self._thread_local, 'greenlet_lock'):
            greenlet_lock = self._thread_local.greenlet_lock = BoundedSemaphore(1)
        else:
            greenlet_lock = self._thread_local.greenlet_lock
        return greenlet_lock
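This getter lazily creates one BoundedSemaphore per OS thread, so all greenlets running in the same thread share a single lock. A minimal usage sketch, assuming a hypothetical _do_io helper on the same class (not part of the original):

    def _fetch_exclusively(self):
        # Only one greenlet per thread enters the protected section at a time.
        with self._get_greenlet_lock():
            return self._do_io()  # hypothetical helper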
Example #2
    def __init__(self,
                 account,
                 heartbeat=1,
                 refresh_frequency=30,
                 poll_frequency=30,
                 retry_fail_classes=[],
                 refresh_flags_max=2000):
        self.refresh_frequency = refresh_frequency
        self.poll_frequency = poll_frequency
        self.syncmanager_lock = BoundedSemaphore(1)
        self.refresh_flags_max = refresh_flags_max
        self.saved_remote_folders = None

        provider_supports_condstore = account.provider_info.get(
            'condstore', False)
        account_supports_condstore = getattr(account, 'supports_condstore',
                                             False)
        if provider_supports_condstore or account_supports_condstore:
            self.sync_engine_class = CondstoreFolderSyncEngine
        else:
            self.sync_engine_class = FolderSyncEngine

        self.folder_monitors = Group()

        BaseMailSyncMonitor.__init__(self, account, heartbeat,
                                     retry_fail_classes)
Example #3
    def __init__(self, module, server_list):
        # logging
        logger = logging.getLogger(module)
        logger.setLevel(logging.INFO)
        try:
            handler = logging.handlers.RotatingFileHandler(
                '/var/log/contrail/' + module + '-zk.log',
                maxBytes=10 * 1024 * 1024,
                backupCount=5)
        except IOError:
            print("Cannot open log file in /var/log/contrail/")
        else:
            log_format = logging.Formatter(
                '%(asctime)s [%(name)s]: %(message)s',
                datefmt='%m/%d/%Y %I:%M:%S %p')
            handler.setFormatter(log_format)
            logger.addHandler(handler)

        self._zk_client = \
            kazoo.client.KazooClient(
                server_list,
                handler=kazoo.handlers.gevent.SequentialGeventHandler(),
                logger=logger)

        self._logger = logger
        self._election = None
        self._zk_sem = BoundedSemaphore(1)
        self.connect()
Example #4
    def sync(self):
        sync_folder_names_ids = self.prepare_sync()
        thread_download_lock = BoundedSemaphore(1)
        for folder_name, folder_id in sync_folder_names_ids:
            log.info('initializing folder sync')
            thread = GmailFolderSyncEngine(
                thread_download_lock, self.account_id, folder_name, folder_id,
                self.email_address, self.provider_name, self.poll_frequency,
                self.syncmanager_lock, self.refresh_flags_max,
                self.retry_fail_classes)
            thread.start()
            self.folder_monitors.add(thread)
            if thread.should_block:
                while not self._thread_polling(thread) and \
                        not self._thread_finished(thread) and \
                        not thread.ready():
                    sleep(self.heartbeat)

            # Allow individual folder sync monitors to shut themselves down
            # after completing the initial sync.
            if self._thread_finished(thread) or thread.ready():
                log.info('folder sync finished/killed',
                         folder_name=thread.folder_name)
                # NOTE: Greenlet is automatically removed from the group.

        self.folder_monitors.join()
Example #5
    def __init__(
        self,
        syncback_id,
        process_number,
        total_processes,
        poll_interval=1,
        retry_interval=120,
        num_workers=NUM_PARALLEL_ACCOUNTS,
        batch_size=20,
        fetch_batch_size=100,
    ):
        self.process_number = process_number
        self.total_processes = total_processes
        self.poll_interval = poll_interval
        self.retry_interval = retry_interval

        # Number of log entries to fetch before merging/de-duplication to
        # determine which records need to be processed.
        self.fetch_batch_size = fetch_batch_size

        # Number of log entries to process in a batch.
        self.batch_size = batch_size

        self.keep_running = True
        self.workers = gevent.pool.Group()
        # Dictionary account_id -> semaphore to serialize action syncback for
        # any particular account.
        # TODO(emfree): We really only need to serialize actions that operate
        # on any given object. But IMAP actions are already effectively
        # serialized by using an IMAP connection pool of size 1, so it doesn't
        # matter too much.
        self.account_semaphores = defaultdict(lambda: BoundedSemaphore(1))
        # This SyncbackService performs syncback for all (and only) the
        # accounts on the shards it is responsible for; shards are divided
        # up between running SyncbackServices.
        self.log = logger.new(component="syncback")
        syncback_assignments = {
            int(k): v
            for k, v in config.get("SYNCBACK_ASSIGNMENTS", {}).items()
        }
        if syncback_id in syncback_assignments:
            self.keys = [
                key for key in engine_manager.engines
                if key in syncback_assignments[syncback_id] and key %
                total_processes == process_number
            ]
        else:
            self.log.warn("No shards assigned to syncback server",
                          syncback_id=syncback_id)
            self.keys = []

        self.log = logger.new(component="syncback")
        self.num_workers = num_workers
        self.num_idle_workers = 0
        self.worker_did_finish = gevent.event.Event()
        self.worker_did_finish.clear()
        self.task_queue = Queue()
        self.running_action_ids = set()
        gevent.Greenlet.__init__(self)
Example #6
    def __init__(self, redis_uve_server, logger):
        self._local_redis_uve = redis_uve_server
        self._redis_uve_list = []
        self._logger = logger
        self._sem = BoundedSemaphore(1)
        self._redis = None
        if self._local_redis_uve:
            self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                            self._local_redis_uve[1], db=0)
Example #7
    def connectionMade(self):
        """Handle a newly established connection."""
        address = self.transport.getAddress()
        logger.info('Client %d logged in. [%s, %d]' %
                    (self.transport.sessionno, address[0], address[1]))
        self.factory.connmanager.addConnection(self)
        self.factory.doConnectionMade(self)
        self.sem = BoundedSemaphore(1)
Example #8
    def __init__(self, socket, client_class):
        self.socket = None
        self.mutex = BoundedSemaphore(1)
        self.socket = TSocket.TSocket(unix_socket=socket)
        self.transport = TTransport.TBufferedTransport(self.socket)
        self.protocol = TBinaryProtocol.TBinaryProtocolAccelerated(
            self.transport)
        self.client = client_class(self.protocol)
        self.connected = False
Example #9
    def __init__(self, account, heartbeat=1, refresh_frequency=30):
        self.refresh_frequency = refresh_frequency
        self.syncmanager_lock = BoundedSemaphore(1)
        self.saved_remote_folders = None
        self.sync_engine_class = FolderSyncEngine

        self.folder_monitors = Group()

        BaseMailSyncMonitor.__init__(self, account, heartbeat)
Example #10
    def __init__(self, poll_interval=1, chunk_size=100):
        semaphore_factory = lambda: BoundedSemaphore(CONCURRENCY_LIMIT)
        self.semaphore_map = defaultdict(semaphore_factory)
        self.keep_running = True
        self.running = False
        self.log = logger.new(component='syncback')
        self.poll_interval = poll_interval
        self.chunk_size = chunk_size
        self._scheduled_actions = set()
        gevent.Greenlet.__init__(self)
Example #11
    def __init__(self, account_id, num_connections, readonly):
        logger.info('Creating Crispin connection pool for account {} with {} '
                    'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._new_conn_lock = BoundedSemaphore(1)
        self._set_account_info()

        # 1200s == 20min
        geventconnpool.ConnectionPool.__init__(
            self, num_connections, keepalive=1200,
            exc_classes=CONN_DISCARD_EXC_CLASSES)
Example #12
    def __init__(self, redis_uve_server, logger, redis_password=None):
        self._local_redis_uve = redis_uve_server
        self._redis_uve_list = []
        self._logger = logger
        self._sem = BoundedSemaphore(1)
        self._redis = None
        self._redis_password = redis_password
        if self._local_redis_uve:
            self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                            self._local_redis_uve[1],
                                            password=self._redis_password,
                                            db=1)
Example #13
    def __init__(self,
                 discServer,
                 zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181',
                 reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' % (ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler(
            '/var/log/contrail/discovery_zk.log',
            maxBytes=1024 * 1024,
            backupCount=10)
        log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
                                       datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
Example #14
    def __init__(self, localServer):
        Greenlet.__init__(self)
        self.callinfos = {}
        self.localServer = localServer
        self.recv_queue = Queue()
        self.lock = BoundedSemaphore(1)

        self.handle_stoping = False
        self.recv_stop_evt = Event()
        self.timeout_stop_evt = Event()

        self.timeout_handle_greenlet = gevent.spawn(self.on_timeout_handle)
        self.timeout_handle_greenlet.start()
Example #15
class Cache(dict):

    semaphore = BoundedSemaphore()

    def __setattr__(self, key, value):
        with self.semaphore:
            self[key] = value

    def __delattr__(self, key):
        with self.semaphore:
            try:
                del self[key]
            except KeyError as k:
                raise AttributeError(k)
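A short usage sketch for this Cache (assumed, not from the original source). Since semaphore is a class attribute, every Cache instance shares the same lock:

    cache = Cache()
    cache.user = 'alice'   # __setattr__ stores self['user'] under the semaphore
    print(cache['user'])   # reads go through the plain dict interface
    del cache.user         # __delattr__ removes the key under the semaphore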
Example #16
    def __init__(self, poll_interval=1, retry_interval=30):
        self.log = logger.new(component='syncback')
        self.keep_running = True
        self.poll_interval = poll_interval
        self.retry_interval = retry_interval
        self.workers = gevent.pool.Group()
        # Dictionary account_id -> semaphore to serialize action syncback for
        # any particular account.
        # TODO(emfree): We really only need to serialize actions that operate
        # on any given object. But IMAP actions are already effectively
        # serialized by using an IMAP connection pool of size 1, so it doesn't
        # matter too much.
        self.account_semaphores = defaultdict(lambda: BoundedSemaphore(1))
        gevent.Greenlet.__init__(self)
Example #17
    def __init__(self, f, block=True):
        if hasattr(f, 'read'):  # a file-like object was passed in
            self.filename = f.name
            # reopen by name if the handle was already closed
            self.handle = f if not f.closed else open(f.name, 'w')
        else:
            self.filename = f
            mkdirp(os.path.dirname(f))
            self.handle = open(f, 'w')
        if block:
            self.lock_op = fcntl.LOCK_EX
        else:
            self.lock_op = fcntl.LOCK_EX | fcntl.LOCK_NB
        self.block = block
        self.gevent_lock = BoundedSemaphore(1)
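The gevent semaphore here complements fcntl: flock coordinates between processes, while the BoundedSemaphore keeps greenlets within one process from racing on the same handle. A hedged sketch of what the matching acquire/release pair might look like (method names are assumptions, not from the original):

    def acquire(self):
        self.gevent_lock.acquire()               # serialize greenlets in-process
        fcntl.flock(self.handle, self.lock_op)   # then lock across processes

    def release(self):
        fcntl.flock(self.handle, fcntl.LOCK_UN)
        self.gevent_lock.release()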
Example #18
    def __init__(self, args):

        self.thread = None
        self.socket = None
        self.cli_args = args
        self.force_debug = args['debug']

        # timers, counters and triggers
        self.pokemon_caught = 0
        self._error_counter = 0
        self._error_threshold = 10
        self.start_time = time()
        self.exp_start = None
        self._heartbeat_number = 1  # set back to one because a full heartbeat is already parsed during login
        self._heartbeat_frequency = 3  # 1 = always
        self._full_heartbeat_frequency = 15  # 10 = as before (every 10th heartbeat)
        self._farm_mode_triggered = False

        # objects, order is important!
        self.config = None
        self._load_config()

        self.log = create_logger(__name__,
                                 self.config.log_colors["poketrainer".upper()])

        self._open_socket()

        self.player = Player({})
        self.player_stats = PlayerStats({})
        self.inventory = Inventory(self, [])
        self.fort_walker = FortWalker(self)
        self.map_objects = MapObjects(self)
        self.poke_catcher = PokeCatcher(self)
        self.incubate = Incubate(self)
        self.evolve = Evolve(self)
        self.release = Release(self)
        self.sniper = Sniper(self)

        self._origPosF = (0, 0, 0)
        self.api = None
        self._load_api()

        # config values that might be changed during runtime
        self.step_size = self.config.step_size
        self.should_catch_pokemon = self.config.should_catch_pokemon

        # threading / locking
        self.sem = BoundedSemaphore(1)  # gevent
        self.persist_lock = False
        self.locker = None
Example #19
    def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
        self.size = size
        self.conn = deque()
        self.lock = BoundedSemaphore(size)
        self.keepalive = keepalive
        # Exceptions list must be in tuple form to be caught properly
        self.exc_classes = tuple(exc_classes)
        for i in xrange(size):
            self.lock.acquire()
        for i in xrange(size):
            greenlet = TestRunGreenlet(self._addOne)
            greenlet.start_later(self.SPAWN_FREQUENCY * i)
        if self.keepalive:
            greenlet = TestRunGreenlet(self._keepalive_periodic)
            greenlet.start_later()
Example #20
    def __init__(self, amqp_info):
        Greenlet.__init__(self)
        ExampleConsumer.__init__(self, amqp_info)
        self.callinfos = {}
        self.send_queue = Queue()
        self.lock = BoundedSemaphore(1)
        self.send_greenlet = None

        self.handle_stoping = False
        self.send_stop_evt = Event()

        self.timeout_stop_evt = Event()

        self.timeout_handle_greenlet = gevent.spawn(self.on_timeout_handle)
        self.timeout_handle_greenlet.start()
Example #21
    def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
        """
        :param exc_classes: tuple of exception classes; a connection that
            raises one of these is dropped from the pool and replaced
        """
        self.size = size
        self.connections = deque()
        self.lock = BoundedSemaphore(size)
        self.keepalive = keepalive
        # Exceptions list must be in tuple form to be caught properly
        self.exc_classes = tuple(exc_classes)
        for i in xrange(size):
            self.lock.acquire()
        for i in xrange(size):
            gevent.spawn_later(self.SPAWN_FREQUENCY * i, self._add_one)
        if self.keepalive:
            gevent.spawn(self._keepalive_periodic)
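In this pool pattern the semaphore counts available connections: it starts at size, is drained to zero up front, and each successful _add_one presumably releases one slot as it appends a connection. A hedged sketch of the counterpart methods in the style of geventconnpool (the bodies are illustrative assumptions, not the source):

    from contextlib import contextmanager

    def _add_one(self):
        conn = self._make_connection()   # assumed connection factory
        if conn is not None:
            self.connections.append(conn)
            self.lock.release()          # one more connection is available

    @contextmanager
    def get(self):
        self.lock.acquire()              # block until a connection is free
        conn = self.connections.popleft()
        try:
            yield conn
        finally:
            self.connections.append(conn)
            self.lock.release()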
Example #22
    def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
        self.size = size
        self.conn = deque()
        self.lock = BoundedSemaphore(size)
        self.keepalive = keepalive
        # Exceptions list must be in tuple form to be caught properly
        self.exc_classes = tuple(exc_classes)
        # http://stackoverflow.com/a/31136897/357578
        try:
            xrange
        except NameError:
            xrange = range
        for i in xrange(size):
            self.lock.acquire()
        for i in xrange(size):
            gevent.spawn_later(self.SPAWN_FREQUENCY * i, self._addOne)
        if self.keepalive:
            gevent.spawn(self._keepalive_periodic)
Example #23
    def process_parallely(self, year, last_page):
        """
            Process the pages of a particular year in parallel

            @param year (Number): year
            @param last_page (Number): Last page number
        """

        threads = []
        params = dict(PARAMS)
        params["year"] = year
        for page in xrange(1, last_page):
            params["page"] = page
            url = "https://www.codechef.com/submissions?" + urlencode(params)
            threads.append(gevent.spawn(self.process_page, year, page, url))

        self.semaphore = BoundedSemaphore(len(threads))
        gevent.joinall(threads)
Example #24
    def __init__(self,
                 discServer,
                 zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181',
                 reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)

        self._zk = kazoo.client.KazooClient(
            hosts='%s:%s' % (zk_srv_ip, zk_srv_port),
            timeout=120,
            handler=kazoo.handlers.gevent.SequentialGeventHandler())

        # connect
        while True:
            try:
                self._zk.start()
                break
            except gevent.Timeout:
                self.syslog(
                    'Failed to connect with Zookeeper - will retry in a second'
                )
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')

        if reset_config:
            self._zk.delete("/services", recursive=True)
            self._zk.delete("/clients", recursive=True)
            self._zk.delete("/publishers", recursive=True)
            self._zk.delete("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/publishers")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
Example #25
    def __init__(self, input, output, func, nthreads=800, maxcnt=None):
        """@todo: to be defined

        :param input: @todo
        :param output: @todo
        :param func: @todo
        :param qname: @todo

        """
        self._func = func
        self._input = input
        self._output = output
        self._lock = BoundedSemaphore(1)
        self._pool = Pool(nthreads)
        self._nthreads = nthreads
        self._true = 0
        self._false = 0
        self._nogeo = 0
        self._notruth = 0
        self.maxcnt = maxcnt
Example #26
    def load(self, spider):
        redis_args = dict(host=self.settings.REDIS_URL,
                          port=self.settings.REDIS_PORT,
                          db=self.settings.REDIS_DB)
        if hasattr(self.settings, 'NAMESPACE'):
            redis_args['namespace'] = self.settings.NAMESPACE
        else:
            redis_args['namespace'] = spider.name
        self.url_set = redisds.Set('urlset', **redis_args)
        self.url_queue = redisds.Queue('urlqueue',
                                       serializer=Pickle(),
                                       **redis_args)
        self.runner = redisds.Lock("runner:%s" % uuid4().hex, **redis_args)
        self.runners = redisds.Dict("runner:*", **redis_args)
        self.stats = redisds.Dict("stats:*", **redis_args)
        self.lock = BoundedSemaphore(1)
        self.running_count = 0
        self.allowed_urls_regex = self.get_regex(spider.allowed_domains)
        self.spider = spider
        self.start()
Example #27
    def __init__(self,
                 account,
                 heartbeat=1,
                 refresh_frequency=30,
                 syncback_frequency=5):
        # DEPRECATED.
        # TODO[k]: Remove after sync-syncback integration deploy is complete.
        self.refresh_frequency = refresh_frequency
        self.syncmanager_lock = BoundedSemaphore(1)
        self.saved_remote_folders = None
        self.sync_engine_class = FolderSyncEngine
        self.folder_monitors = Group()

        self.delete_handler = None

        self.syncback_handler = None
        self.folder_sync_signals = {}
        self.syncback_timestamp = None
        self.syncback_frequency = syncback_frequency

        BaseMailSyncMonitor.__init__(self, account, heartbeat)
Example #28
    def __init__(self, name, force_new_connection=False):
        """@todo: to be defined

        :param name: @todo
        :param force_new_connection: @todo

        """

        if not INITTED:
            log.warn("QUEUE INIT Not called, calling")
            init()

        self._name = RPC_PREFIX + name
        self._connection = connect(force_new_connection)
        self._channel = self._connection.channel()
        self._queue = self._connection.SimpleQueue(
            "", queue_opts={"exclusive": True})
        self._marshal = queue.JsonMarshal()
        self._producer = messaging.Producer(self._channel,
                                            routing_key=self._name)
        self._lock = BoundedSemaphore(1)
        self.reqs = {}
Example #29
def mass_deauth(network, iface):
    sniff_ap(iface, network)
    channels = set()
    for bssid in network:
        if 'channel' in network[bssid] and network[bssid]['channel'] < 13:
            channels.add(network[bssid]['channel'])

    q = Queue()
    lock = BoundedSemaphore(1)
    t3 = gevent.spawn(switch_channel, lock, iface, list(channels))
    t1 = gevent.spawn(sniff_data, q, lock, iface)
    t2 = gevent.spawn(add_network, q, network)
    t4 = gevent.spawn(deauth_attack, lock, network, iface)
    ts = [t1, t2, t3, t4]
    gevent.signal(signal.SIGINT, gevent.killall, ts)
    try:
        gevent.joinall(ts, timeout=600)
    except KeyboardInterrupt:
        pass
    finally:
        print('---kill all---')
        gevent.killall(ts)
Example #30
    def __init__(self,
                 syncback_id,
                 cpu_id,
                 total_cpus,
                 poll_interval=1,
                 retry_interval=30):
        self.cpu_id = cpu_id
        self.total_cpus = total_cpus
        self.poll_interval = poll_interval
        self.retry_interval = retry_interval
        self.keep_running = True
        self.workers = gevent.pool.Group()
        # Dictionary account_id -> semaphore to serialize action syncback for
        # any particular account.
        # TODO(emfree): We really only need to serialize actions that operate
        # on any given object. But IMAP actions are already effectively
        # serialized by using an IMAP connection pool of size 1, so it doesn't
        # matter too much.
        self.account_semaphores = defaultdict(lambda: BoundedSemaphore(1))
        # This SyncbackService performs syncback for all (and only) the
        # accounts on the shards it is responsible for; shards are divided
        # up between running SyncbackServices.
        self.log = logger.new(component='syncback')
        syncback_assignments = config.get("SYNCBACK_ASSIGNMENTS", {})
        if syncback_id in syncback_assignments:
            self.keys = [
                key for key in engine_manager.engines
                if key in syncback_assignments[syncback_id] and key %
                total_cpus == cpu_id
            ]
        else:
            self.log.warn("No shards assigned to syncback server",
                          syncback_id=syncback_id)
            self.keys = []

        self.log = logger.new(component='syncback')
        gevent.Greenlet.__init__(self)
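Nearly every example above uses BoundedSemaphore(1) as a mutex between cooperating greenlets. A minimal self-contained sketch of that core pattern (illustrative, not taken from any of the sources above):

    import gevent
    from gevent.lock import BoundedSemaphore

    lock = BoundedSemaphore(1)
    counter = 0

    def worker():
        global counter
        with lock:           # one greenlet in the critical section at a time
            current = counter
            gevent.sleep(0)  # yield; without the lock this would lose updates
            counter = current + 1

    gevent.joinall([gevent.spawn(worker) for _ in range(10)])
    assert counter == 10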