def __init__(self, server, jobmanager, **config):
    Greenlet.__init__(self)
    self._set_config(**config)
    self.jobmanager = jobmanager
    self.server = server
    self.reporter = server.reporter
    self.logger = server.register_logger('auxmonitor_{}'.format(self.config['name']))
    self.block_stats = dict(accepts=0,
                            rejects=0,
                            solves=0,
                            last_solve_height=None,
                            last_solve_time=None,
                            last_solve_worker=None)
    self.current_net = dict(difficulty=None, height=None)
    self.recent_blocks = deque(maxlen=15)

    self.prefix = self.config['name'] + "_"
    # create an instance-local one_min_stats for use in the status method
    self.one_min_stats = [self.prefix + key for key in self.one_min_stats]
    self.server.register_stat_counters(self.one_min_stats)

    self.coinservs = self.config['coinservs']
    self.coinserv = bitcoinrpc.AuthServiceProxy(
        "http://{0}:{1}@{2}:{3}/"
        .format(self.coinservs[0]['username'],
                self.coinservs[0]['password'],
                self.coinservs[0]['address'],
                self.coinservs[0]['port']),
        pool_kwargs=dict(maxsize=self.coinservs[0].get('maxsize', 10)))
    self.coinserv.config = self.coinservs[0]

    if self.config['signal']:
        gevent.signal(self.config['signal'], self.update, reason="Signal received")
def __init__(self, poll_interval=30, chunk_size=DOC_UPLOAD_CHUNK_SIZE):
    self.poll_interval = poll_interval
    self.chunk_size = chunk_size
    self.transaction_pointers = {}
    self.log = log.new(component='contact-search-index')
    Greenlet.__init__(self)
def __init__(self, server_state, net_state, config, monitor_network, **kwargs):
    Greenlet.__init__(self)
    self.net_state = net_state
    self.server_state = server_state
    self.config = config
    self.monitor_network = monitor_network
    self.__dict__.update(kwargs)
    self.server_state['aux_state'][self.name] = {
        'difficulty': None,
        'height': None,
        'chain_id': None,
        'block_solve': None,
        'work_restarts': 0,
        'new_jobs': 0,
        'solves': 0,
    }
    # convenience
    self.aux_state = self.server_state['aux_state'][self.name]
    # keep the list of coinserv configs under self.coinservs, then replace
    # self.coinserv with an RPC proxy for the first entry
    self.coinservs = self.coinserv
    self.coinserv = bitcoinrpc.AuthServiceProxy(
        "http://{0}:{1}@{2}:{3}/"
        .format(self.coinservs[0]['username'],
                self.coinservs[0]['password'],
                self.coinservs[0]['address'],
                self.coinservs[0]['port']),
        pool_kwargs=dict(maxsize=self.coinservs[0].get('maxsize', 10)))
    self.coinserv.config = self.coinservs[0]
def __init__(self, *args, **kwargs):
    self._inbox = Queue()
    self._running = True
    Greenlet.__init__(self)
    # store the arguments before starting so the greenlet's run method
    # can never observe them unset
    self._args = args
    self._kwargs = kwargs
    self.start()
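# Illustrative sketch, not from the source: an actor built on the inbox
# pattern above typically drains the Queue in its _run method until stopped.
# The `_handle` method name is a hypothetical placeholder.
def _run(self):
    while self._running:
        message = self._inbox.get()  # blocks cooperatively on the gevent Queue
        self._handle(message)        # hypothetical per-message handler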
def __init__(self, log, bindport, node_id):
    Greenlet.__init__(self)
    self.log = log
    self.dstaddr = '0.0.0.0'
    self.bindport = bindport
    self.node_id = node_id
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.sock.bind(('', bindport))
    self.last_sent = 0
    self.dht_cache = LRU.LRU(100000)
    self.dht_router = DHTRouter(self)
    self.taskman = DHTTaskManager()
    self.log.write(self.dstaddr + ':' + str(self.bindport) + " open DHT")

    # add ourselves to the router
    node_msgobj = codec_pb2.MsgDHTNode()
    node_msgobj.node_id = node_id
    node_msgobj.ip = socket.inet_pton(socket.AF_INET, "127.0.0.1")
    node_msgobj.port = bindport
    rc = self.dht_router.add_node(node_msgobj)
    if rc < 0:
        self.log.write("DHT: failed to add node %s %d" % ("127.0.0.1", bindport))
def __init__(self, name):
    Greenlet.__init__(self)
    self.name = name
    self.inbox = Queue()
    self.outbox = Queue()
    self.proceed = Event()
    self.proceed.set()
def __init__(self, scHandle):
    """ Saves the handle and initializes the greenlet. """
    Greenlet.__init__(self)
    self.scHandle = scHandle
def __init__(self, node, peersocket, address, port=None):
    # for an incoming connection, port is None
    # for an outgoing connection, socket is None
    Greenlet.__init__(self)
    self.node = node
    self.socket = peersocket
    self.dstaddr = address
    self.dstport = port
    self.recvbuf = ""
    self.last_sent = 0
    self.getblocks_ok = True
    self.last_block_rx = time.time()
    self.last_getblocks = 0
    self.hash_continue = None
    self.log = log
    self.ver_recv = MIN_PROTO_VERSION
    self.remote_height = -1
    if self.socket:
        self.direction = "INCOMING"
        print("incoming connection")
    else:
        self.socket = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.direction = "OUTGOING"
        print("outgoing connection!")
        print("connecting")
        try:
            self.socket.connect((self.dstaddr, self.dstport))
        except Exception as err:
            print("Exception:", err)
            print("Unable to establish connection")
            self.handle_close()
        self.sendVersionMessage()
def __init__(self, user_id, name, all_fields, timelimit, notify_fields, session):
    Greenlet.__init__(self)
    self.all_fields = all_fields
    self.user_id, self.timelimit = user_id, timelimit
    self.name = name
    self.session = session
    self.notify_fields = set(notify_fields)
def __init__(self,gc,threadList,processor=""): Greenlet.__init__(self) self.gc=gc self.opList=gc.opList self.threadList=threadList self.processor=processor self.sleeptimes=600
def __init__(self, scHandle):
    """ Initializes the response monitor with the Scarecrow handle. """
    Greenlet.__init__(self)
    self.scHandle = scHandle
def __init__(self, bot):
    # Make the thread stoppable
    Greenlet.__init__(self)
    self._stop = gevent.event.Event()

    self.bot = bot          # Handle to PerusBot
    self.repeats = {}       # Explained in docstring
    self.calls = {}         # Explained in docstring
    self.lastrepeat = 0     # Time when last repeating tasks were done

    # Add own logger to Timer.
    self.log = logger.Logger('Timer', bot.sets['timer_logfile'])

    # USE DATABASE.
    # Column definitions for the table to create.
    table_values = 'id INTEGER PRIMARY KEY,time INTEGER,'
    table_values += 'receiver TEXT,msg TEXT'
    # Basically, id is null and each ? is replaced by the given value when
    # insert_data() is called.
    ins_params = '(null, ?, ?, ?)'
    db_file = self.bot.sets['timer_db_file']  # Database filename
    table = 'jobs'
    self.db = database.DbHandler(self.bot, db_file, table, table_values,
                                 ins_params)
def __init__(self, sock):
    Greenlet.__init__(self)
    self.sock = sock
    self.recvq = Queue()
    self.sendq = Queue()
    self.send = self.sendq.put
    self.recv = self.recvq.get
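# Illustrative sketch, not from the source: a queue pair like the one above is
# usually pumped by two loops so that callers only ever touch self.send and
# self.recv. The method names _writer/_reader are assumptions.
def _writer(self):
    while True:
        self.sock.sendall(self.sendq.get())

def _reader(self):
    while True:
        data = self.sock.recv(4096)
        if not data:
            break
        self.recvq.put(data)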
def __init__(self, monitor, ns, state_ns, sink=None, inclusive=False,
             force=False, noop=False):
    Greenlet.__init__(self)
    AMongoMonitorObject.__init__(self)
    assert isinstance(monitor, AMongoMonitor)

    if sink is None:
        sink = monitor
    else:
        assert isinstance(sink, AMongoCollectionSink)

    self._ns = ns
    self._sink = sink
    self._monitor = monitor
    self._collection = self._get_collection(monitor.conn, ns)
    self._state_collection = self._get_collection(monitor.conn, state_ns)
    self._inclusive = inclusive
    self._force = force
    self._noop = noop
    self._stopped = False
def __init__(self, sock, addr):
    Endpoint.__init__(self, sock, addr)
    Greenlet.__init__(self)
    self.observers = BatchList()
    self.init_gamedata_mixin()
    self.gdhistory = []
    self.usergdhistory = []
def __init__(self, account_id, folder_name, folder_id, email_address,
             provider_name, poll_frequency, syncmanager_lock,
             refresh_flags_max, retry_fail_classes):
    self.account_id = account_id
    self.folder_name = folder_name
    self.folder_id = folder_id
    self.poll_frequency = poll_frequency
    self.syncmanager_lock = syncmanager_lock
    self.refresh_flags_max = refresh_flags_max
    self.retry_fail_classes = retry_fail_classes
    self.state = None
    self.provider_name = provider_name

    with mailsync_session_scope() as db_session:
        account = db_session.query(Account).get(self.account_id)
        self.throttled = account.throttled
        self.namespace_id = account.namespace.id
        assert self.namespace_id is not None, "namespace_id is None"

    self.state_handlers = {
        'initial': self.initial_sync,
        'initial uidinvalid': self.resync_uids,
        'poll': self.poll,
        'poll uidinvalid': self.resync_uids,
        'finish': lambda self: 'finish',
    }

    Greenlet.__init__(self)

    self.sync_status = SyncStatus(self.account_id, self.folder_id)
    self.sync_status.publish(provider_name=self.provider_name,
                             folder_name=self.folder_name)
def __init__(self, reporter):
    Greenlet.__init__(self)
    self.reporter = reporter
    self.queue = reporter.queue
    self.config = reporter.config
    self.logger = reporter.register_logger('puller')
    self.context = None
def __init__(self, run=None, *args, **kwargs):
    Greenlet.__init__(self)
    self.isStart = False
    self.inbox = queue.Queue()
    context = zmq.Context()
    self.sock = context.socket(zmq.PUB)
    self.to_db_address = (ToDBAddress().m2db_host, ToDBAddress().m2db_port)
def __init__(self, sock, data_listener, outfile=None, prefix_state=False):
    """
    @param sock The connection to read in characters from the instrument.
    @param data_listener data_listener(sample) is called whenever a new
           data line is received, where sample is a dict indexed by the
           names in trhph.CHANNEL_NAMES.
    @param outfile name of output file for all received data; None by
           default.
    @param prefix_state True to prefix each line in the outfile with the
           current state; False by default.
    """
    # Thread.__init__(self, name="_Recv")
    Greenlet.__init__(self)
    self._sock = sock
    self._data_listener = data_listener
    self._last_line = ''
    self._new_line = ''
    self._lines = []
    self._active = True
    self._outfile = outfile
    self._prefix_state = prefix_state
    self._state = None
    # self.setDaemon(True)
    self._last_data_burst = None
    self._diagnostic_data = []
    self._system_info = {}
    self._power_statuses = {}
    log.debug("_Recv created.")
def __init__(self, max_sessions, clear_sessions=False, delay_seconds=30):
    assert delay_seconds > 1
    Greenlet.__init__(self)
    db_session = database_setup.get_session()
    self.enabled = True

    # Pending sessions will be converted to attacks if we cannot match
    # them with bait traffic within this period.
    self.delay_seconds = delay_seconds

    # Clear all pending sessions on startup.
    pending_classification = db_session.query(Classification).filter(
        Classification.type == 'pending').one()
    pending_deleted = db_session.query(Session).filter(
        Session.classification == pending_classification).delete()
    db_session.commit()
    logging.info('Cleaned {0} pending sessions on startup'.format(pending_deleted))
    self.do_classify = False
    self.do_maintenance = False

    if clear_sessions or max_sessions == 0:
        db_session = database_setup.get_session()
        count = db_session.query(Session).delete()
        logging.info('Deleting {0} sessions on startup.'.format(count))
        db_session.commit()

    self.max_session_count = max_sessions
    if max_sessions:
        logger.info('Database has been limited to contain {0} sessions.'.format(max_sessions))

    context = beeswarm.shared.zmq_context
    # prepare sockets
    self.drone_data_socket = context.socket(zmq.SUB)
    self.processedSessionsPublisher = context.socket(zmq.PUB)
    self.databaseRequests = context.socket(zmq.REP)
    self.config_actor_socket = context.socket(zmq.REQ)
    self.drone_command_receiver = context.socket(zmq.PUSH)
def __init__(self, log, peermgr, sock=None, dstaddr=None, dstport=None):
    Greenlet.__init__(self)
    self.log = log
    self.peermgr = peermgr
    self.dstaddr = dstaddr
    self.dstport = dstport
    self.recvbuf = ""
    self.ver_send = MIN_PROTO_VERSION
    self.last_sent = 0

    if sock is None:
        self.log.write("connecting to " + self.dstaddr)
        self.outbound = True
        try:
            self.sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((dstaddr, dstport))
        except Exception:
            self.handle_close()

        # immediately send version message
        vt = self.version_msg()
        self.send_message("version", vt)
    else:
        self.sock = sock
        self.outbound = False
        if self.dstaddr is None:
            self.dstaddr = '0.0.0.0'
        if self.dstport is None:
            self.dstport = 0
        self.log.write(self.dstaddr + " connected")
def __init__(self, max_sessions, clear_sessions=False, delay_seconds=30):
    assert delay_seconds > 1
    Greenlet.__init__(self)
    self.db_session = database_setup.get_session()

    # Pending sessions will be converted to attacks if we cannot match
    # them with bait traffic within this period.
    self.delay_seconds = delay_seconds

    # Clear all pending sessions on startup.
    pending_classification = self.db_session.query(Classification).filter(
        Classification.type == 'pending').one()
    pending_deleted = self.db_session.query(Session).filter(
        Session.classification == pending_classification).delete()
    self.db_session.commit()
    logging.info('Cleaned {0} pending sessions on startup'.format(pending_deleted))
    self.do_classify = False

    if clear_sessions or max_sessions == 0:
        count = self.db_session.query(Session).delete()
        logging.info('Deleting {0} sessions on startup.'.format(count))
        self.db_session.commit()

    self.max_session_count = max_sessions
    if max_sessions:
        logger.info('Database has been limited to contain {0} sessions.'.format(max_sessions))

    context = beeswarm.shared.zmq_context
    self.subscriber_sessions = context.socket(zmq.SUB)
    self.subscriber_sessions.connect(SocketNames.RAW_SESSIONS)
    self.subscriber_sessions.setsockopt(zmq.SUBSCRIBE, Messages.SESSION_CLIENT)
    self.subscriber_sessions.setsockopt(zmq.SUBSCRIBE, Messages.SESSION_HONEYPOT)
    self.processedSessionsPublisher = context.socket(zmq.PUB)
    self.processedSessionsPublisher.bind(SocketNames.PROCESSED_SESSIONS)
    self.config_actor_socket = context.socket(zmq.REQ)
    self.config_actor_socket.connect(SocketNames.CONFIG_COMMANDS)
def __init__(self, sock, addr):
    Endpoint.__init__(self, sock, addr)
    Greenlet.__init__(self)
    self.observers = BatchList()
    self.gamedata = Gamedata()
    self.cmd_listeners = defaultdict(WeakSet)
    self.current_game = None
def __init__(self, fetcher_url_queue, process_html_queue, start_url,
             max_depth, url_list):
    Greenlet.__init__(self)
    self.fetcher_url_queue = fetcher_url_queue
    self.process_html_queue = process_html_queue
    self.start_url = start_url
    self.max_depth = max_depth
    self.url_list = url_list
def __init__(self, sock, addr):
    Endpoint.__init__(self, sock, addr)
    Greenlet.__init__(self)
    self.gdqueue = deque(maxlen=100000)
    self.gdevent = Event()
    self.ctlcmds = Queue(0)
    self.userid = 0
def __init__(self, gdlist):
    Greenlet.__init__(self)
    self.gdlist = gdlist
    self.channel = Queue(100000)
    # count how many entries exist for each index value 0-9
    indexes = [i[0] for i in gdlist]
    cnt = [indexes.count(i) for i in xrange(10)]
    self.data_count = cnt
def __init__(self, poll_interval=30, chunk_size=100):
    self.poll_interval = poll_interval
    self.chunk_size = chunk_size
    self.transaction_pointer = None
    self.log = log.new(component='search-index')
    Greenlet.__init__(self)
def __init__(self, conn_pool):
    Greenlet.__init__(self)
    self.conn_pool = conn_pool
    self.connections = self.conn_pool.pool
    self.max_idle = self.conn_pool.max_idle
    self.eviction_delay = self.conn_pool.eviction_delay
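# Illustrative sketch, not from the source: an evictor greenlet like the one
# above plausibly wakes every eviction_delay seconds and closes connections
# idle longer than max_idle. The `last_used` and `close` attributes are
# hypothetical names for illustration.
import time
import gevent

def _run(self):
    while True:
        gevent.sleep(self.eviction_delay)
        now = time.time()
        for conn in list(self.connections):
            if now - conn.last_used > self.max_idle:
                conn.close()
                self.connections.remove(conn)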
def __init__(self, address, factory):
    """
    @param address: (host, port) tuple containing the host and port
    @param factory: protocol factory, here primarily a ClientFactory
    """
    Greenlet.__init__(self)
    self.address = address
    self.factory = factory
def __init__(self,port,factory,server_cls,port_type=""): """端口监听器 """ Greenlet.__init__(self) self.port = port self.factory = factory self.server_cls = server_cls self.port_type = port_type
def __init__(self, app):
    Greenlet.__init__(self)
    self.is_stopped = False
    self.app = app
    self.config = utils.update_config_with_defaults(
        app.config, self.default_config)
    available_service = [s.__class__ for s in self.app.services.values()]
    for r in self.required_services:
        assert r in available_service, (r, available_service)
def __init__(self, node, packet_type, callback, timeout=K_REQUEST_TIMEOUT):
    Greenlet.__init__(self)
    self._node = node
    self._packet_type = packet_type
    self._callback = callback
    self._timeout = timeout
    self._box = Queue()
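# Illustrative sketch, not from the source: a wait-greenlet like the one above
# typically blocks on its _box for a matching packet and fires the callback,
# or signals a timeout. gevent.queue.Queue.get raises Empty when the timeout
# expires.
from gevent.queue import Empty

def _run(self):
    try:
        packet = self._box.get(timeout=self._timeout)
    except Empty:
        self._callback(None)  # hypothetical "timed out" signal
    else:
        self._callback(packet)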
def __init__(self, index, command, arg):
    Greenlet.__init__(self)
    self.index = index
    self.command = command
    self.arg = arg
    try:
        self.iarg = int(arg)
    except ValueError:
        # leave self.iarg unset when arg is not numeric
        pass
def __init__(self, reactor, selectable, method):
    Greenlet.__init__(self)
    self.reactor = reactor
    self.selectable = selectable
    self.method = method
    self.wake = Event()
    self.wake.set()
    self.pause = self.wake.clear
    self.resume = self.wake.set
def __init__(self, skt, address, sessionno=0, maxSendlen=10 * 1024 * 1024):
    Greenlet.__init__(self)
    self.skt = skt
    self.address = address
    self.sessionno = sessionno
    self.inbox = queue.Queue()
    # Maximum amount of pending outgoing data; callers can tune this
    # parameter to change the size of the send buffer.
    self.maxSendlen = maxSendlen
    self.notSendlen = 0
    self.runing = True
def __init__(self, *args, **kwargs):
    Greenlet.__init__(self)
    # Save the constructor arguments
    # so we can recreate the Greenlet
    self.rsargs = args
    self.rskwargs = kwargs
    # Set up this Greenlet to use the restarter
    self.link(self.restart)
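# Sketch of the `restart` callback assumed by self.link() above; its body is
# not in the source. On death, recreate the greenlet from the saved
# constructor arguments and start the replacement.
def restart(self, source):
    replacement = type(self)(*self.rsargs, **self.rskwargs)
    replacement.start()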
def __init__(self, *args, **kwargs):
    Greenlet.__init__(self)
    Model.__init__(self, *args, **kwargs)
    docker_url = config.get('docker_url')
    self.container = api.DockerAPI(self.cid, docker_url)
    self._lock = Semaphore()
    self._lock.acquire()  # locking semaphore
    self._new_data = None
def __init__(self, server):
    Greenlet.__init__(self)
    self.server = server
    self.reporter = server.reporter
    self.config = server.config

    # lookup tables for finding trackers
    self.clients = {}
    self.addr_worker_lut = {}
    self.address_lut = {}
def __init__(self, a_job, job_holder, status_holder):
    """
    :type a_job: JobDetail
    :type job_holder: JobHolder
    :type status_holder: StatusHolder
    """
    Greenlet.__init__(self)
    self.a_job = a_job
    self.job_holder = job_holder
    self.status_holder = status_holder
def __init__(self, poll_interval=30, chunk_size=100):
    self.poll_interval = poll_interval
    self.chunk_size = chunk_size
    self.encoder = APIEncoder()
    self.transaction_pointer = None
    self.log = log.new(component='search-index')
    Greenlet.__init__(self)
def __init__(self, callback):
    """Stores the shutdown callback.

    Args:
        callback (function): the method to run on application shutdown;
            holds references to objects that this class doesn't
            necessarily know about (e.g. the cv2 cameras).
    """
    Greenlet.__init__(self)
    self.callback = callback
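# Illustrative sketch, not from the source: one plausible _run for the class
# above waits on an internal event and invokes the stored callback once
# shutdown is requested. The `_shutdown` event is a hypothetical name.
import gevent.event

def _run(self):
    self._shutdown = gevent.event.Event()
    self._shutdown.wait()  # block until some caller sets the event
    self.callback()        # run the registered cleanup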
def __init__(self, spider):
    Greenlet.__init__(self)
    self.fetcher_id = str(uuid.uuid1())[:8]
    self.TOO_LONG = 1048576  # 1 MB
    self.spider = spider
    self.fetcher_cache = self.spider.fetcher_cache
    self.crawler_cache = self.spider.crawler_cache
    self.fetcher_queue = self.spider.fetcher_queue
    self.crawler_queue = self.spider.crawler_queue
    self.logger = self.spider.logger
def __init__(self, server, **config):
    Greenlet.__init__(self)
    self._set_config(**config)
    self.logger = server.register_logger('jobbridge')
    self.stratum_manager = server.stratum_manager
    self.jobs = {}
    self.context = zmq.Context()
    self.solve_socket = self.context.socket(zmq.SUB)
    self.sub_socket = self.context.socket(zmq.REQ)
def __init__(self, sync_client, client, params):
    Greenlet.__init__(self)
    self.sync_client = sync_client
    self.resource = self.sync_client.resource
    self.retrievers_params = self.sync_client.retrievers_params
    self.adaptive = self.sync_client.adaptive
    self.client = sync_client.forward_client
    self.params = params
    self.exit_successful = False
    self.queue_priority = 1
def __init__(self, obj, cmd, params, name, cores=[0], nice=0):
    Greenlet.__init__(self)
    self._obj = obj
    self._nice = str(nice)
    self._cores = cores
    self._process = None
    self._keep_running = True
    self._cmd = cmd
    self._params = params
    self._name = name
def __init__(self, name: str, cookie: str) -> None:
    Greenlet.__init__(self)

    if Node.singleton is not None:
        raise NodeException("Singleton Node was already created")
    Node.singleton = self

    # Message queue based on ``gevent.Queue``. It is periodically checked
    # in the ``_run`` method and the receive handler is called.
    self.inbox_ = mailbox.Mailbox()

    # An internal counter used to generate unique process ids
    self.pid_counter_ = 0

    # Process dictionary which stores all the existing ``Process`` objects
    # addressable by a pid.
    #
    # .. note:: This creates a python reference to an
    #     object preventing its automatic garbage collection.
    #     In the end of its lifetime an object must be explicitly removed
    #     from this dictionary using ``Process.exit`` method on the
    #     process.
    self.processes_ = {}  # type: Dict[Pid, Process]

    # Registered objects dictionary, which maps atoms to pids
    self.reg_names_ = {}  # type: Dict[Atom, Pid]
    self.is_exiting_ = False

    # An option object with feature support flags packed into an
    # integer.
    self.node_opts_ = NodeOpts(cookie=cookie)

    # Node name as seen on the network. Use full node names here:
    # ``name@hostname``
    self.name_ = Atom(name)

    self.dist_nodes_ = {}  # type: Dict[str, Node]
    self.dist_ = ErlangDistribution(node=self, name=name)

    # This is important before we can begin spawning processes
    # to get the correct node creation
    self.dist_.connect(self)

    # Spawn and register (automatically) the process 'rex' for remote
    # execution, which takes 'rpc:call's from Erlang
    from Pyrlang.rex import Rex
    self.rex_ = Rex(self)
    self.rex_.start()

    # Spawn and register (automatically) the 'net_kernel' process which
    # handles special ping messages
    from Pyrlang.net_kernel import NetKernel
    self.net_kernel_ = NetKernel(self)
    self.net_kernel_.start()
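# Hypothetical usage (names assumed, not from the source): create the
# singleton node and start its greenlet.
node = Node(name="py@127.0.0.1", cookie="COOKIE")
node.start()  # Greenlet.start schedules the node's _run loop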
def __init__(self, client_socket: gevent._socket3.socket, clientid: int,
             max_bad_transactions: int = 5) -> None:
    Greenlet.__init__(self)
    self.client_socket = client_socket
    self.clientid = clientid  # type: int
    self.expected_packet_number = 1  # type: int
    self.max_bad_transactions = max_bad_transactions  # type: int
    self.bad_transaction_count = 0  # type: int
def __init__(self, app_eui, request_sid):
    """
    :param app_eui: bytes
    :param request_sid:
    :return:
    """
    # threading.Thread.__init__(self, daemon=True)
    Greenlet.__init__(self)
    self.app_eui = app_eui
    self.request_sid = request_sid
    self.msg = Msg(self.app_eui)
def __init__(self, peermgr, mempool, chaindb, netmagic):
    Greenlet.__init__(self)
    self.peermgr = peermgr
    self.mempool = mempool
    self.chaindb = chaindb
    self.netmagic = netmagic
    self.hash_continue = None
    self.ver_send = MIN_PROTO_VERSION

    # setup logging
    logging.basicConfig(level=logging.DEBUG)
    self.logger = logging.getLogger(__name__)
def __init__(self, account_id, namespace_id, folder_name, email_address,
             provider_name, syncmanager_lock, sync_signal):
    with session_scope(namespace_id) as db_session:
        try:
            folder = db_session.query(Folder). \
                filter(Folder.name == folder_name,
                       Folder.account_id == account_id).one()
        except NoResultFound:
            raise MailsyncError(
                u"Missing Folder '{}' on account {}".format(
                    folder_name, account_id))

        self.folder_id = folder.id
        self.folder_role = folder.canonical_name
        # Metric flags for sync performance
        self.is_initial_sync = folder.initial_sync_end is None
        self.is_first_sync = folder.initial_sync_start is None
        self.is_first_message = self.is_first_sync

    bind_context(self, 'foldersyncengine', account_id, self.folder_id)
    self.account_id = account_id
    self.namespace_id = namespace_id
    self.folder_name = folder_name
    self.email_address = email_address

    if self.folder_name.lower() == 'inbox':
        self.poll_frequency = INBOX_POLL_FREQUENCY
    else:
        self.poll_frequency = DEFAULT_POLL_FREQUENCY
    self.syncmanager_lock = syncmanager_lock
    self.state = None
    self.provider_name = provider_name
    self.last_fast_refresh = None
    self.flags_fetch_results = {}
    self.conn_pool = connection_pool(self.account_id)
    self.sync_signal = sync_signal

    self.state_handlers = {
        'initial': self.initial_sync,
        'initial uidinvalid': self.resync_uids,
        'poll': self.poll,
        'poll uidinvalid': self.resync_uids,
    }

    self.setup_heartbeats()
    Greenlet.__init__(self)

    # Some generic IMAP servers are throwing UIDVALIDITY
    # errors forever. Instead of resyncing those servers
    # ad vitam, we keep track of the number of consecutive
    # times we got such an error and bail out if it's higher than
    # MAX_UIDINVALID_RESYNCS.
    self.uidinvalid_count = 0
def __init__(self, skt, address, sessionno=0):
    """Basic connection channel.

    @param skt: socket instance through which all Transport communication
        takes place
    @param address: (host, port) tuple containing the host and port
    @param sessionno: int, a unique ID generated by the server
    """
    Greenlet.__init__(self)
    self.inbox = queue.Queue()
    self.skt = skt
    self.address = address
    self.sessionno = sessionno
def __init__(self, logger):
    Greenlet.__init__(self)
    self.logger = logger
    self._users_lock = RLock()
    self._msgs = {}
    self._users = {}
    self.send_queue = Queue()
    self.pending_online_users = Queue()
    self.bootstrap()
    self._dying = False
    self.start()
def __init__(self, name, context, topic=None):
    self.name = name
    self.topic = topic
    Greenlet.__init__(self)
    self.inbox = context.socket(zmq.SUB)
    self.inbox.setsockopt(zmq.SUBSCRIBE, "%s.inbox" % (name))
    # connect() requires a concrete address; the wildcard '*' is only
    # valid for bind()
    self.inbox.connect("tcp://localhost:10000")
    self.outbox = context.socket(zmq.PUB)
    self.outbox.connect("tcp://localhost:10000")
def __init__(self, name, context, number):
    Greenlet.__init__(self)
    self.name = name
    self.context = context
    self.number = number
    self.inbox = self.context.socket(zmq.PULL)
    self.inbox.bind("inproc://%s.inbox" % (name))
    self.outbox = self.context.socket(zmq.PUSH)
    self.outbox.bind("inproc://%s.outbox" % (name))
    self.block = gevent.event.Event()
    self.block.clear()
def __init__(self, account, heartbeat=1):
    bind_context(self, 'mailsyncmonitor', account.id)
    self.shutdown = event.Event()
    # how often to check inbox, in seconds
    self.heartbeat = heartbeat
    self.log = log.new(component='mail sync', account_id=account.id)
    self.account_id = account.id
    self.namespace_id = account.namespace.id
    self.email_address = account.email_address
    self.provider_name = account.provider

    Greenlet.__init__(self)
def __init__(self, parent_actor, factory, uri, node):
    Greenlet.__init__(self)
    if not callable(factory):  # pragma: no cover
        raise TypeError(
            "Provide a callable (such as a class, function or Props) as "
            "the factory of the new actor")
    self.factory = factory
    self.parent_actor = parent_actor
    self.uri = uri
    self.node = node
    self.queue = gevent.queue.Queue()
    self.inbox = deque()
def __init__(self):
    Greenlet.__init__(self)
    # from utils import ITIEvent
    # self.event = ITIEvent()
    from gevent.event import Event
    self.event = Event()
    self.msg_queue = []
    # This callback is called when the executive has completed a request.
    # It is called with these args:
    #     callback('message', *results)
    self.default_callback = lambda *a, **k: False
    self.state = 'initial'  # possible states: 'initial', 'connected'
def __init__(self, config_file, work_dir):
    Greenlet.__init__(self)
    self.config_file = os.path.join(work_dir, config_file)
    if not os.path.exists(self.config_file):
        self.config = {}
        self._save_config_file()
    self.config = json.load(open(self.config_file, 'r'))
    self.work_dir = work_dir

    context = beeswarm.shared.zmq_context
    self.config_commands = context.socket(zmq.REP)
    self.enabled = True
def __init__(self, redirect, refresh_time=0.3, extra_info=None, buffer=1024):
    Greenlet.__init__(self)
    BaseRedirector.__init__(self, redirect, refresh_time=refresh_time,
                            extra_info=extra_info, buffer=buffer,
                            selector=select)
def __init__(self, raidex_node, initial_price):
    """
    Args:
        raidex_node: the node that is used to place the orders
    """
    Greenlet.__init__(self)
    self.raidex_node = raidex_node
    self.initial_price = initial_price
    self.average_amount = int(10e18)  # average order amount
    self.average_frequency = 0.2  # trades per second
    # percentage the bot is willing to overpay to get its order filled
    # more quickly
    self.urgency = 0.02
    self.log = structlog.get_logger('bots.random_walker')