Example #1
class GeventedHTTPTransport(HTTPTransport):

    scheme = ['gevent+http', 'gevent+https']

    def __init__(self, parsed_url, maximum_outstanding_requests=100):
        if not gevented:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        self._lock = Semaphore(maximum_outstanding_requests)

        super(GeventedHTTPTransport, self).__init__(parsed_url)

        # remove the gevent+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]

    def send(self, data, headers):
        """
        Spawn an async request to a remote webserver.
        """
        # this can be optimized by making a custom self.send that does not
        # read the response since we don't use it.
        self._lock.acquire()
        return spawn(super(GeventedHTTPTransport, self).send, data,
                     headers).link(self._done, self)

    def _done(self, *args):
        self._lock.release()
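
The transport above caps concurrent sends by acquiring the semaphore before spawning and releasing it in the link() callback. A minimal, self-contained sketch of that same idiom, assuming only gevent (do_work and release_slot are illustrative names, not part of the transport):

import gevent
from gevent.lock import Semaphore

MAX_OUTSTANDING = 5
slots = Semaphore(MAX_OUTSTANDING)

def do_work(i):
    gevent.sleep(0.01)      # stand-in for the HTTP request
    return i

def release_slot(greenlet):
    slots.release()         # give the slot back once the request has finished

jobs = []
for i in range(20):
    slots.acquire()         # blocks while MAX_OUTSTANDING requests are in flight
    g = gevent.spawn(do_work, i)
    g.link(release_slot)    # link() fires whether the greenlet succeeds or fails
    jobs.append(g)

gevent.joinall(jobs)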
Example #2
    def __new__(cls, s):
        if s.id in sites:
            return sites[s.id]
        self = object.__new__(cls)
        sites[s.id] = self
        self.s = s
        self.connect()
        self._delay_on = Semaphore()

        self.controllers = set()
        self.envgroups = set()
        self.meters = {}
        for M in METERS:
            ml = set()
            self.meters[M.meter_type] = ml
            for d in getattr(self.s, M.meter_type + "_meters").all():
                ml.add(M(d))

        self.log("Startup")
        self.connect_monitors(do_controllers=False)
        signal.signal(signal.SIGINT, self.do_shutdown)
        signal.signal(signal.SIGTERM, self.do_shutdown)
        signal.signal(signal.SIGHUP, self.do_syncsched)

        self.running = True
        return self
Example #3
class GeventedHTTPTransport(HTTPTransport):

    scheme = ['gevent+http', 'gevent+https']

    def __init__(self, parsed_url, maximum_outstanding_requests=100):
        if not gevented:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        self._lock = Semaphore(maximum_outstanding_requests)

        super(GeventedHTTPTransport, self).__init__(parsed_url)

        # remove the gevent+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]

    def send(self, data, headers):
        """
        Spawn an async request to a remote webserver.
        """
        # this can be optimized by making a custom self.send that does not
        # read the response since we don't use it.
        self._lock.acquire()
        return spawn(super(GeventedHTTPTransport, self).send, data, headers).link(self._done, self)

    def _done(self, *args):
        self._lock.release()
Example #4
 def __init__(self, svc):
     # maxsize - maximum buffer size; anything beyond the limit is treated as an invalid decode
     self.buf = ''
     self.conn = None
     self.svc = svc
     self.mtx = Semaphore()
     self.msgcnt = 0  # received-message counter
Example #5
 def __init__(self, url, bucket, password="", verbose=False):
     self.log = logger.logger("VBucketAwareMemcachedClient")
     self.bucket = bucket
     self.rest_username = bucket
     self.rest_password = password
     self._memcacheds = {}
     self._vBucketMap = {}
     self._vBucketMap_lock = Lock()
     self._vBucketMapFastForward = {}
     self._vBucketMapFastForward_lock = Lock()
     #TODO: use regular expressions to parse the url
     server = {}
     if not bucket:
         raise InvalidArgumentException("bucket can not be an empty string", parameters="bucket")
     if not url:
         raise InvalidArgumentException("url can not be an empty string", parameters="url")
     if url.find("http://") != -1 and url.rfind(":") != -1 and url.find("/pools/default") != -1:
         server["ip"] = url[url.find("http://") + len("http://"):url.rfind(":")]
         server["port"] = url[url.rfind(":") + 1:url.find("/pools/default")]
         server["username"] = self.rest_username
         server["password"] = self.rest_password
     self.servers = [server]
     self.servers_lock = Lock()
     self.rest = RestConnection(server)
     self.reconfig_vbucket_map()
     self.init_vbucket_connections()
     self.dispatcher = CommandDispatcher(self)
     self.dispatcher_thread = Process(name="dispatcher-thread", target=self._start_dispatcher)
     self.dispatcher_thread.daemon = True
     self.dispatcher_thread.start()
     self.streaming_thread = Process(name="streaming", target=self._start_streaming, args=())
     self.streaming_thread.daemon = True
     self.streaming_thread.start()
     self.verbose = verbose
Example #6
class MediaCodec_Base:	
	def __init__(self,svc):
		# maxsize - maximum buffer size; anything beyond the limit is treated as an invalid decode
		self.buf =''
		self.conn = None
		self.svc = svc
		self.mtx = Semaphore()
		self.msgcnt = 0  # received-message counter

	def parseMessage(self,s):
		pass


	# enqueue incoming data; queue length and data validity should be checked
	def queueIn(self,s,conn):
		self.mtx.acquire()
#		self.buflist.append(s)
		self.buf+=s
		self.mtx.release()
		return True


	
	def decode(self):
		# @return (packets, retry) - retry indicates a decode error or invalid data
		return (),False


	# pick out gps/alarm messages from the incoming message: MsgAoModule_Alarm(), MsgAoModule_GpsData()
	def filter_msg(self,m,aom):
		# GPS/ALARM messages are the ones we care about; they must be relayed to the client
		return (m,)
	
	def command(self,aom,m):
		pass

	def save(self,aom,m):
		'''
			Save all device information, regardless of message type.
		'''
		try:
			cm = aom.ao.cm
			params= m.params
			if isinstance(m.params,dict):
				params = json.dumps(m.params)
			log = cm.AO_ModuleLog()
			log.ao = aom.ao.r
			log.module = aom.r
			log.type = ModuleMsgType.DEV2SYS
			log.time = datetime.datetime.now()
			log.msgtype = 'N/A'
			log.params = params
			log.rawmsg = m.rawmsg
			log.seq = 0
			log.save()
			return True
		except:
			traceback.print_exc()
			return False
Example #7
class Client(object):
    px_per_tick = 100
    
    def __init__(self, canvas):
        self.canvas = canvas
        self.socket = None
        self.connect_ts = time.time()
        # This buffer discards all but the newest 1024 messages
        self.sendbuffer = deque([], 1024)
        # And this is used to limit clients to X messages per tick
        # We start at 0 (instead of x) to add a reconnect-penalty.
        self.limit = Semaphore(0)
        self.lock = RLock()

    def send(self, line):
        self.sendbuffer.append(line.strip() + '\n')

    def nospam(self, line):
        if not self.sendbuffer:
            self.sendbuffer.append(line.strip() + '\n')

    def disconnect(self):
        with self.lock:
            if self.socket:
                socket = self.socket
                self.socket = None
                socket.close()
                log.info('Disconnect')

    def serve(self, socket):
        with self.lock:
            self.socket = socket
            sendall = self.socket.sendall
            readline = self.socket.makefile().readline

        try:
            while self.socket:
                self.limit.acquire()
                # Idea: send first, receive later. If the client is too
                # slow to drain the send-buffer, it cannot send.
                while self.sendbuffer:
                    sendall(self.sendbuffer.popleft())
                line = readline().strip()
                if not line:
                    break
                arguments = line.split()
                command = arguments.pop(0)
                try:
                    self.canvas.fire('COMMAND-%s' % command.upper(), self, *arguments)
                except Exception, e:
                    socket.send('ERROR %r :(' % e)
                    break
        finally:
            self.disconnect()

    def tick(self):
        while self.limit.counter <= self.px_per_tick:
            self.limit.release()
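
Here Semaphore(0) acts as a budget of messages per tick: serve() spends one token per command and tick() refills the budget. A stripped-down sketch of that token idea, assuming only gevent (worker and refill are illustrative names):

import gevent
from gevent.lock import Semaphore

tokens = Semaphore(0)           # start empty: no messages allowed before the first tick

def worker(units):
    for i in range(units):
        tokens.acquire()        # one token per processed message
        print('processed message', i)

def refill(per_tick, ticks):
    for _ in range(ticks):
        gevent.sleep(0.1)       # the "tick"
        for _ in range(per_tick):
            tokens.release()

gevent.joinall([gevent.spawn(worker, 6), gevent.spawn(refill, 3, 2)])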
Example #8
 def test_release_twice(self):
     s = Semaphore()
     result = []
     s.rawlink(lambda s: result.append('a'))
     s.release()
     s.rawlink(lambda s: result.append('b'))
     s.release()
     gevent.sleep(0.001)
     self.assertEqual(result, ['a', 'b'])
Example #9
class GeventSemaphore(BaseSemaphore):
    def __init__(self):
        self.__semaphore = Semaphore()

    def acquire(self):
        self.__semaphore.acquire()

    def release(self):
        self.__semaphore.release()
Example #10
    def __init__(self, parsed_url, maximum_outstanding_requests=100):
        if not gevented:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        self._lock = Semaphore(maximum_outstanding_requests)

        super(GeventedHTTPTransport, self).__init__(parsed_url)

        # remove the gevent+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]
Example #11
        def test():
            s = Semaphore(0)
            future1 = yield batchy_gevent.greenlet_future(gevent.spawn(acq, s))
            future2 = yield batchy_gevent.greenlet_future(gevent.spawn(acq, s))

            s.release()
            yield future1
            s.release()
            yield future2
Example #12
def handle_socket(sock, address):
    semaphore = Semaphore()
    while 1:
        sock.setblocking(0)
        semaphore.acquire()
        sockfd = sock.makefile()
        chunk_size = 1024
        head_packet_format = "!LL128s128sL"
        head_packet_size = struct.calcsize(head_packet_format)
        data = sockfd.read(head_packet_size)
        if not data or len(data) != head_packet_size:
            return
        filepath_len, filename_len, filepath,filename, filesize = struct.unpack(head_packet_format,data)
        filepath = filepath[:filepath_len]
        filename = filename[:filename_len]
        #logger.debug("update file: %s" % filepath + '/' + filename)
        
        fd = open(filename,'wb')
        fcntl.flock(fd,fcntl.LOCK_EX)
        print "File %s size: %s" % (filename, filesize)
        print 111111111111111
        writen_size = 0
        if filesize > chunk_size:
            times = filesize / chunk_size
            first_part_size = times * chunk_size
            second_part_size = filesize % chunk_size
            print "times: %s  first_part_size:%s  second_part_size:%s" % (times,first_part_size,second_part_size)
            print 22222222222222222222
            #receive first part packets
            while 1:
                data = sockfd.read(chunk_size)
                fd.write(data)
                fd.flush()
                writen_size += len(data)
                if writen_size == first_part_size:
                    break
            print "writen_size in first_par: %s" % writen_size
            print 333333333333333333333
            if second_part_size:
                #receive the packet at last
                data = sockfd.read(second_part_size)
                fd.write(data)
                fd.flush()
                writen_size += len(data)
            print 4444444444444444444444
        else:
            data = sockfd.read(filesize)
            fd.write(data)
            fd.flush()
            writen_size += len(data)
            
        fcntl.flock(fd,fcntl.LOCK_UN)
        fd.close()
        print '555555555555555555555'
        print "File %s size: %s\n" % (filename, writen_size)
        semaphore.release()
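
Note that the semaphore in handle_socket() is created inside the handler, so every connection gets its own instance and the acquire()/release() pair never contends with anything. A sketch of what a genuinely shared limit could look like, assuming gevent's StreamServer (handle and upload_slots are illustrative names):

from gevent.lock import Semaphore
from gevent.server import StreamServer

upload_slots = Semaphore(4)         # shared by every connection

def handle(sock, address):
    with upload_slots:              # at most 4 uploads are processed at once
        data = sock.makefile('rb').read(1024)
        # ... write the received chunk to disk ...

# server = StreamServer(('0.0.0.0', 6000), handle)
# server.serve_forever()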
Example #13
    def __init__(self, *args, **kwargs):
        Greenlet.__init__(self)
        Model.__init__(self, *args, **kwargs)

        docker_url = config.get('docker_url')
        self.container = api.DockerAPI(self.cid, docker_url)

        self._lock = Semaphore()
        self._lock.acquire()  # locking semaphore
        self._new_data = None
Example #14
 def __init__(self):
     self.__db = SQ.connect(":memory:")
     self.cur = self.__db.cursor()
     self.cur.execute(
         "CREATE TABLE hosts (id TEXT, addr TEXT, hostname TEXT)")
     self.cur.execute(
         "CREATE TABLE workers (id TEXT, host_id TEXT, service TEXT, addr TEXT, pid INT, online BOOLEAN)"
     )
     self.cur.execute("CREATE TABLE services (host_id TEXT, name TEXT)")
     self.SEMA = Semaphore()
Example #15
class GeventSemaphore(BaseSemaphore):

    def __init__(self):
        self.__semaphore = Semaphore()

    def acquire(self):
        self.__semaphore.acquire()

    def release(self):
        self.__semaphore.release()
Example #16
class MediaCodec_Base:
    def __init__(self, svc):
        # maxsize - maximum buffer size; anything beyond the limit is treated as an invalid decode
        self.buf = ''
        self.conn = None
        self.svc = svc
        self.mtx = Semaphore()
        self.msgcnt = 0  # received-message counter

    def parseMessage(self, s):
        pass

    # enqueue incoming data; queue length and data validity should be checked
    def queueIn(self, s, conn):
        self.mtx.acquire()
        #		self.buflist.append(s)
        self.buf += s
        self.mtx.release()
        return True

    def decode(self):
        # @return (packets, retry) - retry indicates a decode error or invalid data
        return (), False

    # pick out gps/alarm messages from the incoming message: MsgAoModule_Alarm(), MsgAoModule_GpsData()
    def filter_msg(self, m, aom):
        # GPS/ALARM messages are the ones we care about; they must be relayed to the client
        return (m, )

    def command(self, aom, m):
        pass

    def save(self, aom, m):
        '''
        Save all device information, regardless of message type.
        '''
        try:
            cm = aom.ao.cm
            params = m.params
            if isinstance(m.params, dict):
                params = json.dumps(m.params)
            log = cm.AO_ModuleLog()
            log.ao = aom.ao.r
            log.module = aom.r
            log.type = ModuleMsgType.DEV2SYS
            log.time = datetime.datetime.now()
            log.msgtype = 'N/A'
            log.params = params
            log.rawmsg = m.rawmsg
            log.seq = 0
            log.save()
            return True
        except:
            traceback.print_exc()
            return False
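
Since gevent's Semaphore is also a context manager, the explicit acquire()/release() pair in queueIn() could be written as a with block, which releases the lock even if the body raises. A small sketch of that variant (queue_in here is an illustrative stand-alone function, not the method above):

from gevent.lock import Semaphore

mtx = Semaphore()
buf = ''

def queue_in(chunk):
    global buf
    with mtx:               # acquired on entry, released on exit, even on error
        buf += chunk
    return True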
Example #17
 def __init__(self, size=None, greenlet_class=None):
     if size is not None and size < 0:
         raise ValueError('size must not be negative: %r' % (size, ))
     Group.__init__(self)
     self.size = size
     if greenlet_class is not None:
         self.greenlet_class = greenlet_class
     if size is None:
         self._semaphore = DummySemaphore()
     else:
         self._semaphore = Semaphore(size)
Example #18
 def __init__(self, size=None, greenlet_class=None):
     if size is not None and size < 0:
         raise ValueError('Invalid size for pool (positive integer or None required): %r' % (size, ))
     Group.__init__(self)
     self.size = size
     if greenlet_class is not None:
         self.greenlet_class = greenlet_class
     if size is None:
         self._semaphore = DummySemaphore()
     else:
         self._semaphore = Semaphore(size)
Example #19
 def __init__(self, size=None, greenlet_class=None):
     if size is not None and size < 0:
         raise ValueError('size must not be negative: %r' % (size, ))
     Group.__init__(self)
     self.size = size
     if greenlet_class is not None:
         self.greenlet_class = greenlet_class
     if size is None:
         self._semaphore = DummySemaphore()
     else:
         self._semaphore = Semaphore(size)
Example #20
 def __init__(self,
              dsn,
              max_con=10,
              max_idle=3,
              connection_factory=RealDictConnection):
     self.dsn = dsn
     self.max_con = max_con
     self.max_idle = max_idle
     self.connection_factory = connection_factory
     self._sem = Semaphore(max_con)
     self._free = []
     self._local = gevent_local()
Example #21
    def __init__(self, engine, min_sessions=10, max_sessions=25):
        self.engine = engine
        sm = sessionmaker(autoflush=False, autocommit=False, bind=engine)
        self.session = scoped_session(sm)

        self.min_sessions = min_sessions
        self.max_sessions = max_sessions

        self.session_pool = []
        self.available = []
        self.checkouts = {}
        self.sessions = local()
        self.lock = Semaphore()
Example #22
 def __init__(self, size=None, greenlet_class=None):
     if size is not None and size < 1:
         raise ValueError(
             'Invalid size for pool (positive integer or None required): %r'
             % (size, ))
     Group.__init__(self)
     self.size = size
     if greenlet_class is not None:
         self.greenlet_class = greenlet_class
     if size is None:
         self._semaphore = DummySemaphore()
     else:
         self._semaphore = Semaphore(size)
Example #23
class imap_manager(object):
    def __init__(self):
        from gevent.coros import Semaphore
        self.client = None
        self.sem = Semaphore(1)
        self.count = 0

    def close(self, current):
        import gevent
        gevent.sleep(360)
        self.sem.acquire()
        if self.client is not None and self.count == current:
            self.client.close()
            self.client = None
        self.sem.release()

    @contextmanager
    def get(self):
        import gevent
        self.count += 1
        self.sem.acquire()
        self.count += 1
        if self.client is None:
            from rbit import config
            from rbit import backend
            from rbit import imap
            cfg = config.Config('config', backend.create_session)
            self.client = imap.IMAPClient.from_config(cfg)
        yield self.client
        self.sem.release()
        gevent.spawn(lambda : self.close(self.count))
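
The manager above serializes access to a lazily created shared client with a Semaphore(1). The same idea as a generic sketch (get_connection and make_client are illustrative names; the finally clause is an addition so the lock is released even on error):

from contextlib import contextmanager
from gevent.lock import Semaphore

_sem = Semaphore(1)
_client = None

def make_client():
    return object()         # stand-in for an expensive connection

@contextmanager
def get_connection():
    global _client
    _sem.acquire()
    try:
        if _client is None:
            _client = make_client()
        yield _client
    finally:
        _sem.release()      # released even if the caller's block raises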
Example #24
class Pool(Group):
    def __init__(self, size=None, greenlet_class=None):
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self):
        self._semaphore.wait()

    def full(self):
        return self.free_count() <= 0

    def free_count(self):
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def add(self, greenlet):
        self._semaphore.acquire()
        try:
            Group.add(self, greenlet)
        except:
            self._semaphore.release()
            raise

    def _discard(self, greenlet):
        Group._discard(self, greenlet)
        self._semaphore.release()
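
In practice the semaphore inside Pool is what makes add() and spawn() block once size greenlets are active. A tiny usage sketch, assuming the stock gevent.pool.Pool (fetch is an illustrative name):

import gevent
from gevent.pool import Pool

def fetch(i):
    gevent.sleep(0.01)      # stand-in for real work
    return i

pool = Pool(3)              # internally a Semaphore(3) gates add()/spawn()
results = pool.map(fetch, range(10))    # never more than 3 fetches in flight
print(results)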
Example #25
class SessionPool(object):
    """
    This class is intended to allow for the model to be used in a threaded
    environment.
    """
    def __init__(self, engine, min_sessions=10, max_sessions=25):
        self.engine = engine
        sm = sessionmaker(autoflush=False, autocommit=False, bind=engine)
        self.session = scoped_session(sm)

        self.min_sessions = min_sessions
        self.max_sessions = max_sessions

        self.session_pool = []
        self.available = []
        self.checkouts = {}
        self.sessions = local()
        self.lock = Semaphore()

    def checkin(self):
        self.lock.acquire()
        try:
            session = self.sessions.session
            if session:
                session.close()
        finally:
            self.lock.release()

    def checkout(self):
        self.lock.acquire()
        try:
            session = self.sessions.session = self.session()
            return session
        finally:
            self.lock.release()
Example #26
def websocket(token):
    if request.environ.get('wsgi.websocket'):
        email = database_helper.get_active(token)
        if email is None:
            pass  # return json.dumps({"success": False, "message": "You are not signed in."})
        else:
            websocket = request.environ['wsgi.websocket']
            sema = Semaphore(0)
            websockets[email] = {"websocket": websocket, "sema": sema}
            print "websocket(): waiting at sema for [" + email + "]"
            sema.acquire()
            print "websocket(): sema for [" + email + "] passed"
#            return json.dumps({"success": True, "message": "Websocket connected."})
    return "websocket(): done"
Example #27
class HitCounter(object):
    
    def __init__(self):
        self.value = 0
        self.lock = Semaphore(1)
    
    def __str__(self):
        return str(self.value)

    def increment(self):
        try:
            self.lock.acquire()
            self.value += 1
        finally:
            self.lock.release()
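
A quick usage sketch for the counter above (assuming HitCounter is in scope as defined). Strictly speaking the increment never yields to the hub, so in pure gevent a bare += 1 would already be greenlet-safe; the lock documents intent and protects the critical section if it ever grows a blocking call:

import gevent

counter = HitCounter()
gevent.joinall([gevent.spawn(counter.increment) for _ in range(100)])
print(counter)              # -> 100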
Example #28
 def __init__(self, oc_gateway):
     self._oc_gateway = oc_gateway
     self._model_settings_update_in_progress = Semaphore()
     self._vol_curves = {}
     self._parent_symbol_to_sandbox_symbol_dict = {}
     self._parent_month_id_to_sandbox_month_id_dict = {}
     self._vol_curve_factory = VolCurveFactory(oc_gateway)
Example #29
 def __init__(self, socket, environ):
     self.origin = environ.get('HTTP_ORIGIN')
     self.protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL')
     self.path = environ.get('PATH_INFO')
     self.fobj = socket.makefile()
     self._writelock = Semaphore(1)
     self._write = socket.sendall
Example #30
	def __init__(self,svc):
		# maxsize - maximum buffer size; anything beyond the limit is treated as an invalid decode
		self.buf =''
		self.conn = None
		self.svc = svc
		self.mtx = Semaphore()
		self.msgcnt = 0  # received-message counter
Example #31
 def __init__(self, sock, environ):
     self.rfile = sock.makefile('rb', -1)
     self.socket = sock
     self.origin = environ.get('HTTP_ORIGIN')
     self.protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'unknown')
     self.path = environ.get('PATH_INFO')
     self._writelock = Semaphore(1)
Example #32
 def _reset(self):
     '''
     reset all the variables used for keeping track of internal state
     '''
     # a list of Character() objects
     self.results = []
     # a list of strings
     self.str_results = []
     # character generators take care of building the Character objects; we need one per row
     self.char_gens = []
     #a queue for communications between Character()s and request_makers
     self.q = Queue()
     #"threads" that run the Character()s
     self.character_pool = Pool(self.concurrency)
     #"threads" that make requests
     self.request_makers = [gevent.spawn(self._request_maker) for i in range(self.concurrency)]
     #fire this event when shutting down
     self.shutting_down = Event()
     #do we need to add more rows?
     self.need_more_rows = True
     #use this as a lock to know when not to mess with self.results        
     self.results_lock = Semaphore(1)
     #request_count is the number of requests made on the current run
     self.request_count = 0
     # failure_count is the number of failed requests on the current run
     self.failure_count = 0
Example #33
 def _init_instance(cls):
     _worker = SidManager.SidWorker()
     _instance = cls(_worker)
     _instance.sid_dict = dict()
     _instance.semaphore = Semaphore()
     _instance.worker.sid_mgr = _instance
     return _instance
Example #34
    def __init__(self, local_listener_ip, local_listener_port, lwm2m_server_ip,
                 lwm2m_server_port, local_client_ip, local_client_port):

        self.ep_location_mapping = {}
        self.total_resources = {}
        self.res_dict = {}
        self.lwm2m_dm_server_ip = lwm2m_server_ip
        self.lwm2m_dm_server_port = lwm2m_server_port
        self.sem = Semaphore()
        self.local_listener_ip = local_listener_ip
        self.local_listener_port = local_listener_port
        self.local_client_ip_ = local_client_ip
        self.local_client_port = local_client_port  #local_client_port
        #self.local_client_port_end = local_client_port_end  #local_client_port
        self.dispatcher = EventDispatcher()
        self.lwm2m_resources = LWM2MResourceTree(self.dispatcher)
        self.registration = Registration(self.lwm2m_resources)
        self.read = Read(self.lwm2m_resources)
        self.write = Write(self.lwm2m_resources)
        self.write_attributes = WriteAttributes(self.lwm2m_resources)
        self.create_object_instance = Create(self.lwm2m_resources)
        self.observation = ObservationNotificationEngine(
            self.lwm2m_resources, self.dispatcher)
        self.execution = Execution(self.lwm2m_resources)
        self.discover = Discovery(lwm2m_resources=self.lwm2m_resources)

        self.observation_started = False
Example #35
	def __new__(cls,s):
		if s.id in sites:
			return sites[s.id]
		self = object.__new__(cls)
		sites[s.id] = self
		self.s = s
		self.connect()
		self._delay_on = Semaphore()

		self.controllers = set()
		self.envgroups = set()
		self.meters = {}
		for M in METERS:
			ml = set()
			self.meters[M.meter_type] = ml
			for d in getattr(self.s,M.meter_type+"_meters").all():
				ml.add(M(d))

		self.log("Startup")
		self.connect_monitors(do_controllers=False)
		signal.signal(signal.SIGINT,self.do_shutdown)
		signal.signal(signal.SIGTERM,self.do_shutdown)
		signal.signal(signal.SIGHUP,self.do_syncsched)

		self.running = True
		return self
Example #36
 def __init__(self, fobj, environ):
     self.origin = environ.get('HTTP_ORIGIN')
     self.protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL')
     self.path = environ.get('PATH_INFO')
     self._writelock = Semaphore(1)
     self.fobj = fobj
     self._write = _get_write(fobj)
Example #37
    def __init__(self, svr, sock, uid, size=None):
        if 0:
            self.svr = RpcServer()
        self.svr = svr
        #self._pool = Pool(size=size)
        self.sock = sock
        if isinstance(svr, RpcClient):
            self.sock_addr = svr.addr
        else:
            self.sock_addr = self.sock.getpeername()
        self.uid = str(uid)
        if len(self.uid) != self.UID_LEN:
            raise ValueError, 'uid length error: len(uid)=%d <> %d' % (
                len(uid), self.UID_LEN)

        self._slock = Semaphore()
        self._reconnected = None
        self.reconnect_timeout = RECONNECT_TIMEOUT
        #self.iter_id = itertools.cycle(xrange(MAX_INDEX))
        self._next_id = 0
        self._resps = {}
        self._proxys = WeakValueDictionary()
        self.stoped = True
        self.sock_error = False
        if HEARTBEAT_TIME > 0:
            self._heart_time = time.time()
            self._heart_task = spawn(self.heartbeat)
        self.shells = {}
Example #38
    def __init__(self, worker_id):
        # used in process of selecting jobs
        self.own_async_id = worker_id
        # database setup
        dbb = settings.ASYNC_DB_BACKEND
        if dbb=="sqlite":
            from db.sqlite import SQLiteDatabase
            self.DB = SQLiteDatabase( worker_id )
        else:
            raise Exception("Unknown database backend defined in configuration: %r" % dbb)
        # serializer / deserializer
        self.serializer = Serializer()
        # caller
        self.PROXY = RawProxy()
        self._processing = True

        self.SEMA = Semaphore()
Example #39
    def __init__(self, parsed_url, maximum_outstanding_requests=100):
        if not gevented:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        self._lock = Semaphore(maximum_outstanding_requests)

        super(GeventedHTTPTransport, self).__init__(parsed_url)

        # remove the gevent+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]
Example #40
 def _start(self):
     self.sem = Semaphore()
     self.sem_counter = 0
     self.set_configurations()
     self.api.run_task(self.create_server)
     self.subscribe_nscl()
     self.api.run_task(self.subscribe_dm_server)
     if self.config["enable_test"]:
         pass
         # self.api.run_task(self.send_execute_command)
         # Uncomment to check these operations
         # self.api.run_task(self.send_specific_observation)
         # self.api.run_task(self.send_specific_observation1)
         # self.api.run_task(self.send_cancel_observation)
         #self.api.run_task(self.send_discover_resources)
         #self.api.run_task(self.send_write_attributes)
         #self.api.run_task(self.send_create)
     self._started()
Example #41
 def __init__(self, fobj, environ):
     self.origin = environ.get('HTTP_SEC_WEBSOCKET_ORIGIN')
     self.protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'unknown')
     self.path = environ.get('PATH_INFO')
     self._chunks = bytearray()
     self._first_opcode = None
     self._writelock = Semaphore(1)
     self.fobj = fobj
     self._write = _get_write(fobj)
Example #42
 def __init__(self,
              store,
              relay,
              backoff=None,
              bounce_factory=None,
              store_pool=None,
              relay_pool=None):
     super(Queue, self).__init__()
     self.store = store
     self.relay = relay
     self.backoff = backoff or self._default_backoff
     self.bounce_factory = bounce_factory or Bounce
     self.wake = Event()
     self.queued = []
     self.queued_lock = Semaphore(1)
     self.queue_policies = []
     self._use_pool('store_pool', store_pool)
     self._use_pool('relay_pool', relay_pool)
Example #43
    def __init__(self, url, protocols=None, version='13'):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        ThreadedClient.__init__(self, url, protocols=protocols, version=version, sock=sock)

        self._lock = Semaphore()
        self._th = Greenlet(self._receive)
        self._messages = Queue()

        self.extensions = []
Example #44
 def __init__(self, minimal_precision=None, max_track=None):
     if minimal_precision is None:
         minimal_precision = config.default_precision
     if max_track is None:
         max_track = config.default_track
     self.min_prec = minimal_precision
     self.max_track = max_track
     self.registred = deque()
     self.lock = Semaphore(1)
Example #45
 def initialize(self, *args, **kwargs):
     #TODO: Add documentation
     #TODO: This should put the DataHandler back into an 'unconfigured' state
     """
     Called from:
                   InstrumentAgent._handler_idle_reset
                   InstrumentAgent._handler_idle_go_inactive
                   InstrumentAgent._handler_stopped_reset
                   InstrumentAgent._handler_stopped_go_inactive
                   InstrumentAgent._handler_observatory_reset
                   InstrumentAgent._handler_observatory_go_inactive
                   InstrumentAgent._handler_uninitialized_initialize
                   |--> ExternalDataAgent._start_driver
     """
     log.debug('Initializing DataHandler...')
     self._glet_queue = []
     self._semaphore = Semaphore()
     return None
Example #46
    def __init__(self, *args, **kwargs):
        Greenlet.__init__(self)
        Model.__init__(self, *args, **kwargs)

        docker_url = config.get('docker_url')
        self.container = api.DockerAPI(self.cid, docker_url)

        self._lock = Semaphore()
        self._lock.acquire() # locking semaphore
        self._new_data = None
Example #47
 def __init__(self, canvas):
     self.canvas = canvas
     self.socket = None
     self.connect_ts = time.time()
     # This buffer discards all but the newest 1024 messages
     self.sendbuffer = deque([], 1024)
     # And this is used to limit clients to X messages per tick
     # We start at 0 (instead of x) to add a reconnect-penalty.
     self.limit = Semaphore(0)
     self.lock = RLock()
Example #48
 def __new__(cls, v):
     if v.id in valves:
         return valves[v.id]
     self = object.__new__(cls)
     valves[v.id] = self
     self.v = v
     self.site = SchedSite(self.v.controller.site)
     self.env = EnvGroup(self.v.envgroup)
     self.controller = SchedController(self.v.controller)
     self.sched_lock = Semaphore()
     if self.site.qb:
         try:
             self.site.send_command("set", "output", "off",
                                    *(self.v.var.split()))
         except NotConnected:
             pass
         except Exception as e:
             raise RuntimeError(self.v.var) from e
     return self
Example #49
 def __init__(self, store, relay, backoff=None, bounce_factory=None,
                    store_pool=None, relay_pool=None):
     super(Queue, self).__init__()
     self.store = store
     self.relay = relay
     self.backoff = backoff or self._default_backoff
     self.bounce_factory = bounce_factory or Bounce
     self.wake = Event()
     self.queued = []
     self.queued_lock = Semaphore(1)
     self.queue_policies = []
     self._use_pool('store_pool', store_pool)
     self._use_pool('relay_pool', relay_pool)
Example #50
    def __init__(self, engine, min_sessions=10, max_sessions=25):
        self.engine = engine
        sm = sessionmaker(autoflush=False, autocommit=False, bind=engine)
        self.session = scoped_session(sm)

        self.min_sessions = min_sessions
        self.max_sessions = max_sessions

        self.session_pool = []
        self.available = []
        self.checkouts = {}
        self.sessions = local()
        self.lock = Semaphore()
Example #51
    def __init__(self, stream_registrar, dh_config):
        self._polling = False           #Moved these four variables so they're instance variables, not class variables
        self._polling_glet = None
        self._dh_config = {}
        self._terminate_polling = None
        self._params = {
            'POLLING_INTERVAL' : 3600,
            'PATCHABLE_CONFIG_KEYS' : ['stream_id','constraints']
        }

        self._dh_config=dh_config
        self._stream_registrar = stream_registrar

        self._semaphore=Semaphore()
Example #52
	def run_every(self,delay):
		"""Initiate running the calculation and scheduling loop every @delay seconds."""

		if self._run_delay is not None:
			self._run_delay = delay # just update
			return
		self._run_delay = delay
		self._run_last = now()
		self._running = Semaphore()
		self._run_result = None
		sd = self._run_delay.total_seconds()/10
		if sd < 66: sd = 66
		self._run = gevent.spawn_later(sd, connwrap,self.run_main_task, kill=False)
		if self._sched is not None:
			self._sched.kill()
		self._sched = gevent.spawn_later(2, connwrap,self.run_sched_task, kill=False, reason="run_every")
Example #53
	def __new__(cls,v):
		if v.id in valves:
			return valves[v.id]
		self = object.__new__(cls)
		valves[v.id] = self
		self.v = v
		self.site = SchedSite(self.v.controller.site)
		self.env = EnvGroup(self.v.envgroup)
		self.controller = SchedController(self.v.controller)
		self.sched_lock = Semaphore()
		if self.site.ci:
			try:
				self.site.send_command("set","output","off",*(self.v.var.split()))
			except NotConnected:
				pass
		return self
Example #54
class BaseSensor(Greenlet, Model):
    """
    Base class for sensors.
    Each sensor is associated with a Docker container by its
    cid (or name, if you prefer).
    A sensor has to be started with its `start` method and, eventually,
    stopped with its `kill` method.

    Every sensor provides collected data by means of the
    `get_data` method, which returns a generator.
    Each call to `next` blocks for the time declared
    in the sensor's `__init__` through the `spacing` parameter.
    """
    cid = StringType(required=True)
    spacing = FloatType(default=0.1)

    @serializable
    def uid(self):
        return get_uid(self.__class__, self.cid)

    def __init__(self, *args, **kwargs):
        Greenlet.__init__(self)
        Model.__init__(self, *args, **kwargs)

        docker_url = config.get('docker_url')
        self.container = api.DockerAPI(self.cid, docker_url)

        self._lock = Semaphore()
        self._lock.acquire() # locking semaphore
        self._new_data = None

    @raise_connection_error
    def _run(self):
        while True:
            self._new_data = self._get()
            LOG.debug("{} got {}".format(self.uid, self._new_data))
            self._store(self._new_data)
            self._lock.release()
            sleep(self.spacing)

    def _get(self):
        """Override"""
        return None

    def _store(self, data):
        """Override"""
        pass

    def get_data(self):
        while True:
            if not self.started or self.dead:
                raise SensorError("Start the sensor before getting data.")
            self._lock.acquire()
            yield self._new_data
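
BaseSensor hands data from _run() to get_data() through a semaphore that starts acquired: the producer releases after publishing, and the consumer blocks on acquire() until then. The handoff in miniature, assuming only gevent (producer and consume are illustrative names):

import gevent
from gevent.lock import Semaphore

lock = Semaphore()
lock.acquire()              # start "empty", like BaseSensor.__init__
latest = {'data': None}

def producer():
    for i in range(3):
        gevent.sleep(0.05)  # stand-in for polling the container
        latest['data'] = i
        lock.release()      # signal: new data is available

def consume():
    for _ in range(3):
        lock.acquire()      # block until the producer has published
        print('got', latest['data'])

gevent.joinall([gevent.spawn(producer), gevent.spawn(consume)])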
Example #55
    def __init__(self, dh_config):
        """
        Constructor for all data handlers.


        @param dh_config: Dictionary containing configuration parameters for the data handler
        """
        self._polling = False  # Moved these four variables so they're instance variables, not class variables
        self._polling_glet = None
        self._dh_config = {}
        self._terminate_polling = None
        self._acquiring_data = None
        self._params = {"POLLING_INTERVAL": 30, "PATCHABLE_CONFIG_KEYS": ["stream_id", "constraints", "stream_route"]}

        self._dh_config = dh_config

        self._semaphore = Semaphore()
Example #56
    def __init__(self, cps=None, cpm=None, cph=None, cpd=None, max_wait=None):
        self.__max_wait = max_wait
        self.__limits = {}

        if cps is not None and cps != 0:
            self.__limits['calls per second'] = RateLimit(cps, 1)

        if cpm is not None and cpm != 0:
            self.__limits['calls per minute'] = RateLimit(cpm, 60)

        if cph is not None and cph != 0:
            self.__limits['calls per hour'] = RateLimit(cph, 60*60)

        if cpd is not None and cpd != 0:
            self.__limits['calls per day'] = RateLimit(cpd, 60*60*24)

        self.__semaphore = Semaphore()
Example #57
 def initialize(self, *args, **kwargs):
     #TODO: Add documentation
     #TODO: This should put the DataHandler back into an 'unconfigured' state
     """
     Called from:
                   InstrumentAgent._handler_idle_reset
                   InstrumentAgent._handler_idle_go_inactive
                   InstrumentAgent._handler_stopped_reset
                   InstrumentAgent._handler_stopped_go_inactive
                   InstrumentAgent._handler_observatory_reset
                   InstrumentAgent._handler_observatory_go_inactive
                   InstrumentAgent._handler_uninitialized_initialize
                   |--> ExternalDataAgent._start_driver
     """
     log.debug('Initializing DataHandler...')
     self._glet_queue = []
     self._semaphore=Semaphore()
     return None
Example #58
    class Sema:
        def __init__(self):
            self.sem = Semaphore()
            self.timestamp = datetime.datetime.now()
            self.count = 1
            self.sem.acquire(blocking=True)

        def join(self):
            self.count += 1
            self.sem.acquire(blocking=True)

        def release(self):
            for ii in xrange(self.count):
                self.sem.release()