Example no. 1
0
from contextlib import contextmanager


class imap_manager(object):
    def __init__(self):
        from gevent.coros import Semaphore
        self.client = None
        self.sem = Semaphore(1)
        self.count = 0

    def close(self, current):
        import gevent
        gevent.sleep(360)
        self.sem.acquire()
        if self.client is not None and self.count == current:
            self.client.close()
            self.client = None
        self.sem.release()

    @contextmanager
    def get(self):
        import gevent
        self.sem.acquire()
        self.count += 1
        if self.client is None:
            from rbit import config
            from rbit import backend
            from rbit import imap
            cfg = config.Config('config', backend.create_session)
            self.client = imap.IMAPClient.from_config(cfg)
        yield self.client
        self.sem.release()
        gevent.spawn(lambda : self.close(self.count))
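
A minimal usage sketch (not part of the original module, and assuming rbit's config/imap dependencies are importable): the get() context manager serializes access to the shared IMAP client, and a delayed close() is scheduled after each use so the connection is torn down only if no newer checkout happened in the meantime.

manager = imap_manager()

def process_inbox():
    # Only one greenlet at a time holds the shared client inside this block.
    with manager.get() as client:
        pass  # placeholder for real IMAP work against `client`
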
Example no. 2
0
class GeventedHTTPTransport(HTTPTransport):

    scheme = ['gevent+http', 'gevent+https']

    def __init__(self, parsed_url, maximum_outstanding_requests=100):
        if not gevented:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        self._lock = Semaphore(maximum_outstanding_requests)

        super(GeventedHTTPTransport, self).__init__(parsed_url)

        # remove the gevent+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]

    def send(self, data, headers):
        """
        Spawn an async request to a remote webserver.
        """
        # this can be optimized by making a custom self.send that does not
        # read the response since we don't use it.
        self._lock.acquire()
        return spawn(super(GeventedHTTPTransport, self).send, data,
                     headers).link(self._done, self)

    def _done(self, *args):
        self._lock.release()
Example no. 3
0
class Pool(Group):
    def __init__(self, size=None, greenlet_class=None):
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self):
        self._semaphore.wait()

    def full(self):
        return self.free_count() <= 0

    def free_count(self):
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def add(self, greenlet):
        self._semaphore.acquire()
        try:
            Group.add(self, greenlet)
        except:
            self._semaphore.release()
            raise

    def _discard(self, greenlet):
        Group._discard(self, greenlet)
        self._semaphore.release()
Example no. 4
0
class GeventedHTTPTransport(HTTPTransport):

    scheme = ['gevent+http', 'gevent+https']

    def __init__(self, parsed_url, maximum_outstanding_requests=100):
        if not gevented:
            raise ImportError('GeventedHTTPTransport requires gevent.')
        self._lock = Semaphore(maximum_outstanding_requests)

        super(GeventedHTTPTransport, self).__init__(parsed_url)

        # remove the gevent+ from the protocol, as it is not a real protocol
        self._url = self._url.split('+', 1)[-1]

    def send(self, data, headers):
        """
        Spawn an async request to a remote webserver.
        """
        # this can be optimized by making a custom self.send that does not
        # read the response since we don't use it.
        self._lock.acquire()
        return spawn(super(GeventedHTTPTransport, self).send, data,
                     headers).link(self._done, self)

    def _done(self, *args):
        self._lock.release()
Example no. 5
0
class SessionPool(object):
    """
    This class is intended to allow for the model to be used in a threaded
    environment.
    """
    def __init__(self, engine, min_sessions=10, max_sessions=25):
        self.engine = engine
        sm = sessionmaker(autoflush=False, autocommit=False, bind=engine)
        self.session = scoped_session(sm)

        self.min_sessions = min_sessions
        self.max_sessions = max_sessions

        self.session_pool = []
        self.available = []
        self.checkouts = {}
        self.sessions = local()
        self.lock = Semaphore()

    def checkin(self):
        self.lock.acquire()
        try:
            session = self.sessions.session
            if session:
                session.close()
        finally:
            self.lock.release()

    def checkout(self):
        self.lock.acquire()
        try:
            session = self.sessions.session = self.session()
            return session
        finally:
            self.lock.release()
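
A brief usage sketch, assuming a standard SQLAlchemy engine (the in-memory SQLite URL below is just a placeholder): checkout() binds a session to the current greenlet under the semaphore, and checkin() closes it again.

from sqlalchemy import create_engine

engine = create_engine('sqlite://')   # placeholder engine
pool = SessionPool(engine)

session = pool.checkout()             # greenlet-local session, guarded by the semaphore
try:
    pass                              # e.g. queries against `session` would go here
finally:
    pool.checkin()                    # closes the greenlet-local session
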
Example no. 6
0
class MediaCodec_Base:	
	def __init__(self,svc):
		#maxsize - maximum buffer size; exceeding the limit is treated as an invalid decode
		self.buf =''
		self.conn = None
		self.svc = svc
		self.mtx = Semaphore()
		self.msgcnt=0 # count of received messages

	def parseMessage(self,s):
		pass


	# Queue incoming data; queue length and data validity should be checked
	def queueIn(self,s,conn):
		self.mtx.acquire()
#		self.buflist.append(s)
		self.buf+=s
		self.mtx.release()
		return True


	
	def decode(self):
		#@return (packets,retry) - retry indicates a decode error or invalid data
		return (),False


	# Sort out gps/alarm messages from the incoming message: MsgAoModule_Alarm(), MsgAoModule_GpsData()
	def filter_msg(self,m,aom):
		# GPS and ALARM messages are the ones of interest; they need to be fed back to the client
		return (m,)
	
	def command(self,aom,m):
		pass

	def save(self,aom,m):
		'''
			Save all device information, regardless of type
		'''
		try:
			cm = aom.ao.cm
			params= m.params
			if isinstance(m.params,dict):
				params = json.dumps(m.params)
			log = cm.AO_ModuleLog()
			log.ao = aom.ao.r
			log.module = aom.r
			log.type = ModuleMsgType.DEV2SYS
			log.time = datetime.datetime.now()
			log.msgtype = 'N/A'
			log.params = params
			log.rawmsg = m.rawmsg
			log.seq = 0
			log.save()
			return True
		except:
			traceback.print_exc()
			return False
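
As a hedged illustration of the protocol sketched by the comments above (not part of the original module): a concrete codec overrides decode() to split self.buf into complete packets and to report whether decoding should be retried. The newline-delimited framing below is an assumption.

class LineCodec(MediaCodec_Base):
    # Hypothetical subclass: treats the buffer as newline-delimited packets.
    def decode(self):
        packets = []
        while '\n' in self.buf:
            line, self.buf = self.buf.split('\n', 1)
            packets.append(line)
        return tuple(packets), False   # no decode error, nothing to retry
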
Example no. 7
0
class Client(object):
    px_per_tick = 100
    
    def __init__(self, canvas):
        self.canvas = canvas
        self.socket = None
        self.connect_ts = time.time()
        # This buffer discards all but the newest 1024 messages
        self.sendbuffer = deque([], 1024)
        # And this is used to limit clients to X messages per tick
        # We start at 0 (instead of x) to add a reconnect-penalty.
        self.limit = Semaphore(0)
        self.lock = RLock()

    def send(self, line):
        self.sendbuffer.append(line.strip() + '\n')

    def nospam(self, line):
        if not self.sendbuffer:
            self.sendbuffer.append(line.strip() + '\n')

    def disconnect(self):
        with self.lock:
            if self.socket:
                socket = self.socket
                self.socket = None
                socket.close()
                log.info('Disconnect')

    def serve(self, socket):
        with self.lock:
            self.socket = socket
            sendall = self.socket.sendall
            readline = self.socket.makefile().readline

        try:
            while self.socket:
                self.limit.acquire()
                # Idea: Send first, receive later. If the client is too
                # slow to empty the send-buffer, it cannot send.
                while self.sendbuffer:
                    sendall(self.sendbuffer.popleft())
                line = readline().strip()
                if not line:
                    break
                arguments = line.split()
                command = arguments.pop(0)
                try:
                    self.canvas.fire('COMMAND-%s' % command.upper(), self, *arguments)
                except Exception, e:
                    socket.send('ERROR %r :(' % e)
                    break
        finally:
            self.disconnect()

    def tick(self):
        while self.limit.counter <= self.px_per_tick:
            self.limit.release()
Example no. 8
0
class GeventSemaphore(BaseSemaphore):
    def __init__(self):
        self.__semaphore = Semaphore()

    def acquire(self):
        self.__semaphore.acquire()

    def release(self):
        self.__semaphore.release()
def handle_socket(sock, address):
    semaphore = Semaphore()
    while 1:
        sock.setblocking(0)
        semaphore.acquire()
        sockfd = sock.makefile()
        chunk_size = 1024
        head_packet_format = "!LL128s128sL"
        head_packet_size = struct.calcsize(head_packet_format)
        data = sockfd.read(head_packet_size)
        if not data or len(data) != head_packet_size:
            return
        filepath_len, filename_len, filepath,filename, filesize = struct.unpack(head_packet_format,data)
        filepath = filepath[:filepath_len]
        filename = filename[:filename_len]
        #logger.debug("update file: %s" % filepath + '/' + filename)
        
        fd = open(filename,'wb')
        fcntl.flock(fd,fcntl.LOCK_EX)
        print "File %s size: %s" % (filename, filesize)
        print 111111111111111
        writen_size = 0
        if filesize > chunk_size:
            times = filesize / chunk_size
            first_part_size = times * chunk_size
            second_part_size = filesize % chunk_size
            print "times: %s  first_part_size:%s  second_part_size:%s" % (times,first_part_size,second_part_size)
            print 22222222222222222222
            #receive first part packets
            while 1:
                data = sockfd.read(chunk_size)
                fd.write(data)
                fd.flush()
                writen_size += len(data)
                if writen_size == first_part_size:
                    break
            print "writen_size in first_par: %s" % writen_size
            print 333333333333333333333
            if second_part_size:
                #receive the packet at last
                data = sockfd.read(second_part_size)
                fd.write(data)
                fd.flush()
                writen_size += len(data)
            print 4444444444444444444444
        else:
            data = sockfd.read(filesize)
            fd.write(data)
            fd.flush()
            writen_size += len(data)
            
        fcntl.flock(fd,fcntl.LOCK_UN)
        fd.close()
        print '555555555555555555555'
        print "File %s size: %s\n" % (filename, writen_size)
        semaphore.release()
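
A usage sketch for the handler above: gevent's StreamServer invokes its handler with (socket, address), which matches handle_socket's signature. The listening address and port are arbitrary placeholders.

from gevent.server import StreamServer

server = StreamServer(('0.0.0.0', 6000), handle_socket)  # placeholder bind address
server.serve_forever()
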
Example no. 10
0
class GeventSemaphore(BaseSemaphore):

    def __init__(self):
        self.__semaphore = Semaphore()

    def acquire(self):
        self.__semaphore.acquire()

    def release(self):
        self.__semaphore.release()
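
A short usage sketch: the adapter hides the gevent primitive behind the generic BaseSemaphore interface, so calling code only sees acquire() and release().

sem = GeventSemaphore()
sem.acquire()
try:
    pass  # critical section: only one greenlet at a time runs here
finally:
    sem.release()
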
Example no. 11
0
class MediaCodec_Base:
    def __init__(self, svc):
        #maxsize - maximum buffer size; exceeding the limit is treated as an invalid decode
        self.buf = ''
        self.conn = None
        self.svc = svc
        self.mtx = Semaphore()
        self.msgcnt = 0  # count of received messages

    def parseMessage(self, s):
        pass

    # Queue incoming data; queue length and data validity should be checked
    def queueIn(self, s, conn):
        self.mtx.acquire()
        #		self.buflist.append(s)
        self.buf += s
        self.mtx.release()
        return True

    def decode(self):
        #@return (packets,retry) - retry indicates a decode error or invalid data
        return (), False

    # Sort out gps/alarm messages from the incoming message: MsgAoModule_Alarm(), MsgAoModule_GpsData()
    def filter_msg(self, m, aom):
        # GPS and ALARM messages are the ones of interest; they need to be fed back to the client
        return (m, )

    def command(self, aom, m):
        pass

    def save(self, aom, m):
        '''
			Save all device information, regardless of type
		'''
        try:
            cm = aom.ao.cm
            params = m.params
            if isinstance(m.params, dict):
                params = json.dumps(m.params)
            log = cm.AO_ModuleLog()
            log.ao = aom.ao.r
            log.module = aom.r
            log.type = ModuleMsgType.DEV2SYS
            log.time = datetime.datetime.now()
            log.msgtype = 'N/A'
            log.params = params
            log.rawmsg = m.rawmsg
            log.seq = 0
            log.save()
            return True
        except:
            traceback.print_exc()
            return False
Example no. 12
0
class BaseSensor(Greenlet, Model):
    """
    Base class for sensors.  
    Each sensor is associated with a Docker container by its 
    cid (or name, if you prefer).  
    A sensor has to be started with its `start` method and, eventually,
    stopped with its `kill` method.

    Every sensor provides collected data by means of the
    `get_data` method, which returns a generator.
    Each call to `next` blocks for the time declared
    in the sensor's `__init__` through the `spacing` parameter.
    """
    cid = StringType(required=True)
    spacing = FloatType(default=0.1)

    @serializable
    def uid(self):
        return get_uid(self.__class__, self.cid)

    def __init__(self, *args, **kwargs):
        Greenlet.__init__(self)
        Model.__init__(self, *args, **kwargs)

        docker_url = config.get('docker_url')
        self.container = api.DockerAPI(self.cid, docker_url)

        self._lock = Semaphore()
        self._lock.acquire() # locking semaphore
        self._new_data = None

    @raise_connection_error
    def _run(self):
        while True:
            self._new_data = self._get()
            LOG.debug("{} got {}".format(self.uid, self._new_data))
            self._store(self._new_data)
            self._lock.release()
            sleep(self.spacing)

    def _get(self):
        """Override"""
        return None

    def _store(self, data):
        """Override"""
        pass

    def get_data(self):
        while True:
            if not self.started or self.dead:
                raise SensorError("Start the sensor before getting data.")
            self._lock.acquire()
            yield self._new_data
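
A minimal subclass sketch (the MemorySensor name, the returned value, and the dict-style constructor argument are assumptions; the surrounding config and Docker API modules are presumed available): _get() produces one sample, _run() releases the semaphore after storing it, and get_data() blocks on that semaphore until fresh data arrives.

class MemorySensor(BaseSensor):
    # Hypothetical sensor; a real one would read container stats via self.container.
    def _get(self):
        return {'memory': 0}

sensor = MemorySensor({'cid': 'my-container'})
sensor.start()                       # Greenlet.start(): _run() begins sampling
for sample in sensor.get_data():     # blocks until _run() releases the semaphore
    print(sample)
    break
sensor.kill()
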
Example no. 13
0
class BaseSensor(Greenlet, Model):
    """
    Base class for sensors.  
    Each sensor is associated with a Docker container by its 
    cid (or name, if you prefer).  
    A sensor has to be started with its `start` method and, eventually,
    stopped with its `kill` method.

    Every sensor provides collected data by means of the
    `get_data` method, which returns a generator.
    Each call to `next` blocks for the time declared
    in the sensor's `__init__` through the `spacing` parameter.
    """
    cid = StringType(required=True)
    spacing = FloatType(default=0.1)

    @serializable
    def uid(self):
        return get_uid(self.__class__, self.cid)

    def __init__(self, *args, **kwargs):
        Greenlet.__init__(self)
        Model.__init__(self, *args, **kwargs)

        docker_url = config.get('docker_url')
        self.container = api.DockerAPI(self.cid, docker_url)

        self._lock = Semaphore()
        self._lock.acquire()  # locking semaphore
        self._new_data = None

    @raise_connection_error
    def _run(self):
        while True:
            self._new_data = self._get()
            LOG.debug("{} got {}".format(self.uid, self._new_data))
            self._store(self._new_data)
            self._lock.release()
            sleep(self.spacing)

    def _get(self):
        """Override"""
        return None

    def _store(self, data):
        """Override"""
        pass

    def get_data(self):
        while True:
            if not self.started or self.dead:
                raise SensorError("Start the sensor before getting data.")
            self._lock.acquire()
            yield self._new_data
Example no. 14
0
    class Sema:
        def __init__(self):
            self.sem = Semaphore()
            self.timestamp = datetime.datetime.now()
            self.count = 1
            self.sem.acquire(blocking=True)

        def join(self):
            self.count += 1
            self.sem.acquire(blocking=True)

        def release(self):
            for ii in xrange(self.count):
                self.sem.release()
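
A small sketch of the intended join/release pattern (the waiter function is hypothetical, and the Sema class above is assumed to be reachable in scope even though it is shown nested): every caller of join() blocks on the shared semaphore, and a single release() wakes all of them because it releases once per counted caller.

import gevent

sema = Sema()                        # the creator registers itself as the first caller

def waiter(n):
    sema.join()                      # blocks until release() is called
    print('waiter %d woke up' % n)

workers = [gevent.spawn(waiter, i) for i in range(3)]
gevent.sleep(0)                      # let the waiters reach join()
sema.release()                       # wakes every joined waiter at once
gevent.joinall(workers)
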
Example no. 15
0
def websocket(token):
    if request.environ.get('wsgi.websocket'):
        email = database_helper.get_active(token)
        if (email == None) :
#            return json.dumps({"success": False, "message": "You are not signed in."})
            pass
        else :
            websocket = request.environ['wsgi.websocket']
            sema = Semaphore(0)
            websockets[email] = {"websocket": websocket, "sema": sema}
            print "websocket(): waiting at sema for [" + email + "]"
            sema.acquire()
            print "websocket(): sema for [" + email + "] passed"
#            return json.dumps({"success": True, "message": "Websocket connected."})
    return "websocket(): done"
Example no. 16
0
class ConnectionPool(object):
    def __init__(self,
                 dsn,
                 max_con=10,
                 max_idle=3,
                 connection_factory=RealDictConnection):
        self.dsn = dsn
        self.max_con = max_con
        self.max_idle = max_idle
        self.connection_factory = connection_factory
        self._sem = Semaphore(max_con)
        self._free = []
        self._local = gevent_local()

    def __enter__(self):
        self._sem.acquire()
        try:
            if getattr(self._local, 'con', None) is not None:
                raise RuntimeError("Attempting to re-enter connection pool?")
            if self._free:
                con = self._free.pop()
            else:
                con = psycopg2.connect(
                    dsn=self.dsn, connection_factory=self.connection_factory)
            self._local.con = con
            return con
        except StandardError:
            self._sem.release()
            raise

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            if self._local.con is None:
                raise RuntimeError("Exit connection pool with no connection?")
            if exc_type is not None:
                self.rollback()
            else:
                self.commit()
            if len(self._free) < self.max_idle:
                self._free.append(self._local.con)
            self._local.con = None
        finally:
            self._sem.release()

    def commit(self):
        self._local.con.commit()

    def rollback(self):
        self._local.con.rollback()
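
A usage sketch, assuming a valid PostgreSQL DSN (the one below is a placeholder): the pool is used as a context manager, which checks a connection out, commits on clean exit, rolls back if the block raises, and then returns the connection to the idle list.

pool = ConnectionPool('dbname=example user=example')   # placeholder DSN

with pool as con:
    cur = con.cursor()
    cur.execute('SELECT 1')
    print(cur.fetchone())
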
Example no. 17
0
class HitCounter(object):
    
    def __init__(self):
        self.value = 0
        self.lock = Semaphore(1)
    
    def __str__(self):
        return str(self.value)

    def increment(self):
        try:
            self.lock.acquire()
            self.value += 1
        finally:
            self.lock.release()
class Pool(Group):

    def __init__(self, size=None, greenlet_class=None):
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self):
        self._semaphore.wait()

    def full(self):
        return self.free_count() <= 0

    def free_count(self):
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def start(self, greenlet):
        self._semaphore.acquire()
        try:
            self.add(greenlet)
        except:
            self._semaphore.release()
            raise
        greenlet.start()

    def spawn(self, *args, **kwargs):
        self._semaphore.acquire()
        try:
            greenlet = self.greenlet_class.spawn(*args, **kwargs)
            self.add(greenlet)
        except:
            self._semaphore.release()
            raise
        return greenlet

    def discard(self, greenlet):
        Group.discard(self, greenlet)
        self._semaphore.release()
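
A short usage sketch of the bounded pool above (fetch is a placeholder task): spawn() blocks once `size` greenlets are active, so at most two copies of the task run concurrently here.

import gevent

def fetch(i):
    gevent.sleep(0.1)      # placeholder for real work
    return i

pool = Pool(2)             # at most two greenlets run at once
jobs = [pool.spawn(fetch, i) for i in range(5)]
gevent.joinall(jobs)
print([job.value for job in jobs])
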
Example no. 19
0
class BlockingDeque(deque):

    def __init__(self, *args, **kwargs):
        super(BlockingDeque, self).__init__(*args, **kwargs)
        self.sema = Semaphore(len(self))

    def append(self, *args, **kwargs):
        ret = super(BlockingDeque, self).append(*args, **kwargs)
        self.sema.release()
        return ret

    def appendleft(self, *args, **kwargs):
        ret = super(BlockingDeque, self).appendleft(*args, **kwargs)
        self.sema.release()
        return ret

    def clear(self, *args, **kwargs):
        ret = super(BlockingDeque, self).clear(*args, **kwargs)
        while not self.sema.locked():
            self.sema.acquire(blocking=False)
        return ret

    def extend(self, *args, **kwargs):
        pre_n = len(self)
        ret = super(BlockingDeque, self).extend(*args, **kwargs)
        post_n = len(self)
        for i in xrange(pre_n, post_n):
            self.sema.release()
        return ret

    def extendleft(self, *args, **kwargs):
        pre_n = len(self)
        ret = super(BlockingDeque, self).extendleft(*args, **kwargs)
        post_n = len(self)
        for i in xrange(pre_n, post_n):
            self.sema.release()
        return ret

    def pop(self, *args, **kwargs):
        self.sema.acquire()
        return super(BlockingDeque, self).pop(*args, **kwargs)

    def popleft(self, *args, **kwargs):
        self.sema.acquire()
        return super(BlockingDeque, self).popleft(*args, **kwargs)

    def remove(self, *args, **kwargs):
        ret = super(BlockingDeque, self).remove(*args, **kwargs)
        self.sema.acquire()
        return ret
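
A producer/consumer sketch: popleft() blocks on the internal semaphore until append() releases it, so a consumer greenlet can safely wait on an initially empty deque.

import gevent

queue = BlockingDeque()

def consumer():
    # blocks until the producer appends something
    print('got %r' % (queue.popleft(),))

worker = gevent.spawn(consumer)
queue.append('hello')
worker.join()
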
Example no. 20
0
class SessionPool(object):
    """
    This class is intended to allow for the model to be used in a threaded
    environment.
    """

    def __init__(self, engine, min_sessions=10, max_sessions=25):
        self.engine = engine
        sm = sessionmaker(autoflush=False, autocommit=False, bind=engine)
        self.session = scoped_session(sm)

        self.min_sessions = min_sessions
        self.max_sessions = max_sessions

        self.session_pool = []
        self.available = []
        self.checkouts = {}
        self.sessions = local()
        self.lock = Semaphore()

    def checkin(self):
        self.lock.acquire()
        try:
            session = self.sessions.session
            if session:
                session.close()
        finally:
            self.lock.release()

    def checkout(self):
        self.lock.acquire()
        try:
            session = self.sessions.session = self.session()
            return session
        finally:
            self.lock.release()
Example no. 21
0
class SchedValve(SchedCommon):
	"""Mirrors (and monitors) a valve."""
	locked = False # external command, don't change
	sched = None
	sched_ts = None
	sched_job = None
	sched_lock = None
	on = False
	on_ts = None
	flow = 0
	_flow_check = None

	def __new__(cls,v):
		if v.id in valves:
			return valves[v.id]
		self = object.__new__(cls)
		valves[v.id] = self
		self.v = v
		self.site = SchedSite(self.v.controller.site)
		self.env = EnvGroup(self.v.envgroup)
		self.controller = SchedController(self.v.controller)
		self.sched_lock = Semaphore()
		if self.site.qb:
			try:
				self.site.send_command("set","output","off",*(self.v.var.split()))
			except NotConnected:
				pass
			except Exception as e:
				raise RuntimeError(self.v.var) from e
		return self
	def __init__(self,v):
		pass

	def _on(self,caller,sched=None,duration=None):
		print("Open",caller,self.v.var, file=sys.stderr)
		self.site.delay_on()
		if duration is None and sched is not None:
			duration = sched.duration
		if self.controller.has_max_on():
			print("… but too many:", ", ".join(str(v) for v in self.controller.c.valves.all() if SchedValve(v).on), file=sys.stderr)
			if sched:
				sched.update(seen = False)
			self.log("NOT running %s for %s: too many"%(self.v,duration,))
			raise TooManyOn(self)
		if duration is None:
			self.log("Run (indefinitely)")
			self.site.send_command("set","output","on",*(self.v.var.split()))
		else:
			self.log("Run for %s"%(duration,))
			if not isinstance(duration,six.integer_types):
				duration = duration.total_seconds()
			try:
				self.site.send_command("set","output","on",*(self.v.var.split()), sub=(("for",duration),("async",)))
			except Exception:
				# Something broke. Try to turn this thing off.
				self.log(format_exc())
				
				self.site.send_command("set","output","off",*(self.v.var.split()))
				raise RuntimeError("Could not start (logged)")

		if sched is not None:
			if self.v.verbose:
				self.log("Opened for %s"%(sched,))
			self.sched = sched
			if not sched.seen:
				sched.update(start=now(), seen=True)
				sched.refresh()
			#Save(sched)
		else:
			if self.v.verbose:
				self.log("Opened for %s"%(duration,))

	def _off(self, num):
		if self.on:
			if self.v.verbose:
				self.log("Closing "+str(num))
			print("Close",self.v.var, file=sys.stderr)
		try:
			self.site.send_command("set","output","off",*(self.v.var.split()))
		except NotConnected:
			pass

	def shutdown(self):
		if self._flow_check is not None:
			self._flow_check.dead()

	def run_schedule(self):
		if not self.sched_lock.acquire(blocking=False):
			if self.v.verbose:
				print("SCHED LOCKED1 %s" % (self.v.name,), file=sys.stderr)
			return
		try:
			self._run_schedule()
		except Exception:
			self.log(format_exc())
		finally:
			self.sched_lock.release()

	def _run_schedule(self):
		if self.sched_job is not None:
			self.sched_job.kill()
			self.sched_job = None
		if self.locked:
			if self.v.verbose:
				print("SCHED LOCKED2 %s" % (self.v.name,), file=sys.stderr)
			return
		n = now()

		try:
			if self.sched is not None:
				self.sched.refresh()
				if self.sched.end <= n:
					if self.v.verbose:
						print("Turn off: %s+%s <= %s" % (self.sched.start,self.sched.duration,n), file=sys.stderr)
					self._off(2)
					self.sched = None
				else:
					self.sched_job = gevent.spawn_later((self.sched.end-n).total_seconds(),connwrap,self.run_sched_task,reason="_run_schedule 1")
					if self.v.verbose:
						print("SCHED LATER %s: %s" % (self.v.name,humandelta(self.sched.end-n)), file=sys.stderr)
					return
		except ObjectDoesNotExist:
			pass # somebody deleted it *shrug*
		sched = None

		if self.sched_ts is None:
			try:
				sched = self.v.schedules.filter(start__lt=n).order_by("-start")[0]
			except IndexError:
				self.sched_ts = n-timedelta(1,0)
			else:
				self.sched_ts = sched.end
				if sched.end > n: # still running
					if self.v.verbose:
						print("SCHED RUNNING %s: %s" % (self.v.name,humandelta(sched.end-n)), file=sys.stderr)
					try:
						self._on(1,sched, sched.end-n)
					except TooManyOn:
						self.log("Could not schedule: too many open valves")
					except NotConnected:
						self.log("Could not schedule: connection to MoaT failed")
					return

		try:
			sched = self.v.schedules.filter(start__gte=self.sched_ts).order_by("start")[0]
		except IndexError:
			if self.v.verbose:
				print("SCHED EMPTY %s: %s" % (self.v.name,str_tz(self.sched_ts)), file=sys.stderr)
			self._off(3)
			return

		if sched.end <= n:
			if self.v.verbose:
				print("SCHED %s: sched %d done for %s" % (self.v.name,sched.id,humandelta(n-sched.end)), file=sys.stderr)
			self.sched_ts = None
			return
		if sched.start > n:
			if self.v.verbose:
				print("SCHED %s: sched %d in %s" % (self.v.name,sched.id,humandelta(sched.start-n)), file=sys.stderr)
			self._off(4)
			self.sched_job = gevent.spawn_later((sched.start-n).total_seconds(),connwrap,self.run_sched_task,reason="_run_schedule 2")
			return
		try:
			self._on(2,sched)
		except TooManyOn:
			self.log("Could not schedule: too many open valves")
		except NotConnected:
			self.log("Could not schedule: connection to MoaT failed")
	
	def run_sched_task(self,reason="valve"):
		self.sched_job = None
		self.site.run_sched_task(reason=reason)
	run_sched_ext = async_gevent(run_sched_task)

	def add_flow(self, val):
		if self._flow_check is not None:
			if self._flow_check.add_flow(val):
				return
		if self.v.verbose:
			print("FLOW %s: %s %s" % (self.v.name,self.flow,val), file=sys.stderr)
		self.flow += val

	@async_gevent
	def check_flow(self,**k):
		cf = None
		try:
			cf = FlowCheck(self)
			cf.run()
		except Exception as ex:
			log_error(self.v)
			if cf is not None:
				cf._unlock()
		
	def refresh(self):
		self.v.refresh()
#		if self.sched is not None:
#			self.sched.refresh()

	def connect_monitors(self):
		if self.site.qb is None:
			return
		n = self.v.var.replace(' ','.')
		self.mon = self.site.qb.register_alert_gevent("moat.event.output.change."+n, self.watch_state, call_conv=CC_DICT)
		self.ckf = self.site.qb.register_rpc_gevent("rain.check.flow."+n, self.check_flow, call_conv=CC_DICT)
		
	@async_gevent
	def watch_state(self,value=None,**kv):
		"""output change NAME ::value ON"""
		on = (str(value).lower() in ("1","true","on"))
		if self._flow_check is not None:
			# TODO
			self.on = on
			self._flow_check.state(on)
			return
		if self.locked:
			self.on = on
			return
		try:
			if on != self.on:
				n=now()
				print("Report %s" % ("ON" if on else "OFF"),self.v.var,self.sched, file=sys.stderr)
				if self.sched is not None and not on:
					self.sched.update(db_duration=(n-self.sched.start).total_seconds())
					self.sched.refresh()
					self.sched_ts = self.sched.end
					self.sched = None
				flow,self.flow = self.flow,0
				# If nothing happened, calculate.
				if not on:
					duration = n-self.on_ts
					maxflow = self.v.flow * duration.total_seconds()
					if (not flow or not self.v.feed.var) or flow > 2*maxflow:
						flow = maxflow
				self.new_level_entry(flow)
				if not on:
					if self.v.level > self.v.stop_level + (self.v.start_level-self.v.stop_level)/5:
						self.v.update(priority=True)
					self.log("Done for %s, level is now %s"%(duration,self.v.level))
				self.on = on
				self.on_ts = n

		except Exception:
			print_exc()

	def sync(self):
		flow,self.flow = self.flow,0
		self.new_level_entry(flow)

	def sync_history(self):
		n=now()
		try:
			lv = self.v.levels.order_by("-time")[0]
		except IndexError:
			pass
		else:
			if self.v.time > lv.time:
				self.log("Timestamp downdate: %s %s" % (self.v.time,lv.time))
				self.v.update(time = lv.time)
				self.v.refresh()
				#Save(self.v)
		if (n-self.v.time).total_seconds() >= 295:
			flow,self.flow = self.flow,0
			self.new_level_entry(flow)

	def new_level_entry(self,flow=0):
		self.site.current_history_entry()
		n=now()
		self.v.refresh()
		hts = None
		try:
			lv = self.v.levels.order_by("-time")[0]
		except IndexError:
			ts = n-timedelta(1,0)
		else:
			ts = lv.time
		sum_f = 0
		sum_r = 0
		for h in self.site.s.history.filter(time__gt=ts).order_by("time"):
			if self.v.verbose>2:
				self.log("Env factor for %s: T=%s W=%s S=%s"%(h,h.temp,h.wind,h.sun))
			f = self.env.env_factor(h, logger=self.log if self.v.verbose>2 else None)*self.v.adj
			if self.v.verbose>1:
				self.log("Env factor for %s is %s"%(h,f))
			sum_f += self.site.s.db_rate * self.v.do_shade(self.env.eg.factor*f) * (h.time-ts).total_seconds()
			sum_r += self.v.runoff*h.rain
			ts=h.time

		if self.v.verbose:
			self.log("Apply env %f, rain %r,, flow %f = %f" % (sum_f,sum_r,flow,flow/self.v.area))

		if self.v.time == ts:
			return
		if self.v.level < 0:
			level = 0
		else:
			level = F('level')
		level += sum_f
		if (flow > 0 or sum_r > 0) and self.v.level > self.v.max_level:
			level = self.v.max_level
		level -= flow/self.v.area+sum_r
		#if level < 0:
		#	self.log("Level %s ?!?"%(self.v.level,))
		self.v.update(time=ts, level=level)
		self.v.refresh()

		lv = Level(valve=self.v,time=ts,level=self.v.level,flow=flow)
		lv.save()

		if self.on and not (self.sched and self.sched.forced) and self.v.level <= self.v.stop_level:
			self._off(5)

	def log(self,txt):
		log(self.v,txt)
Example no. 22
0
class SchedSite(SchedCommon):
	"""Mirrors a site"""
	rain_timer = None
	rain_counter = 0
	_run_delay = None
	_sched = None
	_sched_running = None
	_delay_on = None
	running = False

	def __new__(cls,s):
		if s.id in sites:
			return sites[s.id]
		self = object.__new__(cls)
		sites[s.id] = self
		self.s = s
		self.connect()
		self._delay_on = Semaphore()

		self.controllers = set()
		self.envgroups = set()
		self.meters = {}
		for M in METERS:
			ml = set()
			self.meters[M.meter_type] = ml
			for d in getattr(self.s,M.meter_type+"_meters").all():
				ml.add(M(d))

		self.log("Startup")
		self.connect_monitors(do_controllers=False)
		signal.signal(signal.SIGINT,self.do_shutdown)
		signal.signal(signal.SIGTERM,self.do_shutdown)
		signal.signal(signal.SIGHUP,self.do_syncsched)

		self.running = True
		return self
	def __init__(self,s):
		pass

	def do_shutdown(self,x,y,**k):
		gevent.spawn_later(0.1,connwrap,self.shutdown)

	def do_syncsched(self,x,y):
		gevent.spawn_later(0.1,connwrap,self.syncsched)

	def syncsched(self):
		print("Sync+Sched", file=sys.stderr)
		self.sync()
		self.refresh()
		self.run_sched_task(reason="Sync+Sched")

	def delay_on(self):
		self._delay_on.acquire()
		gevent.spawn_later(1,self._delay_on.release)

	@async_gevent
	def check_flow(self,**k):
		for c in self.controllers:
			c.check_flow(**k)

	def connect(self):
		d = dict()
		if self.s.port:
			d['port'] = self.s.port
		if self.s.username:
			d['login'] = self.s.username
		if self.s.password:
			d['password'] = self.s.password
		if self.s.virtualhost:
			d['virtualhost'] = self.s.virtualhost
		try:
			self.qb = qbroker.make_unit_gevent("moat.rain.runschedule", amqp=dict(server=dict(host=self.s.host, **d)))
		except Exception:
			print("Could not connect:",self.s.host, file=sys.stderr)
			raise

	def maybe_restart(self):
		self.log("reconnecting")
		try:
			self.connect()
		except Exception:
			print_exc()
			gevent.spawn_later(100,connwrap,self.maybe_restart)
		else:
			self.connect_monitors()

	def connect_monitors(self,do_controllers=True):
		if self.qb is None:
			return
		if do_controllers:
			for c in self.controllers:
				c.connect_monitors()
		for mm in self.meters.values():
			for m in mm:
				m.connect_monitors()
		n = self.s.var.replace(' ','.')
		self.ckf = self.qb.register_rpc_gevent("rain.check.flow."+n, self.check_flow)
		self.cks = self.qb.register_rpc_gevent("rain.read.schedule."+n, partial(self.run_sched_ext,reason="read schedule"))
		self.ckt = self.qb.register_rpc_gevent("rain.sync."+n, self.sync_ext)
		self.cku = self.qb.register_rpc_gevent("rain.shutdown."+n, self.do_shutdown)

	def sync(self,**k):
		print("Sync", file=sys.stderr)
		for c in self.controllers:
			c.sync()
		for eg in self.envgroups:
			eg.sync()
		for mm in self.meters.values():
			for m in mm:
				m.sync()
		self.run_main_task()
		#Save(None)
		print("Sync end", file=sys.stderr)
	sync_ext = async_gevent(sync)

	def shutdown(self,**k):
		print("Shutdown", file=sys.stderr)
		signal.signal(signal.SIGINT,signal.SIG_DFL)
		signal.signal(signal.SIGTERM,signal.SIG_DFL)
		if self.running:
			self.running = False
			self.sync()
			for eg in self.envgroups:
				eg.sync()
			for c in self.controllers:
				c.shutdown()
			for mm in self.meters.values():
				for m in mm:
					m.shutdown()
		#Save(None)
		sys.exit(0)

	def run_schedule(self):
		for c in self.controllers:
			c.run_schedule()

	def refresh(self):
		self.s.refresh()
		for eg in self.envgroups:
			eg.refresh()
		for c in self.controllers:
			c.refresh()
		for mm in self.meters.values():
			for m in mm:
				m.refresh()

	def log(self,txt):
		log(self.s,txt)

	def add_controller(self,controller):
		self.controllers.add(controller)

	def no_rain(self):
		"""Rain has stopped."""
		# called by timer
		self.rain_timer = None
		self.log("Stopped raining")
		self.run_main_task()

	def has_rain(self):
		"""Some monitor told us that it started raining"""
		r,self.rain_timer = self.rain_timer,gevent.spawn_later(self.s.db_rain_delay,connwrap,self.no_rain)
		if r:
			r.kill()
			return
		self.log("Started raining")
		self.rain = True

		#for v in self.s.valves.all():
		vo = Valve.objects.filter(controller__site=self.s, runoff__gt=0)
		for v in vo.all():
			valve = SchedValve(v)
			if valve.locked:
				continue
			try:
				valve._off(1)
			except NotConnected:
				pass
			except Exception:
				self.log_error(v)
		Schedule.objects.filter(valve__in=vo, start__gte=now()-timedelta(1),seen=False).delete()
		self.run_main_task()

	def send_command(self,*a,**k):
		# TODO: return a sensible error and handle that correctly
		if self.qb is None:
			raise NotConnected
		try:
			self.qb.rpc_gevent("moat.cmd",args=a, _dest="moat.main",**k)
		except asyncio.TimeoutError as e:
			print("Timeout sending %s %s" % (repr(a),repr(k)))

	def run_every(self,delay):
		"""Initiate running the calculation and scheduling loop every @delay seconds."""

		if self._run_delay is not None:
			self._run_delay = delay # just update
			return
		self._run_delay = delay
		self._run_last = now()
		self._running = Semaphore()
		self._run_result = None
		sd = self._run_delay.total_seconds()/10
		if sd < 66: sd = 66
		self._run = gevent.spawn_later(sd, connwrap,self.run_main_task, kill=False)
		if self._sched is not None:
			self._sched.kill()
		self._sched = gevent.spawn_later(2, connwrap,self.run_sched_task, kill=False, reason="run_every")

	def run_main_task(self, kill=True):
		"""Run the calculation loop."""
		res = None
		if not self._running.acquire(blocking=False):
			return self._run_result.get()
		try:
			self._run_result = AsyncResult()
			if kill:
				self._run.kill()
			n = now()
			ts = (n-self._run_last).total_seconds()
			if ts < 5:
				try:
					res = self.s.history.order_by("-time")[0]
				except IndexError:
					return None
				else:
					return res
			self._run_last = n

			res = self.main_task()
			return res
		finally:
			self._run = gevent.spawn_later((self._run_last+self._run_delay-n).total_seconds(), connwrap,self.run_main_task, kill=False)
			r,self._run_result = self._run_result,None
			self._running.release()
			r.set(res)

	def current_history_entry(self,delta=15):
		# assure that the last history entry is reasonably current
		try:
			he = self.s.history.order_by("-time")[0]
		except IndexError:
			pass
		else:
			if (now()-he.time).total_seconds() < delta:
				return he
		return self.new_history_entry()

	def new_history_entry(self,rain=0):
		"""Create a new history entry"""
		values = {}
		n = now()
		for t,ml in self.meters.items():
			sum_it = False
			sum_val = 0
			sum_f = 0
			for m in ml:
				f = m.weight
				v = m.get_value()
				if m.last_time is None:
					f *= 0.01
				else:
					s = (n - m.last_time).total_seconds()
					if s > METER_MAXTIME:
						f *= 0.01
					elif s > METER_TIME:
						f *= METER_TIME/s
				if v is not None:
					sum_val += f*v
					sum_f += f
				if m.sum_it: sum_it = True
			if sum_f:
				if not sum_it:
					sum_val /= sum_f
				values[t] = sum_val
		
		print("Values:",values, file=sys.stderr)
		h = History(site=self.s,time=now(),**values)
		h.save()
		return h

	def sync_history(self):
		for c in self.controllers:
			c.sync_history()

	def main_task(self):
		print("MainTask", file=sys.stderr)
		self.refresh()
		h = self.current_history_entry(3)
		self.sync_history()
			
		gevent.spawn_later(2,connwrap,self.sched_task)
		print("MainTask end",h, file=sys.stderr)
		return h

	def run_sched_task(self,delayed=False,reason=None,kill=True, **k):
		if self._sched_running is not None:
			print("RunSched.running",reason, file=sys.stderr)
			return self._sched_running.get()
		if self._sched is not None:
			if kill:
				self._sched.kill()
		if delayed:
			print("RunSched.delay",reason, file=sys.stderr)
			self._sched = gevent.spawn_later(10,connwrap,self.run_sched_task,kill=False,reason="Timer 10")
			return
		print("RunSched",reason, file=sys.stderr)
		self._sched = None
		self._sched_running = AsyncResult()
		try:
			self.sched_task()
		except Exception:
			self.log(format_exc())
		finally:
			r,self._sched_running = self._sched_running,None
			if self._sched is None:
				self._sched = gevent.spawn_later(600,connwrap,self.run_sched_task,kill=False,reason="Timer 600")
			if r is not None:
				r.set(None)
		print("RunSched end", file=sys.stderr)
	run_sched_ext = async_gevent(run_sched_task)

	def sched_task(self, kill=True):
		self.refresh()
		self.run_schedule()
class VBucketAwareCouchbaseClient(object):
    #poll the server every few seconds to see if the vbucket-map
    #has changed
    def __init__(self, url, bucket, password="", verbose=False):
        self.log = logger.logger("VBucketAwareMemcachedClient")
        self.bucket = bucket
        self.rest_username = bucket
        self.rest_password = password
        self._memcacheds = {}
        self._vBucketMap = {}
        self._vBucketMap_lock = Lock()
        self._vBucketMapFastForward = {}
        self._vBucketMapFastForward_lock = Lock()
        #TODO: use regular expressions to parse the url
        server = {}
        if not bucket:
            raise InvalidArgumentException("bucket can not be an empty string", parameters="bucket")
        if not url:
            raise InvalidArgumentException("url can not be an empty string", parameters="url")
        if url.find("http://") != -1 and url.rfind(":") != -1 and url.find("/pools/default") != -1:
            server["ip"] = url[url.find("http://") + len("http://"):url.rfind(":")]
            server["port"] = url[url.rfind(":") + 1:url.find("/pools/default")]
            server["username"] = self.rest_username
            server["password"] = self.rest_password
        self.servers = [server]
        self.servers_lock = Lock()
        self.rest = RestConnection(server)
        self.reconfig_vbucket_map()
        self.init_vbucket_connections()
        self.dispatcher = CommandDispatcher(self)
        self.dispatcher_thread = Process(name="dispatcher-thread", target=self._start_dispatcher)
        self.dispatcher_thread.daemon = True
        self.dispatcher_thread.start()
        self.streaming_thread = Process(name="streaming", target=self._start_streaming, args=())
        self.streaming_thread.daemon = True
        self.streaming_thread.start()
        self.verbose = verbose

    def _start_dispatcher(self):
        self.dispatcher.dispatch()

    def _start_streaming(self):
        # this will dynamically update vBucketMap, vBucketMapFastForward, servers
        urlopener = urllib.FancyURLopener()
        urlopener.prompt_user_passwd = lambda host, realm: (self.rest_username, self.rest_password)
        current_servers = True
        while current_servers:
            self.servers_lock.acquire()
            current_servers = deepcopy(self.servers)
            self.servers_lock.release()
            for server in current_servers:
                response = urlopener.open("http://{0}:{1}/pools/default/bucketsStreaming/{2}".format(server["ip"], server["port"], self.bucket))
                while response:
                    try:
                        line = response.readline()
                        if not line:
                            # try next server if we get an EOF
                            response.close()
                            break
                    except:
                        # try next server if we fail to read
                        response.close()
                        break
                    try:
                        data = json.loads(line)
                    except:
                        continue

                    serverlist = data['vBucketServerMap']['serverList']
                    vbucketmapfastforward = {}
                    index = 0
                    if 'vBucketMapForward' in data['vBucketServerMap']:
                        for vbucket in data['vBucketServerMap']['vBucketMapForward']:
                            vbucketmapfastforward[index] = serverlist[vbucket[0]]
                            index += 1
                        self._vBucketMapFastForward_lock.acquire()
                        self._vBucketMapFastForward = deepcopy(vbucketmapfastforward)
                        self._vBucketMapFastForward_lock.release()
                    vbucketmap = {}
                    index = 0
                    for vbucket in data['vBucketServerMap']['vBucketMap']:
                        vbucketmap[index] = serverlist[vbucket[0]]
                        index += 1

                    # only update vBucketMap if we don't have a fastforward
                    # on a not_mb_vbucket error, we already update the
                    # vBucketMap from the fastforward map
                    if not vbucketmapfastforward:
                        self._vBucketMap_lock.acquire()
                        self._vBucketMap = deepcopy(vbucketmap)
                        self._vBucketMap_lock.release()

                    new_servers = []
                    nodes = data["nodes"]
                    for node in nodes:
                        if node["clusterMembership"] == "active" and node["status"] == "healthy":
                            hostport = node["hostname"]
                            new_servers.append({"ip":hostport.split(":")[0],
                                                "port":int(hostport.split(":")[1]),
                                                "username":self.rest_username,
                                                "password":self.rest_password})
                    new_servers.sort()
                    self.servers_lock.acquire()
                    self.servers = deepcopy(new_servers)
                    self.servers_lock.release()


    def init_vbucket_connections(self):
        # start up all vbucket connections
        self._vBucketMap_lock.acquire()
        vbucketcount = len(self._vBucketMap)
        self._vBucketMap_lock.release()
        for i in range(vbucketcount):
            self.start_vbucket_connection(i)

    def start_vbucket_connection(self,vbucket):
        self._vBucketMap_lock.acquire()
        server = deepcopy(self._vBucketMap[vbucket])
        self._vBucketMap_lock.release()
        serverIp, serverPort = server.split(":")
        if not server in self._memcacheds:
            self._memcacheds[server] = MemcachedClientHelper.direct_client(self.rest, serverIp, serverPort, self.bucket)

    def start_vbucket_fastforward_connection(self,vbucket):
        self._vBucketMapFastForward_lock.acquire()
        if not vbucket in self._vBucketMapFastForward:
            self._vBucketMapFastForward_lock.release()
            return
        server = deepcopy(self._vBucketMapFastForward[vbucket])
        self._vBucketMapFastForward_lock.release()
        serverIp, serverPort = server.split(":")
        if not server in self._memcacheds:
            self._memcacheds[server] = MemcachedClientHelper.direct_client(self.rest, serverIp, serverPort, self.bucket)

    def restart_vbucket_connection(self,vbucket):
        self._vBucketMap_lock.acquire()
        server = deepcopy(self._vBucketMap[vbucket])
        self._vBucketMap_lock.release()
        serverIp, serverPort = server.split(":")
        if server in self._memcacheds:
            self._memcacheds[server].close()
        self._memcacheds[server] = MemcachedClientHelper.direct_client(self.rest, serverIp, serverPort, self.bucket)

    def reconfig_vbucket_map(self, vbucket=-1):
        vb_ready = RestHelper(self.rest).vbucket_map_ready(self.bucket, 60)
        if not vb_ready:
            raise Exception("vbucket map is not ready for bucket {0}".format(self.bucket))
        vBuckets = self.rest.get_vbuckets(self.bucket)
        self.vbucket_count = len(vBuckets)
        bucket_info = self.rest.get_bucket(self.bucket)
        nodes = bucket_info.nodes

        self._vBucketMap_lock.acquire()
        for vBucket in vBuckets:
            if vBucket.id == vbucket or vbucket == -1:
                self._vBucketMap[vBucket.id] = vBucket.master
        self._vBucketMap_lock.release()

    def memcached(self, key, fastforward=False):
        self._vBucketMap_lock.acquire()
        self._vBucketMapFastForward_lock.acquire()
        vBucketId = (zlib.crc32(key) >> 16) & (len(self._vBucketMap) - 1)

        if fastforward and vBucketId in self._vBucketMapFastForward:
            # only try the fastforward if we have an entry
            # otherwise we just wait for the main map to update
            self.start_vbucket_fastforward_connection(vBucketId)
            self._vBucketMap[vBucketId] = self._vBucketMapFastForward[vBucketId]

        if vBucketId not in self._vBucketMap:
            msg = "vbucket map does not have an entry for vb : {0}"
            self._vBucketMapFastForward_lock.release()
            self._vBucketMap_lock.release()
            raise Exception(msg.format(vBucketId))
        if self._vBucketMap[vBucketId] not in self._memcacheds:
            msg = "smart client does not have a mc connection for server : {0}"
            self._vBucketMapFastForward_lock.release()
            self._vBucketMap_lock.release()
            raise Exception(msg.format(self._vBucketMap[vBucketId]))
        r = self._memcacheds[self._vBucketMap[vBucketId]]
        self._vBucketMapFastForward_lock.release()
        self._vBucketMap_lock.release()
        return r

    def vbucketid(self, key):
        self._vBucketMap_lock.acquire()
        r = (zlib.crc32(key) >> 16) & (len(self._vBucketMap) - 1)
        self._vBucketMap_lock.release()
        return r

    def done(self):
        if self.dispatcher:
            self.dispatcher.shutdown()
            if self.verbose:
                self.log.info("dispatcher shutdown invoked")
            [self._memcacheds[ip].close() for ip in self._memcacheds]
            if self.verbose:
                self.log.info("closed all memcached open connections")
            self.dispatcher = None


    def _respond(self, item, event):
        timeout = 30
        event.wait(timeout)
        if not event.is_set():
            # if we timeout, then try to reconnect to the server
            # responsible for this vbucket
            self.restart_vbucket_connection(self.vbucketid(item['key']))
            raise MemcachedTimeoutException(item, timeout)
        if "error" in item["response"]:
            raise item["response"]["error"]
        return item["response"]["return"]

    def get(self, key):
        event = Event()
        item = {"operation": "get", "key": key, "event": event, "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def gat(self, key, expiry):
        event = Event()
        item = {"operation": "gat", "key": key, "expiry": expiry, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)


    def touch(self, key, expiry):
        event = Event()
        item = {"operation": "touch", "key": key, "expiry": expiry, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def cas(self, key, expiry, flags, old_value, value):
        event = Event()
        item = {"operation": "cas", "key": key, "expiry": expiry, "flags": flags, "old_value": old_value, "value": value
            , "event": event, "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def decr(self, key, amount=1, init=0, expiry=0):
        event = Event()
        item = {"operation": "decr", "key": key, "amount": amount, "init": init, "expiry": expiry, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def set(self, key, expiry, flags, value):
        event = Event()
        item = {"operation": "set", "key": key, "expiry": expiry, "flags": flags, "value": value, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def add(self, key, expiry, flags, value):
        event = Event()
        item = {"operation": "add", "key": key, "expiry": expiry, "flags": flags, "value": value, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def append(self, key, value, cas=0):
        event = Event()
        item = {"operation": "append", "key": key, "cas": cas, "value": value, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def delete(self, key, cas=0):
        event = Event()
        item = {"operation": "delete", "key": key, "cas": cas, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def prepend(self, key, value, cas=0):
        event = Event()
        item = {"operation": "prepend", "key": key, "cas": cas, "value": value, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)


    def getl(self, key, expiry=15):
        event = Event()
        item = {"operation": "getl", "key": key, "expiry": expiry, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def replace(self, key, expiry, flags, value):
        event = Event()
        item = {"operation": "replace", "key": key, "expiry": expiry, "flags": flags, "value": value, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def incr(self, key, amount=1, init=0, expiry=0):
        event = Event()
        item = {"operation": "incr", "key": key, "amount": amount, "init": init, "expiry": expiry, "event": event,
                "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)
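
A worked sketch of the key-to-vbucket mapping used by memcached() and vbucketid() above: the CRC32 of the key is shifted right by 16 bits and masked with map_size - 1, which assumes the vbucket map size is a power of two (1024 below is an assumed value, not taken from the original code).

import zlib

def vbucket_for(key, num_vbuckets=1024):
    # mirrors the expression used in memcached() / vbucketid()
    return (zlib.crc32(key) >> 16) & (num_vbuckets - 1)

print(vbucket_for(b'some-key'))
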
Example no. 24
0
class BaseDataHandler(object):

    def __init__(self, dh_config):
        '''
        Constructor for all data handlers.


        @param dh_config: Dictionary containing configuration parameters for the data handler
        '''
        self._polling = False           # Moved these four variables so they're instance variables, not class variables
        self._polling_glet = None
        self._dh_config = {}
        self._terminate_polling = None
        self._acquiring_data = None
        self._params = {
            'POLLING_INTERVAL': 3600,
            'PATCHABLE_CONFIG_KEYS': ['stream_id', 'constraints', 'stream_route']
        }

        self._dh_config = dh_config

        self._semaphore = Semaphore()

    def set_event_callback(self, evt_callback):
        """
        Sets a callback function to be triggered on events

        @param evt_callback:
        """
        self._event_callback = evt_callback

    def _dh_event(self, type, value):
        event = {
            'type': type,
            'value': value,
            'time': time.time()
        }
        self._event_callback(event)

    def _poll(self):
        """
        Internal polling method, run inside a greenlet, that triggers execute_acquire_sample without configuration mods
        The polling interval (in seconds) is retrieved from the POLLING_INTERVAL parameter
        """
        self._polling = True
        self._terminate_polling = Event()
        interval = get_safe(self._params, 'POLLING_INTERVAL', 3600)
        log.debug('Polling interval: {0}'.format(interval))

        while not self._terminate_polling.wait(timeout=interval):
            self.execute_acquire_sample()

    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Command a DataHandler by request-reply messaging. Package command
        message and send on blocking command socket. Block on same socket
        to receive the reply. Return the driver reply.
        @param cmd The DataHandler command identifier.
        @param args Positional arguments of the command.
        @param kwargs Keyword arguments of the command.
        @throws InstrumentCommandException if the command is not recognized
        @retval Command result.
        """
        # Package command dictionary.

        #need to account for observatory_execute_resource commands
        #connect -> Not used
        #get_current_state -> Not used
        #discover -> Not used
        #disconnect -> Not used

        log.debug('cmd_dvr received command \'{0}\' with: args={1} kwargs={2}'.format(cmd, args, kwargs))

        reply = None
        if cmd == 'initialize':
            # Delegate to BaseDataHandler.initialize()
            reply = self.initialize(*args, **kwargs)
        elif cmd == 'get_resource':
            # Delegate to BaseDataHandler.get()
            reply = self.get(*args, **kwargs)
        elif cmd == 'set_resource':
            # Delegate to BaseDataHandler.set()
            reply = self.set(*args, **kwargs)
        elif cmd == 'get_resource_params':
            # Delegate to BaseDataHandler.get_resource_params()
            reply = self.get_resource_params(*args, **kwargs)
        elif cmd == 'get_resource_commands':
            # Delegate to BaseDataHandler.get_resource_commands()
            reply = self.get_resource_commands(*args, **kwargs)
        elif cmd == 'get_resource_capabilities':
            # Delegate to BaseDataHandler.get_resource_commands()
            reply = self.get_resource_capabilities(*args, **kwargs)
        elif cmd == 'execute_acquire_sample':
            # Delegate to BaseDataHandler.execute_acquire_sample()
            reply = self.execute_acquire_sample(*args, **kwargs)
        elif cmd == 'execute_start_autosample':
            # Delegate to BaseDataHandler.execute_start_autosample()
            reply = self.execute_start_autosample(*args, **kwargs)
        elif cmd == 'execute_stop_autosample':
            # Delegate to BaseDataHandler.execute_stop_autosample()
            reply = self.execute_stop_autosample(*args, **kwargs)
        elif cmd == 'execute_resource':
            reply = self.execute_resource(*args, **kwargs)
        elif cmd in ['configure', 'connect', 'disconnect', 'get_current_state', 'discover', 'execute_acquire_data']:
            # Disregard
            log.info('Command \'{0}\' not used by DataHandler'.format(cmd))
            pass
        else:
            desc = 'Command \'{0}\' unknown by DataHandler'.format(cmd)
            log.info(desc)
            raise InstrumentCommandException(desc)

        return reply

    def initialize(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: This should put the DataHandler back into an 'unconfigured' state
        """
        Called from:
                      InstrumentAgent._handler_idle_reset
                      InstrumentAgent._handler_idle_go_inactive
                      InstrumentAgent._handler_stopped_reset
                      InstrumentAgent._handler_stopped_go_inactive
                      InstrumentAgent._handler_observatory_reset
                      InstrumentAgent._handler_observatory_go_inactive
                      InstrumentAgent._handler_uninitialized_initialize
                      |--> ExternalDataAgent._start_driver

        @param args Positional arguments of the command.
        @param kwargs Keyword arguments of the command.
        """
        log.debug('Initializing DataHandler...')
        self._glet_queue = []
        return None

    def configure(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: This should configure the DataHandler for the particular dataset
        """
        Called from:
                      InstrumentAgent._handler_inactive_go_active
        @param args First argument should be a config dictionary
        @param kwargs Keyword arguments of the command.
        @throws InstrumentParameterException if the first argument isn't a config dictionary
        """
        log.debug('Configuring DataHandler: args = {0}'.format(args))
        try:
            self._dh_config = args[0]

        except IndexError:
            raise InstrumentParameterException('\'acquire_sample\' command requires a config dict as the first argument')

        return

    def execute_acquire_sample(self, *args):
        """
        Creates a copy of self._dh_config, creates a publisher, and spawns a greenlet to perform a data acquisition cycle
        If the args[0] is a dict, any entries keyed with one of the 'PATCHABLE_CONFIG_KEYS' are used to patch the config
        Greenlet binds to BaseDataHandler._acquire_sample and passes the publisher and config
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @param args First argument can be a config dictionary
        @throws IndexError if first argument is not a dictionary
        @throws ConfigurationError if required members aren't present
        @retval New ResourceAgentState (COMMAND)
        """
        log.debug('Executing acquire_sample: args = {0}'.format(args))

        # Make a copy of the config to ensure no cross-pollution
        config = self._dh_config.copy()

        # Patch the config if mods are passed in
        try:
            config_mods = args[0]
            if not isinstance(config_mods, dict):
                raise IndexError()

            log.debug('Configuration modifications provided: {0}'.format(config_mods))
            for k in self._params['PATCHABLE_CONFIG_KEYS']:
                p = get_safe(config_mods, k)
                if p is not None:
                    config[k] = p

        except IndexError:
            log.info('No configuration modifications were provided')

        # Verify that there is a stream_id member in the config
        stream_id = get_safe(config, 'stream_id', None)
        if not stream_id:
            raise ConfigurationError('Configuration does not contain required \'stream_id\' member')
        stream_route = get_safe(config, 'stream_route', None)

        if not stream_route:
            raise ConfigurationError('Configuration does not contain required \'stream_route\' member')

        isNew = get_safe(config, 'constraints') is None

        if isNew and not self._semaphore.acquire(blocking=False):
            log.warn('Already acquiring new data - action not duplicated')
            return

        ndc = None
        if isNew:
            # Get the NewDataCheck attachment and add its content to the config
            ext_ds_id = get_safe(config, 'external_dataset_res_id')
            if ext_ds_id:
                ndc = self._find_new_data_check_attachment(ext_ds_id)

        config['new_data_check'] = ndc

        # Create a publisher to pass into the greenlet
        publisher = StandaloneStreamPublisher(stream_id=stream_id, stream_route=stream_route)

        # Spawn a greenlet to do the data acquisition and publishing
        g = spawn(self._acquire_sample, config, publisher, self._unlock_new_data_callback, self._update_new_data_check_attachment)
        log.debug('** Spawned {0}'.format(g))
        self._glet_queue.append(g)
        return ResourceAgentState.COMMAND, None

    def execute_resource(self, *args, **kwargs):
        """
        Function to acquire data and start/stop autosample

        @param args: first argument should be the command to run, second optional argument is a config dictionary
        @param kwargs: Keyword arguments of the command
        @retval Next ResourceAgentState
        """
        cmd = args[0]
        if cmd == DriverEvent.ACQUIRE_SAMPLE:
            if len(args) == 1:
                return self.execute_acquire_sample()
            else:
                return self.execute_acquire_sample(args[1])
        elif cmd == DriverEvent.START_AUTOSAMPLE:
            if len(args) == 1:
                return self.execute_start_autosample()
            else:
                return self.execute_start_autosample(args[1])
        elif cmd == DriverEvent.STOP_AUTOSAMPLE:
            if len(args) == 1:
                return self.execute_stop_autosample()
            else:
                return self.execute_stop_autosample(args[1])

    def execute_start_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Put the DataHandler into streaming mode and start polling for new data
        Called from:
                      InstrumentAgent._handler_observatory_go_streaming

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        @retval Next ResourceAgentState (STREAMING)
        """
        log.debug('Entered execute_start_autosample with args={0} & kwargs={1}'.format(args, kwargs))
        if not self._polling and self._polling_glet is None:
            self._polling_glet = spawn(self._poll)

        return ResourceAgentState.STREAMING, None

    def execute_stop_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Stop polling for new data and put the DataHandler into observatory mode
        Called from:
                      InstrumentAgent._handler_streaming_go_observatory

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        @retval Next ResourceAgentState (COMMAND)
        """
        log.debug('Entered execute_stop_autosample with args={0} & kwargs={1}'.format(args, kwargs))
        if self._polling and self._polling_glet is not None:
            log.debug("Terminating polling")
            self._terminate_polling.set()
            self._polling_glet.join(timeout=30)
            log.debug("Polling terminated")
            self._polling = False
            self._polling_glet = None

        return ResourceAgentState.COMMAND, None

    def get(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Retrieve required parameter

        Called from:
                      InstrumentAgent._handler_get_params

        @throws InstrumentTimeoutException:
        @throws InstrumentProtocolException:
        @throws NotImplementedException:
        @throws InstrumentParameterException: if parameter is not a list or not found
        @retval Parameter value
        """
        try:
            pnames = args[0]
        except IndexError:
            log.warn("No argument provided to get, return all parameters")
            pnames = [DataHandlerParameter.ALL]

        result = None
        if DataHandlerParameter.ALL in pnames:
            result = self._params
        else:
            if not isinstance(pnames, (list, tuple)):
                raise InstrumentParameterException('Get argument not a list or tuple: {0}'.format(pnames))
            result = {}
            for pn in pnames:
                try:
                    log.debug('Get parameter with key: {0}'.format(pn))
                    result[pn] = self._params[pn]
                except KeyError:
                    log.debug('\'{0}\' not found in self._params'.format(pn))
                    raise InstrumentParameterException('{0} is not a valid parameter for this DataHandler.'.format(pn))

        return result

    def set(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Set required parameter.
        Called from:
                      InstrumentAgent._handler_observatory_set_params

        @throws InstrumentTimeoutException:
        @throws InstrumentProtocolException:
        @throws NotImplementedException:
        @throws InstrumentParameterException: Invalid parameter or no parameter dictionary provided
        @retval None
        """

        try:
            params = args[0]

        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        to_raise = []

        if not isinstance(params, dict):
            raise InstrumentParameterException('Set parameters not a dict.')
        else:
            for (key, val) in params.iteritems():
                if key in self._params:
                    log.debug('Set parameter \'{0}\' = {1}'.format(key, val))
                    self._params[key] = val
                else:
                    log.debug('Parameter \'{0}\' not in self._params and cannot be set'.format(key))
                    to_raise.append(key)

        if len(to_raise) > 0:
            log.debug('Raise InstrumentParameterException for un-set parameters: {0}'.format(to_raise))
            raise InstrumentParameterException('Invalid parameter(s) could not be set: {0}'.format(to_raise))

    def get_resource_params(self, *args, **kwargs):
        """
        Return list of resource parameters. Implemented in specific DataHandlers
        Called from:
                      InstrumentAgent._handler_get_resource_params

        @retval list of resource parameters
        """
        return self._params.keys()

    def get_resource_capabilities(self, *args, **kwargs):
        """
        Return list of DataHandler execute commands available.
        Called from:
                      InstrumentAgent._handler_get_resource_capabilities
        @retval list of available execute commands and parameters
        """
        res_cmds = [cmd.replace('execute_', '') for cmd in dir(self) if cmd.startswith('execute_')]
        res_params = ['POLLING_INTERVAL', 'PATCHABLE_CONFIG_KEYS']

        result = [res_cmds, res_params]
        return result

    def get_resource_commands(self, *args, **kwargs):
        """
        Return list of DataHandler execute commands available.
        Called from:
                      InstrumentAgent._handler_get_resource_commands
        @retval list of available execute commands
        """
        cmds = [cmd.replace('execute_', '') for cmd in dir(self) if cmd.startswith('execute_')]
        return cmds

    def _unlock_new_data_callback(self, caller):
        """
        Release the polling semaphore

        @retval None
        """
        log.debug('** Release {0}'.format(caller))
        self._semaphore.release()

    def _find_new_data_check_attachment(self, res_id):
        """
        Returns a list of the last data files that were found

        @param res_id The resource ID of the external dataset resource
        @throws InstrumentException if no attachment is found
        @retval list of data file names and sizes
        """
        rr_cli = ResourceRegistryServiceClient()
        try:
            attachment_objs = rr_cli.find_attachments(resource_id=res_id, include_content=False, id_only=False)
            for attachment_obj in attachment_objs:
                kwds = set(attachment_obj.keywords)
                if 'NewDataCheck' in kwds:
                    log.debug('Found NewDataCheck attachment: {0}'.format(attachment_obj._id))
                    return msgpack.unpackb(attachment_obj.content)
                else:
                    log.debug('Found attachment: {0}'.format(attachment_obj))
        except NotFound:
            raise InstrumentException('ExternalDatasetResource \'{0}\' not found'.format(res_id))

    @classmethod
    def _update_new_data_check_attachment(cls, res_id, new_content):
        """
        Update the list of data files for the external dataset resource

        @param res_id the ID of the external dataset resource
        @param new_content list of new files found on data host
        @throws InstrumentException if external dataset resource can't be found
        @retval the attachment ID
        """
        rr_cli = ResourceRegistryServiceClient()
        try:
            # Delete any attachments with the "NewDataCheck" keyword
            attachment_objs = rr_cli.find_attachments(resource_id=res_id, include_content=False, id_only=False)
            for attachment_obj in attachment_objs:
                kwds = set(attachment_obj.keywords)
                if 'NewDataCheck' in kwds:
                    log.debug('Delete NewDataCheck attachment: {0}'.format(attachment_obj._id))
                    rr_cli.delete_attachment(attachment_obj._id)
                else:
                    log.debug('Found attachment: {0}'.format(attachment_obj))

            # Create the new attachment
            att = Attachment(name='new_data_check', attachment_type=AttachmentType.ASCII, keywords=['NewDataCheck', ], content_type='text/plain', content=msgpack.packb(new_content))
            att_id = rr_cli.create_attachment(resource_id=res_id, attachment=att)

        except NotFound:
            raise InstrumentException('ExternalDatasetResource \'{0}\' not found'.format(res_id))

        return att_id

    @classmethod
    def _acquire_sample(cls, config, publisher, unlock_new_data_callback, update_new_data_check_attachment):
        """
        Ensures required keys (such as stream_id) are available from config, configures the publisher and then calls:
             BaseDataHandler._constraints_for_new_request (only if config does not contain 'constraints')
             BaseDataHandler._publish_data passing BaseDataHandler._get_data as a parameter
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @param publisher the publisher used to publish data
        @param unlock_new_data_callback BaseDataHandler callback function to allow conditional unlocking of the BaseDataHandler._semaphore
        @param update_new_data_check_attachment classmethod to update the external dataset resources file list attachment
        @throws InstrumentParameterException if the data constraints are not a dictionary
        @retval None
        """
        log.debug('start _acquire_sample: config={0}'.format(config))

        cls._init_acquisition_cycle(config)

        constraints = get_safe(config, 'constraints')
        if not constraints:
            gevent.getcurrent().link(unlock_new_data_callback)
            try:
                constraints = cls._constraints_for_new_request(config)
            except NoNewDataWarning:
                #log.info(nndw.message)
                if get_safe(config, 'TESTING'):
                    #log.debug('Publish TestingFinished event')
                    pub = EventPublisher('DeviceCommonLifecycleEvent')
                    pub.publish_event(origin='BaseDataHandler._acquire_sample', description='TestingFinished')
                return

            if constraints is None:
                raise InstrumentParameterException("Data constraints returned from _constraints_for_new_request cannot be None")
            config['constraints'] = constraints
        elif isinstance(constraints, dict):
            addnl_constr = cls._constraints_for_historical_request(config)
            if addnl_constr is not None and isinstance(addnl_constr, dict):
                constraints.update(addnl_constr)
        else:
            raise InstrumentParameterException('Data constraints must be of type \'dict\':  {0}'.format(constraints))

        cls._publish_data(publisher, cls._get_data(config))

        if 'set_new_data_check' in config:
            update_new_data_check_attachment(config['external_dataset_res_id'], config['set_new_data_check'])

        # Publish a 'TestFinished' event
        if get_safe(config, 'TESTING'):
            #log.debug('Publish TestingFinished event')
            pub = EventPublisher('DeviceCommonLifecycleEvent')
            pub.publish_event(origin='BaseDataHandler._acquire_sample', description='TestingFinished')

    @classmethod
    def _constraints_for_historical_request(cls, config):
        """
        Determines any constraints that must be added to the constraints configuration.
        This should present a uniform constraints configuration to be sent to _get_data
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @retval dictionary containing the constraints
        """
        raise NotImplementedException('{0}.{1} must implement \'_constraints_for_historical_request\''.format(cls.__module__, cls.__name__))

    @classmethod
    def _init_acquisition_cycle(cls, config):
        """
        Allows the concrete implementation to initialize/prepare objects the data handler
        will use repeatedly (such as a dataset object) in cls._constraints_for_new_request and/or cls._get_data
        Objects should be added to the config so they are available later in the workflow
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        """
        raise NotImplementedException('{0}.{1} must implement \'_init_acquisition_cycle\''.format(cls.__module__, cls.__name__))

    @classmethod
    def _constraints_for_new_request(cls, config):
        #TODO: Document what "constraints" looks like (yml)!!
        """
        Determines the appropriate constraints for acquiring any "new data" from the external dataset
        Returned value cannot be None and is assigned to config['constraints']
        The format of the constraints are documented:
        @param config dict of configuration parameters - may be used to generate the returned 'constraints' dict
        @retval dict that contains the constraints for retrieval of new data from the external dataset
        """
        raise NotImplementedException('{0}.{1} must implement \'_constraints_for_new_request\''.format(cls.__module__, cls.__name__))

    @classmethod
    def _get_data(cls, config):
        """
        Iterable function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        @param config dict containing configuration parameters, may include constraints, formatters, etc
        @retval an iterable that returns well-formed Granule objects on each iteration
        """
        raise NotImplementedException('{0}.{1} must implement \'_get_data\''.format(cls.__module__, cls.__name__))

    @classmethod
    def _publish_data(cls, publisher, data_generator):
        """
        Iterates over the data_generator and publishes granules to the stream indicated in stream_id
        @param publisher to publish the data with
        @param data_generator enumerator to cycle through the data
        @throws InstrumentDataException if data_generator isn't an enumerator
        """
        if data_generator is None or not hasattr(data_generator, '__iter__'):
            raise InstrumentDataException('Invalid object returned from _get_data: returned object cannot be None and must have \'__iter__\' attribute')

        for count, gran in enumerate(data_generator):
            if isinstance(gran, Granule):
                #log.warn('_publish_data: {0}\n{1}'.format(count, gran))
                publisher.publish(gran)
            else:
                log.warn('Could not publish object of {0} returned by _get_data: {1}'.format(type(gran), gran))

        publisher.close()
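
execute_acquire_sample() above guards against duplicate unconstrained requests by trying a non-blocking acquire on the handler's Semaphore and bailing out if another acquisition already holds it. Here is a small self-contained sketch of that guard, assuming the newer gevent.lock module (these examples import from the older gevent.coros path); acquire_new_data is an illustrative name.

from gevent.lock import Semaphore   # gevent.coros.Semaphore in the older releases used above
import gevent

new_data_lock = Semaphore()

def acquire_new_data(worker_id):
    # Non-blocking acquire: only one unconstrained "new data" cycle may run at a time.
    if not new_data_lock.acquire(blocking=False):
        print('worker %d: already acquiring new data - action not duplicated' % worker_id)
        return
    try:
        gevent.sleep(0.1)            # stand-in for the real acquisition/publish cycle
        print('worker %d: acquisition finished' % worker_id)
    finally:
        new_data_lock.release()      # the real handler releases via _unlock_new_data_callback

gevent.joinall([gevent.spawn(acquire_new_data, i) for i in range(3)])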
Esempio n. 25
0
class GeventTransport(SocketTransport):
  '''
  Transport using gevent backend. It relies on gevent's implementation of 
  sendall to send whole frames at a time. On the input side, it uses a gevent
  semaphore to ensure exclusive access to the socket and input buffer.

  NOTE:
  This is new to haigha and there may be integration issues with some versions
  of RabbitMQ. In particular, what may happen when a blocking call to send a
  frame allows another thread to queue another frame, but the frames can't be 
  interlaced due to how the protocol is defined or implemented. A lot of the
  'synchronous' calls have very specific expectations. If this becomes a
  problem then the quickest way to solve it would be to switch to non-blocking
  so that the same thread of execution is allowed to send all of its frames in
  sequence. In that case, the gevent implementation may be pushed into the
  EventSocket as another supported concurrency lib.

  Note also that the blocking nature of the sockets means that any threads
  running IO should actively yield with a sleep(0) to ensure other threads are
  serviced. In a saturated environment, failure to do so will lead to
  significant lags in signal handling or other IO. In practice, a typical
  client is also talking to other data stores, and those calls may provide
  enough yield points.
  '''

  def __init__(self, *args, **kwargs):
    super(GeventTransport,self).__init__(*args)

    self._synchronous = kwargs.get('synchronous',False)
    self._read_lock = Semaphore()
    self._write_lock = Semaphore()

  ###
  ### Transport API
  ###

  # nothing to overload with connect()

  def read(self, timeout=None):
    '''
    Read from the transport. If no data is available, should return None. If
    timeout>0, will only block for `timeout` seconds.
    '''
    self._read_lock.acquire()
    try:
      return super(GeventTransport,self).read(timeout=timeout)
    finally:
      self._read_lock.release()

  def buffer(self, data):
    '''
    Buffer unused bytes from the input stream.
    '''
    self._read_lock.acquire()
    try:
      return super(GeventTransport,self).buffer(data)
    finally:
      self._read_lock.release()

  def write(self, data):
    '''
    Write some bytes to the transport.
    '''
    # MUST use a lock here else gevent could raise an exception if 2 greenlets
    # try to write at the same time. I was hoping that sendall() would do that
    # blocking for me, but I guess not. May require an eventsocket-like buffer
    # to speed up under high load.
    self._write_lock.acquire()
    try:
      return super(GeventTransport,self).write(data)
    finally:
      self._write_lock.release()
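
The docstring above explains why write() takes a Semaphore: without it, two greenlets could interleave partial frames on the same socket. Below is a self-contained sketch of that serialization with a shared list standing in for the socket; send_frame and frames_sent are illustrative names.

from gevent.lock import Semaphore
import gevent

write_lock = Semaphore()
frames_sent = []

def send_frame(chunks):
    # Hold the lock for the whole frame so chunks from different greenlets never interleave.
    write_lock.acquire()
    try:
        for chunk in chunks:         # each chunk stands in for a partial socket send
            frames_sent.append(chunk)
            gevent.sleep(0)          # yield, as the note above recommends for blocking IO
    finally:
        write_lock.release()

gevent.joinall([gevent.spawn(send_frame, ['A1', 'A2']),
                gevent.spawn(send_frame, ['B1', 'B2'])])
print(frames_sent)                   # each frame stays contiguous, e.g. ['A1', 'A2', 'B1', 'B2']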
Esempio n. 26
0
class BooleanBlindTechnique:
    def __init__(self, query, requester):
        self.query = query
        self.requester = requester
        self.rungl = None

    def _reset(self):
        '''
        reset all the variables used for keeping track of internal state
        '''
        #a list of Character()s
        self.results = []
        #a list of strings
        self.str_results = []
        #character generators take care of building the Character objects. we need one per row
        self.char_gens = []
        #a queue for communications between Character()s and request_makers
        self.q = Queue()
        #"threads" that run the Character()s
        self.character_pool = Pool(self.concurrency)
        #"threads" that make requests
        self.request_makers = [gevent.spawn(self._request_maker) for i in range(self.concurrency)]
        #fire this event when shutting down
        self.shutting_down = Event()
        #do we need to add more rows?
        self.need_more_rows = True
        #use this as a lock to know when not to mess with self.results        
        self.results_lock = Semaphore(1)
        #request_count is the number of requests made on the current run
        self.request_count = 0
        #failure_count is the number of requests made on the current run
        self.failure_count = 0

    def _request_maker(self):
        '''
        this runs in a gevent "thread". It is a worker
        '''
        #keep going until we shut down the technique
        while not self.shutting_down.is_set():
            #pull the info needed to make a request from the queue
            row_index,char_index,char_val,comparator,char_asyncresult = self.q.get()

            #build out our query object
            query = copy(self.query)
            query.set_option('row_index',str(row_index))
            query.set_option('char_index',str(char_index))
            query.set_option('char_val',str(ord(char_val)))
            query.set_option('comparator',comparator)
            query_string = query.render()

            self.request_count += 1

            count = 0
            response = None
            while response is None:
                try:
                    response = self.requester.make_request(query_string)
                except SendRequestFailed:
                    self.failure_count += 1
                    response = None
                    gevent.sleep(.01 * 2 ** count)                    
                    if count == 10: raise SendRequestFailed('cant request')
                count += 1

            char_asyncresult.set(response)

    def _character_generator(self,row_index):
        '''
        creates a Character object for us. this generator is useful just because it keeps track of the char_index
        '''
        char_index = 1
        row_die_event = AsyncResult()
        while not self.shutting_down.is_set():
            c = BlindCharacter(\
                row_index   = row_index,\
                char_index  = char_index,\
                queue       = self.q,\
                row_die     = row_die_event)
            char_index += 1
            #fire off the Character within our Pool.
            self.character_pool.spawn(c.run)
            yield c

    def _adjust_row_lengths(self):
        ''' 
        if a row is full of "success", but we haven't reached the end yet (the last element isn't "error")
        then we need to increase the row_len.
        '''
        while not self.shutting_down.is_set():
            self.results_lock.acquire()

            if self.row_len is not None:
                unused_threads = self.concurrency - reduce(lambda x,row: x + row.count('working'),self.results,0)
                rows_working = len(filter(lambda row: 'working' in row,self.results))
                if rows_working == 0:
                    add_to_rows = self.row_len
                else:
                    add_to_rows = unused_threads//rows_working
                    add_to_rows = [add_to_rows,1][add_to_rows==0]
            else:
                add_to_rows = 1

            for row_index in range(len(self.results)):
                #if the row isn't finished or hasn't been started yet, we add Character()s to the row
                if 'error' not in self.results[row_index]:
                    self.results[row_index] += [self.char_gens[row_index].next() for i in range(add_to_rows)]
            self.results_lock.release()
            gevent.sleep(.3)

    def _add_rows(self):
        '''
        look at how many gevent "threads" are being used and add more rows to correct this
        '''
        if self.row_len is not None:
            rows_to_work_on = self.concurrency // self.row_len
        else:
            rows_to_work_on = self.concurrency
        rows_to_work_on = [rows_to_work_on,1][rows_to_work_on == 0]

        row_index = 0

        # keep adding new rows until we don't need any more
        while self.need_more_rows:
            working_rows = len(filter(lambda row: 'working' in row,self.results))
            for row in range(rows_to_work_on - working_rows):
                self.char_gens.append(self._character_generator(row_index))
                self.results.append([])
                row_index += 1

            gevent.sleep(.3)
            self.need_more_rows = not(len(self.results) and filter(lambda row: len(row) and row[0] == 'error',self.results))
        
        # delete any extra rows.
        while not self.shutting_down.is_set():
            self.results_lock.acquire()
            # delete any rows that shouldn't have been added in the first place
            errored = filter(lambda ri: len(self.results[ri]) and self.results[ri][0] == 'error',range(len(self.results)))
            if errored:
                end = min(errored)
                for ri in xrange(len(self.results)-1,end-1,-1):
                    del(self.results[ri])

            self.results_lock.release()    
            #if there aren't going to be any more rows in need of deletion we can stop this nonsense
            if self.results and self.results[-1][0] == 'success':
                break
            gevent.sleep(.3)

    def _keep_going(self):
        '''
        Look at the results gathered so far and determine if we should keep going. we want to keep going until we have an empty row
        '''
        # chill out until we don't need any more rows
        while self.need_more_rows:
            gevent.sleep(1)

        # chill out until all the rows have finished working
        while filter(lambda row:'error' not in row or 'working' in row[:row.index('error')],self.results):
            gevent.sleep(.5)
        
        # call it quits
        self.shutting_down.set()

    def _run(self):
        self.kg_gl = gevent.spawn(self._keep_going)
        self.ar_gl = gevent.spawn(self._add_rows)
        self.arl_gl = gevent.spawn(self._adjust_row_lengths)

        self.kg_gl.join()
        self.ar_gl.join()
        self.arl_gl.join()
    
        self.character_pool.join()
        gevent.killall(self.request_makers)
        gevent.joinall(self.request_makers)

    def run(self,row_len=None,concurrency=20):
        '''
        run the exploit. returns the data retrieved.
            :concurrency    how many gevent "threads" to use. This is useful for throttling the attack.
            :row_len        An estimated starting point for the length of rows. This will get adjusted as the attack goes on.
        '''
        self.run_start_time = time()

        self.row_len = row_len
        self.concurrency = concurrency

        #start fresh
        self._reset()

        self.rungl = gevent.spawn(self._run)
        
        return self.rungl

    def get_results(self,color=False):
        if not color:
            return filter(lambda row: row != '',[''.join([str(x) for x in row]) for row in self.results])
        
        rval = []
        running_status = "unknown"

        for row in self.results:
            if len(row):
                running_status = "unknown"
                row_string = ""
                for c in row:
                    cstatus = c.get_status()
                    if cstatus != running_status:
                        row_string += COLORS[cstatus]
                        running_status = cstatus
                    row_string += str(c)
                rval.append(row_string + COLORS['endc'])
        return rval
            

        #return filter(lambda row: row != '',[''.join([COLORS[x.get_status()] + str(x) + COLORS['endc'] for x in row]) for row in self.results])        

    def get_status(self):
        status = ""
        status += "requests: %d\t" % self.request_count
        status += "failures: %d\t" % self.failure_count
        status += "rows: %d\t" % reduce(lambda x,row: ('success' in row)+x,self.results,0)
        status += "working threads: %d\t" %  reduce(lambda x,row: x + row.count('working'),self.results,0)
        
        chars = reduce(lambda x,row: row.count('success') + x,self.results,0)
        status += "chars: %d\t" % chars

        if self.run_start_time:
            run_time = time() - self.run_start_time
            status += "time: %f\t" % run_time
            status += "char/sec: %f\t" % (chars/run_time)

        if chars: rc = float(self.request_count) / chars
        else: rc = 0.0
        status += "req/char: %f\t" % rc

        return status
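
BooleanBlindTechnique uses Semaphore(1) purely as a mutex around self.results and a Pool to cap the number of worker greenlets. Here is a reduced sketch of that combination, with a sleep standing in for the HTTP request; worker and results are illustrative names.

from gevent.lock import Semaphore
from gevent.pool import Pool
import gevent

results = []
results_lock = Semaphore(1)          # plain mutex, like results_lock above
pool = Pool(5)                       # cap on concurrent worker greenlets

def worker(i):
    gevent.sleep(0)                  # stand-in for making a request
    results_lock.acquire()
    try:
        results.append(i)            # only touch shared state while holding the lock
    finally:
        results_lock.release()

for i in range(20):
    pool.spawn(worker, i)
pool.join()
print(len(results))                  # 20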
Esempio n. 27
0
class RateLimiter(object):

    def __init__(self, cps=None, cpm=None, cph=None, cpd=None, max_wait=None):
        self.__max_wait = max_wait
        self.__limits = {}

        if cps is not None and cps != 0:
            self.__limits['calls per second'] = RateLimit(cps, 1)

        if cpm is not None and cpm != 0:
            self.__limits['calls per minute'] = RateLimit(cpm, 60)

        if cph is not None and cph != 0:
            self.__limits['calls per hour'] = RateLimit(cph, 60*60)

        if cpd is not None and cpd != 0:
            self.__limits['calls per day'] = RateLimit(cpd, 60*60*24)

        self.__semaphore = Semaphore()

    def __enter__(self):
        begin  = time.time()
        locked = self.__semaphore.acquire(timeout=self.__max_wait)

        if locked:
            try:
                max_wait = 0
                max_name = None
                now      = time.time()

                for name, limit in self.__limits.items():
                    wait = limit.wait(now)

                    if wait > max_wait:
                        max_name = name
                        max_wait = wait

                if self.__max_wait is not None and max_wait > self.__max_wait:
                    raise RateException('Rate limit exceeded for %s' % max_name)

                if max_wait > 0:
                    end = now + max_wait

                    while True:
                        cur = time.time()
                        if cur >= end:
                            break
                        else:
                            sleep( end - cur)

                for name, limit in self.__limits.items():
                    wait = limit.wait()
                    if wait > 0:
                        logs.warning('%s wait should be 0 is %s' % (name, wait))

                    limit.call()
            finally:
                self.__semaphore.release()
        else:
            raise RateException('Max wait (%s) exceeded trying to get slot' % self.__max_wait)

    def __exit__(self, exctype, value, trace):
        return False
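
Since the RateLimit class and logs module used above aren't shown, here is a self-contained single-limit variant of the same idea: entry into the context manager is serialized through a Semaphore, and the caller sleeps until a call slot is free. SimpleRateLimiter is illustrative, not the original class.

import time
import gevent
from gevent.lock import Semaphore

class SimpleRateLimiter(object):
    """At most `calls` entries per `period` seconds, serialized through a Semaphore."""
    def __init__(self, calls, period):
        self._calls, self._period = calls, period
        self._stamps = []
        self._lock = Semaphore()

    def __enter__(self):
        self._lock.acquire()
        try:
            now = time.time()
            # Drop timestamps that have aged out of the window.
            self._stamps = [t for t in self._stamps if now - t < self._period]
            if len(self._stamps) >= self._calls:
                # Sleep until the oldest call in the window expires.
                gevent.sleep(self._period - (now - self._stamps[0]))
            self._stamps.append(time.time())
        finally:
            self._lock.release()

    def __exit__(self, exctype, value, trace):
        return False

limiter = SimpleRateLimiter(calls=2, period=1.0)
for i in range(4):
    with limiter:
        print('call %d at %.2f' % (i, time.time()))   # roughly two calls per second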
Esempio n. 28
0
class BaseDataHandler(object):
    _params = {
        'POLLING_INTERVAL': 3600,
        'PATCHABLE_CONFIG_KEYS': ['stream_id', 'constraints']
    }
    _polling = False
    _polling_glet = None
    _dh_config = {}
    _rr_cli = None

    def __init__(self, rr_cli, stream_registrar, dh_config):
        self._dh_config = dh_config
        self._stream_registrar = stream_registrar
        self._rr_cli = rr_cli

    def set_event_callback(self, evt_callback):
        self._event_callback = evt_callback

    def _dh_event(self, type, value):
        event = {'type': type, 'value': value, 'time': time.time()}
        self._event_callback(event)

    def _poll(self):
        """
        Internal polling method, run inside a greenlet, that triggers execute_acquire_data without configuration mods
        The polling interval (in seconds) is retrieved from the POLLING_INTERVAL parameter
        """
        self._polling = True
        interval = get_safe(self._params, 'POLLING_INTERVAL', 3600)
        log.debug('Polling interval: {0}'.format(interval))
        while self._polling:
            self.execute_acquire_data()
            time.sleep(interval)

    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Command a DataHandler by request-reply messaging. Package command
        message and send on blocking command socket. Block on same socket
        to receive the reply. Return the driver reply.
        @param cmd The DataHandler command identifier.
        @param args Positional arguments of the command.
        @param kwargs Keyword arguments of the command.
        @retval Command result.
        """
        # Package command dictionary.

        #need to account for observatory_execute_resource commands
        #connect -> Not used
        #get_current_state -> Not used
        #discover -> Not used
        #disconnect -> Not used

        log.debug('cmd_dvr received command \'{0}\' with: args={1} kwargs={2}'.
                  format(cmd, args, kwargs))

        reply = None
        if cmd == 'initialize':
            # Delegate to BaseDataHandler.initialize()
            reply = self.initialize(*args, **kwargs)
        elif cmd == 'get':
            # Delegate to BaseDataHandler.get()
            reply = self.get(*args, **kwargs)
        elif cmd == 'set':
            # Delegate to BaseDataHandler.set()
            reply = self.set(*args, **kwargs)
        elif cmd == 'get_resource_params':
            # Delegate to BaseDataHandler.get_resource_params()
            reply = self.get_resource_params(*args, **kwargs)
        elif cmd == 'get_resource_commands':
            # Delegate to BaseDataHandler.get_resource_commands()
            reply = self.get_resource_commands(*args, **kwargs)
        elif cmd == 'execute_acquire_data':
            # Delegate to BaseDataHandler.execute_acquire_data()
            reply = self.execute_acquire_data(*args, **kwargs)
        elif cmd == 'execute_start_autosample':
            # Delegate to BaseDataHandler.execute_start_autosample()
            reply = self.execute_start_autosample(*args, **kwargs)
        elif cmd == 'execute_stop_autosample':
            # Delegate to BaseDataHandler.execute_stop_autosample()
            reply = self.execute_stop_autosample(*args, **kwargs)
        elif cmd in [
                'configure', 'connect', 'disconnect', 'get_current_state',
                'discover', 'execute_acquire_sample'
        ]:
            # Disregard
            log.info('Command \'{0}\' not used by DataHandler'.format(cmd))
            pass
        else:
            desc = 'Command \'{0}\' unknown by DataHandler'.format(cmd)
            log.info(desc)
            raise InstrumentCommandException(desc)

        return reply

    def initialize(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: This should put the DataHandler back into an 'unconfigured' state
        """
        Called from:
                      InstrumentAgent._handler_idle_reset
                      InstrumentAgent._handler_idle_go_inactive
                      InstrumentAgent._handler_stopped_reset
                      InstrumentAgent._handler_stopped_go_inactive
                      InstrumentAgent._handler_observatory_reset
                      InstrumentAgent._handler_observatory_go_inactive
                      InstrumentAgent._handler_uninitialized_initialize
                      |--> ExternalDataAgent._start_driver
        """
        log.debug('Initializing DataHandler...')
        self._glet_queue = []
        self._semaphore = Semaphore()
        return None

    def configure(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: This should configure the DataHandler for the particular dataset
        """
        Called from:
                      InstrumentAgent._handler_inactive_go_active
        """
        log.debug('Configuring DataHandler: args = {0}'.format(args))
        try:
            self._dh_config = args[0]

        except IndexError:
            raise InstrumentParameterException(
                '\'acquire_data\' command requires a config dict as the first argument'
            )

        return

    def execute_acquire_data(self, *args):
        """
        Creates a copy of self._dh_config, creates a publisher, and spawns a greenlet to perform a data acquisition cycle
        If the args[0] is a dict, any entries keyed with one of the 'PATCHABLE_CONFIG_KEYS' are used to patch the config
        Greenlet binds to BaseDataHandler._acquire_data and passes the publisher and config
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @param args First argument can be a config dictionary
        """
        log.debug('Executing acquire_data: args = {0}'.format(args))

        # Make a copy of the config to ensure no cross-pollution
        config = self._dh_config.copy()

        # Patch the config if mods are passed in
        try:
            config_mods = args[0]
            if not isinstance(config_mods, dict):
                raise IndexError()

            log.debug('Configuration modifications provided: {0}'.format(
                config_mods))
            for k in self._params['PATCHABLE_CONFIG_KEYS']:
                if get_safe(config_mods, k):
                    config[k] = config_mods[k]

        except IndexError:
            log.info('No configuration modifications were provided')

        # Verify that there is a stream_id member in the config
        stream_id = get_safe(config, 'stream_id')
        if not stream_id:
            raise ConfigurationError(
                'Configuration does not contain required \'stream_id\' member')

        isNew = get_safe(config, 'constraints') is None

        if isNew and not self._semaphore.acquire(blocking=False):
            log.warn('Already acquiring new data - action not duplicated')
            return

        if isNew:
            # Get any NewDataCheck attachments and add them to the config
            ext_ds_id = get_safe(config, 'external_dataset_res_id')
            if ext_ds_id:
                try:
                    attachment_objs, _ = self._rr_cli.find_objects(
                        ext_ds_id, PRED.hasAttachment, RT.Attachment, False)
                    for attachment_obj in attachment_objs:
                        kwds = set(attachment_obj.keywords)
                        if 'NewDataCheck' in kwds:
                            log.debug(
                                'Found NewDataCheck attachment: {0}'.format(
                                    attachment_obj))
                            config['new_data_check'] = attachment_obj.content
                            break
                        else:
                            log.debug(
                                'Found attachment: {0}'.format(attachment_obj))
                except NotFound:
                    raise InstrumentException(
                        'ExternalDatasetResource \'{0}\' not found'.format(
                            ext_ds_id))

        if not get_safe(config, 'new_data_check'):
            config['new_data_check'] = None

        # Create a publisher to pass into the greenlet
        publisher = self._stream_registrar.create_publisher(
            stream_id=stream_id)

        # Spawn a greenlet to do the data acquisition and publishing
        g = spawn(self._acquire_data, config, publisher,
                  self._unlock_new_data_callback)
        log.debug('** Spawned {0}'.format(g))
        self._glet_queue.append(g)

    def execute_start_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Put the DataHandler into streaming mode and start polling for new data
        Called from:
                      InstrumentAgent._handler_observatory_go_streaming

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        log.debug(
            'Entered execute_start_autosample with args={0} & kwargs={1}'.
            format(args, kwargs))
        if not self._polling and self._polling_glet is None:
            self._polling_glet = spawn(self._poll)

        return None

    def execute_stop_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Stop polling for new data and put the DataHandler into observatory mode
        Called from:
                      InstrumentAgent._handler_streaming_go_observatory

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        log.debug('Entered execute_stop_autosample with args={0} & kwargs={1}'.
                  format(args, kwargs))
        if self._polling and self._polling_glet is not None:
            self._polling_glet.kill()
            self._polling = False
            self._polling_glet = None

        return None

    def get(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Called from:
                      InstrumentAgent._handler_get_params

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        try:
            pnames = args[0]
        except IndexError:
            log.warn("No argument provided to get, return all parameters")
            pnames = [DataHandlerParameter.ALL]

        result = None
        if DataHandlerParameter.ALL in pnames:
            result = self._params
        else:
            if not isinstance(pnames, (list, tuple)):
                raise InstrumentParameterException(
                    'Get argument not a list or tuple: {0}'.format(pnames))
            result = {}
            for pn in pnames:
                try:
                    log.debug('Get parameter with key: {0}'.format(pn))
                    result[pn] = self._params[pn]
                except KeyError:
                    log.debug('\'{0}\' not found in self._params'.format(pn))
                    raise InstrumentParameterException(
                        '{0} is not a valid parameter for this DataHandler.'.
                        format(pn))

        return result

    def set(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Called from:
                      InstrumentAgent._handler_observatory_set_params

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        # Retrieve required parameter.
        # Raise if no parameter provided, or not a dict.
        try:
            params = args[0]

        except IndexError:
            raise InstrumentParameterException(
                'Set command requires a parameter dict.')

        to_raise = []

        if not isinstance(params, dict):
            raise InstrumentParameterException('Set parameters not a dict.')
        else:
            for (key, val) in params.iteritems():
                if key in self._params:
                    log.debug('Set parameter \'{0}\' = {1}'.format(key, val))
                    self._params[key] = val
                else:
                    log.debug(
                        'Parameter \'{0}\' not in self._params and cannot be set'
                        .format(key))
                    to_raise.append(key)

        if len(to_raise) > 0:
            log.debug(
                'Raise InstrumentParameterException for un-set parameters: {0}'
                .format(to_raise))
            raise InstrumentParameterException(
                'Invalid parameter(s) could not be set: {0}'.format(to_raise))

    def get_resource_params(self, *args, **kwargs):
        """
        Return list of resource parameters. Implemented in specific DataHandlers
        Called from:
                      InstrumentAgent._handler_get_resource_params
        """
        return self._params.keys()

    def get_resource_commands(self, *args, **kwargs):
        """
        Return list of DataHandler execute commands available.
        Called from:
                      InstrumentAgent._handler_get_resource_commands
        """
        cmds = [
            cmd.replace('execute_', '') for cmd in dir(self)
            if cmd.startswith('execute_')
        ]
        return cmds

    def _unlock_new_data_callback(self, caller):
        log.debug('** Release {0}'.format(caller))
        self._semaphore.release()

    @classmethod
    def _acquire_data(cls, config, publisher, unlock_new_data_callback):
        """
        Ensures required keys (such as stream_id) are available from config, configures the publisher and then calls:
             BaseDataHandler._new_data_constraints (only if config does not contain 'constraints')
             BaseDataHandler._publish_data passing BaseDataHandler._get_data as a parameter
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @param unlock_new_data_callback BaseDataHandler callback function to allow conditional unlocking of the BaseDataHandler._semaphore
        """
        log.debug('start _acquire_data: config={0}'.format(config))

        cls._init_acquisition_cycle(config)

        constraints = get_safe(config, 'constraints')
        if not constraints:
            gevent.getcurrent().link(unlock_new_data_callback)
            constraints = cls._new_data_constraints(config)
            if constraints is None:
                raise InstrumentParameterException(
                    "Data constraints returned from _new_data_constraints cannot be None"
                )
            config['constraints'] = constraints

        cls._publish_data(publisher, cls._get_data(config))

        # Publish a 'TestFinished' event
        if get_safe(config, 'TESTING'):
            log.debug('Publish TestingFinished event')
            pub = EventPublisher('DeviceCommonLifecycleEvent')
            pub.publish_event(origin='BaseDataHandler._acquire_data',
                              description='TestingFinished')

    @classmethod
    def _init_acquisition_cycle(cls, config):
        """
        Allows the concrete implementation to initialize/prepare objects the data handler
        will use repeatedly (such as a dataset object) in cls._new_data_constraints and/or cls._get_data
        Objects should be added to the config so they are available later in the workflow
        """
        raise NotImplementedException(
            '{0}.{1} must implement \'_init_acquisition_cycle\''.format(
                cls.__module__, cls.__name__))

    @classmethod
    def _new_data_constraints(cls, config):
        #TODO: Document what "constraints" looks like (yml)!!
        """
        Determines the appropriate constraints for acquiring any "new data" from the external dataset
        Returned value cannot be None and is assigned to config['constraints']
        The format of the constraints are documented:
        @param config dict of configuration parameters - may be used to generate the returned 'constraints' dict
        @retval dict that contains the constraints for retrieval of new data from the external dataset
        """
        raise NotImplementedException(
            '{0}.{1} must implement \'_new_data_constraints\''.format(
                cls.__module__, cls.__name__))

    @classmethod
    def _get_data(cls, config):
        """
        Iterable function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        @param config dict containing configuration parameters, may include constraints, formatters, etc
        @return an iterable that returns well-formed Granule objects on each iteration
        """
        raise NotImplementedException(
            '{0}.{1} must implement \'_get_data\''.format(
                cls.__module__, cls.__name__))

    @classmethod
    def _publish_data(cls, publisher, data_generator):
        """
        Iterates over the data_generator and publishes granules to the stream indicated in stream_id
        """
        if data_generator is None or not hasattr(data_generator, '__iter__'):
            raise InstrumentDataException(
                'Invalid object returned from _get_data: returned object cannot be None and must have \'__iter__\' attribute'
            )

        for count, gran in enumerate(data_generator):
            if isinstance(gran, Granule):
                publisher.publish(gran)
            else:
                log.warn('Could not publish object returned by _get_data: {0}'.
                         format(gran))

            #TODO: Persist the 'state' of this operation so that it can be re-established in case of failure

        #TODO: When finished publishing, update (either directly, or via an event callback to the agent) the UpdateDescription

    @classmethod
    def _calc_iter_cnt(cls, total_recs, max_rec):
        """
        Given the total number of records and the maximum records allowed in a granule,
        calculates the number of iterations required to traverse the entire array in chunks of size max_rec
        @param total_recs The total number of records
        @param max_rec The maximum number of records allowed in a granule
        """
        cnt = total_recs / max_rec
        if total_recs % max_rec > 0:
            cnt += 1

        return cnt
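
_calc_iter_cnt above is plain ceiling division; here is a quick standalone check of the arithmetic (the values are illustrative).

def calc_iter_cnt(total_recs, max_rec):
    # Same ceiling-division logic as BaseDataHandler._calc_iter_cnt above.
    cnt = total_recs // max_rec
    if total_recs % max_rec > 0:
        cnt += 1
    return cnt

assert calc_iter_cnt(10500, 1000) == 11   # ten full granules plus one partial one
assert calc_iter_cnt(3000, 1000) == 3     # exact multiple: no extra iteration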
Esempio n. 29
0
from locust import HttpLocust, TaskSet, task, events

from gevent.coros import Semaphore
all_locusts_spawned = Semaphore()
all_locusts_spawned.acquire()

def on_hatch_complete(**kw):
    all_locusts_spawned.release()

events.hatch_complete += on_hatch_complete

class UserTasks(TaskSet):
    def on_start(self):
        all_locusts_spawned.wait()
        self.wait()
    
    @task
    def index(self):
        self.client.get("/")
    
class WebsiteUser(HttpLocust):
    host = "http://127.0.0.1:8089"
    min_wait = 2000
    max_wait = 5000
    task_set = UserTasks
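
The Locust example above uses an already-acquired Semaphore as a one-shot gate: every user blocks in wait() until the hatch_complete hook releases it, so all users start together. Below is a self-contained gevent-only sketch of the same gate; user and gate are illustrative names.

from gevent.lock import Semaphore   # gevent.coros in the older gevent used above
import gevent

gate = Semaphore()
gate.acquire()                      # close the gate until everything is spawned

def user(i):
    gate.wait()                     # block without consuming the semaphore
    print('user %d starts' % i)

greenlets = [gevent.spawn(user, i) for i in range(3)]
gevent.sleep(0)                     # let every user reach gate.wait()
gate.release()                      # equivalent of the hatch_complete hook
gevent.joinall(greenlets)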
Esempio n. 30
0
class VBucketAwareCouchbaseClient(object):
    #poll the server every few seconds to see if the vbucket map
    #has changed
    def __init__(self, url, bucket, password="", verbose=False):
        self.log = logger.logger("VBucketAwareMemcachedClient")
        self.bucket = bucket
        self.rest_username = bucket
        self.rest_password = password
        self._memcacheds = {}
        self._vBucketMap = {}
        self._vBucketMap_lock = Lock()
        self._vBucketMapFastForward = {}
        self._vBucketMapFastForward_lock = Lock()
        #TODO: use regular expressions to parse the url
        server = {}
        if not bucket:
            raise InvalidArgumentException("bucket can not be an empty string",
                                           parameters="bucket")
        if not url:
            raise InvalidArgumentException("url can not be an empty string",
                                           parameters="url")
        if url.find("http://") != -1 and url.rfind(":") != -1 and url.find(
                "/pools/default") != -1:
            server["ip"] = url[url.find("http://") +
                               len("http://"):url.rfind(":")]
            server["port"] = url[url.rfind(":") + 1:url.find("/pools/default")]
            server["username"] = self.rest_username
            server["password"] = self.rest_password
        self.servers = [server]
        self.servers_lock = Lock()
        self.rest = RestConnection(server)
        self.reconfig_vbucket_map()
        self.init_vbucket_connections()
        self.dispatcher = CommandDispatcher(self)
        self.dispatcher_thread = Process(name="dispatcher-thread",
                                         target=self._start_dispatcher)
        self.dispatcher_thread.daemon = True
        self.dispatcher_thread.start()
        self.streaming_thread = Process(name="streaming",
                                        target=self._start_streaming,
                                        args=())
        self.streaming_thread.daemon = True
        self.streaming_thread.start()
        self.verbose = verbose

    def _start_dispatcher(self):
        self.dispatcher.dispatch()

    def _start_streaming(self):
        # this will dynamically update vBucketMap, vBucketMapFastForward, servers
        urlopener = urllib.FancyURLopener()
        urlopener.prompt_user_passwd = lambda host, realm: (self.rest_username,
                                                            self.rest_password)
        current_servers = True
        while current_servers:
            self.servers_lock.acquire()
            current_servers = deepcopy(self.servers)
            self.servers_lock.release()
            for server in current_servers:
                response = urlopener.open(
                    "http://{0}:{1}/pools/default/bucketsStreaming/{2}".format(
                        server["ip"], server["port"], self.bucket))
                while response:
                    try:
                        line = response.readline()
                        if not line:
                            # try next server if we get an EOF
                            response.close()
                            break
                    except:
                        # try next server if we fail to read
                        response.close()
                        break
                    try:
                        data = json.loads(line)
                    except:
                        continue

                    serverlist = data['vBucketServerMap']['serverList']
                    vbucketmapfastforward = {}
                    index = 0
                    if 'vBucketMapForward' in data['vBucketServerMap']:
                        for vbucket in data['vBucketServerMap'][
                                'vBucketMapForward']:
                            vbucketmapfastforward[index] = serverlist[
                                vbucket[0]]
                            index += 1
                        self._vBucketMapFastForward_lock.acquire()
                        self._vBucketMapFastForward = deepcopy(
                            vbucketmapfastforward)
                        self._vBucketMapFastForward_lock.release()
                    vbucketmap = {}
                    index = 0
                    for vbucket in data['vBucketServerMap']['vBucketMap']:
                        vbucketmap[index] = serverlist[vbucket[0]]
                        index += 1

                    # only update vBucketMap if we don't have a fastforward map;
                    # on a not_my_vbucket error, we already update the
                    # vBucketMap from the fastforward map
                    if not vbucketmapfastforward:
                        self._vBucketMap_lock.acquire()
                        self._vBucketMap = deepcopy(vbucketmap)
                        self._vBucketMap_lock.release()

                    new_servers = []
                    nodes = data["nodes"]
                    for node in nodes:
                        if node["clusterMembership"] == "active" and node[
                                "status"] == "healthy":
                            hostport = node["hostname"]
                            new_servers.append({
                                "ip":
                                hostport.split(":")[0],
                                "port":
                                int(hostport.split(":")[1]),
                                "username":
                                self.rest_username,
                                "password":
                                self.rest_password
                            })
                    new_servers.sort()
                    self.servers_lock.acquire()
                    self.servers = deepcopy(new_servers)
                    self.servers_lock.release()

    def init_vbucket_connections(self):
        # start up all vbucket connections
        self._vBucketMap_lock.acquire()
        vbucketcount = len(self._vBucketMap)
        self._vBucketMap_lock.release()
        for i in range(vbucketcount):
            self.start_vbucket_connection(i)

    def start_vbucket_connection(self, vbucket):
        self._vBucketMap_lock.acquire()
        server = deepcopy(self._vBucketMap[vbucket])
        self._vBucketMap_lock.release()
        serverIp, serverPort = server.split(":")
        if not server in self._memcacheds:
            self._memcacheds[server] = MemcachedClientHelper.direct_client(
                self.rest, serverIp, serverPort, self.bucket)

    def start_vbucket_fastforward_connection(self, vbucket):
        self._vBucketMapFastForward_lock.acquire()
        if not vbucket in self._vBucketMapFastForward:
            self._vBucketMapFastForward_lock.release()
            return
        server = deepcopy(self._vBucketMapFastForward[vbucket])
        self._vBucketMapFastForward_lock.release()
        serverIp, serverPort = server.split(":")
        if not server in self._memcacheds:
            self._memcacheds[server] = MemcachedClientHelper.direct_client(
                self.rest, serverIp, serverPort, self.bucket)

    def restart_vbucket_connection(self, vbucket):
        self._vBucketMap_lock.acquire()
        server = deepcopy(self._vBucketMap[vbucket])
        self._vBucketMap_lock.release()
        serverIp, serverPort = server.split(":")
        if server in self._memcacheds:
            self._memcacheds[server].close()
        self._memcacheds[server] = MemcachedClientHelper.direct_client(
            self.rest, serverIp, serverPort, self.bucket)

    def reconfig_vbucket_map(self, vbucket=-1):
        vb_ready = RestHelper(self.rest).vbucket_map_ready(self.bucket, 60)
        if not vb_ready:
            raise Exception("vbucket map is not ready for bucket {0}".format(
                self.bucket))
        vBuckets = self.rest.get_vbuckets(self.bucket)
        self.vbucket_count = len(vBuckets)
        bucket_info = self.rest.get_bucket(self.bucket)
        nodes = bucket_info.nodes

        self._vBucketMap_lock.acquire()
        for vBucket in vBuckets:
            if vBucket.id == vbucket or vbucket == -1:
                self._vBucketMap[vBucket.id] = vBucket.master
        self._vBucketMap_lock.release()

    def memcached(self, key, fastforward=False):
        self._vBucketMap_lock.acquire()
        self._vBucketMapFastForward_lock.acquire()
        vBucketId = (zlib.crc32(key) >> 16) & (len(self._vBucketMap) - 1)

        if fastforward and vBucketId in self._vBucketMapFastForward:
            # only try the fastforward if we have an entry
            # otherwise we just wait for the main map to update
            self.start_vbucket_fastforward_connection(vBucketId)
            self._vBucketMap[vBucketId] = self._vBucketMapFastForward[
                vBucketId]

        if vBucketId not in self._vBucketMap:
            msg = "vbucket map does not have an entry for vb : {0}"
            self._vBucketMapFastForward_lock.release()
            self._vBucketMap_lock.release()
            raise Exception(msg.format(vBucketId))
        if self._vBucketMap[vBucketId] not in self._memcacheds:
            msg = "smart client does not have a mc connection for server : {0}"
            self._vBucketMapFastForward_lock.release()
            self._vBucketMap_lock.release()
            raise Exception(msg.format(self._vBucketMap[vBucketId]))
        r = self._memcacheds[self._vBucketMap[vBucketId]]
        self._vBucketMapFastForward_lock.release()
        self._vBucketMap_lock.release()
        return r

    def vbucketid(self, key):
        self._vBucketMap_lock.acquire()
        r = (zlib.crc32(key) >> 16) & (len(self._vBucketMap) - 1)
        self._vBucketMap_lock.release()
        return r

    def done(self):
        if self.dispatcher:
            self.dispatcher.shutdown()
            if self.verbose:
                self.log.info("dispatcher shutdown invoked")
            [self._memcacheds[ip].close() for ip in self._memcacheds]
            if self.verbose:
                self.log.info("closed all memcached open connections")
            self.dispatcher = None

    def _respond(self, item, event):
        timeout = 30
        event.wait(timeout)
        if not event.is_set():
            # if we timeout, then try to reconnect to the server
            # responsible for this vbucket
            self.restart_vbucket_connection(self.vbucketid(item['key']))
            raise MemcachedTimeoutException(item, timeout)
        if "error" in item["response"]:
            raise item["response"]["error"]
        return item["response"]["return"]

    def get(self, key):
        event = Event()
        item = {"operation": "get", "key": key, "event": event, "response": {}}
        self.dispatcher.put(item)
        return self._respond(item, event)

    def gat(self, key, expiry):
        event = Event()
        item = {
            "operation": "gat",
            "key": key,
            "expiry": expiry,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def touch(self, key, expiry):
        event = Event()
        item = {
            "operation": "touch",
            "key": key,
            "expiry": expiry,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def cas(self, key, expiry, flags, old_value, value):
        event = Event()
        item = {
            "operation": "cas",
            "key": key,
            "expiry": expiry,
            "flags": flags,
            "old_value": old_value,
            "value": value,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def decr(self, key, amount=1, init=0, expiry=0):
        event = Event()
        item = {
            "operation": "decr",
            "key": key,
            "amount": amount,
            "init": init,
            "expiry": expiry,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def set(self, key, expiry, flags, value):
        event = Event()
        item = {
            "operation": "set",
            "key": key,
            "expiry": expiry,
            "flags": flags,
            "value": value,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def add(self, key, expiry, flags, value):
        event = Event()
        item = {
            "operation": "add",
            "key": key,
            "expiry": expiry,
            "flags": flags,
            "value": value,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def append(self, key, value, cas=0):
        event = Event()
        item = {
            "operation": "append",
            "key": key,
            "cas": cas,
            "value": value,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def delete(self, key, cas=0):
        event = Event()
        item = {
            "operation": "delete",
            "key": key,
            "cas": cas,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def prepend(self, key, value, cas=0):
        event = Event()
        item = {
            "operation": "prepend",
            "key": key,
            "cas": cas,
            "value": value,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def getl(self, key, expiry=15):
        event = Event()
        item = {
            "operation": "getl",
            "key": key,
            "expiry": expiry,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def replace(self, key, expiry, flags, value):
        event = Event()
        item = {
            "operation": "replace",
            "key": key,
            "expiry": expiry,
            "flags": flags,
            "value": value,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)

    def incr(self, key, amount=1, init=0, expiry=0):
        event = Event()
        item = {
            "operation": "incr",
            "key": key,
            "amount": amount,
            "init": init,
            "expiry": expiry,
            "event": event,
            "response": {}
        }
        self.dispatcher.put(item)
        return self._respond(item, event)
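
For reference, the key-to-vBucket mapping used by memcached() and vbucketid() above is a CRC32 hash masked down to the size of the vBucket map. The sketch below isolates that calculation; it is not part of the client, the default of 1024 vBuckets is only an assumption about a typical Couchbase configuration, and the map size must be a power of two for the bitwise AND to act as a modulo:

import zlib

def vbucket_for_key(key, num_vbuckets=1024):
    if isinstance(key, str):
        key = key.encode('utf-8')   # zlib.crc32 expects bytes on Python 3
    return (zlib.crc32(key) >> 16) & (num_vbuckets - 1)

# vbucket_for_key('user:1001') -> an index in range(num_vbuckets)
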
Esempio n. 31
0
class Queue(Greenlet):
    """Manages the queue of |Envelope| objects waiting for delivery. This is
    not a standard FIFO queue; a message's place in the queue depends entirely
    on the timestamp of its next delivery attempt.

    :param store: Object implementing :class:`QueueStorage`.
    :param relay: |Relay| object used to attempt message deliveries.
    :param backoff: Function that, given an |Envelope| and number of delivery
                    attempts, will return the number of seconds before the next
                    attempt. If it returns ``None``, the message will be
                    permanently failed. The default backoff function simply
                    returns ``None`` and messages are never retried.
    :param bounce_factory: Function that produces a |Bounce| object given the
                           same parameters as the |Bounce| constructor. If the
                           function returns ``None``, no bounce is delivered.
                           By default, a new |Bounce| is created in every case.
    :param store_pool: Number of simultaneous operations performable against
                       the ``store`` object. Default is unlimited.
    :param relay_pool: Number of simultaneous operations performable against
                       the ``relay`` object. Default is unlimited.

    """
    def __init__(self,
                 store,
                 relay,
                 backoff=None,
                 bounce_factory=None,
                 store_pool=None,
                 relay_pool=None):
        super(Queue, self).__init__()
        self.store = store
        self.relay = relay
        self.backoff = backoff or self._default_backoff
        self.bounce_factory = bounce_factory or Bounce
        self.wake = Event()
        self.queued = []
        self.queued_lock = Semaphore(1)
        self.queue_policies = []
        self._use_pool('store_pool', store_pool)
        self._use_pool('relay_pool', relay_pool)

    def add_policy(self, policy):
        """Adds a |QueuePolicy| to be executed before messages are persisted
        to storage.

        :param policy: |QueuePolicy| object to execute.

        """
        if isinstance(policy, QueuePolicy):
            self.queue_policies.append(policy)
        else:
            raise TypeError('Argument not a QueuePolicy.')

    @staticmethod
    def _default_backoff(envelope, attempts):
        pass

    def _run_policies(self, envelope):
        results = [envelope]

        def recurse(current, i):
            try:
                policy = self.queue_policies[i]
            except IndexError:
                return
            ret = policy.apply(current)
            if ret:
                results.remove(current)
                results.extend(ret)
                for env in ret:
                    recurse(env, i + 1)
            else:
                recurse(current, i + 1)

        recurse(envelope, 0)
        return results

    def _use_pool(self, attr, pool):
        if pool is None:
            pass
        elif isinstance(pool, Pool):
            setattr(self, attr, pool)
        else:
            setattr(self, attr, Pool(pool))

    def _pool_run(self, which, func, *args, **kwargs):
        pool = getattr(self, which + '_pool', None)
        if pool:
            ret = pool.spawn(func, *args, **kwargs)
            return ret.get()
        else:
            return func(*args, **kwargs)

    def _pool_imap(self, which, func, *iterables):
        pool = getattr(self, which + '_pool', gevent)
        threads = imap(pool.spawn, repeat(func), *iterables)
        ret = []
        for thread in threads:
            thread.join()
            ret.append(thread.exception or thread.value)
        return ret

    def _pool_spawn(self, which, func, *args, **kwargs):
        pool = getattr(self, which + '_pool', gevent)
        return pool.spawn(func, *args, **kwargs)

    def _add_queued(self, entry):
        for i, info in enumerate(self.queued):
            if info[0] > entry[0]:  # [0] is the timestamp.
                self.queued.insert(i, entry)
                break
        else:
            self.queued.append(entry)
        self.wake.set()

    def enqueue(self, envelope):
        """Drops a new message in the queue for delivery. The first delivery
        attempt is made immediately (depending on relay pool availability).
        This method is not typically called directly, |Edge| objects use it
        when they receive new messages.

        :param envelope: |Envelope| object to enqueue.
        :returns: Zipped list of envelopes and their respective queue IDs (or
                  thrown :exc:`QueueError` objects).

        """
        now = time.time()
        envelopes = self._run_policies(envelope)
        ids = self._pool_imap('store', self.store.write, envelopes,
                              repeat(now))
        results = zip(envelopes, ids)
        for env, id in results:
            if not isinstance(id, BaseException):
                self._pool_spawn('relay', self._attempt, id, env, 0)
            elif not isinstance(id, QueueError):
                raise id  # Re-raise exceptions that are not QueueError.
        return results

    def _load_all(self):
        for entry in self.store.load():
            self._add_queued(entry)

    def _bounce(self, envelope, reply):
        bounce = self.bounce_factory(envelope, reply)
        if bounce:
            return self.enqueue(bounce)

    def _perm_fail(self, id, envelope, reply):
        self._pool_spawn('store', self.store.remove, id)
        if envelope.sender:  # Can't bounce to null-sender.
            self._pool_spawn('bounce', self._bounce, envelope, reply)

    def _retry_later(self, id, envelope, reply):
        attempts = self.store.increment_attempts(id)
        wait = self.backoff(envelope, attempts)
        if wait is None:
            reply.message += ' (Too many retries)'
            self._perm_fail(id, envelope, reply)
        else:
            when = time.time() + wait
            self.store.set_timestamp(id, when)
            self._add_queued((when, id))

    def _attempt(self, id, envelope, attempts):
        try:
            self.relay._attempt(envelope, attempts)
        except TransientRelayError as e:
            self._pool_spawn('store', self._retry_later, id, envelope, e.reply)
        except PermanentRelayError as e:
            self._perm_fail(id, envelope, e.reply)
        except Exception as e:
            log_exception(__name__)
            reply = Reply('450', '4.0.0 Unhandled delivery error: ' + str(e))
            self._pool_spawn('store', self._retry_later, id, envelope, reply)
            raise
        else:
            self._pool_spawn('store', self.store.remove, id)

    def _dequeue(self, id):
        envelope, attempts = self.store.get(id)
        self._pool_spawn('relay', self._attempt, id, envelope, attempts)

    def _check_ready(self, now):
        last_i = 0
        for i, entry in enumerate(self.queued):
            timestamp, id = entry
            if now >= timestamp:
                self._pool_spawn('store', self._dequeue, id)
                last_i = i + 1
            else:
                break
        if last_i > 0:
            self.queued = self.queued[last_i:]

    def _wait_ready(self, now):
        try:
            first = self.queued[0]
        except IndexError:
            self.wake.wait()
            self.wake.clear()
            return
        first_timestamp = first[0]
        if first_timestamp > now:
            self.wake.wait(first_timestamp - now)
            self.wake.clear()

    def flush(self):
        """Attempts to immediately flush all messages waiting in the queue,
        regardless of their retry timers.

        ***Note:*** This can be a very expensive operation, use with care.

        """
        self.wake.set()
        self.wake.clear()
        self.queued_lock.acquire()
        try:
            for entry in self.queued:
                self._pool_spawn('store', self._dequeue, entry[1])
            self.queued = []
        finally:
            self.queued_lock.release()

    def _run(self):
        self._pool_spawn('store', self._load_all)
        while True:
            self.queued_lock.acquire()
            try:
                now = time.time()
                self._check_ready(now)
                self._wait_ready(now)
            finally:
                self.queued_lock.release()
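
The backoff contract described in the docstring (given the envelope and the attempt count, return the number of seconds until the next try, or None to fail the message permanently) can be satisfied by something as small as the sketch below. The function name, the five-attempt limit and the 60-second base are illustrative assumptions, not part of the original module:

def exponential_backoff(envelope, attempts):
    if attempts > 5:
        return None                      # give up permanently after five tries
    return 60.0 * (2 ** (attempts - 1))  # 60s, 120s, 240s, ... between retries

# queue = Queue(store, relay, backoff=exponential_backoff, relay_pool=10)
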
Esempio n. 32
0
class MemoryDB(BaseDB):
    def __init__(self):
        self.__db = SQ.connect(":memory:")
        self.cur = self.__db.cursor()
        self.cur.execute(
            "CREATE TABLE hosts (id TEXT, addr TEXT, hostname TEXT)")
        self.cur.execute(
            "CREATE TABLE workers (id TEXT, host_id TEXT, service TEXT, addr TEXT, pid INT, online BOOLEAN)"
        )
        self.cur.execute("CREATE TABLE services (host_id TEXT, name TEXT)")
        self.SEMA = Semaphore()

    def close(self):
        self.__db.close()

    # hosts
    # -------------------------

    def host_add(self, ID, address):  #, hostname):
        self.SEMA.acquire()
        try:
            self.cur.execute("INSERT INTO hosts ('id','addr') VALUES (?,?)",
                             (ID, address))
            self.__db.commit()
        finally:
            self.SEMA.release()

    def host_get(self, ID):
        self.SEMA.acquire()
        try:
            self.cur.execute("SELECT ID,addr,hostname FROM hosts WHERE id=?",
                             (ID, ))
            res = self.cur.fetchone()
        finally:
            self.SEMA.release()
        if res is None:
            return None
        return {'id': res[0], 'addr': res[1], 'hostname': res[2]}

    def host_del(self, ID):
        self.SEMA.acquire()
        try:
            self.cur.execute("DELETE FROM hosts WHERE id=?", (ID, ))
            self.__db.commit()
        finally:
            self.SEMA.release()

    def host_list(self):
        self.SEMA.acquire()
        try:
            self.cur.execute("SELECT id,addr,hostname FROM hosts")
            res = self.cur.fetchall()
        finally:
            self.SEMA.release()
        for u, a, h in res:
            yield {'id': u, 'addr': a, 'hostname': h}

    def service_add(self, host_id, name):
        """
        Add a service to a host
        """
        self.SEMA.acquire()
        try:
            self.cur.execute(
                "INSERT INTO services ('host_id','name') VALUES (?,?)",
                (host_id, name))
            self.__db.commit()
        finally:
            self.SEMA.release()

    def service_del(self, host_id, name):
        """
        Remove a service from a host
        """
        self.SEMA.acquire()
        try:
            self.cur.execute("DELETE FROM services WHERE host_id=? AND name=?",
                             (host_id, name))
            self.__db.commit()
        finally:
            self.SEMA.release()

    def service_list(self, host_id):
        """
        List of services on host
        """
        self.SEMA.acquire()
        try:
            self.cur.execute("SELECT name FROM services WHERE host_id=?",
                             (host_id, ))
            res = self.cur.fetchall()
        finally:
            self.SEMA.release()
        for s in res:
            yield {'service': s[0]}

    def worker_add(self, host_id, worker_id, name, address, pid, online):
        self.SEMA.acquire()
        try:
            self.cur.execute(
                "INSERT INTO workers ('id', 'host_id', 'addr', 'service','pid', 'online') VALUES (?,?,?,?,?,?)",
                (worker_id, host_id, address, name, pid, online))
            self.__db.commit()
        finally:
            self.SEMA.release()

    def worker_set_state(self, worker_id, is_online):
        self.SEMA.acquire()
        try:
            self.cur.execute("UPDATE workers SET online=? WHERE id=?;",
                             (is_online, worker_id))
            self.__db.commit()
        finally:
            self.SEMA.release()

    def worker_get(self, worker_id):
        self.SEMA.acquire()
        try:
            self.cur.execute(
                "SELECT id,host_id,service,addr,pid, online FROM workers WHERE id=?",
                [
                    worker_id,
                ])
            res = self.cur.fetchone()
        finally:
            self.SEMA.release()

        if res is None:
            return
        return {
            'id': res[0],
            'host_id': res[1],
            'service': res[2],
            'addr': res[3],
            'pid': res[4],
            'online': res[5] > 0
        }

    def worker_del_by_addr(self, addr):
        """
        Delete worker from database identified by address
        """
        self.SEMA.acquire()
        try:
            self.cur.execute("DELETE FROM workers WHERE addr=?", (addr, ))
            self.__db.commit()
        finally:
            self.SEMA.release()

    def worker_del_by_id(self, id):
        """
        Delete worker from database identified by worker id
        """
        self.SEMA.acquire()
        try:
            self.cur.execute("DELETE FROM workers WHERE id=?", (id, ))
            self.__db.commit()
        finally:
            self.SEMA.release()

    def worker_list(self, host_id):
        """
        List all workers on a given host
        """
        self.SEMA.acquire()
        try:
            self.cur.execute(
                "SELECT id,service,addr,pid, online FROM workers WHERE host_id=?",
                (host_id, ))
            res = self.cur.fetchall()
        finally:
            self.SEMA.release()
        if res is None:
            return
        for i, s, a, pid, o in res:
            yield {
                'id': i,
                'service': s,
                'addr': a,
                'pid': pid,
                'online': o > 0
            }

    def workers_for_service(self, service, only_online=True):
        """
        List of workers for service
        """
        self.SEMA.acquire()
        try:
            if only_online:
                self.cur.execute(
                    "SELECT id,addr FROM workers WHERE service=? AND online=?",
                    (service, 1))
            else:
                self.cur.execute("SELECT id,addr FROM workers WHERE service=?",
                                 (service, ))
            res = self.cur.fetchall()
        finally:
            self.SEMA.release()
        for i, a in res:
            yield {'id': i, 'addr': a}
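
Every MemoryDB method above repeats the same acquire/try/finally/release dance around the shared cursor. A context-manager wrapper keeps that locking in one place; the sketch below assumes the same in-memory SQLite setup, and the GuardedDB name and cursor() helper are illustrative, not part of the original class:

import sqlite3
from contextlib import contextmanager
from gevent.lock import Semaphore   # older gevent: from gevent.coros import Semaphore

class GuardedDB(object):
    def __init__(self):
        self._db = sqlite3.connect(':memory:')
        self._sem = Semaphore()

    @contextmanager
    def cursor(self):
        self._sem.acquire()          # serialize access to the shared connection
        try:
            yield self._db.cursor()
            self._db.commit()
        finally:
            self._sem.release()

# db = GuardedDB()
# with db.cursor() as cur:
#     cur.execute("CREATE TABLE hosts (id TEXT, addr TEXT, hostname TEXT)")
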
Esempio n. 33
0
from locust import HttpLocust, TaskSet, task, events

from gevent.coros import Semaphore

all_locusts_spawned = Semaphore()
all_locusts_spawned.acquire()


def on_hatch_complete(**kw):
    all_locusts_spawned.release()


events.hatch_complete += on_hatch_complete


class UserTasks(TaskSet):
    def on_start(self):
        all_locusts_spawned.wait()
        self.wait()

    @task
    def index(self):
        self.client.get("/")


class WebsiteUser(HttpLocust):
    host = "http://127.0.0.1:8089"
    min_wait = 2000
    max_wait = 5000
    task_set = UserTasks
Esempio n. 34
0
class SchedValve(SchedCommon):
	"""Mirrors (and monitors) a valve."""
	locked = False # external command, don't change
	sched = None
	sched_ts = None
	sched_job = None
	sched_lock = None
	on = False
	on_ts = None
	flow = 0
	_flow_check = None

	def __new__(cls,v):
		if v.id in valves:
			return valves[v.id]
		self = object.__new__(cls)
		valves[v.id] = self
		self.v = v
		self.site = SchedSite(self.v.controller.site)
		self.env = EnvGroup(self.v.envgroup)
		self.controller = SchedController(self.v.controller)
		self.sched_lock = Semaphore()
		if self.site.ci:
			try:
				self.site.send_command("set","output","off",*(self.v.var.split()))
			except NotConnected:
				pass
		return self
	def __init__(self,v):
		pass

	def _on(self,sched=None,duration=None):
		print >>sys.stderr,"Open",self.v.var
		self.site.delay_on()
		if self.controller.has_max_on():
			print >>sys.stderr,"… but too many:", " ".join(str(v) for v in self.controller.c.valves.all() if SchedValve(v).on)
			if sched:
				sched.update(seen = False)
			self.on = False
			self.log("NOT running for %s: too many"%(duration,))
			raise TooManyOn(self)
		if duration is None and sched is not None:
			duration = sched.duration
		if duration is None:
			self.log("Run (indefinitely)")
			self.site.send_command("set","output","on",*(self.v.var.split()))
		else:
			self.log("Run for %s"%(duration,))
			if not isinstance(duration,(int,long)):
				duration = duration.total_seconds()
			self.site.send_command("set","output","on",*(self.v.var.split()), sub=(("for",duration),("async",)))
		if sched is not None:
			if self.v.verbose:
				self.log("Opened for %s"%(sched,))
			self.sched = sched
			if not sched.seen:
				sched.update(start=now(), seen = True)
				sched.refresh()
			#Save(sched)
		else:
			if self.v.verbose:
				self.log("Opened for %s"%(duration,))

	def _off(self):
		if self.on:
			if self.v.verbose:
				self.log("Closing")
			print >>sys.stderr,"Close",self.v.var
		try:
			self.site.send_command("set","output","off",*(self.v.var.split()))
		except NotConnected:
			pass

	def shutdown(self):
		if self._flow_check is not None:
			self._flow_check.dead()

	def run_schedule(self):
		if not self.sched_lock.acquire(blocking=False):
			if self.v.verbose:
				print >>sys.stderr,"SCHED LOCKED1 %s" % (self.v.name,)
			return
		try:
			self._run_schedule()
		except Exception:
			self.log(format_exc())
		finally:
			self.sched_lock.release()

	def _run_schedule(self):
		if self.sched_job is not None:
			self.sched_job.kill()
			self.sched_job = None
		if self.locked:
			if self.v.verbose:
				print >>sys.stderr,"SCHED LOCKED2 %s" % (self.v.name,)
			return
		n = now()

		try:
			if self.sched is not None:
				self.sched.refresh()
				if self.sched.start+self.sched.duration <= n:
					self._off()
					self.sched = None
				else:
					self.sched_job = gevent.spawn_later((self.sched.start+self.sched.duration-n).total_seconds(),self.run_sched_task,reason="_run_schedule 1")
					if self.v.verbose:
						print >>sys.stderr,"SCHED LATER %s: %s" % (self.v.name,humandelta(self.sched.start+self.sched.duration-n))
					return
		except ObjectDoesNotExist:
			pass # somebody deleted it *shrug*
		sched = None

		if self.sched_ts is None:
			try:
				sched = self.v.schedules.filter(start__lt=n).order_by("-start")[0]
			except IndexError:
				self.sched_ts = n-timedelta(1,0)
			else:
				self.sched_ts = sched.start+sched.duration
				if sched.start+sched.duration > n: # still running
					if self.v.verbose:
						print >>sys.stderr,"SCHED RUNNING %s: %s" % (self.v.name,humandelta(sched.start+sched.duration-n))
					try:
						self._on(sched, sched.start+sched.duration-n)
					except TooManyOn:
						self.log("Could not schedule: too many open valves")
					except NotConnected:
						self.log("Could not schedule: connection to HomEvenT failed")
					return

		try:
			sched = self.v.schedules.filter(start__gte=self.sched_ts).order_by("start")[0]
		except IndexError:
			if self.v.verbose:
				print >>sys.stderr,"SCHED EMPTY %s: %s" % (self.v.name,str_tz(self.sched_ts))
			self._off()
			return

		if sched.start > n:
			if self.v.verbose:
				print >>sys.stderr,"SCHED %s: sched %d in %s" % (self.v.name,sched.id,humandelta(sched.start-n))
			self._off()
			self.sched_job = gevent.spawn_later((sched.start-n).total_seconds(),self.run_sched_task,reason="_run_schedule 2")
			return
		try:
			self._on(sched)
		except TooManyOn:
			self.log("Could not schedule: too many open valves")
		except NotConnected:
			self.log("Could not schedule: connection to HomEvenT failed")
	
	def run_sched_task(self,reason="valve"):
		self.sched_job = None
		self.site.run_sched_task(reason=reason)

	def add_flow(self, val):
		if self._flow_check is not None:
			if self._flow_check.add_flow(val):
				return
		self.flow += val

	def check_flow(self,**k):
		cf = None
		try:
			cf = FlowCheck(self)
			cf.run()
		except Exception as ex:
			log_error(self.v)
			if cf is not None:
				cf._unlock()
		
	def refresh(self):
		self.v.refresh()
#		if self.sched is not None:
#			self.sched.refresh()

	def connect_monitors(self):
		if self.site.ci is None:
			return
		self.mon = self.site.ci.root.monitor(self.watch_state,"output","set","*","*",*(self.v.var.split()))
		self.ckf = self.site.ci.root.monitor(self.check_flow,"check","flow",*self.v.var.split())
		
	def watch_state(self,event=None,**k):
		"""output set OLD NEW NAME"""
		on = (str(event[3]).lower() in ("1","true","on"))
		if self._flow_check is not None:
			# TODO
			self.on = on
			self._flow_check.state(on)
			return
		if self.locked:
			self.on = on
			return
		try:
			if on != self.on:
				print >>sys.stderr,"Report %s" % ("ON" if on else "OFF"),self.v.var
				n=now()
				if self.sched is not None and self.sched.start+self.sched.duration <= n:
					self.sched.update(db_duration=(n-self.sched.start).total_seconds())
					self.sched.refresh()
					self.sched_ts = self.sched.start+self.sched.duration
					self.sched = None
				flow,self.flow = self.flow,0
				# If nothing happened, calculate.
				if not on:
					duration = n-self.on_ts
					maxflow = self.v.flow * duration.total_seconds()
					if (not flow and not self.v.feed.var) or flow > maxflow:
						flow = maxflow
				self.new_level_entry(flow)
				if not on:
					if self.v.level > self.v.stop_level + (self.v.start_level-self.v.stop_level)/5:
						self.v.update(priority=True)
					self.log("Done for %s, level is now %s"%(duration,self.v.level))
				self.on = on
				self.on_ts = n

		except Exception:
			print_exc()

	def sync(self):
		flow,self.flow = self.flow,0
		self.new_level_entry(flow)

	def sync_history(self):
		n=now()
		try:
			lv = self.v.levels.order_by("-time")[0]
		except IndexError:
			pass
		else:
			if self.v.time > lv.time:
				self.log("Timestamp downdate: %s %s" % (self.v.time,lv.time))
				self.v.update(time = lv.time)
				self.v.refresh()
				#Save(self.v)
		if (n-self.v.time).total_seconds() > 3500:
			self.new_level_entry()

	def new_level_entry(self,flow=0):
		self.site.current_history_entry()
		n=now()
		self.v.refresh()
		hts = None
		try:
			lv = self.v.levels.order_by("-time")[0]
		except IndexError:
			ts = n-timedelta(1,0)
		else:
			ts = lv.time
		sum_f = 0
		sum_r = 0
		for h in self.site.s.history.filter(time__gt=ts).order_by("time"):
			if self.v.verbose>2:
				self.log("Env factor for %s: T=%s W=%s S=%s"%(h,h.temp,h.wind,h.sun))
			f = self.env.env_factor(h, logger=self.log if self.v.verbose>2 else None)*self.v.adj
			if self.v.verbose>1:
				self.log("Env factor for %s is %s"%(h,f))
			sum_f += self.site.s.db_rate * self.v.do_shade(self.env.eg.factor*f) * (h.time-ts).total_seconds()
			sum_r += self.v.runoff*h.rain
			ts=h.time

		if self.v.verbose:
			self.log("Apply env %f, rain %r"%(sum_f,sum_r))

		if self.v.time == ts:
			return
		if self.v.level < 0:
			level = 0
		else:
			level = F('level')
		level += sum_f
		if flow > 0 and self.v.level > self.v.max_level:
			level = self.v.max_level
		level -= flow/self.v.area+sum_r
		#if level < 0:
		#	self.log("Level %s ?!?"%(self.v.level,))
		self.v.update(time=ts, level=level)
		self.v.refresh()

		lv = Level(valve=self.v,time=ts,level=self.v.level,flow=flow)
		lv.save()

	def log(self,txt):
		log(self.v,txt)
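
run_schedule() above acquires its semaphore non-blockingly, so a second caller simply skips the run instead of queueing behind the first. The standalone sketch below shows just that pattern; _sched_lock, run_once and the job callable are illustrative placeholders, not part of SchedValve:

from gevent.lock import Semaphore   # older gevent: from gevent.coros import Semaphore

_sched_lock = Semaphore()

def run_once(job):
    if not _sched_lock.acquire(blocking=False):
        return False                 # another run is already in progress; skip
    try:
        job()
    finally:
        _sched_lock.release()
    return True
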
Esempio n. 35
0
class AsyncWorker(object):

    def __init__(self, worker_id):
        # used in process of selecting jobs
        self.own_async_id = worker_id
        # database setup
        dbb = settings.ASYNC_DB_BACKEND
        if dbb=="sqlite":
            from db.sqlite import SQLiteDatabase
            self.DB = SQLiteDatabase( worker_id )
        else:
            raise Exception("Unknown database backend defined in configuration: %r" % dbb)
        # serializer / deserializer
        self.serializer = Serializer()
        # caller
        self.PROXY = RawProxy()
        self._processing = True

        self.SEMA = Semaphore()


    def get_database_id(self):
        return self.DB.CONF['databaseid']

    def close(self):
        self._processing = False
        self.DB.close()


    # task processing loop

    def task_eater(self):
        rectime = time.time()
        while self._processing:
            taskproc = self.process_next_job()
            if taskproc:
                gevent.sleep()
            else:
                gevent.sleep(2)
            if rectime<time.time():
                g = gevent.Greenlet(self.check_lost_tasks)
                g.start()
                rectime = time.time() + settings.ASYNC_RECOVERY_TIME
                gevent.sleep()


    def start_eat(self):
        g = gevent.Greenlet(self.task_eater)
        g.start()


    # task scheduling


    def task_add_new(self, task, context, args, kwargs, ign_result=False):
        """
        Register task in database.
            task - task name with worker
            context - context data
            args, kwargs - function arguments
            ign_result - ignore result flag (True / False)
        """
        args = (args, kwargs)
        taskid = self.DB.task_add(
            taskname = task,
            time = time.time(),
            args = self.serializer.data_2_bin(args),
            context = self.serializer.data_2_bin(context),
            ign_result = ign_result,
        )
        #self._check_next()
        return taskid


    def process_task(self, taskid):
        """
        Process job with given ID
        """
        LOG.debug("Processing task %i" % taskid)
        # get task from database
        data = self.DB.task_start_process(taskid)
        if data is None:
            return
        # unpack data
        data['args'],data['kwargs'] = self.serializer.bin_2_data(data['args'])
        data['context'] = self.serializer.bin_2_data(data['context'])
        # send task to realize
        try:
            result = self.PROXY.sync_call(
                data['task'],
                data['context'],
                data['args'],
                data['kwargs'],
            )

        except exceptions.SerializationError:
            # Exception raised when serialization or deserialization fails.
            # If this exception occurs here, we can't try to run this task again, because
            # the data stored in the async database is probably screwed up (for example the
            # async daemon died or was stopped, but the current configuration is different
            # and uses other serialization methods). We mark this task as permanently
            # broken (and it will never be retried).
            self.DB.task_fail_permanently(taskid)
            return

        except exceptions.ServiceNotFound:
            # "Worker not found" occurs when the destination service is currently not
            # available. The task will be repeated in the future.
            self.DB.task_error_and_delay(taskid, settings.ASYNC_ERROR_TASK_DELAY)
            return

        except exceptions.ServiceBusException:
            # Any other internal exception
            # will bump error counter
            self.DB.task_error_and_delay(taskid, settings.ASYNC_ERROR_TASK_DELAY)
            return


        if result['message'] == messages.ERROR:
            # task produced an error
            # not a kasaya exception, but the task's own error
            # it's not our fault ;-)

            # get task context or create it if not exist
            ctx = data['context']
            if ctx is None:
                ctx = {}

            # increase the error count
            errcnt = ctx.get("err_count", 0) + 1
            ctx['err_count'] = errcnt

            # if the error counter is limited, has that limit been reached?
            maxerr = ctx.get("err_max", None)
            if not maxerr is None:
                no_retry = errcnt>=maxerr
            else:
                no_retry = False

            data['context'] = ctx
            if no_retry:
                self.DB.task_fail_permanently(
                    taskid,
                    settings.ASYNC_ERROR_TASK_DELAY,
                    self.serializer.data_2_bin(result) )
                self.DB.task_store_context(
                    taskid,
                    self.serializer.data_2_bin(ctx) )
            else:
                self.DB.task_error_and_delay(taskid,
                    settings.ASYNC_ERROR_TASK_DELAY,
                    self.serializer.data_2_bin(result) )
                self.DB.task_store_context(
                    taskid,
                    self.serializer.data_2_bin(ctx) )
            return

        # task was processed successfully
        self.DB.task_finished_ok(taskid, self.serializer.data_2_bin( result ) )



    def process_next_job(self):
        """
        Check if is any job waiting and start processing it.
        """
        self.SEMA.acquire()
        try:
            taskid = self.DB.task_choose_for_process()
        finally:
            self.SEMA.release()

        if taskid is None:
            # nothing is waiting
            return False
        else:
            self.process_task( taskid )
            return True


    def check_lost_tasks(self):
        """
        Find tasks assigned to non-existent async workers and reassign them to self.
        once - if True, do not register the task to run again in the future.
        Also:
        Check the database for dead tasks: tasks waiting too long with status=1
        (selected for processing, but unprocessed).
        Also:
        Find tasks with status=2 - processing started, but without an asyncid (after the asyncid died).
        """
        # get all tasks belonging to dead async workers
        lost_asyncd = 0
        for asyncid in self.DB.async_list():
            if control.worker.exists( asyncid ):
                # worker exists
                continue
            # found lost async daemon tasks,
            # reassign them to self
            rc = self.DB.unlock_lost_tasks(asyncid)
            lost_asyncd += 1

        # process all tasks with status 1 -> selected for processing, but unprocessed for a long time
        self.DB.recover_unprocessed_tasks( settings.ASYNC_DEAD_TASK_TIME_LIMIT )

        # process all tasks with status 2 -> processing started, but the async daemon died before receiving the result
        self.DB.recover_unfinished_tasks()
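
process_next_job() above holds its semaphore only while selecting the next task, so the potentially slow task execution happens outside the lock and workers do not serialize on each other. The sketch below isolates that pattern; choose_task and run_task are illustrative placeholders, not part of the original worker:

from gevent.lock import Semaphore   # older gevent: from gevent.coros import Semaphore

_select_lock = Semaphore()

def process_next(choose_task, run_task):
    _select_lock.acquire()           # only task selection is serialized
    try:
        task_id = choose_task()
    finally:
        _select_lock.release()
    if task_id is None:
        return False                 # nothing is waiting
    run_task(task_id)                # executed outside the lock
    return True
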
class DMServerCore(OperationRequest):
    def __init__(self, server_ip, server_port, client_ip, client_port):
        super(DMServerCore, self).__init__()

        self.lwm2m_dm_server_ip = server_ip
        self.lwm2m_dm_server_port = server_port
        self.local_client_ip_ = client_ip
        self.local_client_port_ = client_port
        self.sem = Semaphore()
        self.sem_counter = 0

        self.lwm2m_resources = LWM2MResourceTree()
        self.registration = Registration(self.lwm2m_resources)
        self.execution = Execution(self.lwm2m_resources)
        self.discover = Discovery(lwm2m_resources=self.lwm2m_resources)
        self.observation = ObservationNotificationEngine(self.lwm2m_resources)
        self.read = Read(self.lwm2m_resources)
        self.write = Write(self.lwm2m_resources)
        self.create_object_instance = Create(self.lwm2m_resources)
        self.write_attributes = WriteAttributes(self.lwm2m_resources)

    def create_server(self, ):
        """ Creates and starts a LWM2M DM Server using Gevent DatagramServer. The server
        listens at the ip and port specified below. A handler is used to entertain the
        requests coming at that port """

        self.dm_server = DatagramServer((self.lwm2m_dm_server_ip, \
                                         self.lwm2m_dm_server_port), self.handle_request)
        self.dm_server.start()

    def stop_server(self, ):
        """ Stops the LWM2M DM Server """

        self.dm_server.stop()

    def handle_request(self, message, remote):
        """ Handles the requests coming at the specified ip and port """

        rx_record = connection.ReceptionRecord(None, message, remote)
        msg = rx_record.message
        uri_query = msg.findOption(options.UriQuery)
        self.process(rx_record, remote, uri_query)

    def handle_lwm2m_put(self, msg, uri_query, remote, rx_record):
        """ It consists of Normal Update, Write Operation, Write Attribute Operation.
        Write Operation is used to update the resource(s) as per the request. Write
        Attributes operation is used to update the attributes of the object, object
        instance or resource. """

        method = None
        try:
            method = uri_query[0].value.split("=")[1]
        except:
            pass

        if method == "write":
            path = msg.findOption(URI_PATH_VALUE)
            content_type_number = msg.findOption(options.ContentType)
            if content_type_number is None:
                content_type = "text/plain"
            else:
                content_type = constants.media_types[content_type_number.value]
            self.write.write_resource(msg.payload, path, content_type)

            payload_forward = msg.payload

            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CHANGED,
                                     payload="Resource Updated")
            self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)

            client_port = self.generate_client_port()
            self.write.forward_write_request(path, payload_forward, \
                                        content_type, remote, client_port)

        elif method == "write_attributes":
            path = msg.findOption(URI_PATH_VALUE)
            content_type_number = msg.findOption(options.ContentType)
            if content_type_number is None:
                content_type = "text/plain"
            else:
                content_type = constants.media_types[content_type_number.value]
            payload = loads(msg.payload)
            self.write_attributes.set_attributes(path, remote, payload)
            msg = connection.Message(connection.Message.ACK,
                                     code=constants.CHANGED,
                                     payload="Resource Attributes Updated")
            self.dm_server.sendto(msg._pack(rx_record.transaction_id), remote)

            client_port = self.generate_client_port()
            self.write_attributes.forward_request(path, remote, payload,
                                                  content_type, client_port)

        else:

            endpoint_location = msg.findOption(URI_PATH_VALUE)[0].value
            if msg.payload == "":
                self.logger.info("Updating the Registration Params")
                endpoint_object = self.lwm2m_resources.return_endpoint_object(
                    endpoint_location=endpoint_location)
                endpoint_object.listener_ip = uri_query[6].value.split("=")[1]
                endpoint_object.local_ip = uri_query[6].value.split("=")[1]

                msg = connection.Message(connection.Message.ACK,
                                         code=constants.CHANGED,
                                         payload="Resource Updated")
                self.dm_server.sendto(msg._pack(rx_record.transaction_id),
                                      remote)
            else:
                self.logger.info("Adding/Updating the Resources")
                payload = self.update_resource(
                    loads(msg.payload), endpoint_location=endpoint_location)

                msg = connection.Message(connection.Message.ACK,
                                         code=constants.CHANGED,
                                         payload="Resource Updated")
                self.dm_server.sendto(msg._pack(rx_record.transaction_id),
                                      remote)

                self.logger.info("Forwarding the Notification")
                request = lwm2m_api()
                client_port = self.generate_client_port()

                response = request.send_notification(self.general_observation.listener_ip, self.general_observation.listener_port, \
                                                     self.general_observation.token_id, payload, content_type="application/json", client_port=client_port)

    def update_resource(self,
                        res_payload,
                        endpoint_location=None,
                        endpoint_name=None):

        total_res_dict = {}
        total_object_info = {}

        payload = res_payload
        endpoint_object = self.registration.handle_put_resource_updates(
            res_payload,
            endpoint_location=endpoint_location,
            endpoint_name=endpoint_name)

        for item, value in payload.iteritems():
            resources_dict = endpoint_object.objects_dict[item][
                "object"].resources_id_dict
            res_dict = {}
            for item1, value1 in resources_dict.iteritems():
                res_dict.update({item1: value1["object"].res_value})
            total_res_dict.update({item: {"resources": res_dict}})

        total_object_info = {endpoint_object.endpoint_name: total_res_dict}
        return total_object_info

    def process(self, rx_record, remote, uri_query):
        """ Processes various requests like CON (POST, PUT, GET) or NON.
        POST requests : Generally used for Registration and Execution
        PUT requests : Generally used for Updating the resources
        GET requests : Generally used for Discovery, Observation, Cancel Observation """

        msg = rx_record.message
        self.uri_query = uri_query
        if msg.transaction_type == connection.Message.CON:
            if constants.POST == msg.code:
                method = None
                try:
                    method = uri_query[0].value.split("=")[1]
                except:
                    pass
                if method == "create":
                    path = msg.findOption(URI_PATH_VALUE)
                    content_type_number = msg.findOption(options.ContentType)
                    if content_type_number is None:
                        content_type = "text/plain"
                    else:
                        content_type = constants.media_types[
                            content_type_number.value]
                    self.create_object_instance.create_instance(
                        path, remote, content_type, loads(msg.payload))
                    resources = loads(msg.payload)
                    msg = connection.Message(connection.Message.ACK,
                                             code=constants.CREATED,
                                             payload="Resource Created")
                    self.dm_server.sendto(msg._pack(rx_record.transaction_id),
                                          remote)

                    client_port = self.generate_client_port()
                    self.create_object_instance.forward_request(
                        path, remote, resources, content_type, client_port)

                elif method == "execute":
                    path = msg.findOption(URI_PATH_VALUE)
                    content_type_number = msg.findOption(options.ContentType)
                    if content_type_number is None:
                        content_type = "text/plain"
                    else:
                        content_type = constants.media_types[
                            content_type_number.value]
                    self.execution.execute_resource(path, remote, msg.payload)
                    execute_payload = msg.payload
                    msg = connection.Message(connection.Message.ACK,
                                             code=constants.CHANGED,
                                             payload="Resource Executed")
                    self.dm_server.sendto(msg._pack(rx_record.transaction_id),
                                          remote)

                    client_port = self.generate_client_port()
                    self.execution.forward_request(path, remote,
                                                   execute_payload,
                                                   client_port)

                elif method == "notify":
                    self.logger.info("Notification Received")
                    client_port = self.generate_client_port()
                    for item1, item2 in loads(msg.payload).iteritems():
                        if item1 == "observer_ip":
                            observer_ip = item2
                        elif item1 == "observer_port":
                            observer_port = item2
                        elif item1 != "observer_ip" and item1 != "observer_port":
                            endpoint_name = item1
                            for item3, item4 in item2.iteritems():
                                for item5, item6 in item4[
                                        "resources"].iteritems():
                                    pass

                    res = {
                        item3: {
                            "resources": {
                                item5.split("_")[0]: {
                                    "res_value": item6,
                                    "res_inst_id": item5.split("_")[1]
                                }
                            }
                        }
                    }
                    payload = {}
                    payload = self.update_resource(res,
                                                   endpoint_name=endpoint_name)

                    payload["observer_ip"] = observer_ip
                    payload["observer_port"] = observer_port

                    token_id = msg.token
                    observe_value = msg.findOption(options.Observe).value
                    self.logger.info("Forwarding Notification")

                    content_type = "application/json"
                    request = lwm2m_api()
                    response = request.send_notification(self.general_observation.listener_ip, self.general_observation.listener_port, token_id, payload, \
                                content_type=content_type, time_elapse=observe_value, client_port=client_port)

                    msg = connection.Message(connection.Message.ACK,
                                             code=constants.CREATED,
                                             payload="Notification Received")
                    self.dm_server.sendto(msg._pack(rx_record.transaction_id),
                                          remote)

                else:
                    """ Handles the Client Registration Request """

                    self.logger.info(
                        "Registering Client Endpoint in the LWM2M DM Server")

                    endpoint = self.registration.process_registration(
                        msg, uri_query)

                    response = self.registration.register_client(endpoint)
                    registered_client_location = response
                    if registered_client_location is not None:
                        self.logger.info(
                            "Client Endpoint Registration Successful for Endpoint : %s",
                            endpoint.endpoint_name)
                        self.logger.info("The registered location is %s",
                                         registered_client_location)
                        payload = self.set_general_observation_params()
                    else:
                        self.logger.info("Client Endpoint Registration Failed")

                    msg = connection.Message(
                        connection.Message.ACK,
                        code=constants.CREATED,
                        location=registered_client_location)
                    self.dm_server.sendto(msg._pack(rx_record.transaction_id),
                                          remote)

                    #Send the General Observation to the Registered Client
                    #self.send_general_observation(registered_client_location)

            elif constants.PUT == msg.code:
                """ It consists of Normal Update, Write Operation, Write Attribute Operation.
                Write Operation is used to update the resource(s) as per the request. Write
                Attributes operation is used to update the attributes of the object, object
                instance or resource. """
                self.handle_lwm2m_put(msg, uri_query, remote, rx_record)

            elif constants.GET == msg.code:
                """ Handles Requests like Discovery, Observation """
                try:
                    observe_value = msg.findOption(options.Observe).value
                except:
                    observe_value = ""

                if observe_value == OBSERVE_OPTION_VALUE_OBSERVATION:
                    """ Sets the Observation. Two types of observations. General Observation
                    and Specific Observation. General Observation is used for anything that is
                    not observed and updates are sent as general notifications using a general
                    token. Specific observation is implicitly defined by the observer(as request) 
                    and handled as specific notification with a specific token """

                    path = msg.findOption(URI_PATH_VALUE)
                    if len(path) == 1:
                        self.set_m2m_server_adapter_params(rx_record, remote)
                    else:
                        self.logger.info(
                            "Specific Observation Request Received")

                        content_type_number = msg.findOption(
                            options.ContentType)
                        if content_type_number is None:
                            content_type = "text/plain"
                        else:
                            content_type = constants.media_types[
                                content_type_number.value]

                        token_id = msg.token
                        app_ip = loads(msg.payload)["app_ip"]
                        app_port = loads(msg.payload)["app_port"]
                        client_port = self.generate_client_port()

                        response = self.observation.forward_request(path, remote, observe_value, \
                                            self.lwm2m_dm_server_ip, self.lwm2m_dm_server_port, \
                                            app_ip, app_port, token_id, client_port)

                        msg = connection.Message(connection.Message.ACK, code=constants.CONTENT, \
                                                 payload="test") #todo: payload to be replaced
                        self.dm_server.sendto(
                            msg._pack(rx_record.transaction_id, token_id),
                            remote)

                elif observe_value == OBSERVE_OPTION_VALUE_CANCEL_OBSERVATION:
                    """ Removes the observation from the List """
                    self.logger.info("Cancel Observation Request Received")
                    path = msg.findOption(URI_PATH_VALUE)
                    token_id = msg.token
                    app_ip = loads(msg.payload)["app_ip"]
                    app_port = loads(msg.payload)["app_port"]
                    client_port = self.generate_client_port()

                    response = self.observation.forward_request(path, remote, observe_value, \
                                self.lwm2m_dm_server_ip, self.lwm2m_dm_server_port, \
                                app_ip, app_port, token_id, client_port)

                    def _handle_response(response):
                        msg = connection.Message(connection.Message.ACK,
                                                 code=constants.CONTENT,
                                                 payload=response)
                        self.dm_server.sendto(
                            msg._pack(rx_record.transaction_id), remote)

                    response.then(_handle_response)

                else:
                    method = None
                    try:
                        method = uri_query[0].value.split("=")[1]
                    except:
                        pass

                    if method == "read":
                        path = msg.findOption(URI_PATH_VALUE)
                        self.read.read_resource(path, remote)
                        msg = connection.Message(connection.Message.ACK, code=constants.CONTENT, \
                                             payload="info read", content_type="text/plain")
                        self.dm_server.sendto(
                            msg._pack(rx_record.transaction_id), remote)

                    elif method == "discover":
                        if msg.payload == "/.well-known/core":
                            payload = dumps(self.discover.get_all_resources())
                        else:
                            path = msg.findOption(URI_PATH_VALUE)
                            client_port = self.generate_client_port()
                            payload = self.discover.forward_request(
                                path, remote, client_port)

                        def _handle_response(payload):
                            msg = connection.Message(connection.Message.ACK, code=constants.CONTENT, \
                                                 payload=payload, content_type="application/json")
                            self.dm_server.sendto(
                                msg._pack(rx_record.transaction_id), remote)

                        if isinstance(payload, Promise):
                            payload.then(_handle_response)
                        else:
                            _handle_response(payload)

        elif msg.transaction_type == connection.Message.NON:
            print "reached msg non"
            payload = msg.payload
            print payload

    def set_general_observation_params(self, ):
        return {
            "listener_ip": self.lwm2m_dm_server_ip,
            "listener_port": self.lwm2m_dm_server_port
        }

    def send_general_observation(self, registered_client_location):
        if registered_client_location is not None:
            payload = dumps(self.set_general_observation_params())
            endpoint_object = self.lwm2m_resources.return_endpoint_object(
                endpoint_location=registered_client_location)
            client_listener_ip = endpoint_object.listener_ip
            client_listener_port = endpoint_object.listener_port

            request = lwm2m_api()
            response = request.observe_resource(client_listener_ip, client_listener_port, \
                                                payload=payload, client_port=self.generate_client_port())

    def set_m2m_server_adapter_params(self, rx_record, remote):
        msg = rx_record.message
        #content_type is application/json
        listener_ip = loads(msg.payload)["listener_ip"]
        listener_port = loads(msg.payload)["listener_port"]
        token_id = msg.token

        self.general_observation = GeneralObservationInformation(
            listener_ip, listener_port, token_id)

        response = "Observation Started on the LWM2M Server"
        msg = connection.Message(connection.Message.ACK,
                                 code=constants.CONTENT,
                                 payload=response)
        self.dm_server.sendto(msg._pack(rx_record.transaction_id, token_id),
                              remote)

    def generate_client_port(self, ):
        if self.sem_counter >= 1000:
            self.sem_counter = 0

        self.sem.acquire()
        self.sem_counter += 1

        sem_counter = self.sem_counter
        self.sem.release()

        client_port = self.local_client_port_ + sem_counter
        return client_port
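
A minimal sketch of the counter-under-semaphore pattern used by generate_client_port above. The class name, base port and 1000-port window are illustrative assumptions, not values from the original adapter; unlike the original, the wrap-around check here happens inside the lock.

from gevent.lock import Semaphore  # "gevent.coros" in older gevent releases


class PortAllocator(object):
    """Hand out client ports by bumping a counter under a semaphore."""

    def __init__(self, base_port=56830, window=1000):  # assumed values
        self._sem = Semaphore()
        self._counter = 0
        self._base_port = base_port
        self._window = window

    def next_port(self):
        self._sem.acquire()
        try:
            self._counter = (self._counter + 1) % self._window
            return self._base_port + self._counter
        finally:
            self._sem.release()
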
Example No. 37
0
class SchedValve(SchedCommon):
    """Mirrors (and monitors) a valve."""
    locked = False  # external command, don't change
    sched = None
    sched_ts = None
    sched_job = None
    sched_lock = None
    on = False
    on_ts = None
    flow = 0
    _flow_check = None

    def __new__(cls, v):
        if v.id in valves:
            return valves[v.id]
        self = object.__new__(cls)
        valves[v.id] = self
        self.v = v
        self.site = SchedSite(self.v.controller.site)
        self.env = EnvGroup(self.v.envgroup)
        self.controller = SchedController(self.v.controller)
        self.sched_lock = Semaphore()
        if self.site.qb:
            try:
                self.site.send_command("set", "output", "off",
                                       *(self.v.var.split()))
            except NotConnected:
                pass
            except Exception as e:
                raise RuntimeError(self.v.var) from e
        return self

    def __init__(self, v):
        pass

    def _on(self, caller, sched=None, duration=None):
        print("Open", caller, self.v.var, file=sys.stderr)
        self.site.delay_on()
        if duration is None and sched is not None:
            duration = sched.duration
        if self.controller.has_max_on():
            print("… but too many:",
                  ", ".join(
                      str(v) for v in self.controller.c.valves.all()
                      if SchedValve(v).on),
                  file=sys.stderr)
            if sched:
                sched.update(seen=False)
            self.log("NOT running %s for %s: too many" % (
                self.v,
                duration,
            ))
            raise TooManyOn(self)
        if duration is None:
            self.log("Run (indefinitely)")
            self.site.send_command("set", "output", "on",
                                   *(self.v.var.split()))
        else:
            self.log("Run for %s" % (duration, ))
            if not isinstance(duration, six.integer_types):
                duration = duration.total_seconds()
            try:
                self.site.send_command("set",
                                       "output",
                                       "on",
                                       *(self.v.var.split()),
                                       sub=(("for", duration), ("async", )))
            except Exception:
                # Something broke. Try to turn this thing off.
                self.log(format_exc())

                self.site.send_command("set", "output", "off",
                                       *(self.v.var.split()))
                raise RuntimeError("Could not start (logged)")

        if sched is not None:
            if self.v.verbose:
                self.log("Opened for %s" % (sched, ))
            self.sched = sched
            if not sched.seen:
                sched.update(start=now(), seen=True)
                sched.refresh()
            #Save(sched)
        else:
            if self.v.verbose:
                self.log("Opened for %s" % (duration, ))

    def _off(self, num):
        if self.on:
            if self.v.verbose:
                self.log("Closing " + str(num))
            print("Close", self.v.var, file=sys.stderr)
        try:
            self.site.send_command("set", "output", "off",
                                   *(self.v.var.split()))
        except NotConnected:
            pass

    def shutdown(self):
        if self._flow_check is not None:
            self._flow_check.dead()

    def run_schedule(self):
        if not self.sched_lock.acquire(blocking=False):
            if self.v.verbose:
                print("SCHED LOCKED1 %s" % (self.v.name, ), file=sys.stderr)
            return
        try:
            self._run_schedule()
        except Exception:
            self.log(format_exc())
        finally:
            self.sched_lock.release()

    def _run_schedule(self):
        if self.sched_job is not None:
            self.sched_job.kill()
            self.sched_job = None
        if self.locked:
            if self.v.verbose:
                print("SCHED LOCKED2 %s" % (self.v.name, ), file=sys.stderr)
            return
        n = now()

        try:
            if self.sched is not None:
                self.sched.refresh()
                if self.sched.end <= n:
                    if self.v.verbose:
                        print("Turn off: %s+%s <= %s" %
                              (self.sched.start, self.sched.duration, n),
                              file=sys.stderr)
                    self._off(2)
                    self.sched = None
                else:
                    self.sched_job = gevent.spawn_later(
                        (self.sched.end - n).total_seconds(),
                        connwrap,
                        self.run_sched_task,
                        reason="_run_schedule 1")
                    if self.v.verbose:
                        print("SCHED LATER %s: %s" %
                              (self.v.name, humandelta(self.sched.end - n)),
                              file=sys.stderr)
                    return
        except ObjectDoesNotExist:
            pass  # somebody deleted it *shrug*
        sched = None

        if self.sched_ts is None:
            try:
                sched = self.v.schedules.filter(
                    start__lt=n).order_by("-start")[0]
            except IndexError:
                self.sched_ts = n - timedelta(1, 0)
            else:
                self.sched_ts = sched.end
                if sched.end > n:  # still running
                    if self.v.verbose:
                        print("SCHED RUNNING %s: %s" %
                              (self.v.name, humandelta(sched.end - n)),
                              file=sys.stderr)
                    try:
                        self._on(1, sched, sched.end - n)
                    except TooManyOn:
                        self.log("Could not schedule: too many open valves")
                    except NotConnected:
                        self.log(
                            "Could not schedule: connection to MoaT failed")
                    return

        try:
            sched = self.v.schedules.filter(
                start__gte=self.sched_ts).order_by("start")[0]
        except IndexError:
            if self.v.verbose:
                print("SCHED EMPTY %s: %s" %
                      (self.v.name, str_tz(self.sched_ts)),
                      file=sys.stderr)
            self._off(3)
            return

        if sched.end <= n:
            if self.v.verbose:
                print("SCHED %s: sched %d done for %s" %
                      (self.v.name, sched.id, humandelta(n - sched.end)),
                      file=sys.stderr)
            self.sched_ts = None
            return
        if sched.start > n:
            if self.v.verbose:
                print("SCHED %s: sched %d in %s" %
                      (self.v.name, sched.id, humandelta(sched.start - n)),
                      file=sys.stderr)
            self._off(4)
            self.sched_job = gevent.spawn_later(
                (sched.start - n).total_seconds(),
                connwrap,
                self.run_sched_task,
                reason="_run_schedule 2")
            return
        try:
            self._on(2, sched)
        except TooManyOn:
            self.log("Could not schedule: too many open valves")
        except NotConnected:
            self.log("Could not schedule: connection to MoaT failed")

    def run_sched_task(self, reason="valve"):
        self.sched_job = None
        self.site.run_sched_task(reason=reason)

    run_sched_ext = async_gevent(run_sched_task)

    def add_flow(self, val):
        if self._flow_check is not None:
            if self._flow_check.add_flow(val):
                return
        if self.v.verbose:
            print("FLOW %s: %s %s" % (self.v.name, self.flow, val),
                  file=sys.stderr)
        self.flow += val

    @async_gevent
    def check_flow(self, **k):
        cf = None
        try:
            cf = FlowCheck(self)
            cf.run()
        except Exception as ex:
            log_error(self.v)
            if cf is not None:
                cf._unlock()

    def refresh(self):
        self.v.refresh()
        # if self.sched is not None:
        #     self.sched.refresh()

    def connect_monitors(self):
        if self.site.qb is None:
            return
        n = self.v.var.replace(' ', '.')
        self.mon = self.site.qb.register_alert_gevent(
            "moat.event.output.change." + n,
            self.watch_state,
            call_conv=CC_DICT)
        self.ckf = self.site.qb.register_rpc_gevent("rain.check.flow." + n,
                                                    self.check_flow,
                                                    call_conv=CC_DICT)

    @async_gevent
    def watch_state(self, value=None, **kv):
        """output change NAME ::value ON"""
        on = (str(value).lower() in ("1", "true", "on"))
        if self._flow_check is not None:
            # TODO
            self.on = on
            self._flow_check.state(on)
            return
        if self.locked:
            self.on = on
            return
        try:
            if on != self.on:
                n = now()
                print("Report %s" % ("ON" if on else "OFF"),
                      self.v.var,
                      self.sched,
                      file=sys.stderr)
                if self.sched is not None and not on:
                    self.sched.update(
                        db_duration=(n - self.sched.start).total_seconds())
                    self.sched.refresh()
                    self.sched_ts = self.sched.end
                    self.sched = None
                flow, self.flow = self.flow, 0
                # If nothing happened, calculate.
                if not on:
                    duration = n - self.on_ts
                    maxflow = self.v.flow * duration.total_seconds()
                    if (not flow or not self.v.feed.var) or flow > 2 * maxflow:
                        flow = maxflow
                self.new_level_entry(flow)
                if not on:
                    if self.v.level > self.v.stop_level + (
                            self.v.start_level - self.v.stop_level) / 5:
                        self.v.update(priority=True)
                    self.log("Done for %s, level is now %s" %
                             (duration, self.v.level))
                self.on = on
                self.on_ts = n

        except Exception:
            print_exc()

    def sync(self):
        flow, self.flow = self.flow, 0
        self.new_level_entry(flow)

    def sync_history(self):
        n = now()
        try:
            lv = self.v.levels.order_by("-time")[0]
        except IndexError:
            pass
        else:
            if self.v.time > lv.time:
                self.log("Timestamp downdate: %s %s" % (self.v.time, lv.time))
                self.v.update(time=lv.time)
                self.v.refresh()
                #Save(self.v)
        if (n - self.v.time).total_seconds() >= 295:
            flow, self.flow = self.flow, 0
            self.new_level_entry(flow)

    def new_level_entry(self, flow=0):
        self.site.current_history_entry()
        n = now()
        self.v.refresh()
        hts = None
        try:
            lv = self.v.levels.order_by("-time")[0]
        except IndexError:
            ts = n - timedelta(1, 0)
        else:
            ts = lv.time
        sum_f = 0
        sum_r = 0
        for h in self.site.s.history.filter(time__gt=ts).order_by("time"):
            if self.v.verbose > 2:
                self.log("Env factor for %s: T=%s W=%s S=%s" %
                         (h, h.temp, h.wind, h.sun))
            f = self.env.env_factor(
                h,
                logger=self.log if self.v.verbose > 2 else None) * self.v.adj
            if self.v.verbose > 1:
                self.log("Env factor for %s is %s" % (h, f))
            sum_f += self.site.s.db_rate * self.v.do_shade(
                self.env.eg.factor * f) * (h.time - ts).total_seconds()
            sum_r += self.v.runoff * h.rain
            ts = h.time

        if self.v.verbose:
            self.log("Apply env %f, rain %r, flow %f = %f" %
                     (sum_f, sum_r, flow, flow / self.v.area))

        if self.v.time == ts:
            return
        if self.v.level < 0:
            level = 0
        else:
            level = F('level')
        level += sum_f
        if (flow > 0 or sum_r > 0) and self.v.level > self.v.max_level:
            level = self.v.max_level
        level -= flow / self.v.area + sum_r
        #if level < 0:
        #	self.log("Level %s ?!?"%(self.v.level,))
        self.v.update(time=ts, level=level)
        self.v.refresh()

        lv = Level(valve=self.v, time=ts, level=self.v.level, flow=flow)
        lv.save()

        if self.on and not (self.sched and self.sched.forced
                            ) and self.v.level <= self.v.stop_level:
            self._off(5)

    def log(self, txt):
        log(self.v, txt)
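
The run_schedule method above uses a non-blocking acquire so that at most one scheduling pass runs per valve while later requests are simply skipped. A stand-alone sketch of that guard, with illustrative names:

from gevent.lock import Semaphore


class SingleRunner(object):
    """Run a callable at most once at a time; skip instead of queueing."""

    def __init__(self):
        self._lock = Semaphore()

    def run(self, work):
        if not self._lock.acquire(blocking=False):
            return False  # another greenlet is already running the work
        try:
            work()
            return True
        finally:
            self._lock.release()
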
Example No. 38
0
class Client(object):
    px_per_tick = 10
    
    def __init__(self, canvas, socket, address):
        self.canvas = canvas
        self.socket = socket
        self.address = address
        self.connect_ts = time.time()
        # This buffer discards all but the newest 1024 messages
        self.sendbuffer = deque([], 1024)
        # And this is used to limit clients to X messages per tick
        # We start at 0 (instead of x) to add a reconnect-penalty.
        self.limit = Semaphore(0)
        print 'CONNECT', address

    def send(self, line):
        self.sendbuffer.append(line.strip() + '\n')

    def disconnect(self):
        print 'DISCONNECT', self.address
        self.socket.close()
        del self.canvas.clients[self.address]

    def serve(self):
        sendall = self.socket.sendall
        readline = self.socket.makefile().readline
        try:
            while True:
                # Idea: send first, receive later. If the client is too
                # slow to drain the sendbuffer, it cannot send.
                while self.sendbuffer:
                    sendall(self.sendbuffer.popleft())
                line = readline()
                if not line:
                    break
                arguments = line.split()
                command = arguments.pop(0)
                if command == 'PX':
                    self.on_PX(arguments)
                elif command == 'SIZE':
                    self.on_SIZE(arguments)
        finally:
            self.disconnect()

    def on_SIZE(self, args):
        self.send('SIZE %d %d' % self.canvas.get_size())

    def on_PX(self, args):
        self.limit.acquire()
        x,y,color = args
        x,y = int(x), int(y)
        c = int(color, 16)
        if c <= 16777215:
            r = (c & 0xff0000) >> 16
            g = (c & 0x00ff00) >> 8
            b =  c & 0x0000ff
            a =      0xff
        else:
            r = (c & 0xff000000) >> 24
            g = (c & 0x00ff0000) >> 16
            b = (c & 0x0000ff00) >> 8
            a =  c & 0x000000ff
        self.canvas.set_pixel(x, y, r, g, b, a)

    def tick(self):
        while self.limit.counter <= self.px_per_tick:
            self.limit.release()
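
A hedged sketch of how the per-tick credit scheme above might be driven: a loop that periodically calls tick() on every connected client so that on_PX calls blocked on the semaphore can proceed. The clients dict and the tick interval are assumptions, not part of the original server.

import gevent


def run_ticker(clients, interval=0.05):  # assumed tick interval
    """Top up each client's per-tick message budget, forever."""
    while True:
        for client in list(clients.values()):
            client.tick()  # releases that client's Semaphore
        gevent.sleep(interval)
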
Example No. 39
0
class RateLimiterState(object):
    def __init__(self, fail_limit, fail_period, blackout_wait):
        self.__local_rlservice = None
        self.__request_fails = 0
        self.__fails = deque()
        self.__fail_limit = fail_limit
        self.__fail_period = fail_period
        self.__blackout_start = None
        self.__blackout_wait = blackout_wait
        self.__is_ec2 = utils.is_ec2()
        self.__service_init_semaphore = Semaphore()
        stack_info = libs.ec2_utils.get_stack()
        self.__stack_name = 'localhost'
        self.__node_name = 'localhost'
        if stack_info is not None:
            self.__stack_name = stack_info.instance.stack
            self.__node_name = stack_info.instance.name
        self.__last_email_time = 0
        self.__emails = deque()

        # determine the private ip address of the ratelimiter instance for this stack
        self._getHost()
        print('### host: %s' % self.__host)

    class FailLog(object):
        def __init__(self, exception):
            self.timestamp = time.time()
            self.exception = exception

    @property
    def _local_rlservice(self):
        if self.__local_rlservice is None:
            # use a semaphore here because if two requests come in immediately, we might instantiate two services
            self.__service_init_semaphore.acquire()
            if self.__local_rlservice is None:
                if self.__is_ec2:
                    self.__local_rlservice = StampedRateLimiterService(throttle=True)
                else:
                    self.__local_rlservice = StampedRateLimiterService(throttle=False)
            self.__service_init_semaphore.release()

        return self.__local_rlservice

    def _getHost(self):
        ratelimiter_nodes = None
        try:
            ratelimiter_nodes = libs.ec2_utils.get_nodes('ratelimiter')
        except:
            logs.warning("Could not find a node with tag 'ratelimiter' on same stack")
        if ratelimiter_nodes is None:
            self.__host = 'localhost'
        else:
            self.__host = ratelimiter_nodes[0]['private_ip_address']
        self.__port = 18861

    def sendFailLogEmail(self):
        if len(self.__fails) == 0:
            return

        output = '<html>'
        output += "<h3>RateLimiter RPC Server Failure on %s</h3>" % self.__stack_name
        output += "<p>On stack '%s' instance '%s'.</p>" % (self.__stack_name, self.__node_name)
        output += "<p><i>There were %s failed requests to the rpc server within the last %s seconds</i></p>" %\
                  (self.__fail_limit, self.__fail_period)
        back_online = time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(self.__blackout_start + self.__blackout_wait)) # Timestamp
        output += "<p>Waiting for %s seconds.  Will use local Rate Limiter service until: %s</p>" % (self.__blackout_wait, back_online)

        output += '<h3>Fail Log</h3>'

        output += '<table border=1 cellpadding=5>'
        output += '<tr>'
        labels = ['Timestamp', 'Exception']
        for label in labels:
            output += '<td style="font-weight:bold">%s</td>' % label
        output += '</tr>'

        for fail in self.__fails:
            output += '<tr>'
            output += '<td valign=top>%s</td>' % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(fail.timestamp)) # Timestamp
            output += '<td valign=top>%s</td>' % fail.exception
            output += '</tr>'

        output += '</table>'

        output += '</html>'


        try:
            email = {}
            email['from'] = 'Stamped <*****@*****.**>'
            email['to'] = '*****@*****.**'
            email['subject'] = "%s RateLimiter RPC server failure" % self.__stack_name
            email['body'] = output
            utils.sendEmail(email, format='html')
        except Exception as e:
            print('UNABLE TO SEND EMAIL: %s' % e)

        return output

    def _fail(self, exception):
        try:
            del localData.rateLimiter
        except:
            pass

        self._getHost()
        if self.__blackout_start is not None:
            return

        self.__fails.append(self.FailLog(exception))

        now = time.time()

        cutoff = now - self.__fail_period
        count = 0

        while len(self.__fails) > 0:
            if self.__fails[0].timestamp < cutoff:
                self.__fails.popleft()
            else:
                break

        count = len(self.__fails)

        if count >= self.__fail_limit:
            print('### RPC server fail threshold reached')
            self.__blackout_start = time.time()
            #self.__local_rlservice.loadDbLog()    # update the local call log from the db


            logs.error('RPC server request FAIL THRESHOLD REACHED')
            # Email dev if a fail limit was reached
            if self.__is_ec2:
                if self.__last_email_time is not None and (time.time() - self.__last_email_time) > EMAIL_WAIT:
                    self.sendFailLogEmail()
                    self.__last_email_time = time.time()

    def _isBlackout(self):
        if self.__blackout_start is None:
            return False
        if self.__blackout_start + self.__blackout_wait > time.time():
            return True
        else:
            self.__blackout_start = None
            self.__request_fails = 0
            #            self.__local_rlservice.shutdown()
            #            self.__local_rlservice = None
            return False

    @property
    def _rpc_service_connection(self):
        try:
            return localData.rateLimiter
        except AttributeError:
            config = {
                'allow_pickle' : True,
                'allow_all_attrs' : True,
                'instantiate_custom_exceptions' : True,
                'import_custom_exceptions' : True,
                }
            localData.rateLimiter = rpyc.connect(self.__host, self.__port, config=config)
            return localData.rateLimiter

    def _rpc_service_request(self, service, method, url, body, header, priority, timeout):
        async_request = rpyc.async(self._rpc_service_connection.root.request)
        asyncresult = async_request(service, priority, timeout, method, url, pickle.dumps(body), pickle.dumps(header))
        asyncresult.set_expiry(timeout)
        response, content = asyncresult.value

        return pickle.loads(response), content

    def _local_service_request(self, service, method, url, body, header, priority, timeout):
        response, content = self._local_rlservice.handleRequest(service, priority, timeout, method, url, body, header)
        return response, content

    def request(self, service, method, url, body, header, priority, timeout):
        if not self.__is_ec2 or self._isBlackout():
            return self._local_service_request(service, method.upper(), url, body, header, priority, timeout)
        try:
            logs.info('### attempting rpc service request')
            return self._rpc_service_request(service, method.upper(), url, body, header, priority, timeout)
        except DailyLimitException as e:
            raise StampedThirdPartyRequestFailError("Hit daily rate limit for service: '%s'" % service)
        except WaitTooLongException as e:
            raise StampedThirdPartyRequestFailError("'%s' request estimated wait time longer than timeout" % service)
        except TimeoutException as e:
            raise StampedThirdPartyRequestFailError("'%s' request timed out." % service)
        except TooManyFailedRequestsException as e:
            raise StampedThirdPartyRequestFailError("%s" % e)
        except Exception as e:
            import traceback
            print('### caught exception  type: %s  e: %s' % (type(e), e))
            logs.info("RPC Service Request fail."
                      "service: %s  method: %s  url: %s  body: %s  header: %s"
                      "priority: %s  timeout: %s  Exception: %s  Stack: %s" %
                      (service, method, url, body, header, priority, timeout, e, traceback.format_exc()))
            self._fail(e)
        logs.info('### Falling back to local rate limiter request')
        return self._local_service_request(service, method.upper(), url, body, header, priority, timeout)
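
The _local_rlservice property above is double-checked initialisation guarded by a semaphore; a generic sketch of the same idea, with a caller-supplied factory standing in for StampedRateLimiterService.

from gevent.lock import Semaphore


class LazyService(object):
    """Create an expensive object at most once, even under concurrent access."""

    def __init__(self, factory):
        self._factory = factory
        self._instance = None
        self._init_sem = Semaphore()

    def get(self):
        if self._instance is None:
            self._init_sem.acquire()
            try:
                if self._instance is None:  # re-check inside the lock
                    self._instance = self._factory()
            finally:
                self._init_sem.release()
        return self._instance
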
Example No. 40
0
def test_acquire_returns_false_after_timeout(self):
    s = Semaphore(value=0)
    result = s.acquire(timeout=0.01)
    assert result is False, repr(result)
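
As the test above shows, gevent's Semaphore.acquire returns False instead of raising when the timeout expires; a small sketch of acting on that return value (the helper name is illustrative).

from gevent.lock import Semaphore


def try_acquire(sem, timeout=0.5):
    """Return True if the semaphore was obtained within the timeout."""
    if sem.acquire(timeout=timeout):
        return True
    # Timed out: the caller can back off, log, or retry later.
    return False
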
Example No. 41
0
class Pool(Group):
    def __init__(self, size=None, greenlet_class=None):
        if size is not None and size < 1:
            raise ValueError(
                'Invalid size for pool (positive integer or None required): %r'
                % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        if size is None:
            self._semaphore = DummySemaphore()
        else:
            self._semaphore = Semaphore(size)

    def wait_available(self):
        self._semaphore.wait()

    def full(self):
        return self.free_count() <= 0

    def free_count(self):
        if self.size is None:
            return 1
        return max(0, self.size - len(self))

    def start(self, greenlet):
        self._semaphore.acquire()
        try:
            self.add(greenlet)
        except:
            self._semaphore.release()
            raise
        greenlet.start()

    def spawn(self, *args, **kwargs):
        self._semaphore.acquire()
        try:
            greenlet = self.greenlet_class.spawn(*args, **kwargs)
            self.add(greenlet)
        except:
            self._semaphore.release()
            raise
        return greenlet

    def spawn_link(self, *args, **kwargs):
        self._semaphore.acquire()
        try:
            greenlet = self.greenlet_class.spawn_link(*args, **kwargs)
            self.add(greenlet)
        except:
            self._semaphore.release()
            raise
        return greenlet

    def spawn_link_value(self, *args, **kwargs):
        self._semaphore.acquire()
        try:
            greenlet = self.greenlet_class.spawn_link_value(*args, **kwargs)
            self.add(greenlet)
        except:
            self._semaphore.release()
            raise
        return greenlet

    def spawn_link_exception(self, *args, **kwargs):
        self._semaphore.acquire()
        try:
            greenlet = self.greenlet_class.spawn_link_exception(
                *args, **kwargs)
            self.add(greenlet)
        except:
            self._semaphore.release()
            raise
        return greenlet

    def discard(self, greenlet):
        Group.discard(self, greenlet)
        self._semaphore.release()
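
A usage sketch for a pool like the one above; gevent.pool.Pool offers the same spawn surface, so it stands in here for the listed class. The fetch function and pool size are illustrative.

import gevent
from gevent.pool import Pool


def fetch(n):
    gevent.sleep(0.01)  # stand-in for real I/O
    return n * n

pool = Pool(3)  # at most three greenlets run at once
jobs = [pool.spawn(fetch, i) for i in range(10)]
gevent.joinall(jobs)
print([job.value for job in jobs])
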
Example No. 42
0
class DataFrameIO(io.RawIOBase):

    """Raw I/O implementation for stream sockets.

    This class supports the makefile() method on sockets.  It provides
    the raw I/O interface on top of a socket object.
    """

    def __init__(self):
        io.RawIOBase.__init__(self)
        self.buf = bytearray()
        self.target_size = 0
        self.semaphore = Semaphore()
        self._closed = False

    @property
    def closed(self):
        return self._closed

    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read.  If the socket is non-blocking and no bytes
        are available, None is returned.

        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """

        self.target_size = len(b)

        if self.target_size > len(self.buf):
            self.semaphore.acquire()

        b[0 : self.target_size] = self.buf[0 : self.target_size]
        del self.buf[0 : self.target_size]

        return self.target_size

    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written.  This can be less than
        len(b) if not all data could be written.  If the socket is
        non-blocking and no bytes could be written None is returned.
        """

        self.buf += b

        if self.target_size <= len(self.buf):
            self.semaphore.release()

        return len(b)

    def readable(self):

        return True

    def writable(self):

        return True

    def seekable(self):

        return False

    def fileno(self):
        return None

    def close(self):
        io.RawIOBase.close(self)
        self._closed = True
        self.buf = bytearray()
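
A minimal usage sketch for the class above, assuming it runs under gevent: the writer fills the buffer and releases the semaphore, after which readinto can copy the requested bytes. Spawning the writer first is an assumption made to keep the example short.

import gevent

buf_io = DataFrameIO()


def writer():
    buf_io.write(b"hello")  # appends to the buffer, releases the semaphore


def reader():
    out = bytearray(5)
    n = buf_io.readinto(out)  # acquires the semaphore if the buffer is short
    print(bytes(out[:n]))

gevent.joinall([gevent.spawn(writer), gevent.spawn(reader)])
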
Example No. 43
0
class BaseDataHandler(object):
    _params = {
        'POLLING_INTERVAL' : 3600,
        'PATCHABLE_CONFIG_KEYS' : ['stream_id','constraints']
    }
    _polling = False
    _polling_glet = None
    _dh_config = {}
    _rr_cli = None

    def __init__(self, rr_cli, stream_registrar, dh_config):
        self._dh_config=dh_config
        self._stream_registrar = stream_registrar
        self._rr_cli = rr_cli

    def set_event_callback(self, evt_callback):
        self._event_callback = evt_callback

    def _dh_event(self, type, value):
        event = {
            'type' : type,
            'value' : value,
            'time' : time.time()
        }
        self._event_callback(event)

    def _poll(self):
        """
        Internal polling method, run inside a greenlet, that triggers execute_acquire_data without configuration mods
        The polling interval (in seconds) is retrieved from the POLLING_INTERVAL parameter
        """
        self._polling = True
        interval = get_safe(self._params, 'POLLING_INTERVAL', 3600)
        log.debug('Polling interval: {0}'.format(interval))
        while self._polling:
            self.execute_acquire_data()
            time.sleep(interval)

    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Command a DataHandler by request-reply messaging. Package command
        message and send on blocking command socket. Block on same socket
        to receive the reply. Return the driver reply.
        @param cmd The DataHandler command identifier.
        @param args Positional arguments of the command.
        @param kwargs Keyword arguments of the command.
        @retval Command result.
        """
        # Package command dictionary.

        #need to account for observatory_execute_resource commands
        #connect -> Not used
        #get_current_state -> Not used
        #discover -> Not used
        #disconnect -> Not used

        log.debug('cmd_dvr received command \'{0}\' with: args={1} kwargs={2}'.format(cmd, args, kwargs))

        reply = None
        if cmd == 'initialize':
            # Delegate to BaseDataHandler.initialize()
            reply = self.initialize(*args, **kwargs)
        elif cmd == 'get':
            # Delegate to BaseDataHandler.get()
            reply = self.get(*args, **kwargs)
        elif cmd == 'set':
            # Delegate to BaseDataHandler.set()
            reply = self.set(*args, **kwargs)
        elif cmd == 'get_resource_params':
            # Delegate to BaseDataHandler.get_resource_params()
            reply = self.get_resource_params(*args, **kwargs)
        elif cmd == 'get_resource_commands':
            # Delegate to BaseDataHandler.get_resource_commands()
            reply = self.get_resource_commands(*args, **kwargs)
        elif cmd == 'execute_acquire_data':
            # Delegate to BaseDataHandler.execute_acquire_data()
            reply = self.execute_acquire_data(*args, **kwargs)
        elif cmd == 'execute_start_autosample':
            # Delegate to BaseDataHandler.execute_start_autosample()
            reply = self.execute_start_autosample(*args, **kwargs)
        elif cmd == 'execute_stop_autosample':
            # Delegate to BaseDataHandler.execute_stop_autosample()
            reply = self.execute_stop_autosample(*args, **kwargs)
        elif cmd in ['configure','connect','disconnect','get_current_state','discover','execute_acquire_sample']:
            # Disregard
            log.info('Command \'{0}\' not used by DataHandler'.format(cmd))
            pass
        else:
            desc='Command \'{0}\' unknown by DataHandler'.format(cmd)
            log.info(desc)
            raise InstrumentCommandException(desc)

        return reply

    def initialize(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: This should put the DataHandler back into an 'unconfigured' state
        """
        Called from:
                      InstrumentAgent._handler_idle_reset
                      InstrumentAgent._handler_idle_go_inactive
                      InstrumentAgent._handler_stopped_reset
                      InstrumentAgent._handler_stopped_go_inactive
                      InstrumentAgent._handler_observatory_reset
                      InstrumentAgent._handler_observatory_go_inactive
                      InstrumentAgent._handler_uninitialized_initialize
                      |--> ExternalDataAgent._start_driver
        """
        log.debug('Initializing DataHandler...')
        self._glet_queue = []
        self._semaphore=Semaphore()
        return None

    def configure(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: This should configure the DataHandler for the particular dataset
        """
        Called from:
                      InstrumentAgent._handler_inactive_go_active
        """
        log.debug('Configuring DataHandler: args = {0}'.format(args))
        try:
            self._dh_config = args[0]

        except IndexError:
            raise InstrumentParameterException('\'acquire_data\' command requires a config dict as the first argument')

        return

    def execute_acquire_data(self, *args):
        """
        Creates a copy of self._dh_config, creates a publisher, and spawns a greenlet to perform a data acquisition cycle
        If the args[0] is a dict, any entries keyed with one of the 'PATCHABLE_CONFIG_KEYS' are used to patch the config
        Greenlet binds to BaseDataHandler._acquire_data and passes the publisher and config
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @parameter args First argument can be a config dictionary
        """
        log.debug('Executing acquire_data: args = {0}'.format(args))

        # Make a copy of the config to ensure no cross-pollution
        config = self._dh_config.copy()

        # Patch the config if mods are passed in
        try:
            config_mods = args[0]
            if not isinstance(config_mods, dict):
                raise IndexError()

            log.debug('Configuration modifications provided: {0}'.format(config_mods))
            for k in self._params['PATCHABLE_CONFIG_KEYS']:
                if get_safe(config_mods, k):
                    config[k] = config_mods[k]

        except IndexError:
            log.info('No configuration modifications were provided')

        # Verify that there is a stream_id member in the config
        stream_id = get_safe(config, 'stream_id')
        if not stream_id:
            raise ConfigurationError('Configuration does not contain required \'stream_id\' member')

        isNew = get_safe(config, 'constraints') is None

        if isNew and not self._semaphore.acquire(blocking=False):
            log.warn('Already acquiring new data - action not duplicated')
            return

        if isNew:
            # Get any NewDataCheck attachments and add them to the config
            ext_ds_id = get_safe(config,'external_dataset_res_id')
            if ext_ds_id:
                try:
                    attachment_objs, _ = self._rr_cli.find_objects(ext_ds_id, PRED.hasAttachment, RT.Attachment, False)
                    for attachment_obj in attachment_objs:
                        kwds = set(attachment_obj.keywords)
                        if 'NewDataCheck' in kwds:
                            log.debug('Found NewDataCheck attachment: {0}'.format(attachment_obj))
                            config['new_data_check'] = attachment_obj.content
                            break
                        else:
                            log.debug('Found attachment: {0}'.format(attachment_obj))
                except NotFound:
                    raise InstrumentException('ExternalDatasetResource \'{0}\' not found'.format(ext_ds_id))

        if not get_safe(config, 'new_data_check'):
            config['new_data_check'] = None

        # Create a publisher to pass into the greenlet
        publisher = self._stream_registrar.create_publisher(stream_id=stream_id)

        # Spawn a greenlet to do the data acquisition and publishing
        g = spawn(self._acquire_data, config, publisher, self._unlock_new_data_callback)
        log.debug('** Spawned {0}'.format(g))
        self._glet_queue.append(g)

    def execute_start_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Put the DataHandler into streaming mode and start polling for new data
        Called from:
                      InstrumentAgent._handler_observatory_go_streaming

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        log.debug('Entered execute_start_autosample with args={0} & kwargs={1}'.format(args, kwargs))
        if not self._polling and self._polling_glet is None:
            self._polling_glet = spawn(self._poll)

        return None

    def execute_stop_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Stop polling for new data and put the DataHandler into observatory mode
        Called from:
                      InstrumentAgent._handler_streaming_go_observatory

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        log.debug('Entered execute_stop_autosample with args={0} & kwargs={1}'.format(args, kwargs))
        if self._polling and not self._polling_glet is None:
            self._polling_glet.kill()
            self._polling = False
            self._polling_glet = None

        return None

    def get(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Called from:
                      InstrumentAgent._handler_get_params

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        try:
            pnames=args[0]
        except IndexError:
            log.warn("No argument provided to get, return all parameters")
            pnames = [DataHandlerParameter.ALL]

        result = None
        if DataHandlerParameter.ALL in pnames:
            result = self._params
        else:
            if not isinstance(pnames, (list,tuple)):
                raise InstrumentParameterException('Get argument not a list or tuple: {0}'.format(pnames))
            result={}
            for pn in pnames:
                try:
                    log.debug('Get parameter with key: {0}'.format(pn))
                    result[pn] = self._params[pn]
                except KeyError:
                    log.debug('\'{0}\' not found in self._params'.format(pn))
                    raise InstrumentParameterException('{0} is not a valid parameter for this DataHandler.'.format(pn))

        return result

    def set(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Called from:
                      InstrumentAgent._handler_observatory_set_params

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        # Retrieve required parameter.
        # Raise if no parameter provided, or not a dict.
        try:
            params = args[0]

        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        to_raise = []

        if not isinstance(params, dict):
            raise InstrumentParameterException('Set parameters not a dict.')
        else:
            for (key, val) in params.iteritems():
                if key in self._params:
                    log.debug('Set parameter \'{0}\' = {1}'.format(key, val))
                    self._params[key] = val
                else:
                    log.debug('Parameter \'{0}\' not in self._params and cannot be set'.format(key))
                    to_raise.append(key)

        if len(to_raise) > 0:
            log.debug('Raise InstrumentParameterException for un-set parameters: {0}'.format(to_raise))
            raise InstrumentParameterException('Invalid parameter(s) could not be set: {0}'.format(to_raise))

    def get_resource_params(self, *args, **kwargs):
        """
        Return list of resource parameters. Implemented in specific DataHandlers
        Called from:
                      InstrumentAgent._handler_get_resource_params
        """
        return self._params.keys()

    def get_resource_commands(self, *args, **kwargs):
        """
        Return list of DataHandler execute commands available.
        Called from:
                      InstrumentAgent._handler_get_resource_commands
        """
        cmds = [cmd.replace('execute_','') for cmd in dir(self) if cmd.startswith('execute_')]
        return cmds

    def _unlock_new_data_callback(self, caller):
        log.debug('** Release {0}'.format(caller))
        self._semaphore.release()

    @classmethod
    def _acquire_data(cls, config, publisher, unlock_new_data_callback):
        """
        Ensures required keys (such as stream_id) are available from config, configures the publisher and then calls:
             BaseDataHandler._new_data_constraints (only if config does not contain 'constraints')
             BaseDataHandler._publish_data passing BaseDataHandler._get_data as a parameter
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @param unlock_new_data_callback BaseDataHandler callback function to allow conditional unlocking of the BaseDataHandler._semaphore
        """
        log.debug('start _acquire_data: config={0}'.format(config))

        cls._init_acquisition_cycle(config)

        constraints = get_safe(config,'constraints')
        if not constraints:
            gevent.getcurrent().link(unlock_new_data_callback)
            constraints = cls._new_data_constraints(config)
            if constraints is None:
                raise InstrumentParameterException("Data constraints returned from _new_data_constraints cannot be None")
            config['constraints'] = constraints

        cls._publish_data(publisher, cls._get_data(config))

        # Publish a 'TestFinished' event
        if get_safe(config,'TESTING'):
            log.debug('Publish TestingFinished event')
            pub = EventPublisher('DeviceCommonLifecycleEvent')
            pub.publish_event(origin='BaseDataHandler._acquire_data', description='TestingFinished')

    @classmethod
    def _init_acquisition_cycle(cls, config):
        """
        Allows the concrete implementation to initialize/prepare objects the data handler
        will use repeatedly (such as a dataset object) in cls._new_data_constraints and/or cls._get_data
        Objects should be added to the config so they are available later in the workflow
        """
        raise NotImplementedException('{0}.{1} must implement \'_init_acquisition_cycle\''.format(cls.__module__,cls.__name__))

    @classmethod
    def _new_data_constraints(cls, config):
        #TODO: Document what "constraints" looks like (yml)!!
        """
        Determines the appropriate constraints for acquiring any "new data" from the external dataset
        Returned value cannot be None and is assigned to config['constraints']
        The format of the constraints are documented:
        @param config dict of configuration parameters - may be used to generate the returned 'constraints' dict
        @retval dict that contains the constraints for retrieval of new data from the external dataset
        """
        raise NotImplementedException('{0}.{1} must implement \'_new_data_constraints\''.format(cls.__module__,cls.__name__))

    @classmethod
    def _get_data(cls, config):
        """
        Iterable function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        @param config dict containing configuration parameters, may include constraints, formatters, etc
        @return an iterable that returns well-formed Granule objects on each iteration
        """
        raise NotImplementedException('{0}.{1} must implement \'_get_data\''.format(cls.__module__,cls.__name__))

    @classmethod
    def _publish_data(cls, publisher, data_generator):
        """
        Iterates over the data_generator and publishes granules to the stream indicated in stream_id
        """
        if data_generator is None or not hasattr(data_generator,'__iter__'):
            raise InstrumentDataException('Invalid object returned from _get_data: returned object cannot be None and must have \'__iter__\' attribute')

        for count, gran in enumerate(data_generator):
            if isinstance(gran, Granule):
                publisher.publish(gran)
            else:
                log.warn('Could not publish object returned by _get_data: {0}'.format(gran))

            #TODO: Persist the 'state' of this operation so that it can be re-established in case of failure

        #TODO: When finished publishing, update (either directly, or via an event callback to the agent) the UpdateDescription

    @classmethod
    def _calc_iter_cnt(cls, total_recs, max_rec):
        """
        Given the total number of records and the maximum records allowed in a granule,
        calculates the number of iterations required to traverse the entire array in chunks of size max_rec
        @param total_recs The total number of records
        @param max_rec The maximum number of records allowed in a granule
        """
        cnt = total_recs / max_rec
        if total_recs % max_rec > 0:
            cnt += 1

        return cnt
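
execute_acquire_data above pairs a non-blocking acquire with a greenlet link so the semaphore is released when the acquisition greenlet exits (see _acquire_data and _unlock_new_data_callback); a reduced sketch of that pattern with an illustrative worker.

import gevent
from gevent.lock import Semaphore

new_data_sem = Semaphore()


def acquire_data(unlock_callback):
    gevent.getcurrent().link(unlock_callback)  # release when this greenlet exits
    gevent.sleep(0.01)                         # stand-in for the real acquisition


def start_acquisition():
    if not new_data_sem.acquire(blocking=False):
        print('Already acquiring new data - action not duplicated')
        return None
    return gevent.spawn(acquire_data, lambda caller: new_data_sem.release())


job = start_acquisition()
if job is not None:
    job.join()
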
def process_event(sockfd_list, queue, addr, i):
    semaphore = Semaphore()
    while 1:
        semaphore.acquire()
        sockfd = sockfd_list[i]
        event = queue.get()
        chunk_size = 1024
        if check_filetype(event.pathname):
            #print "Greenlet %s got item: %s" % (i, event)
            filepath = event.path
            filename = event.name
            filesize = os.stat(event.pathname).st_size
            filepath_len = len(filepath)
            filename_len = len(filename)
                
            data = struct.pack("!LL128s128sL",filepath_len, filename_len, filepath,filename,filesize)
            fd = open(event.pathname,'rb')
            fcntl.flock(fd,fcntl.LOCK_SH)
            sockfd.write(data)
            sockfd.flush()
            
            print "File %s size: %s" % (filename, filesize)
            print 11111111111111
            offset = 0
            writen_size = 0
            if "sendfile" in sys.modules:
                #print "use sendfile(2)"
                if filesize > chunk_size:
                    while 1:
                        sent = sendfile(sockfd.fileno(), fd.fileno(), offset, chunk_size)
                        if sent == 0:
                            break
                        offset += sent
                else:
                    sendfile(sockfd.fileno(), fd.fileno(), offset, filesize)
            else:
                #print 22222222222222222
                #if filesize > chunk_size:
                #    times = filesize / chunk_size
                #    first_part_size = times * chunk_size
                #    second_part_size = filesize % chunk_size
                #    print "times: %s  first_part_size:%s  second_part_size:%s" % (times,first_part_size,second_part_size)
                #    print 3333333333333333
                #    # print "use original send function"
                #    while 1:
                #        data = fd.read(chunk_size)
                #        writen_size += len(data)
                #        sockfd.write(data)
                #        sockfd.flush()
                #        if writen_size == first_part_size:
                #            break
                #    print "writen_size in first_par: %s" % writen_size
                #    print 44444444444444444
                #    if second_part_size:
                #        data = fd.read(second_part_size)
                #        writen_size += len(data)
                #        sockfd.write(data)
                #        sockfd.flush()
                #    
                #else:
                #    data = fd.read(filesize)
                #    sockfd.write(data)
                #    sockfd.flush()
                # sendfile(2) not available: fall back to a plain read/write loop.
                while 1:
                    data = fd.read(chunk_size)
                    if not data: break
                    sockfd.write(data)
                    sockfd.flush()
                    
            fcntl.flock(fd, fcntl.LOCK_UN)
            fd.close()
        if debug:
            return
        semaphore.release()  # release before yielding; a second acquire() here would deadlock the next iteration
        gevent.sleep(0)
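process_event above frames each transfer as a fixed-size header packed with struct format "!LL128s128sL" (two unsigned longs, two 128-byte padded strings, one unsigned long) followed by the raw file bytes. The matching receiver is not part of this example; a minimal sketch of one, assuming a plain blocking socket (recv_exact and recv_file are hypothetical helper names), might be:

import struct

HEADER_FMT = "!LL128s128sL"
HEADER_SIZE = struct.calcsize(HEADER_FMT)  # 268 bytes

def recv_exact(sock, n):
    # Read exactly n bytes from a connected socket, or raise on early EOF.
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise EOFError("connection closed after %d of %d bytes" % (len(buf), n))
        buf += chunk
    return buf

def recv_file(sock):
    header = recv_exact(sock, HEADER_SIZE)
    path_len, name_len, path, name, filesize = struct.unpack(HEADER_FMT, header)
    # The 128-byte string fields are NUL padded; trim them to their real lengths.
    path = path[:path_len]
    name = name[:name_len]
    body = recv_exact(sock, filesize)
    return path, name, body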
class nscl_dm_adapter(Plugin):
    def _init(self, ):
        self._initialized()

    def _start(self, ):
        self.sem = Semaphore()
        self.sem_counter = 0
        self.set_configurations()
        self.api.run_task(self.create_server)
        self.subscribe_nscl()
        self.api.run_task(self.subscribe_dm_server)
        if self.config["enable_test"]:
            pass
            # self.api.run_task(self.send_execute_command)
            # Uncomment to check these operations
            # self.api.run_task(self.send_specific_observation)
            # self.api.run_task(self.send_specific_observation1)
            # self.api.run_task(self.send_cancel_observation)
            #self.api.run_task(self.send_discover_resources)
            #self.api.run_task(self.send_write_attributes)
            #self.api.run_task(self.send_create)
        self._started()

    def _stop(self, ):
        self.local_server.stop()
        self._stopped()

    def set_configurations(self, ):
        self.lwm2m_server_ip = self.config["lwm2m_dm_server_ip"]
        self.lwm2m_server_port = self.config["lwm2m_dm_server_port"]
        self.nscl_dm_adapter_listener_ip = self.config[
            "nscl_dm_adapter_listener_ip"]
        self.nscl_dm_adapter_listener_port = self.config[
            "nscl_dm_adapter_listener_port"]
        self.nscl_dm_adapter_client_ip = self.config[
            "nscl_dm_adapter_client_ip"]
        self.nscl_dm_adapter_client_port = self.config[
            "nscl_dm_adapter_client_port"]

    def create_server(self, ):
        self.local_server = DatagramServer(
            (self.nscl_dm_adapter_listener_ip,
             self.nscl_dm_adapter_listener_port), self.handle_request)
        self.local_server.start()

    def handle_request(self, message, remote):
        rx_record = connection.ReceptionRecord(None, message, remote)
        msg = rx_record.message
        uriQuery = msg.findOption(options.UriQuery)
        self.process(rx_record, remote, uriQuery)

    def process(self, rx_record, remote, uri_query):
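        # Confirmable (CON) notifications are ACKed before the payload is processed;
        # non-confirmable (NON) notifications are processed directly.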
        if rx_record.message.transaction_type == connection.Message.CON:
            if constants.POST == rx_record.message.code:
                if self.general_notification_token == rx_record.message.token:
                    self.logger.info("General Notification received")
                    msg = connection.Message(connection.Message.ACK,
                                             code=constants.CREATED)
                    self.local_server.sendto(
                        msg._pack(rx_record.transaction_id), remote)

                    self.process_resources(
                        json.loads(rx_record.message.payload))
                else:
                    self.logger.info("Specific Notification received")
                    msg = connection.Message(connection.Message.ACK,
                                             code=constants.CREATED)
                    self.local_server.sendto(
                        msg._pack(rx_record.transaction_id), remote)

                    payload = json.loads(rx_record.message.payload)
                    observer_ip = payload["observer_ip"]
                    observer_port = payload["observer_port"]
                    del payload["observer_ip"]
                    del payload["observer_port"]
                    self.process_resources(payload,
                                           observer_ip=observer_ip,
                                           observer_port=observer_port)

        elif rx_record.message.transaction_type == connection.Message.NON:
            if self.general_notification_token == rx_record.message.token:
                self.logger.info("General Notification received")
                self.process_resources(json.loads(rx_record.message.payload))
            else:
                self.logger.info("Specific Notification received")
                payload = json.loads(rx_record.message.payload)
                observer_ip = payload["observer_ip"]
                observer_port = payload["observer_port"]
                del payload["observer_ip"]
                del payload["observer_port"]
                self.process_resources(payload,
                                       observer_ip=observer_ip,
                                       observer_port=observer_port)

    def process_resources(self, payload, observer_ip=None, observer_port=None):
        total_resources = payload
        if observer_ip is not None and observer_port is not None:
            self.logger.info("The notification should be sent to %s:%s",
                             observer_ip, observer_port)

        for ep_name, object_resources in total_resources.iteritems():
            endpoint_name = ep_name
            for object_ids, resources in object_resources.iteritems():
                object_id = object_ids.split("_")[0]
                object_inst_id = object_ids.split("_")[1]
                resources_dict = {}
                for res_ids, res_value in resources["resources"].iteritems():
                    res_id = res_ids.split("_")[0]
                    res_inst_id = res_ids.split("_")[1]
                    resource_name = lwm2m_dict_objects[str(
                        object_id)]["resource_list"][str(res_id)]["resName"]
                    is_multi_inst = lwm2m_dict_objects[str(
                        object_id)]["resource_list"][str(res_id)]["multiInst"]
                    if not is_multi_inst:
                        resources_dict.update({resource_name: res_value})
                    else:
                        resources_dict.update({
                            resource_name + "_" + str(res_inst_id):
                            res_value
                        })

                self.handle_m2m_server(endpoint_name, object_id,
                                       object_inst_id, res_id, res_inst_id,
                                       resource_name, res_value,
                                       resources_dict)

    def handle_m2m_server(self, endpoint_name, object_id, object_inst_id,
                          res_id, res_inst_id, res_name, res_value,
                          resources_dict):
        preferred_scl = endpoint_name.split("/")[0]
        if endpoint_name.find("attachedDevices") == -1:
            bool_attachedDevices = False
        else:
            attached_device_name = endpoint_name.split("/")[-1]
            bool_attachedDevices = True
        object_name = lwm2m_dict_objects[str(object_id)]["object_name"]
        resource_name = lwm2m_dict_objects[str(object_id)]["resource_list"][
            str(res_id)]["resName"]
        moID_value = lwm2m_dict_objects[str(object_id)]["urn"]
        res_name_res_inst_id = resource_name + "_" + str(res_inst_id)

        def add_parameters(response):
            path = response.resource.path
            resource = ('{"mgmtObjs" : ' + json.dumps(resources_dict) + '}')
            request = UpdateRequestIndication(path,
                                              resource,
                                              content_type="application/json")
            response = self.api.handle_request_indication(request)

        def handle_mgmtobjs(response):
            mgmtobj_exists = False
            for mgmtobj in response.resource.mgmtObjCollection:
                if mgmtobj.name == object_name + "_" + str(object_inst_id):
                    mgmtobj_exists = True
                    path = mgmtobj.path
                    request = RetrieveRequestIndication(path)
                    response = self.api.handle_request_indication(request)
                    try:
                        if res_name_res_inst_id in response.value.resource.flex_values:
                            if response.value.resource.flex_values[
                                    res_name_res_inst_id] == str(res_value):
                                continue
                        elif res_name in response.value.resource.flex_values:
                            if response.value.resource.flex_values[
                                    res_name] == str(res_value):
                                continue
                    except:
                        pass
                    resource = ('{"mgmtObjs" : ' + json.dumps(resources_dict) +
                                '}')
                    request = UpdateRequestIndication(
                        path, resource, content_type="application/json")
                    response = self.api.handle_request_indication(request)
                    break

            if not mgmtobj_exists:
                mgmtobj_ = MgmtObj(id=str(object_name) + "_" +
                                   str(object_inst_id),
                                   moID=moID_value)
                path = response.resource.path
                request = CreateRequestIndication(path, mgmtobj_)
                response = self.api.handle_request_indication(request)
                response.then(add_parameters)

        def retrieve_mgmtobjs(response):
            path = response.resource.path + "/mgmtObjs"
            request = RetrieveRequestIndication(path)
            response = self.api.handle_request_indication(request)
            response.then(handle_mgmtobjs)

        def handle_attached_devices(response):
            attached_device_exists = False
            for attached_device in response.resource.attachedDeviceCollection:
                if attached_device.name == attached_device_name:
                    attached_device_exists = True
                    path = attached_device.path + "/mgmtObjs"
                    request = RetrieveRequestIndication(path)
                    response = self.api.handle_request_indication(request)
                    response.then(handle_mgmtobjs)
                    break
            if not attached_device_exists:
                attached_device_object = AttachedDevice(
                    id=attached_device_name)
                path = response.resource.path
                request = CreateRequestIndication(
                    path=path, resource=attached_device_object)
                response = self.api.handle_request_indication(request)
                response.then(retrieve_mgmtobjs)

        def retrieve_attached_devices(response):
            path = response.resource.path + "/attachedDevices"
            request = RetrieveRequestIndication(path)
            response = self.api.handle_request_indication(request)
            response.then(handle_attached_devices)

        def handle_scl(response):
            scl_exists = False
            for _scl in response.resource.sclCollection:
                if _scl.name == preferred_scl:
                    scl_exists = True
                    if bool_attachedDevices:
                        path = _scl.path + "/attachedDevices"
                    else:
                        path = _scl.path + "/mgmtObjs"
                    request = RetrieveRequestIndication(path)
                    response = self.api.handle_request_indication(request)
                    if bool_attachedDevices:
                        response.then(handle_attached_devices)
                    else:
                        response.then(handle_mgmtobjs)
                    break
            if not scl_exists:
                scl_object = Scl(sclId=preferred_scl,
                                 link="127.0.0.1",
                                 sclType="GSCL",
                                 mgmtProtocolType="LWM2M")
                request = CreateRequestIndication(path="/m2m/scls",
                                                  resource=scl_object)
                response = self.api.handle_request_indication(request)
                if bool_attachedDevices:
                    response.then(retrieve_attached_devices)
                else:
                    response.then(retrieve_mgmtobjs)

        path = "/m2m/scls"
        request = RetrieveRequestIndication(path)
        response = self.api.handle_request_indication(request)
        response.then(handle_scl)

    def _handle_mgmtcmd_created(self, instance, request_indication):
        pass

    def _handle_mgmtcmd_updated(self, instance, request_indication):
        pass

    def _handle_mgmtobj_created(self, instance, request_indication):
        pass

    def _handle_mgmtobj_updated(self, instance, request_indication):
        filter_keyword = "TransportMgmtPolicy"
        filter_keyword1 = "DeviceCapability"

        mgmtobj_name = instance.path.split("/")[-1]
        if mgmtobj_name.startswith(filter_keyword):
            self.handle_transport_mgmt_policy(instance, mgmtobj_name)
        elif mgmtobj_name.startswith(filter_keyword1):
            self.handle_device_capability(instance, mgmtobj_name,
                                          request_indication)

    def handle_device_capability(self, instance, mgmtobj_name,
                                 request_indication):
        generate_endpoint = instance.path.split("/")[3:-2]
        endpoint_name = "/".join(generate_endpoint)
        object_name = mgmtobj_name.split("_")[0]
        object_id = lwm2m_reverse_dict_objects[object_name]["object_id"]
        object_inst_id = mgmtobj_name.split("_")[1]

        if "opEnable" in request_indication.resource and "opDisable" in request_indication.resource:
            return
        elif "opEnable" in request_indication.resource:
            res_id = 5
            res_inst_id = 0
        elif "opDisable" in request_indication.resource:
            res_id = 6
            res_inst_id = 0
        else:
            return
        self.send_execute_resource(endpoint_name, object_id, object_inst_id,
                                   res_id, res_inst_id)

    def handle_transport_mgmt_policy(self, instance, mgmtobj_name):
        res_value_exists = False
        resources_dict = {}
        total_dict = {}
        endpoint_dict = {}
        generate_endpoint = instance.path.split("/")[3:-2]
        endpoint_name = "/".join(generate_endpoint)

        object_name = mgmtobj_name.split("_")[0]
        object_id = lwm2m_reverse_dict_objects[object_name]["object_id"]
        object_inst_id = mgmtobj_name.split("_")[1]
        for key, value in instance.flex_values.iteritems():
            res_name = key.split("_")[0]
            try:
                res_inst_id = key.split("_")[1]
            except:
                res_inst_id = 0
            res_value = value
            res_id = lwm2m_reverse_dict_objects[object_name]["resource_list"][
                res_name]["resId"]
            resources_dict.update(
                {res_id: {
                    "res_inst_id": res_inst_id,
                    "res_value": res_value
                }})
            if res_value != "" and not res_value_exists:
                res_value_exists = True

        if res_value_exists:
            self.logger.info("Sending the Resource Updates to LWM2M Server")
            payload = json.dumps(resources_dict)
            content_type = "application/json"

            request = lwm2m_api()
            self.sem.acquire()
            client_port = self.generate_client_port()
            response = request.write_resource(self.lwm2m_server_ip,
                                              self.lwm2m_server_port,
                                              endpoint_name,
                                              object_id,
                                              payload,
                                              content_type,
                                              object_inst_id=object_inst_id,
                                              client_port=client_port)
            self.sem.release()

    def generate_client_port(self, ):
        if self.sem_counter >= 1000:
            self.sem_counter = 0

        self.sem_counter += 1

        sem_counter = self.sem_counter

        client_port = self.nscl_dm_adapter_client_port + sem_counter
        return client_port

    def subscribe_dm_server(self, ):
        self.logger.info(
            "Trying to subscribe to LWM2M DM Server for General Subscription")
        payload = json.dumps({"listener_ip": self.nscl_dm_adapter_listener_ip, "listener_port": \
            self.nscl_dm_adapter_listener_port})
        content_type = "application/json"
        request = lwm2m_api()
        response = request.observe_resource(
            self.lwm2m_server_ip,
            self.lwm2m_server_port,
            payload=payload,
            content_type=content_type,
            client_port=self.generate_client_port())

        def _handle_response(response):
            self.logger.info(
                "Successfully subscribed to LWM2M DM Server for General Subscription"
            )
            self.general_notification_token = response.token

        def _handle_error(*args):
            self.subscribe_dm_server()

        response.then(_handle_response, _handle_error)

    def subscribe_nscl(self, ):
        self.events.resource_created.register_handler(
            self._handle_mgmtobj_created, MgmtObj)
        self.events.resource_updated.register_handler(
            self._handle_mgmtobj_updated, MgmtObj)
        self.events.resource_created.register_handler(
            self._handle_mgmtcmd_created, MgmtCmd)
        self.events.resource_updated.register_handler(
            self._handle_mgmtcmd_updated, MgmtCmd)

    def send_discover_resources(self, ):
        sleep(20)
        self.logger.info("Sending discover request to Dm server")
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port
        payload = "/.well-known/core"
        request = lwm2m_api()
        response = request.discover_resources(
            server_ip,
            server_port,
            payload=payload,
            client_port=self.generate_client_port())
        discover = Discovery()
        payload = json.loads(response.payload)
        discover.display_all_resources(payload)

    def send_write_attributes(self, ):
        sleep(10)
        self.logger.info("Sending attributes info to DM server")
        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port

        endpoint_name = "emulated_device_nb_0"
        object_id = 3
        object_inst_id = 0
        res_id = 1
        res_inst_id = 0

        pmax = 50
        pmin = 10
        gt = None
        lt = None
        st = None
        cancel = None

        content_type = "application/json"
        payload = json.dumps({
            "pmax": pmax,
            "pmin": pmin,
            "gt": gt,
            "lt": lt,
            "st": st,
            "cancel": cancel
        })

        request = lwm2m_api()
        response = request.write_attributes(
            server_ip,
            server_port,
            endpoint_name,
            object_id,
            payload,
            content_type,
            object_inst_id=object_inst_id,
            res_id=res_id,
            res_inst_id=res_inst_id,
            client_port=self.generate_client_port())

    def send_create(self, ):
        sleep(10)
        self.logger.info("Sending create info to DM server")

        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port

        endpoint_name = "emulated_device_nb_0"
        object_id = 3
        object_inst_id = 4
        res_id = 0
        res_inst_id = 0
        res_value = "fokus"
        res_id_res_inst_id = str(res_id) + "_" + str(res_inst_id)
        payload = {}
        res_id_res_inst_id = str(res_id) + "_" + str(res_inst_id)
        payload[res_id_res_inst_id] = {
            "res_id": res_id,
            "res_inst_id": res_inst_id,
            "res_value": res_value
        }
        content_type = "application/json"

        request = lwm2m_api()
        response = request.create_object_instance(
            server_ip,
            server_port,
            endpoint_name,
            object_id,
            json.dumps(payload),
            content_type,
            object_inst_id=object_inst_id,
            client_port=self.generate_client_port())

    def send_specific_observation(self, ):
        sleep(15)
        self.logger.info("Sending specific observation to DM server")

        app_ip = "localhost"
        app_port = "1111"

        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port

        endpoint_name = "gscl/attachedDevices/PulseOximeter"
        object_id = 4200
        object_inst_id = 0
        res_id = 1
        res_inst_id = 0

        request = lwm2m_api()
        response = request.observe_resource(
            server_ip,
            server_port,
            app_ip=app_ip,
            app_port=app_port,
            endpoint_name=endpoint_name,
            object_id=object_id,
            object_inst_id=object_inst_id,
            res_id=res_id,
            res_inst_id=res_inst_id,
            client_port=self.generate_client_port())

        def _handle_response(response):
            self.logger.info("response token: %s", response.token)

        response.then(_handle_response)

    def send_specific_observation1(self, ):
        sleep(20)
        self.logger.info("Sending specific observation to DM server")

        app_ip = "localhost"
        app_port = "1115"

        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port

        endpoint_name = "gscl_PulseOximeter"
        object_id = 4200
        object_inst_id = 0
        res_id = 0
        res_inst_id = 0

        request = lwm2m_api()
        response = request.observe_resource(
            server_ip,
            server_port,
            app_ip=app_ip,
            app_port=app_port,
            endpoint_name=endpoint_name,
            object_id=object_id,
            object_inst_id=object_inst_id,
            res_id=res_id,
            res_inst_id=res_inst_id,
            client_port=self.generate_client_port())

        def _handle_response(response):
            self.logger.info("response token: %s", response.token)

        response.then(_handle_response)

    def send_cancel_observation(self, ):
        sleep(22)
        self.logger.info("Sending Cancel Observation to DM server")

        app_ip = "localhost"
        app_port = "1111"

        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port

        endpoint_name = "gscl/attachedDevices/PulseOximeter"
        object_id = 4200
        object_inst_id = 0
        res_id = 1
        res_inst_id = 0

        request = lwm2m_api()
        response = request.cancel_observe_resource(
            server_ip,
            server_port,
            app_ip,
            app_port,
            endpoint_name,
            object_id,
            object_inst_id=object_inst_id,
            res_id=res_id,
            res_inst_id=res_inst_id,
            client_port=self.generate_client_port())

        def _handle_response(response):
            self.logger.info("response token: %s", response.token)
            self.logger.info("response %s", response.payload)

        response.then(_handle_response)

    def send_execute_resource(self,
                              endpoint_name,
                              object_id,
                              object_inst_id,
                              res_id,
                              res_inst_id,
                              payload=None):
        self.logger.info("Sending execution to DM server")

        server_ip = self.lwm2m_server_ip
        server_port = self.lwm2m_server_port

        payload = None
        request = lwm2m_api()
        response = request.execute_resource(
            server_ip,
            server_port,
            endpoint_name,
            object_id,
            object_inst_id,
            res_id,
            res_inst_id=res_inst_id,
            payload=payload,
            client_port=self.generate_client_port())

        self.logger.info("Updating M2M Resource Tree")
        resources_dict = {}
        object_id_res_id = str(object_id) + "/" + str(res_id)
        if object_id_res_id in action_mapping:
            res_id = action_mapping[object_id_res_id]["target_res_id"]
            res_value = action_mapping[object_id_res_id]["target_action"]
            res_name = lwm2m_dict_objects[str(object_id)]["resource_list"][str(
                res_id)]["resName"]
            is_multi_inst = lwm2m_dict_objects[str(
                object_id)]["resource_list"][str(res_id)]["multiInst"]
            if not is_multi_inst:
                resources_dict.update({res_name: res_value})
            else:
                resources_dict.update(
                    {res_name + "_" + str(res_inst_id): res_value})
            self.handle_m2m_server(endpoint_name, object_id, object_inst_id,
                                   res_id, res_inst_id, res_name, res_value,
                                   resources_dict)
Example no. 46
0
class BaseDataHandler(object):
    _params = {DataHandlerParameter.POLLING_INTERVAL : 3600}
    _polling = False
    _polling_glet = None
    _config = {}

    def set_event_callback(self, evt_callback):
        self._event_callback = evt_callback

    def _dh_event(self, type, value):
        event = {
            'type' : type,
            'value' : value,
            'time' : time.time()
        }
        self._event_callback(event)

    def _poll(self):
        """
        Internal polling method, run inside a greenlet, that triggers acquire_data for "new" data
        The polling interval (in seconds) is retrieved from the POLLING_INTERVAL parameter
        """
        self._polling = True
        interval = get_safe(self._params, DataHandlerParameter.POLLING_INTERVAL, 3600)
        while True:
            self.execute_acquire_data({'stream_id':'first_new','TESTING':True})
            time.sleep(interval)

    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Command a DataHandler by request-reply messaging. Package command
        message and send on blocking command socket. Block on same socket
        to receive the reply. Return the driver reply.
        @param cmd The DataHandler command identifier.
        @param args Positional arguments of the command.
        @param kwargs Keyword arguments of the command.
        @retval Command result.
        """
        # Package command dictionary.

        #need to account for observatory_execute_resource commands
        #connect -> Not used
        #get_current_state -> Not used
        #discover -> Not used
        #disconnect -> Not used

        log.debug('cmd_dvr received command \'{0}\' with: args={1} kwargs={2}'.format(cmd, args, kwargs))

        reply = None
        if cmd == 'configure':
            # Delegate to BaseDataHandler.configure()
            reply = self.configure(*args, **kwargs)
        elif cmd == 'initialize':
            # Delegate to BaseDataHandler.initialize()
            reply = self.initialize(*args, **kwargs)
        elif cmd == 'get':
            # Delegate to BaseDataHandler.get()
            reply = self.get(*args, **kwargs)
        elif cmd == 'set':
            # Delegate to BaseDataHandler.set()
            reply = self.set(*args, **kwargs)
        elif cmd == 'get_resource_params':
            # Delegate to BaseDataHandler.get_resource_params()
            reply = self.get_resource_params(*args, **kwargs)
        elif cmd == 'get_resource_commands':
            # Delegate to BaseDataHandler.get_resource_commands()
            reply = self.get_resource_commands(*args, **kwargs)
        elif cmd == 'execute_acquire_data':
            # Delegate to BaseDataHandler.execute_acquire_data()
            reply = self.execute_acquire_data(*args, **kwargs)
        elif cmd == 'execute_acquire_sample':
            #TODO: Can we change these names?  acquire_data would be a better name for EOI...
            # Delegate to BaseDataHandler.execute_acquire_sample()
            reply = self.execute_acquire_sample(*args, **kwargs)
        elif cmd == 'execute_start_autosample':
            #TODO: Can we change these names?  stop_polling would be a better name for EOI...
            # Delegate to BaseDataHandler.execute_start_autosample()
            reply = self.execute_start_autosample(*args, **kwargs)
        elif cmd == 'execute_stop_autosample':
            #TODO: Can we change these names?  stop_polling would be a better name for EOI...
            # Delegate to BaseDataHandler.execute_stop_autosample()
            reply = self.execute_stop_autosample(*args, **kwargs)
        elif cmd in ['connect','disconnect','get_current_state','discover']:
            # Disregard
            pass
        else:
            desc='Command unknown by DataHandler: {0}'.format(cmd)
            log.info(desc)
            raise UnknownCommandError(desc)

        return reply

    def initialize(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: This should put the DataHandler back into an 'unconfigured' state
        """
        Called from:
                      InstrumentAgent._handler_idle_reset
                      InstrumentAgent._handler_idle_go_inactive
                      InstrumentAgent._handler_stopped_reset
                      InstrumentAgent._handler_stopped_go_inactive
                      InstrumentAgent._handler_observatory_reset
                      InstrumentAgent._handler_observatory_go_inactive
                      InstrumentAgent._handler_uninitialized_initialize
                      |--> ExternalDataAgent._start_driver
        """
        log.debug('Initializing DataHandler...')
        self._glet_queue = []
        self._semaphore=Semaphore()
        return None

    def configure(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: This should configure the DataHandler for the particular dataset
        """
        Called from:
                      InstrumentAgent._handler_inactive_go_active
        """
        log.debug('Configuring DataHandler: args = {0}'.format(args))
        try:
            self._config = args[0]

        except IndexError:
            raise ParameterError('\'acquire_data\' command requires a config dict as the first argument')

        return

    def execute_acquire_data(self, *args):
        """
        Spawns a greenlet to perform a data acquisition
        Calls BaseDataHandler._acquire_data
        Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource

        @param args First argument should be a config dictionary
        """
        try:
            config = args[0]

        except IndexError:
            raise ParameterError('\'acquire_data\' command requires a config dict.')

        if not isinstance(config, dict):
            raise TypeError('args[0] of \'acquire_data\' is not a dict.')
        else:
            if get_safe(config,'constraints') is None and not self._semaphore.acquire(blocking=False):
                log.warn('Already acquiring new data - action not duplicated')
                return

            g = spawn(self._acquire_data, config, self._unlock_new_data_callback)
            log.debug('** Spawned {0}'.format(g))
            self._glet_queue.append(g)

    def execute_acquire_sample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Called from:
                      InstrumentAgent._handler_observatory_execute_resource
                       |-->  ExternalDataAgent._handler_streaming_execute_resource
        """
        # This returns the sample to the agent as an event
        sample = {'stream_name':'data_stream','p': [-6.945], 'c': [0.08707], 't': [20.002], 'time': [1333752198.450622]}
        self._dh_event(DriverAsyncEvent.SAMPLE, sample)
        # This does 'rpc' style event
        return sample

    def execute_start_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Put the DataHandler into streaming mode and start polling for new data
        Called from:
                      InstrumentAgent._handler_observatory_go_streaming

        @raises TimeoutError:
        @raises ProtocolError:
        @raises NotImplementedError:
        @raises ParameterError:
        """
        log.debug('Entered execute_start_autosample with args={0} & kwargs={1}'.format(args, kwargs))
        if not self._polling and self._polling_glet is None:
            self._polling_glet = spawn(self._poll)

        return None

    def execute_stop_autosample(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Stop polling for new data and put the DataHandler into observatory mode
        Called from:
                      InstrumentAgent._handler_streaming_go_observatory

        @raises TimeoutError:
        @raises ProtocolError:
        @raises NotImplementedError:
        @raises ParameterError:
        """
        log.debug('Entered execute_stop_autosample with args={0} & kwargs={1}'.format(args, kwargs))
        if self._polling and self._polling_glet is not None:
            self._polling_glet.kill()
            self._polling = False
            self._polling_glet = None

        return None

    def get(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Called from:
                      InstrumentAgent._handler_get_params

        @raises TimeoutError:
        @raises ProtocolError:
        @raises NotImplementedError:
        @raises ParameterError:
        """
        try:
            pnames=args[0]
        except IndexError:
            log.warn("No argument provided to get, return all parameters")
            pnames = [DataHandlerParameter.ALL]

        result = None
        if DataHandlerParameter.ALL in pnames:
            result = self._params
        else:
            if not isinstance(pnames, (list,tuple)):
                raise ParameterError('Get argument not a list or tuple: {0}'.format(pnames))
            result={}
            for pn in pnames:
                try:
                    result[pn] = self._params.get(pn)
                except KeyError:
                    raise ParameterError('{0} is not a valid parameter for this DataHandler.'.format(pn))

        return result

    def set(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Called from:
                      InstrumentAgent._handler_observatory_set_params

        @raises TimeoutError:
        @raises ProtocolError:
        @raises NotImplementedError:
        @raises ParameterError:
        """
        # Retrieve required parameter.
        # Raise if no parameter provided, or not a dict.
        try:
            params = args[0]

        except IndexError:
            raise ParameterError('Set command requires a parameter dict.')

        if not isinstance(params, dict):
            raise ParameterError('Set parameters not a dict.')
        else:
            for (key, val) in params.iteritems():
                self._params[key] = val
                #TODO: Add rejection of unknown parameter

    def get_resource_params(self, *args, **kwargs):
        """
        Return list of resource parameters. Implemented in specific DataHandlers
        Called from:
                      InstrumentAgent._handler_get_resource_params
        """
        # TODO: Should raise NotImplementedError here, return temporarily for prototyping
#        raise NotImplementedError('get_resource_params() not implemented in BaseDataHandler')
        return [DataHandlerParameter.POLLING_INTERVAL]

    def get_resource_commands(self, *args, **kwargs):
        """
        Return list of DataHandler execute commands available.
        Called from:
                      InstrumentAgent._handler_get_resource_commands
        """
        cmds = [cmd.replace('execute_','') for cmd in dir(self) if cmd.startswith('execute_')]
        return cmds

    def _unlock_new_data_callback(self, caller):
        log.debug('** Release {0}'.format(caller))
        self._semaphore.release()

    @classmethod
    def _acquire_data(cls, config, unlock_new_data_callback):
        """
        Ensures required keys (such as stream_id) are available from config, configures the publisher and then calls:
             BaseDataHandler._new_data_constraints (only if config does not contain 'constraints')
             BaseDataHandler._publish_data passing BaseDataHandler._get_data as a parameter
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @param unlock_new_data_callback BaseDataHandler callback function to allow conditional unlocking of the BaseDataHandler._semaphore
        """
        stream_id = get_safe(config, 'stream_id')
        if not stream_id:
            raise ConfigurationError('Configuration does not contain required \'stream_id\' key')
        #TODO: Configure the publisher
        publisher=None

        constraints = get_safe(config,'constraints')
        if not constraints:
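            # Unconstrained ("new data") request: release the semaphore when this
            # greenlet exits, then ask the concrete handler for constraints.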
            gevent.getcurrent().link(unlock_new_data_callback)
            constraints = cls._new_data_constraints(config)
            config['constraints']=constraints

        cls._publish_data(publisher, config, cls._get_data(config))

        # Publish a 'TestFinished' event
        if get_safe(config,'TESTING'):
            log.debug('Publish TestingFinished event')
            pub = EventPublisher('DeviceCommonLifecycleEvent')
            pub.publish_event(origin='BaseDataHandler._acquire_data', description='TestingFinished')

    @classmethod
    def _new_data_constraints(cls, config):
        #TODO: Document what "constraints" looks like (yml)!!
        """
        Determines the appropriate constraints for acquiring any "new data" from the external dataset
        The format of the returned 'constraints' dict is not yet documented (see the TODO above)
        @param config Dict of configuration parameters - may be used to generate the returned 'constraints' dict
        @retval Dict that constrains retrieval of new data from the external dataset
        """
        raise NotImplementedError

    @classmethod
    def _get_data(cls, config):
        """
        Generator function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        Data should be conformant with the requirements of the publisher (granule)
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        """
        raise NotImplementedError

    @classmethod
    def _publish_data(cls, publisher, config, data_generator):
        """
        Iterates over the data_generator and publishes granules to the stream indicated in stream_id
        """
        stream_id=config['stream_id']
        log.debug('Start publishing to stream_id = {0}'.format(stream_id))
        for count, ivals in enumerate(data_generator):
            log.info('Publish data to stream \'{0}\' [{1}]: {2}'.format(stream_id,count,ivals))
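A concrete handler built on this base class only needs to supply the two classmethods that are left unimplemented above. A minimal sketch, using plain dicts in place of real Granule objects and an invented in-memory dataset purely to show the control flow:

class InMemoryDataHandler(BaseDataHandler):
    # Hypothetical handler over a pretend dataset of 250 records.

    @classmethod
    def _new_data_constraints(cls, config):
        # Everything after the last acquired index counts as "new".
        last = config.get('last_index', 0)
        return {'first_index': last, 'last_index': 250}

    @classmethod
    def _get_data(cls, config):
        constraints = config['constraints']
        max_rec = config.get('max_records', 100)
        first = constraints['first_index']
        last = constraints['last_index']
        for start in range(first, last, max_rec):
            # A real handler would build and yield a Granule here.
            yield {'records': range(start, min(start + max_rec, last))}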
Example no. 47
0
class Queue(Greenlet):
    """Manages the queue of |Envelope| objects waiting for delivery. This is not
    a standard FIFO queue; a message's place in the queue depends entirely on
    the timestamp of its next delivery attempt.

    :param store: Object implementing :class:`QueueStorage`.
    :param relay: |Relay| object used to attempt message deliveries.
    :param backoff: Function that, given an |Envelope| and number of delivery
                    attempts, will return the number of seconds before the next
                    attempt. If it returns ``None``, the message will be
                    permanently failed. The default backoff function simply
                    returns ``None`` and messages are never retried.
    :param bounce_factory: Function that produces a |Bounce| object given the
                           same parameters as the |Bounce| constructor. If the
                           function returns ``None``, no bounce is delivered. By
                           default, a new |Bounce| is created in every case.
    :param store_pool: Number of simultaneous operations performable against the
                       ``store`` object. Default is unlimited.
    :param relay_pool: Number of simultaneous operations performable against the
                       ``relay`` object. Default is unlimited.

    """

    def __init__(self, store, relay, backoff=None, bounce_factory=None,
                       store_pool=None, relay_pool=None):
        super(Queue, self).__init__()
        self.store = store
        self.relay = relay
        self.backoff = backoff or self._default_backoff
        self.bounce_factory = bounce_factory or Bounce
        self.wake = Event()
        self.queued = []
        self.queued_lock = Semaphore(1)
        self.queue_policies = []
        self._use_pool('store_pool', store_pool)
        self._use_pool('relay_pool', relay_pool)

    def add_policy(self, policy):
        """Adds a |QueuePolicy| to be executed before messages are persisted
        to storage.

        :param policy: |QueuePolicy| object to execute.

        """
        if isinstance(policy, QueuePolicy):
            self.queue_policies.append(policy)
        else:
            raise TypeError('Argument not a QueuePolicy.')

    @staticmethod
    def _default_backoff(envelope, attempts):
        pass

    def _run_policies(self, envelope):
        results = [envelope]
        def recurse(current, i):
            try:
                policy = self.queue_policies[i]
            except IndexError:
                return
            ret = policy.apply(current)
            if ret:
                results.remove(current)
                results.extend(ret)
                for env in ret:
                    recurse(env, i+1)
            else:
                recurse(current, i+1)
        recurse(envelope, 0)
        return results

    def _use_pool(self, attr, pool):
        if pool is None:
            pass
        elif isinstance(pool, Pool):
            setattr(self, attr, pool)
        else:
            setattr(self, attr, Pool(pool))

    def _pool_run(self, which, func, *args, **kwargs):
        pool = getattr(self, which+'_pool', None)
        if pool:
            ret = pool.spawn(func, *args, **kwargs)
            return ret.get()
        else:
            return func(*args, **kwargs)

    def _pool_imap(self, which, func, *iterables):
        pool = getattr(self, which+'_pool', gevent)
        threads = imap(pool.spawn, repeat(func), *iterables)
        ret = []
        for thread in threads:
            thread.join()
            ret.append(thread.exception or thread.value)
        return ret

    def _pool_spawn(self, which, func, *args, **kwargs):
        pool = getattr(self, which+'_pool', gevent)
        return pool.spawn(func, *args, **kwargs)

    def _add_queued(self, entry):
        for i, info in enumerate(self.queued):
            if info[0] > entry[0]: # [0] is the timestamp.
                self.queued.insert(i, entry)
                break
        else:
            self.queued.append(entry)
        self.wake.set()

    def enqueue(self, envelope):
        now = time.time()
        envelopes = self._run_policies(envelope)
        ids = self._pool_imap('store', self.store.write, envelopes, repeat(now))
        results = zip(envelopes, ids)
        for env, id in results:
            if not isinstance(id, BaseException):
                self._pool_spawn('relay', self._attempt, id, env, 0)
            elif not isinstance(id, QueueError):
                raise id # Re-raise exceptions that are not QueueError.
        return results

    def _load_all(self):
        for entry in self.store.load():
            self._add_queued(entry)

    def _bounce(self, envelope, reply):
        bounce = self.bounce_factory(envelope, reply)
        if bounce:
            return self.enqueue(bounce)

    def _perm_fail(self, id, envelope, reply):
        self._pool_spawn('store', self.store.remove, id)
        if envelope.sender: # Can't bounce to null-sender.
            self._pool_spawn('bounce', self._bounce, envelope, reply)

    def _retry_later(self, id, envelope, reply):
        attempts = self.store.increment_attempts(id)
        wait = self.backoff(envelope, attempts)
        if wait is None:
            reply.message += ' (Too many retries)'
            self._perm_fail(id, envelope, reply)
        else:
            when = time.time() + wait
            self.store.set_timestamp(id, when)
            self._add_queued((when, id))

    def _attempt(self, id, envelope, attempts):
        try:
            self.relay._attempt(envelope, attempts)
        except TransientRelayError as e:
            self._pool_spawn('store', self._retry_later, id, envelope, e.reply)
        except PermanentRelayError as e:
            self._perm_fail(id, envelope, e.reply)
        else:
            self._pool_spawn('store', self.store.remove, id)

    def _dequeue(self, id):
        envelope, attempts = self.store.get(id)
        self._pool_spawn('relay', self._attempt, id, envelope, attempts)

    def _check_ready(self, now):
        last_i = 0
        for i, entry in enumerate(self.queued):
            timestamp, id = entry
            if now >= timestamp:
                self._pool_spawn('store', self._dequeue, id)
                last_i = i+1
            else:
                break
        if last_i > 0:
            self.queued = self.queued[last_i:]

    def _wait_ready(self, now):
        try:
            first = self.queued[0]
        except IndexError:
            self.wake.wait()
            self.wake.clear()
            return
        first_timestamp = first[0]
        if first_timestamp > now:
            self.wake.wait(first_timestamp-now)
            self.wake.clear()

    def flush(self):
        """Attempts to immediately flush all messages waiting in the queue,
        regardless of their retry timers.

        ***Note:*** This can be a very expensive operation, use with care.

        """
        self.wake.set()
        self.wake.clear()
        self.queued_lock.acquire()
        try:
            for entry in self.queued:
                self._pool_spawn('store', self._dequeue, entry[1])
            self.queued = []
        finally:
            self.queued_lock.release()

    def _run(self):
        self._pool_spawn('store', self._load_all)
        while True:
            self.queued_lock.acquire()
            try:
                now = time.time()
                self._check_ready(now)
                self._wait_ready(now)
            finally:
                self.queued_lock.release()
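The backoff argument documented above receives the envelope and the number of delivery attempts, and returns either a delay in seconds or None to fail the message permanently. A capped exponential backoff that gives up after five attempts might look like this (the limits are illustrative, not defaults of the class):

def exponential_backoff(envelope, attempts):
    # 60s, 120s, 240s, 480s, 960s between attempts, then permanent failure.
    if attempts > 5:
        return None
    return 60 * (2 ** (attempts - 1))

# queue = Queue(store, relay, backoff=exponential_backoff)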
Example no. 48
0
class SchedSite(SchedCommon):
    """Mirrors a site"""
    rain_timer = None
    rain_counter = 0
    _run_delay = None
    _sched = None
    _sched_running = None
    _delay_on = None
    running = False

    def __new__(cls, s):
        if s.id in sites:
            return sites[s.id]
        self = object.__new__(cls)
        sites[s.id] = self
        self.s = s
        self.connect()
        self._delay_on = Semaphore()

        self.controllers = set()
        self.envgroups = set()
        self.meters = {}
        for M in METERS:
            ml = set()
            self.meters[M.meter_type] = ml
            for d in getattr(self.s, M.meter_type + "_meters").all():
                ml.add(M(d))

        self.log("Startup")
        self.connect_monitors(do_controllers=False)
        signal.signal(signal.SIGINT, self.do_shutdown)
        signal.signal(signal.SIGTERM, self.do_shutdown)
        signal.signal(signal.SIGHUP, self.do_syncsched)

        self.running = True
        return self

    def __init__(self, s):
        pass

    def do_shutdown(self, x, y, **k):
        gevent.spawn_later(0.1, connwrap, self.shutdown)

    def do_syncsched(self, x, y):
        gevent.spawn_later(0.1, connwrap, self.syncsched)

    def syncsched(self):
        print("Sync+Sched", file=sys.stderr)
        self.sync()
        self.refresh()
        self.run_sched_task(reason="Sync+Sched")

    def delay_on(self):
        self._delay_on.acquire()
        gevent.spawn_later(1, self._delay_on.release)

    @async_gevent
    def check_flow(self, **k):
        for c in self.controllers:
            c.check_flow(**k)

    def connect(self):
        d = dict()
        if self.s.port:
            d['port'] = self.s.port
        if self.s.username:
            d['login'] = self.s.username
        if self.s.password:
            d['password'] = self.s.password
        if self.s.virtualhost:
            d['virtualhost'] = self.s.virtualhost
        try:
            self.qb = qbroker.make_unit_gevent(
                "moat.rain.runschedule",
                amqp=dict(server=dict(host=self.s.host, **d)))
        except Exception:
            print("Could not connect:", self.s.host, file=sys.stderr)
            raise

    def maybe_restart(self):
        self.log("reconnecting")
        try:
            self.connect()
        except Exception:
            print_exc()
            gevent.spawn_later(100, connwrap, self.maybe_restart)
        else:
            self.connect_monitors()

    def connect_monitors(self, do_controllers=True):
        if self.qb is None:
            return
        if do_controllers:
            for c in self.controllers:
                c.connect_monitors()
        for mm in self.meters.values():
            for m in mm:
                m.connect_monitors()
        n = self.s.var.replace(' ', '.')
        self.ckf = self.qb.register_rpc_gevent("rain.check.flow." + n,
                                               self.check_flow)
        self.cks = self.qb.register_rpc_gevent(
            "rain.read.schedule." + n,
            partial(self.run_sched_ext, reason="read schedule"))
        self.ckt = self.qb.register_rpc_gevent("rain.sync." + n, self.sync_ext)
        self.cku = self.qb.register_rpc_gevent("rain.shutdown." + n,
                                               self.do_shutdown)

    def sync(self, **k):
        print("Sync", file=sys.stderr)
        for c in self.controllers:
            c.sync()
        for eg in self.envgroups:
            eg.sync()
        for mm in self.meters.values():
            for m in mm:
                m.sync()
        self.run_main_task()
        #Save(None)
        print("Sync end", file=sys.stderr)

    sync_ext = async_gevent(sync)

    def shutdown(self, **k):
        print("Shutdown", file=sys.stderr)
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        if self.running:
            self.running = False
            self.sync()
            for eg in self.envgroups:
                eg.sync()
            for c in self.controllers:
                c.shutdown()
            for mm in self.meters.values():
                for m in mm:
                    m.shutdown()
        #Save(None)
        sys.exit(0)

    def run_schedule(self):
        for c in self.controllers:
            c.run_schedule()

    def refresh(self):
        self.s.refresh()
        for eg in self.envgroups:
            eg.refresh()
        for c in self.controllers:
            c.refresh()
        for mm in self.meters.values():
            for m in mm:
                m.refresh()

    def log(self, txt):
        log(self.s, txt)

    def add_controller(self, controller):
        self.controllers.add(controller)

    def no_rain(self):
        """Rain has stopped."""
        # called by timer
        self.rain_timer = None
        self.log("Stopped raining")
        self.run_main_task()

    def has_rain(self):
        """Some monitor told us that it started raining"""
        r, self.rain_timer = self.rain_timer, gevent.spawn_later(
            self.s.db_rain_delay, connwrap, self.no_rain)
        if r:
            r.kill()
            return
        self.log("Started raining")
        self.rain = True

        #for v in self.s.valves.all():
        vo = Valve.objects.filter(controller__site=self.s, runoff__gt=0)
        for v in vo.all():
            valve = SchedValve(v)
            if valve.locked:
                continue
            try:
                valve._off(1)
            except NotConnected:
                pass
            except Exception:
                self.log_error(v)
        Schedule.objects.filter(valve__in=vo,
                                start__gte=now() - timedelta(1),
                                seen=False).delete()
        self.run_main_task()

    def send_command(self, *a, **k):
        # TODO: return a sensible error and handle that correctly
        if self.qb is None:
            raise NotConnected
        try:
            self.qb.rpc_gevent("moat.cmd", args=a, _dest="moat.main", **k)
        except asyncio.TimeoutError as e:
            print("Timeout sending %s %s" % (repr(a), repr(k)))

    def run_every(self, delay):
        """Initiate running the calculation and scheduling loop every @delay seconds."""

        if self._run_delay is not None:
            self._run_delay = delay  # just update
            return
        self._run_delay = delay
        self._run_last = now()
        self._running = Semaphore()
        self._run_result = None
        sd = self._run_delay.total_seconds() / 10
        if sd < 66: sd = 66
        self._run = gevent.spawn_later(sd,
                                       connwrap,
                                       self.run_main_task,
                                       kill=False)
        if self._sched is not None:
            self._sched.kill()
        self._sched = gevent.spawn_later(2,
                                         connwrap,
                                         self.run_sched_task,
                                         kill=False,
                                         reason="run_every")

    def run_main_task(self, kill=True):
        """Run the calculation loop."""
        res = None
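        # If another greenlet is already running the calculation loop, just wait
        # for its result instead of starting a second pass.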
        if not self._running.acquire(blocking=False):
            return self._run_result.get()
        try:
            self._run_result = AsyncResult()
            if kill:
                self._run.kill()
            n = now()
            ts = (n - self._run_last).total_seconds()
            if ts < 5:
                try:
                    res = self.s.history.order_by("-time")[0]
                except IndexError:
                    return None
                else:
                    return res
            self._run_last = n

            res = self.main_task()
            return res
        finally:
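            # Whatever happened, re-arm the timer for the next run and hand
            # the result to any greenlets waiting on the AsyncResult.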
            self._run = gevent.spawn_later(
                (self._run_last + self._run_delay - n).total_seconds(),
                connwrap,
                self.run_main_task,
                kill=False)
            r, self._run_result = self._run_result, None
            self._running.release()
            r.set(res)

    def current_history_entry(self, delta=15):
        # assure that the last history entry is reasonably current
        try:
            he = self.s.history.order_by("-time")[0]
        except IndexError:
            pass
        else:
            if (now() - he.time).total_seconds() < delta:
                return he
        return self.new_history_entry()

    def new_history_entry(self, rain=0):
        """Create a new history entry"""
        values = {}
        n = now()
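        # Aggregate all meters of each type into one value: readings are
        # weighted by the meter weight, attenuated when stale, and either
        # averaged or summed depending on the meters' sum_it flag.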
        for t, ml in self.meters.items():
            sum_it = False
            sum_val = 0
            sum_f = 0
            for m in ml:
                f = m.weight
                v = m.get_value()
                if m.last_time is None:
                    f *= 0.01
                else:
                    s = (n - m.last_time).total_seconds()
                    if s > METER_MAXTIME:
                        f *= 0.01
                    elif s > METER_TIME:
                        f *= METER_TIME / s
                if v is not None:
                    sum_val += f * v
                    sum_f += f
                if m.sum_it: sum_it = True
            if sum_f:
                if not sum_it:
                    sum_val /= sum_f
                values[t] = sum_val

        print("Values:", values, file=sys.stderr)
        h = History(site=self.s, time=now(), **values)
        h.save()
        return h

    def sync_history(self):
        for c in self.controllers:
            c.sync_history()

    def main_task(self):
        print("MainTask", file=sys.stderr)
        self.refresh()
        h = self.current_history_entry(3)
        self.sync_history()

        gevent.spawn_later(2, connwrap, self.sched_task)
        print("MainTask end", h, file=sys.stderr)
        return h

    def run_sched_task(self, delayed=False, reason=None, kill=True, **k):
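        # If a scheduling pass is already running, wait for its result. With
        # delayed=True, postpone the pass by 10 seconds; otherwise run it now
        # and re-arm a 600-second fallback timer afterwards.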
        if self._sched_running is not None:
            print("RunSched.running", reason, file=sys.stderr)
            return self._sched_running.get()
        if self._sched is not None:
            if kill:
                self._sched.kill()
        if delayed:
            print("RunSched.delay", reason, file=sys.stderr)
            self._sched = gevent.spawn_later(10,
                                             connwrap,
                                             self.run_sched_task,
                                             kill=False,
                                             reason="Timer 10")
            return
        print("RunSched", reason, file=sys.stderr)
        self._sched = None
        self._sched_running = AsyncResult()
        try:
            self.sched_task()
        except Exception:
            self.log(format_exc())
        finally:
            r, self._sched_running = self._sched_running, None
            if self._sched is None:
                self._sched = gevent.spawn_later(600,
                                                 connwrap,
                                                 self.run_sched_task,
                                                 kill=False,
                                                 reason="Timer 600")
            if r is not None:
                r.set(None)
        print("RunSched end", file=sys.stderr)

    run_sched_ext = async_gevent(run_sched_task)

    def sched_task(self, kill=True):
        self.refresh()
        self.run_schedule()
class M2M_Device_Gateway_DM_Adapter(LoggerMixin):
    def __init__(self, events, config, api, client):

        self.events = events
        self.config = config
        self.api = api
        self.client = client

        self.clients_collection = {}
        self.gscl_collection = {}
        self.sem = Semaphore()
        self.sem_counter = 0
        self.events.resource_created.register_handler(self._handle_scl_created,
                                                      Scl)
        self.events.resource_created.register_handler(
            self._handle_mgmtobj_created, MgmtObj)
        self.events.resource_created.register_handler(
            self._handle_mgmtcmd_created, MgmtCmd)
        self.events.resource_updated.register_handler(
            self._handle_mgmtobj_updated, MgmtObj)
        self.events.resource_created.register_handler(
            self._handle_attached_device_created, AttachedDevice)

    def _handle_scl_created(self, instance, request_indication):
        gscl_name = instance.path.split("/")[-1]
        if gscl_name != "nscl":
            self.create_scl_client(gscl_name)

    def create_scl_client(self, gscl_name):
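        # The shared counter is bumped under the semaphore so that concurrent
        # client creations end up with distinct port offsets.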
        self.sem.acquire()
        self.sem_counter += 1
        sem_counter = self.sem_counter
        self.sem.release()

        local_listener_port = self.config["local_listener_port"] + sem_counter
        local_client_port = self.config["local_client_port"] + sem_counter

        dm_client = self.client.create_client(local_listener_port,
                                              local_client_port)
        if dm_client is not None:
            self.gscl_collection[gscl_name] = dm_client
            dm_client.load_dm_adapter(self)
            self.client.do_registration(gscl_name)
        else:
            self.client.do_registration(gscl_name,
                                        status=True,
                                        cb=self.load_elements)

    def load_elements(self, endpoint_name, dm_client_object):
        self.gscl_collection[endpoint_name] = dm_client_object
        dm_client_object.load_dm_adapter(self)

    def load_elements1(self, endpoint_name, dm_client_object):
        # self.gscl_collection[instance.path.split("/")[3]]
        self.clients_collection[endpoint_name] = dm_client_object

    def _handle_attached_device_created(self, instance, request_indication):

        #self.sem.acquire()
        self.sem_counter += 1
        sem_counter = self.sem_counter
        #self.sem.release()
        generate_endpoint = instance.path.split("/")[3:]
        endpoint_name = "/".join(generate_endpoint)
        local_client_port = self.config["local_client_port"] + sem_counter
        try:
            self.clients_collection[endpoint_name] = self.gscl_collection[
                instance.path.split("/")[3]]
            self.client.do_registration(endpoint_name,
                                        local_client_port=local_client_port)
        except KeyError:
            self.client.do_registration(endpoint_name,
                                        local_client_port=local_client_port,
                                        status=True,
                                        cb=self.load_elements1)

    def _handle_mgmtobj(self, dm_client, endpoint_name, instance,
                        request_indication):
        send_update_flag = False
        object_and_resources = {}
        resources = {}
        path = instance.path
        resource = request_indication.resource
        #get object name and resource name
        mgmt_obj_name = path.split("/")[-1]
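        # Naming convention: the mgmtObj path ends in "<object name>_<instance id>",
        # and each flex value key is "<resource name>" or "<resource name>_<instance id>".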

        self.logger.debug("endpoint_name: %s", endpoint_name)
        self.logger.debug("management object %s was added" % (mgmt_obj_name))
        # .get() so an unknown object name falls through to the error branch
        # below instead of raising KeyError
        lwm2m_mgmt_obj = lwm2m_reverse_dict_objects.get(
            mgmt_obj_name.split("_")[0])
        lwm2m_mgmt_obj_inst_id = mgmt_obj_name.split("_")[1]
        if lwm2m_mgmt_obj is not None:
            lwm2m_mgmt_obj_id = lwm2m_mgmt_obj["object_id"]
            self.logger.debug("flex values are " + str(instance.flex_values))
            for param_name, param_value in instance.flex_values.items():
                self.logger.debug("param name [%s] and param value [%s]" %
                                  (param_name, param_value))
                res_info = lwm2m_mgmt_obj["resource_list"][
                    param_name.split("_")[0]]
                lwm2m_resource_id = res_info["resId"]
                if res_info["multiInst"]:
                    lwm2m_resource_id_inst = param_name.split("_")[1]
                else:
                    lwm2m_resource_id_inst = ""
                resource_change_flag = dm_client.add_resource(endpoint_name, lwm2m_mgmt_obj_id, \
                                        lwm2m_resource_id, param_value, lwm2m_mgmt_obj_inst_id=lwm2m_mgmt_obj_inst_id)

                if resource_change_flag:
                    send_update_flag = True
                resources.update({
                    lwm2m_resource_id: {
                        "res_inst_id": lwm2m_resource_id_inst,
                        "res_value": param_value
                    }
                })

            object_and_resources = {
                str(lwm2m_mgmt_obj_id) + "_" + str(lwm2m_mgmt_obj_inst_id): {
                    "resources": resources
                }
            }
            mgmt_obj_id_inst_id = str(lwm2m_mgmt_obj_id) + "_" + str(
                lwm2m_mgmt_obj_inst_id)
            return mgmt_obj_id_inst_id, object_and_resources, send_update_flag
        else:
            self.logger.error(
                "could not find management object for management object name %s"
                % (mgmt_obj_name))
            return None

    def create_mgmtobj_dict(self, endpoint_name, instance, request_indication):
        send_update_flag = False
        object_and_resources = {}
        resources = {}
        path = instance.path
        resource = request_indication.resource
        mgmt_obj_name = path.split("/")[-1]

        # .get() so an unknown object name falls through to the error branch
        # below instead of raising KeyError
        lwm2m_mgmt_obj = lwm2m_reverse_dict_objects.get(
            mgmt_obj_name.split("_")[0])
        lwm2m_mgmt_obj_inst_id = mgmt_obj_name.split("_")[1]
        if lwm2m_mgmt_obj is not None:
            lwm2m_mgmt_obj_id = lwm2m_mgmt_obj["object_id"]
            for param_name, param_value in instance.flex_values.items():
                res_info = lwm2m_mgmt_obj["resource_list"][
                    param_name.split("_")[0]]
                lwm2m_resource_id = res_info["resId"]
                if res_info["multiInst"]:
                    lwm2m_resource_id_inst = param_name.split("_")[1]
                else:
                    lwm2m_resource_id_inst = ""

                resources.update({
                    lwm2m_resource_id: {
                        "res_inst_id": lwm2m_resource_id_inst,
                        "res_value": param_value
                    }
                })

            object_and_resources = {
                str(lwm2m_mgmt_obj_id) + "_" + str(lwm2m_mgmt_obj_inst_id): {
                    "resources": resources
                }
            }
            mgmt_obj_id_inst_id = str(lwm2m_mgmt_obj_id) + "_" + str(
                lwm2m_mgmt_obj_inst_id)
            return mgmt_obj_id_inst_id, object_and_resources
        else:
            self.logger.error("Could not find management object name %s" %
                              (mgmt_obj_name))
            return None

    def _handle_mgmtobj_created(self, instance, request_indication):
        generate_endpoint = instance.path.split("/")[3:-2]
        endpoint_name = "/".join(generate_endpoint)
        if endpoint_name == "":
            endpoint_name = self.config["global"]["scl_id"]
            if endpoint_name not in self.gscl_collection:
                if not any(self.gscl_collection):
                    self.create_scl_client(endpoint_name)
                else:
                    for _, obj in self.gscl_collection.iteritems():
                        self.gscl_collection[endpoint_name] = obj
                        self.sem_counter += 1
                        sem_counter = self.sem_counter
                        local_client_port = self.config[
                            "local_client_port"] + sem_counter
                        self.client.do_registration(
                            endpoint_name, local_client_port=local_client_port)
                        break
        try:
            dm_client = self.clients_collection[endpoint_name]
        except KeyError:
            try:
                dm_client = self.gscl_collection[endpoint_name]
            except KeyError:
                self.logger.error(
                    "Error while creating mgmtObj! Endpoint may not be registered. Storing MgmtObj contents"
                )
                self.logger.debug("%s, %s", instance.path, endpoint_name)
                mgmt_obj_id_inst_id, object_and_resources = self.create_mgmtobj_dict(
                    endpoint_name, instance, request_indication)
                self.client.store_mgmtobj(endpoint_name, mgmt_obj_id_inst_id,
                                          object_and_resources)
                return
        mgmt_obj_id_inst_id, object_and_resources, send_update_flag = self._handle_mgmtobj(
            dm_client, endpoint_name, instance, request_indication)
        if object_and_resources is not None:
            self.logger.info(
                "Sending add resource updates from gateway(create) to client")
            response = dm_client.send_add_resources(object_and_resources,
                                                    endpoint_name,
                                                    mgmt_obj_id_inst_id)
        else:
            self.logger.info(
                "Not sending add resource updates from gateway(create) to client"
            )

    def _handle_mgmtcmd_created(self, instance, request_indication):
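        # mgmtCmd creation is currently a no-op in this adapter.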
        path = instance.path
        return

    def _handle_mgmtobj_updated(self, instance, request_indication):
        generate_endpoint = instance.path.split("/")[3:-2]
        endpoint_name = "/".join(generate_endpoint)

        if endpoint_name == "":
            endpoint_name = self.config["global"]["scl_id"]

        try:
            dm_client = self.clients_collection[endpoint_name]
        except KeyError:
            try:
                dm_client = self.gscl_collection[endpoint_name]
            except KeyError:
                self.logger.error(
                    "Error while updating mgmtObj! Endpoint may not be registered. Storing MgmtObj contents"
                )
                self.logger.debug("%s, %s", instance.path, endpoint_name)
                mgmt_obj_id_inst_id, object_and_resources = self.create_mgmtobj_dict(
                    endpoint_name, instance, request_indication)
                self.client.store_mgmtobj(endpoint_name, mgmt_obj_id_inst_id,
                                          object_and_resources)
                return
        mgmt_obj_id_inst_id, object_and_resources, send_update_flag = self._handle_mgmtobj(
            dm_client, endpoint_name, instance, request_indication)
        if object_and_resources is not None and send_update_flag:
            self.logger.info(
                "Sending add resource updates from gateway(update) to client")
            response = dm_client.send_add_resources(object_and_resources,
                                                    endpoint_name,
                                                    mgmt_obj_id_inst_id)
        else:
            self.logger.info(
                "Not sending add resource updates from gateway(update) to client; it already has the updated information"
            )

    def update_resources(self,
                         endpoint_name,
                         object_id,
                         object_inst_id,
                         payload,
                         content_type=None):
        """ Push the resouces change/update on the gateway resource tree """

        self.logger.info("Updating the Gateway Resource Tree")
        resource_dict = {}
        resources = loads(payload)
        object_id_object_inst_id = str(object_id) + "_" + str(object_inst_id)

        object_name = lwm2m_dict_objects[str(object_id)]["object_name"]
        object_name_obj_inst_id = object_name + "_" + str(object_inst_id)
        for res_id, res in resources.iteritems():
            res_info = lwm2m_dict_objects[str(object_id)]["resource_list"][
                str(res_id)]
            res_name = res_info["resName"]
            if res_info["multiInst"]:
                res_name_res_inst_id = res_name + "_" + res["res_inst_id"]
            else:
                res_name_res_inst_id = res_name
            res_value = res["res_value"]
            resource_dict.update({res_name_res_inst_id: res_value})

        def success(result):
            self.logger.info("Resource Tree is updated")

        def failure(result):
            self.logger.error("Error occurred: %s", result)

        def error_handling(result):
            path = "/m2m/scls/mgmtObjs/" + object_name_obj_inst_id
            self.logger.warning("Path not found. %s", result)
            self.logger.info("Looking for other path at %s", path)
            resource = ('{"mgmtObjs" : ' + dumps(resource_dict) + '}')
            request = UpdateRequestIndication(path,
                                              resource,
                                              content_type="application/json")
            response = self.api.handle_request_indication(request)
            response.then(success, failure)

        path = "/m2m/scls/" + endpoint_name + "/mgmtObjs/" + object_name_obj_inst_id

        resource = ('{"mgmtObjs" : ' + dumps(resource_dict) + '}')
        request = UpdateRequestIndication(path,
                                          resource,
                                          content_type="application/json")
        response = self.api.handle_request_indication(request)
        response.then(success, error_handling)
Esempio n. 50
0
class KasayaLocalClient(Sender):
    """
    KasayaLocalClient is the communication class used to talk to the kasaya daemon.
    It's used by workers and clients.
    """

    __metaclass__ = SingletonCreator

    def __init__(self, *args, **kwargs):
        # connect to kasaya
        super(KasayaLocalClient, self).__init__(
            'tcp://127.0.0.1:' + str(settings.KASAYAD_CONTROL_PORT), *args,
            **kwargs)
        self.SEMA = Semaphore()
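        # A single connection is shared by all greenlets; the semaphore
        # serializes complete send/receive exchanges over it.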

    # worker methods

    def setup(self, servicename, address, ID, pid):
        self.srvname = servicename
        self.ID = ID
        self.__pingmsg = {
            "message": messages.WORKER_LIVE,
            "addr": address,
            "id": self.ID,
            "service": servicename,
            "pid": pid,
            "status": 0,
        }

    def notify_worker_live(self, status):
        self.SEMA.acquire()
        self.__pingmsg['status'] = status
        try:
            self.send(self.__pingmsg)
            return True
        except ConnectionClosed:
            return False
        finally:
            self.SEMA.release()

    def notify_worker_stop(self):
        self.SEMA.acquire()
        msg = {
            "message": messages.WORKER_LEAVE,
            "id": self.ID,
        }
        try:
            self.send(msg)
            return True
        except ConnectionClosed:
            return False
        finally:
            self.SEMA.release()

    # client methods

    def query(self, service):
        """
        Ask the local kasaya server where the service with the
        requested name is being provided.
        """
        msg = {'message': messages.QUERY, 'service': service}
        self.SEMA.acquire()
        try:
            return self.send_and_receive(msg)
        finally:
            self.SEMA.release()

    def control_task(self, msg):
        """
        A task of this type is sent to the kasayad server, not to a worker!
        """
        self.SEMA.acquire()
        try:
            return self.send_and_receive(msg)
        finally:
            self.SEMA.release()
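
# A minimal, self-contained sketch (not part of the original example) of the
# acquire/try/finally/release pattern that KasayaLocalClient -- and most of
# the examples above -- use to serialize access to a shared connection. The
# `fake_send` helper is a hypothetical stand-in for the real socket I/O;
# `gevent.lock` is the modern home of Semaphore (older releases, as in some
# examples above, expose it as gevent.coros.Semaphore).
import gevent
from gevent.lock import Semaphore

_sema = Semaphore()


def fake_send(msg):
    gevent.sleep(0.01)  # pretend to perform blocking I/O
    return {"echo": msg}


def send_and_receive(msg):
    _sema.acquire()
    try:
        # only one greenlet at a time is inside this critical section
        return fake_send(msg)
    finally:
        _sema.release()


if __name__ == "__main__":
    jobs = [gevent.spawn(send_and_receive, i) for i in range(5)]
    gevent.joinall(jobs)
    print([job.value for job in jobs])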