Example 1
class ProxyMiddleWare(object):
    def __init__(self):
        self.p = Proxy()
        self.proxy_status = True
        self.lock = DeferredLock()
        self.try_count = 3
        self.auth = base64.b64encode(bytes("proxy:proxy123!", 'utf-8'))

    def process_request(self, request, spider):
        if "porxy" not in request.meta and self.proxy_status:
            self.lock.acquire()
            self.proxy_status = False
            request.headers["Proxy-Authorization"] = b'Basic ' + self.auth
            proxy = self.p.get_proxy()
            request.meta["proxy"] = proxy
            self.lock.release()

    def process_response(self, request, response, spider):
        if response.status in [403, 302, 301]:
            self.lock.acquire()
            try:
                del request.meta["proxy"]
            except KeyError:
                pass
            self.proxy_status = True
            time.sleep(3)
            self.lock.release()
            self.try_count -= 1
            if self.try_count == 0:
                self.try_count = 3
                return response
            return request
        return response
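Note that every middleware in this collection calls lock.acquire() and discards the Deferred it returns, so the "critical section" can run before the lock is actually held. For contrast, a minimal sketch of DeferredLock's intended Twisted usage (the worker names and functions are illustrative, not taken from the examples):

from twisted.internet.defer import DeferredLock, inlineCallbacks

lock = DeferredLock()

@inlineCallbacks
def critical_section(name):
    # acquire() returns a Deferred; the lock is only held once it fires
    yield lock.acquire()
    try:
        print("%s holds the lock" % name)
    finally:
        lock.release()

def do_work(name):
    print("%s holds the lock" % name)

critical_section("worker-1")
# run() acquires the lock, calls the function, then releases it even on error
lock.run(do_work, "worker-2")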
Example 2
class UAMiddleware(object):
    def __init__(self):
        self.lock = DeferredLock()
        self.update_time = datetime.now()
        self.UA_List = USER_AGENT

    def process_request(self, request, spider):
        self.lock.acquire()
        if self.is_expiring:
            ua = random.choice(self.UA_List)
            request.headers['User-Agent'] = ua
            print(request.headers['User-Agent'])
        self.lock.release()

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    @property
    def is_expiring(self):
        now = datetime.now()
        if (now - self.update_time) > timedelta(seconds=30):
            self.update_time = datetime.now()
            print("跟换USER_AGENT")
            return True
        else:
            return False
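UAMiddleware assumes a USER_AGENT list imported from the project settings and registration as a downloader middleware. A sketch of that wiring, where the module path and priority are assumptions (note the snippet reuses the name USER_AGENT for a list, shadowing Scrapy's built-in string setting of the same name):

# settings.py (illustrative values)
USER_AGENT = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
]

DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.UAMiddleware': 543,  # hypothetical module path
}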
Example 3
class IPProxy(object):

    PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=0&city=0&yys=0&port=1&pack=21267&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions="

    def __init__(self):

        self.lock = DeferredLock()
        self.current_proxy = None

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200:
            if not self.current_proxy.is_block:
                self.current_proxy.is_block = True
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if self.current_proxy is None or self.current_proxy.is_expiring or self.current_proxy.is_block:
            response_json = requests.get(self.PROXY_URL).json()
            try:
                print(response_json)
                self.current_proxy = ProxyModel(response_json['data'][0])
            except (KeyError, IndexError):
                print('Something went wrong!')
                print(response_json)
        self.lock.release()
Example 4
class IPProxyDownloadMiddleware(object):
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&pack=61856&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions='

    def __init__(self):
        super().__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or 'captcha' in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            print('Proxy %s was blacklisted' % self.current_proxy)
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print('Fetched a new proxy:', text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
                self.current_proxy.blacked = False
        self.lock.release()
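Nearly every example below leans on a ProxyModel helper that is never shown. A minimal sketch of what it might look like, assuming the provider payloads quoted later in this collection (dicts with ip, port, and expire_time fields); the attribute names follow the most common spelling in these snippets:

from datetime import datetime, timedelta

class ProxyModel(object):
    """Hypothetical sketch of the ProxyModel these middlewares assume."""

    def __init__(self, data):
        self.ip = data['ip']
        self.port = data['port']
        self.expire_time = datetime.strptime(
            data['expire_time'], '%Y-%m-%d %H:%M:%S')
        self.blacked = False  # flipped to True once the IP gets banned

    @property
    def proxy(self):
        # Scrapy expects a scheme-prefixed URL in request.meta['proxy']
        return 'http://%s:%s' % (self.ip, self.port)

    @property
    def is_expiring(self):
        # Treat the proxy as expiring slightly before the hard deadline
        return (self.expire_time - datetime.now()) < timedelta(seconds=5)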
Example 5
class IPProxyDownloadMiddleware(object):
    PROXY_URL = ''

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            # Reaching here means the request was flagged as a crawler, so it
            # fetched nothing; re-issue the request
            return request
        # The normal case: return the response so the spider can parse it
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print(text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 6
class ProxyMiddleWare(object):
    def __init__(self):
        self.p = Proxy()
        self.proxy_status = True
        self.lock = DeferredLock()
        self.try_count = 2

    def process_request(self, request, spider):

        if request.url.split('/')[3] == 'shop':
            request.cookies = {}

        if request.url.split('/')[3] == 'shop' and self.proxy_status:
            self.lock.acquire()
            self.proxy_status = False
            proxy = self.p.get_proxy()
            request.meta["proxy"] = proxy
            self.lock.release()
        # elif "proxy" in request.meta:
        #     self.proxy_status = True
        #     del request.meta["proxy"]

    def process_response(self, request, response, spider):

        if response.status == 403 or "meituan" in response.url or response.status == 302:
            self.lock.acquire()
            request.meta.pop("proxy", None)
            self.proxy_status = True
            self.lock.release()
            self.try_count -= 1
            return request
        elif self.try_count == 0:
            self.try_count = 2
        return response
Example 7
class IPProxyDownloadMiddleware(object):
    PROXY_URL = 'URL of your purchased proxy API'

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock= DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
            request.meta['proxy'] = self.current_proxy.proxy

    # fetch a proxy

    def process_response(self, request, response, spider):
        if response.status !=200 or 'captcha' in response.url:
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring:
            response = requests.get(self.PROXY_URL)
            text = response.text
            result = json.loads(text)
            if len(result['data'])>0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 8
class IPProxyMiddleware():
    PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=3&type=2&pro=&city=0&yys=0&port=11&time=2&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions="

    def __init__(self):
        super(IPProxyMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print('Proxy %s was blacklisted' % self.current_proxy.ip)
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print("重新获取了一个代理:", text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][random.randint(0, 2)]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 9
class RandomProxy(object):
    def __init__(self):
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        pass
        # user_agent = random.choice(USER_AGENT_LIST)
        # request.headers['User-Agent'] = user_agent
        # if 'proxy' not in request.meta or self.current_proxy.is_expiring:
        #     #请求代理
        #     self.update_proxy()
        #     request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        # If the site redirects (302) to a captcha page, swap out the proxy IP.
        # 'captcha' in response.url covers the case where the captcha page
        # itself comes back with status 200, so the URL is used as the tell.
        if response.status != 200 or 'captcha' in response.url:
            # Reaching here means Boss Zhipin has flagged this request as a
            # crawler, so effectively nothing was fetched. Return the request
            # so it re-enters the scheduler and gets sent again later.

            # if not self.current_proxy.blacked:
            #     self.current_proxy.blacked = True
            # self.update_proxy()
            # print('Proxy %s went bad' % self.current_proxy.proxy)
            # request.meta['proxy'] = self.current_proxy.proxy

            return request

        # In the normal case, remember to return the response at the end;
        # otherwise it never reaches the spider and never gets parsed.
        return response

    def update_proxy(self):
        # A lock is a multithreading concept; Scrapy runs on asynchronous
        # Twisted, which behaves much like multiple threads here. When the
        # current IP suddenly gets banned mid-crawl, many callers may arrive
        # here at once to fetch a new IP, wasting paid proxy requests. The
        # lock ensures only one caller at a time runs the code below: that
        # caller fetches one new proxy IP, which all the others then reuse,
        # saving proxy IPs (and money).
        self.lock.acquire()
        # Conditions for switching the proxy:
        # 1. no proxy IP is in use yet
        # 2. the current proxy is about to expire
        # 3. the current IP has been banned by the target site
        # Any one of these means it's time for a new proxy IP.
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            url = r'https://h.wandouip.com/get/ip-list?pack=%s&num=1&xy=1&type=2&lb=\r\n&mr=1&' % random.randint(
                100, 1000)
            response = requests.get(url=url, headers=DEFAULT_REQUEST_HEADERS)
            text = json.loads(response.text)
            print(text)
            data = text['data'][0]
            proxy_model = ProxyModel(data)
            print('Fetched a new proxy: %s' % proxy_model.proxy)
            self.current_proxy = proxy_model
            # return proxy_model
        self.lock.release()
Example 10
class IPProxyDownloadMiddleware(object):
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions='

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.lock = DeferredLock()
        self.current_proxy = None
        self.username = None

    def process_request(self, request, spider):
        if not self.current_proxy:
            self.update_proxy()
            return request
        if self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy
        print(request.meta['proxy'])

    def process_response(self, request, response, spider):
        if response.status != 200:
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring:
            response = requests.get(self.PROXY_URL)
            result = json.loads(response.text)
            data = result['data']
            proxy = ProxyModel(data[0])
            print('Fetched:', response.text)
            self.current_proxy = proxy
        self.lock.release()
Example 11
class ProxyDownloaderMiddleware(object):
    def __init__(self):
        super(ProxyDownloaderMiddleware, self).__init__()
        self.PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions="
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.upgrade_proxy()
            request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200:
            self.upgrade_proxy()
            return request
        return response

    def upgrade_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring:
            resp = requests.get(self.PROXY_URL)
            result = resp.text
            data = json.loads(result)['data'][0]
            if data:
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 12
class IPProxyDownloaderMiddleware(object):
    '''
    IP proxy middleware.
    '''
    # API endpoint that returns proxy IPs, e.g. from a provider such as
    # Zhima proxy or Kuaidaili
    IP_URL = r'http://127.0.0.1:8000/?types=0&count=1&country=国内'

    def __init__(self):
        # super(IPProxyDownloaderMiddleware, self).__init__(self)
        super(IPProxyDownloaderMiddleware, self).__init__()

        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expire:
            self.updateProxy()

        request.meta['proxy'] = self.current_proxy.address

    def process_response(self, request, response, spider):
        if response.status != 200:
            # Reaching here means the request was identified as a crawler,
            # so it effectively fetched nothing and is wasted.
            # If we don't return the request, no data is ever obtained;
            # returning it re-adds the request to the scheduler.
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
                print("被拉黑了")
            self.updateProxy()
            return request
        # In the normal case, return the response
        return response

    def updateProxy(self):
        '''
        Fetch a new proxy IP.
        :return:
        '''
        # Requests run asynchronously; acquire a lock here so we don't fire
        # too many simultaneous requests at the Zhima proxy API while
        # fetching a new proxy IP
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expire or self.current_proxy.blacked:
            response = requests.get(self.IP_URL)
            text = response.text

            # Sample return value: {"code":0,"success":true,"msg":"0","data":[{"ip":"49.70.152.188","port":4207,"expire_time":"2019-05-28 18:53:15"}]}

            jsonString = json.loads(text)

            data = jsonString['data']
            if len(data) > 0:
                proxyModel = IPProxyModel(data=data[0])
                self.current_proxy = proxyModel
        self.lock.release()
Example 13
class ProxyMiddleware():
    def __init__(self, proxy_url):
        self.logger = logging.getLogger(__name__)
        self.proxy_url = proxy_url
        self.update_time = datetime.now()
        self.proxy_wrong = True
        self.lock = DeferredLock()

    @classmethod
    def from_crawler(cls, crawler):
        settings = crawler.settings
        return cls(proxy_url=settings.get('PROXY_URL'))

    def get_random_proxy(self):
        try:
            response = requests.get(self.proxy_url)
            if response.status_code == 200:
                proxy = response.text
                return proxy
        except requests.ConnectionError:
            return False

    def process_request(self, request, spider):
        print("进入了ip代理的process_request")
        self.lock.acquire()
        if request.meta.get(
                'retry_times') or self.proxy_wrong or self.is_expiring:
            print("我要去修改ip代理")
            proxy = self.get_random_proxy()
            if proxy:
                uri = 'http://{proxy}'.format(proxy=proxy)
                self.logger.debug('Using proxy ' + proxy)
                request.meta['proxy'] = uri
                print('Using proxy: ' + uri)
                self.proxy_wrong = False
                self.update_time = datetime.now()
        self.lock.release()

    def process_response(self, request, response, spider):
        # the quoted string is the target site's anti-bot banner text
        if response.status != 200 or "很抱歉,您的访问被我们识别为机器行为" in response.text:
            print("Hit the anti-bot / captcha page!")
            self.proxy_wrong = True
            return request
        return response

    @property
    def is_expiring(self):
        now = datetime.now()
        if (now - self.update_time) > timedelta(seconds=30):
            self.update_time = datetime.now()
            print("执行了is_expiring")
            return True
        else:
            return False
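Example 13 is the only middleware here that pulls its configuration in through from_crawler, so the proxy endpoint lives in settings.py. A sketch, where the endpoint value and module path are assumptions:

# settings.py (illustrative)
PROXY_URL = 'http://127.0.0.1:5555/random'  # hypothetical proxy-pool endpoint

DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.ProxyMiddleware': 543,  # hypothetical module path
}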
Example 14
class Pulser_729(LabradServer):
    
    name = 'Pulser_729'
       
    def initServer(self):
        self.api  = api()
        self.inCommunication = DeferredLock()
        self.initializeBoard()
    
    def initializeBoard(self):
        connected = self.api.connectOKBoard()
        if not connected:
            raise Exception("Pulser Not Found")
    
    @setting(0, 'Reset DDS', returns = '')
    def resetDDS(self , c):
        """
        Reset the ram position to 0
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetAllDDS)
        self.inCommunication.release()
        
    @setting(1, "Program DDS", program = '*(is)', returns = '')
    def programDDS(self, c, program):
        """
        Programs the DDS, the input is a tuple of channel numbers and buf objects for the channels
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self._programDDSSequence, program)
        self.inCommunication.release()
    
    @setting(2, "Reinitialize DDS", returns = '')
    def reinitializeDDS(self, c):
        """
        Reprograms the DDS chip to its initial state
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.initializeDDS)
        self.inCommunication.release()
    
    def _programDDSSequence(self, program):
        '''takes the parsed dds sequence and programs the board with it'''
        for chan, buf in program:
            self.api.setDDSchannel(chan)
            self.api.programDDS(buf)
        self.api.resetAllDDS()
    
    def wait(self, seconds, result=None):
        """Returns a deferred that will be fired later"""
        d = Deferred()
        reactor.callLater(seconds, d.callback, result)
        return d
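The wait helper at the end of Example 14 is a standard Twisted idiom for a non-blocking sleep. A small self-contained usage sketch:

from twisted.internet import reactor
from twisted.internet.defer import Deferred, inlineCallbacks

def wait(seconds, result=None):
    # Returns a Deferred that fires after the given delay
    d = Deferred()
    reactor.callLater(seconds, d.callback, result)
    return d

@inlineCallbacks
def demo():
    print("pausing...")
    yield wait(1.5)  # non-blocking sleep inside an inlineCallbacks coroutine
    print("done")
    reactor.stop()

reactor.callWhenRunning(demo)
reactor.run()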
Example 15
class IPProxyDownloadMiddleware(object):
    # API generated by an online proxy provider; the one below is Zhima proxy's
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&port=11&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions='

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()  # create the lock

    def process_request(self, request, spider):
        # Check whether this request already has a proxy set, or whether the
        # current proxy is about to expire
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()
            request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        # If the status is not 200, or we were redirected to a captcha,
        # fetch a new proxy
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print("%s这个代理被识别并被加入黑名单了" % self.current_proxy.ip)
            self.update_proxy()
            # Reaching here means BOSS Zhipin identified this request as a
            # crawler, so it effectively fetched nothing. If we don't return
            # the request, it counts as never having gotten its data, i.e.
            # the request is wasted and the data is never scraped. So return
            # the request to put it back into the scheduler for a retry
            return request
        # In the normal case, remember to return the response; if it isn't
        # returned, it never reaches the spider and never gets parsed
        return response

    # Both process_request and process_response (e.g. when hitting a 403 page)
    # may need to request a proxy, so that logic lives in its own method
    def update_proxy(self):
        self.lock.acquire()  # acquire the lock
        # no proxy yet, or it's about to expire, or it has been blacklisted
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text  # text is a JSON string; parse it into a dict
            print("重新获取了一个代理:", text)
            # The proxy-pool API returns data shaped like:
            # {"code":0,"success":true,"msg":"0","data":[{"ip":"223.242.123.50","port":3212,"expire_time":"2019-01-15 10:15:20"}]}
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
                # return proxy_model
        self.lock.release()  # release the lock
Example 16
    def acquire(self, timeout=None):
        """
        This method operates the same as :meth:`DeferredLock.acquire` does
        except it requires a timeout argument.

        :param int timeout:
            The number of seconds to wait before timing out.

        :raises LockTimeoutError:
            Raised if the timeout was reached before we could acquire
            the lock.
        """
        assert timeout is None \
            or isinstance(timeout, (int, float)) and timeout > 0

        lock = DeferredLock.acquire(self)
        if timeout is None:
            return lock

        # Schedule a call to trigger finished.errback() which will raise
        # an exception.  If lock finishes first however cancel the timeout
        # and unlock the lock by calling finished.
        finished = Deferred()
        lock.addCallback(
            self._cancel_timeout,
            reactor.callLater(timeout, self._timeout, finished))
        lock.addCallback(self._call_callback, finished)

        return finished
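The timeout-aware acquire above calls self._timeout, self._cancel_timeout, and self._call_callback, none of which are included in the excerpt. One plausible sketch of those helpers, where LockTimeoutError and all three bodies are assumptions (a production version would also release the lock if it fires after the timeout already did):

class LockTimeoutError(Exception):
    """Hypothetical error raised when the lock is not acquired in time."""

# Plausible companion methods on the same DeferredLock subclass:
def _timeout(self, finished):
    # The scheduled call fired before acquire() completed: fail the caller.
    finished.errback(LockTimeoutError("timed out waiting for lock"))

def _cancel_timeout(self, result, delayed_call):
    # The lock was acquired first: cancel the pending timeout.
    if delayed_call.active():
        delayed_call.cancel()
    return result

def _call_callback(self, result, finished):
    # Forward the acquired lock to the caller, unless the timeout already
    # fired finished with an error.
    if not finished.called:
        finished.callback(result)
    return result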
Example 17
class Client(object):
    def __init__(self, specFilename, exchange='signals'):
        self.exchange = exchange
        spec = txamqp.spec.load(specFilename)

        delegate = TwistedDelegate()
        self.clientConnected = ClientCreator(reactor, AMQClient,
                                             delegate=delegate, vhost="/",
                                     spec=spec).connectTCP("localhost", 5672)
        self.conn = None
        self.chan = None
        self.finishLock = DeferredLock()

    @inlineCallbacks
    def finishChannelOpen(self):
        yield self.finishLock.acquire()
        if self.conn is None:
            print "opening connection for", self
            self.conn = yield self.clientConnected

            authentication = {"LOGIN": "******", "PASSWORD": "******"}
            yield self.conn.start(authentication)

        if self.chan is None:
            self.chan = yield self.conn.channel(1)
            yield self.chan.channel_open()
            yield self.newChan()
            print "made channel for", self
        self.finishLock.release()
            
    def newChan(self):
        # called once when the new channel is opened
        return succeed(None)
Example 18
class IPProxyDownloadMiddleware(object):

    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions='

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()

        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print('Proxy %s was blacklisted' % self.current_proxy.ip)
            self.update_proxy()
            # Reaching here means Boss Zhipin identified this request as a
            # crawler, so it effectively fetched nothing. If we don't return
            # the request, it counts as never having gotten its data, i.e.
            # the request is wasted and the data is never scraped. So return
            # the request to put it back into the scheduler to be sent again
            return request
        # In the normal case, remember to return the response; if it isn't
        # returned, it never reaches the spider and never gets parsed
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print('Fetched a new proxy:', text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 19
class IPProxyDownloadMiddleware(object):
    PROXY_URL = "http://http.tiqu.alicdns.com/getip3?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=1&lb=1&sb=0&pb=45&mr=1&regions=&gm=4"

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()

        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.block:
                self.current_proxy.block = True
            print("%s这个代理被加入黑名单了!" % self.current_proxy)
            self.update_proxy()
            # Reaching here means Boss Zhipin identified this request as a
            # crawler, so it fetched nothing. If we don't return the request,
            # it counts as never having gotten its data, i.e. the request and
            # its data are lost. So return the request to put it back into
            # the scheduler to be sent again
            return request
        # In the normal case, return the response; if it isn't returned, it
        # never reaches the spider and never gets parsed
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.block:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print("重新获取了一个代理:", text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 20
class IpProxyMiddleware(object):
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions='

    def __init__(self):
        super(IpProxyMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        # called before the engine hands the request to the downloader
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy
        # this proxy has the form https://ip:port

    def process_response(self, request, response, spider):
        if response.status != 200 or 'captcha' in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print("%s ip被封锁" % self.current_proxy.proxy)
            self.update_proxy()
            # Reaching here means we were identified as a crawler, so this
            # request fetched nothing; return the request so it is downloaded
            # again
            return request
        # In the normal case, remember to return the response; if it isn't
        # returned, it never reaches the spider and never gets parsed
        return response

    def update_proxy(self):
        # Scrapy crawls on top of Twisted, i.e. asynchronously (think multiple
        # threads). If every caller requested a proxy at once it would waste
        # IPs, so the fetch is guarded by a lock
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print("重新获取了一个代理", text)
            result = json.loads(text)
            # Zhima proxy rate-limits you, so the returned data may be empty
            if len(result['data']) > 0:
                data = result['data'][
                    0]  # {'ip': '106.46.136.7', 'port': 4225, 'expire_time': '2019-04-12 09:46:28'}
                # data needs several operations (joining ip and port, parsing
                # expire_time into a datetime to check expiry), hence the
                # ProxyModel wrapper
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 21
def add_scan_to_queue(self, scan, priority='Normal'):
    # increment counter
    scan_id = self.scan_ID_counter
    self.scan_ID_counter += 1
    # add to queue
    if priority == 'Normal':
        order = self.queue.put_last(1, (scan_id, scan, 1))
    elif priority == 'First in Queue':
        order = self.queue.put_first(1, (scan_id, scan, 1))
    elif priority == 'Pause All Others':
        order = self.queue.put_last(0, (scan_id, scan, 0))
    else:
        raise Exception("Unrecognized priority type")
    self.signals.on_queued_new_script((scan_id, scan.name, order))
    d = DeferredLock()
    d.acquire()
    self.running_locks[scan_id] = d
    self.launch_scripts()
    return scan_id
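The per-scan lock acquired here is presumably released once the scan finishes, waking anything that was waiting on it. A hypothetical counterpart (the method name is an assumption, not part of the original):

def mark_scan_finished(self, scan_id):
    # Release and drop the per-scan lock so callers blocked on
    # running_locks[scan_id].acquire() can proceed.
    lock = self.running_locks.pop(scan_id, None)
    if lock is not None:
        lock.release()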
Example 23
class IPProxyDownloaderMiddleware(object):
    PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=1&time=1&ts=1&ys=0&cs=1&lb=1&sb=0&pb=45&mr=1&regions="

    # initializer
    def __init__(self):
        super(IPProxyDownloaderMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    # handle requests
    def process_request(self, request, spider):
        # If the request carries no proxy yet, fetch one
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    # handle responses
    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            # if the IP isn't blacklisted yet, mark it
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print("%s这个Ip被拉入黑名单了" % self.current_proxy.ip)
            self.update_proxy()
            return request
        return response

    # refresh the proxy
    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print("重新获取了一个代理:", text)
            # print(text)
            results = json.loads(text)
            if len(results['data']) > 0:
                data = results['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 24
class IPProxyDownloadMiddleware(object):
    proxy_url = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions="

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        print(request.url, "is the current request URL")

        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            print("Fetching a new proxy")
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        print("%s当前网页状态" % response.status)
        if "zpAntispam" in response.url:
            print("%s当前网页链接,这个链接包含zpAntispam" % response.url)
        if response.status != 200 or "zpAntispam" in response.url:
            print("%s这个代理被加入黑名单了" % self.current_proxy.ip)
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            resp = requests.get(url=self.proxy_url)
            text = resp.text
            jsonHTML = json.loads(text)
            if len(jsonHTML['data']) > 0:
                respJ = jsonHTML['data'][0]
                proxy_model = ProxyModules(data=respJ)
                self.current_proxy = proxy_model
        self.lock.release()
Example 25
class IPProxyDownloadMiddleware(object):
    PROXY_URL = "代理服务API链接 "

    def __init__(self):
        super(IPProxyDownloadMiddleware,self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self,request,spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()

        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self,request,response,spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print("%s代理被加入到黑名单"%self.current_proxy.ip)
            self.update_proxy()
            # Reaching here means the request was identified as a crawler and
            # effectively fetched nothing. If we don't return the request it
            # counts as lost, so return it to put it back into the scheduler
            # to be sent again
            return request
        # In the normal case return the response; otherwise it never reaches
        # the spider and never gets parsed
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            result = json.loads(text)
            print('Fetched a new proxy', text)
            if len(result['data'])>0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 26
class UserAgentAndIPProxyMiddleware(object):
    """
    配置随机请求头和IP代理
    """
    PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=0&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions="

    def __init__(self):
        super(UserAgentAndIPProxyMiddleware,self).__init__()
        self.ua = UserAgent()
        # the current proxy, seeded with a placeholder entry
        self.current_proxy = ProxyModel({"ip": "120.43.134.119", "port": 4242, "expire_time": "2020-09-11 13:04:26"})
        self.lock = DeferredLock()

    def get_proxy(self):
        self.lock.acquire()
        # Under Scrapy, Twisted runs asynchronously, so this function can be
        # entered many times almost at once; the lock prevents redundant,
        # pointless proxy requests
        if self.current_proxy is None or self.current_proxy.is_expiring:
            resp = requests.get(self.PROXY_URL).text
            result = json.loads(resp)
            if len(result["data"]) > 0:
                data = result["data"][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()

    def process_request(self,request,spider):
        request.headers["User-Agent"] = self.ua.random
        if  "proxy" not in request.meta or self.current_proxy.is_expiring:
            #如果请求的meta里面没有传递proxy,就给一个代理IP
            self.get_proxy()
            request.meta['proxy'] = self.current_proxy.proxy
        print(self.current_proxy.ip)

    def process_response(self,request,response,spider):
        if response.status != 200 or "captcha-verify" in response.url:
            # A non-200 status or a captcha-verify URL means the proxy is no
            # longer usable: switch proxies and return the request so it is
            # retried
            self.get_proxy()
            return request
        return response
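The self.ua.random call in Example 26 matches the fake-useragent package's API. Assuming that is the dependency (the imports are not shown), it would be used like this:

# pip install fake-useragent  (assumed dependency)
from fake_useragent import UserAgent

ua = UserAgent()
print(ua.random)  # a fresh, realistic User-Agent string on every access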
Example 27
class AmazonDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    def __init__(self):
        self.helper = Proxy_helper()
        self.loc = DeferredLock()

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        self.loc.acquire()
        request.meta['proxy'] = self.helper.get_proxy()
        self.loc.release()

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        if response.status not in (200, 301, 302):
            self.loc.acquire()
            self.helper.update_proixy(request.meta['proxy'])
            self.loc.release()
            return request

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        self.loc.acquire()
        self.helper.update_proixy(request.meta['proxy'])
        self.loc.release()
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        return request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
Example 28
class IPProxyDownloaderMiddleware:
    """ scrapy 可使用此方案 """
    PROXIES = [
        'wind13zero:[email protected]:16817',
        'wind13zero:[email protected]:16817'
    ]

    def __init__(self):
        super().__init__()
        self.proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        self.lock.acquire()
        if not self.proxy:
            # pick one proxy and cache it for subsequent requests
            self.proxy = random.choice(self.PROXIES)
        request.meta['proxy'] = self.proxy
        self.lock.release()
        # print('Proxy %s' %proxy)
        print('='*30)
        print(request.meta['proxy'])
        return None
Example 29
class IPProxyDownloadMiddleware:
    """设置IP代理下载中间件"""
    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expired:
            # fetch a proxy
            self.update_proxy()

        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or 'captcha' in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            # The previous attempt was blocked; return the request so it is
            # retried
            return request
        # The request went through normally, so return the response
        return response

    def update_proxy(self):
        """Refresh the proxy"""
        # Scrapy sits on the asynchronous Twisted framework; lock the refresh
        # so concurrent callers don't waste proxy IPs
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expired or self.current_proxy.blacked:
            if PROXY_URL:
                text = requests.get(url=PROXY_URL).text
                result = json.loads(text)
                if result['data']:
                    data = result['data'][0]
                    proxy_model = ProxyModel(data)
                    self.current_proxy = proxy_model

        # release the lock after refreshing the proxy
        self.lock.release()
Example 30
class BNCSerial(BNCPulser):
    @inlineCallbacks
    def connect(self, server, port, baud):
        self.server = server

        #usage ensures only one read/write operation at a time
        self._portLock = DeferredLock()

        print 'connecting to "%s" on port "%s"...' % (server.name, port)

        p = self.server.packet()
        p.open(port)
        p.baudrate(baud)
        p.parity('N')  #no parity bit
        p.bytesize(8L)
        p.stopbits(1L)
        p.write_line(':SYST:COMM:SER:ECHO ON')
        yield p.send()

        chList = yield self.channel_list()
        self.chMap = dict(chList)

    @inlineCallbacks
    def query(self, line):
        # hold the port lock for the whole read/write and release it even on
        # error, so later calls cannot deadlock
        yield self._portLock.acquire()
        try:
            p = self.server.packet()
            resp = yield p.read()\
                          .write_line(line)\
                          .pause(U.Value(40.0, 'ms'))\
                          .read_line()\
                          .send()
        finally:
            self._portLock.release()
        print 'RESPONSE: %s' % resp['read_line']
        returnValue(resp['read_line'])

    @inlineCallbacks
    def send(self, line):
        yield self._portLock.run(self.server.write_line, line)

    @inlineCallbacks
    def all_ch_states(self):
        chns = self.chMap.keys()
        p = self.server.packet()
        p.read()  #clear buffer
        for n in chns:
            p.write(':PULSE%d:STATE?' % n)
            p.pause(U.Value(30, 'ms'))
            p.read(key=str(n))
        resp = yield p.send()

        parser = TYPE_MAP['bool'][1]
        returnValue([(n, parser(resp[str(n)])) for n in chns])
Example 31
class IPProxyDownloadMiddleware(object):
    # Requires purchased (high-anonymity) proxy IPs, e.g. from Kuaidaili.
    # Replace the IPs below with your own or this will fail:
    # PROXIES = ["114.234.76.131:8060", "183.129.207.82:11845"]

    # Alternatively, paste in the API link from a proxy site (such as Zhima
    # proxy), as below
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&ph=45&mr=1&regions='

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()
            request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example 32
class IPProxy(object):
    PROXY_URL = ""

    def __init__(self):

        self.lock = DeferredLock()
        self.current_proxy = None

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        contents = json.loads(response.text).get("data")

        if contents is None:
            if not self.current_proxy.is_block:
                self.current_proxy.is_block = True
                print('Proxy %s went bad' % self.current_proxy.proxy)
            self.update_proxy()

            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if self.current_proxy is None or self.current_proxy.is_expiring or self.current_proxy.is_block:
            response_json = requests.get(self.PROXY_URL).json()
            try:
                print(response_json)

                self.current_proxy = ProxyModel(response_json['data'][0])

            except (KeyError, IndexError):
                print('Something went wrong!')
                print(response_json)
        self.lock.release()
Example 33
    def process_request(self, request, spider):
        if 'proxy' in request.meta and not request.meta.get('_round_proxy'):
            return
        proxy = self.proxies.get_proxy()
        if not proxy:
            if self.stop_if_no_proxies:
                raise CloseSpider("no_proxies")
            else:
                logger.warning("No proxies available; marking all proxies "
                               "as unchecked")
                from twisted.internet.defer import DeferredLock
                lock = DeferredLock()
                lock.acquire()
                self.proxies.reset()
                lock.release()
                proxy = self.proxies.get_proxy()
                if proxy is None:
                    logger.error("No proxies available even after a reset.")
                    raise CloseSpider("no_proxies_after_reset")

        request.meta['proxy'] = proxy
        request.meta['download_slot'] = self.get_proxy_slot(proxy)
        request.meta['_round_proxy'] = True
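Example 33 pins each request to a per-proxy download slot via self.get_proxy_slot(proxy), so Scrapy's per-slot concurrency and delay settings apply per proxy rather than per website. The helper is not shown; a plausible sketch:

from urllib.parse import urlsplit

def get_proxy_slot(self, proxy):
    # Hypothetical helper: key the download slot on the proxy host so each
    # proxy gets its own concurrency and delay accounting.
    return urlsplit(proxy).hostname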
Example 34
class InterfaceUpper:
    def __init__(self, iface):
        self.iface = iface
        self._lock = DeferredLock()
        CompositeStatePublisher(lambda x: x, [
            netlink_monitor.get_state_publisher(iface, IFSTATE.PLUGGED),
            netlink_monitor.get_state_publisher(iface, IFSTATE.UP),
        ]).subscribe(self._cb)
        self._is_shutdown = False
        self.state = None
        reactor.addSystemEventTrigger('before', 'shutdown', self._shutdown)
   
    @inlineCallbacks
    def restart(self):
        yield self._lock.acquire()
        try:
            yield system.system('ifconfig', self.iface, '0.0.0.0')
            yield system.system('ifconfig', self.iface, 'down')
        finally:
            self._lock.release()

    @inlineCallbacks
    def _cb(self, old_state, new_state):
        plugged, up = new_state
        self.state = new_state
        if plugged and not up and not self._is_shutdown:
            yield self._lock.acquire()
            try:
                yield system.system('ifconfig', self.iface, 'up')
            finally:
                self._lock.release()

    @inlineCallbacks
    def _shutdown(self):
        self._is_shutdown = True
        if self.state:
            yield self.restart()
Example 36
class NotificationConnector(object):
    """Provide ready-to-use AMQP channels."""

    def __init__(self, service, clock=reactor):
        """
        @param service: An object implementing the same whenConnected() API as
            the twisted.application.internet.ClientService class.
        @param clock: An object implementing IReactorTime.
        """
        self._service = service
        self._clock = clock
        self._channel = None
        self._channel_lock = DeferredLock()

    @inlineCallbacks
    def __call__(self):
        """
        @return: A deferred firing with a ready-to-use txamqp.protocol.Channel.
        """
        # Serialize calls, in order to setup new channels only once.
        yield self._channel_lock.acquire()
        try:
            if self._channel and self._channel.client.closed:
                # If we have a client but it's closed, let's wait for it to be
                # fully disconnected and spin a reactor iteration to give the
                # AMQClient.connectionLost callback chain a chance to settle
                # (in particular our ClientService will be notified and will
                # start connecting again).
                yield self._channel.client.disconnected.wait()
                yield deferLater(self._clock, 0, lambda: None)

            client = yield self._service.whenConnected()
            channel = yield client.channel(1)
            # Check if we got a new channel, and initialize it if so.
            if channel is not self._channel:
                self._channel = channel
                yield self._channel.channel_open()
                # This tells the broker to deliver us at most one message at
                # a time to support using multiple processes (e.g. in a
                # load-balanced/HA deployment). If NotificationSource.get()
                # gets called against the same UUID first by process A and then
                # when it completes by process B, we're guaranteed that process
                # B will see the very next message in the queue, because
                # process A hasn't fetched any more messages than the one it
                # received. See #729140.
                yield self._channel.basic_qos(prefetch_count=1)
        finally:
            self._channel_lock.release()
        returnValue(self._channel)
Example 37
class emailer( LabradServer ):
    name = 'Email Server'
    
    @inlineCallbacks
    def initServer( self ):
        self.username, self.fromaddr, self.password = yield self.getInfoReg()
        self.password = base64.b64decode(self.password)
        self.toaddrs = {}
        self.smtp = 'smtp.gmail.com:587'
        self.sending = DeferredLock()
    
    @inlineCallbacks
    def getInfoReg(self):
        reg = self.client.registry
        yield reg.cd(['Servers','Email Server'])
        username = yield reg.get('username')
        fromaddr = yield reg.get('address')
        password = yield reg.get('password')
        returnValue([username,fromaddr,password])
        
    @setting(0, "Set Recipients", recepients = '*s', returns = '')
    def setRecepients(self, c, recepients):
        """Set the recipients of the email as a list of strings of email addresses"""
        self.toaddrs[c.ID] = recepients
    
    @setting(1, "Send", subject = 's', message = 's', returns = '')
    def send(self, c, subject, message):
        """Send an email with the given subject and message to the set recipients"""
        if not self.toaddrs[c.ID]: raise Exception("Recipients not set")
        yield self.sending.acquire()  
        session = smtplib.SMTP(self.smtp)
        session.starttls()
        session.login(self.username,self.password)
        toaddrs = self.toaddrs[c.ID]
        msg = MIMEMultipart()
        msg['From'] = self.fromaddr
        msg['To'] = COMMASPACE.join(toaddrs)
        msg['Subject'] = subject
        msg.attach(MIMEText(message, 'plain'))    
        session.sendmail(self.fromaddr, toaddrs, msg.as_string())
        session.quit()
        self.sending.release()
    
    def initContext(self, c):
        """Initialize a new context object."""
        pass
    
    def expireContext(self, c):
        del self.toaddrs[c.ID]
Example 38
class _DhcpSetterCommon:
    # Provides support common code for shutting down on shutdown, and
    # handles locking.
    def __init__(self):
        self._lock = DeferredLock()
        self.is_shutdown = False
        reactor.addSystemEventTrigger('before', 'shutdown', self._shutdown)
    
    @inlineCallbacks
    def _cb(self, old_state, new_state):
        if self.is_shutdown:
            return
        yield self._lock.acquire()
        try:
            yield self._locked_cb(old_state, new_state)
        finally:
            self._lock.release()
    
    @inlineCallbacks
    def _shutdown(self):
        #print "Shutdown", self
        yield self._cb(None, None)
        self.is_shutdown = True
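Subclasses of _DhcpSetterCommon are expected to supply _locked_cb, which then always runs with the lock held and is flushed one last time with (None, None) at shutdown. A minimal hypothetical subclass:

from twisted.internet.defer import inlineCallbacks, succeed

class ExampleDhcpSetter(_DhcpSetterCommon):
    """Hypothetical subclass showing the _locked_cb contract."""

    @inlineCallbacks
    def _locked_cb(self, old_state, new_state):
        # React to the (old_state -> new_state) transition under the lock.
        yield succeed(None)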
Example 39
class Agent(object):
    """
    Main class associated with getting the internals of the
    agent's operations up and running, including adding or updating
    itself with the master, starting the periodic task manager,
    and handling shutdown conditions.
    """
    def __init__(self):
        # so parts of this instance are accessible elsewhere
        assert "agent" not in config
        config["agent"] = self
        self.services = {}
        self.register_shutdown_events = False
        self.last_free_ram_post = time.time()
        self.repeating_call_counter = {}
        self.shutdown_timeout = None
        self.post_shutdown_lock = DeferredLock()
        self.stop_lock = DeferredLock()
        self.reannounce_lock = utility.TimedDeferredLock()
        self.stopped = False

        # Register a callback so self.shutdown_timeout is set when
        # "shutting_down" is set or modified.
        config.register_callback(
            "shutting_down", self.callback_shutting_down_changed)

    @classmethod
    def agent_api(cls):
        """
        Return the API url for this agent or None if `agent_id` has not
        been set
        """
        try:
            return cls.agents_endpoint() + str(config["agent_id"])
        except KeyError:
            svclog.error(
                "The `agent_id` configuration value has not been set yet")
            return None

    @classmethod
    def agents_endpoint(cls):
        """
        Returns the API endpoint for used for updating or creating
        agents on the master
        """
        return config["master_api"] + "/agents/"

    @property
    def shutting_down(self):
        return config.get("shutting_down", False)

    @shutting_down.setter
    def shutting_down(self, value):
        assert value in (True, False)
        config["shutting_down"] = value

    def repeating_call(
            self, delay, function, function_args=None, function_kwargs=None,
            now=True, repeat_max=None, function_id=None):
        """
        Causes ``function`` to be called repeatedly up until ``repeat_max``
        or until stopped.

        :param int delay:
            Number of seconds to delay between calls of ``function``.

            ..  note::

                ``delay`` is an approximate interval between when one call ends
                and the next one begins.  The exact time can vary due
                to how the Twisted reactor runs, how long it takes
                ``function`` to run and what else may be going on in the
                agent at the time.

        :param function:
            A callable function to run

        :type function_args: tuple, list
        :keyword function_args:
            Arguments to pass into ``function``

        :keyword dict function_kwargs:
            Keywords to pass into ``function``

        :keyword bool now:
            If True then run ``function`` right now in addition
            to scheduling it.

        :keyword int repeat_max:
            Repeat calling ``function`` this many times.  If not provided
            then we'll continue to repeat calling ``function`` until
            the agent shuts down.

        :keyword uuid.UUID function_id:
            Used internally to track a function's execution count.  This
            keyword exists so if you call :meth:`repeating_call` multiple
            times on the same function or method it will handle ``repeat_max``
            properly.
        """
        if self.shutting_down:
            svclog.debug(
                "Skipping task %s(*%r, **%r) [shutting down]",
                function.__name__, function_args, function_kwargs
            )
            return

        if function_args is None:
            function_args = ()

        if function_kwargs is None:
            function_kwargs = {}

        if function_id is None:
            function_id = uuid.uuid4()

        assert isinstance(delay, NUMERIC_TYPES[:-1])
        assert callable(function)
        assert isinstance(function_args, (list, tuple))
        assert isinstance(function_kwargs, dict)
        assert repeat_max is None or isinstance(repeat_max, int)
        repeat_count = self.repeating_call_counter.setdefault(function_id, 0)

        if repeat_max is None or repeat_count < repeat_max:
            svclog.debug(
                "Executing task %s(*%r, **%r).  Next execution in %s seconds.",
                function.__name__, function_args, function_kwargs, delay
            )

            # Run this function right now using another deferLater so
            # it's scheduled by the reactor and executed before we schedule
            # another.
            if now:
                deferLater(
                    reactor, 0, function, *function_args, **function_kwargs
                )
                self.repeating_call_counter[function_id] += 1
                repeat_count = self.repeating_call_counter[function_id]

            # Schedule the next call but only if we have not hit the max
            if repeat_max is None or repeat_count < repeat_max:
                deferLater(
                    reactor, delay, self.repeating_call, delay,
                    function, function_args=function_args,
                    function_kwargs=function_kwargs, now=True,
                    repeat_max=repeat_max, function_id=function_id
                )

    def should_reannounce(self):
        """Small method which acts as a trigger for :meth:`reannounce`"""
        if self.reannounce_lock.locked or self.shutting_down:
            return False

        contacted = config.master_contacted(update=False)
        if contacted is None:
            return True

        return utility.total_seconds(
            datetime.utcnow() - contacted) > config["agent_master_reannounce"]

    @inlineCallbacks
    def reannounce(self, force=False):
        """
        Method which is used to periodically contact the master.  This
        method is generally called as part of a scheduled task.
        """
        # Attempt to acquire the reannounce lock but fail after 70%
        # of the total time between reannouncements elapses.  This should
        # help prevent an accumulation of requests in the event the master
        # is having issues.
        try:
            yield self.reannounce_lock.acquire(
                config["agent_master_reannounce"] * .70
            )
        except utility.LockTimeoutError:
            svclog.debug("Timed out while waiting to acquire reannounce_lock")
            returnValue(None)

        if not self.should_reannounce() and not force:
            yield self.reannounce_lock.release()
            returnValue(None)

        svclog.debug("Announcing %s to master", config["agent_hostname"])
        data = None
        num_retry_errors = 0
        while True:  # for retries
            try:
                response = yield post_direct(
                    self.agent_api(),
                    data={
                        "state": config["state"],
                        "current_assignments": config.get(
                            "current_assignments", {} # may not be set yet
                        ),
                        "free_ram": memory.free_ram(),
                        "disks": disks.disks(as_dict=True)
                    }
                )

            except (ResponseNeverReceived, RequestTransmissionFailed) as error:
                num_retry_errors += 1
                if num_retry_errors > config["broken_connection_max_retry"]:
                    svclog.error(
                        "Failed to announce self to the master, "
                        "caught try-again type errors %s times in a row.",
                        num_retry_errors)
                    break
                else:
                    svclog.debug("While announcing self to master, caught "
                                 "%s. Retrying immediately.",
                                 error.__class__.__name__)
            except Exception as error:
                if force:
                    delay = http_retry_delay()
                    svclog.error(
                        "Failed to announce self to the master: %s.  Will "
                        "retry in %s seconds.", error, delay)
                    deferred = Deferred()
                    reactor.callLater(delay, deferred.callback, None)
                    yield deferred
                else:
                    # Don't retry because reannounce is called periodically
                    svclog.error(
                        "Failed to announce self to the master: %s.  This "
                        "request will not be retried.", error)
                    break

            else:
                data = yield treq.json_content(response)
                if response.code == OK:
                    config.master_contacted(announcement=True)
                    svclog.info("Announced self to the master server.")
                    break

                elif response.code >= INTERNAL_SERVER_ERROR:
                    if not self.shutting_down:
                        delay = http_retry_delay()
                        svclog.warning(
                            "Could not announce self to the master server, "
                            "internal server error: %s.  Retrying in %s "
                            "seconds.", data, delay)

                        deferred = Deferred()
                        reactor.callLater(delay, deferred.callback, None)
                        yield deferred
                    else:
                        svclog.warning(
                            "Could not announce to master. Not retrying "
                            "because of pending shutdown.")
                        break

                elif response.code == NOT_FOUND:
                    svclog.warning("The master says it does not know about our "
                                   "agent id. Posting as a new agent.")
                    yield self.post_agent_to_master()
                    break

                # If this is a client problem retrying the request
                # is unlikely to fix the issue so we stop here
                elif response.code >= BAD_REQUEST:
                    svclog.error(
                        "Failed to announce self to the master, bad "
                        "request: %s.  This request will not be retried.",
                        data)
                    break

                else:
                    svclog.error(
                        "Unhandled error when posting self to the "
                        "master: %s (code: %s).  This request will not be "
                        "retried.", data, response.code)
                    break

        yield self.reannounce_lock.release()
        returnValue(data)

    def system_data(self, requery_timeoffset=False):
        """
        Returns a dictionary of data containing information about the
        agent.  This is the information that is also passed along to
        the master.
        """
        # query the time offset and then cache it since
        # this is typically a blocking operation
        if config["agent_time_offset"] == "auto":
            config["agent_time_offset"] = None

        if requery_timeoffset or config["agent_time_offset"] is None:
            ntplog.info(
                "Querying ntp server %r for current time",
                config["agent_ntp_server"])

            ntp_client = NTPClient()
            try:
                pool_time = ntp_client.request(
                    config["agent_ntp_server"],
                    version=config["agent_ntp_server_version"])

            except Exception as e:
                ntplog.warning("Failed to determine network time: %s", e)

            else:
                config["agent_time_offset"] = \
                    int(pool_time.tx_time - time.time())

                # format the offset for logging purposes
                utcoffset = datetime.utcfromtimestamp(pool_time.tx_time)
                iso_timestamp = utcoffset.isoformat()
                ntplog.debug(
                    "network time: %s (local offset: %r)",
                    iso_timestamp, config["agent_time_offset"])

                if config["agent_time_offset"] != 0:
                    ntplog.warning(
                        "Agent is %r second(s) off from ntp server at %r",
                        config["agent_time_offset"],
                        config["agent_ntp_server"])

        data = {
            "id": config["agent_id"],
            "hostname": config["agent_hostname"],
            "version": config.version,
            "os_class": system.operating_system(),
            "os_fullname": platform(),
            "ram": int(config["agent_ram"]),
            "cpus": config["agent_cpus"],
            "cpu_name": cpu.cpu_name(),
            "port": config["agent_api_port"],
            "free_ram": memory.free_ram(),
            "time_offset": config["agent_time_offset"] or 0,
            "state": config["state"],
            "mac_addresses": list(network.mac_addresses()),
            "current_assignments": config.get(
                "current_assignments", {}), # may not be set yet
            "disks": disks.disks(as_dict=True)
        }

        try:
            gpu_names = graphics.graphics_cards()
            data["gpus"] = gpu_names
        except graphics.GPULookupError:
            pass

        if "remote_ip" in config:
            data.update(remote_ip=config["remote_ip"])

        if config["farm_name"]:
            data["farm_name"] = config["farm_name"]

        return data

    def build_http_resource(self):
        svclog.debug("Building HTTP Service")
        root = Resource()

        # static endpoints to redirect resources
        # to the right objects
        root.putChild(
            "favicon.ico",
            StaticPath(join(config["agent_static_root"], "favicon.ico"),
                       defaultType="image/x-icon"))
        root.putChild(
            "static",
            StaticPath(config["agent_static_root"]))

        # external endpoints
        root.putChild("", Index())
        root.putChild("configuration", Configuration())

        # api endpoints
        api = root.putChild("api", APIRoot())
        api.putChild("versions", Versions())
        v1 = api.putChild("v1", APIRoot())

        # Top level api endpoints
        v1.putChild("assign", Assign(self))
        v1.putChild("tasks", Tasks())
        v1.putChild("config", Config())
        v1.putChild("task_logs", TaskLogs())

        # Endpoints which are generally used for status
        # and operations.
        v1.putChild("status", Status())
        v1.putChild("stop", Stop())
        v1.putChild("restart", Restart())
        v1.putChild("update", Update())
        v1.putChild("check_software", CheckSoftware())

        return root

    def _start_manhole(self, port, username, password):
        """
        Starts the manhole server so we can connect to the agent
        over telnet
        """
        if "manhole" in self.services:
            svclog.warning(
                "Telnet manhole service is already running on port %s",
                self.services["manhole"].port)
            return

        svclog.info("Starting telnet manhole on port %s", port)

        # Since we don't always need this module we import
        # it here to save on memory and other resources
        from pyfarm.agent.manhole import manhole_factory

        # Contains the things which will be in the top level
        # namespace of the Python interpreter.
        namespace = {
            "config": config, "agent": self,
            "jobtypes": config["jobtypes"],
            "current_assignments": config["current_assignments"]}

        factory = manhole_factory(namespace, username, password)
        self.services["manhole"] = reactor.listenTCP(port, factory)

    def _start_http_api(self, port):
        """
        Starts the HTTP api so the master can communicate
        with the agent.
        """
        if "api" in self.services:
            svclog.warning(
                "HTTP API service already running on port %s",
                self.services["api"].port)
            return

        http_resource = self.build_http_resource()
        self.services["api"] = reactor.listenTCP(port, Site(http_resource))

    def start(self, shutdown_events=True, http_server=True):
        """
        Internal code which starts the agent, registers it with the master,
        and performs the other steps necessary to get things running.

        :param bool shutdown_events:
            If True register all shutdown events so certain actions, such as
            informing the master we're going offline, can take place.

        :param bool http_server:
            If True then construct and serve the externally facing http
            server.
        """
        if config["agent_manhole"]:
            self._start_manhole(config["agent_manhole_port"],
                                config["agent_manhole_username"],
                                config["agent_manhole_password"])

        # setup the internal http server so external entities can
        # interact with the service.
        if http_server:
            self._start_http_api(config["agent_api_port"])

        # Update the configuration with this pid (which may be different
        # than the original pid).
        config["pids"].update(child=os.getpid())

        # get ready to 'publish' the agent
        config.register_callback(
            "agent_id",
            partial(
                self.callback_agent_id_set, shutdown_events=shutdown_events))
        return self.post_agent_to_master()

    @inlineCallbacks
    def stop(self):
        """
        Internal code which stops the agent.  This will terminate any running
        processes, inform the master of the terminated tasks, update the
        state of the agent on the master.
        """
        yield self.stop_lock.acquire()
        if self.stopped:
            yield self.stop_lock.release()
            svclog.warning("Agent is already stopped")
            returnValue(None)

        svclog.info("Stopping the agent")

        self.shutting_down = True
        self.shutdown_timeout = (
            datetime.utcnow() + timedelta(
                seconds=config["agent_shutdown_timeout"]))

        if self.agent_api() is not None:
            try:
                yield self.post_shutdown_to_master()
            except Exception as error:  # pragma: no cover
                svclog.warning(
                    "Error while calling post_shutdown_to_master()", error)
        else:
            svclog.warning("Cannot post shutdown, agent_api() returned None")

        utility.remove_file(
            config["agent_lock_file"], retry_on_exit=True, raise_=False)

        svclog.debug("Stopping execution of jobtypes")
        for jobtype_id, jobtype in config["jobtypes"].items():
            try:
                jobtype.stop()
            except Exception as error:  # pragma: no cover
                svclog.warning(
                    "Error while calling stop() on %s (id: %s): %s",
                    jobtype, jobtype_id, error
                )
                config["jobtypes"].pop(jobtype_id)

        svclog.info(
            "Waiting on %s job types to terminate", len(config["jobtypes"]))

        while config["jobtypes"] and datetime.utcnow() < self.shutdown_timeout:
            for jobtype_id, jobtype in config["jobtypes"].copy().items():
                if not jobtype._has_running_processes():
                    svclog.warning(
                        "%r has not removed itself, forcing removal",
                        jobtype)
                    config["jobtypes"].pop(jobtype_id)

            # Brief delay so we don't tie up the cpu
            delay = Deferred()
            reactor.callLater(1, delay.callback, None)
            yield delay

        self.stopped = True
        yield self.stop_lock.release()
        returnValue(None)

    def sigint_handler(self, *_):
        utility.remove_file(
            config["run_control_file"], retry_on_exit=True, raise_=False)

        def errback(failure):
            svclog.error(
                "Error while attempting to shutdown the agent: %s", failure)

            # Stop the reactor but handle the exit code ourselves otherwise
            # Twisted will just exit with 0.
            reactor.stop()
            sys.exit(1)

        # Call stop() and wait for it to finish before we stop
        # the reactor.
        # NOTE: We're not using inlineCallbacks here because reactor.stop()
        # would be called in the middle of the generator unwinding
        deferred = self.stop()
        deferred.addCallbacks(lambda _: reactor.stop(), errback)

    @inlineCallbacks
    def post_shutdown_to_master(self):
        """
        This method is called before the reactor shuts down and lets the
        master know that the agent's state is now ``offline``
        """
        # We're under the assumption that something's wrong with
        # our code if we try to call this method before self.shutting_down
        # is set.
        assert self.shutting_down
        yield self.post_shutdown_lock.acquire()

        svclog.info("Informing master of shutdown")

        # Because post_shutdown_to_master is blocking and needs to
        # stop the reactor from finishing we perform the retry in-line
        data = None
        tries = 0
        num_retry_errors = 0
        response = None
        timed_out = False
        while True:
            tries += 1
            try:
                response = yield post_direct(
                    self.agent_api(),
                    data={
                        "state": AgentState.OFFLINE,
                        "free_ram": memory.free_ram(),
                        "current_assignments": config["current_assignments"]})

            except (ResponseNeverReceived, RequestTransmissionFailed) as error:
                num_retry_errors += 1
                if num_retry_errors > config["broken_connection_max_retry"]:
                    svclog.error(
                        "Failed to post shutdown to the master, "
                        "caught try-again errors %s times in a row.",
                        num_retry_errors)
                    break
                elif self.shutdown_timeout < datetime.utcnow():
                    svclog.error("While posting shutdown to master, caught "
                                 "%s. Shutdown timeout has been reached, not "
                                 "retrying.",
                                 error.__class__.__name__)
                    break
                else:
                    svclog.debug("While posting shutdown to master, caught "
                                 "%s. Retrying immediately.",
                                 error.__class__.__name__)
            # When we get a hard failure it could be an issue with the
            # server, although it's unlikely, so we retry.  Only retry
        # for a set period of time though, since the shutdown has a timeout.
            except Exception as failure:
                if self.shutdown_timeout > datetime.utcnow():
                    delay = http_retry_delay()
                    svclog.warning(
                        "State update failed due to unhandled error: %s.  "
                        "Retrying in %s seconds",
                        failure, delay)

                    # Wait for 'pause' to fire, introducing a delay
                    pause = Deferred()
                    reactor.callLater(delay, pause.callback, None)
                    yield pause

                else:
                    timed_out = True
                    svclog.warning(
                        "State update failed due to unhandled error: %s.  "
                        "Shutdown timeout reached, not retrying.",
                        failure)
                    break

            else:
                data = yield treq.json_content(response)
                if response.code == NOT_FOUND:
                    svclog.warning(
                        "Agent %r no longer exists, cannot update state.",
                        config["agent_id"])
                    break

                elif response.code == OK:
                    svclog.info(
                        "Agent %r has POSTed shutdown state change "
                        "successfully.",
                        config["agent_id"])
                    break

                elif response.code >= INTERNAL_SERVER_ERROR:
                    if self.shutdown_timeout > datetime.utcnow():
                        delay = http_retry_delay()
                        svclog.warning(
                            "State update failed due to server error: %s.  "
                            "Retrying in %s seconds.",
                            data, delay)

                        # Wait for 'pause' to fire, introducing a delay
                        pause = Deferred()
                        reactor.callLater(delay, pause.callback, None)
                        yield pause
                    else:
                        timed_out = True
                        svclog.warning(
                            "State update failed due to server error: %s.  "
                            "Shutdown timeout reached, not retrying.",
                            data)
                        break

        yield self.post_shutdown_lock.release()
        extra_data = {
            "response": response,
            "timed_out": timed_out,
            "tries": tries,
            "retry_errors": num_retry_errors
        }

        if isinstance(data, dict):
            data.update(extra_data)
        else:
            data = extra_data

        returnValue(data)

    @inlineCallbacks
    def post_agent_to_master(self):
        """
        Runs the POST request to contact the master.  Running this method
        multiple times should be considered safe but is generally something
        that should be avoided.
        """
        url = self.agents_endpoint()
        data = self.system_data()

        try:
            response = yield post_direct(url, data=data)
        except Exception as failure:
            delay = http_retry_delay()
            if isinstance(failure, ConnectionRefusedError):
                svclog.error(
                    "Failed to POST agent to master, the connection was "
                    "refused. Retrying in %s seconds", delay)
            else:  # pragma: no cover
                svclog.error(
                    "Unhandled error when trying to POST the agent to the "
                    "master. The error was %s.", failure)

            if not self.shutting_down:
                svclog.info(
                    "Retrying failed POST to master in %s seconds.", delay)
                yield deferLater(reactor, delay, self.post_agent_to_master)
            else:
                svclog.warning("Not retrying POST to master, shutting down.")

        else:
            # Master might be down or have some other internal problems
            # that might eventually be fixed.  Retry the request.
            if response.code >= INTERNAL_SERVER_ERROR:
                if not self.shutting_down:
                    delay = http_retry_delay()
                    svclog.warning(
                        "Failed to post to master due to a server side error "
                        "error %s, retrying in %s seconds",
                        response.code, delay)
                    yield deferLater(reactor, delay, self.post_agent_to_master)
                else:
                    svclog.warning(
                        "Failed to post to master due to a server side error "
                        "error %s. Not retrying, because the agent is "
                        "shutting down", response.code)

            # Master is up but is rejecting our request because there's
            # something wrong with it.  Do not retry the request.
            elif response.code >= BAD_REQUEST:
                text = yield response.text()
                svclog.error(
                    "%s accepted our POST request but responded with code %s "
                    "which is a client side error.  The message the server "
                    "responded with was %r.  Sorry, but we cannot retry this "
                    "request as it's an issue with the agent's request.",
                    url, response.code, text)

            else:
                data = yield treq.json_content(response)
                config["agent_id"] = data["id"]
                config.master_contacted()

                if response.code == OK:
                    svclog.info(
                        "POST to %s was successful. Agent %s was updated.",
                        url, config["agent_id"])

                elif response.code == CREATED:
                    svclog.info(
                        "POST to %s was successful.  A new agent "
                        "with an id of %s was created.",
                        url, config["agent_id"])

                returnValue(data)

    def callback_agent_id_set(
            self, change_type, key, new_value, old_value, shutdown_events=True):
        """
        When `agent_id` is created we need to:

            * Register a shutdown event so that when the agent is told to
              shutdown it will notify the master of a state change.
            * Start the scheduled task manager
        """
        if key == "agent_id" and change_type == config.CREATED \
                and not self.register_shutdown_events:
            if shutdown_events:
                self.register_shutdown_events = True

            # set the initial free_ram
            config["free_ram"] = memory.free_ram()

            config.master_contacted()
            svclog.debug(
                "`%s` was %s, adding system event trigger for shutdown",
                key, change_type)

            self.repeating_call(
                config["agent_master_reannounce"], self.reannounce)

    def callback_shutting_down_changed(
            self, change_type, key, new_value, old_value):
        """
        When `shutting_down` is changed in the configuration, set or
        reset self.shutdown_timeout
        """
        if change_type not in (config.MODIFIED, config.CREATED):
            return

        if new_value is not True:
            self.shutdown_timeout = None
            return

        self.shutdown_timeout = timedelta(
            seconds=config["agent_shutdown_timeout"]) + datetime.utcnow()
        svclog.debug("New shutdown_timeout is %s", self.shutdown_timeout)
Esempio n. 40
0
class NormalPMTFlow( LabradServer):
    
    name = 'NormalPMTFlow'
    onNewCount = Signal(SIGNALID, 'signal: new count', 'v')
    
    @inlineCallbacks
    def initServer(self):
        # TODO: improve on this to allow starting in arbitrary order
        self.dv = yield self.client.data_vault
        self.n = yield self.client.normalpmtcountfpga
        self.pbox = yield self.client.paul_box
        self.trigger = yield self.client.trigger
        self.saveFolder = ['','PMT Counts']
        self.dataSetName = 'PMT Counts'
        self.dataSet = None
        self.collectTimes = {'Normal':0.100, 'Differential':0.100}
        self.lastDifferential = {'ON': 0, 'OFF': 0}
        self.currentMode = 'Normal'
        self.running = DeferredLock()
        self.requestList = []
        self.keepRunning = False
    
#    @inlineCallbacks
#    def confirmPBoxScripting(self):
#        self.script = 'DifferentialPMTCount.py'
#        self.variable = 'CountingInterval'
#        allScripts = yield self.pbox.get_available_scripts()
#        if script not in allScripts: raise Exception('Pauls Box script {} does not exist'.format(script))
#        allVariables = yield self.pbox.get_variable_list(script)
#        if variable not in allVariables[0]: raise Exception('Variable {} not found'.format(variable))
    
    @inlineCallbacks
    def makeNewDataSet(self):
        dir = self.saveFolder
        name = self.dataSetName
        yield self.dv.cd(dir, True)
        self.dataSet = yield self.dv.new(name, [('t', 'num')], [('KiloCounts/sec','866 ON','num'),('KiloCounts/sec','866 OFF','num'),('KiloCounts/sec','Differential Signal','num')])
        self.startTime = time.time()
        yield self.addParameters()
    
    @inlineCallbacks
    def addParameters(self):
        yield self.dv.add_parameter('plotLive',True)
        yield self.dv.add_parameter('startTime',self.startTime)
    
    @setting(0, 'Set Save Folder', folder = '*s', returns = '')
    def setSaveFolder(self,c , folder):
        yield self.dv.cd(folder, True)
        self.saveFolder = folder
    
    @setting(1, 'Start New Dataset', setName = 's', returns = '')
    def setNewDataSet(self, c, setName = None):
        """Starts new dataset, if name not provided, it will be the same"""
        if setName is not None: self.dataSetName = setName
        yield self.makeNewDataSet()
    
    @setting( 2, "Set Mode", mode = 's', returns = '' )
    def setMode(self,c, mode):
        """
        Set the PMT collection mode ('Normal' or 'Differential')
        """
        if mode not in self.collectTimes: raise Exception("Incorrect Mode")
        if not self.keepRunning:
            self.currentMode = mode
            yield self.n.set_mode(mode)
        else:
            yield self.dostopRecording()
            self.currentMode = mode
            yield self.n.set_mode(mode)
            yield self.dorecordData()

    @setting(3, 'getCurrentMode', returns = 's')
    def getCurrentMode(self, c):
        """
        Returns the currently running mode
        """
        return self.currentMode
    
    @setting(4, 'Record Data', returns = '')
    def recordData(self, c):
        """
        Starts recording data of the current PMT mode into datavault
        """
        yield self.dorecordData()
    
    @inlineCallbacks
    def dorecordData(self):
        self.keepRunning = True
        yield self.n.set_collection_time(self.collectTimes[self.currentMode], self.currentMode)
        yield self.n.set_mode(self.currentMode)
        if self.currentMode == 'Differential':
            yield self._programPBOXDiff()
        if self.dataSet is None:
            yield self.makeNewDataSet()
        reactor.callLater(0, self._record)
    
    @setting(5, returns = '')
    def stopRecording(self,c):
        """
        Stop recording counts into Data Vault
        """
        yield self.dostopRecording()
    
    @inlineCallbacks
    def dostopRecording(self):
        self.keepRunning = False
        yield self.running.acquire()
        self.running.release()
        yield self._programPBOXEmpty()
        
    @setting(6, returns = 'b')
    def isRunning(self,c):
        """
        Returns whether or not currently recording
        """
        return self.keepRunning
        
    @setting(7, returns = 's')
    def currentDataSet(self,c):
        if self.dataSet is None: return ''
        name = self.dataSet[1]
        return name
    
    @setting(8, 'Set Time Length', timelength = 'v', mode = 's')
    def setTimeLength(self, c, timelength, mode = None):
        if mode is None: mode = self.currentMode
        if mode not in self.collectTimes: raise Exception("Incorrect Mode")
        if not 0 < timelength < 5.0: raise Exception("Incorrect Recording Time")
        self.collectTimes[mode] = timelength
        if mode == self.currentMode:
            yield self.running.acquire()
            yield self.n.set_collection_time(timelength, mode)
            if mode == 'Differential':
                yield self._programPBOXDiff()
            self.running.release()
        else:
            yield self.n.set_collection_time(timelength, mode)
        
    @setting(9, 'Get Next Counts', type = 's', number = 'w', average = 'b', returns = ['*v', 'v'])
    def getNextCounts(self, c, type, number, average = False):
        """
        Acquires next number of counts, where type can be 'ON' or 'OFF' or 'DIFF'
        Average is optionally True if the counts should be averaged
        
        Note that in differential mode, DIFF counts update on every reading, but ON and OFF
        update only on every other reading.
        """
        if type not in ['ON', 'OFF', 'DIFF']: raise Exception("Incorrect type")
        if type in ['OFF', 'DIFF'] and self.currentMode == 'Normal': raise Exception("In the wrong mode to process this request")
        if not 0 < number < 1000: raise Exception("Incorrect Number")
        if not self.keepRunning: raise Exception("Not currently recording")
        d = Deferred()
        self.requestList.append(self.readingRequest(d, type, number))
        data = yield d
        if average:
            data = sum(data) / len(data)
        returnValue(data)
    
    @setting(10, 'Get Time Length', returns = 'v')
    def getTimeLength(self, c):
        """
        Returns the collection time length of the current mode
        """
        return self.collectTimes[self.currentMode]
    
    @inlineCallbacks
    def _programPBOXDiff(self):
        yield self.pbox.send_command('DifferentialPMTCount.py',[['FLOAT','CountingInterval',str(10**6 * self.collectTimes['Differential'])]])
        yield deferToThread(time.sleep,.2) #give it enough time to finish programming
        yield self.trigger.trigger('PaulBox')
    
    @inlineCallbacks
    def _programPBOXEmpty(self):
        yield self.pbox.send_command('emptySequence.py',[['FLOAT','nothing','0']])
        yield self.trigger.trigger('PaulBox')
        
    class readingRequest():
        def __init__(self, d, type, count):
            self.d = d
            self.count = count
            self.type = type
            self.data = []
    
    def processRequests(self, data):
        for dataPoint in data:
            # iterate over a copy so fulfilled requests can be removed
            for req in self.requestList[:]:
                # column 1 holds ON counts, column 2 OFF, column 3 the
                # differential signal
                if req.type == 'ON' and dataPoint[1] != 0:
                    req.data.append(dataPoint[1])
                elif req.type == 'OFF' and dataPoint[2] != 0:
                    req.data.append(dataPoint[2])
                elif req.type == 'DIFF' and dataPoint[3] != 0:
                    req.data.append(dataPoint[3])
                if len(req.data) == req.count:
                    # remove before firing so the Deferred cannot be
                    # called back twice
                    self.requestList.remove(req)
                    req.d.callback(req.data)
                        
    @inlineCallbacks
    def _record(self):
        yield self.running.acquire()
        if self.keepRunning:
            rawdata = yield self.n.get_all_counts()
            if len(rawdata) != 0:
                if self.currentMode == 'Normal':
                    toDataVault = [ [elem[2] - self.startTime, elem[0], 0, 0] for elem in rawdata] # converting to format [time, normal count, 0 , 0]
                elif self.currentMode =='Differential':
                    toDataVault = self.convertDifferential(rawdata)
                self.processRequests(toDataVault)
                self.processSignals(toDataVault)
                yield self.dv.add(toDataVault)
            self.running.release()
            delayTime = self.collectTimes[self.currentMode]/2 # set to half the collection time so as not to miss anything
            reactor.callLater(delayTime,self._record)
        else:
            self.running.release()
    
    def processSignals(self, data):
        lastPt = data[-1]
        NormalCount = lastPt[1]
        self.onNewCount(NormalCount)
    
    def convertDifferential(self, rawdata):
        totalData = []
        for dataPoint in rawdata:
            t = str(dataPoint[1])
            self.lastDifferential[t] = float(dataPoint[0])
            diff = self.lastDifferential['ON'] - self.lastDifferential['OFF']
            totalData.append( [ dataPoint[2] - self.startTime, self.lastDifferential['ON'], self.lastDifferential['OFF'], diff ] )
        return totalData
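
# --- Illustrative sketch, not part of the server above: the
# Deferred-based request queue behind getNextCounts/processRequests,
# as a standalone pattern.  A caller registers how many readings it
# wants and gets a Deferred; the acquisition loop feeds points in and
# fires the Deferred once enough have accumulated.  Names here are
# illustrative, not from the original.
from twisted.internet.defer import Deferred

pending = []

def wait_for_counts(count):
    d = Deferred()
    pending.append((d, count, []))
    return d  # fires with a list of `count` readings

def feed(reading):
    for entry in pending[:]:
        d, count, data = entry
        data.append(reading)
        if len(data) == count:
            pending.remove(entry)
            d.callback(data)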
Esempio n. 41
0
class UnitLifecycle(object):
    """Manager for a unit lifecycle.

    Primarily used by the workflow interaction, to modify unit behavior
    according to the current unit workflow state and transitions.
    """

    def __init__(self, client, unit, service, unit_path, executor):
        self._client = client
        self._unit = unit
        self._service = service
        self._executor = executor
        self._unit_path = unit_path
        self._relations = {}
        self._running = False
        self._watching_relation_memberships = False
        self._watching_relation_resolved = False
        self._run_lock = DeferredLock()
        self._log = logging.getLogger("unit.lifecycle")

    def get_relation_workflow(self, relation_id):
        """Accessor to a unit relation workflow, by relation id.

        Primarily intended for and used by unit tests. Raises
        a KeyError if the relation workflow does not exist.
        """
        return self._relations[relation_id]

    @inlineCallbacks
    def install(self, fire_hooks=True):
        """Invoke the unit's install hook.
        """
        if fire_hooks:
            yield self._execute_hook("install")

    @inlineCallbacks
    def upgrade_charm(self, fire_hooks=True):
        """Invoke the unit's upgrade-charm hook.
        """
        if fire_hooks:
            yield self._execute_hook("upgrade-charm", now=True)
        # Restart queued hook execution.
        self._executor.start()

    @inlineCallbacks
    def start(self, fire_hooks=True):
        """Invoke the start hook, and setup relation watching.
        """
        self._log.debug("pre-start acquire, running:%s", self._running)
        yield self._run_lock.acquire()
        self._log.debug("start running, unit lifecycle")
        watches = []

        try:
            # Verify current state
            assert not self._running, "Already started"

            # Execute the start hook
            if fire_hooks:
                yield self._execute_hook("config-changed")
                yield self._execute_hook("start")

            # If we have any existing relations in memory, start them.
            if self._relations:
                self._log.debug("starting relation lifecycles")

            for workflow in self._relations.values():
                yield workflow.transition_state("up")

            # Establish a watch on the existing relations.
            if not self._watching_relation_memberships:
                self._log.debug("starting service relation watch")
                watches.append(self._service.watch_relation_states(
                    self._on_service_relation_changes))
                self._watching_relation_memberships = True

            # Establish a watch for resolved relations
            if not self._watching_relation_resolved:
                self._log.debug("starting unit relation resolved watch")
                watches.append(self._unit.watch_relation_resolved(
                    self._on_relation_resolved_changes))
                self._watching_relation_resolved = True

            # Set current status
            self._running = True
        finally:
            self._run_lock.release()

        # Give up the run lock before waiting on initial watch invocations.
        results = yield DeferredList(watches, consumeErrors=True)

        # If there's an error reraise the first one found.
        errors = [e[1] for e in results if not e[0]]
        if errors:
            returnValue(errors[0])

        self._log.debug("started unit lifecycle")

    @inlineCallbacks
    def stop(self, fire_hooks=True):
        """Stop the unit, executes the stop hook, and stops relation watching.
        """
        self._log.debug("pre-stop acquire, running:%s", self._running)
        yield self._run_lock.acquire()
        try:
            # Verify state
            assert self._running, "Already Stopped"

            # Stop relation lifecycles
            if self._relations:
                self._log.debug("stopping relation lifecycles")

            for workflow in self._relations.values():
                yield workflow.transition_state("down")

            if fire_hooks:
                yield self._execute_hook("stop")

            # Set current status
            self._running = False
        finally:
            self._run_lock.release()
        self._log.debug("stopped unit lifecycle")

    @inlineCallbacks
    def configure(self, fire_hooks=True):
        """Inform the unit that its service config has changed.
        """
        if not fire_hooks:
            returnValue(None)
        yield self._run_lock.acquire()
        try:
            # Verify State
            assert self._running, "Needs to be running."

            # Execute hook
            yield self._execute_hook("config-changed")
        finally:
            self._run_lock.release()
        self._log.debug("configured unit")

    @inlineCallbacks
    def _on_relation_resolved_changes(self, event):
        """Callback for unit relation resolved watching.

        The callback is invoked whenever the relation resolved
        settings change.
        """
        self._log.debug("relation resolved changed")
        # Acquire the run lock, and process the changes.
        yield self._run_lock.acquire()

        try:
            # If the unit lifecycle isn't running we shouldn't process
            # any relation resolutions.
            if not self._running:
                self._log.debug("stop watch relation resolved changes")
                self._watching_relation_resolved = False
                raise StopWatcher()

            self._log.info("processing relation resolved changed")
            if self._client.connected:
                yield self._process_relation_resolved_changes()
        finally:
            yield self._run_lock.release()

    @inlineCallbacks
    def _process_relation_resolved_changes(self):
        """Invoke retry transitions on relations if their not running.
        """
        relation_resolved = yield self._unit.get_relation_resolved()
        if relation_resolved is None:
            returnValue(None)
        else:
            yield self._unit.clear_relation_resolved()

        keys = set(relation_resolved).intersection(self._relations)
        for rel_id in keys:
            relation_workflow = self._relations[rel_id]
            relation_state = yield relation_workflow.get_state()
            if relation_state == "up":
                continue
            yield relation_workflow.transition_state("up")

    @inlineCallbacks
    def _on_service_relation_changes(self, old_relations, new_relations):
        """Callback for service relation watching.

        The callback is used to manage the unit relation lifecycle in
        accordance with the current relations of the service.

        @param old_relations: Previous service relations for a service. On the
               initial execution, this value is None.
        @param new_relations: Current service relations for a service.
        """
        self._log.debug(
            "services changed old:%s new:%s", old_relations, new_relations)

        # Acquire the run lock, and process the changes.
        yield self._run_lock.acquire()
        try:
            # If the lifecycle is not running, then stop the watcher
            if not self._running:
                self._log.debug("stop service-rel watcher, discarding changes")
                self._watching_relation_memberships = False
                raise StopWatcher()

            self._log.debug("processing relations changed")
            yield self._process_service_changes(old_relations, new_relations)
        finally:
            self._run_lock.release()

    @inlineCallbacks
    def _process_service_changes(self, old_relations, new_relations):
        """Add and remove unit lifecycles per the service relations Determine.
        """
        # changes relation delta of global zk state with our memory state.
        new_relations = dict([(service_relation.internal_relation_id,
                               service_relation) for
                              service_relation in new_relations])
        added = set(new_relations.keys()) - set(self._relations.keys())
        removed = set(self._relations.keys()) - set(new_relations.keys())

        # Stop and remove, old ones.

        # Trying to directly transition this causes additional yielding
        # operations, which means that concurrent events for subsequent
        # watch firings will be executed. ie. if the relation
        # is broken, but a subsequent modify comes in for a related unit,
        # it will cause the modify to have a hook execution. To prevent
        # this we stop the lifecycle immediately before executing the
        # transition. see UnitLifecycleTest.test_removed_relation_depart
        for relation_id in removed:
            yield self._relations[relation_id].lifecycle.stop()

        for relation_id in removed:
            workflow = self._relations.pop(relation_id)
            yield workflow.transition_state("departed")

        # Process new relations.
        for relation_id in added:
            service_relation = new_relations[relation_id]
            try:
                unit_relation = yield service_relation.get_unit_state(
                    self._unit)
            except UnitRelationStateNotFound:
                # This unit has not yet been assigned a unit relation state,
                # so go ahead and add one.
                unit_relation = yield service_relation.add_unit_state(
                    self._unit)

            self._log.debug(
                "Starting new relation: %s", service_relation.relation_name)

            workflow = self._get_unit_relation_workflow(unit_relation,
                                                        service_relation)
            # Start it before storing it.
            yield workflow.fire_transition("start")
            self._relations[service_relation.internal_relation_id] = workflow

    def _get_unit_path(self):
        """Retrieve the root path of the unit.
        """
        return self._unit_path

    def _get_unit_relation_workflow(self, unit_relation, service_relation):

        lifecycle = UnitRelationLifecycle(self._client,
                                          self._unit.unit_name,
                                          unit_relation,
                                          service_relation.relation_name,
                                          self._get_unit_path(),
                                          self._executor)

        state_directory = os.path.abspath(os.path.join(
            self._unit_path, "../../state"))

        workflow = RelationWorkflowState(
            self._client, unit_relation, lifecycle, state_directory)

        return workflow

    @inlineCallbacks
    def _execute_hook(self, hook_name, now=False):
        """Execute the hook with the given name.

        For priority hooks, the hook is scheduled and then the
        executor started, before waiting on the result.
        """
        unit_path = self._get_unit_path()
        hook_path = os.path.join(unit_path, "charm", "hooks", hook_name)
        socket_path = os.path.join(unit_path, HOOK_SOCKET_FILE)

        invoker = Invoker(HookContext(self._client, self._unit.unit_name),
                          None, "constant", socket_path,
                          self._unit_path, hook_log)
        if now:
            yield self._executor.run_priority_hook(invoker, hook_path)
        else:
            yield self._executor(invoker, hook_path)
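
# --- Illustrative sketch, not part of the class above: the set
# arithmetic _process_service_changes uses to turn the old and new
# relation mappings into "added" and "removed" relation ids.  The
# relation ids here are made up for the example.
current = {"rel-1": "workflow-1", "rel-2": "workflow-2"}  # in memory
latest = {"rel-2": "workflow-2", "rel-3": "workflow-3"}   # from zookeeper

added = set(latest) - set(current)     # {"rel-3"}: start these
removed = set(current) - set(latest)   # {"rel-1"}: stop, then depart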
Esempio n. 42
0
class SBProtocol(Protocol):
    """
    Superbox Protocol
    """

    connection_count = 0
    countPendingCmd = 0

    def __init__(self):
        """
        Constructor
        """
        self.m_buffer = ""
        self.lockBuffer = DeferredLock()
        self.tmActivate = time.time()
        self.dictWaitResp = {}
        self.lock_dictWaitResp = threading.RLock()
        self.dictControlling = {}
        self.cond_dictControlling = threading.Condition()
        # self.timer=threading.Timer(Config.time_heartbeat,self.timeout)
        # self.timer.start()
        self.timer = reactor.callLater(Config.time_heartbeat, self.timeout)
        self.lockCmd = threading.RLock()
        self.HeaderTagType = -1  # -1: not decided, 0: no header_tag, 1: has header_tag
        self.rcv_alarm = "False"
        self.role = ""

    def dataReceived(self, data):
        Protocol.dataReceived(self, data)
        self.lockBuffer.acquire().addCallback(self.AddDataAndDecode, data)

    def connectionMade(self):
        # print "a connection made: ", id(self.transport), self.transport.getPeer().host
        ip = self.transport.getPeer().host
        #         if ip.find("10.")!=0:
        #             logging.info("a connection made:%s,%s ", id(self.transport), ip)
        #         pass
        #
        with self.factory.lockPendingCmd:
            SBProtocol.connection_count = SBProtocol.connection_count + 1
            if SBProtocol.connection_count > Config.count_connection:
                self.transport.loseConnection()
                print "close connection due to reaching connection limit."

    def RunCommand(self, command):
        with self.factory.lockPendingCmd:
            SBProtocol.countPendingCmd = SBProtocol.countPendingCmd - 1
        command.Run()

    def AddDataAndDecode(self, lock, data):
        print "data received in transport %d : %s (%s)" % (id(self.transport), Util.asscii_string(data), data)
        self.m_buffer += data
        while len(self.m_buffer) >= Command.BaseCommand.CBaseCommand.HEAD_LEN:
            self.m_buffer, command = self.Decode(self.m_buffer)
            if command is None:
                break

            # The cap on pending commands is tied to the connection count: roughly one command per connection on average
            if SBProtocol.countPendingCmd < Config.count_connection / 100:
                threads.deferToThread(self.RunCommand, command)
                with self.factory.lockPendingCmd:
                    SBProtocol.countPendingCmd = SBProtocol.countPendingCmd + 1
            else:
                try:
                    cmd_resp = command.GetResp()
                    cmd_resp.SetErrorCode(Command.BaseCommand.CS_SERVERBUSY)
                    cmd_resp.Send()
                except:
                    pass
        lock.release()

    def Decode(self, data):
        """
        return a tuple: (new data, command)
        """
        if self.HeaderTagType < 0:  # not yet decided
            if data[:4] == self.factory.SBMP_HEADERTAG:
                self.HeaderTagType = 1
            else:
                self.HeaderTagType = 0

        if self.HeaderTagType == 1:
            tag_position = data.find(self.factory.SBMP_HEADERTAG)
            if tag_position < 0:
                return (data, None)
            data = data[tag_position + 4 :]  # remove head tag
        length, command_id = struct.unpack("!2I", data[:8])
        command = None
        if length <= len(data):
            command_data = data[:length]
            if Command.dicInt_Type.has_key(command_id):
                try:
                    command = Command.dicInt_Type[command_id](command_data, self)
                except Exception, e:
                    logging.error(
                        "build command exception in transport %d: %s :%s",
                        id(self.transport),
                        str(e),
                        Util.asscii_string(command_data),
                    )
                    command = None
            else:
                command = Command.BaseCommand.CMesscodeCommand(command_data, self)
            data = data[length:]
        else:
            # Not enough bytes buffered for a complete command yet; keep
            # the buffer intact and wait for more data.
            pass
        return data, command
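
# --- Illustrative sketch, not part of the protocol above: the
# length-prefixed framing Decode implements, standalone.  Each frame
# begins with two network-order uint32s, total length and command id;
# anything shorter than its declared length stays buffered.
import struct

def decode_frames(buf):
    frames = []
    while len(buf) >= 8:
        length, command_id = struct.unpack("!2I", buf[:8])
        if length > len(buf):
            break  # incomplete frame; wait for more data
        frames.append((command_id, buf[:length]))
        buf = buf[length:]
    return buf, frames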
Esempio n. 43
0
class UnitRelationLifecycle(object):
    """Unit Relation Lifcycle management.

    Provides for watching related units in a relation, and executing hooks
    in response to changes. The lifecycle is driven by the workflow.

    The Unit relation lifecycle glues together a number of components.
    It controls a watcher that receives watch events from zookeeper,
    and it controls a hook scheduler which gets fed those events. When
    the scheduler wants to execute a hook, the executor is called with
    the hook path and the hook invoker.

    **Relation hook invocations do not maintain global order or
    determinism across relations**. They only maintain ordering and
    determinism within a relation. A shared scheduler across relations
    would be needed to maintain such behavior.
    """

    def __init__(self, client, unit_name, unit_relation, relation_name, unit_path, executor):
        self._client = client
        self._unit_path = unit_path
        self._relation_name = relation_name
        self._unit_relation = unit_relation
        self._executor = executor
        self._run_lock = DeferredLock()
        self._log = logging.getLogger("unit.relation.lifecycle")
        self._error_handler = None

        self._scheduler = HookScheduler(client,
                                        self._execute_change_hook,
                                        self._unit_relation,
                                        self._relation_name,
                                        unit_name=unit_name)
        self._watcher = None

    @inlineCallbacks
    def _execute_change_hook(self, context, change, hook_name=None):
        """Invoked by the contained HookScheduler, to execute a hook.

        We utilize the HookExecutor to execute the hook, if an
        error occurs, it will be reraised, unless an error handler
        is specified see ``set_hook_error_handler``.
        """
        socket_path = os.path.join(self._unit_path, HOOK_SOCKET_FILE)
        if hook_name is None:
            if change.change_type == "departed":
                hook_names = [
                    "%s-relation-departed" % self._relation_name]
            elif change.change_type == "joined":
                hook_names = [
                    "%s-relation-joined" % self._relation_name,
                    "%s-relation-changed" % self._relation_name]
            else:
                hook_names = ["%s-relation-changed" % self._relation_name]
        else:
            hook_names = [hook_name]

        invoker = RelationInvoker(
            context, change, "constant", socket_path, self._unit_path,
            hook_log)

        for hook_name in hook_names:
            hook_path = os.path.join(
                self._unit_path, "charm", "hooks", hook_name)
            yield self._run_lock.acquire()
            self._log.debug("Executing hook %s", hook_name)
            try:
                yield self._executor(invoker, hook_path)
            except Exception as e:
                yield self._run_lock.release()
                self._log.warn("Error in %s hook: %s", hook_name, e)

                if not self._error_handler:
                    raise
                self._log.info(
                    "Invoked error handler for %s hook", hook_name)
                # We can't hold the run lock, when we invoke the error
                # handler, or we get a deadlock if the handler
                # manipulates the lifecycle.
                yield self._error_handler(change, e)
            else:
                yield self._run_lock.release()
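The hook execution above is careful to release _run_lock before calling the error handler, since a handler that re-enters the lifecycle would deadlock on the same lock. A distilled sketch of that pattern (run_hook and error_handler are hypothetical stand-ins):

from twisted.internet.defer import DeferredLock, inlineCallbacks

@inlineCallbacks
def guarded_execute(lock, run_hook, error_handler=None):
    yield lock.acquire()
    try:
        yield run_hook()
    except Exception as e:
        # release first, so the handler may safely re-acquire the lock
        lock.release()
        if error_handler is None:
            raise
        yield error_handler(e)
    else:
        lock.release()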
Esempio n. 44
0
class PicomotorServer(LabradServer):

    name = 'PicomotorServer'
    
    # signal arguments are (axis, new absolute position)
    on_position_change = Signal(144821, 'signal: position change', '(ii)' )

    def construct_command(self, axis, command, nn = None):
        if nn is None:
            return str(axis) + command
        else:
            return str(axis) + command + str(nn)

    @inlineCallbacks
    def initServer(self):
        self.controller = yield Controller( idProduct=0x4000, idVendor=0x104d )
        self.position_dict = dict.fromkeys( [1, 2, 3, 4], 0)
        self.setpoint = dict.fromkeys( [1, 2, 3, 4], 0)
        self.inCommunication = DeferredLock()
        self.listeners = set()


    @setting(0, 'Get Position', axis = 'i', returns = 'i')
    def get_position(self, c, axis):
        """
        Query the controller for the position of the given axis
        and also update position_dict
        """
        yield self.inCommunication.acquire()
        pos = yield self.controller.get_position(axis)
        self.inCommunication.release()

        self.position_dict[axis] = pos
        self.notifyOtherListeners(c, (axis, pos))
        returnValue(pos)

    @setting(1, 'Absolute Move', axis = 'i', pos = 'i')
    def absolute_move(self, c, axis, pos):
        """
        Move the given axis to a given absolute position
        """
        yield self.inCommunication.acquire()
        yield self.controller.absolute_move(axis, pos)
        self.inCommunication.release()

        self.position_dict[axis] = pos
        self.notifyOtherListeners(c, (axis, pos))    

    @setting(2, 'Relative Move', axis = 'i', steps = 'i', returns = 'i')
    def relative_move(self, c, axis, steps):
        """
        Move the given axis the given number of steps.
        Returns the new absolute position.
        """
        yield self.inCommunication.acquire()
        yield self.controller.relative_move(axis, steps)
        self.inCommunication.release()

        self.position_dict[axis] += steps
        self.notifyOtherListeners(c, (axis, self.position_dict[axis]) )
        
        returnValue(self.position_dict[axis])

    @setting(3, 'Mark current setpoint')
    def mark_setpoint(self, c):
        """
        Save the current position of all the axes
        to possibly return to later
        """
        
        axes = [1, 2, 3, 4]
        yield self.inCommunication.acquire()
        for axis in axes:
            pos = yield self.controller.get_position(axis)
            self.position_dict[axis] = pos
        self.inCommunication.release()
        
        self.setpoint = self.position_dict.copy()

    @setting(4, 'Return to setpoint')
    def return_to_setpoint(self, c):
        """
        Return all axes to the saved setpoint
        """
        axes = [1, 2, 3, 4]
        yield self.inCommunication.acquire()
        for axis in axes:
            yield self.controller.absolute_move( axis, self.setpoint[axis] )
            pos = self.setpoint[axis]
            self.position_dict[axis] = pos
            self.notifyOtherListeners(c, (axis, pos))
        self.inCommunication.release()

    def notifyOtherListeners(self, context, message):
        notified = self.listeners.copy()
        notified.remove(context.ID)
        self.on_position_change(message, notified)

    def initContext(self, c):
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
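Each setting above brackets the serial exchange with acquire()/release(); note that a failure in the controller call would leak the lock, since there is no try/finally. Twisted's DeferredLock.run offers the same serialization with guaranteed release; a sketch (locked_get_position is a hypothetical helper):

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def locked_get_position(lock, controller, axis):
    # run() acquires the lock, calls the function, and releases once the
    # resulting Deferred fires -- even if it fails
    pos = yield lock.run(controller.get_position, axis)
    returnValue(pos)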
Esempio n. 45
0
class AndorServer(LabradServer):
    """ Contains methods that interact with the Andor CCD Cameras"""

    name = "Andor Server"

    def initServer(self):
        self.listeners = set()
        self.camera = AndorCamera()
        self.lock = DeferredLock()
        self.gui = AndorVideo(self)

    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)

    def expireContext(self, c):
        self.listeners.remove(c.ID)

    def getOtherListeners(self,c):
        notified = self.listeners.copy()
        notified.remove(c.ID)
        return notified
    '''
    Temperature Related Settings
    '''
    @setting(0, "Get Temperature", returns = 'v[degC]')
    def get_temperature(self, c):
        """Gets Current Device Temperature"""
        temperature = None
        print 'acquiring: {}'.format(self.get_temperature.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.get_temperature.__name__)
            temperature  = yield deferToThread(self.camera.get_temperature)
        finally:
            print 'releasing: {}'.format(self.get_temperature.__name__)
            self.lock.release()
        if temperature is not None:
            temperature = WithUnit(temperature, 'degC')
            returnValue(temperature)

    @setting(1, "Get Cooler State", returns = 'b')
    def get_cooler_state(self, c):
        """Returns Current Cooler State"""
        cooler_state = None
        print 'acquiring: {}'.format(self.get_cooler_state.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.get_cooler_state.__name__)
            cooler_state = yield deferToThread(self.camera.get_cooler_state)
        finally:
            print 'releasing: {}'.format(self.get_cooler_state.__name__)
            self.lock.release()
        if cooler_state is not None:
            returnValue(cooler_state)

    @setting(3, "Set Temperature", setTemp = 'v[degC]', returns = '')
    def set_temperature(self, c, setTemp):
        """Sets The Target Temperature"""
        print 'acquiring: {}'.format(self.set_temperature.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.set_temperature.__name__)
            yield deferToThread(self.camera.set_temperature, setTemp['degC'])
        finally:
            print 'releasing: {}'.format(self.set_temperature.__name__)
            self.lock.release()

    @setting(4, "Set Cooler On", returns = '')
    def set_cooler_on(self, c):
        """Turns Cooler On"""
        print 'acquiring: {}'.format(self.set_cooler_on.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.set_cooler_on.__name__)
            yield deferToThread(self.camera.set_cooler_on)
        finally:
            print 'releasing: {}'.format(self.set_cooler_on.__name__)
            self.lock.release()

    @setting(5, "Set Cooler Off", returns = '')
    def set_cooler_off(self, c):
        """Turns Cooler On"""
        print 'acquiring: {}'.format(self.set_cooler_off.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.set_cooler_off.__name__)
            yield deferToThread(self.camera.set_cooler_off)
        finally:
            print 'releasing: {}'.format(self.set_cooler_off.__name__)
            self.lock.release()

    '''
    EMCCD Gain Settings
    '''
    @setting(6, "Get EMCCD Gain", returns = 'i')
    def getEMCCDGain(self, c):
        """Gets Current EMCCD Gain"""
        gain = None
        print 'acquiring: {}'.format(self.getEMCCDGain.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.getEMCCDGain.__name__)
            gain = yield deferToThread(self.camera.get_emccd_gain)
        finally:
            print 'releasing: {}'.format(self.getEMCCDGain.__name__)
            self.lock.release()
        if gain is not None:
            returnValue(gain)

    @setting(7, "Set EMCCD Gain", gain = 'i', returns = '')
    def setEMCCDGain(self, c, gain):
        """Sets Current EMCCD Gain"""
        print 'acquiring: {}'.format(self.setEMCCDGain.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setEMCCDGain.__name__)
            yield deferToThread(self.camera.set_emccd_gain, gain)
        finally:
            print 'releasing: {}'.format(self.setEMCCDGain.__name__)
            self.lock.release()
        if c is not None:
            self.gui.set_gain(gain)
    '''
    Read mode
    '''
    @setting(8, "Get Read Mode", returns = 's')
    def getReadMode(self, c):
        return self.camera.get_read_mode()

    @setting(9, "Set Read Mode", readMode = 's', returns = '')
    def setReadMode(self, c, readMode):
        """Sets Current Read Mode"""
        print 'acquiring: {}'.format(self.setReadMode.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setReadMode.__name__)
            yield deferToThread(self.camera.set_read_mode, readMode)
        finally:
            print 'releasing: {}'.format(self.setReadMode.__name__)
            self.lock.release()

    '''
    Shutter Mode
    '''

    @setting(100, "get_shutter_mode", returns = 's')
    def get_shutter_mode(self, c):
        return self.camera.get_shutter_mode()

    @setting(101, "set_shutter_mode", shutterMode = 's', returns = '')
    def set_shutter_mode(self, c, shutterMode):
        """Sets Current Shutter Mode"""
        print 'acquiring: {}'.format(self.set_shutter_mode.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.set_shutter_mode.__name__)
            yield deferToThread(self.camera.set_shutter_mode, shutterMode)
        finally:
            print 'releasing: {}'.format(self.set_shutter_mode.__name__)
            self.lock.release()

    '''
    Acquisition Mode
    '''
    @setting(10, "Get Acquisition Mode", returns = 's')
    def getAcquisitionMode(self, c):
        """Gets Current Acquisition Mode"""
        return self.camera.get_acquisition_mode()

    @setting(11, "Set Acquisition Mode", mode = 's', returns = '')
    def setAcquisitionMode(self, c, mode):
        """Sets Current Acquisition Mode"""
        print 'acquiring: {}'.format(self.setAcquisitionMode.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setAcquisitionMode.__name__)
            yield deferToThread(self.camera.set_acquisition_mode, mode)
        finally:
            print 'releasing: {}'.format(self.setAcquisitionMode.__name__)
            self.lock.release()
        self.gui.set_acquisition_mode(mode)
    '''
    Trigger Mode
    '''
    @setting(12, "Get Trigger Mode", returns = 's')
    def getTriggerMode(self, c):
        """Gets Current Trigger Mode"""
        return self.camera.get_trigger_mode()

    @setting(13, "Set Trigger Mode", mode = 's', returns = '')
    def setTriggerMode(self, c, mode):
        """Sets Current Trigger Mode"""
        print 'acquiring: {}'.format(self.setTriggerMode.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setTriggerMode.__name__)
            yield deferToThread(self.camera.set_trigger_mode, mode)
        finally:
            print 'releasing: {}'.format(self.setTriggerMode.__name__)
            self.lock.release()
        self.gui.set_trigger_mode(mode)

    '''
    Exposure Time
    '''
    @setting(14, "Get Exposure Time", returns = 'v[s]')
    def getExposureTime(self, c):
        """Gets Current Exposure Time"""
        time = self.camera.get_exposure_time()
        return WithUnit(time, 's')

    @setting(15, "Set Exposure Time", expTime = 'v[s]', returns = 'v[s]')
    def setExposureTime(self, c, expTime):
        """Sets Current Exposure Time"""
        print 'acquiring: {}'.format(self.setExposureTime.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setExposureTime.__name__)
            yield deferToThread(self.camera.set_exposure_time, expTime['s'])
        finally:
            print 'releasing: {}'.format(self.setExposureTime.__name__)
            self.lock.release()
        #need to request the actual set value because it may differ from the request when the request is not possible
        time = self.camera.get_exposure_time()
        if c is not None:
            self.gui.set_exposure(time)
        returnValue(WithUnit(time, 's'))
    '''
    Image Region
    '''
    @setting(16, "Get Image Region", returns = '*i')
    def getImageRegion(self, c):
        """Gets Current Image Region"""
        return self.camera.get_image()

    @setting(17, "Set Image Region", horizontalBinning = 'i', verticalBinning = 'i', horizontalStart = 'i', horizontalEnd = 'i', verticalStart = 'i', verticalEnd = 'i', returns = '')
    def setImageRegion(self, c, horizontalBinning, verticalBinning, horizontalStart, horizontalEnd, verticalStart, verticalEnd):
        """Sets Current Image Region"""
        print 'acquiring: {}'.format(self.setImageRegion.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setImageRegion.__name__)
            yield deferToThread(self.camera.set_image, horizontalBinning, verticalBinning, horizontalStart, horizontalEnd, verticalStart, verticalEnd)
        finally:
            print 'releasing: {}'.format(self.setImageRegion.__name__)
            self.lock.release()
    '''
    Acquisition
    '''
    @setting(18, "Start Acquisition", returns = '')
    def startAcquisition(self, c):
        print 'acquiring: {}'.format(self.startAcquisition.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.startAcquisition.__name__)
            #speeds up the call to start_acquisition
            yield deferToThread(self.camera.prepare_acqusition)
            yield deferToThread(self.camera.start_acquisition)
            #necessary so that start_acquisition call completes even for long kinetic series
            #yield self.wait(0.050)
            yield self.wait(0.1)
        finally:
            print 'releasing: {}'.format(self.startAcquisition.__name__)
            self.lock.release()

    @setting(19, "Wait For Acquisition", returns = '')
    def waitForAcquisition(self, c):
        print 'acquiring: {}'.format(self.waitForAcquisition.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.waitForAcquisition.__name__)
            yield deferToThread(self.camera.wait_for_acquisition)
        finally:
            print 'releasing: {}'.format(self.waitForAcquisition.__name__)
            self.lock.release()

    @setting(20, "Abort Acquisition", returns = '')
    def abortAcquisition(self, c):
        if c is not None and self.gui.live_update_running:
            yield self.gui.stop_live_display()
        print 'acquiring: {}'.format(self.abortAcquisition.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.abortAcquisition.__name__)
            yield deferToThread(self.camera.abort_acquisition)
        finally:
            print 'releasing: {}'.format(self.abortAcquisition.__name__)
            self.lock.release()

    @setting(21, "Get Acquired Data", num_images = 'i',returns = '*i')
    def getAcquiredData(self, c, num_images = 1):
        """Get the acquired images"""
        print 'acquiring: {}'.format(self.getAcquiredData.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.getAcquiredData.__name__)
            image = yield deferToThread(self.camera.get_acquired_data, num_images)
        finally:
            print 'releasing: {}'.format(self.getAcquiredData.__name__)
            self.lock.release()
        returnValue(image)

    @setting(33, "Get Summed Data", num_images = 'i', returns = '*i')
    def getSummedData(self, c, num_images = 1):
        ''' Get the counts with the vertical axis summed over. '''

        print 'acquiring: {}'.format(self.getSummedData.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.getSummedData.__name__)
            images = yield deferToThread(self.camera.get_acquired_data, num_images)
            hbin, vbin, hstart, hend, vstart, vend = self.camera.get_image()
            x_pixels = int( (hend - hstart + 1.) / (hbin) )
            y_pixels = int( (vend - vstart + 1.) / (vbin) )
            images = np.reshape(images, (num_images, y_pixels, x_pixels))
            images = images.sum(axis=1)
            images = np.ravel(images, order='C')
            images = images.tolist()
        finally:
            print 'releasing: {}'.format(self.getSummedData.__name__)
            self.lock.release()
        returnValue(images)
    '''
    General
    '''
    @setting(22, "Get Camera Serial Number", returns = 'i')
    def getCameraSerialNumber(self, c):
        """Gets Camera Serial Number"""
        return self.camera.get_camera_serial_number()

    @setting(23, "Get Most Recent Image", returns = '*i')
    def getMostRecentImage(self, c):
        """Get all Data"""
#         print 'acquiring: {}'.format(self.getMostRecentImage.__name__)
        yield self.lock.acquire()
        try:
#             print 'acquired : {}'.format(self.getMostRecentImage.__name__)
            image = yield deferToThread(self.camera.get_most_recent_image)
        finally:
#             print 'releasing: {}'.format(self.getMostRecentImage.__name__)
            self.lock.release()
        returnValue(image)

    @setting(24, "Start Live Display", returns = '')
    def startLiveDisplay(self, c):
        """Starts live display of the images on the GUI"""
        yield self.gui.start_live_display()

    @setting(25, "Is Live Display Running", returns = 'b')
    def isLiveDisplayRunning(self, c):
        return self.gui.live_update_running

    @setting(26, "Get Number Kinetics", returns = 'i')
    def getNumberKinetics(self, c):
        """Gets Number Of Scans In A Kinetic Cycle"""
        return self.camera.get_number_kinetics()

    @setting(27, "Set Number Kinetics", numKin = 'i', returns = '')
    def setNumberKinetics(self, c, numKin):
        """Sets Number Of Scans In A Kinetic Cycle"""
        print 'acquiring: {}'.format(self.setNumberKinetics.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setNumberKinetics.__name__)
            yield deferToThread(self.camera.set_number_kinetics, numKin)
        finally:
            print 'releasing: {}'.format(self.setNumberKinetics.__name__)
            self.lock.release()
    # UPDATED THE TIMEOUT. FIX IT LATER
    @setting(28, "Wait For Kinetic", timeout = 'v[s]',returns = 'b')
    def waitForKinetic(self, c, timeout = WithUnit(1,'s')):
        '''Waits until the given number of kinetic images are completed'''
        requestCalls = int(timeout['s'] / 0.050 ) #number of request calls
        for i in range(requestCalls):
            print 'acquiring: {}'.format(self.waitForKinetic.__name__)
            yield self.lock.acquire()
            try:
                print 'acquired : {}'.format(self.waitForKinetic.__name__)
                status = yield deferToThread(self.camera.get_status)
                #useful for debugging how many iterations have been completed in case of missed trigger pulses
                a,b = yield deferToThread(self.camera.get_series_progress)
                print a,b
                print status
            finally:
                print 'releasing: {}'.format(self.waitForKinetic.__name__)
                self.lock.release()
            if status == 'DRV_IDLE':
                returnValue(True)
            yield self.wait(0.050)
        returnValue(False)

    @setting(31, "Get Detector Dimensions", returns = 'ww')
    def get_detector_dimensions(self, c):
        print 'acquiring: {}'.format(self.get_detector_dimensions.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.get_detector_dimensions.__name__)
            dimensions = yield deferToThread(self.camera.get_detector_dimensions)
        finally:
            print 'releasing: {}'.format(self.get_detector_dimensions.__name__)
            self.lock.release()
        returnValue(dimensions)

    @setting(32, "getemrange", returns = '(ii)')
    def getemrange(self, c):
        #emrange = yield self.camera.get_camera_em_gain_range()
        #returnValue(emrange)
        return self.camera.get_camera_em_gain_range()


    def wait(self, seconds, result=None):
        """Returns a deferred that will be fired later"""
        d = Deferred()
        reactor.callLater(seconds, d.callback, result)
        return d

    def stop(self):
        self._stopServer()

    @inlineCallbacks
    def stopServer(self):
        """Shuts down camera before closing"""
        try:
            if self.gui.live_update_running:
                yield self.gui.stop_live_display()
            print 'acquiring: {}'.format(self.stopServer.__name__)
            yield self.lock.acquire()
            print 'acquired : {}'.format(self.stopServer.__name__)
            self.camera.shut_down()
            print 'releasing: {}'.format(self.stopServer.__name__)
            self.lock.release()
        except Exception:
            #not yet created
            pass
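Nearly every setting in AndorServer repeats the same acquire/deferToThread/finally-release dance around a blocking camera call. A sketch of a helper that would collapse that boilerplate (locked_camera_call is a hypothetical name):

from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.threads import deferToThread

@inlineCallbacks
def locked_camera_call(lock, func, *args):
    yield lock.acquire()
    try:
        # run the blocking driver call in a worker thread while holding the lock
        result = yield deferToThread(func, *args)
    finally:
        lock.release()
    returnValue(result)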
Esempio n. 46
0
class Dataset(QtCore.QObject):
    
    def __init__(self, data_vault, context, dataset_location,reactor):
        super(Dataset, self).__init__()
        self.data = None
        self.accessingData = DeferredLock()
        self.reactor = reactor
        self.dataset_location = dataset_location
        self.data_vault = data_vault
        self.updateCounter = 0
        self.context = context
        self.connectDataVault()
        self.setupListeners()

    @inlineCallbacks
    def connectDataVault(self):
        yield self.data_vault.cd(self.dataset_location[0], context = self.context)
        path, dataset_name = yield self.data_vault.open(self.dataset_location[1], context = self.context)
        self.dataset_name = dataset_name

    @inlineCallbacks
    def setupListeners(self):
        yield self.data_vault.signal__data_available(11111, context = self.context)
        yield self.data_vault.addListener(listener = self.updateData, source = None, ID = 11111, context = self.context)


    @inlineCallbacks
    def openDataset(self):
        yield self.data_vault.cd(self.dataset_location[0], context = self.context)
        yield self.data_vault.open(self.dataset_location[1], context = self.context)

    @inlineCallbacks
    def getParameters(self):
        parameters = yield self.data_vault.parameters(context = self.context)
        parameterValues = []
        for parameter in parameters:
            parameterValue = yield self.data_vault.get_parameter(parameter, context = self.context)
            parameterValues.append( (parameter, parameterValue) )
        returnValue(parameterValues)

    def updateData(self, x, y):
        self.updateCounter += 1
        self.getData()

    @inlineCallbacks
    def getData(self):
        Data = yield self.data_vault.get(100, context = self.context)
        if self.data is None:
            yield self.accessingData.acquire()
            try:
                self.data = Data.asarray
            except:
                self.data = Data
            self.accessingData.release()
        else:
            yield self.accessingData.acquire()
            try:
                self.data = np.append(self.data, Data.asarray, 0)
            except:
                self.data = np.append(self.data, Data, 0)
            self.accessingData.release()

    @inlineCallbacks
    def getLabels(self):
        labels = []
        yield self.openDataset()
        variables = yield self.data_vault.variables(context = self.context)
        for i in range(len(variables[1])):
            labels.append(variables[1][i][1] + ' - ' + self.dataset_name)
        returnValue(labels)

    @inlineCallbacks
    def disconnectDataSignal(self):
        yield self.data_vault.removeListener(listener = self.updateData, source = None, ID = 11111, context = self.context)
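setupListeners above performs the usual LabRAD data_vault handshake: request the named signal under a chosen ID, then route that ID to a local callback. The same wiring as a reusable sketch (names and the 11111 ID mirror the code above):

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def subscribe_to_new_data(data_vault, context, on_data, signal_id=11111):
    # ask the server to emit 'data available' under our ID, then listen for it
    yield data_vault.signal__data_available(signal_id, context=context)
    yield data_vault.addListener(listener=on_data, source=None,
                                 ID=signal_id, context=context)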
Esempio n. 47
0
class Dataset(QtCore.QObject):

    """Class to handle incoming data and prepare them for plotting """
    def __init__(self, parent, cxn, context, dataset, directory, datasetName, reactor):
        super(Dataset, self).__init__()
        self.accessingData = DeferredLock()
        self.parent = parent
        self.cxn = cxn
        self.context = context # context of the first dataset in the window
        self.dataset = dataset
        self.datasetName = datasetName
        self.directory = directory
        self.reactor = reactor
        self.data = None
#        self.hasPlotParameter = False
        self.cnt = 0
        self.setupDataListener(self.context)
#        self.setupFitListener(self.context)

#    @inlineCallbacks
#    def checkForPlotParameter(self):
#        self.parameters = yield self.cxn.data_vault.get_parameters(context = self.context)
#        if (self.parameters != None):
#            for (parameterName, value) in self.parameters:
#                if (str(parameterName) == 'plotLive'):
#                    self.hasPlotParameter = True
#                elif ((self.hasPlotParameter == True and str(parameterName) == 'Fit')):
#                      self.updateFit()

    @inlineCallbacks
    def getWindowParameter(self):
        try:
            value = yield self.cxn.data_vault.get_parameter('Window', context = self.context)
        except:
            value = None
        returnValue(value)

    # open dataset in order to listen for new data signals in current context
    @inlineCallbacks
    def openDataset(self, context):
        yield self.cxn.data_vault.cd(self.directory, context = context)
        yield self.cxn.data_vault.open(self.dataset, context = context)
        self.parameters = yield self.cxn.data_vault.parameters(context = context)
        self.parameterValues = []
        for parameter in self.parameters:
            parameterValue = yield self.cxn.data_vault.get_parameter(parameter, context = context)
            self.parameterValues.append(parameterValue)

#    @inlineCallbacks
#    def setupParameterListener(self, context):
#        yield self.cxn.data_vault.signal__new_parameter(66666, context = context)
#        yield self.cxn.data_vault.addListener(listener = self.updateParameter, source = None, ID = 66666, context = context)

#    # Over 60 seconds, check if the dataset has the appropriate 'plotLive' parameter
#    @inlineCallbacks
#    def listenForPlotParameter(self):
#        for i in range(20):
#            if (self.hasPlotParameter == True):
#                returnValue(self.hasPlotParameter)
##            yield deferToThread(time.sleep, .5)
#            yield self.wait(.5)
#        returnValue(self.hasPlotParameter)
#
#    def updateParameter(self, x, y):
#        self.checkForPlotParameter()

        #append whatever to self.parameters

#    # sets up the listener for new data
#    @inlineCallbacks
#    def setupFitListener(self, context):
#        yield self.cxn.data_vault.signal__new_parameter(22222, context = context)
#        yield self.cxn.data_vault.addListener(listener = self.updateFit, source = None, ID = 22222, context = context)

#    # new data signal
    @inlineCallbacks
#    def updateFit(self):
    def fit(self):
        value = yield self.cxn.data_vault.get_parameter('Fit', context = self.context)
        variables = yield self.cxn.data_vault.variables(context = self.context)
        numberDependentVariables = len(variables[1])
#       if (self.parameters != None):
        try:
            for window in self.parent.dwDict[self]:
                window.fitFromScript(self.dataset, self.directory, numberDependentVariables, value)
        except KeyError:
            print 'dwDict not created yet. Either the Fit parameter was added before data was created, or the data was added too quickly. Try adding a pause after adding all the data intended for fitting.'
    # sets up the listener for new data
    @inlineCallbacks
    def setupDataListener(self, context):
        yield self.cxn.data_vault.signal__data_available(11111, context = context)
        yield self.cxn.data_vault.addListener(listener = self.updateData, source = None, ID = 11111, context = context)
        #self.setupDeferred.callback(True)
        self.updatecounter = 0
        self.timer = self.startTimer(100)

    # new data signal
    def updateData(self,x,y):
        self.updatecounter = self.updatecounter + 1
        self.getData(self.context)
#        print 'still happening dataset'

    def timerEvent(self,evt):
        #print self.updatecounter
#        print 'in dataset'
#        if self.updatecounter < 1:
#            print 'slowing down!, less than 1 dataupdate per 100milliseconds '
        self.updatecounter = 0

    def endTimer(self):
        self.killTimer(self.timer)

    @inlineCallbacks
    def disconnectDataSignal(self):
        yield self.cxn.data_vault.removeListener(listener = self.updateData, source = None, ID = 11111, context = self.context)
#        yield self.cxn.data_vault.removeListener(listener = self.updateParameter, source = None, ID = 66666, context = self.context)

    # returns the current data
    @inlineCallbacks
    def getData(self,context):
        Data = yield self.cxn.data_vault.get(100, context = context)
        if self.data is None:
            self.data = Data
        else:
            yield self.accessingData.acquire()
            self.data = np.append(self.data, Data, 0)
            self.accessingData.release()

    @inlineCallbacks
    def emptyDataBuffer(self):
        yield self.accessingData.acquire()
        del(self.data)
        self.data = None
        self.accessingData.release()

    @inlineCallbacks
    def getYLabels(self):
        labels = []
        variables = yield self.cxn.data_vault.variables(context = self.context)
        for i in range(len(variables[1])):
            labels.append(variables[1][i][1] + ' - ' + self.datasetName)
        returnValue(labels)

    def wait(self, seconds, result=None):
        d = Deferred()
        self.reactor.callLater(seconds, d.callback, result)
        return d
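getData above tests self.data outside the lock and only locks the append, so a concurrent emptyDataBuffer can race the check-then-act. A sketch of a race-free variant that moves the whole update under accessingData (a drop-in for Dataset.getData, assuming the same attributes):

import numpy as np
from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def getData(self, context):
    new_data = yield self.cxn.data_vault.get(100, context=context)
    yield self.accessingData.acquire()
    try:
        if self.data is None:
            self.data = new_data
        else:
            self.data = np.append(self.data, new_data, 0)
    finally:
        self.accessingData.release()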
Esempio n. 48
0
class FreqCounterFPGA(LabradServer):
    name = 'FreqCounter'
    
    def initServer(self):
        self.collectionTime = {0:1.0,1:1.0} #default collection times in the format channel:time(sec)
        self.inCommunication = DeferredLock()
        self.connectOKBoard()
    
    def connectOKBoard(self):
        self.xem = None
        fp = ok.FrontPanel()
        module_count = fp.GetDeviceCount()
        print "Found {} unused modules".format(module_count)
        for i in range(module_count):
            serial = fp.GetDeviceListSerial(i)
            tmp = ok.FrontPanel()
            tmp.OpenBySerial(serial)
            id = tmp.GetDeviceID()
            if id == okDeviceID:
                self.xem = tmp
                print 'Connected to {}'.format(id)
                self.programOKBoard(self.xem)
                return
        print 'Could not find {}'.format(okDeviceID)
        print 'Will try again in {} seconds'.format(devicePollingPeriod)
        reactor.callLater(devicePollingPeriod, self.connectOKBoard)
    
    def programOKBoard(self, xem):
        print 'Programming FPGA'
        basepath = os.environ.get('LABRADPATH',None)
        if not basepath:
            raise Exception('Please set your LABRADPATH environment variable')
        path = os.path.join(basepath,'lattice/okfpgaservers/freqcounter.bit')
        prog = xem.ConfigureFPGA(path)
        if prog: raise Exception("Not able to program FPGA")
        pll = ok.PLL22150()
        xem.GetEepromPLL22150Configuration(pll)
        pll.SetDiv1(pll.DivSrc_VCO,4)
        xem.SetPLL22150Configuration(pll)
    
    def _resetFIFO(self, channel):
        if channel == 0:
            self.xem.ActivateTriggerIn(0x40,0)
        elif channel == 1:
            self.xem.ActivateTriggerIn(0x40,1)
        
    def _setUpdateTime(self, channel, time):
        if channel == 0:
            self.xem.SetWireInValue(0x01,int(1000 * time))
        elif channel == 1:
            self.xem.SetWireInValue(0x02,int(1000 * time))
        self.xem.UpdateWireIns()
    
    @setting(0, 'Get Channels', returns = '*w')
    def getChannels(self, c):
        """
        Get Available Channels
        """
        return self.collectionTime.keys()
       
    @setting(1, 'Set Collection Time', channel = 'w', time = 'v', returns = '')
    def setCollectTime(self, c, channel, time):
        """
        Set collection time for the given channel
        """
        time = float(time)
        if not 0.0 < time < 5.0: raise Exception('incorrect collection time')
        if channel not in self.collectionTime.keys(): raise Exception("Incorrect channel")
        self.collectionTime[channel] = time
        yield self.inCommunication.acquire()
        yield deferToThread(self._setUpdateTime, channel, time)
        self.inCommunication.release()

    @setting(2, 'Reset FIFO', channel = 'w', returns = '')
    def resetFIFO(self,c, channel):
        """
        Resets the FIFO on board, deleting all queued counts
        """
        if channel not in self.collectionTime.keys(): raise Exception("Incorrect channel")
        yield self.inCommunication.acquire()
        yield deferToThread(self._resetFIFO, channel)
        self.inCommunication.release()
    
    @setting(3, 'Get All Counts', channel = 'w', returns = '*(vv)')
    def getALLCounts(self, c, channel):
        """
        Returns the list of counts stored on the FPGA in the form (v1,v2) where v1 is the count rate in Hz
        and v2 is the approximate time of acquisition.
        
        NOTE: For some reason, FPGA ReadFromBlockPipeOut never times out, so we cannot implement requesting more packets than
        are currently stored because it may hang the device.
        """
        if channel not in self.collectionTime.keys(): raise Exception("Incorrect channel")
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetAllCounts, channel)
        self.inCommunication.release()
        returnValue(countlist)
        
    def doGetAllCounts(self, channel):
        inFIFO = self._countsInFIFO(channel)
        reading = self._readCounts(channel, inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf, split)
        countlist = self.convertHz(channel, countlist)
        countlist = self.appendTimes(channel, countlist, time.time())
        return countlist
    
    def convertHz(self, channel, rawCounts):
        Hz = []
        for rawCount in rawCounts:
            Hz.append(float(rawCount) / self.collectionTime[channel])
        return Hz
        
    def appendTimes(self, channel, counts, timeLast):
        "appends the collection times to the list using the last known time"
        collectionTime = self.collectionTime[channel]
        for i in range(len(counts)):
            count = counts[-i - 1]
            counts[-i - 1] = (count, timeLast - i * collectionTime)
        print counts
        return counts
        
    def split_len(self,seq, length):
        #useful for splitting a string in length-long pieces
        return [seq[i:i+length] for i in range(0, len(seq), length)]
    
    def _countsInFIFO(self, channel):
        """
        returns how many counts are in FIFO
        """
        self.xem.UpdateWireOuts()
        if channel == 0:
            inFIFO16bit = self.xem.GetWireOutValue(0x21)
        elif channel == 1:
            inFIFO16bit = self.xem.GetWireOutValue(0x22)
        counts = inFIFO16bit / 2
        return counts
    
    def _readCounts(self, channel, number):
        """
        reads the next number of counts from the FPGA
        """
        buf = "\x00"* ( number * 4 )
        if channel == 0:
            self.xem.ReadFromBlockPipeOut(0xa0,4,buf)
        elif channel == 1:
            self.xem.ReadFromBlockPipeOut(0xa1,4,buf)
        return buf
    
    @staticmethod
    def infoFromBuf(buf):
        #converts the received buffer into useful information
        #the most significant digit of the buffer indicates whether 866 is on or off
        count = 65536*(256*ord(buf[1])+ord(buf[0]))+(256*ord(buf[3])+ord(buf[2]))
        return count
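infoFromBuf assembles the 32-bit count by hand from two little-endian 16-bit words, high word first. The same decoding expressed with struct (a sketch, equivalent to the arithmetic above):

import struct

def info_from_buf(buf):
    hi, lo = struct.unpack('<HH', buf)  # two little-endian 16-bit words
    return (hi << 16) | lo              # high word first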
Esempio n. 49
0
class DAC(LabradServer):
    
    name = 'DAC'
    onNewVoltage = Signal(123556, 'signal: new voltage', '(sv)')
    
    @inlineCallbacks
    def initServer(self):
        self.api_dac  = api_dac()
        self.inCommunication = DeferredLock()
        connected = self.api_dac.connectOKBoard()
        if not connected:
            raise Exception ("Could not connect to DAC")
        self.d = yield self.initializeDAC()
        self.listeners = set()     
    
    @inlineCallbacks
    def initializeDAC(self):
        '''creates dictionary for information storage'''
        d = {}
        for name,channel_number,min_voltage,vpp in [
                             ('comp1', 0, -40.0, 80.0),
                             ('comp2', 1, -40.0, 80.0),
                             ('endcap1', 2, -9.9552, 20.0),
                             ('endcap2', 3, -9.9561, 20.0),
                             ]:
            chan = dac_channel(name, channel_number, min_voltage, vpp)
            chan.voltage = yield self.getRegValue(name)
            d[name] = chan
            value = self.voltage_to_val(chan.voltage, chan.min_voltage, chan.vpp)
            yield self.do_set_voltage(channel_number, value)
        returnValue( d )
    
    @inlineCallbacks
    def getRegValue(self, name):
        yield self.client.registry.cd(['','Servers', 'DAC'], True)
        try:
            voltage = yield self.client.registry.get(name)
        except Exception:
            print '{} not found in registry'.format(name)
            voltage = 0
        returnValue(voltage)
            
    @setting(0, "Set Voltage",channel = 's', voltage = 'v[V]', returns = '')
    def setVoltage(self, c, channel, voltage):
        try:
            chan = self.d[channel]
            minim,total,channel_number = chan.min_voltage, chan.vpp, chan.channel_number
        except KeyError:
            raise Exception ("Channel {} not found".format(channel))
        voltage = voltage['V']
        value = self.voltage_to_val(voltage, minim, total)
        yield self.do_set_voltage(channel_number, value)
        chan.voltage = voltage
        self.notifyOtherListeners(c, (channel, voltage), self.onNewVoltage)
    
    @inlineCallbacks
    def do_set_voltage(self, channel_number, value):
        yield self.inCommunication.acquire()
        try:
            yield deferToThread(self.api_dac.setVoltage, channel_number, value)
            confirmation = yield deferToThread(self.api_dac.getVoltage, channel_number)
            print 'setting value', value
            if value != confirmation:
                raise Exception("Board did not set the voltage properly")
        except Exception as e:
            raise e
        finally:
            self.inCommunication.release()
        
    def voltage_to_val(self, voltage, minim, total, prec = 16):
        '''converts voltage of a channel to FPGA-understood sequential value'''
        value = int((voltage - minim) / total * (2 ** prec  - 1) )
        if not 0 <= value <= 2 ** prec - 1: raise Exception ("Voltage Out of Range")
        return value
           
    @setting(1, "Get Voltage", channel = 's', returns = 'v[V]')
    def getVoltage(self, c, channel):
        try:
            voltage = self.d[channel].voltage
        except KeyError:
            raise Exception ("Channel {} not found".format(channel))
        return WithUnit(voltage, 'V')
    
    @setting(2, "Get Range", channel = 's', returns = '(v[V]v[V])')
    def getRange(self, c, channel):
        try:
            chan = self.d[channel]
            minim,maxim = chan.min_voltage,chan.max_voltage
        except KeyError:
            raise Exception ("Channel {} not found".format(channel))
        return (WithUnit(minim,'V'), WithUnit(maxim, 'V'))
    
    def notifyOtherListeners(self, context, message, f):
        """
        Notifies all listeners except the one in the given context, executing function f
        """
        notified = self.listeners.copy()
        notified.remove(context.ID)
        f(message,notified)
    
    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
    
    @inlineCallbacks
    def stopServer(self):
        '''save the latest voltage information into registry'''
        try:
            yield self.client.registry.cd(['','Servers', 'DAC'], True)
            for name,channel in self.d.iteritems():
                yield self.client.registry.set(name, channel.voltage)
        except AttributeError:
            #if dictionary doesn't exist yet (i.e bad identification error), do nothing
            pass

    @setting(3, "Set Endcaps", voltage = 'v[V]', returns = '')
    def setEndcaps(self, c, voltage):
        for channel in ['endcap1', 'endcap2']:
            try:
                chan = self.d[channel]
                minim,total,channel_number = chan.min_voltage, chan.vpp, chan.channel_number
            except KeyError:
                raise Exception ("Channel {} not found".format(channel))
            voltage_value = voltage['V']
            value = self.voltage_to_val(voltage_value, minim, total)
            yield self.do_set_voltage(channel_number, value)
            chan.voltage = voltage_value
            self.notifyOtherListeners(c, (channel, voltage_value), self.onNewVoltage)
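voltage_to_val maps a channel's physical range onto the DAC's 16-bit code space. A small worked check using the 'comp1' numbers from initializeDAC (min -40 V, span 80 V):

def voltage_to_val(voltage, minim, total, prec=16):
    value = int((voltage - minim) / total * (2 ** prec - 1))
    if not 0 <= value <= 2 ** prec - 1:
        raise Exception("Voltage Out of Range")
    return value

assert voltage_to_val(0.0, -40.0, 80.0) == 32767    # 0 V sits at mid-scale
assert voltage_to_val(-40.0, -40.0, 80.0) == 0      # bottom of the range
assert voltage_to_val(40.0, -40.0, 80.0) == 65535   # top of the range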
Esempio n. 50
0
class Pulser(DDS, LineTrigger):
    
    name = 'Pulser'
    onSwitch = Signal(611051, 'signal: switch toggled', '(ss)')
    
    #@inlineCallbacks
    def initServer(self):
        self.api  = api()
        self.channelDict = hardwareConfiguration.channelDict
        self.collectionTime = hardwareConfiguration.collectionTime
        self.collectionMode = hardwareConfiguration.collectionMode
        self.sequenceType = hardwareConfiguration.sequenceType
        self.isProgrammed = hardwareConfiguration.isProgrammed
        self.timeResolution = float(hardwareConfiguration.timeResolution)
        self.ddsDict = hardwareConfiguration.ddsDict
        self.timeResolvedResolution = hardwareConfiguration.timeResolvedResolution
        self.remoteChannels = hardwareConfiguration.remoteChannels
        self.collectionTimeRange = hardwareConfiguration.collectionTimeRange
        self.sequenceTimeRange = hardwareConfiguration.sequenceTimeRange
        self.haveSecondPMT = hardwareConfiguration.secondPMT
        self.haveDAC = hardwareConfiguration.DAC
        self.inCommunication = DeferredLock()
        self.clear_next_pmt_counts = 0
        self.hwconfigpath = os.path.dirname(inspect.getfile(hardwareConfiguration))
        print self.hwconfigpath
        #LineTrigger.initialize(self)
        #self.initializeBoard()
        #yield self.initializeRemote()
        #self.initializeSettings()
        #yield self.initializeDDS()
        self.ddsLock = True
        self.listeners = set()

    def initializeBoard(self):
        connected = self.api.connectOKBoard()
        if not connected:
            raise Exception ("Pulser Not Found")
            
    def initializeSettings(self):
        for channel in self.channelDict.itervalues():
            channelnumber = channel.channelnumber
            if channel.ismanual:
                state = self.cnot(channel.manualinv, channel.manualstate)
                self.api.setManual(channelnumber, state)
            else:
                self.api.setAuto(channelnumber, channel.autoinv)
    
    @inlineCallbacks
    def initializeRemote(self):
        self.remoteConnections = {}
        if len(self.remoteChannels):
            from labrad.wrappers import connectAsync
            for name,rc in self.remoteChannels.iteritems():
                try:
                    self.remoteConnections[name] = yield connectAsync(rc.ip)
                    print 'Connected to {}'.format(name)
                except:
                    print 'Not Able to connect to {}'.format(name)
                    self.remoteConnections[name] = None

    @setting(0, "New Sequence", returns = '')
    def newSequence(self, c):
        """
        Create New Pulse Sequence
        """
        c['sequence'] = Sequence(self)
    
    @setting(1, "Program Sequence", returns = '')
    def programSequence(self, c, sequence):
        """
        Programs Pulser with the current sequence.
        """
        #print "program sequence"
        sequence = c.get('sequence')
        if not sequence: raise Exception("Please create new sequence first")
        dds,ttl = sequence.progRepresentation()
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.programBoard, ttl)
        if dds is not None: yield self._programDDSSequence(dds)
        self.inCommunication.release()
        self.isProgrammed = True
        #self.api.resetAllDDS()
        #print "done programming"

    @setting(37, 'Get dds program representation', returns = '*(ss)')
    def get_dds_program_representation(self,c):   
        sequence = c.get('sequence')
        dds, ttl = sequence.progRepresentation()
        # As labrad cannot handle returning the bytearray, we convert it to string first
        for key, value in dds.iteritems():
            dds[key] = str(value)
        # It also cannot handle dictionaries, so we recreate it as a list of tuples
        passable = dds.items()
        return passable

    @setting(38, 'Program dds and ttl')
    def program_dds_and_ttl(self,c,dds,ttl):
        dds = bytearray(dds)
        ttl = bytearray(ttl)
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.programBoard, ttl)
        yield self._programDDSSequenceBurst(dds)
        yield self.inCommunication.release()
        self.isProgrammed = True
        returnValue(self.isProgrammed)
    
    @setting(2, "Start Infinite", returns = '')
    def startInfinite(self,c):
        if not self.isProgrammed: raise Exception ("No Programmed Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setNumberRepeatitions, 0)
        yield deferToThread(self.api.resetSeqCounter)
        yield deferToThread(self.api.startLooped)
        self.sequenceType = 'Infinite'
        self.inCommunication.release()
    
    @setting(3, "Complete Infinite Iteration", returns = '')
    def completeInfinite(self,c):
        if self.sequenceType != 'Infinite': raise Exception( "Not Running Infinite Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.startSingle)
        self.inCommunication.release()
    
    @setting(4, "Start Single", returns = '')
    def start(self, c):
        if not self.isProgrammed: raise Exception ("No Programmed Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetSeqCounter)
        yield deferToThread(self.api.startSingle)
        self.sequenceType = 'One'
        self.inCommunication.release()
    
    @setting(5, 'Add TTL Pulse', channel = 's', start = 'v[s]', duration = 'v[s]')
    def addTTLPulse(self, c, channel, start, duration):
        """
        Add a TTL Pulse to the sequence, times are in seconds
        """
        if channel not in self.channelDict.keys(): raise Exception("Unknown Channel {}".format(channel))
        hardwareAddr = self.channelDict.get(channel).channelnumber
        sequence = c.get('sequence')
        start = start['s']
        duration = duration['s']
        #simple error checking
        if not ( (self.sequenceTimeRange[0] <= start <= self.sequenceTimeRange[1]) and (self.sequenceTimeRange[0] <= start + duration <= self.sequenceTimeRange[1])): raise Exception ("Time boundaries are out of range")
        if not duration >= self.timeResolution: raise Exception ("Incorrect duration")
        if not sequence: raise Exception ("Please create new sequence first")
        sequence.addPulse(hardwareAddr, start, duration)
    
    @setting(6, 'Add TTL Pulses', pulses = '*(sv[s]v[s])')
    def addTTLPulses(self, c, pulses):
        """
        Add multiple TTL Pulses to the sequence, times are in seconds. The pulses are a list in the same format as 'add ttl pulse'.
        """
        for pulse in pulses:
            channel = pulse[0]
            start = pulse[1]
            duration = pulse[2]
            yield self.addTTLPulse(c, channel, start, duration)
    
    @setting(7, "Extend Sequence Length", timeLength = 'v[s]')
    def extendSequenceLength(self, c, timeLength):
        """
        Allows to optionally extend the total length of the sequence beyond the last TTL pulse.
        """
        sequence = c.get('sequence')
        if not (self.sequenceTimeRange[0] <= timeLength['s'] <= self.sequenceTimeRange[1]): raise Exception ("Time boundaries are out of range")
        if not sequence: raise Exception ("Please create new sequence first")
        sequence.extendSequenceLength(timeLength['s'])
        
    @setting(8, "Stop Sequence")
    def stopSequence(self, c):
        """Stops any currently running sequence"""
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetRam)
        if self.sequenceType =='Infinite':
            yield deferToThread(self.api.stopLooped)
        elif self.sequenceType =='One':
            yield deferToThread(self.api.stopSingle)
        elif self.sequenceType =='Number':
            yield deferToThread(self.api.stopLooped)
        self.inCommunication.release()
        self.sequenceType = None
        self.ddsLock = False
    
    @setting(9, "Start Number", repetition = 'w')
    def startNumber(self, c, repetition):
        """
        Starts the repetition number of iterations
        """
        if not self.isProgrammed: raise Exception ("No Programmed Sequence")
        repetition = int(repetition)
        
        #print "start iterations of ", repetition
        
        if not 1 <= repetition <= (2**16 - 1): raise Exception ("Incorrect number of pulses")
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setNumberRepeatitions, repetition)
        yield deferToThread(self.api.resetSeqCounter)
        yield deferToThread(self.api.startLooped)
        self.sequenceType = 'Number'
        self.inCommunication.release()

    @setting(10, "Human Readable TTL", returns = '*2s')
    def humanReadableTTL(self, c):
        """
        Returns a readable form of the programmed sequence for debugging
        """
        sequence = c.get('sequence')
        if not sequence: raise Exception ("Please create new sequence first")
        ttl,dds = sequence.humanRepresentation()
        return ttl.tolist()
    
    @setting(11, "Human Readable DDS", returns = '*(svv)')
    def humanReadableDDS(self, c):
        """
        Returns a readable form of the programmed sequence for debugging
        """
        sequence = c.get('sequence')
        if not sequence: raise Exception ("Please create new sequence first")
        ttl,dds = sequence.humanRepresentation()
        return dds
    
    @setting(12, 'Get Channels', returns = '*(sw)')
    def getChannels(self, c):
        """
        Returns all available channels, and the corresponding hardware numbers
        """
        d = self.channelDict
        keys = d.keys()
        numbers = [d[key].channelnumber for key in keys]
        return zip(keys,numbers)
    
    @setting(13, 'Switch Manual', channelName = 's', state= 'b')
    def switchManual(self, c, channelName, state = None):
        """
        Switches the given channel into the manual mode, by default will go into the last remembered state but can also
        pass the argument which state it should go into.
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        channelNumber = channel.channelnumber
        channel.ismanual = True
        if state is not None:
            channel.manualstate = state
        else:
            state = channel.manualstate
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setManual, channelNumber, self.cnot(channel.manualinv, state))
        self.inCommunication.release()
        if state:
            self.notifyOtherListeners(c,(channelName,'ManualOn'), self.onSwitch)
        else:
            self.notifyOtherListeners(c,(channelName,'ManualOff'), self.onSwitch)
    
    @setting(14, 'Switch Auto', channelName = 's', invert= 'b')
    def switchAuto(self, c, channelName, invert = None):
        """
        Switches the given channel into the automatic mode, with an optional inversion.
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        channelNumber = channel.channelnumber
        channel.ismanual = False
        if invert is not None:
            channel.autoinv = invert
        else:
            invert = channel.autoinv
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setAuto, channelNumber, invert)
        self.inCommunication.release()
        self.notifyOtherListeners(c,(channelName,'Auto'), self.onSwitch)

    @setting(15, 'Get State', channelName = 's', returns = '(bbbb)')
    def getState(self, c, channelName):
        """
        Returns the current state of the switch: in the form (Manual/Auto, ManualOn/Off, ManualInversionOn/Off, AutoInversionOn/Off)
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        answer = (channel.ismanual,channel.manualstate,channel.manualinv,channel.autoinv)
        return answer
    
    @setting(16, 'Wait Sequence Done', timeout = 'v', returns = 'b')
    def waitSequenceDone(self, c, timeout = None):
        """
        Returns true if the sequence has completed within a timeout period (in seconds)
        """
        if timeout is None: timeout = self.sequenceTimeRange[1]
        #print timeout
        requestCalls = int(timeout / 0.050 ) #number of request calls
        for i in range(requestCalls):
            yield self.inCommunication.acquire()
            done = yield deferToThread(self.api.isSeqDone)
            self.inCommunication.release()
            if done: returnValue(True)
            yield self.wait(0.050)
        returnValue(False)
    
    @setting(17, 'Repeatitions Completed', returns = 'w')
    def repeatitionsCompleted(self, c):
        """Check how many repeatitions have been completed in for the infinite or number modes"""
        yield self.inCommunication.acquire()
        completed = yield deferToThread(self.api.howManySequencesDone)
        self.inCommunication.release()
        returnValue(completed)

    
    @setting(21, 'Set Mode', mode = 's', returns = '')
    def setMode(self, c, mode):
        """
        Set the counting mode, either 'Normal' or 'Differential'
        In the Normal Mode, the FPGA automatically sends the counts with a preset frequency
        In the differential mode, the FPGA uses triggers the pulse sequence
        frequency and to know when the repumping light is swtiched on or off.
        """
        if mode not in self.collectionTime.keys(): raise Exception("Incorrect mode")
        self.collectionMode = mode
        countRate = self.collectionTime[mode]
        yield self.inCommunication.acquire()
        if mode == 'Normal':
            #set the mode on the device and set update time for normal mode
            yield deferToThread(self.api.setModeNormal)
            yield deferToThread(self.api.setPMTCountRate, countRate)
        elif mode == 'Differential':
            yield deferToThread(self.api.setModeDifferential)
        self.clear_next_pmt_counts = 3 #clear the next three counts
        self.inCommunication.release()
    
    @setting(22, 'Set Collection Time', new_time = 'v', mode = 's', returns = '')
    def setCollectTime(self, c, new_time, mode):
        """
        Sets how long to collect photons in either 'Normal' or 'Differential' mode of operation
        """
        new_time = float(new_time)
        if not self.collectionTimeRange[0]<=new_time<=self.collectionTimeRange[1]: raise Exception('incorrect collection time')
        if mode not in self.collectionTime.keys(): raise Exception("Incorrect mode")
        if mode == 'Normal':
            self.collectionTime[mode] = new_time
            yield self.inCommunication.acquire()
            yield deferToThread(self.api.setPMTCountRate, new_time)
            self.clear_next_pmt_counts = 3 #clear the next three counts
            self.inCommunication.release()
        elif mode == 'Differential':
            self.collectionTime[mode] = new_time
            self.clear_next_pmt_counts = 3 #clear the next three counts
        
    @setting(23, 'Get Collection Time', returns = '(vv)')
    def getCollectTime(self, c):
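        """Returns the allowed collection time range (min, max)."""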
        return self.collectionTimeRange
    
    @setting(24, 'Reset FIFO Normal', returns = '')
    def resetFIFONormal(self,c):
        """
        Resets the FIFO on board, deleting all queued counts
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetFIFONormal)
        self.inCommunication.release()
    
    @setting(25, 'Get PMT Counts', returns = '*(vsv)')
    def getALLCounts(self, c):
        """
        Returns the list of counts stored on the FPGA in the form (v,s1,s2) where v is the count rate in kc/sec,
        s1 is 'ON' in normal mode (or in differential mode with 866 on) and 'OFF' in differential
        mode when 866 is off, and s2 is the approximate time of acquisition.
        NOTE: For some reason, FPGA ReadFromBlockPipeOut never times out, so requesting more packets than are
        currently stored cannot be implemented because it may hang the device.
        """
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetAllCounts)
        self.inCommunication.release()
        returnValue(countlist)
    
    @setting(26, 'Get Readout Counts', returns = '*v')
    def getReadoutCounts(self, c):
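        """Returns the readout counts accumulated in the FPGA readout FIFO."""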
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetReadoutCounts)
        self.inCommunication.release()
        returnValue(countlist)
        
    @setting(27, 'Reset Readout Counts')
    def resetReadoutCounts(self, c):
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetFIFOReadout)
        self.inCommunication.release()

    @setting(39, 'Get Metablock Counts')
    def getMetablockCounts(self, c):
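        """Returns [counts, started_programming, ended_programming] parsed from the FPGA metablock word."""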
        yield self.inCommunication.acquire()
        counts = yield deferToThread(self.api.getMetablockCounts)
        self.inCommunication.release()
        string = bin(counts)
        print string
        string = string[2:] #remove the leading '0b'
        started_programming = int(string[0],2)
        ended_programming = int(string[1],2)
        counts = int(string[2:],2)
        returnValue([counts,started_programming,ended_programming])

    @setting(40, 'Get hardwareconfiguration Path', returns = 's')
    def getHardwareconfigurationPath(self,c):
        ''' 
        Returns the path where the hardware configuration file is located
        '''
        return self.hwconfigpath
        
    #debugging settings
    @setting(90, 'Internal Reset DDS', returns = '')
    def internal_reset_dds(self, c):
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetAllDDS)
        self.inCommunication.release()
        
    @setting(91, 'Internal Advance DDS', returns = '')
    def internal_advance_dds(self, c):
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.advanceAllDDS)
        self.inCommunication.release()
    
    @setting(92, "Reinitialize DDS", returns = '')
    def reinitializeDDS(self, c):
        """
        Reprograms the DDS chip to its initial state
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.initializeDDS)
        self.inCommunication.release()
        
    def doGetAllCounts(self):
        inFIFO = self.api.getNormalTotal()
        reading = self.api.getNormalCounts(inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf, split)
        countlist = map(self.convertKCperSec, countlist)
        countlist = self.appendTimes(countlist, time.time())
        countlist = self.clear_pmt_counts(countlist)
        return countlist

    def clear_pmt_counts(self, l):
        '''removes the first clear_next_pmt_counts counts from the list'''
        try:
            while self.clear_next_pmt_counts:
                cleared = l.pop(0)
                self.clear_next_pmt_counts -= 1
            return l
        except IndexError:
            return []
    
    def doGetReadoutCounts(self):
        inFIFO = self.api.getReadoutTotal()
        reading = self.api.getReadoutCounts(inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf_readout, split)
        return countlist
    
    @staticmethod
    def infoFromBuf(buf):
        #converts the received buffer into useful information
        #the most significant bit of the buffer indicates whether 866 is on or off
        count = 65536*(256*ord(buf[1])+ord(buf[0]))+(256*ord(buf[3])+ord(buf[2]))
        if count >= 2**31:
            status = 'OFF'
            count = count % 2**31
        else:
            status = 'ON'
        return [count, status]
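    #Worked example of the decoding above (buf[1]buf[0] form the high 16-bit
    #word, buf[3]buf[2] the low word, and the top bit carries the 866 status):
    #  infoFromBuf('\x01\x00\x02\x00') -> high word 1, low word 2,
    #  count = 65536*1 + 2 = 65538, MSB clear, so the result is [65538, 'ON']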
    
    #should be made nicer by combining with infoFromBuf above.
    @staticmethod
    def infoFromBuf_readout(buf):
        count = 65536*(256*ord(buf[1])+ord(buf[0]))+(256*ord(buf[3])+ord(buf[2]))
        return count
    
    def convertKCperSec(self, inp):
        [rawCount,typ] = inp
        countKCperSec = float(rawCount) / self.collectionTime[self.collectionMode] / 1000.
        return [countKCperSec, typ]
        
    def appendTimes(self, l, timeLast):
        #in the case that we received multiple PMT counts, uses the current time
        #and the collection time to guess the arrival time of the previous readings
        #e.g. ( [[1,2],[2,3]], timeLast = 1.0, collectionTime = 0.1 ) ->
        # [ (1,2,0.9), (2,3,1.0) ]
        collectionTime = self.collectionTime[self.collectionMode]
        for i in range(len(l)):
            l[-i - 1].append(timeLast - i * collectionTime)
            l[-i - 1] = tuple(l[-i - 1])
        return l
    
    def split_len(self,seq, length):
        '''useful for splitting a string into length-long pieces'''
        return [seq[i:i+length] for i in range(0, len(seq), length)]
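    #e.g. split_len('abcdefgh', 4) -> ['abcd', 'efgh']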
    
    @setting(28, 'Get Collection Mode', returns = 's')
    def getMode(self, c):
        return self.collectionMode
    
    @setting(31, "Reset Timetags")
    def resetTimetags(self, c):
        """Reset the time resolved FIFO to clear any residual timetags"""
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetFIFOResolved)
        self.inCommunication.release()
    
    @setting(32, "Get Timetags", returns = '*v')
    def getTimetags(self, c):
        """Get the time resolved timetags"""
        yield self.inCommunication.acquire()
        counted = yield deferToThread(self.api.getResolvedTotal)
        raw = yield deferToThread(self.api.getResolvedCounts, counted)
        self.inCommunication.release()
        arr = numpy.fromstring(raw, dtype = numpy.uint16)
        del(raw)
        arr = arr.reshape(-1,2)
        timetags = (65536 * arr[:,0] + arr[:,1]) * self.timeResolvedResolution
        returnValue(timetags)
    
    @setting(33, "Get TimeTag Resolution", returns = 'v')
    def getTimeTagResolution(self, c):
        return self.timeResolvedResolution
    
    #Methods relating to using the optional second PMT
    @setting(36, 'Get Secondary PMT Counts', returns = '*(vsv)')
    def getAllSecondaryCounts(self, c):
        if not self.haveSecondPMT: raise Exception("No Second PMT")
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetAllSecondaryCounts)
        self.inCommunication.release()
        returnValue(countlist)
            
    def doGetAllSecondaryCounts(self):
        if not self.haveSecondPMT: raise Exception("No Second PMT")
        inFIFO = self.api.getSecondaryNormalTotal()
        reading = self.api.getSecondaryNormalCounts(inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf, split)
        countlist = map(self.convertKCperSec, countlist)
        countlist = self.appendTimes(countlist, time.time())
        return countlist        


    def wait(self, seconds, result=None):
        """Returns a deferred that will be fired later"""
        d = Deferred()
        reactor.callLater(seconds, d.callback, result)
        return d
    
    def cnot(self, control, inp):
        if control:
            inp = not inp
        return inp
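    #cnot is a conditional inversion (XOR): the input passes through unchanged
    #unless control is True, e.g. cnot(False, True) -> True, cnot(True, True) -> False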
    
    def notifyOtherListeners(self, context, message, f):
        """
        Notifies all listeners except the one in the given context, executing function f
        """
        notified = self.listeners.copy()
        notified.remove(context.ID)
        f(message,notified)
    
    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
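# A minimal client-side sketch of driving the switch settings above, assuming
# a running LabRAD manager and that this server is registered under the
# hypothetical name 'pulser'; the channel name '866DP' is illustrative:
#
#   import labrad
#   cxn = labrad.connect()
#   pulser = cxn.pulser
#   pulser.switch_auto('866DP', False)  #automatic mode, no inversion
#   ismanual, manstate, maninv, autoinv = pulser.get_state('866DP')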
class Dataset(QtCore.QObject):
    
    """Class to handle incoming data and prepare them for plotting """
    def __init__(self, cxn, context, dataset):
        super(Dataset, self).__init__()
        self.accessingData = DeferredLock()
        self.cxn = cxn
        self.context = context # context of the first dataset in the window
        self.dataset = dataset
        self.data = None
        self.setupDataListener(self.context)
        
    # open dataset in order to listen for new data signals in current context        
    @inlineCallbacks
    def openDataset(self):
        yield self.cxn.data_vault.cd(DIRECTORY, context = self.context)
        yield self.cxn.data_vault.open(self.dataset, context = self.context)
        
    # sets up the listener for new data
    @inlineCallbacks
    def setupDataListener(self, context):
        yield self.cxn.data_vault.signal__data_available(11111, context = context)
        yield self.cxn.data_vault.addListener(listener = self.updateData, source = None, ID = 11111, context = context)
        #self.setupDeferred.callback(True)
         
    # new data signal
    def updateData(self,x,y):
        self.getData(self.context)
    
    def waitfor(self):
        #unfinished: set up a timer and start a looping call:
        #  - if the timer expires, return False
        #  - if the 'plot' parameter exists, return True
        try:
            self.cxn.data_vault.get('plot')
        except:
            pass
      
#    # returns the number of things to plot
#    @inlineCallbacks
#    def getPlotnum(self,context):
#        variables = yield self.cxn.data_vault.variables(context = context)
#        plotNum = len(variables[1])
#        returnValue(plotNum) 

    # returns the current data
    @inlineCallbacks
    def getData(self,context):
        Data = yield self.cxn.data_vault.get(100, context = context)
        if self.data is None:
            self.data = Data.asarray
        else:
            yield self.accessingData.acquire()         
            self.data = np.append(self.data, Data.asarray, 0)
            self.accessingData.release()
        
    @inlineCallbacks
    def emptyDataBuffer(self):
        print 'in empty, waiting to acquire'
        yield self.accessingData.acquire()
        del(self.data)
        self.data = None
        print 'self data should be none now'
        self.accessingData.release()
Esempio n. 52
0
class UnitRelationLifecycle(object):
    """Unit Relation Lifcycle management.

    Provides for watching related units in a relation, and executing hooks
    in response to changes. The lifecycle is driven by the workflow.

    The Unit relation lifecycle glues together a number of components.
    It controls a watcher that receives watch events from zookeeper,
    and it controls a hook scheduler which gets fed those events. When
    the scheduler wants to execute a hook, the executor is called with
    the hook path and the hook invoker.

    **Relation hook invocations do not maintain global order or
    determinism across relations**. They only maintain ordering and
    determinism within a relation. A shared scheduler across relations
    would be needed to maintain such behavior.

    See docs/source/internals/unit-workflow-lifecycle.rst for a brief
    discussion of some of the more interesting implementation decisions.
    """

    def __init__(self, client, unit_name, unit_relation, relation_ident,
                 unit_dir, state_dir, executor):
        self._client = client
        self._unit_dir = unit_dir
        self._relation_ident = relation_ident
        self._relation_name = relation_ident.split(":")[0]
        self._unit_relation = unit_relation
        self._unit_name = unit_name
        self._executor = executor
        self._run_lock = DeferredLock()
        self._log = logging.getLogger("unit.relation.lifecycle")
        self._error_handler = None

        schedule_path = os.path.join(
            state_dir, "%s.schedule" % unit_relation.internal_relation_id)
        self._scheduler = HookScheduler(
            client, self._execute_change_hook, self._unit_relation,
            self._relation_ident, unit_name, schedule_path)
        self._watcher = None

    @property
    def watching(self):
        """Are we queuing up hook executions in response to state changes?"""
        return self._watcher and self._watcher.running

    @property
    def executing(self):
        """Are we currently dequeuing and executing any queued hooks?"""
        return self._scheduler.running

    def set_hook_error_handler(self, handler):
        """Set an error handler to be invoked if a hook errors.

        The handler should accept two parameters, the RelationChange that
        triggered the hook, and the exception instance.
        """
        self._error_handler = handler

    @inlineCallbacks
    def start(self, start_watches=True, start_scheduler=True):
        """Start watching related units and executing change hooks.

        :param bool start_watches: True to start relation watches

        :param bool start_scheduler: True to run the scheduler and actually
            react to any changes delivered by the watcher
        """
        yield self._run_lock.acquire()
        try:
            # Start the hook execution scheduler.
            if start_scheduler and not self.executing:
                self._scheduler.run()
            # Create a watcher if we don't have one yet.
            if self._watcher is None:
                self._watcher = yield self._unit_relation.watch_related_units(
                    self._scheduler.cb_change_members,
                    self._scheduler.cb_change_settings)
            # And start the watcher.
            if start_watches and not self.watching:
                yield self._watcher.start()
        finally:
            self._run_lock.release()
        self._log.debug(
            "started relation:%s lifecycle", self._relation_name)

    @inlineCallbacks
    def stop(self, stop_watches=True):
        """Stop executing relation change hooks; maybe stop watching changes.

        :param bool stop_watches: True to stop watches as well as scheduler
            (which will prevent changes from being detected and queued, as well
            as stopping them being executed).
        """
        yield self._run_lock.acquire()
        try:
            if stop_watches and self.watching:
                self._watcher.stop()
            if self._scheduler.running:
                self._scheduler.stop()
        finally:
            yield self._run_lock.release()
        self._log.debug("stopped relation:%s lifecycle", self._relation_name)

    @inlineCallbacks
    def depart(self):
        """Inform the charm that the service has departed the relation.
        """
        self._log.debug("depart relation lifecycle")
        unit_id = self._unit_relation.internal_unit_id
        context = DepartedRelationHookContext(
            self._client, self._unit_name, unit_id, self._relation_name,
            self._unit_relation.internal_relation_id)
        change = RelationChange(self._relation_ident, "departed", "")
        invoker = self._get_invoker(context, change)
        hook_name = "%s-relation-broken" % self._relation_name
        yield self._execute_hook(invoker, hook_name, change)

    def _get_invoker(self, context, change):
        socket_path = os.path.join(self._unit_dir, HOOK_SOCKET_FILE)
        return RelationInvoker(
            context, change, "constant", socket_path, self._unit_dir,
            hook_log)

    def _execute_change_hook(self, context, change):
        """Invoked by the contained HookScheduler, to execute a hook.

        We utilize the HookExecutor to execute the hook; if an
        error occurs, it will be reraised, unless an error handler
        is specified (see ``set_hook_error_handler``).
        """
        if change.change_type == "departed":
            hook_name = "%s-relation-departed" % self._relation_name
        elif change.change_type == "joined":
            hook_name = "%s-relation-joined" % self._relation_name
        else:
            hook_name = "%s-relation-changed" % self._relation_name

        invoker = self._get_invoker(context, change)
        return self._execute_hook(invoker, hook_name, change)

    @inlineCallbacks
    def _execute_hook(self, invoker, hook_name, change):
        hook_path = os.path.join(
            self._unit_dir, "charm", "hooks", hook_name)
        yield self._run_lock.acquire()
        self._log.debug("Executing hook %s", hook_name)
        try:
            yield self._executor(invoker, hook_path)
        except Exception, e:
            # We can't hold the run lock when we invoke the error
            # handler, or we get a deadlock if the handler
            # manipulates the lifecycle.
            yield self._run_lock.release()
            self._log.warn("Error in %s hook: %s", hook_name, e)

            if not self._error_handler:
                raise
            self._log.info(
                "Invoked error handler for %s hook", hook_name)
            yield self._error_handler(change, e)
            returnValue(False)
        else:
Esempio n. 53
0
class ZipStream(object):

    def __init__(self, consumer):
        self.consumer = consumer
        assert IConsumer.implementedBy(consumer.__class__)

        self._producers = []

        self._sendingLock = DeferredLock()
        self._localHeaderLength = 0
        self._centralDirectoryLength = 0


    @inlineCallbacks
    def addProducer(self, producer):
        assert IZippyProducer.implementedBy(producer.__class__)

        size = yield producer.size()
        timestamp = yield producer.timestamp()
        crc32 = yield producer.crc32()
        key = yield producer.key()

        yield self._sendingLock.acquire()

        self._producers.append((producer, self._localHeaderLength))

        # local file header
        timestamp = dos_timestamp(timestamp)
        localHeader = struct.pack('<L5H3L2H', # format 
                                  0x04034b50, # magic (4 bytes)
                                  20, # version needed to extract (2 bytes)
                                  0, # general purpose bit flag (2 bytes)
                                  0, # compression method (2 bytes)
                                  timestamp[1], # last mod file time (2 bytes)
                                  timestamp[0], # last mod file date (2 bytes)
                                  crc32 & 0xffffffff, # CRC (4 bytes)
                                  size, # compressed size (4 bytes)
                                  size, # uncompressed size (4 bytes)
                                  len(key), # file name length (2 bytes)
                                  0, # extra field length (2 bytes)
                                 )

        localHeader += key  #use the key already resolved above (key() may return a Deferred)
        self.consumer.write(localHeader)
        self._localHeaderLength += len(localHeader) + size

        # file data
        yield producer.beginProducing(self.consumer)

        self._sendingLock.release()


    @inlineCallbacks
    def centralDirectory(self):
        yield self._sendingLock.acquire()

        # file header
        for producer, offset in self._producers:
            size = yield producer.size()
            timestamp = yield producer.timestamp()
            timestamp = dos_timestamp(timestamp)
            crc32 = yield producer.crc32()
            key = yield producer.key()

            fileHeader = struct.pack('<L6H3L5H2L', # format
                                     0x02014b50, # magic (4 bytes)
                                     20, # version made by (2 bytes)
                                     20, # version needed to extract (2 bytes)
                                     0, # general purpose bit flag (2 bytes)
                                     0, # compression method (2 bytes)
                                     timestamp[1], # last mod file time (2 bytes)
                                     timestamp[0], # last mod file date (2 bytes)
                                     crc32 & 0xffffffff, # CRC (4 bytes)
                                     size, # compressed size (4 bytes)
                                     size, # uncompressed size(4 bytes)
                                     len(key), # file name length (2 bytes)
                                     0, # extra field length (2 bytes)
                                     0, # file comment length (2 bytes)
                                     0, # disk number start (2 bytes)
                                     0, # internal file attributes (2 bytes)
                                     0, # external file attributes (4 bytes)
                                     offset, # relative offset of local header (4 bytes)
                                    )

            fileHeader += key  #use the key already resolved above (key() may return a Deferred)
            self._centralDirectoryLength += len(fileHeader)
            self.consumer.write(fileHeader)


        # end of central directory header
        endHeader = struct.pack('<L4H2LH', # format
                                0x06054b50, # magic (4 bytes)
                                0, # disk number (2 bytes)
                                0, # disk number with start of central directory (2 bytes)
                                len(self._producers), # total central directory entries on this disk (2 bytes)
                                len(self._producers), # total central directory entries (2 bytes)
                                self._centralDirectoryLength, # size of central directory (4 bytes)
                                self._localHeaderLength, # offset of start of central directory with respect to the starting disk number (4 bytes)
                                0, # zip file comment length (2 bytes)
                               )
        self.consumer.write(endHeader)



        self._sendingLock.release()
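# A minimal sketch of streaming a zip with the class above, assuming
# `consumer` provides IConsumer and each producer provides the IZippyProducer
# interface used here (size/timestamp/crc32/key/beginProducing):
#
#   @inlineCallbacks
#   def stream_zip(consumer, producers):
#       zs = ZipStream(consumer)
#       for producer in producers:
#           yield zs.addProducer(producer)  #local header + file data
#       yield zs.centralDirectory()         #central directory + end record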
Esempio n. 54
0
class NotificationSource(object):
    """
    An AMQP consumer which handles messages sent over a "frontend" queue to
    set up temporary queues.  The L{get} method should be invoked to
    retrieve one single message from those temporary queues.

    @ivar timeout: time to wait for a message before giving up in C{get}.
    """

    # The timeout must be lower than the Apache one in front, which by default
    # is 5 minutes.
    timeout = 270

    def __init__(self, connector, prefix=None, clock=reactor):
        """
        @param connector: A callable returning a deferred which should fire
            with an opened AMQChannel. The deferred is expected to never
            errback (typically it will be fired by some code which in case
            of failure keeps retrying to connect to a broker or a cluster
            of brokers).
        @param prefix: Optional prefix for identifying the AMQP queues we
            should consume messages from.
        @param clock: An object implementing IReactorTime.
        """
        self._connector = connector
        self._prefix = prefix
        self._clock = clock
        self._channel_lock = DeferredLock()
        # Preserve compatibility by using special forms for naming when a
        # prefix is specified.
        if self._prefix is not None and len(self._prefix) != 0:
            self._tag_form = "%s.notifications-tag.%%s.%%s" % self._prefix
            self._queue_form = "%s.notifications-queue.%%s" % self._prefix
        else:
            self._tag_form = "%s.%s"
            self._queue_form = "%s"

    @inlineCallbacks
    def get(self, uuid, sequence):
        """Request the next L{Notification} for C{uuid}.

        @param uuid: The identifier of the notifications stream.
        @param sequence: Sequential number for identifying this particular
            request. This makes it possible to invoke this API more than once
            concurrently to handle the same notification. Typically only
            one notification will be actually processed and the other discarded
            as duplicates. The FrontEndAjax code makes use of this feature
            in order to get rid of dead requests. See #745708.

        If no notification is received within the number of seconds in
        L{timeout}, then the returned Deferred will errback with L{Timeout}.
        """
        # Attempt to fetch a single notification, retrying any transient error
        # until the timeout expires.
        timeout = self.timeout
        while timeout > 0:
            now = self._clock.seconds()
            channel = yield self._connector()
            try:
                notification = yield self._do(channel, uuid, sequence, timeout)
                returnValue(notification)
            except _Retriable:
                # Wait for the connection to shutdown.
                yield channel.client.disconnected.wait()
                timeout -= self._clock.seconds() - now
                continue
        raise Timeout()

    @inlineCallbacks
    def _do(self, channel, uuid, sequence, timeout):
        """Do fetch a single notification.

        If we hit a transient error, the _Retriable exception will be raised.
        """
        tag = self._tag_form % (uuid, sequence)
        try:
            yield self._check_retriable(
                channel.basic_consume, consumer_tag=tag,
                queue=self._queue_form % uuid)
        except ChannelClosed as error:
            # If the broker sent us channel-close because the queue doesn't
            # exist, raise NotFound. Otherwise just propagate.
            if error.args[0].reply_code == 404:
                # This will try to close the client cleanly (by sending 'close'
                # and waiting for 'close-ok'), but will force a connection
                # shutdown if that doesn't happen within 5 seconds (e.g because
                # the broker got shutdown exactly at this time).
                # See AMQClient.close().
                yield channel.client.close(within=5)
                raise NotFound()
            raise

        log.msg("Consuming from queue '%s'" % uuid)

        queue = yield channel.client.queue(tag)
        empty = False

        try:
            msg = yield queue.get(timeout)
        except Empty:
            empty = True
        except QueueClosed:
            # The queue has been closed, presumably because of a side effect.
            # Let's retry after reconnection.
            raise _Retriable()

        yield self._check_retriable(channel.basic_cancel, consumer_tag=tag)

        channel.client.queues.pop(tag, None)

        if empty:
            # Check for messages that arrived in the meantime
            if queue.pending:
                msg = queue.pending.pop()
            else:
                raise Timeout()

        returnValue(Notification(self, channel, msg))

    @inlineCallbacks
    def _check_retriable(self, method, **kwargs):
        """Invoke the given channel method and check for transient errors.

        @param method: A bound method of a txamqp.protocol.AMQChannel instance.
        @param kwargs: The keyword arguments to pass to the method.
        """
        # Serialize calls to channel methods: if get() gets called
        # concurrently we don't want two calls in flight at the same time,
        # since in case of a failure txamqp would errback both calls and
        # there would be no hint about which call actually failed.
        channel = method.im_self
        yield self._channel_lock.acquire()
        try:
            if channel.closed:
                # The channel got closed, e.g. because another call to
                # NotificationSource._do() hit an error. In this case we just
                # want to retry.
                raise _Retriable()
            yield method(**kwargs)
        except ConnectionClosed as error:
            # 320 (connection-forced) and 541 (internal-error) are transient
            # errors that can be retried, the most common being 320 which
            # happens if the broker gets restarted.
            # See also https://www.rabbitmq.com/amqp-0-9-1-reference.html.
            message = error.args[0]
            if message.reply_code in (320, 541):
                raise _Retriable()
            raise
        except Closed as error:
            reason = error.args[0]
            if isinstance(reason, Failure):
                if isinstance(reason.value, TransportClosed):
                    raise _Retriable()
            raise
        finally:
            self._channel_lock.release()

    @inlineCallbacks
    def _done(self, notification, successful):
        """Confirm that a notification has been handled (successfully or not).

        @param notification: The Notification to confirm.
        @param successful: If True, then the notification has been correctly
            processed and will be deleted. If False, it will be re-queued and
            be available at the next NotificationSource.get() call for the
            same UUID.
        """
        channel = notification._channel
        if successful:
            method = channel.basic_ack
        else:
            method = partial(channel.basic_reject, requeue=True)

        yield self._channel_lock.acquire()
        try:
            yield method(delivery_tag=notification._message.delivery_tag)
        except Closed:
            # If we hit any channel or connection error, we raise an error
            # since there's no way this can be re-tried.
            raise Bounced()
        finally:
            self._channel_lock.release()
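# A minimal sketch of fetching one notification, assuming `connector` returns
# a Deferred firing with an open AMQChannel and `uuid` identifies an existing
# stream (both illustrative):
#
#   @inlineCallbacks
#   def fetch_one(connector, uuid):
#       source = NotificationSource(connector, prefix="frontend")
#       notification = yield source.get(uuid, 0)
#       yield source._done(notification, True)  #ack; False would requeue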
Esempio n. 55
0
class OmegleBot():
    DISCONNECTED = 0
    CONNECTING = 1
    WAITING = 2
    CONNECTED = 3
    _serverRegex = re.compile('\<i?frame src="(.*?)"\>')
    _captchaImageRegex = re.compile(
        '\<center\>'
        '\<img width="\d+" height="\d+" alt="" src="image\?c\=(.*?)"\>'
        '\<\/center\>'
    )

    def __init__(self, omegleProto):
        """
        Initializes an L{OmegleBot}.

        @param omegleProto: an instance of a protocol that implements:
            * typingCallback: when the stranger is typing
            * stoppedTypingCallback: stranger no longer typing
            * disconnectCallback: stranger OR bot has disconnected
            * messageCallback: when the stranger has sent us a message
            * recaptchaFailedCallback: when our submitted captcha fails
            * recaptchaRequiredCallback: when omegle requires a captcha
            * connectCallback: when we have found a stranger
            * waitingCallback: when we are waiting for a stranger
        """

        for callback_name in ('typingCallback',
                              'stoppedTypingCallback',
                              'disconnectCallback',
                              'messageCallback',
                              'recaptchaFailedCallback',
                              'recaptchaRequiredCallback',
                              'connectCallback',
                              'waitingCallback',
                             ):
            setattr(self, callback_name, getattr(omegleProto, callback_name, None))

        self.status = DISCONNECTED
        self.server = None
        self.id = None
        self.lock = DeferredLock()
        self.activeRequests = set()
        self.challenge = None
        self.image = None

    def disconnect(self):
        """Disconnect if we are connected; otherwise, do nothing."""
        if self.status in (WAITING, CONNECTED):
            # | /dev/null
            self.getPage('disconnect',
                         addToActive=False,
                         data={'id': self.id}
                        ).addErrback(lambda r: None)

        if self.status == DISCONNECTED:
            return

        self.status = DISCONNECTED
        self.id = None
        self.challenge = None
        self.server = None
        self._cancelAllRequests()
        self.onDisconnect()

    def _cancelAllRequests(self):
        """
        Kills all active connections and I/O, and empties the message queue
        """
        self.lock.waiting[:] = []
        for d in list(self.activeRequests):
            d.cancel()

        self.activeRequests.clear()

    def getPage(self, url, addToActive=True, data=None, *args, **kwargs):
        """
        Retrieves a page using the twisted getPage function,
        and if addToActive is true,
        will add to the tracked requests that will cancel if we disconnect.
        """
        def removeFromActive(r):
            self.activeRequests.discard(d)
            return r

        if not url.startswith('http://') and self.server:
            url = self.server + url

        if data is not None:
            data = urlencode(data)
            kwargs.update({
                'method': 'POST',
                'postdata': data,
                'headers': {
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'Content-Length': '%i' % len(data)
                }
            })

        d = getPage(url, agent=self.userAgent, *args, **kwargs)
        if addToActive:
            self.activeRequests.add(d)
            d.addBoth(removeFromActive)

        return d

    def say(self, message):
        """
        send a message to the connected user
        raises NotConnectedError if we're not connected

        @param message: the message to send
        @type message: string, unicode
        """

        if self.status != CONNECTED:
            raise NotConnectedError()

        def sentMessage(response):
            if response == 'win':
                return True
            else:
                raise SendError("Couldn't send message.")

        return self._doLockedCommand(
            'send', data={'id': self.id, 'msg': message}
        ).addCallback(sentMessage)

    def typing(self):
        """
        tells the connected user that we're typing
        raises NotConnectedError if we're not connected
        """
        if self.status != CONNECTED:
            raise NotConnectedError()
        self._doLockedCommand(
            'typing', data={'id': self.id}
        )

    def stoppedTyping(self):
        """
        tells the connected user that we're not typing anymore
        raises NotConnectedError if we're not connected
        """
        if self.status != CONNECTED:
            raise NotConnectedError()
        self._doLockedCommand(
            'stoppedtyping', data={'id': self.id}
        )

    def solveCaptcha(self, solution):
        """
        attempts to solve the captcha that omegle sent to us.

        @param solution: the solution to the captcha
        @type solution: string
        """

        if not (self.challenge and self.image):
            raise CaptchaNotRequired()

        self.getPage('recaptcha', data={
            'id': self.id,
            'response': solution,
            'challenge': self.image
        })
        self.image, self.challenge = None, None

    def _doLockedCommand(self, url, data):
        """
        internal command that adds it to our DeferredLock queue,
        which will fire sequentially as they finish,
        allowing only one request to be processed at once
        """
        l = self.lock.acquire()

        def gotLock(lock):
            if self.status == CONNECTED:
                def releaseLock(r):
                    lock.release()
                    return r
                d = self.getPage(url, data=data)
                d.addBoth(releaseLock)
                return d
            else:
                lock.release()

        return l.addCallback(gotLock)
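    #acquire() returns a Deferred that fires (with the lock) only after all
    #earlier acquirers have released, so queued commands run strictly one at a
    #time; releasing via addBoth keeps the queue moving even when a request fails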

    @staticmethod
    def _get_rand_id():
        """Return a random 8 char all-cap alphanum string, eg '4B5MP9J6'."""
        return ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(8))

    @inlineCallbacks
    def connect(self):
        """
        attempts to connect to the Omegle server.

        returns a deferred that will fire when we've established a connection
        """
        if self.status != DISCONNECTED:
            raise AlreadyRunningError()
        self.userAgent = getRandomUserAgent()
        self.status = CONNECTING

        """
        print 'connecting to omegle...'
        homePage = yield self.getPage('http://omegle.com/')

        print 'got page, searching for server'
        with open('markup.html', 'w') as f:
            f.write(homePage)

        match = self._serverRegex.search(homePage)
        if not match:
            raise ValueError("Could not find a server to connect to!")
        else:
            self.server = match.group(1)
        """

        self.server = 'http://front2.omegle.com/'
        id = yield self.getPage("start?rcs=1&spid=&randid=%s" % self._get_rand_id())

        self.id = json_decode(id)
        self.status = WAITING
        self.doEvents()
        returnValue((self.id, self.server))

    def doEvents(self):
        """
        main asynchronous io loop that handles events, and asks for more
        """
        if self.status not in (CONNECTED, WAITING):
            return

        def gotEvents(response):
            events = json_decode(response)
            if events is None:
                self.disconnect()
            else:
                for event in events:
                    event, params = event[0], event[1:]
                    callback = getattr(self, 'EVENT_%s' % event, None)
                    if callback:
                        callback(params)

                self.doEvents()

        def gotError(error):
            if not isinstance(error.value, CancelledError):
                self.disconnect()
                self.onError(error)

        return self.getPage('events', data={
            'id': self.id
        }).addCallbacks(gotEvents, gotError)

    def EVENT_waiting(self, params):
        """ we received a waiting event """
        self.status = WAITING
        self.runCallback(self.waitingCallback)

    def EVENT_connected(self, params):
        """ we're connected to a partner """
        self.status = CONNECTED
        self.runCallback(self.connectCallback)

    def EVENT_gotMessage(self, params):
        """ partner sent us a message! """
        self.runCallback(self.messageCallback, params)

    def EVENT_typing(self, params):
        """ partner is typing """
        self.runCallback(self.typingCallback)

    def EVENT_stoppedTyping(self, params):
        """ partner stopped typing """
        self.runCallback(self.stoppedTypingCallback)

    def EVENT_strangerDisconnected(self, params):
        """ partner disconnected """
        self.disconnect()

    def doCaptcha(self, challenge):
        """ returns a deferred that will fire when we have the location of the captcha image """
        def gotImage(r):
            self.image = r
            return r

        def error(error):
            self.onError(error)
            self.disconnect()
            return None

        d = self.getRecaptchaImage(challenge)
        d.addCallback(gotImage).addErrback(error)
        return d

    @inlineCallbacks
    def getRecaptchaImage(self, key):
        """ try and find the image to solve """
        page = 'http://www.google.com/recaptcha/api/noscript?'
        pg = yield self.getPage(page + urlencode({'k': key}), headers={
            'referer': 'http://www.omegle.com/'
        })
        match = self._captchaImageRegex.search(pg)
        if match:
            returnValue(match.group(1))
        else:
            raise ValueError("Could not find the image!")

    def EVENT_recaptchaRequired(self, params):
        """ omegle says we need a captcha to connect """
        #params = challenge,
        self.challenge = params[0]
        params.append(self.doCaptcha(self.challenge))
        self.runCallback(self.recaptchaRequiredCallback, params)

    def EVENT_recaptchaRejected(self, params):
        """ omegle says that our captcha was wrong! """
        self.challenge = params[0]
        params.append(self.doCaptcha(self.challenge))
        self.runCallback(self.recaptchaFailedCallback, params)

    def onDisconnect(self):
        """ we've disconnected """
        self.runCallback(self.disconnectCallback)

    def onError(self, error):
        """ an error has happened! """
        error.printBriefTraceback()

    def runCallback(self, callback, params=None):
        """ run our callback if it's set """
        if callback is None:
            return
        try:
            callback(self, params)
        except:
            from twisted.python import failure
            failure.Failure().printBriefTraceback()
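# A minimal sketch of driving the bot, assuming `omegleProto` provides the
# callbacks listed in __init__ and a Twisted reactor is running:
#
#   bot = OmegleBot(omegleProto)
#   bot.connect()  #fires with (id, server) once a session is started;
#                  #send messages from connectCallback, e.g. bot.say('hi'),
#                  #once a stranger is actually connected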
Esempio n. 56
0
class SessionClient(ZookeeperClient):
    """A managed client that automatically re-establishes ephemerals and
    triggers watches after reconnecting post session expiration.

    This abstracts the client from session expiration handling. It does
    come at a cost though.

    There are two application constraints that need to be considered for usage
    of the SessionClient or ManagedClient. The first is that watch callbacks
    which examine the event, must be able to handle the synthetic session
    event which is sent to them when the session is re-established.

    The second and more problematic is that algorithms/patterns
    utilizing ephemeral sequence nodes need to be rethought, as the
    session client will recreate the nodes when reconnecting at their
    previous paths. Some algorithms (like the std zk lock recipe) rely
    on properties like the smallest valued ephemeral sequence node in
    a container to identify the lock holder, with the notion that upon
    session expiration a new lock/leader will be sought. Sequence
    ephemeral node recreation in this context is problematic as the
    node is recreated at the exact previous path. Alternative lock
    strategies that do work are fairly simple at low volume, such as
    owning a particular node path (ie. /locks/lock-holder) with an
    ephemeral.

    As a result the session client only tracks and re-establishes non-sequence
    ephemeral nodes. For coordination around ephemeral sequence nodes it
    provides for watching the establishment of new sessions via
    `subscribe_new_session`.
    """

    def __init__(self, servers=None, session_timeout=None,
                 connect_timeout=4000):
        """
        """
        super(SessionClient, self).__init__(servers, session_timeout)
        self._connect_timeout = connect_timeout
        self._watches = WatchManager()
        self._ephemerals = {}
        self._session_notifications = []
        self._reconnect_lock = DeferredLock()
        self.set_connection_error_callback(self._cb_connection_error)
        self.set_session_callback(self._cb_session_event)
        self._backoff_seconds = 0
        self._last_reconnect = time.time()

    def subscribe_new_session(self):
        d = Deferred()
        self._session_notifications.append(d)
        return d

    @inlineCallbacks
    def cb_restablish_session(self, e=None, forced=False):
        """Called on intercept of session expiration to create new session.

        This will reconnect to zk, re-establish ephemerals, and
        trigger watches.
        """
        yield self._reconnect_lock.acquire()
        log.debug(
            "Connection reconnect, lock acquired handle:%d", self.handle)

        try:
            # If its been explicitly closed, don't re-establish.
            if self.handle is None:
                log.debug("No handle, client closed")
                return

            # Don't allow forced reconnect herds within a session.
            if forced and (
                    (time.time() - self._last_reconnect)
                    < self.session_timeout / 1000.0):
                forced = False

            if not forced and not self.unrecoverable:
                log.debug("Client already connected, allowing retry")
                return
            elif self.connected or self.handle >= 0:
                self.close()
                self.handle = -1

            # Re-establish
            yield self._cb_restablish_session().addErrback(
                self._cb_restablish_errback, e)

        except Exception, e:
            log.error("error while re-establish %r %s" % (e, e))
        finally:
Esempio n. 57
0
class CFProcessor(service.Service):
    implements(interfaces.IProcessor)

    def __init__(self, name, conf):
        _log.info("CF_INIT %s", name)
        self.name, self.conf = name, conf
        self.channel_dict = defaultdict(list)
        self.iocs = dict()
        self.client = None
        self.currentTime = getCurrentTime
        self.lock = DeferredLock()

    def startService(self):
        service.Service.startService(self)
        self.running = 1
        _log.info("CF_START")
        from channelfinder import ChannelFinderClient
        # Using the default python cf-client.
        # The url, username, and password are provided by the channelfinder._conf module.
        if self.client is None:  # For setting up mock test client
            self.client = ChannelFinderClient()
        self.clean_service()

    def stopService(self):
        service.Service.stopService(self)
        #Set channels to inactive and close connection to client
        self.running = 0
        self.clean_service()
        _log.info("CF_STOP")

    @defer.inlineCallbacks
    def commit(self, transaction_record):
        yield self.lock.acquire()
        try:
            yield deferToThread(self.__commit__, transaction_record)
        finally:
            self.lock.release()

    def __commit__(self, TR):
        _log.debug("CF_COMMIT %s", TR.infos.items())
        pvNames = [unicode(rname, "utf-8") for rid, (rname, rtype) in TR.addrec.iteritems()]
        delrec = list(TR.delrec)
        iocName = TR.src.port
        hostName = TR.src.host
        iocid = hostName + ":" + str(iocName)
        owner = TR.infos.get('CF_USERNAME') or TR.infos.get('ENGINEER') or self.conf.get('username', 'cfstore')
        time = self.currentTime()
        if TR.initial:
            self.iocs[iocid] = {"iocname": iocName, "hostname": hostName, "owner": owner, "channelcount": 0}  # add IOC to source list
        if not TR.connected:
            delrec.extend(self.channel_dict.keys())
        for pv in pvNames:
            self.channel_dict[pv].append(iocid)  # add iocname to pvName in dict
            self.iocs[iocid]["channelcount"] += 1
        for pv in delrec:
            if iocid in self.channel_dict[pv]:
                self.channel_dict[pv].remove(iocid)
                self.iocs[iocid]["channelcount"] -= 1
                if self.iocs[iocid]['channelcount'] == 0:
                    self.iocs.pop(iocid, None)
                elif self.iocs[iocid]['channelcount'] < 0:
                    _log.error("channel count negative!")
                if len(self.channel_dict[pv]) <= 0:  # case: channel has no more iocs
                    del self.channel_dict[pv]
        poll(__updateCF__, self.client, pvNames, delrec, self.channel_dict, self.iocs, hostName, iocName, time, owner)
        dict_to_file(self.channel_dict, self.iocs, self.conf)

    def clean_service(self):
        sleep = 1
        retry_limit = 5
        owner = self.conf.get('username', 'cfstore')
        while 1:
            try:
                _log.debug("Cleaning service...")
                channels = self.client.findByArgs([('pvStatus', 'Active')])
                if channels is not None:
                    new_channels = []
                    for ch in channels or []:
                        new_channels.append(ch[u'name'])
                    if len(new_channels) > 0:
                        self.client.update(property={u'name': 'pvStatus', u'owner': owner, u'value': "Inactive"},
                                           channelNames=new_channels)
                    _log.debug("Service clean.")
                    return
            except RequestException:
                _log.exception("cleaning failed, retrying: ")

            time.sleep(min(60, sleep))
            sleep *= 1.5
            if self.running == 0 and sleep >= retry_limit:
                _log.debug("Abandoning clean.")
                return
Esempio n. 58
0
class TriggerFPGA(LabradServer):
    name = 'Trigger'
    onNewUpdate = Signal(SIGNALID, 'signal: switch toggled', '(sb)')
    
    def initServer(self):
        self.inCommunication = DeferredLock()
        self.connectOKBoard()
        #create a dictionary for triggers and switches in the form 'trigger': channel and 'switch': [channel, logic_not_negated, state]
        #the state written below represents the initial state of the server
        self.dict = {
                     'Triggers':{'PaulBox':0},
                     'Switches':{'866':[0x01,True, True], 'BluePI':[0x02,True, False], '397LocalHeating':[0x04,True,False]}
                     }
        self.initializeChannels()
        self.listeners = set()
        
    def connectOKBoard(self):
        self.xem = None
        fp = ok.FrontPanel()
        module_count = fp.GetDeviceCount()
        print "Found {} unused modules".format(module_count)
        for i in range(module_count):
            serial = fp.GetDeviceListSerial(i)
            tmp = ok.FrontPanel()
            tmp.OpenBySerial(serial)
            id = tmp.GetDeviceID()
            if id == okDeviceID:
                self.xem = tmp
                print 'Connected to {}'.format(id)
                self.programOKBoard(self.xem)
                return
        print 'Device {} not found'.format(okDeviceID)
        print 'Will try again in {} seconds'.format(devicePollingPeriod)
        reactor.callLater(devicePollingPeriod, self.connectOKBoard)
    
    def programOKBoard(self, xem):
        print 'Programming FPGA'
        basepath = os.environ.get('LABRADPATH',None)
        if not basepath:
            raise Exception('Please set your LABRADPATH environment variable')
        path = os.path.join(basepath,'sqip/okfpgaservers/trigger.bit')
        prog = xem.ConfigureFPGA(path)
        if prog: raise Exception("Not able to program FPGA")
        pll = ok.PLL22150()
        xem.GetEepromPLL22150Configuration(pll)
        pll.SetDiv1(pll.DivSrc_VCO,4)
        xem.SetPLL22150Configuration(pll)
    
    def initializeChannels(self):
        for switchName in self.dict['Switches'].keys():
            channel = self.dict['Switches'][switchName][0]
            value = self.dict['Switches'][switchName][1]
            initialize = self.dict['Switches'][switchName][2]
            if initialize:
                print 'initializing {0} to {1}'.format(switchName, value)
                self._switch( channel, value)
        
    def _isSequenceDone(self):
        self.xem.UpdateTriggerOuts()
        return self.xem.IsTriggered(0x6A,0b00000001)
    
    def _trigger(self, channel):
        self.xem.ActivateTriggerIn(0x40, channel)
    
    def _switch(self, channel, value):
        if value:
            self.xem.SetWireInValue(0x00,channel,channel)
        else:
            self.xem.SetWireInValue(0x00,0x00,channel)
        self.xem.UpdateWireIns()
    
    @setting(0, 'Get Trigger Channels', returns = '*s')
    def getTriggerChannels(self, c):
        """
        Returns available channels for triggering
        """
        return self.dict['Triggers'].keys()
    
    @setting(1, 'Get Switching Channels', returns = '*s')
    def getSwitchingChannels(self, c):
        """
        Returns available channels for switching
        """
        return self.dict['Switches'].keys()
    
    @setting(2, 'Trigger', channelName = 's')
    def trigger(self, c, channelName):
        """
        Triggers the select channel
        """
        if channelName not in self.dict['Triggers'].keys(): raise Exception("Incorrect Channel")
        yield self.inCommunication.acquire()
        channel = self.dict['Triggers'][channelName]
        yield deferToThread(self._trigger, channel)
        yield self.inCommunication.release()
    
    @setting(3, 'Switch', channelName = 's', state= 'b')
    def switch(self, c, channelName, state):  
        """
        Switches the given channel
        """
        if channelName not in self.dict['Switches'].keys(): raise Exception("Incorrect Channel")
        if not self.dict['Switches'][channelName][1]: state = not state #allows for easy reversal of high/low
        yield self.inCommunication.acquire()
        channel = self.dict['Switches'][channelName][0]
        yield deferToThread(self._switch, channel, state)
        yield self.inCommunication.release()
        self.dict['Switches'][channelName][2] = state
        if not self.dict['Switches'][channelName][1]: state = not state #(if needed) reverse again for notification
        self.notifyOtherListeners(c, (channelName, state))
    
    @setting(4, 'Get State', channelName = 's', returns = 'b')
    def getState(self, c, channelName):
        """
        Returns the current state of the switch
        """
        if channelName not in self.dict['Switches'].keys(): raise Exception("Incorrect Channel")
        state = self.dict['Switches'][channelName][2]
        if not self.dict['Switches'][channelName][1]: state = not state #allows for easy reversal of high/low
        return state
    
    @setting(5, 'Wait for PBox Completion', timeout = 'v', returns = 'b')
    def waitPBoxCompletion(self, c, timeout = 10):
        """
        Returns true if Paul Box sequence has completed within a timeout period
        """
        requestCalls = int(timeout / 0.050 ) #number of request calls
        for i in range(requestCalls):
            yield self.inCommunication.acquire()
            done = yield deferToThread(self._isSequenceDone)
            yield self.inCommunication.release()
            if done: returnValue(True)
            yield deferToThread(time.sleep, 0.050)
        returnValue(False)
        
    def notifyOtherListeners(self, context, message):
        """
        Notifies all listeners except the one in the given context
        """
        notified = self.listeners.copy()
        notified.remove(context.ID)
        self.onNewUpdate(message, notified)     
            
    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
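# A minimal client-side sketch, assuming a running LabRAD manager with this
# server registered; '866' and 'PaulBox' are channels from self.dict above:
#
#   import labrad
#   cxn = labrad.connect()
#   server = cxn.trigger
#   server.switch('866', True)  #drive the 866 switch high
#   server.trigger('PaulBox')   #fire the PaulBox trigger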
Esempio n. 59
0
class UnitLifecycle(object):
    """Manager for a unit lifecycle.

    Primarily used by the workflow interaction, to modify unit behavior
    according to the current unit workflow state and transitions.

    See docs/source/internals/unit-workflow-lifecycle.rst for a brief
    discussion of some of the more interesting implementation decisions.
    """

    def __init__(self, client, unit, service, unit_dir, state_dir, executor):
        self._client = client
        self._unit = unit
        self._service = service
        self._executor = executor
        self._unit_dir = unit_dir
        self._state_dir = state_dir
        self._relations = None
        self._running = False
        self._watching_relation_memberships = False
        self._watching_relation_resolved = False
        self._run_lock = DeferredLock()
        self._log = logging.getLogger("unit.lifecycle")

    @property
    def running(self):
        return self._running

    def get_relation_workflow(self, relation_id):
        """Accessor to a unit relation workflow, by relation id.

        Primarily intended for and used by unit tests. Raises
        a KeyError if the relation workflow does not exist.
        """
        return self._relations[relation_id]

    @inlineCallbacks
    def install(self, fire_hooks=True):
        """Invoke the unit's install hook.
        """
        if fire_hooks:
            yield self._execute_hook("install")

    @inlineCallbacks
    def start(self, fire_hooks=True, start_relations=True):
        """Invoke the start hook, and setup relation watching.

        :param fire_hooks: False to skip running config-change and start hooks.
            Will not affect any relation hooks that happen to be fired as a
            consequence of starting up.

        :param start_relations: True to transition all "down" relation
            workflows to "up".
        """
        self._log.debug("pre-start acquire, running:%s", self._running)
        yield self._run_lock.acquire()
        self._log.debug("start running, unit lifecycle")
        watches = []

        try:
            if fire_hooks:
                yield self._execute_hook("config-changed")
                yield self._execute_hook("start")

            if self._relations is None:
                yield self._load_relations()

            if start_relations:
                # We actually want to transition from "down" to "up" where
                # applicable (ie a stopped unit is starting up again)
                for workflow in self._relations.values():
                    with (yield workflow.lock()):
                        state = yield workflow.get_state()
                        if state == "down":
                            yield workflow.transition_state("up")

            # Establish a watch on the existing relations.
            if not self._watching_relation_memberships:
                self._log.debug("starting service relation watch")
                watches.append(self._service.watch_relation_states(
                    self._on_service_relation_changes))
                self._watching_relation_memberships = True

            # Establish a watch for resolved relations
            if not self._watching_relation_resolved:
                self._log.debug("starting unit relation resolved watch")
                watches.append(self._unit.watch_relation_resolved(
                    self._on_relation_resolved_changes))
                self._watching_relation_resolved = True

            # Set current status
            self._running = True
        finally:
            self._run_lock.release()

        # Give up the run lock before waiting on initial watch invocations.
        results = yield DeferredList(watches, consumeErrors=True)

        # If there's an error reraise the first one found.
        errors = [e[1] for e in results if not e[0]]
        if errors:
            returnValue(errors[0])

        self._log.debug("started unit lifecycle")

    @inlineCallbacks
    def stop(self, fire_hooks=True, stop_relations=True):
        """Stop the unit, executes the stop hook, and stops relation watching.

        :param fire_hooks: False to skip running stop hooks.

        :param stop_relations: True to transition all "up" relation
            workflows to "down"; when False, simply shut down relation
            lifecycles (in preparation for process shutdown, for example).
        """
        self._log.debug("pre-stop acquire, running:%s", self._running)
        yield self._run_lock.acquire()
        try:
            # Verify state
            assert self._running, "Already Stopped"

            if stop_relations:
                # We actually want to transition relation states
                # (probably because the unit workflow state is stopped/error)
                for workflow in self._relations.values():
                    with (yield workflow.lock()):
                        yield workflow.transition_state("down")
            else:
                # We just want to stop the relations from acting
                # (probably because the process is going down)
                self._log.debug("stopping relation lifecycles")
                for workflow in self._relations.values():
                    yield workflow.lifecycle.stop()

            if fire_hooks:
                yield self._execute_hook("stop")

            # Set current status
            self._running = False
        finally:
            self._run_lock.release()
        self._log.debug("stopped unit lifecycle")

    @inlineCallbacks
    def configure(self, fire_hooks=True):
        """Inform the unit that its service config has changed.
        """
        if not fire_hooks:
            returnValue(None)
        yield self._run_lock.acquire()
        try:
            # Verify State
            assert self._running, "Needs to be running."

            # Execute hook
            yield self._execute_hook("config-changed")
        finally:
            self._run_lock.release()
        self._log.debug("configured unit")

    @inlineCallbacks
    def upgrade_charm(self, fire_hooks=True, force=False):
        """Upgrade the charm and invoke the upgrade-charm hook if requested.

        :param fire_hooks: if False, *and* the actual upgrade operation is not
            necessary, skip the upgrade-charm hook. When the actual charm has
            changed during this invocation, this flag is ignored: hooks will
            always be fired.

        :param force: Boolean, if true then we're merely putting the charm into
            place on disk, not executing charm hooks.
        """
        msg = "Upgrading charm"
        if force:
            msg += " - forced"
        self._log.debug(msg)
        upgrade = _CharmUpgradeOperation(
            self._client, self._service, self._unit, self._unit_dir)
        yield self._run_lock.acquire()
        try:
            yield upgrade.prepare()

            # Executor may already be stopped if we're retrying.
            if self._executor.running:
                self._log.debug("Pausing normal hook execution")
                yield self._executor.stop()

            if upgrade.ready:
                yield upgrade.run()
                fire_hooks = True

            if fire_hooks and not force:
                yield self._execute_hook("upgrade-charm", now=True)

            # Always restart executor on success; charm upgrade operations and
            # errors are the only reasons for the executor to be stopped.
            self._log.debug("Resuming normal hook execution.")
            self._executor.start()
        finally:
            self._run_lock.release()
            upgrade.cleanup()

    @inlineCallbacks
    def _on_relation_resolved_changes(self, event):
        """Callback for unit relation resolved watching.

        The callback is invoked whenever the relation resolved
        settings change.
        """
        self._log.debug("relation resolved changed")
        # Acquire the run lock, and process the changes.
        yield self._run_lock.acquire()

        try:
            # If the unit lifecycle isn't running we shouldn't process
            # any relation resolutions.
            if not self._running:
                self._log.debug("stop watch relation resolved changes")
                self._watching_relation_resolved = False
                raise StopWatcher()

            self._log.info("processing relation resolved changed")
            if self._client.connected:
                yield self._process_relation_resolved_changes()
        finally:
            yield self._run_lock.release()

    @inlineCallbacks
    def _process_relation_resolved_changes(self):
        """Invoke retry transitions on relations if their not running.
        """
        relation_resolved = yield self._unit.get_relation_resolved()
        if relation_resolved is None:
            returnValue(None)
        else:
            yield self._unit.clear_relation_resolved()

        keys = set(relation_resolved).intersection(self._relations)
        for internal_rel_id in keys:
            workflow = self._relations[internal_rel_id]
            with (yield workflow.lock()):
                state = yield workflow.get_state()
                if state != "up":
                    yield workflow.transition_state("up")

    @inlineCallbacks
    def _on_service_relation_changes(self, old_relations, new_relations):
        """Callback for service relation watching.

        The callback is used to manage the unit relation lifecycle in
        accordance with the current relations of the service.

        @param old_relations: Previous service relations for a service. On the
               initial execution, this value is None.
        @param new_relations: Current service relations for a service.
        """
        self._log.debug(
            "services changed old:%s new:%s", old_relations, new_relations)

        # Acquire the run lock, and process the changes.
        yield self._run_lock.acquire()
        try:
            # If the lifecycle is not running, then stop the watcher
            if not self._running:
                self._log.debug("stop service-rel watcher, discarding changes")
                self._watching_relation_memberships = False
                raise StopWatcher()

            self._log.debug("processing relations changed")
            yield self._process_service_changes(old_relations, new_relations)
        finally:
            self._run_lock.release()

    @inlineCallbacks
    def _process_service_changes(self, old_relations, new_relations):
        """Add and remove unit lifecycles per the service relations Determine.
        """
        # Calculate delta between zookeeper state and our stored state.
        new_relations = dict(
            (service_relation.internal_relation_id, service_relation)
            for service_relation in new_relations)

        if old_relations:
            old_relations = dict(
                (service_relation.internal_relation_id, service_relation)
                for service_relation in old_relations)

        added = set(new_relations.keys()) - set(self._relations.keys())
        removed = set(self._relations.keys()) - set(new_relations.keys())
        # Could this service be a principal container?
        is_principal = not (yield self._service.is_subordinate())

        # Once we know a relation is departed, *immediately* stop running
        # its hooks. We can't really handle the case in which a hook is
        # *already* running, but we can at least make sure it doesn't run
        # any *more* hooks (which could have been queued in the past, but
        # not yet executed). This isn't *currently* an exceptionally big
        # deal, because:
        #
        # (1) The ZK state won't actually be deleted, so an inappropriate
        #     hook will still run happily.
        # (2) Even if the state is deleted, and the hook errors out, the
        #     only actual consequence is that we'll eventually run the
        #     error_depart transition rather than depart or down_depart.
        #
        # However, (1) will certainly change in the future, and (2) is not
        # necessarily a watertight guarantee.
        for relation_id in removed:
            yield self._relations[relation_id].lifecycle.stop()

        # Actually depart old relations.
        for relation_id in removed:
            workflow = self._relations.pop(relation_id)
            with (yield workflow.lock()):
                yield workflow.transition_state("departed")
            self._store_relations()

        # Process new relations.
        for relation_id in added:
            service_relation = new_relations[relation_id]
            yield self._add_relation(service_relation)
            if is_principal and service_relation.relation_scope == "container":
                yield self._add_subordinate_unit(service_relation)
            yield self._store_relations()

    @inlineCallbacks
    def _add_relation(self, service_relation):
        try:
            unit_relation = yield service_relation.get_unit_state(
                self._unit)
        except UnitRelationStateNotFound:
            # This unit has not yet been assigned a unit relation state,
            # Go ahead and add one.
            unit_relation = yield service_relation.add_unit_state(
                self._unit)

        lifecycle = UnitRelationLifecycle(
            self._client, self._unit.unit_name, unit_relation,
            service_relation.relation_ident,
            self._unit_dir, self._state_dir, self._executor)

        workflow = RelationWorkflowState(
            self._client, unit_relation, service_relation.relation_name,
            lifecycle, self._state_dir)

        self._relations[service_relation.internal_relation_id] = workflow

        with (yield workflow.lock()):
            yield workflow.synchronize()

    @inlineCallbacks
    def _do_unit_deploy(self, unit_name, machine_id, charm_dir):
        # this method exists to aid testing rather than being
        # inlined in _add_subordinate_unit
        unit_deployer = UnitDeployer(self._client, machine_id, charm_dir)
        yield unit_deployer.start("subordinate")
        yield unit_deployer.start_service_unit(unit_name)

    @inlineCallbacks
    def _add_subordinate_unit(self, service_relation):
        """Deploy a subordinate unit for service_relation remote endpoint."""
        # Figure out the remote service state
        service_states = yield service_relation.get_service_states()
        subordinate_service = [s for s in service_states if
                               s.service_name != self._unit.service_name][0]

        # add a unit state to service (using self._unit as the
        # principal container)
        subordinate_unit = yield subordinate_service.add_unit_state(
            container=self._unit)
        machine_id = yield self._unit.get_assigned_machine_id()

        subordinate_unit_dir = os.path.dirname(self._unit_dir)
        charm_dir = os.path.join(subordinate_unit_dir,
                                 subordinate_unit.unit_name.replace(
                                     "/", "-"))
        state_dir = os.path.join(charm_dir, "state")
        if not os.path.exists(state_dir):
            os.makedirs(state_dir)

        self._log.debug("deploying %s as subordinate of %s",
                        subordinate_unit.unit_name,
                        self._unit.unit_name)
        # with the relation in place and the units added to the
        # container we can start the unit agent
        yield self._do_unit_deploy(subordinate_unit.unit_name,
                                   machine_id,
                                   charm_dir)

    @property
    def _known_relations_path(self):
        return os.path.join(
            self._state_dir, "%s.lifecycle.relations" % self._unit.internal_id)

    def _store_relations(self):
        """Store *just* enough information to recreate RelationWorkflowStates.

        Note that we don't need to store the actual states -- if we can
        reconstruct the RWS, it will be responsible for finding its own state
        -- but we *do* need to store the fact of their existence, so that we
        can still depart broken relations even if they break while we're not
        running.
        """
        state_dict = {}
        for relation_wf in self._relations.itervalues():
            state_dict.update(relation_wf.get_relation_info())
        state = yaml.dump(state_dict)
        temp_path = self._known_relations_path + "~"

        # Write to a temp file, then rename over the real path, so a reader
        # never sees a partially written relations file (os.rename replaces
        # the target atomically on POSIX filesystems).
        with open(temp_path, "w") as f:
            f.write(state)
        os.rename(temp_path, self._known_relations_path)

    @inlineCallbacks
    def _load_relations(self):
        """Recreate workflows for any relation we had previously stored.

        All relations (including those already departed) are stored in
        ._relations (and will be added or departed as usual); but only
        relations *not* already departed will be synchronized, to avoid
        errors caused by trying to access ZK state that may not exist any
        more.
        """
        self._relations = {}
        if not os.path.exists(self._known_relations_path):
            return

        rsm = RelationStateManager(self._client)
        relations = yield rsm.get_relations_for_service(self._service)
        relations_by_id = dict((r.internal_relation_id, r) for r in relations)

        with open(self._known_relations_path) as f:
            known_relations = yaml.safe_load(f.read())

        for relation_id, relation_info in known_relations.items():
            if relation_id in relations_by_id:
                # The service relation's still around: set up workflow as usual
                yield self._add_relation(relations_by_id[relation_id])
            else:
                # The relation has departed. Create an *un*synchronized
                # workflow and place it in relations for detection and
                # removal (with hook-firing) in _process_service_changes.
                workflow = self._reconstruct_workflow(
                    relation_id,
                    relation_info["relation_name"],
                    relation_info["relation_scope"])
                self._relations[relation_id] = workflow

    def _reconstruct_workflow(self, relation_id, relation_ident, relation_scope):
        """Create a RelationWorkflowState which may refer to outdated state.

        This means that *if* this service has already departed the relevant
        relation, it is not safe to synchronize the resultant workflow,
        because its lifecycle may attempt to watch state that doesn't exist.

        Since synchronization is a one-time occurrence, and this method has
        only one client, this shouldn't be too hard to keep track of.
        """
        unit_relation = UnitRelationState(
            self._client, self._service.internal_id, self._unit.internal_id,
            relation_id, relation_scope)
        lifecycle = UnitRelationLifecycle(
            self._client, self._unit.unit_name, unit_relation, relation_ident,
            self._unit_dir, self._state_dir, self._executor)
        relation_name = relation_ident.split(":")[0]
        return RelationWorkflowState(
            self._client, unit_relation, relation_name, lifecycle,
            self._state_dir)

    @inlineCallbacks
    def _execute_hook(self, hook_name, now=False):
        """Execute the hook with the given name.

        For priority hooks, the hook is scheduled and the executor
        started before we wait on the result.
        """
        hook_path = os.path.join(self._unit_dir, "charm", "hooks", hook_name)
        socket_path = os.path.join(self._unit_dir, HOOK_SOCKET_FILE)
        invoker = Invoker(
            HookContext(self._client, self._unit.unit_name), None,
            _EVIL_CONSTANT, socket_path, self._unit_dir, hook_log)
        yield invoker.start()

        if now:
            yield self._executor.run_priority_hook(invoker, hook_path)
        else:
            yield self._executor(invoker, hook_path)
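
The two watch callbacks above (_on_relation_resolved_changes and _on_service_relation_changes) unsubscribe themselves by raising StopWatcher once the lifecycle is no longer running. A minimal sketch of a watcher loop honoring that convention (StopWatcher and Watched are simplified stand-ins, not juju's actual classes):

from twisted.internet.defer import inlineCallbacks

class StopWatcher(Exception):
    """Raised by a callback to ask the watcher to stop delivering events."""

class Watched(object):
    def __init__(self):
        self._callbacks = []

    def watch(self, callback):
        self._callbacks.append(callback)

    @inlineCallbacks
    def fire(self, event):
        # Deliver the event to each callback; a callback that raises
        # StopWatcher is removed rather than treated as an error.
        for callback in list(self._callbacks):
            try:
                yield callback(event)
            except StopWatcher:
                self._callbacks.remove(callback)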
Esempio n. 60
0
class CalibrationServer(LabradServer):
    name = 'DAC Calibration'

    @inlineCallbacks
    def initServer(self):
        self.IQcalsets = {}
        self.DACcalsets = {}
        print 'loading server settings...',
        self.loadServerSettings()
        print 'done.'
        yield LabradServer.initServer(self)

    def loadServerSettings(self):
        """Load configuration information from the registry."""
        d = {}
        defaults = {
            'deconvIQ': True,
            'deconvZ': True,
            'bandwidthIQ': 0.4, #original default: 0.4
            'bandwidthZ': 0.13, #original default: 0.13
            'maxfreqZ': 0.45, #optimal parameter: 10% below Nyquist frequency of dac, 0.45
            'maxvalueZ': 5.0 #optimal parameter: 5.0, from the jitter in 1/H fourier amplitudes
        }
        for key in keys.SERVERSETTINGVALUES:
            default = defaults.get(key, None)
            keyval = default
            print key, ':', keyval
            d[key] = keyval
        self.serverSettings = d

    #@inlineCallbacks
    def initContext(self, c):
        c['Loop'] = False
        c['t0'] = 0
        c['Settling'] = ([], [])
        c['Reflection'] = ([], [])        
        c['Filter'] = 0.2
        c['deconvIQ'] = self.serverSettings['deconvIQ']
        c['deconvZ'] = self.serverSettings['deconvZ']

    @inlineCallbacks
    def call_sync(self, *args, **kw):
        """Call synchronous code in a separate thread outside the twisted event loop."""
        if not hasattr(self, '_sync_lock'):
            self._sync_lock = DeferredLock()
        yield self._sync_lock.acquire()
        try:
            result = yield deferToThread(*args, **kw)
            returnValue(result)
        finally:
            self._sync_lock.release()
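
    # Usage sketch: any blocking calibration call can be funneled through
    # call_sync so at most one runs at a time, e.g. (as the correct_*
    # settings below do):
    #
    #     corrected = yield self.call_sync(calset.DACify, data, loop=False)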

    @inlineCallbacks
    def getIQcalset(self, c):
        """Get an IQ calset for the board in the given context, creating it if needed."""
        if 'Board' not in c:
            raise NoBoardSelectedError()
        board = c['Board']

        if board not in self.IQcalsets:
            calset = yield self.call_sync(IQcorrector, board,
                                                       None,
                                                       errorClass=CalibrationNotFoundError,
                                                       bandwidth=self.serverSettings['bandwidthIQ'])
            self.IQcalsets[board] = calset
        returnValue(self.IQcalsets[board])

    @inlineCallbacks
    def getDACcalset(self, c):
        """Get a DAC calset for the board and DAC in the given context, creating it if needed."""
        if 'Board' not in c:
            raise NoBoardSelectedError()
        board = c['Board']

        if 'DAC' not in c:
            raise NoDACSelectedError()
        dac = c['DAC']

        if board not in self.DACcalsets:
            self.DACcalsets[board] = {}
        if dac not in self.DACcalsets[board]:
            calset = yield self.call_sync(DACcorrector, board,
                                                        dac,
                                                        None,
                                                        errorClass=CalibrationNotFoundError,
                                                        bandwidth=self.serverSettings['bandwidthZ'],
                                                        maxfreqZ=self.serverSettings['maxfreqZ'])
            self.DACcalsets[board][dac] = calset
        returnValue(self.DACcalsets[board][dac])

    @setting(1, 'Board', board=['s'], returns=['s'])
    def board(self, c, board):
        """Sets the board for which to correct the data."""
        c['Board'] = board
        return board

    @setting(10, 'Frequency', frequency=['v[GHz]'], returns=['v[GHz]'])
    def frequency(self, c, frequency):
        """Sets the microwave driving frequency for which to correct the data.

        This also implicitly selects I/Q mode for the correction.
        """
        # c['Frequency'] = float(frequency)
        c['Frequency'] = frequency['GHz']
        c['DAC'] = None
        return frequency

    @setting(11, 'Loop', loopmode=['b: Loop mode'], returns=['b'])
    def loop(self, c, loopmode=True):
        c['Loop'] = loopmode
        return loopmode

    @setting(12, 'Time Offset', t0=['v[ns]'], returns=['v[ns]'])
    def set_time_offset(self, c, t0):
        # c['t0'] = float(t0)
        c['t0'] = t0['ns']
        return t0

    @setting(13, 'deconvIQ', deconvIQ=['b'], returns=['b'])
    def set_deconvIQ(self, c, deconvIQ):
        c['deconvIQ'] = deconvIQ
        return deconvIQ

    @setting(14, 'deconvZ', deconvZ=['b'], returns=['b'])
    def set_deconvZ(self, c, deconvZ):
        c['deconvZ'] = deconvZ
        return deconvZ

    @setting(15, 'getdeconvIQ', returns=['b'])
    def get_deconvIQ(self, c):
        return c['deconvIQ']

    @setting(16, 'getdeconvZ', returns=['b'])
    def get_deconvZ(self, c):
        return c['deconvZ']

    @setting(20, 'DAC', dac=['w: DAC channel 0 or 1', 's: DAC channel'], returns=['w'])
    def dac(self, c, dac):
        """Set the DAC for which to correct the data.

        This also implicitly selects single channel mode for the correction.
        If a string is passed in, the final character is used to select the DAC,
        and must be one of '0'/'A'/'a' for DAC 0 or '1'/'B'/'b' for DAC 1.
        """
        if isinstance(dac, str):
            dac = dac[-1]
        if dac in [0, '0', 'a', 'A']:
            dac = 0
        elif dac in [1, '1', 'b', 'B']:
            dac = 1
        else:
            raise NoSuchDACError()

        c['Frequency'] = None
        c['DAC'] = dac
        return dac

    @setting(30,
        'Correct IQ',
        data=['*(v, v): I/Q data', '*c: I/Q data'],
        zero_ends='b',
        returns=['(*i, *i): Dual channel DAC values'])
    def correct_iq(self, c, data, zero_ends=False):
        """Correct IQ data specified in the time domain.

        Args:
            data (list of tuple or list of complex): The time-domain IQ sequence
                to be deconvolved.
            zero_ends (boolean): If true, the first and last 4 nanoseconds will
                be set to the deconvolved zero value to ensure microwaves are off.

        Returns:
            A tuple of deconvolved I DAC values and Q DAC values.
        """

        if len(data) == 0:
            returnValue([]) # special case for empty data

        if len(data.shape) == 2:
            data = data[:,0] + 1j * data[:,1]

        calset = yield self.getIQcalset(c)
        deconv = c['deconvIQ']
        corrected = yield self.call_sync(calset.DACify, c['Frequency'],
                                                  data,
                                                  loop=c['Loop'],
                                                  zipSRAM=False,
                                                  deconv=deconv,
                                                  zeroEnds=zero_ends)
        if deconv is False:
            print 'No deconv on board ' + c['Board'] 
        returnValue(corrected)

    @setting(31,
        'Correct IQ FT',
        data=['*(v, v): I/Q data', '*c: I/Q data'],
        zero_ends='b',
        returns=['(*i, *i): Dual channel DAC values'])
    def correct_iq_ft(self, c, data, zero_ends=False):
        """Correct IQ data specified in the frequency domain.

        This allows for sub-nanosecond timing resolution.

        Args:
            data (list of tuple or list of complex): The frequency-domain IQ
                sequence to be deconvolved.
            zero_ends (boolean): If true, the first and last 4 nanoseconds will
                be set to the deconvolved zero value to ensure microwaves are off.

        Returns:
            A tuple of deconvolved I DAC values and Q DAC values.
        """
        if len(data) == 0:
            returnValue([]) # special case for empty data

        if len(data.shape) == 2:
            data = data[:,0] + 1.0j * data[:,1]

        calset = yield self.getIQcalset(c)
        deconv = c['deconvIQ']
        corrected = yield self.call_sync(calset.DACifyFT, c['Frequency'],
                                                          data,
                                                          n=len(data),
                                                          t0=c['t0'],
                                                          loop=c['Loop'],
                                                          zipSRAM=False,
                                                          deconv=deconv,
                                                          zeroEnds=zero_ends)
        if deconv is False:
            print 'No deconv on board ' + c['Board']
        returnValue(corrected)

    @setting(32,
        'Correct Analog',
        data=['*v: Single channel data'],
        average_ends='b',
        dither='b',
        returns=['*i: Single channel DAC values'])
    def correct_analog(self, c, data, average_ends=False, dither=False):
        """Correct single channel data specified in the time domain.

        Args:
            data (list of float): The time-domain sequence to be deconvolved.
            average_ends (boolean): If true, the first and last 4 nanoseconds
                will be averaged and set to the constant average value to
                ensure the DAC output is constant after the sequence ends.
            dither (boolean): If true, the sequence will be dithered by adding
                random noise to reduce quantization noise.

        Returns:
            A list of deconvolved DAC values.
        """
        if len(data) == 0:
            returnValue([]) # special case for empty data

        calset = yield self.getDACcalset(c)
        calset.setSettling(*c['Settling'])
        calset.setReflection(*c['Reflection'])
        deconv = c['deconvZ']
        corrected = yield self.call_sync(calset.DACify, data,
                                                  loop=c['Loop'],
                                                  fitRange=False,
                                                  deconv=deconv,
                                                  dither=dither,
                                                  averageEnds=average_ends)
        if deconv is False:
            print 'No deconv on board ' + c['Board']
        returnValue(corrected)

    @setting(33,
        'Correct Analog FT',
        data=['*c: Single channel data'],
        average_ends='b',
        dither='b',
        returns=['*i: Single channel DAC values'])
    def correct_analog_ft(self, c, data, average_ends=False, dither=False):
        """Correct single channel data specified in the frequency domain.

        This allows for sub-nanosecond timing resolution.

        Args:
            data (list of float): The frequency-domain sequence to be deconvolved.
            average_ends (boolean): If true, the first and last 4 nanoseconds
                will be averaged and set to the constant average value to
                ensure the DAC output is constant after the sequence ends.
            dither (boolean): If true, the sequence will be dithered by adding
                random noise to reduce quantization noise.

        Returns:
            A list of deconvolved DAC values.
        """
        if len(data) == 0:
            returnValue([]) # special case for empty data

        calset = yield self.getDACcalset(c)
        calset.setSettling(*c['Settling'])
        calset.setReflection(*c['Reflection'])
        calset.setFilter(bandwidth=c['Filter'])
        deconv = c['deconvZ']
        corrected = yield self.call_sync(calset.DACifyFT, data,
                                                          n=(len(data)-1)*2,
                                                          t0=c['t0'],
                                                          loop=c['Loop'],
                                                          fitRange=False,
                                                          deconv=deconv,
                                                          maxvalueZ=self.serverSettings['maxvalueZ'],
                                                          dither=dither,
                                                          averageEnds=average_ends)
        if deconv is False:
            print 'No deconv on board ' + c['Board']
        returnValue(corrected)

    @setting(40, 'Set Settling', rates=['*v[GHz]: settling rates'], amplitudes=['*v: settling amplitudes'])
    def setsettling(self, c, rates, amplitudes):
        """
        If a calibration can be characterized by time constants, i.e.
        the step response function is
          0                                             for t <  0
          1 + sum(amplitudes[i]*exp(-decayrates[i]*t))  for t >= 0,
        then you don't need to load the response function explicitly
        but can just give the time constants and amplitudes.
        All previously used time constants will be replaced.
        """
        c['Settling'] = (rates, amplitudes)
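
    # For reference, the step response described above is
    #
    #     step(t) = 1 + sum(a * exp(-r * t) for r, a in zip(rates, amplitudes))
    #
    # for t >= 0, and 0 for t < 0.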

    @setting(41, 'Set Reflection', rates=['*v[GHz]: reflection rates'], amplitudes=['*v: reflection amplitudes'])
    def setreflection(self, c, rates, amplitudes):
        """ Correct for reflections in the line.
        Impulse response of a line reflection is H = (1-amplitude) / (1-amplitude * exp( -2i*pi*f/rate) )
        All previously used time constants for the reflections will be replaced.
        """
        c['Reflection'] = (rates, amplitudes)

    @setting(45, 'Set Filter', bandwidth=['v[GHz]: bandwidth'])
    def setfilter(self, c, bandwidth):
        """
        Set the lowpass filter used for deconvolution.

        bandwidth: the bandwidth argument passed to the lowpass
            filter function (see above)
        """
        c['Filter'] = float(bandwidth)

    @setting(50, 'Fast FFT Len', n='w')
    def fast_fft_len(self, c, n):
        """Given a sequence length n, get a new length nfft >= n which is efficient for calculating fft."""
        return fastfftlen(n)
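
fastfftlen is imported from elsewhere in this codebase. As an assumption about what such a helper does (not its actual implementation), a typical version returns the smallest length >= n whose only prime factors are 2, 3 and 5, since mixed-radix FFTs of such lengths are fast:

def fast_fft_length(n):
    """Smallest m >= n with no prime factors other than 2, 3 and 5.

    Hypothetical stand-in for the fastfftlen helper used above.
    """
    m = max(int(n), 1)
    while True:
        k = m
        for p in (2, 3, 5):
            while k % p == 0:
                k //= p
        if k == 1:  # m is 5-smooth, hence FFT-friendly
            return m
        m += 1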