Example #1
class RandomProxy(object):
    def __init__(self):
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        pass
        # user_agent = random.choice(USER_AGENT_LIST)
        # request.headers['User-Agent'] = user_agent
        # if 'proxy' not in request.meta or self.current_proxy.is_expiring:
        #     # fetch a new proxy
        #     self.update_proxy()
        #     request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        # If the site redirects (302) to a captcha page, switch the proxy IP.
        # 'captcha' in response.url covers the case where the captcha page itself
        # returns status 200, so the URL is used as the tell-tale sign.
        if response.status != 200 or 'captcha' in response.url:
            # Reaching here means the request was flagged as a crawler by Boss Zhipin,
            # so effectively nothing was fetched for it.
            # Return the request so it is put back into the scheduler
            # and sent again later.

            # if not self.current_proxy.blacked:
            #     self.current_proxy.blacked = True
            # self.update_proxy()
            # print('proxy %s is no longer valid' % self.current_proxy.proxy)
            # request.meta['proxy'] = self.current_proxy.proxy

            return request

        # On the normal path, remember to return the response at the end;
        # if it is not returned, the response never reaches the spider
        # and never gets parsed.
        return response

    def update_proxy(self):
        # A lock is a threading concept; Scrapy runs on Twisted's asynchronous
        # event loop, which for this purpose behaves like multiple threads. While
        # crawling, the current proxy may suddenly get banned, and several callers
        # can land here at once to fetch a new IP, wasting paid proxy requests.
        # The lock ensures only one caller at a time runs the code below and
        # fetches a new proxy IP, which every other caller can then reuse,
        # cutting down the waste of proxy IPs (i.e. money).
        self.lock.acquire()
        # Conditions for switching the proxy:
        # 1. no proxy IP is currently in use
        # 2. the current proxy is about to expire
        # 3. the current proxy has been banned by the target site
        # Any one of these means it is time for a new proxy IP.
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            url = r'https://h.wandouip.com/get/ip-list?pack=%s&num=1&xy=1&type=2&lb=\r\n&mr=1&' % random.randint(
                100, 1000)
            response = requests.get(url=url, headers=DEFAULT_REQUEST_HEADERS)
            text = json.loads(response.text)
            print(text)
            data = text['data'][0]
            proxy_model = ProxyModel(data)
            print('Fetched a new proxy: %s' % proxy_model.proxy)
            self.current_proxy = proxy_model
            # return proxy_model
        self.lock.release()
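A note on the locking used throughout these examples: DeferredLock.acquire() does not block the way threading.Lock does. It returns a Deferred that fires once the lock is actually held, so in Twisted-style code it is normally yielded (as the txamqp and LabRAD examples further down do). A minimal, self-contained sketch of the two usual patterns (the function names are illustrative only):

from twisted.internet.defer import DeferredLock, inlineCallbacks

lock = DeferredLock()

@inlineCallbacks
def critical_section(name):
    yield lock.acquire()      # wait until the lock is actually held
    try:
        print('%s holds the lock' % name)
    finally:
        lock.release()        # always release, even on error

# Equivalent shorthand: run() acquires the lock, calls the function, then
# releases, returning a Deferred that fires with the function's result:
#     d = lock.run(some_function, arg1, arg2)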
Example #2
class ProxyMiddleWare(object):
    def __init__(self):
        self.p = Proxy()
        self.proxy_status = True
        self.lock = DeferredLock()
        self.try_count = 2

    def process_request(self, request, spider):

        if request.url.split('/')[3] == 'shop':
            request.cookies = {}

        if request.url.split('/')[3] == 'shop' and self.proxy_status:
            self.lock.acquire()
            self.proxy_status = False
            proxy = self.p.get_proxy()
            request.meta["proxy"] = proxy
            self.lock.release()
        #elif "proxy" in request.meta:
        #    self.proxy_status = True
        #    del request.meta["proxy"]
    def process_response(self, request, response, spider):

        if response.status == 403 or "meituan" in response.url or response.status == 302:
            self.lock.acquire()
            del request.meta["proxy"]
            self.proxy_status = True
            self.lock.release()
            self.try_count -= 1
            return request
        elif self.try_count == 0:
            self.try_count = 2
        return response
Example #3
class IPProxyDownloadMiddleware(object):
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&pack=61856&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions='

    def __init__(self):
        super().__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or 'captcha' in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            print('Proxy %s was blacklisted' % self.current_proxy)
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print('Fetched a new proxy:', text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
                self.current_proxy.blacked = False
        self.lock.release()
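Most of these middlewares rely on a ProxyModel class (with proxy, ip, is_expiring and blacked attributes) that none of the examples define. A plausible minimal sketch, assuming the vendor JSON format {"ip": ..., "port": ..., "expire_time": "YYYY-MM-DD HH:MM:SS"} quoted in later examples (the 5-second expiry margin is an arbitrary choice):

from datetime import datetime, timedelta

class ProxyModel(object):
    """Hypothetical model matching the attributes these middlewares use."""
    def __init__(self, data):
        self.ip = data['ip']
        self.port = data['port']
        self.expire_time = datetime.strptime(data['expire_time'],
                                             '%Y-%m-%d %H:%M:%S')
        self.blacked = False                        # set True once banned
        self.proxy = 'http://%s:%s' % (self.ip, self.port)

    @property
    def is_expiring(self):
        # Report expiry a little early so a request never goes out
        # on an already-dead IP.
        return (self.expire_time - datetime.now()) < timedelta(seconds=5)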
Example #4
class IPProxyDownloadMiddleware(object):
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions='

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.lock = DeferredLock()
        self.current_proxy = None
        self.username = None

    def process_request(self, request, spider):
        if not self.current_proxy:
            self.update_proxy()
            return request
        if self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy
        print(request.meta['proxy'])

    def process_response(self, request, response, spider):
        if response.status != 200:
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring:
            response = requests.get(self.PROXY_URL)
            result = json.loads(response.text)
            data = result['data']
            proxy = ProxyModel(data[0])
            print('Fetched:', response.text)
            self.current_proxy = proxy
        self.lock.release()
Example #5
class ProxyMiddleWare(object):
    def __init__(self):
        self.p = Proxy()
        self.proxy_status = True
        self.lock = DeferredLock()
        self.try_count = 3
        self.auth = base64.b64encode(bytes("proxy:proxy123!", 'utf-8'))

    def process_request(self, request, spider):
        if "porxy" not in request.meta and self.proxy_status:
            self.lock.acquire()
            self.proxy_status = False
            request.headers["Proxy-Authorization"] = b'Basic ' + self.auth
            proxy = self.p.get_proxy()
            request.meta["proxy"] = proxy
            self.lock.release()

    def process_response(self, request, response, spider):
        if response.status in [403, 302, 301]:
            self.lock.acquire()
            request.meta.pop("proxy", None)
            self.proxy_status = True
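            # NOTE: time.sleep() blocks the whole Twisted reactor for 3 seconds;
            # twisted.internet.task.deferLater would pause without stalling other requests.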
            time.sleep(3)
            self.lock.release()
            self.try_count -= 1
            if self.try_count == 0:
                self.try_count = 3
                return response
            return request
        return response
Example #6
class IPProxyMiddleware():
    PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=3&type=2&pro=&city=0&yys=0&port=11&time=2&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions="

    def __init__(self):
        super(IPProxyMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expireing:
            # fetch a proxy
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print('Proxy %s was blacklisted' % self.current_proxy.ip)
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expireing or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print("重新获取了一个代理:", text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][random.randint(0, 2)]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #7
class ProxyDownloaderMiddleware(object):
    def __init__(self):
        super(ProxyDownloaderMiddleware, self).__init__()
        self.PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions="
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.upgrade_proxy()
            request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200:
            self.upgrade_proxy()
            return request
        return response

    def upgrade_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring:
            resp = requests.get(self.PROXY_URL)
            result = resp.text
            data = json.loads(result)['data']
            if data:
                proxy_model = ProxyModel(data[0])
                self.current_proxy = proxy_model
        self.lock.release()
Example #8
class UAMiddleware(object):
    def __init__(self):
        self.lock = DeferredLock()
        self.update_time = datetime.now()
        self.UA_List = USER_AGENT

    def process_request(self, request, spider):
        self.lock.acquire()
        if self.is_expiring:
            ua = random.choice(self.UA_List)
            request.headers['User-Agent'] = ua
            print(request.headers['User-Agent'])
        self.lock.release()

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    @property
    def is_expiring(self):
        now = datetime.now()
        if (now - self.update_time) > timedelta(seconds=30):
            self.update_time = datetime.now()
            print("跟换USER_AGENT")
            return True
        else:
            return False
Example #9
class Client(object):
    def __init__(self, specFilename, exchange='signals'):
        self.exchange = exchange
        spec = txamqp.spec.load(specFilename)

        delegate = TwistedDelegate()
        self.clientConnected = ClientCreator(reactor, AMQClient,
                                             delegate=delegate, vhost="/",
                                             spec=spec).connectTCP("localhost", 5672)
        self.conn = None
        self.chan = None
        self.finishLock = DeferredLock()

    @inlineCallbacks
    def finishChannelOpen(self):
        yield self.finishLock.acquire()
        if self.conn is None:
            print "opening connection for", self
            self.conn = yield self.clientConnected

            authentication = {"LOGIN": "******", "PASSWORD": "******"}
            yield self.conn.start(authentication)

        if self.chan is None:
            self.chan = yield self.conn.channel(1)
            yield self.chan.channel_open()
            yield self.newChan()
            print "made channel for", self
        self.finishLock.release()
            
    def newChan(self):
        # called once when the new channel is opened
        return succeed(None)
Example #10
class IPProxyDownloadMiddleware(object):
    PROXY_URL = ''

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            # Reaching here means the request was flagged as a crawler, so nothing was fetched; send it again
            return request
        # Normal path: return the response so the spider can parse it
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print(text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #11
class IPProxyDownloadMiddlware(object):
    PROXY_URL = 'URL of the purchased proxy API'

    def __init__(self):
        super(IPProxyDownloadMiddlware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()
            request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or 'captcha' in response.url:
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring:
            response = requests.get(self.PROXY_URL)
            text = response.text
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #12
class IPProxy(object):

    PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=0&city=0&yys=0&port=1&pack=21267&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions="

    def __init__(self):

        self.lock = DeferredLock()
        self.current_proxy = None

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200:
            if not self.current_proxy.is_block:
                self.current_proxy.is_block = True
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if self.current_proxy is None or self.current_proxy.is_expiring or self.current_proxy.is_block:
            response_json = requests.get(self.PROXY_URL).json()
            try:
                print(response_json)
                self.current_proxy = ProxyModel(response_json['data'][0])
            except (KeyError, IndexError):
                print('Error fetching proxy!')
                print(response_json)
        self.lock.release()
Example #13
class IPProxyDownloaderMiddleware(object):
    '''
    Proxy-IP downloader middleware.
    '''
    # API that returns proxy IP info, e.g. Zhima proxy, Kuaidaili, etc.
    IP_URL = r'http://127.0.0.1:8000/?types=0&count=1&country=国内'

    def __init__(self):
        super(IPProxyDownloaderMiddleware, self).__init__()

        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expire:
            self.updateProxy()

        request.meta['proxy'] = self.current_proxy.address

    def process_response(self, request, response, spider):
        if response.status != 200:
            # Reaching here means the request was flagged as a crawler,
            # so it is effectively wasted.
            # If we don't return the request, no data is fetched for it;
            # returning it puts it back into the scheduler.
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
                print("被拉黑了")
            self.updateProxy()
            return request
        # Normal case: return the response
        return response

    def updateProxy(self):
        '''
        Fetch a new proxy IP.
        :return:
        '''
        # Requests are asynchronous; to avoid firing too many simultaneous
        # calls at the Zhima proxy API, take the lock while fetching a new proxy IP.
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expire or self.current_proxy.blacked:
            response = requests.get(self.IP_URL)
            text = response.text

            # Sample response: {"code":0,"success":true,"msg":"0","data":[{"ip":"49.70.152.188","port":4207,"expire_time":"2019-05-28 18:53:15"}]}

            jsonString = json.loads(text)

            data = jsonString['data']
            if len(data) > 0:
                proxyModel = IPProxyModel(data=data[0])
                self.current_proxy = proxyModel
        self.lock.release()
Example #14
class ProxyMiddleware():
    def __init__(self, proxy_url):
        self.logger = logging.getLogger(__name__)
        self.proxy_url = proxy_url
        self.update_time = datetime.now()
        self.proxy_wrong = True
        self.lock = DeferredLock()

    @classmethod
    def from_crawler(cls, crawler):
        settings = crawler.settings
        return cls(proxy_url=settings.get('PROXY_URL'))

    def get_random_proxy(self):
        try:
            response = requests.get(self.proxy_url)
            if response.status_code == 200:
                proxy = response.text
                return proxy
        except requests.ConnectionError:
            return False

    def process_request(self, request, spider):
        print("进入了ip代理的process_request")
        self.lock.acquire()
        if request.meta.get(
                'retry_times') or self.proxy_wrong or self.is_expiring:
            print("我要去修改ip代理")
            proxy = self.get_random_proxy()
            if proxy:
                uri = 'http://{proxy}'.format(proxy=proxy)
                self.logger.debug('Using proxy ' + proxy)
                request.meta['proxy'] = uri
                print('Using proxy: ' + uri)
                self.proxy_wrong = False
                self.update_time = datetime.now()
        self.lock.release()

    def process_response(self, request, response, spider):
        if response.status != 200 or "很抱歉,您的访问被我们识别为机器行为" in response.text:
            print("出现了验证码!")
            self.proxy_wrong = True
            return request
        return response

    @property
    def is_expiring(self):
        now = datetime.now()
        if (now - self.update_time) > timedelta(seconds=30):
            self.update_time = datetime.now()
            print("执行了is_expiring")
            return True
        else:
            return False
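Example #14 is the only middleware here that pulls its proxy endpoint out of the crawler settings via from_crawler; the settings side is not shown anywhere, so here is a hedged sketch of the wiring (module path, priority and endpoint URL are assumptions, not taken from the example):

# settings.py
PROXY_URL = 'http://127.0.0.1:5555/random'   # assumed proxy-pool endpoint

DOWNLOADER_MIDDLEWARES = {
    # 'myproject.middlewares.ProxyMiddleware' is an assumed module path
    'myproject.middlewares.ProxyMiddleware': 543,
}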
Example #15
class IPProxyDownloadMiddleware(object):
    # API generated by an online proxy vendor; this one is Zhima proxy's
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&port=11&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions='

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()  # create the lock

    def process_request(self, request, spider):
        # If the request has no proxy set yet, or the current proxy is about to expire
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()
            request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        # If the status code is not 200, or we were redirected to a captcha, fetch a new proxy
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print("%s这个代理被识别并被加入黑名单了" % self.current_proxy.ip)
            self.update_proxy()
            # Reaching here means the request was flagged as a crawler by BOSS Zhipin,
            # so effectively nothing was fetched for it.
            # If we don't return the request, it counts as having fetched no data;
            # the request would be wasted and the data never scraped.
            # So return the request to put it back into the scheduler for a retry.
            return request
        # On the normal path, remember to return the response;
        # otherwise it never reaches the spider and never gets parsed.
        return response

    # Both process_request and process_response (e.g. when hitting a 403 page)
    # may need to fetch a proxy, so the shared code lives in its own method, update_proxy()
    def update_proxy(self):
        self.lock.acquire()  # acquire the lock
        # Fetch only if there is no proxy yet, or it is expiring, or it was blacklisted
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text  # text is a JSON string; parse it into a dict
            print("Fetched a new proxy:", text)
            # The proxy-pool API returns data in this format:
            # {"code":0,"success":true,"msg":"0","data":[{"ip":"223.242.123.50","port":3212,"expire_time":"2019-01-15 10:15:20"}]}
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
                # return proxy_model
        self.lock.release()  # release the lock
Example #16
class emailer( LabradServer ):
    name = 'Email Server'
    
    @inlineCallbacks
    def initServer( self ):
        self.username, self.fromaddr, self.password = yield self.getInfoReg()
        self.password = base64.b64decode(self.password)
        self.toaddrs = {}
        self.smtp = 'smtp.gmail.com:587'
        self.sending = DeferredLock()
    
    @inlineCallbacks
    def getInfoReg(self):
        reg = self.client.registry
        yield reg.cd(['Servers','Email Server'])
        username = yield reg.get('username')
        fromaddr = yield reg.get('address')
        password = yield reg.get('password')
        returnValue([username,fromaddr,password])
        
    @setting(0, "Set Recipients", recepients = '*s', returns = '')
    def setRecepients(self, c, recepients):
        """Set the recipients of the email as a list of strings of email addresses"""
        self.toaddrs[c.ID] = recepients
    
    @setting(1, "Send", subject = 's', message = 's', returns = '')
    def selectDP(self, c, subject, message):
        """Send an email with the given subject and message to the recipients set in this context"""
        if not self.toaddrs[c.ID]: raise Exception("Recipients not set")
        yield self.sending.acquire()  
        session = smtplib.SMTP(self.smtp)
        session.starttls()
        session.login(self.username,self.password)
        toaddrs = self.toaddrs[c.ID]
        msg = MIMEMultipart()
        msg['From'] = self.fromaddr
        msg['To'] = COMMASPACE.join(toaddrs)
        msg['Subject'] = subject
        msg.attach(MIMEText(message, 'plain'))    
        session.sendmail(self.fromaddr, toaddrs, msg.as_string())
        session.quit()
        self.sending.release()
    
    def initContext(self, c):
        """Initialize a new context object."""
        pass
    
    def expireContext(self, c):
        del self.toaddrs[c.ID]
Example #17
class NotificationConnector(object):
    """Provide ready-to-use AMQP channels."""

    def __init__(self, service, clock=reactor):
        """
        @param service: An object implementing the same whenConnected() API as
            the twisted.application.internet.ClientService class.
        @param clock: An object implementing IReactorTime.
        """
        self._service = service
        self._clock = clock
        self._channel = None
        self._channel_lock = DeferredLock()

    @inlineCallbacks
    def __call__(self):
        """
        @return: A deferred firing with a ready-to-use txamqp.protocol.Channel.
        """
        # Serialize calls, in order to setup new channels only once.
        yield self._channel_lock.acquire()
        try:
            if self._channel and self._channel.client.closed:
                # If we have a client but it's closed, let's wait for it to be
                # fully disconnected and spin a reactor iteration to give a
                # chance to the AMQClient.connectionLost callback chain to
                # settle (in particular our ClientService will be notified and
                # will start connecting again).
                yield self._channel.client.disconnected.wait()
                yield deferLater(self._clock, 0, lambda: None)

            client = yield self._service.whenConnected()
            channel = yield client.channel(1)
            # Check if we got a new channel, and initialize it if so.
            if channel is not self._channel:
                self._channel = channel
                yield self._channel.channel_open()
                # This tells the broker to deliver us at most one message at
                # a time to support using multiple processes (e.g. in a
                # load-balanced/HA deployment). If NotificationSource.get()
                # gets called against the same UUID first by process A and then
                # when it completes by process B, we're guaranteed that process
                # B will see the very next message in the queue, because
                # process A hasn't fetched any more messages than the one it
                # received. See #729140.
                yield self._channel.basic_qos(prefetch_count=1)
        finally:
            self._channel_lock.release()
        returnValue(self._channel)
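NotificationConnector only needs a service object exposing whenConnected(); a sketch of plausible wiring with twisted.application.internet.ClientService and txamqp follows (the spec file path and factory details are assumptions, not part of the example):

from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import Factory
from twisted.application.internet import ClientService
from txamqp.client import TwistedDelegate
from txamqp.protocol import AMQClient
import txamqp.spec

spec = txamqp.spec.load('amqp0-9-1.xml')   # assumed path to the AMQP spec file
factory = Factory.forProtocol(
    lambda: AMQClient(TwistedDelegate(), vhost='/', spec=spec))
endpoint = TCP4ClientEndpoint(reactor, 'localhost', 5672)
service = ClientService(endpoint, factory)   # reconnects automatically
service.startService()

connector = NotificationConnector(service)
# Each call yields the same ready channel; concurrent calls are serialized
# by the DeferredLock inside NotificationConnector:
#     channel = yield connector()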
Example #18
class emailer(LabradServer):
    name = 'Email Server'

    @inlineCallbacks
    def initServer(self):
        self.username, self.fromaddr, self.password = yield self.getInfoReg()
        self.password = base64.b64decode(self.password)
        self.toaddrs = {}
        self.smtp = 'smtp.gmail.com:587'
        self.sending = DeferredLock()

    @inlineCallbacks
    def getInfoReg(self):
        reg = self.client.registry
        yield reg.cd(['Servers', 'Email Server'])
        username = yield reg.get('username')
        fromaddr = yield reg.get('address')
        password = yield reg.get('password')
        returnValue([username, fromaddr, password])

    @setting(0, "Set Recipients", recepients='*s', returns='')
    def setRecepients(self, c, recepients):
        """Set the recipients of the email as a list of strings of email addresses"""
        self.toaddrs[c.ID] = recepients

    @setting(1, "Send", subject='s', message='s', returns='')
    def selectDP(self, c, subject, message):
        """Send an email with the given subject and message to the recipients set in this context"""
        if not self.toaddrs[c.ID]: raise Exception("Recipients not set")
        yield self.sending.acquire()
        session = smtplib.SMTP(self.smtp)
        session.starttls()
        session.login(self.username, self.password)
        toaddrs = self.toaddrs[c.ID]
        msg = MIMEMultipart()
        msg['From'] = self.fromaddr
        msg['To'] = COMMASPACE.join(toaddrs)
        msg['Subject'] = subject
        msg.attach(MIMEText(message, 'plain'))
        session.sendmail(self.fromaddr, toaddrs, msg.as_string())
        session.quit()
        self.sending.release()

    def initContext(self, c):
        """Initialize a new context object."""
        pass

    def expireContext(self, c):
        del self.toaddrs[c.ID]
Example #19
class IPProxyDownloadMiddleware(object):

    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions='

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()

        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print('Proxy %s was blacklisted' % self.current_proxy.ip)
            self.update_proxy()
            # Reaching here means the request was flagged as a crawler by Boss Zhipin,
            # so effectively nothing was fetched for it.
            # If we don't return the request, it counts as having fetched no data;
            # the request would be wasted and the data never scraped.
            # So return the request to put it back into the scheduler
            # and send it again later.
            return request
        # On the normal path, remember to return the response;
        # otherwise the response never reaches the spider
        # and never gets parsed.
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print('Fetched a new proxy:', text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #20
class IpProxyMiddleware(object):
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions='

    def __init__(self):
        super(IpProxyMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        # Called before the engine hands the request to the downloader
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy
        # the proxy has the form https://ip:port

    def process_response(self, request, response, spider):
        if response.status != 200 or 'captcha' in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print("%s ip被封锁" % self.current_proxy.proxy)
            self.update_proxy()
            # Reaching here means we were flagged as a crawler,
            # so this request fetched nothing.
            # Return the request so it is downloaded again.
            return request
        # On the normal path, remember to return the response;
        # otherwise it never reaches the spider and never gets parsed.
        return response

    def update_proxy(self):
        # Scrapy crawls on top of Twisted, i.e. asynchronously (think multithreading).
        # If every concurrent caller requested a proxy we would waste IPs,
        # so take a lock to economize on proxy IPs.
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print("重新获取了一个代理", text)
            result = json.loads(text)
            # Zhima proxy rate-limits you, so the returned 'data' may be empty
            if len(result['data']) > 0:
                # e.g. {'ip': '106.46.136.7', 'port': 4225, 'expire_time': '2019-04-12 09:46:28'}
                data = result['data'][0]
                # ProxyModel wraps data for the several operations we need: joining
                # ip and port, converting expire_time to a datetime to test expiry, etc.
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #21
class IPProxyDownloadMiddleware(object):
    PROXY_URL = "http://http.tiqu.alicdns.com/getip3?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=1&lb=1&sb=0&pb=45&mr=1&regions=&gm=4"

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expireing:
            # fetch a proxy
            self.update_proxy()

        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.block:
                self.current_proxy.block = True
            print("%s这个代理被加入黑名单了!" % self.current_proxy)
            self.update_proxy()
            # Reaching here means the request was flagged as a crawler by Boss Zhipin,
            # so it fetched nothing.
            # If we don't return the request, it counts as having fetched no data;
            # the request would be wasted and the data never scraped.
            # So return the request to put it back into the scheduler
            # and send it again later.
            return request
        # On the normal path, return the response;
        # otherwise it never reaches the spider
        # and never gets parsed.
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expireing or self.current_proxy.block:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print("重新获取了一个代理:", text)
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #22
class IPProxyDownloaderMiddleware(object):
    PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=1&time=1&ts=1&ys=0&cs=1&lb=1&sb=0&pb=45&mr=1&regions="

    # constructor
    def __init__(self):
        super(IPProxyDownloaderMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    # handle requests
    def process_request(self, request, spider):
        # if the request carries no proxy yet, or the current one is expiring, fetch one
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    # handle responses
    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            # mark the IP as blacklisted if it isn't already
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print("%s这个Ip被拉入黑名单了" % self.current_proxy.ip)
            self.update_proxy()
            return request
        return response

    # update the proxy
    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            print("重新获取了一个代理:", text)
            # print(text)
            results = json.loads(text)
            if len(results['data']) > 0:
                data = results['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #23
class IPProxyDownloadMiddleware(object):
    proxy_url = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=4&mr=1&regions="

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        print(request.url, "当前请求的链接")

        if 'proxy' not in request.meta or self.current_proxy.is_expring:
            print("重新获取了一个代理")
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        print("%s当前网页状态" % response.status)
        if "zpAntispam" in response.url:
            print("%s当前网页链接,这个链接包含zpAntispam" % response.url)
        if response.status != 200 or "zpAntispam" in response.url:
            print("%s这个代理被加入黑名单了" % self.current_proxy.ip)
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expring or self.current_proxy.blacked:
            resp = requests.get(url=self.proxy_url)
            text = resp.text
            jsonHTML = json.loads(text)
            if len(jsonHTML['data']) > 0:
                respJ = jsonHTML['data'][0]
                proxy_model = ProxyModules(data=respJ)
                self.current_proxy = proxy_model
        self.lock.release()
Example #24
class IPProxyDownloadMiddleware(object):
    PROXY_URL = "代理服务API链接 "

    def __init__(self):
        super(IPProxyDownloadMiddleware,self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self,request,spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()

        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self,request,response,spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            print("%s代理被加入到黑名单"%self.current_proxy.ip)
            self.update_proxy()
            # Reaching here means the request was flagged as a crawler and fetched nothing.
            # If we don't return the request, it counts as having fetched no data and is wasted,
            # so return it to put it back into the scheduler for a later retry.
            return request
        # Normal case: return the response; if it isn't returned it never reaches the spider and never gets parsed
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            result = json.loads(text)
            print('Fetched a new proxy:', text)
            if len(result['data'])>0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #25
class AmazonDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.
    def __init__(self):
        self.helper = Proxy_helper()
        self.loc = DeferredLock()

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        self.loc.acquire()
        request.meta['proxy'] = self.helper.get_proxy()
        self.loc.release()

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        if response.status not in (200, 301, 302):
            self.loc.acquire()
            self.helper.update_proixy(request.meta['proxy'])
            self.loc.release()
            return request

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        self.loc.acquire()
        self.helper.update_proixy(request.meta['proxy'])
        self.loc.release()
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        return request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
Example #26
class IPProxyDownloaderMiddleware:
    """ scrapy 可使用此方案 """
    PROXIES = [
        'wind13zero:[email protected]:16817',
        'wind13zero:[email protected]:16817'
    ]

    def __init__(self):
        super().__init__()
        self.proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        self.lock.acquire()
        if not self.proxy:
            # pick one proxy and keep reusing it
            self.proxy = random.choice(self.PROXIES)
        request.meta['proxy'] = self.proxy
        self.lock.release()
        # print('Proxy %s' % self.proxy)
        print('=' * 30)
        print(request.meta['proxy'])
        return None
Example #27
class UserAgentAndIPProxyMiddleware(object):
    """
    配置随机请求头和IP代理
    """
    PROXY_URL = "http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=0&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions="

    def __init__(self):
        super(UserAgentAndIPProxyMiddleware,self).__init__()
        self.ua = UserAgent()
        # the current proxy (seeded with a hardcoded entry)
        self.current_proxy = ProxyModel(dict({"ip":"120.43.134.119","port":4242,"expire_time":"2020-09-11 13:04:26"}))
        self.lock = DeferredLock()

    def get_proxy(self):
        self.lock.acquire()
        # Twisted under Scrapy executes asynchronously, so this function can be entered many times almost at once; the lock prevents pointless repeated proxy requests
        if self.current_proxy is None or self.current_proxy.is_expiring:
            resp = requests.get(self.PROXY_URL).text
            result = json.loads(resp)
            if len(result["data"]) > 0:
                data = result["data"][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()

    def process_request(self,request,spider):
        request.headers["User-Agent"] = self.ua.random
        if  "proxy" not in request.meta or self.current_proxy.is_expiring:
            #如果请求的meta里面没有传递proxy,就给一个代理IP
            self.get_proxy()
            request.meta['proxy'] = self.current_proxy.proxy
        print(self.current_proxy.ip)

    def process_response(self,request,response,spider):
        # If the status is not 200, or the URL contains captcha-verify, the proxy
        # is unusable: switch proxies and return the request so it is retried.
        if response.status != 200 or "captcha-verify" in response.url:
            self.get_proxy()
            return request
        return response
Example #28
class IPProxyDownloadMiddleware:
    """设置IP代理下载中间件"""
    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expired:
            # fetch a proxy
            self.update_proxy()

        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or 'captcha' in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            # the previous attempt was blocked, so return the request for a retry
            return request
        # if the request wasn't blocked, return the response
        return response

    def update_proxy(self):
        """Update the proxy."""
        # Scrapy sits on the Twisted async framework; lock while updating
        # the proxy to avoid wasting proxy IPs
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expired or self.current_proxy.blacked:
            if PROXY_URL:
                text = requests.get(url=PROXY_URL).text
                result = json.loads(text)
                if result['data']:
                    data = result['data'][0]
                    proxy_model = ProxyModel(data)
                    self.current_proxy = proxy_model

        # release the lock after updating the proxy
        self.lock.release()
Example #29
class IPProxyDownloadMiddleware(object):
    # Requires purchased (high-anonymity) proxy IPs, e.g. from Kuaidaili.
    # Replace the IPs below with your own, or this will error out.
    #PROXIES = ["114.234.76.131:8060", "183.129.207.82:11845"]

    # Alternatively, copy in the API link from a proxy vendor (e.g. Zhima proxy), as below:
    PROXY_URL = 'http://webapi.http.zhimacangku.com/getip?num=1&type=2&pro=&city=0&yys=0&port=11&time=1&ts=1&ys=0&cs=0&lb=1&sb=0&ph=45&mr=1&regions='

    def __init__(self):
        super(IPProxyDownloadMiddleware, self).__init__()
        self.current_proxy = None
        self.lock = DeferredLock()

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            # fetch a proxy
            self.update_proxy()
            request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        if response.status != 200 or "captcha" in response.url:
            if not self.current_proxy.blacked:
                self.current_proxy.blacked = True
            self.update_proxy()
            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if not self.current_proxy or self.current_proxy.is_expiring or self.current_proxy.blacked:
            response = requests.get(self.PROXY_URL)
            text = response.text
            result = json.loads(text)
            if len(result['data']) > 0:
                data = result['data'][0]
                proxy_model = ProxyModel(data)
                self.current_proxy = proxy_model
        self.lock.release()
Example #30
class IPProxy(object):
    PROXY_URL = ""

    def __init__(self):

        self.lock = DeferredLock()
        self.current_proxy = None

    def process_request(self, request, spider):
        if 'proxy' not in request.meta or self.current_proxy.is_expiring:
            self.update_proxy()
        request.meta['proxy'] = self.current_proxy.proxy

    def process_response(self, request, response, spider):
        contents = eval(response.text).get("data")

        if contents == None:
            if not self.current_proxy.is_block:
                self.current_proxy.is_block = True
                print('Proxy %s is no longer valid' % self.current_proxy.proxy)
            self.update_proxy()

            return request
        return response

    def update_proxy(self):
        self.lock.acquire()
        if self.current_proxy is None or self.current_proxy.is_expiring or self.current_proxy.is_block:
            response_json = requests.get(self.PROXY_URL).json()
            try:
                print(response_json)

                self.current_proxy = ProxyModel(response_json['data'][0])

            except (KeyError, IndexError):
                print('Error fetching proxy!')
                print(response_json)
        self.lock.release()
Example #31
class _DhcpSetterCommon:
    # Provides support common code for shutting down on shutdown, and
    # handles locking.
    def __init__(self):
        self._lock = DeferredLock()
        self.is_shutdown = False
        reactor.addSystemEventTrigger('before', 'shutdown', self._shutdown)

    @inlineCallbacks
    def _cb(self, old_state, new_state):
        if self.is_shutdown:
            return
        yield self._lock.acquire()
        try:
            yield self._locked_cb(old_state, new_state)
        finally:
            self._lock.release()

    @inlineCallbacks
    def _shutdown(self):
        #print "Shutdown", self
        yield self._cb(None, None)
        self.is_shutdown = True
Example #32
    def process_request(self, request, spider):
        if 'proxy' in request.meta and not request.meta.get('_round_proxy'):
            return
        proxy = self.proxies.get_proxy()
        if not proxy:
            if self.stop_if_no_proxies:
                raise CloseSpider("no_proxies")
            else:
                logger.warning("No proxies available; marking all proxies "
                               "as unchecked")
                from twisted.internet.defer import DeferredLock
                lock = DeferredLock()
                lock.acquire()
                self.proxies.reset()
                lock.release()
                proxy = self.proxies.get_proxy()
                if proxy is None:
                    logger.error("No proxies available even after a reset.")
                    raise CloseSpider("no_proxies_after_reset")

        request.meta['proxy'] = proxy
        request.meta['download_slot'] = self.get_proxy_slot(proxy)
        request.meta['_round_proxy'] = True
Example #34
class InterfaceUpper:
    def __init__(self, iface):
        self.iface = iface
        self._lock = DeferredLock()
        CompositeStatePublisher(lambda x: x, [
            netlink_monitor.get_state_publisher(iface, IFSTATE.PLUGGED),
            netlink_monitor.get_state_publisher(iface, IFSTATE.UP),
        ]).subscribe(self._cb)
        self._is_shutdown = False
        self.state = None
        reactor.addSystemEventTrigger('before', 'shutdown', self._shutdown)
   
    @inlineCallbacks
    def restart(self):
        yield self._lock.acquire()
        try:
            yield system.system('ifconfig', self.iface, '0.0.0.0')
            yield system.system('ifconfig', self.iface, 'down')
        finally:
            self._lock.release()

    @inlineCallbacks
    def _cb(self, old_state, new_state):
        plugged, up = new_state
        self.state = new_state
        if plugged and not up and not self._is_shutdown:
            yield self._lock.acquire()
            try:
                yield system.system('ifconfig', self.iface, 'up')
            finally:
                self._lock.release()

    @inlineCallbacks
    def _shutdown(self):
        self._is_shutdown = True
        if self.state:
            yield self.restart()
Example #35
class Pulser_729(LabradServer):
    
    name = 'Pulser_729'
       
    def initServer(self):
        self.api  = api()
        self.inCommunication = DeferredLock()
        self.initializeBoard()
    
    def initializeBoard(self):
        connected = self.api.connectOKBoard()
        if not connected:
            raise Exception("Pulser Not Found")
    
    @setting(0, 'Reset DDS', returns = '')
    def resetDDS(self , c):
        """
        Reset the ram position to 0
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetAllDDS)
        self.inCommunication.release()
        
    @setting(1, "Program DDS", program = '*(is)', returns = '')
    def programDDS(self, c, program):
        """
        Programs the DDS, the input is a tuple of channel numbers and buf objects for the channels
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self._programDDSSequence, program)
        self.inCommunication.release()
    
    @setting(2, "Reinitialize DDS", returns = '')
    def reinitializeDDS(self, c):
        """
        Reprograms the DDS chip to its initial state
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.initializeDDS)
        self.inCommunication.release()
    
    def _programDDSSequence(self, program):
        '''takes the parsed dds sequence and programs the board with it'''
        for chan, buf in program:
            self.api.setDDSchannel(chan)
            self.api.programDDS(buf)
        self.api.resetAllDDS()
    
    def wait(self, seconds, result=None):
        """Returns a deferred that will be fired later"""
        d = Deferred()
        reactor.callLater(seconds, d.callback, result)
        return d
Example #36
class Pulser(DDS, LineTrigger):
    
    name = 'Pulser'
    onSwitch = Signal(611051, 'signal: switch toggled', '(ss)')
    
    #@inlineCallbacks
    def initServer(self):
        self.api  = api()
        self.channelDict = hardwareConfiguration.channelDict
        self.collectionTime = hardwareConfiguration.collectionTime
        self.collectionMode = hardwareConfiguration.collectionMode
        self.sequenceType = hardwareConfiguration.sequenceType
        self.isProgrammed = hardwareConfiguration.isProgrammed
        self.timeResolution = float(hardwareConfiguration.timeResolution)
        self.ddsDict = hardwareConfiguration.ddsDict
        self.timeResolvedResolution = hardwareConfiguration.timeResolvedResolution
        self.remoteChannels = hardwareConfiguration.remoteChannels
        self.collectionTimeRange = hardwareConfiguration.collectionTimeRange
        self.sequenceTimeRange = hardwareConfiguration.sequenceTimeRange
        self.haveSecondPMT = hardwareConfiguration.secondPMT
        self.haveDAC = hardwareConfiguration.DAC
        self.inCommunication = DeferredLock()
        self.clear_next_pmt_counts = 0
        self.hwconfigpath = os.path.dirname(inspect.getfile(hardwareConfiguration))
        print self.hwconfigpath
        #LineTrigger.initialize(self)
        #self.initializeBoard()
        #yield self.initializeRemote()
        #self.initializeSettings()
        #yield self.initializeDDS()
        self.ddsLock = True
        self.listeners = set()

    def initializeBoard(self):
        connected = self.api.connectOKBoard()
        if not connected:
            raise Exception ("Pulser Not Found")
            
    def initializeSettings(self):
        for channel in self.channelDict.itervalues():
            channelnumber = channel.channelnumber
            if channel.ismanual:
                state = self.cnot(channel.manualinv, channel.manualstate)
                self.api.setManual(channelnumber, state)
            else:
                self.api.setAuto(channelnumber, channel.autoinv)
    
    @inlineCallbacks
    def initializeRemote(self):
        self.remoteConnections = {}
        if len(self.remoteChannels):
            from labrad.wrappers import connectAsync
            for name,rc in self.remoteChannels.iteritems():
                try:
                    self.remoteConnections[name] = yield connectAsync(rc.ip)
                    print 'Connected to {}'.format(name)
                except:
                    print 'Not Able to connect to {}'.format(name)
                    self.remoteConnections[name] = None

    @setting(0, "New Sequence", returns = '')
    def newSequence(self, c):
        """
        Create New Pulse Sequence
        """
        c['sequence'] = Sequence(self)
    
    @setting(1, "Program Sequence", returns = '')
    def programSequence(self, c, sequence):
        """
        Programs Pulser with the current sequence.
        """
        #print "program sequence"
        sequence = c.get('sequence')
        if not sequence: raise Exception("Please create new sequence first")
        dds,ttl = sequence.progRepresentation()
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.programBoard, ttl)
        if dds is not None: yield self._programDDSSequence(dds)
        self.inCommunication.release()
        self.isProgrammed = True
        #self.api.resetAllDDS()
        #print "done programming"

    @setting(37, 'Get dds program representation', returns = '*(ss)')
    def get_dds_program_representation(self,c):   
        sequence = c.get('sequence')
        dds, ttl = sequence.progRepresentation()
        # As labrad cannot handle returnig the bytearray, we convert it to string first
        for key, value in dds.iteritems():
            dds[key] = str(value)
        # It also cannot handle dictionaries, so we recreate it as a list of tuples
        passable = dds.items()
        return passable

    @setting(38, 'Program dds and ttl')
    def program_dds_and_ttl(self,c,dds,ttl):
        dds = bytearray(dds)
        ttl = bytearray(ttl)
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.programBoard, ttl)
        yield self._programDDSSequenceBurst(dds)
        yield self.inCommunication.release()
        self.isProgrammed = True
        returnValue(self.isProgrammed)
    
    @setting(2, "Start Infinite", returns = '')
    def startInfinite(self,c):
        if not self.isProgrammed: raise Exception ("No Programmed Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setNumberRepeatitions, 0)
        yield deferToThread(self.api.resetSeqCounter)
        yield deferToThread(self.api.startLooped)
        self.sequenceType = 'Infinite'
        self.inCommunication.release()
    
    @setting(3, "Complete Infinite Iteration", returns = '')
    def completeInfinite(self,c):
        if self.sequenceType != 'Infinite': raise Exception( "Not Running Infinite Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.startSingle)
        self.inCommunication.release()
    
    @setting(4, "Start Single", returns = '')
    def start(self, c):
        if not self.isProgrammed: raise Exception ("No Programmed Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetSeqCounter)
        yield deferToThread(self.api.startSingle)
        self.sequenceType = 'One'
        self.inCommunication.release()
    
    @setting(5, 'Add TTL Pulse', channel = 's', start = 'v[s]', duration = 'v[s]')
    def addTTLPulse(self, c, channel, start, duration):
        """
        Add a TTL Pulse to the sequence, times are in seconds
        """
        if channel not in self.channelDict.keys(): raise Exception("Unknown Channel {}".format(channel))
        hardwareAddr = self.channelDict.get(channel).channelnumber
        sequence = c.get('sequence')
        start = start['s']
        duration = duration['s']
        #simple error checking
        if not ( (self.sequenceTimeRange[0] <= start <= self.sequenceTimeRange[1]) and (self.sequenceTimeRange[0] <= start + duration <= self.sequenceTimeRange[1])): raise Exception ("Time boundaries are out of range")
        if not duration >= self.timeResolution: raise Exception ("Incorrect duration")
        if not sequence: raise Exception ("Please create new sequence first")
        sequence.addPulse(hardwareAddr, start, duration)
    
    @setting(6, 'Add TTL Pulses', pulses = '*(sv[s]v[s])')
    def addTTLPulses(self, c, pulses):
        """
        Add multiple TTL Pulses to the sequence, times are in seconds. The pulses are a list in the same format as 'add ttl pulse'.
        """
        for pulse in pulses:
            channel = pulse[0]
            start = pulse[1]
            duration = pulse[2]
            yield self.addTTLPulse(c, channel, start, duration)
    
    @setting(7, "Extend Sequence Length", timeLength = 'v[s]')
    def extendSequenceLength(self, c, timeLength):
        """
        Optionally extends the total length of the sequence beyond the last TTL pulse.
        """
        sequence = c.get('sequence')
        if not (self.sequenceTimeRange[0] <= timeLength['s'] <= self.sequenceTimeRange[1]): raise Exception ("Time boundaries are out of range")
        if not sequence: raise Exception ("Please create new sequence first")
        sequence.extendSequenceLength(timeLength['s'])
        
    @setting(8, "Stop Sequence")
    def stopSequence(self, c):
        """Stops any currently running sequence"""
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetRam)
        if self.sequenceType =='Infinite':
            yield deferToThread(self.api.stopLooped)
        elif self.sequenceType =='One':
            yield deferToThread(self.api.stopSingle)
        elif self.sequenceType =='Number':
            yield deferToThread(self.api.stopLooped)
        self.inCommunication.release()
        self.sequenceType = None
        self.ddsLock = False
    
    @setting(9, "Start Number", repetition = 'w')
    def startNumber(self, c, repetition):
        """
        Starts the given number of iterations of the sequence
        """
        if not self.isProgrammed: raise Exception ("No Programmed Sequence")
        repetition = int(repetition)
        
        #print "start iterations of ", repetition
        
        if not 1 <= repetition <= (2**16 - 1): raise Exception ("Incorrect number of repetitions")
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setNumberRepeatitions, repetition)
        yield deferToThread(self.api.resetSeqCounter)
        yield deferToThread(self.api.startLooped)
        self.sequenceType = 'Number'
        self.inCommunication.release()

    @setting(10, "Human Readable TTL", returns = '*2s')
    def humanReadableTTL(self, c):
        """
        Returns a readable form of the programmed sequence for debugging
        """
        sequence = c.get('sequence')
        if not sequence: raise Exception ("Please create new sequence first")
        ttl,dds = sequence.humanRepresentation()
        return ttl.tolist()
    
    @setting(11, "Human Readable DDS", returns = '*(svv)')
    def humanReadableDDS(self, c):
        """
        Returns a readable form of the programmed sequence for debugging
        """
        sequence = c.get('sequence')
        if not sequence: raise Exception ("Please create new sequence first")
        ttl,dds = sequence.humanRepresentation()
        return dds
    
    @setting(12, 'Get Channels', returns = '*(sw)')
    def getChannels(self, c):
        """
        Returns all available channels, and the corresponding hardware numbers
        """
        d = self.channelDict
        keys = d.keys()
        numbers = [d[key].channelnumber for key in keys]
        return zip(keys,numbers)
    
    @setting(13, 'Switch Manual', channelName = 's', state= 'b')
    def switchManual(self, c, channelName, state = None):
        """
        Switches the given channel into the manual mode, by default will go into the last remembered state but can also
        pass the argument which state it should go into.
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        channelNumber = channel.channelnumber
        channel.ismanual = True
        if state is not None:
            channel.manualstate = state
        else:
            state = channel.manualstate
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setManual, channelNumber, self.cnot(channel.manualinv, state))
        self.inCommunication.release()
        if state:
            self.notifyOtherListeners(c,(channelName,'ManualOn'), self.onSwitch)
        else:
            self.notifyOtherListeners(c,(channelName,'ManualOff'), self.onSwitch)
    
    @setting(14, 'Switch Auto', channelName = 's', invert= 'b')
    def switchAuto(self, c, channelName, invert = None):
        """
        Switches the given channel into the automatic mode, with an optional inversion.
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        channelNumber = channel.channelnumber
        channel.ismanual = False
        if invert is not None:
            channel.autoinv = invert
        else:
            invert = channel.autoinv
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setAuto, channelNumber, invert)
        self.inCommunication.release()
        self.notifyOtherListeners(c,(channelName,'Auto'), self.onSwitch)

    @setting(15, 'Get State', channelName = 's', returns = '(bbbb)')
    def getState(self, c, channelName):
        """
        Returns the current state of the switch: in the form (Manual/Auto, ManualOn/Off, ManualInversionOn/Off, AutoInversionOn/Off)
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        answer = (channel.ismanual,channel.manualstate,channel.manualinv,channel.autoinv)
        return answer
    
    @setting(16, 'Wait Sequence Done', timeout = 'v', returns = 'b')
    def waitSequenceDone(self, c, timeout = None):
        """
        Returns true if the sequence has completed within a timeout period (in seconds)
        """
        if timeout is None: timeout = self.sequenceTimeRange[1]
        #print timeout
        requestCalls = int(timeout / 0.050 ) #number of request calls
        for i in range(requestCalls):
            yield self.inCommunication.acquire()
            done = yield deferToThread(self.api.isSeqDone)
            self.inCommunication.release()
            if done: returnValue(True)
            yield self.wait(0.050)
        returnValue(False)
    
    @setting(17, 'Repeatitions Completed', returns = 'w')
    def repeatitionsCompleted(self, c):
        """Check how many repeatitions have been completed in for the infinite or number modes"""
        yield self.inCommunication.acquire()
        completed = yield deferToThread(self.api.howManySequencesDone)
        self.inCommunication.release()
        returnValue(completed)

    
    @setting(21, 'Set Mode', mode = 's', returns = '')
    def setMode(self, c, mode):
        """
        Set the counting mode, either 'Normal' or 'Differential'
        In the Normal Mode, the FPGA automatically sends the counts with a preset frequency
        In the differential mode, the FPGA uses triggers from the pulse sequence for the counting
        frequency and to know when the repumping light is switched on or off.
        """
        if mode not in self.collectionTime.keys(): raise Exception("Incorrect mode")
        self.collectionMode = mode
        countRate = self.collectionTime[mode]
        yield self.inCommunication.acquire()
        if mode == 'Normal':
            #set the mode on the device and set update time for normal mode
            yield deferToThread(self.api.setModeNormal)
            yield deferToThread(self.api.setPMTCountRate, countRate)
        elif mode == 'Differential':
            yield deferToThread(self.api.setModeDifferential)
        self.clear_next_pmt_counts = 3 #assign to clear the next three counts
        self.inCommunication.release()
    
    @setting(22, 'Set Collection Time', new_time = 'v', mode = 's', returns = '')
    def setCollectTime(self, c, new_time, mode):
        """
        Sets how long to collect photons in either 'Normal' or 'Differential' mode of operation
        """
        new_time = float(new_time)
        if not self.collectionTimeRange[0]<=new_time<=self.collectionTimeRange[1]: raise Exception('incorrect collection time')
        if mode not in self.collectionTime.keys(): raise Exception("Incorrect mode")
        if mode == 'Normal':
            self.collectionTime[mode] = new_time
            yield self.inCommunication.acquire()
            yield deferToThread(self.api.setPMTCountRate, new_time)
            self.clear_next_pmt_counts = 3 #assign to clear the next three counts
            self.inCommunication.release()
        elif mode == 'Differential':
            self.collectionTime[mode] = new_time
            self.clear_next_pmt_counts = 3 #assign to clear the next three counts
        
    @setting(23, 'Get Collection Time', returns = '(vv)')
    def getCollectTime(self, c):
        return self.collectionTimeRange
    
    @setting(24, 'Reset FIFO Normal', returns = '')
    def resetFIFONormal(self,c):
        """
        Resets the FIFO on board, deleting all queued counts
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetFIFONormal)
        self.inCommunication.release()
    
    @setting(25, 'Get PMT Counts', returns = '*(vsv)')
    def getALLCounts(self, c):
        """
        Returns the list of counts stored on the FPGA in the form (v,s1,s2) where v is the count rate in KC/SEC,
        s1 is 'ON' in normal mode or in differential mode with 866 on, and 'OFF' in differential
        mode when 866 is off. s2 is the approximate time of acquisition.
        NOTE: For some reason, FPGA ReadFromBlockPipeOut never times out, so we cannot implement requesting more packets than
        are currently stored because it may hang the device.
        """
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetAllCounts)
        self.inCommunication.release()
        returnValue(countlist)
    
    @setting(26, 'Get Readout Counts', returns = '*v')
    def getReadoutCounts(self, c):
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetReadoutCounts)
        self.inCommunication.release()
        returnValue(countlist)
        
    @setting(27, 'Reset Readout Counts')
    def resetReadoutCounts(self, c):
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetFIFOReadout)
        self.inCommunication.release()

    @setting(39, 'Get Metablock Counts')
    def getMetablockCounts(self, c):
        yield self.inCommunication.acquire()
        counts = yield deferToThread(self.api.getMetablockCounts)
        self.inCommunication.release()
        string = bin(counts)
        print string
        string = string[2:] #remove the leading '0b'
        started_programming = int(string[0],2)
        ended_programming = int(string[1],2)
        counts = int(string[2:],2)
        returnValue([counts,started_programming,ended_programming])
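    # Note: the bin()-based parsing above only lines up when the leading
    # status bit is set; a mask-based sketch (assuming a 32-bit register with
    # the two status flags in bits 31 and 30) avoids the leading-zero issue:
    #     started = (counts >> 31) & 1
    #     ended = (counts >> 30) & 1
    #     value = counts & ((1 << 30) - 1)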

    @setting(40, 'Get hardwareconfiguration Path', returns = 's')
    def getHardwareconfigurationPath(self,c):
        ''' 
        Returns the path where the hardware configuration file is located
        '''
        return self.hwconfigpath
        
    #debugging settings
    @setting(90, 'Internal Reset DDS', returns = '')
    def internal_reset_dds(self, c):
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetAllDDS)
        self.inCommunication.release()
        
    @setting(91, 'Internal Advance DDS', returns = '')
    def internal_advance_dds(self, c):
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.advanceAllDDS)
        self.inCommunication.release()
    
    @setting(92, "Reinitialize DDS", returns = '')
    def reinitializeDDS(self, c):
        """
        Reprograms the DDS chip to its initial state
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.initializeDDS)
        self.inCommunication.release()
        
    def doGetAllCounts(self):
        inFIFO = self.api.getNormalTotal()
        reading = self.api.getNormalCounts(inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf, split)
        countlist = map(self.convertKCperSec, countlist)
        countlist = self.appendTimes(countlist, time.time())
        countlist = self.clear_pmt_counts(countlist)
        return countlist

    def clear_pmt_counts(self, l):
        '''removes the first clear_next_pmt_counts counts from the list'''
        try:
            while self.clear_next_pmt_counts:
                cleared = l.pop(0)
                self.clear_next_pmt_counts -= 1
            return l
        except IndexError:
            return []
    
    def doGetReadoutCounts(self):
        inFIFO = self.api.getReadoutTotal()
        reading = self.api.getReadoutCounts(inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf_readout, split)
        return countlist
    
    @staticmethod
    def infoFromBuf(buf):
        #converts the received buffer into useful information
        #the most significant bit of the buffer indicates whether 866 is on or off
        count = 65536*(256*ord(buf[1])+ord(buf[0]))+(256*ord(buf[3])+ord(buf[2]))
        if count >= 2**31:
            status = 'OFF'
            count = count % 2**31
        else:
            status = 'ON'
        return [count, status]
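    # Equivalently (a sketch assuming the same byte order): the four bytes
    # are two little-endian 16-bit words, high word first, so
    #     high, low = struct.unpack('<HH', buf)
    #     count = 65536 * high + low
    # with bit 31 of the result flagging that 866 was off.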
    
    #should make nicer by combining with above.
    @staticmethod
    def infoFromBuf_readout(buf):
        count = 65536*(256*ord(buf[1])+ord(buf[0]))+(256*ord(buf[3])+ord(buf[2]))
        return count
    
    def convertKCperSec(self, inp):
        [rawCount,typ] = inp
        countKCperSec = float(rawCount) / self.collectionTime[self.collectionMode] / 1000.
        return [countKCperSec, typ]
        
    def appendTimes(self, l, timeLast):
        #in the case that we received multiple PMT counts, uses the current time
        #and the collectionTime to guess the arrival time of the previous readings
        #i.e. ( [[1,2],[2,3]] , timeLast = 1.0, normalupdatetime = 0.1) ->
        # ( [(1,2,0.9),(2,3,1.0)])
        collectionTime = self.collectionTime[self.collectionMode]
        for i in range(len(l)):
            l[-i - 1].append(timeLast - i * collectionTime)
            l[-i - 1] = tuple(l[-i - 1])
        return l
    
    def split_len(self,seq, length):
        '''useful for splitting a string in length-long pieces'''
        return [seq[i:i+length] for i in range(0, len(seq), length)]
    
    @setting(28, 'Get Collection Mode', returns = 's')
    def getMode(self, c):
        return self.collectionMode
    
    @setting(31, "Reset Timetags")
    def resetTimetags(self, c):
        """Reset the time resolved FIFO to clear any residual timetags"""
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetFIFOResolved)
        self.inCommunication.release()
    
    @setting(32, "Get Timetags", returns = '*v')
    def getTimetags(self, c):
        """Get the time resolved timetags"""
        yield self.inCommunication.acquire()
        counted = yield deferToThread(self.api.getResolvedTotal)
        raw = yield deferToThread(self.api.getResolvedCounts, counted)
        self.inCommunication.release()
        arr = numpy.fromstring(raw, dtype = numpy.uint16)
        del(raw)
        arr = arr.reshape(-1,2)
        timetags =( 65536 * arr[:,0] + arr[:,1]) * self.timeResolvedResolution
        returnValue(timetags)
    
    @setting(33, "Get TimeTag Resolution", returns = 'v')
    def getTimeTagResolution(self, c):
        return self.timeResolvedResolution
    
    #Methods relating to using the optional second PMT
    @setting(36, 'Get Secondary PMT Counts', returns = '*(vsv)')
    def getAllSecondaryCounts(self, c):
        if not self.haveSecondPMT: raise Exception ("No Second PMT")
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetAllSecondaryCounts)
        self.inCommunication.release()
        returnValue(countlist)
            
    def doGetAllSecondaryCounts(self):
        if not self.haveSecondPMT: raise Exception ("No Second PMT")
        inFIFO = self.api.getSecondaryNormalTotal()
        reading = self.api.getSecondaryNormalCounts(inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf, split)
        countlist = map(self.convertKCperSec, countlist)
        countlist = self.appendTimes(countlist, time.time())
        return countlist        


    def wait(self, seconds, result=None):
        """Returns a deferred that will be fired later"""
        d = Deferred()
        reactor.callLater(seconds, d.callback, result)
        return d
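    # usage sketch: 'yield self.wait(0.050)' inside an inlineCallbacks method
    # pauses for 50 ms without blocking the reactor (see waitSequenceDone above)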
    
    def cnot(self, control, inp):
        if control:
            inp = not inp
        return inp
    
    def notifyOtherListeners(self, context, message, f):
        """
        Notifies all listeners except the one in the given context, executing function f
        """
        notified = self.listeners.copy()
        notified.remove(context.ID)
        f(message,notified)
    
    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
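
Every server in these examples serializes hardware access through a single
twisted.internet.defer.DeferredLock. A minimal standalone sketch of that
pattern follows; the class and api names are illustrative, not taken from
the servers above:

from twisted.internet.defer import DeferredLock, inlineCallbacks
from twisted.internet.threads import deferToThread

class HardwareServerSketch(object):
    def __init__(self, api):
        self.api = api
        self.inCommunication = DeferredLock()

    @inlineCallbacks
    def do_hardware_call(self):
        # acquire() returns a Deferred that fires once the lock is free,
        # so concurrent requests queue up instead of interleaving I/O
        yield self.inCommunication.acquire()
        try:
            # run the blocking driver call in a worker thread
            yield deferToThread(self.api.some_blocking_call)
        finally:
            # always release, otherwise every later request deadlocks
            self.inCommunication.release()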
Ejemplo n.º 37
0
class NormalPMTCountFPGA(LabradServer):
    name = 'NormalPMTCountFPGA'
    
    def initServer(self):
        self.collectionTime = {'Normal':0.100,'Differential':0.100}
        self.currentMode = 'Normal'
        self.inCommunication = DeferredLock()
        self.connectOKBoard()
    
    def connectOKBoard(self):
        self.xem = None
        fp = ok.FrontPanel()
        module_count = fp.GetDeviceCount()
        print "Found {} unused modules".format(module_count)
        for i in range(module_count):
            serial = fp.GetDeviceListSerial(i)
            tmp = ok.FrontPanel()
            tmp.OpenBySerial(serial)
            id = tmp.GetDeviceID()
            if id == okDeviceID:
                self.xem = tmp
                print 'Connected to {}'.format(id)
                self.programOKBoard(self.xem)
                return
        print 'Not found {}'.format(okDeviceID)
        print 'Will try again in {} seconds'.format(devicePollingPeriod)
        reactor.callLater(devicePollingPeriod, self.connectOKBoard)
    
    def programOKBoard(self, xem):
        print 'Programming FPGA'
        basepath = os.environ.get('LABRADPATH',None)
        if not basepath:
            raise Exception('Please set your LABRADPATH environment variable')
        path = os.path.join(basepath,'sqip/okfpgaservers/pmt.bit')
        prog = xem.ConfigureFPGA(path)
        if prog: raise Exception("Not able to program FPGA")
        pll = ok.PLL22150()
        xem.GetEepromPLL22150Configuration(pll)
        pll.SetDiv1(pll.DivSrc_VCO,4)
        xem.SetPLL22150Configuration(pll)
    
    def _resetFIFO(self):
        self.xem.ActivateTriggerIn(0x40,0)
    
    def _setUpdateTime(self, time):
        self.xem.SetWireInValue(0x01,int(1000 * time))
        self.xem.UpdateWireIns()
      
    @setting(0, 'Set Mode', mode = 's', returns = '')
    def setMode(self, c, mode):
        """
        Set the counting mode, either 'Normal' or 'Differential'
        In the Normal Mode, the FPGA automatically sends the counts with a preset frequency
        In the differential mode, the FPGA uses triggers from Paul's box for the counting
        frequency and to know when the repumping light is switched on or off.
        """
        if mode not in self.collectionTime.keys(): raise Exception("Incorrect mode")
        self.currentMode = mode
        yield self.inCommunication.acquire()
        if mode == 'Normal':
            #set the mode on the device and set update time for normal mode
            self.xem.SetWireInValue(0x00,0x0000)
            self._setUpdateTime(self.collectionTime[mode])
        elif mode == 'Differential':
            self.xem.SetWireInValue(0x00,0x0001)
        self.xem.UpdateWireIns()
        self._resetFIFO()
        self.inCommunication.release()
    
    @setting(1, 'Set Collection Time', time = 'v', mode = 's', returns = '')
    def setCollectTime(self, c, time, mode):
        """
        Sets how long to collect photons in either 'Normal' or 'Differential' mode of operation
        """
        time = float(time)
        if not 0.0<time<5.0: raise Exception('incorrect collection time')
        if mode not in self.collectionTime.keys(): raise Exception("Incorrect mode")
        if mode == 'Normal':
            self.collectionTime[mode] = time
            yield self.inCommunication.acquire()
            self._setUpdateTime(time)
            self.inCommunication.release()
        elif mode == 'Differential':
            self.collectionTime[mode] = time
    
    @setting(2, 'Reset FIFO', returns = '')
    def resetFIFO(self,c):
        """
        Resets the FIFO on board, deleting all queued counts
        """
        yield self.inCommunication.acquire()
        self._resetFIFO()
        self.inCommunication.release()
    
    @setting(3, 'Get All Counts', returns = '*(vsv)')
    def getALLCounts(self, c):
        """
        Returns the list of counts stored on the FPGA in the form (v,s1,s2) where v is the count rate in KC/SEC,
        s1 is 'ON' in normal mode or in differential mode with 866 on, and 'OFF' in differential
        mode when 866 is off. s2 is the approximate time of acquisition.
        
        NOTE: For some reason, FPGA ReadFromBlockPipeOut never times out, so we cannot implement requesting more packets than
        are currently stored because it may hang the device.
        """
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetAllCounts)
        self.inCommunication.release()
        returnValue(countlist)
    
    @setting(4, 'Get Current Mode', returns = 's')
    def getMode(self, c):
        return self.currentMode
        
    def doGetAllCounts(self):
        inFIFO = self._countsInFIFO()
        reading = self._readCounts(inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf, split)
        countlist = map(self.convertKCperSec, countlist)
        countlist = self.appendTimes(countlist, time.time())
        return countlist
    
    def convertKCperSec(self, inp):
        [rawCount,typ] = inp
        countKCperSec = float(rawCount) / self.collectionTime[self.currentMode] / 1000.
        return [countKCperSec, typ]
        
    def appendTimes(self, l, timeLast):
        #in the case that we received multiple PMT counts, uses the current time
        #and the collectionTime to guess the arrival time of the previous readings
        #i.e. ( [[1,2],[2,3]] , timeLast = 1.0, normalupdatetime = 0.1) ->
        #    ( [(1,2,0.9),(2,3,1.0)])
        collectionTime = self.collectionTime[self.currentMode]
        for i in range(len(l)):
            l[-i - 1].append(timeLast - i * collectionTime)
            l[-i - 1] = tuple(l[-i - 1])
        return l
        
    def split_len(self,seq, length):
        #useful for splitting a string in length-long pieces
        return [seq[i:i+length] for i in range(0, len(seq), length)]
    
    def _countsInFIFO(self):
        """
        returns how many counts are in FIFO
        """
        self.xem.UpdateWireOuts()
        inFIFO16bit = self.xem.GetWireOutValue(0x21)
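        # the wire-out value counts 16-bit words and each PMT count occupies
        # four bytes (two words), hence the halving below; this is inferred
        # from split_len(reading, 4) in doGetAllCounts, not from documentation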
        counts = inFIFO16bit / 2
        return counts
    
    def _readCounts(self, number):
        """
        reads the next number of counts from the FPGA
        """
        buf = "\x00"* ( number * 4 )
        self.xem.ReadFromBlockPipeOut(0xa0,4,buf)
        return buf
    
    @staticmethod
    def infoFromBuf(buf):
        #converts the received buffer into useful information
        #the most significant bit of the buffer indicates whether 866 is on or off
        count = 65536*(256*ord(buf[1])+ord(buf[0]))+(256*ord(buf[3])+ord(buf[2]))
        if count >= 2**31:
            status = 'OFF'
            count = count % 2**31
        else:
            status = 'ON'
        return [count, status]
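
A minimal round-trip check for the byte packing that infoFromBuf decodes;
pack_count is a hypothetical helper written for this sketch, not part of the
server above:

def pack_count(count, is_866_off):
    # set bit 31 when the 866 beam was off, mirroring the firmware's flag
    if is_866_off:
        count |= 1 << 31
    # high word first, each word little-endian, matching infoFromBuf
    high, low = count >> 16, count & 0xFFFF
    return chr(high & 0xFF) + chr(high >> 8) + chr(low & 0xFF) + chr(low >> 8)

assert NormalPMTCountFPGA.infoFromBuf(pack_count(1234, False)) == [1234, 'ON']
assert NormalPMTCountFPGA.infoFromBuf(pack_count(1234, True)) == [1234, 'OFF']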
Ejemplo n.º 38
0
class TriggerFPGA(LabradServer):
    name = 'Trigger'
    onNewUpdate = Signal(SIGNALID, 'signal: switch toggled', '(sb)')
    
    def initServer(self):
        self.inCommunication = DeferredLock()
        self.connectOKBoard()
        #create dictionary for triggers and switches in the form 'trigger': channel; 'switch': (channel, logicNotNegated, state)
        #the state written below represents the initial state of the server
        self.dict = {
                     'Triggers':{'PaulBox':0},
                     'Switches':{'866':[0x01,True, True], 'BluePI':[0x02,True, False], '397LocalHeating':[0x04,True,False]}
                     }
        self.initializeChannels()
        self.listeners = set()
        
    def connectOKBoard(self):
        self.xem = None
        fp = ok.FrontPanel()
        module_count = fp.GetDeviceCount()
        print "Found {} unused modules".format(module_count)
        for i in range(module_count):
            serial = fp.GetDeviceListSerial(i)
            tmp = ok.FrontPanel()
            tmp.OpenBySerial(serial)
            id = tmp.GetDeviceID()
            if id == okDeviceID:
                self.xem = tmp
                print 'Connected to {}'.format(id)
                self.programOKBoard(self.xem)
                return
        print 'Not found {}'.format(okDeviceID)
        print 'Will try again in {} seconds'.format(devicePollingPeriod)
        reactor.callLater(devicePollingPeriod, self.connectOKBoard)
    
    def programOKBoard(self, xem):
        print 'Programming FPGA'
        basepath = os.environ.get('LABRADPATH',None)
        if not basepath:
            raise Exception('Please set your LABRADPATH environment variable')
        path = os.path.join(basepath,'sqip/okfpgaservers/trigger.bit')
        prog = xem.ConfigureFPGA(path)
        if prog: raise Exception("Not able to program FPGA")
        pll = ok.PLL22150()
        xem.GetEepromPLL22150Configuration(pll)
        pll.SetDiv1(pll.DivSrc_VCO,4)
        xem.SetPLL22150Configuration(pll)
    
    def initializeChannels(self):
        for switchName in self.dict['Switches'].keys():
            channel = self.dict['Switches'][switchName][0]
            value = self.dict['Switches'][switchName][1]
            initialize = self.dict['Switches'][switchName][2]
            if initialize:
                print 'initializing {0} to {1}'.format(switchName, value)
                self._switch( channel, value)
        
    def _isSequenceDone(self):
        self.xem.UpdateTriggerOuts()
        return self.xem.IsTriggered(0x6A,0b00000001)
    
    def _trigger(self, channel):
        self.xem.ActivateTriggerIn(0x40, channel)
    
    def _switch(self, channel, value):
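        # FrontPanel's SetWireInValue(endpoint, value, mask) only touches the
        # masked bits, so the channel bit is set or cleared without disturbing
        # the other switches sharing wire 0x00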
        if value:
            self.xem.SetWireInValue(0x00,channel,channel)
        else:
            self.xem.SetWireInValue(0x00,0x00,channel)
        self.xem.UpdateWireIns()
    
    @setting(0, 'Get Trigger Channels', returns = '*s')
    def getTriggerChannels(self, c):
        """
        Returns available channels for triggering
        """
        return self.dict['Triggers'].keys()
    
    @setting(1, 'Get Switching Channels', returns = '*s')
    def getSwitchingChannels(self, c):
        """
        Returns available channels for switching
        """
        return self.dict['Switches'].keys()
    
    @setting(2, 'Trigger', channelName = 's')
    def trigger(self, c, channelName):
        """
        Triggers the selected channel
        """
        if channelName not in self.dict['Triggers'].keys(): raise Exception("Incorrect Channel")
        yield self.inCommunication.acquire()
        channel = self.dict['Triggers'][channelName]
        yield deferToThread(self._trigger, channel)
        yield self.inCommunication.release()
    
    @setting(3, 'Switch', channelName = 's', state= 'b')
    def switch(self, c, channelName, state):  
        """
        Switches the given channel
        """
        if channelName not in self.dict['Switches'].keys(): raise Exception("Incorrect Channel")
        if not self.dict['Switches'][channelName][1]: state = not state #allows for easy reversal of high/low
        yield self.inCommunication.acquire()
        channel = self.dict['Switches'][channelName][0]
        yield deferToThread(self._switch, channel, state)
        yield self.inCommunication.release()
        self.dict['Switches'][channelName][2] = state
        if not self.dict['Switches'][channelName][1]: state = not state #(if needed) reverse again for notification
        self.notifyOtherListeners(c, (channelName, state))
    
    @setting(4, 'Get State', channelName = 's', returns = 'b')
    def getState(self, c, channelName):
        """
        Returns the current state of the switch
        """
        if channelName not in self.dict['Switches'].keys(): raise Exception("Incorrect Channel")
        state = self.dict['Switches'][channelName][2]
        if not self.dict['Switches'][channelName][1]: state = not state #allows for easy reversal of high/low
        return state
    
    @setting(5, 'Wait for PBox Completion', timeout = 'v', returns = 'b')
    def waitPBoxCompletion(self, c, timeout = 10):
        """
        Returns true if Paul Box sequence has completed within a timeout period
        """
        requestCalls = int(timeout / 0.050 ) #number of request calls
        for i in range(requestCalls):
            yield self.inCommunication.acquire()
            done = yield deferToThread(self._isSequenceDone)
            yield self.inCommunication.release()
            if done: returnValue(True)
            yield deferToThread(time.sleep, 0.050)
        returnValue(False)
        
    def notifyOtherListeners(self, context, message):
        """
        Notifies all listeners except the one in the given context
        """
        notified = self.listeners.copy()
        notified.remove(context.ID)
        self.onNewUpdate(message, notified)     
            
    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
Ejemplo n.º 39
0
class NormalPMTFlow( LabradServer):
    
    name = 'NormalPMTFlow'
    onNewCount = Signal(SIGNALID, 'signal: new count', 'v')
    
    @inlineCallbacks
    def initServer(self):
        #improve on this to start in arbitrary order
        self.dv = yield self.client.data_vault
        self.n = yield self.client.normalpmtcountfpga
        self.pbox = yield self.client.paul_box
        self.trigger = yield self.client.trigger
        self.saveFolder = ['','PMT Counts']
        self.dataSetName = 'PMT Counts'
        self.dataSet = None
        self.collectTimes = {'Normal':0.100, 'Differential':0.100}
        self.lastDifferential = {'ON': 0, 'OFF': 0}
        self.currentMode = 'Normal'
        self.running = DeferredLock()
        self.requestList = []
        self.keepRunning = False
    
#    @inlineCallbacks
#    def confirmPBoxScripting(self):
#        self.script = 'DifferentialPMTCount.py'
#        self.variable = 'CountingInterval'
#        allScripts = yield self.pbox.get_available_scripts()
#        if script not in allScripts: raise Exception('Pauls Box script {} does not exist'.format(script))
#        allVariables = yield self.pbox.get_variable_list(script)
#        if variable not in allVariables[0]: raise Exception('Variable {} not found'.format(variable))
    
    @inlineCallbacks
    def makeNewDataSet(self):
        dir = self.saveFolder
        name = self.dataSetName
        yield self.dv.cd(dir, True)
        self.dataSet = yield self.dv.new(name, [('t', 'num')], [('KiloCounts/sec','866 ON','num'),('KiloCounts/sec','866 OFF','num'),('KiloCounts/sec','Differential Signal','num')])
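        # one independent column (time) and three dependents: 866-on counts,
        # 866-off counts, and their difference, all in kilocounts per second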
        self.startTime = time.time()
        yield self.addParameters()
    
    @inlineCallbacks
    def addParameters(self):
        yield self.dv.add_parameter('plotLive',True)
        yield self.dv.add_parameter('startTime',self.startTime)
    
    @setting(0, 'Set Save Folder', folder = '*s', returns = '')
    def setSaveFolder(self,c , folder):
        yield self.dv.cd(folder, True)
        self.saveFolder = folder
    
    @setting(1, 'Start New Dataset', setName = 's', returns = '')
    def setNewDataSet(self, c, setName = None):
        """Starts new dataset, if name not provided, it will be the same"""
        if setName is not None: self.dataSetName = setName
        yield self.makeNewDataSet()
    
    @setting( 2, "Set Mode", mode = 's', returns = '' )
    def setMode(self,c, mode):
        """
        Sets the counting mode, either 'Normal' or 'Differential'
        """
        if mode not in self.collectTimes.keys(): raise Exception('Incorrect Mode')
        if not self.keepRunning:
            self.currentMode = mode
            yield self.n.set_mode(mode)
        else:
            yield self.dostopRecording()
            self.currentMode = mode
            yield self.n.set_mode(mode)
            yield self.dorecordData()

    @setting(3, 'getCurrentMode', returns = 's')
    def getCurrentMode(self, c):
        """
        Returns the currently running mode
        """
        return self.currentMode
    
    @setting(4, 'Record Data', returns = '')
    def recordData(self, c):
        """
        Starts recording data of the current PMT mode into datavault
        """
        yield self.dorecordData()
    
    @inlineCallbacks
    def dorecordData(self):
        self.keepRunning = True
        yield self.n.set_collection_time(self.collectTimes[self.currentMode], self.currentMode)
        yield self.n.set_mode(self.currentMode)
        if self.currentMode == 'Differential':
            yield self._programPBOXDiff()
        if self.dataSet is None:
            yield self.makeNewDataSet()
        reactor.callLater(0, self._record)
    
    @setting(5, returns = '')
    def stopRecording(self,c):
        """
        Stop recording counts into Data Vault
        """
        yield self.dostopRecording()
    
    @inlineCallbacks
    def dostopRecording(self):
        self.keepRunning = False
        yield self.running.acquire()
        self.running.release()
        yield self._programPBOXEmpty()
        
    @setting(6, returns = 'b')
    def isRunning(self,c):
        """
        Returns whether or not currently recording
        """
        return self.keepRunning
        
    @setting(7, returns = 's')
    def currentDataSet(self,c):
        if self.dataSet is None: return ''
        name = self.dataSet[1]
        return name
    
    @setting(8, 'Set Time Length', timelength = 'v', mode = 's')
    def setTimeLength(self, c, timelength, mode = None):
        if mode is None: mode = self.currentMode
        if mode not in self.collectTimes.keys(): raise Exception('Incorrect Mode')
        if not 0 < timelength < 5.0: raise Exception('Incorrect Recording Time')
        self.collectTimes[mode] = timelength
        if mode == self.currentMode:
            yield self.running.acquire()
            yield self.n.set_collection_time(timelength, mode)
            if mode == 'Differential':
                yield self._programPBOXDiff()
            self.running.release()
        else:
            yield self.n.set_collection_time(timelength, mode)
        
    @setting(9, 'Get Next Counts', type = 's', number = 'w', average = 'b', returns = ['*v', 'v'])
    def getNextCounts(self, c, type, number, average = False):
        """
        Acquires next number of counts, where type can be 'ON' or 'OFF' or 'DIFF'
        Average is optionally True if the counts should be averaged
        
        Note that in differential mode, DIFF counts get updated every time, but ON and OFF
        get updated every other time.
        """
        if type not in ['ON', 'OFF','DIFF']: raise Exception('Incorrect type')
        if type in ['OFF','DIFF'] and self.currentMode == 'Normal': raise Exception('in the wrong mode to process this request')
        if not 0 < number < 1000: raise Exception('Incorrect Number')
        if not self.keepRunning: raise Exception('Not currently recording')
        d = Deferred()
        self.requestList.append(self.readingRequest(d, type, number))
        data = yield d
        if average:
            data = sum(data) / len(data)
        returnValue(data)
    
    @setting(10, 'Get Time Length', returns = 'v')
    def getMode(self, c):
        """
        Returns the collection time length of the current mode
        """
        return self.collectTimes[self.currentMode]
    
    @inlineCallbacks
    def _programPBOXDiff(self):
        yield self.pbox.send_command('DifferentialPMTCount.py',[['FLOAT','CountingInterval',str(10**6 * self.collectTimes['Differential'])]])
        yield deferToThread(time.sleep,.2) #give it enough time to finish programming
        yield self.trigger.trigger('PaulBox')
    
    @inlineCallbacks
    def _programPBOXEmpty(self):
        yield self.pbox.send_command('emptySequence.py',[['FLOAT','nothing','0']])
        yield self.trigger.trigger('PaulBox')
        
    class readingRequest():
        def __init__(self, d, type, count):
            self.d = d
            self.count = count
            self.type = type
            self.data = []
    
    def processRequests(self, data):
        for dataPoint in data:
            for req in self.requestList:
                if dataPoint[1] != 0 and req.type == 'ON':
                    req.data.append(dataPoint[1])
                    if len(req.data) == req.count:
                        req.d.callback(req.data)
                if dataPoint[2] != 0 and req.type == 'OFF':
                    req.data.append(dataPoint[2])
                    if len(req.data) == req.count:
                        req.d.callback(req.data)
                if dataPoint[3] != 0 and req.type == 'DIFF':
                    req.data.append(dataPoint[3])
                    if len(req.data) == req.count:
                        req.d.callback(req.data)
                        
    @inlineCallbacks
    def _record(self):
        yield self.running.acquire()
        if self.keepRunning:
            rawdata = yield self.n.get_all_counts()
            if len(rawdata) != 0:
                if self.currentMode == 'Normal':
                    toDataVault = [ [elem[2] - self.startTime, elem[0], 0, 0] for elem in rawdata] # converting to format [time, normal count, 0 , 0]
                elif self.currentMode =='Differential':
                    toDataVault = self.convertDifferential(rawdata)
                self.processRequests(toDataVault)
                self.processSignals(toDataVault)
                yield self.dv.add(toDataVault)
            self.running.release()
            delayTime = self.collectTimes[self.currentMode]/2 #set to half the collection time so as not to miss anything
            reactor.callLater(delayTime,self._record)
        else:
            self.running.release()
    
    def processSignals(self, data):
        lastPt = data[-1]
        NormalCount = lastPt[1]
        self.onNewCount(NormalCount)
    
    def convertDifferential(self, rawdata):
        totalData = []
        for dataPoint in rawdata:
            t = str(dataPoint[1])
            self.lastDifferential[t] = float(dataPoint[0])
            diff = self.lastDifferential['ON'] - self.lastDifferential['OFF']
            totalData.append( [ dataPoint[2] - self.startTime, self.lastDifferential['ON'], self.lastDifferential['OFF'], diff ] )
        return totalData
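
A standalone sketch of the request-queue pattern used by getNextCounts and
processRequests above: each caller receives a Deferred that fires once enough
data points have accumulated. The names here are illustrative:

from twisted.internet.defer import Deferred

class ReadingRequestSketch(object):
    def __init__(self, count):
        self.d = Deferred()        # fired with the data once complete
        self.count = count         # how many points the caller asked for
        self.data = []

    def feed(self, point):
        self.data.append(point)
        if len(self.data) == self.count:
            self.d.callback(self.data)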
Ejemplo n.º 40
0
class UnitRelationLifecycle(object):
    """Unit Relation Lifcycle management.

    Provides for watching related units in a relation, and executing hooks
    in response to changes. The lifecycle is driven by the workflow.

    The Unit relation lifecycle glues together a number of components.
    It controls a watcher that receives watch events from zookeeper,
    and it controls a hook scheduler which gets fed those events. When
    the scheduler wants to execute a hook, the executor is called with
    the hook path and the hook invoker.

    **Relation hook invocations do not maintain global order or
    determinism across relations**. They only maintain ordering and
    determinism within a relation. A shared scheduler across relations
    would be needed to maintain such behavior.

    See docs/source/internals/unit-workflow-lifecycle.rst for a brief
    discussion of some of the more interesting implementation decisions.
    """

    def __init__(self, client, unit_name, unit_relation, relation_ident,
                 unit_dir, state_dir, executor):
        self._client = client
        self._unit_dir = unit_dir
        self._relation_ident = relation_ident
        self._relation_name = relation_ident.split(":")[0]
        self._unit_relation = unit_relation
        self._unit_name = unit_name
        self._executor = executor
        self._run_lock = DeferredLock()
        self._log = logging.getLogger("unit.relation.lifecycle")
        self._error_handler = None

        schedule_path = os.path.join(
            state_dir, "%s.schedule" % unit_relation.internal_relation_id)
        self._scheduler = HookScheduler(
            client, self._execute_change_hook, self._unit_relation,
            self._relation_ident, unit_name, schedule_path)
        self._watcher = None

    @property
    def watching(self):
        """Are we queuing up hook executions in response to state changes?"""
        return self._watcher and self._watcher.running

    @property
    def executing(self):
        """Are we currently dequeuing and executing any queued hooks?"""
        return self._scheduler.running

    def set_hook_error_handler(self, handler):
        """Set an error handler to be invoked if a hook errors.

        The handler should accept two parameters, the RelationChange that
        triggered the hook, and the exception instance.
        """
        self._error_handler = handler

    @inlineCallbacks
    def start(self, start_watches=True, start_scheduler=True):
        """Start watching related units and executing change hooks.

        :param bool start_watches: True to start relation watches

        :param bool start_scheduler: True to run the scheduler and actually
            react to any changes delivered by the watcher
        """
        yield self._run_lock.acquire()
        try:
            # Start the hook execution scheduler.
            if start_scheduler and not self.executing:
                self._scheduler.run()
            # Create a watcher if we don't have one yet.
            if self._watcher is None:
                self._watcher = yield self._unit_relation.watch_related_units(
                    self._scheduler.cb_change_members,
                    self._scheduler.cb_change_settings)
            # And start the watcher.
            if start_watches and not self.watching:
                yield self._watcher.start()
        finally:
            self._run_lock.release()
        self._log.debug(
            "started relation:%s lifecycle", self._relation_name)

    @inlineCallbacks
    def stop(self, stop_watches=True):
        """Stop executing relation change hooks; maybe stop watching changes.

        :param bool stop_watches: True to stop watches as well as scheduler
            (which will prevent changes from being detected and queued, as well
            as stopping them being executed).
        """
        yield self._run_lock.acquire()
        try:
            if stop_watches and self.watching:
                self._watcher.stop()
            if self._scheduler.running:
                self._scheduler.stop()
        finally:
            yield self._run_lock.release()
        self._log.debug("stopped relation:%s lifecycle", self._relation_name)

    @inlineCallbacks
    def depart(self):
        """Inform the charm that the service has departed the relation.
        """
        self._log.debug("depart relation lifecycle")
        unit_id = self._unit_relation.internal_unit_id
        context = DepartedRelationHookContext(
            self._client, self._unit_name, unit_id, self._relation_name,
            self._unit_relation.internal_relation_id)
        change = RelationChange(self._relation_ident, "departed", "")
        invoker = self._get_invoker(context, change)
        hook_name = "%s-relation-broken" % self._relation_name
        yield self._execute_hook(invoker, hook_name, change)

    def _get_invoker(self, context, change):
        socket_path = os.path.join(self._unit_dir, HOOK_SOCKET_FILE)
        return RelationInvoker(
            context, change, "constant", socket_path, self._unit_dir,
            hook_log)

    def _execute_change_hook(self, context, change):
        """Invoked by the contained HookScheduler, to execute a hook.

        We utilize the HookExecutor to execute the hook. If an
        error occurs, it will be re-raised, unless an error handler
        is specified; see ``set_hook_error_handler``.
        """
        if change.change_type == "departed":
            hook_name = "%s-relation-departed" % self._relation_name
        elif change.change_type == "joined":
            hook_name = "%s-relation-joined" % self._relation_name
        else:
            hook_name = "%s-relation-changed" % self._relation_name

        invoker = self._get_invoker(context, change)
        return self._execute_hook(invoker, hook_name, change)

    @inlineCallbacks
    def _execute_hook(self, invoker, hook_name, change):
        hook_path = os.path.join(
            self._unit_dir, "charm", "hooks", hook_name)
        yield self._run_lock.acquire()
        self._log.debug("Executing hook %s", hook_name)
        try:
            yield self._executor(invoker, hook_path)
        except Exception, e:
            # We can't hold the run lock when we invoke the error
            # handler, or we get a deadlock if the handler
            # manipulates the lifecycle.
            yield self._run_lock.release()
            self._log.warn("Error in %s hook: %s", hook_name, e)

            if not self._error_handler:
                raise
            self._log.info(
                "Invoked error handler for %s hook", hook_name)
            yield self._error_handler(change, e)
            returnValue(False)
        else:
            yield self._run_lock.release()
            returnValue(True)
Ejemplo n.º 41
0
class PicomotorServer(LabradServer):

    name = 'PicomotorServer'
    
    # signal arguments are (axis, new absolute position)
    on_position_change = Signal(144821, 'signal: position change', '(ii)' )

    def construct_command(self, axis, command, nn = None):
        if nn is None:
            return str(axis) + command
        else:
            return str(axis) + command + str(nn)
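    # The method above builds the controller's ASCII commands of the form
    # '<axis><mnemonic>' or '<axis><mnemonic><value>' (e.g. '1PA100' for an
    # absolute move; the exact mnemonics are an assumption, not shown here).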

    @inlineCallbacks
    def initServer(self):
        self.controller = yield Controller( idProduct=0x4000, idVendor=0x104d )
        self.position_dict = dict.fromkeys( [1, 2, 3, 4], 0)
        self.setpoint = dict.fromkeys( [1, 2, 3, 4], 0)
        self.inCommunication = DeferredLock()
        self.listeners = set()


    @setting(0, 'Get Position', axis = 'i', returns = 'i')
    def get_position(self, c, axis):
        """
        Query the controller for the position of the given axis
        and also update position_dict
        """
        yield self.inCommunication.acquire()
        pos = yield self.controller.get_position(axis)
        self.inCommunication.release()

        self.position_dict[axis] = pos
        self.notifyOtherListeners(c, (axis, pos))
        returnValue(pos)

    @setting(1, 'Absolute Move', axis = 'i', pos = 'i')
    def absolute_move(self, c, axis, pos):
        """
        Move the given axis to a given absolute position
        """
        yield self.inCommunication.acquire()
        yield self.controller.absolute_move(axis, pos)
        self.inCommunication.release()

        self.position_dict[axis] = pos
        self.notifyOtherListeners(c, (axis, pos))    

    @setting(2, 'Relative Move', axis = 'i', steps = 'i', returns = 'i')
    def relative_move(self, c, axis, steps):
        """
        Move the given axis the given number of steps.
        Returns the new absolute position.
        """
        yield self.inCommunication.acquire()
        yield self.controller.relative_move(axis, steps)
        self.inCommunication.release()

        self.position_dict[axis] += steps
        self.notifyOtherListeners(c, (axis, self.position_dict[axis]) )
        
        returnValue(self.position_dict[axis])

    @setting(3, 'Mark current setpoint')
    def mark_setpoint(self, c):
        """
        Save the current position of all the axes
        to possibly return to later
        """
        
        axes = [1, 2, 3, 4]
        yield self.inCommunication.acquire()
        for axis in axes:
            pos = yield self.controller.get_position(axis)
            self.position_dict[axis] = pos
        self.inCommunication.release()
        
        self.setpoint = self.position_dict.copy()

    @setting(4, 'Return to setpoint')
    def return_to_setpoint(self, c):
        """
        Return all axes to the saved setpoint
        """
        axes = [1, 2, 3, 4]
        yield self.inCommunication.acquire()
        for axis in axes:
            yield self.controller.absolute_move( axis, self.setpoint[axis] )
            pos = self.setpoint[axis]
            self.position_dict[axis] = pos
            self.notifyOtherListeners(c, (axis, pos))
        self.inCommunication.release()

    def notifyOtherListeners(self, context, message):
        notified = self.listeners.copy()
        notified.remove(context.ID)
        self.on_position_change(message, notified)

    def initContext(self, c):
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
Ejemplo n.º 42
0
class AndorServer(LabradServer):
    """ Contains methods that interact with the Andor CCD Cameras"""

    name = "Andor Server"

    def initServer(self):
        self.listeners = set()
        self.camera = AndorCamera()
        self.lock = DeferredLock()
        self.gui = AndorVideo(self)

    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)

    def expireContext(self, c):
        self.listeners.remove(c.ID)

    def getOtherListeners(self,c):
        notified = self.listeners.copy()
        notified.remove(c.ID)
        return notified
    '''
    Temperature Related Settings
    '''
    @setting(0, "Get Temperature", returns = 'v[degC]')
    def get_temperature(self, c):
        """Gets Current Device Temperature"""
        temperature = None
        print 'acquiring: {}'.format(self.get_temperature.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.get_temperature.__name__)
            temperature  = yield deferToThread(self.camera.get_temperature)
        finally:
            print 'releasing: {}'.format(self.get_temperature.__name__)
            self.lock.release()
        if temperature is not None:
            temperature = WithUnit(temperature, 'degC')
            returnValue(temperature)
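    # The acquire / try / finally pattern above guarantees the camera lock is
    # released even when the driver call raises, so one failed request cannot
    # deadlock the requests queued behind it; the same shape repeats below.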

    @setting(1, "Get Cooler State", returns = 'b')
    def get_cooler_state(self, c):
        """Returns Current Cooler State"""
        cooler_state = None
        print 'acquiring: {}'.format(self.get_cooler_state.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.get_cooler_state.__name__)
            cooler_state = yield deferToThread(self.camera.get_cooler_state)
        finally:
            print 'releasing: {}'.format(self.get_cooler_state.__name__)
            self.lock.release()
        if cooler_state is not None:
            returnValue(cooler_state)

    @setting(3, "Set Temperature", setTemp = 'v[degC]', returns = '')
    def set_temperature(self, c, setTemp):
        """Sets The Target Temperature"""
        print 'acquiring: {}'.format(self.set_temperature.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.set_temperature.__name__)
            yield deferToThread(self.camera.set_temperature, setTemp['degC'])
        finally:
            print 'releasing: {}'.format(self.set_temperature.__name__)
            self.lock.release()

    @setting(4, "Set Cooler On", returns = '')
    def set_cooler_on(self, c):
        """Turns Cooler On"""
        print 'acquiring: {}'.format(self.set_cooler_on.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.set_cooler_on.__name__)
            yield deferToThread(self.camera.set_cooler_on)
        finally:
            print 'releasing: {}'.format(self.set_cooler_on.__name__)
            self.lock.release()

    @setting(5, "Set Cooler Off", returns = '')
    def set_cooler_off(self, c):
        """Turns Cooler On"""
        print 'acquiring: {}'.format(self.set_cooler_off.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.set_cooler_off.__name__)
            yield deferToThread(self.camera.set_cooler_off)
        finally:
            print 'releasing: {}'.format(self.set_cooler_off.__name__)
            self.lock.release()

    '''
    EMCCD Gain Settings
    '''
    @setting(6, "Get EMCCD Gain", returns = 'i')
    def getEMCCDGain(self, c):
        """Gets Current EMCCD Gain"""
        gain = None
        print 'acquiring: {}'.format(self.getEMCCDGain.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.getEMCCDGain.__name__)
            gain = yield deferToThread(self.camera.get_emccd_gain)
        finally:
            print 'releasing: {}'.format(self.getEMCCDGain.__name__)
            self.lock.release()
        if gain is not None:
            returnValue(gain)

    @setting(7, "Set EMCCD Gain", gain = 'i', returns = '')
    def setEMCCDGain(self, c, gain):
        """Sets Current EMCCD Gain"""
        print 'acquiring: {}'.format(self.setEMCCDGain.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setEMCCDGain.__name__)
            yield deferToThread(self.camera.set_emccd_gain, gain)
        finally:
            print 'releasing: {}'.format(self.setEMCCDGain.__name__)
            self.lock.release()
        if c is not None:
            self.gui.set_gain(gain)
    '''
    Read mode
    '''
    @setting(8, "Get Read Mode", returns = 's')
    def getReadMode(self, c):
        return self.camera.get_read_mode()

    @setting(9, "Set Read Mode", readMode = 's', returns = '')
    def setReadMode(self, c, readMode):
        """Sets Current Read Mode"""
        print 'acquiring: {}'.format(self.setReadMode.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setReadMode.__name__)
            yield deferToThread(self.camera.set_read_mode, readMode)
        finally:
            print 'releasing: {}'.format(self.setReadMode.__name__)
            self.lock.release()

    '''
    Shutter Mode
    '''

    @setting(100, "get_shutter_mode", returns = 's')
    def get_shutter_mode(self, c):
        return self.camera.get_shutter_mode()

    @setting(101, "set_shutter_mode", shutterMode = 's', returns = '')
    def set_shutter_mode(self, c, shutterMode):
        """Sets Current Shutter Mode"""
        print 'acquiring: {}'.format(self.set_shutter_mode.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.set_shutter_mode.__name__)
            yield deferToThread(self.camera.set_shutter_mode, shutterMode)
        finally:
            print 'releasing: {}'.format(self.set_shutter_mode.__name__)
            self.lock.release()

    '''
    Acquisition Mode
    '''
    @setting(10, "Get Acquisition Mode", returns = 's')
    def getAcquisitionMode(self, c):
        """Gets Current Acquisition Mode"""
        return self.camera.get_acquisition_mode()

    @setting(11, "Set Acquisition Mode", mode = 's', returns = '')
    def setAcquisitionMode(self, c, mode):
        """Sets Current Acquisition Mode"""
        print 'acquiring: {}'.format(self.setAcquisitionMode.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setAcquisitionMode.__name__)
            yield deferToThread(self.camera.set_acquisition_mode, mode)
        finally:
            print 'releasing: {}'.format(self.setAcquisitionMode.__name__)
            self.lock.release()
        self.gui.set_acquisition_mode(mode)
    '''
    Trigger Mode
    '''
    @setting(12, "Get Trigger Mode", returns = 's')
    def getTriggerMode(self, c):
        """Gets Current Trigger Mode"""
        return self.camera.get_trigger_mode()

    @setting(13, "Set Trigger Mode", mode = 's', returns = '')
    def setTriggerMode(self, c, mode):
        """Sets Current Trigger Mode"""
        print 'acquiring: {}'.format(self.setTriggerMode.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setTriggerMode.__name__)
            yield deferToThread(self.camera.set_trigger_mode, mode)
        finally:
            print 'releasing: {}'.format(self.setTriggerMode.__name__)
            self.lock.release()
        self.gui.set_trigger_mode(mode)

    '''
    Exposure Time
    '''
    @setting(14, "Get Exposure Time", returns = 'v[s]')
    def getExposureTime(self, c):
        """Gets Current Exposure Time"""
        time = self.camera.get_exposure_time()
        return WithUnit(time, 's')

    @setting(15, "Set Exposure Time", expTime = 'v[s]', returns = 'v[s]')
    def setExposureTime(self, c, expTime):
        """Sets Current Exposure Time"""
        print 'acquiring: {}'.format(self.setExposureTime.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setExposureTime.__name__)
            yield deferToThread(self.camera.set_exposure_time, expTime['s'])
        finally:
            print 'releasing: {}'.format(self.setExposureTime.__name__)
            self.lock.release()
        #need to request the actual set value because it may differ from the requested one when the request is not achievable
        time = self.camera.get_exposure_time()
        if c is not None:
            self.gui.set_exposure(time)
        returnValue(WithUnit(time, 's'))
    '''
    Image Region
    '''
    @setting(16, "Get Image Region", returns = '*i')
    def getImageRegion(self, c):
        """Gets Current Image Region"""
        return self.camera.get_image()

    @setting(17, "Set Image Region", horizontalBinning = 'i', verticalBinning = 'i', horizontalStart = 'i', horizontalEnd = 'i', verticalStart = 'i', verticalEnd = 'i', returns = '')
    def setImageRegion(self, c, horizontalBinning, verticalBinning, horizontalStart, horizontalEnd, verticalStart, verticalEnd):
        """Sets Current Image Region"""
        print 'acquiring: {}'.format(self.setImageRegion.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setImageRegion.__name__)
            yield deferToThread(self.camera.set_image, horizontalBinning, verticalBinning, horizontalStart, horizontalEnd, verticalStart, verticalEnd)
        finally:
            print 'releasing: {}'.format(self.setImageRegion.__name__)
            self.lock.release()
    '''
    Acquisition
    '''
    @setting(18, "Start Acquisition", returns = '')
    def startAcquisition(self, c):
        print 'acquiring: {}'.format(self.startAcquisition.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.startAcquisition.__name__)
            #speeds up the call to start_acquisition
            yield deferToThread(self.camera.prepare_acqusition)
            yield deferToThread(self.camera.start_acquisition)
            #necessary so that start_acquisition call completes even for long kinetic series
            #yield self.wait(0.050)
            yield self.wait(0.1)
        finally:
            print 'releasing: {}'.format(self.startAcquisition.__name__)
            self.lock.release()

    @setting(19, "Wait For Acquisition", returns = '')
    def waitForAcquisition(self, c):
        print 'acquiring: {}'.format(self.waitForAcquisition.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.waitForAcquisition.__name__)
            yield deferToThread(self.camera.wait_for_acquisition)
        finally:
            print 'releasing: {}'.format(self.waitForAcquisition.__name__)
            self.lock.release()

    @setting(20, "Abort Acquisition", returns = '')
    def abortAcquisition(self, c):
        if c is not None and self.gui.live_update_running:
            yield self.gui.stop_live_display()
        print 'acquiring: {}'.format(self.abortAcquisition.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.abortAcquisition.__name__)
            yield deferToThread(self.camera.abort_acquisition)
        finally:
            print 'releasing: {}'.format(self.abortAcquisition.__name__)
            self.lock.release()

    @setting(21, "Get Acquired Data", num_images = 'i',returns = '*i')
    def getAcquiredData(self, c, num_images = 1):
        """Get the acquired images"""
        print 'acquiring: {}'.format(self.getAcquiredData.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.getAcquiredData.__name__)
            image = yield deferToThread(self.camera.get_acquired_data, num_images)
        finally:
            print 'releasing: {}'.format(self.getAcquiredData.__name__)
            self.lock.release()
        returnValue(image)

    @setting(33, "Get Summed Data", num_images = 'i', returns = '*i')
    def getSummedData(self, c, num_images = 1):
        ''' Get the counts with the vertical axis summed over. '''

        print 'acquiring: {}'.format(self.getSummedData.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.getSummedData.__name__)
            images = yield deferToThread(self.camera.get_acquired_data, num_images)
            hbin, vbin, hstart, hend, vstart, vend = self.camera.get_image()
            x_pixels = int((hend - hstart + 1.) / hbin)
            y_pixels = int((vend - vstart + 1.) / vbin)
            images = np.reshape(images, (num_images, y_pixels, x_pixels))
            images = images.sum(axis=1)
            images = np.ravel(images, order='C')
            images = images.tolist()
        finally:
            print 'releasing: {}'.format(self.getSummedData.__name__)
            self.lock.release()
        returnValue(images)
    '''
    General
    '''
    @setting(22, "Get Camera Serial Number", returns = 'i')
    def getCameraSerialNumber(self, c):
        """Gets Camera Serial Number"""
        return self.camera.get_camera_serial_number()

    @setting(23, "Get Most Recent Image", returns = '*i')
    def getMostRecentImage(self, c):
        """Get all Data"""
#         print 'acquiring: {}'.format(self.getMostRecentImage.__name__)
        yield self.lock.acquire()
        try:
#             print 'acquired : {}'.format(self.getMostRecentImage.__name__)
            image = yield deferToThread(self.camera.get_most_recent_image)
        finally:
#             print 'releasing: {}'.format(self.getMostRecentImage.__name__)
            self.lock.release()
        returnValue(image)

    @setting(24, "Start Live Display", returns = '')
    def startLiveDisplay(self, c):
        """Starts live display of the images on the GUI"""
        yield self.gui.start_live_display()

    @setting(25, "Is Live Display Running", returns = 'b')
    def isLiveDisplayRunning(self, c):
        return self.gui.live_update_running

    @setting(26, "Get Number Kinetics", returns = 'i')
    def getNumberKinetics(self, c):
        """Gets Number Of Scans In A Kinetic Cycle"""
        return self.camera.get_number_kinetics()

    @setting(27, "Set Number Kinetics", numKin = 'i', returns = '')
    def setNumberKinetics(self, c, numKin):
        """Sets Number Of Scans In A Kinetic Cycle"""
        print 'acquiring: {}'.format(self.setNumberKinetics.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.setNumberKinetics.__name__)
            yield deferToThread(self.camera.set_number_kinetics, numKin)
        finally:
            print 'releasing: {}'.format(self.setNumberKinetics.__name__)
            self.lock.release()
    # UPDATED THE TIMEOUT. FIX IT LATER
    @setting(28, "Wait For Kinetic", timeout = 'v[s]',returns = 'b')
    def waitForKinetic(self, c, timeout = WithUnit(1,'s')):
        '''Waits until the given number of kinetic images are completed'''
        requestCalls = int(timeout['s'] / 0.050 ) #number of request calls
        for i in range(requestCalls):
            print 'acquiring: {}'.format(self.waitForKinetic.__name__)
            yield self.lock.acquire()
            try:
                print 'acquired : {}'.format(self.waitForKinetic.__name__)
                status = yield deferToThread(self.camera.get_status)
                #useful for debugging how many iterations have been completed in case of missed trigger pulses
                a,b = yield deferToThread(self.camera.get_series_progress)
                print a,b
                print status
            finally:
                print 'releasing: {}'.format(self.waitForKinetic.__name__)
                self.lock.release()
            if status == 'DRV_IDLE':
                returnValue(True)
            yield self.wait(0.050)
        returnValue(False)

    @setting(31, "Get Detector Dimensions", returns = 'ww')
    def get_detector_dimensions(self, c):
        print 'acquiring: {}'.format(self.get_detector_dimensions.__name__)
        yield self.lock.acquire()
        try:
            print 'acquired : {}'.format(self.get_detector_dimensions.__name__)
            dimensions = yield deferToThread(self.camera.get_detector_dimensions)
        finally:
            print 'releasing: {}'.format(self.get_detector_dimensions.__name__)
            self.lock.release()
        returnValue(dimensions)

    @setting(32, "getemrange", returns = '(ii)')
    def getemrange(self, c):
        #emrange = yield self.camera.get_camera_em_gain_range()
        #returnValue(emrange)
        return self.camera.get_camera_em_gain_range()


    def wait(self, seconds, result=None):
        """Returns a deferred that will be fired later"""
        d = Deferred()
        reactor.callLater(seconds, d.callback, result)
        return d

    def stop(self):
        self._stopServer()

    @inlineCallbacks
    def stopServer(self):
        """Shuts down camera before closing"""
        try:
            if self.gui.live_update_running:
                yield self.gui.stop_live_display()
            print 'acquiring: {}'.format(self.stopServer.__name__)
            yield self.lock.acquire()
            try:
                print 'acquired : {}'.format(self.stopServer.__name__)
                self.camera.shut_down()
            finally:
                print 'releasing: {}'.format(self.stopServer.__name__)
                self.lock.release()
        except Exception:
            #gui/camera not yet created
            pass
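
Aside: every setting above repeats the same acquire/try/finally/release dance around deferToThread. Twisted's DeferredLock.run() wraps exactly that pattern; a minimal sketch (LockedCamera and its camera argument are illustrative, not part of this server):

from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue
from twisted.internet.threads import deferToThread

class LockedCamera(object):
    def __init__(self, camera):
        self.camera = camera
        self.lock = DeferredLock()

    @inlineCallbacks
    def get_temperature(self):
        # run() acquires the lock, calls the function, waits on the Deferred
        # it returns, and releases the lock even if the call fails
        temperature = yield self.lock.run(deferToThread,
                                          self.camera.get_temperature)
        returnValue(temperature)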
Ejemplo n.º 43
0
class SqlFuse(FileSystem):
    MOUNT_OPTIONS = {"allow_other": None, "suid": None, "dev": None, "exec": None, "fsname": "fuse.sql"}

    rooter = DummyRooter()
    record = DummyRecorder()
    collector = DummyQuit()
    cleaner = DummyQuit()
    changer = DummyChanger()
    ichanger = DummyChanger()
    updatefinder = DummyQuit()
    copier = DummyQuit()

    db = DummyQuit()
    servers = []
    readonly = True

    # 0: no atime; 1: only if <mtime; 2: always
    atime = 1

    # 0: no atime; 1: when reading; 2: also when traversing
    diratime = 0
    slow = False

    shutting_down = False

    topology = None  # .topology wants to be an OrderedDict,
    neighbors = None  # but that's py2.7 and I'm too lazy to backport that.
    missing_neighbors = None

    def __init__(self, *a, **k):
        self._slot = {}
        self._slot_next = 1
        self._busy = {}
        self._update = {}
        self._xattr_name = {}
        self._xattr_id = {}
        self._xattr_lock = DeferredLock()  # protects name/id translation

        self.FileType = SqlFile
        self.DirType = SqlDir
        self.ENTRY_VALID = (10, 0)
        self.ATTR_VALID = (10, 0)

        self.remote = RemoteDict(self)
        # Note: Calling super().__init__ will happen later, in init_db()

    # map fdnum ⇒ filehandle
    def new_slot(self, x):
        """\
			Remember a file/dir handler. Return an ID.
			"""
        self._slot_next += 1
        while self._slot_next in self._slot:
            if self._slot_next == 999999999:
                self._slot_next = 1
            else:
                self._slot_next += 1
        self._slot[self._slot_next] = x
        return self._slot_next

    def old_slot(self, x):
        """\
			Fetch a file/dir handler, given its ID.
			"""
        return self._slot[x]

    def del_slot(self, x):
        """\
			Fetch a file/dir handler, given its ID.

			As this will be the last access, also delete the mapping.
			"""
        res = self._slot[x]
        del self._slot[x]
        return res

    # 	def _inode_path(self, path, tail=0):
    # 		path = path.split('/')
    # 		while path:
    # 			name = path.pop()
    # 			if name != '':
    # 				break
    # 		if not tail:
    # 			path.append(name)
    # 		depth=0
    # 		q=[""]
    # 		qa = {"root":self.inode}
    # 		for p in path:
    # 			if p == '':
    # 				continue
    # 			depth += 1
    # 			q.append("JOIN tree AS t%d ON t%d.inode = t%d.parent and t%d.name=${t%d_name}" % (depth, depth-1, depth, depth, depth))
    # 			qa["t"+str(depth)+"_name"] = p
    # 		q[0]="SELECT t%d.inode from tree as t0" % (depth,)
    # 		q.append("where t0.inode=${root}")
    # 		ino, = self.db.DoFn(" ".join(q),**qa)
    # 		return ino,name

    ### a few FUSE calls which are not handled by the inode object

    def rename(self, inode_old, name_old, inode_new, name_new, ctx=None):
        # This is atomic, as it's a transaction
        @inlineCallbacks
        def do_rename(db):
            try:
                yield inode_new._unlink(name_new, ctx=ctx, db=db)
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
                    raise
            yield db.Do(
                "update tree set name=${nname},parent=${ninode} where name=${oname} and parent=${oinode}",
                nname=name_new,
                ninode=inode_new.inum,
                oname=name_old,
                oinode=inode_old.inum,
            )

            def adj_size():
                inode_old.mtime = nowtuple()
                inode_old.size -= len(name_old) + 1
                inode_new.mtime = nowtuple()
                inode_new.size += len(name_new) + 1

            db.call_committed(adj_size)
            returnValue(None)

        return self.db(do_rename, DB_RETRIES)

    ## not supported, we're not file-backed
    # 	def bmap(self, *a,**k):
    # 		log_call()
    # 		raise IOError(errno.EOPNOTSUPP)

    ## not used, because the 'default_permissions' option is set
    # 	def access(self, inode, mode, ctx):
    # 		log_call()
    # 		raise IOError(errno.EOPNOTSUPP)

    @inlineCallbacks
    def statfs(self):
        """\
		File system status.
		We recycle some values, esp. free space, from the underlying storage.
		"""
        s = {}
        osb = os.statvfs(self.store)
        s["bsize"] = BLOCKSIZE
        s["frsize"] = BLOCKSIZE
        s["blocks"], s["files"] = yield self.db(
            lambda db: db.DoFn("select nblocks,nfiles from root where id=${root}", root=self.root_id), DB_RETRIES
        )
        s["bfree"] = (osb.f_bfree * osb.f_bsize) // BLOCKSIZE
        s["bavail"] = (osb.f_bavail * osb.f_bsize) // BLOCKSIZE
        s["ffree"] = osb.f_ffree
        # s['favail'] = osb.f_favail
        s["namelen"] = int(self.info.namelen)  # see SQL schema

        s["blocks"] += s["bfree"]
        s["files"] += s["ffree"]
        returnValue(s)

    ## xattr back-end. The table uses IDs because they're much shorter than the names.
    ## This code only handles the name/ID caching; actual attribute access is in the inode.

    @inlineCallbacks
    def xattr_name(self, xid, db):
        """\
			xattr key-to-name translation.

			Data consistency states that there must be one.
			"""
        try:
            returnValue(self._xattr_name[xid])
        except KeyError:
            pass

        yield self._xattr_lock.acquire()
        try:
            try:
                returnValue(self._xattr_name[xid])
            except KeyError:
                pass

            name, = yield db.DoFn("select name from xattr_name where id=${xid}", xid=xid)

            self._xattr_name[xid] = name
            self._xattr_id[name] = xid

            def _drop():
                del self._xattr_name[xid]
                del self._xattr_id[name]

            db.call_rolledback(_drop)
        finally:
            self._xattr_lock.release()

        returnValue(name)

    @inlineCallbacks
    def xattr_id(self, name, db, add=False):
        """\
			xattr name-to-key translation.

			Remembers null mappings, or creates a new one if @add is set.
			"""
        if len(name) == 0 or len(name) > self.info.attrnamelen:
            raise IOError(errno.ENAMETOOLONG)
        try:
            returnValue(self._xattr_id[name])
        except KeyError:
            pass

        yield self._xattr_lock.acquire()
        try:
            try:
                returnValue(self._xattr_id[name])
            except KeyError:
                pass

            try:
                xid, = yield db.DoFn("select id from xattr_name where name=${name}", name=name)
            except NoData:
                if not add:
                    self._xattr_id[name] = None
                    returnValue(None)
                xid = yield db.Do("insert into xattr_name(name) values(${name})", name=name)

            self._xattr_name[xid] = name
            self._xattr_id[name] = xid

            def _drop():
                del self._xattr_name[xid]
                del self._xattr_id[name]

            db.call_rolledback(_drop)
        finally:
            self._xattr_lock.release()

        returnValue(xid)
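
    # Note: xattr_name and xattr_id above share one double-checked caching
    # discipline -- read the cache without the lock, then re-check under the
    # lock before querying the database, so concurrent callers never issue
    # duplicate queries. Condensed (illustrative names, not from this class):
    #
    #     if key in cache:                  # fast path: no lock taken
    #         returnValue(cache[key])
    #     yield lock.acquire()
    #     try:
    #         if key not in cache:          # re-check: a concurrent caller
    #             cache[key] = yield load(key)  # may have filled it meanwhile
    #     finally:
    #         lock.release()
    #     returnValue(cache[key])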

    def call_node(self, dest, name, *a, **k):
        if dest in self.missing_neighbors:
            raise NoLink(dest, "missing")

        try:
            node = self.topology[dest]
            rem = self.remote[node]
        except KeyError:
            trace("error", "NoLink! %s %s %s %s", dest, name, repr(a), repr(k))
            self.missing_neighbors.add(dest)
            raise NoLink(dest, "missing 2")

        if dest == node:
            return getattr(rem, "do_" + name)(*a, **k)
        else:
            return rem.remote_exec(node, name, *a, **k)

    @inlineCallbacks
    def each_node(self, chk, name, *a, **k):
        e = None
        if not self.topology:
            raise RuntimeError("No topology information available")
            # for dest in self.topology.keys():
        for dest in self.neighbors:
            try:
                trace("remote", "%d: calling %s %s %s", dest, name, repr(a), repr(k))
                res = yield self.call_node(dest, name, *a, **k)
            except Exception as ee:
                trace("remote", "%d: error %s", dest, ee)
                # If any link is down, that's the error I return.
                en = sys.exc_info()
                if e is None or isinstance(en[1], NoLink):
                    e = en
            else:
                trace("remote", "%d: %s", dest, res)
                if chk and chk(res):
                    returnValue(res)
        if e is None:
            returnValue(None)
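        # Python 2 three-argument raise: re-raises the saved exception
        # with its original traceback intact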
        raise e[0], e[1], e[2]

    @inlineCallbacks
    def init_db(self, db, node):
        """\
			Setup the database part of the file system's operation.
			"""
        # TODO: setup a copying thread
        self.db = db
        # reactor.addSystemEventTrigger('before', 'shutdown', db.stopService)

        self.node = node

        @inlineCallbacks
        def do_init_db(db):
            try:
                self.node_id, self.root_id, self.root_inum, self.store, self.port = yield db.DoFn(
                    "select node.id,root.id,root.inode,node.files,node.port from node,root where root.id=node.root and node.name=${name}",
                    name=node,
                )
            except NoData:
                raise RuntimeError("data for '%s' is missing" % (self.node,))

            nnodes, = yield db.DoFn(
                "select count(*) from node where root=${root} and id != ${node}", root=self.root_id, node=self.node_id
            )
            self.single_node = not nnodes

            try:
                mode, = yield db.DoFn("select mode from inode where id=${inode}", inode=self.root_inum)
            except NoData:
                raise RuntimeError("database has not been initialized: inode %d is missing" % (self.inode,))
            if mode == 0:
                yield db.Do(
                    "update inode set mode=${dir} where id=${inode}",
                    dir=stat.S_IFDIR | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO,
                    inode=self.root_inum,
                )

            self.info = Info()
            yield self.info._load(db)
            if self.info.version != DBVERSION:
                raise RuntimeError("Need database version %s, got %s" % (DBVERSION, self.info.version))

            root = SqlInode(self, self.root_inum)
            yield root._load(db)
            returnValue(root)

        root = yield self.db(do_init_db)
        super(SqlFuse, self).__init__(root=root, filetype=SqlFile, dirtype=SqlDir)

    @inlineCallbacks
    def init(self, opt):
        """\
			Last step before running the file system mainloop.
			"""
        if opt.atime:
            self.atime = {"no": 0, "mtime": 1, "yes": 2}[opt.atime]
        if opt.diratime:
            self.diratime = {"no": 0, "read": 1, "access": 2}[opt.diratime]
        if opt.slow:
            self.slow = True
        self.services = MultiService()
        for a, b in (
            ("rooter", RootUpdater),
            ("updatefinder", UpdateCollector),
            ("changer", CacheRecorder),
            ("record", Recorder),
            ("collector", NodeCollector),
            ("ichanger", InodeWriter),
            ("copier", CopyWorker),
            ("cleaner", InodeCleaner),
        ):
            b = b(self)
            setattr(self, a, b)
            b.setServiceParent(self.services)
        reactor.addSystemEventTrigger("before", "shutdown", self.umount)
        yield self.services.startService()
        yield self.connect_all()
        self.record.trigger()

    @inlineCallbacks
    def umount(self):
        self.readonly = True
        if self.shutting_down:
            trace("shutdown", "called twice")
            return
        try:
            self.shutting_down = True

            trace("shutdown", "stopping services")
            yield self.services.stopService()
            trace("shutdown", "disconnect peers")
            yield self.disconnect_all()
            for k in self.remote.keys():
                del self.remote[k]
            n = 0
            for c in reactor.getDelayedCalls():
                n += 1
                trace("shutdown", "speed-up %s", c)
                c.reset(0)
            if n:
                trace("shutdown", "speed-up wait %d", n)
                # callLater returns an IDelayedCall, which yield does not wait
                # on; wrap the delay in a Deferred (twisted.internet.defer) so
                # the pause actually happens
                d = Deferred()
                reactor.callLater(n / 10.0, d.callback, None)
                yield d
            trace("shutdown", "run idle")
            yield IdleWorker.run()
            trace("shutdown", "flush inodes")
            yield self.db(flush_inodes)
            trace("shutdown", "super")
            yield super(SqlFuse, self).stop(False)
            trace("shutdown", "stop DB")
            yield self.db.stopService()
            trace("shutdown", "done")
        except Exception as e:
            log.err(e, "Shutting down")
            traceback.print_exc()

    @inlineCallbacks
    def connect_all(self):
        from sqlfuse.connect import METHODS

        for m in METHODS:
            try:
                m = __import__("sqlfuse.connect." + m, fromlist=("NodeServerFactory",))
                m = m.NodeServerFactory(self)
                yield m.connect()
            except NoLink:
                log.err(None, "No link to nodes %s" % (m,))
            except Exception:
                f = failure.Failure()
                log.err(f, "No link to nodes %s" % (m,))
            else:
                self.servers.append(m)

    @inlineCallbacks
    def disconnect_all(self):
        srv = self.servers
        self.servers = []
        for s in srv:
            try:
                yield s.disconnect()
            except Exception:
                log.err(None, "Disconnect")

    def mount(self, handler, flags):
        """\
			FUSE callback.
			"""
        self.handler = handler
        return {
            "flags": FUSE_ATOMIC_O_TRUNC | FUSE_ASYNC_READ | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES,
            "max_write": MAX_BLOCK,
        }
Ejemplo n.º 44
0
class Agent(object):
    """
    Main class associated with getting the internals of the
    agent's operations up and running, including adding or updating
    itself with the master, starting the periodic task manager,
    and handling shutdown conditions.
    """
    def __init__(self):
        # so parts of this instance are accessible elsewhere
        assert "agent" not in config
        config["agent"] = self
        self.services = {}
        self.register_shutdown_events = False
        self.last_free_ram_post = time.time()
        self.repeating_call_counter = {}
        self.shutdown_timeout = None
        self.post_shutdown_lock = DeferredLock()
        self.stop_lock = DeferredLock()
        self.reannounce_lock = utility.TimedDeferredLock()
        self.stopped = False

        # Register a callback so self.shutdown_timeout is set when
        # "shutting_down" is set or modified.
        config.register_callback(
            "shutting_down", self.callback_shutting_down_changed)

    @classmethod
    def agent_api(cls):
        """
        Return the API url for this agent or None if `agent_id` has not
        been set
        """
        try:
            return cls.agents_endpoint() + str(config["agent_id"])
        except KeyError:
            svclog.error(
                "The `agent_id` configuration value has not been set yet")
            return None

    @classmethod
    def agents_endpoint(cls):
        """
        Returns the API endpoint used for updating or creating
        agents on the master
        """
        return config["master_api"] + "/agents/"

    @property
    def shutting_down(self):
        return config.get("shutting_down", False)

    @shutting_down.setter
    def shutting_down(self, value):
        assert value in (True, False)
        config["shutting_down"] = value

    def repeating_call(
            self, delay, function, function_args=None, function_kwargs=None,
            now=True, repeat_max=None, function_id=None):
        """
        Causes ``function`` to be called repeatedly up until ``repeat_max``
        or until stopped.

        :param int delay:
            Number of seconds to delay between calls of ``function``.

            ..  note::

                ``delay`` is an approximate interval between when one call ends
                and the next one begins.  The exact time can vary due
                to how the Twisted reactor runs, how long it takes
                ``function`` to run and what else may be going on in the
                agent at the time.

        :param function:
            A callable function to run

        :type function_args: tuple, list
        :keyword function_args:
            Arguments to pass into ``function``

        :keyword dict function_kwargs:
            Keywords to pass into ``function``

        :keyword bool now:
            If True then run ``function`` right now in addition
            to scheduling it.

        :keyword int repeat_max:
            Repeat calling ``function`` this many times.  If not provided
            then we'll continue to repeat calling ``function`` until
            the agent shuts down.

        :keyword uuid.UUID function_id:
            Used internally to track a function's execution count.  This
            keyword exists so if you call :meth:`repeating_call` multiple
            times on the same function or method it will handle ``repeat_max``
            properly.
        """
        if self.shutting_down:
            svclog.debug(
                "Skipping task %s(*%r, **%r) [shutting down]",
                function.__name__, function_args, function_kwargs
            )
            return

        if function_args is None:
            function_args = ()

        if function_kwargs is None:
            function_kwargs = {}

        if function_id is None:
            function_id = uuid.uuid4()

        assert isinstance(delay, NUMERIC_TYPES[:-1])
        assert callable(function)
        assert isinstance(function_args, (list, tuple))
        assert isinstance(function_kwargs, dict)
        assert repeat_max is None or isinstance(repeat_max, int)
        repeat_count = self.repeating_call_counter.setdefault(function_id, 0)

        if repeat_max is None or repeat_count < repeat_max:
            svclog.debug(
                "Executing task %s(*%r, **%r).  Next execution in %s seconds.",
                function.__name__, function_args, function_kwargs, delay
            )

            # Run this function right now using another deferLater so
            # it's scheduled by the reactor and executed before we schedule
            # another.
            if now:
                deferLater(
                    reactor, 0, function, *function_args, **function_kwargs
                )
                self.repeating_call_counter[function_id] += 1
                repeat_count = self.repeating_call_counter[function_id]

            # Schedule the next call but only if we have not hit the max
            if repeat_max is None or repeat_count < repeat_max:
                deferLater(
                    reactor, delay, self.repeating_call, delay,
                    function, function_args=function_args,
                    function_kwargs=function_kwargs, now=True,
                    repeat_max=repeat_max, function_id=function_id
                )
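
    # Hypothetical usage (names are illustrative, not from this module):
    #     agent.repeating_call(30, heartbeat, now=True, repeat_max=10)
    # runs heartbeat immediately, then roughly every 30 seconds, until it
    # has been called ten times in total.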

    def should_reannounce(self):
        """Small method which acts as a trigger for :meth:`reannounce`"""
        if self.reannounce_lock.locked or self.shutting_down:
            return False

        contacted = config.master_contacted(update=False)
        if contacted is None:
            return True

        return utility.total_seconds(
            datetime.utcnow() - contacted) > config["agent_master_reannounce"]

    @inlineCallbacks
    def reannounce(self, force=False):
        """
        Method which is used to periodically contact the master.  This
        method is generally called as part of a scheduled task.
        """
        # Attempt to acquire the reannounce lock but fail after 70%
        # of the total time between reannouncements elapses.  This should
        # help prevent an accumulation of requests in the event the master
        # is having issues.
        try:
            yield self.reannounce_lock.acquire(
                config["agent_master_reannounce"] * .70
            )
        except utility.LockTimeoutError:
            svclog.debug("Timed out while waiting to acquire reannounce_lock")
            returnValue(None)

        if not self.should_reannounce() and not force:
            yield self.reannounce_lock.release()
            returnValue(None)

        svclog.debug("Announcing %s to master", config["agent_hostname"])
        data = None
        num_retry_errors = 0
        while True:  # for retries
            try:
                response = yield post_direct(
                    self.agent_api(),
                    data={
                        "state": config["state"],
                        "current_assignments": config.get(
                            "current_assignments", {} # may not be set yet
                        ),
                        "free_ram": memory.free_ram(),
                        "disks": disks.disks(as_dict=True)
                    }
                )

            except (ResponseNeverReceived, RequestTransmissionFailed) as error:
                num_retry_errors += 1
                if num_retry_errors > config["broken_connection_max_retry"]:
                    svclog.error(
                        "Failed to announce self to the master, "
                        "caught try-again type errors %s times in a row.",
                        num_retry_errors)
                    break
                else:
                    svclog.debug("While announcing self to master, caught "
                                 "%s. Retrying immediately.",
                                 error.__class__.__name__)
            except Exception as error:
                if force:
                    delay = http_retry_delay()
                    svclog.error(
                        "Failed to announce self to the master: %s.  Will "
                        "retry in %s seconds.", error, delay)
                    deferred = Deferred()
                    reactor.callLater(delay, deferred.callback, None)
                    yield deferred
                else:
                    # Don't retry because reannounce is called periodically
                    svclog.error(
                        "Failed to announce self to the master: %s.  This "
                        "request will not be retried.", error)
                    break

            else:
                data = yield treq.json_content(response)
                if response.code == OK:
                    config.master_contacted(announcement=True)
                    svclog.info("Announced self to the master server.")
                    break

                elif response.code >= INTERNAL_SERVER_ERROR:
                    if not self.shutting_down:
                        delay = http_retry_delay()
                        svclog.warning(
                            "Could not announce self to the master server, "
                            "internal server error: %s.  Retrying in %s "
                            "seconds.", data, delay)

                        deferred = Deferred()
                        reactor.callLater(delay, deferred.callback, None)
                        yield deferred
                    else:
                        svclog.warning(
                            "Could not announce to master. Not retrying "
                            "because of pending shutdown.")
                        break

                elif response.code == NOT_FOUND:
                    svclog.warning("The master says it does not know about our "
                                   "agent id. Posting as a new agent.")
                    yield self.post_agent_to_master()
                    break

                # If this is a client problem retrying the request
                # is unlikely to fix the issue so we stop here
                elif response.code >= BAD_REQUEST:
                    svclog.error(
                        "Failed to announce self to the master, bad "
                        "request: %s.  This request will not be retried.",
                        data)
                    break

                else:
                    svclog.error(
                        "Unhandled error when posting self to the "
                        "master: %s (code: %s).  This request will not be "
                        "retried.", data, response.code)
                    break

        yield self.reannounce_lock.release()
        returnValue(data)

    def system_data(self, requery_timeoffset=False):
        """
        Returns a dictionary of data containing information about the
        agent.  This is the information that is also passed along to
        the master.
        """
        # query the time offset and then cache it since
        # this is typically a blocking operation
        if config["agent_time_offset"] == "auto":
            config["agent_time_offset"] = None

        if requery_timeoffset or config["agent_time_offset"] is None:
            ntplog.info(
                "Querying ntp server %r for current time",
                config["agent_ntp_server"])

            ntp_client = NTPClient()
            try:
                pool_time = ntp_client.request(
                    config["agent_ntp_server"],
                    version=config["agent_ntp_server_version"])

            except Exception as e:
                ntplog.warning("Failed to determine network time: %s", e)

            else:
                config["agent_time_offset"] = \
                    int(pool_time.tx_time - time.time())

                # format the offset for logging purposes
                utcoffset = datetime.utcfromtimestamp(pool_time.tx_time)
                iso_timestamp = utcoffset.isoformat()
                ntplog.debug(
                    "network time: %s (local offset: %r)",
                    iso_timestamp, config["agent_time_offset"])

                if config["agent_time_offset"] != 0:
                    ntplog.warning(
                        "Agent is %r second(s) off from ntp server at %r",
                        config["agent_time_offset"],
                        config["agent_ntp_server"])

        data = {
            "id": config["agent_id"],
            "hostname": config["agent_hostname"],
            "version": config.version,
            "os_class": system.operating_system(),
            "os_fullname": platform(),
            "ram": int(config["agent_ram"]),
            "cpus": config["agent_cpus"],
            "cpu_name": cpu.cpu_name(),
            "port": config["agent_api_port"],
            "free_ram": memory.free_ram(),
            "time_offset": config["agent_time_offset"] or 0,
            "state": config["state"],
            "mac_addresses": list(network.mac_addresses()),
            "current_assignments": config.get(
                "current_assignments", {}), # may not be set yet
            "disks": disks.disks(as_dict=True)
        }

        try:
            gpu_names = graphics.graphics_cards()
            data["gpus"] = gpu_names
        except graphics.GPULookupError:
            pass

        if "remote_ip" in config:
            data.update(remote_ip=config["remote_ip"])

        if config["farm_name"]:
            data["farm_name"] = config["farm_name"]

        return data

    def build_http_resource(self):
        svclog.debug("Building HTTP Service")
        root = Resource()

        # static endpoints to redirect resources
        # to the right objects
        root.putChild(
            "favicon.ico",
            StaticPath(join(config["agent_static_root"], "favicon.ico"),
                       defaultType="image/x-icon"))
        root.putChild(
            "static",
            StaticPath(config["agent_static_root"]))

        # external endpoints
        root.putChild("", Index())
        root.putChild("configuration", Configuration())

        # api endpoints
        api = root.putChild("api", APIRoot())
        api.putChild("versions", Versions())
        v1 = api.putChild("v1", APIRoot())

        # Top level api endpoints
        v1.putChild("assign", Assign(self))
        v1.putChild("tasks", Tasks())
        v1.putChild("config", Config())
        v1.putChild("task_logs", TaskLogs())

        # Endpoints which are generally used for status
        # and operations.
        v1.putChild("status", Status())
        v1.putChild("stop", Stop())
        v1.putChild("restart", Restart())
        v1.putChild("update", Update())
        v1.putChild("check_software", CheckSoftware())

        return root

    def _start_manhole(self, port, username, password):
        """
        Starts the manhole server so we can connect to the agent
        over telnet
        """
        if "manhole" in self.services:
            svclog.warning(
                "Telnet manhole service is already running on port %s",
                self.services["manhole"].port)
            return

        svclog.info("Starting telnet manhole on port %s", port)

        # Since we don't always need this module we import
        # it here to save on memory and other resources
        from pyfarm.agent.manhole import manhole_factory

        # Contains the things which will be in the top level
        # namespace of the Python interpreter.
        namespace = {
            "config": config, "agent": self,
            "jobtypes": config["jobtypes"],
            "current_assignments": config["current_assignments"]}

        factory = manhole_factory(namespace, username, password)
        self.services["manhole"] = reactor.listenTCP(port, factory)

    def _start_http_api(self, port):
        """
        Starts the HTTP api so the master can communicate
        with the agent.
        """
        if "api" in self.services:
            svclog.warning(
                "HTTP API service already running on port %s",
                self.services["api"].port)
            return

        http_resource = self.build_http_resource()
        self.services["api"] = reactor.listenTCP(port, Site(http_resource))

    def start(self, shutdown_events=True, http_server=True):
        """
        Internal code which starts the agent, registers it with the master,
        and performs the other steps necessary to get things running.

        :param bool shutdown_events:
            If True register all shutdown events so certain actions, such as
            informing the master we're going offline, can take place.

        :param bool http_server:
            If True then construct and serve the externally facing http
            server.
        """
        if config["agent_manhole"]:
            self._start_manhole(config["agent_manhole_port"],
                                config["agent_manhole_username"],
                                config["agent_manhole_password"])

        # setup the internal http server so external entities can
        # interact with the service.
        if http_server:
            self._start_http_api(config["agent_api_port"])

        # Update the configuration with this pid (which may be different
        # than the original pid).
        config["pids"].update(child=os.getpid())

        # get ready to 'publish' the agent
        config.register_callback(
            "agent_id",
            partial(
                self.callback_agent_id_set, shutdown_events=shutdown_events))
        return self.post_agent_to_master()

    @inlineCallbacks
    def stop(self):
        """
        Internal code which stops the agent.  This will terminate any running
        processes, inform the master of the terminated tasks, update the
        state of the agent on the master.
        """
        yield self.stop_lock.acquire()
        if self.stopped:
            yield self.stop_lock.release()
            svclog.warning("Agent is already stopped")
            returnValue(None)

        svclog.info("Stopping the agent")

        self.shutting_down = True
        self.shutdown_timeout = (
            datetime.utcnow() + timedelta(
                seconds=config["agent_shutdown_timeout"]))

        if self.agent_api() is not None:
            try:
                yield self.post_shutdown_to_master()
            except Exception as error:  # pragma: no cover
                svclog.warning(
                    "Error while calling post_shutdown_to_master()", error)
        else:
            svclog.warning("Cannot post shutdown, agent_api() returned None")

        utility.remove_file(
            config["agent_lock_file"], retry_on_exit=True, raise_=False)

        svclog.debug("Stopping execution of jobtypes")
        for jobtype_id, jobtype in config["jobtypes"].items():
            try:
                jobtype.stop()
            except Exception as error:  # pragma: no cover
                svclog.warning(
                    "Error while calling stop() on %s (id: %s): %s",
                    jobtype, jobtype_id, error
                )
                config["jobtypes"].pop(jobtype_id)

        svclog.info(
            "Waiting on %s job types to terminate", len(config["jobtypes"]))

        while config["jobtypes"] and datetime.utcnow() < self.shutdown_timeout:
            for jobtype_id, jobtype in config["jobtypes"].copy().items():
                if not jobtype._has_running_processes():
                    svclog.warning(
                        "%r has not removed itself, forcing removal",
                        jobtype)
                    config["jobtypes"].pop(jobtype_id)

            # Brief delay so we don't tie up the cpu
            delay = Deferred()
            reactor.callLater(1, delay.callback, None)
            yield delay

        self.stopped = True
        yield self.stop_lock.release()
        returnValue(None)

    def sigint_handler(self, *_):
        utility.remove_file(
            config["run_control_file"], retry_on_exit=True, raise_=False)

        def errback(failure):
            svclog.error(
                "Error while attempting to shutdown the agent: %s", failure)

            # Stop the reactor but handle the exit code ourselves otherwise
            # Twisted will just exit with 0.
            reactor.stop()
            sys.exit(1)

        # Call stop() and wait for it to finish before we stop
        # the reactor.
        # NOTE: We're not using inlineCallbacks here because reactor.stop()
        # would be called in the middle of the generator unwinding
        deferred = self.stop()
        deferred.addCallbacks(lambda _: reactor.stop(), errback)

    @inlineCallbacks
    def post_shutdown_to_master(self):
        """
        This method is called before the reactor shuts down and lets the
        master know that the agent's state is now ``offline``
        """
        # We're under the assumption that something's wrong with
        # our code if we try to call this method before self.shutting_down
        # is set.
        assert self.shutting_down
        yield self.post_shutdown_lock.acquire()

        svclog.info("Informing master of shutdown")

        # Because post_shutdown_to_master is blocking and needs to
        # stop the reactor from finishing we perform the retry in-line
        data = None
        tries = 0
        num_retry_errors = 0
        response = None
        timed_out = False
        while True:
            tries += 1
            try:
                response = yield post_direct(
                    self.agent_api(),
                    data={
                        "state": AgentState.OFFLINE,
                        "free_ram": memory.free_ram(),
                        "current_assignments": config["current_assignments"]})

            except (ResponseNeverReceived, RequestTransmissionFailed) as error:
                num_retry_errors += 1
                if num_retry_errors > config["broken_connection_max_retry"]:
                    svclog.error(
                        "Failed to post shutdown to the master, "
                        "caught try-again errors %s times in a row.",
                        num_retry_errors)
                    break
                elif self.shutdown_timeout < datetime.utcnow():
                    svclog.error("While posting shutdown to master, caught "
                                 "%s. Shutdown timeout has been reached, not "
                                 "retrying.",
                                 error.__class__.__name__)
                    break
                else:
                    svclog.debug("While posting shutdown to master, caught "
                                 "%s. Retrying immediately.",
                                 error.__class__.__name__)
            # When we get a hard failure it could be an issue with the
            # server, although it's unlikely, so we retry.  Only retry
            # for a set period of time though since the shutdown has a timeout
            except Exception as failure:
                if self.shutdown_timeout > datetime.utcnow():
                    delay = http_retry_delay()
                    svclog.warning(
                        "State update failed due to unhandled error: %s.  "
                        "Retrying in %s seconds",
                        failure, delay)

                    # Wait for 'pause' to fire, introducing a delay
                    pause = Deferred()
                    reactor.callLater(delay, pause.callback, None)
                    yield pause

                else:
                    timed_out = True
                    svclog.warning(
                        "State update failed due to unhandled error: %s.  "
                        "Shutdown timeout reached, not retrying.",
                        failure)
                    break

            else:
                data = yield treq.json_content(response)
                if response.code == NOT_FOUND:
                    svclog.warning(
                        "Agent %r no longer exists, cannot update state.",
                        config["agent_id"])
                    break

                elif response.code == OK:
                    svclog.info(
                        "Agent %r has POSTed shutdown state change "
                        "successfully.",
                        config["agent_id"])
                    break

                elif response.code >= INTERNAL_SERVER_ERROR:
                    if self.shutdown_timeout > datetime.utcnow():
                        delay = http_retry_delay()
                        svclog.warning(
                            "State update failed due to server error: %s.  "
                            "Retrying in %s seconds.",
                            data, delay)

                        # Wait for 'pause' to fire, introducing a delay
                        pause = Deferred()
                        reactor.callLater(delay, pause.callback, None)
                        yield pause
                    else:
                        timed_out = True
                        svclog.warning(
                            "State update failed due to server error: %s.  "
                            "Shutdown timeout reached, not retrying.",
                            data)
                        break

                else:
                    # Fall-through for unexpected response codes; without this
                    # break the loop would retry forever with no delay.
                    svclog.error(
                        "Unhandled response code %s when posting shutdown; "
                        "not retrying.", response.code)
                    break

        yield self.post_shutdown_lock.release()
        extra_data = {
            "response": response,
            "timed_out": timed_out,
            "tries": tries,
            "retry_errors": num_retry_errors
        }

        if isinstance(data, dict):
            data.update(extra_data)
        else:
            data = extra_data

        returnValue(data)

    @inlineCallbacks
    def post_agent_to_master(self):
        """
        Runs the POST request to contact the master.  Running this method
        multiple times should be considered safe but is generally something
        that should be avoided.
        """
        url = self.agents_endpoint()
        data = self.system_data()

        try:
            response = yield post_direct(url, data=data)
        except Exception as failure:
            delay = http_retry_delay()
            if isinstance(failure, ConnectionRefusedError):
                svclog.error(
                    "Failed to POST agent to master, the connection was "
                    "refused. Retrying in %s seconds", delay)
            else:  # pragma: no cover
                svclog.error(
                    "Unhandled error when trying to POST the agent to the "
                    "master. The error was %s.", failure)

            if not self.shutting_down:
                svclog.info(
                    "Retrying failed POST to master in %s seconds.", delay)
                yield deferLater(reactor, delay, self.post_agent_to_master)
            else:
                svclog.warning("Not retrying POST to master, shutting down.")

        else:
            # Master might be down or have some other internal problems
            # that might eventually be fixed.  Retry the request.
            if response.code >= INTERNAL_SERVER_ERROR:
                if not self.shutting_down:
                    delay = http_retry_delay()
                    svclog.warning(
                        "Failed to post to master due to a server side error "
                        "error %s, retrying in %s seconds",
                        response.code, delay)
                    yield deferLater(reactor, delay, self.post_agent_to_master)
                else:
                    svclog.warning(
                        "Failed to post to master due to a server side error "
                        "error %s. Not retrying, because the agent is "
                        "shutting down", response.code)

            # Master is up but is rejecting our request because there's
            # something wrong with it.  Do not retry the request.
            elif response.code >= BAD_REQUEST:
                text = yield response.text()
                svclog.error(
                    "%s accepted our POST request but responded with code %s "
                    "which is a client side error.  The message the server "
                    "responded with was %r.  Sorry, but we cannot retry this "
                    "request as it's an issue with the agent's request.",
                    url, response.code, text)

            else:
                data = yield treq.json_content(response)
                config["agent_id"] = data["id"]
                config.master_contacted()

                if response.code == OK:
                    svclog.info(
                        "POST to %s was successful. Agent %s was updated.",
                        url, config["agent_id"])

                elif response.code == CREATED:
                    svclog.info(
                        "POST to %s was successful.  A new agent "
                        "with an id of %s was created.",
                        url, config["agent_id"])

                returnValue(data)

    def callback_agent_id_set(
            self, change_type, key, new_value, old_value, shutdown_events=True):
        """
        When `agent_id` is created we need to:

            * Register a shutdown event so that when the agent is told to
              shutdown it will notify the master of a state change.
            * Start the scheduled task manager
        """
        if key == "agent_id" and change_type == config.CREATED \
                and not self.register_shutdown_events:
            if shutdown_events:
                self.register_shutdown_events = True

            # set the initial free_ram
            config["free_ram"] = memory.free_ram()

            config.master_contacted()
            svclog.debug(
                "`%s` was %s, adding system event trigger for shutdown",
                key, change_type)

            self.repeating_call(
                config["agent_master_reannounce"], self.reannounce)

    def callback_shutting_down_changed(
            self, change_type, key, new_value, old_value):
        """
        When `shutting_down` is changed in the configuration, set or
        reset self.shutdown_timeout
        """
        if change_type not in (config.MODIFIED, config.CREATED):
            return

        if new_value is not True:
            self.shutdown_timeout = None
            return

        self.shutdown_timeout = timedelta(
            seconds=config["agent_shutdown_timeout"]) + datetime.utcnow()
        svclog.debug("New shutdown_timeout is %s", self.shutdown_timeout)
Ejemplo n.º 45
0
class Dataset(QtCore.QObject):
    
    """Class to handle incoming data and prepare them for plotting """
    def __init__(self, cxn, context, dataset):
        super(Dataset, self).__init__()
        self.accessingData = DeferredLock()
        self.cxn = cxn
        self.context = context # context of the first dataset in the window
        self.dataset = dataset
        self.data = None
        self.setupDataListener(self.context)
        
    # open dataset in order to listen for new data signals in current context        
    @inlineCallbacks
    def openDataset(self):
        yield self.cxn.data_vault.cd(DIRECTORY, context = self.context)
        yield self.cxn.data_vault.open(self.dataset, context = self.context)
        
    # sets up the listener for new data
    @inlineCallbacks
    def setupDataListener(self, context):
        yield self.cxn.data_vault.signal__data_available(11111, context = context)
        yield self.cxn.data_vault.addListener(listener = self.updateData, source = None, ID = 11111, context = context)
        #self.setupDeferred.callback(True)
         
    # new data signal
    def updateData(self,x,y):
        self.getData(self.context)
    
    def waitfor(self):
        # Unfinished sketch: set up a timer, start a looping call, and
        # return False if the timer expires before the parameter appears.
        try:
            self.cxn.data_vault.get('plot')
        except Exception:
            pass
        # if this parameter exists return True
      
#    # returns the number of things to plot
#    @inlineCallbacks
#    def getPlotnum(self,context):
#        variables = yield self.cxn.data_vault.variables(context = context)
#        plotNum = len(variables[1])
#        returnValue(plotNum) 

    # returns the current data
    @inlineCallbacks
    def getData(self,context):
        Data = yield self.cxn.data_vault.get(100, context = context)
        if self.data is None:
            self.data = Data.asarray
        else:
            yield self.accessingData.acquire()
            try:
                self.data = np.append(self.data, Data.asarray, 0)
            finally:
                self.accessingData.release()
        
    @inlineCallbacks
    def emptyDataBuffer(self):
        print 'in empty, waiting to acquire'
        yield self.accessingData.acquire()
        del(self.data)
        self.data = None
        print 'self data should be none now'
        self.accessingData.release()
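
The acquire/try/release dance around self.data above is what Twisted's DeferredLock.run() does for you: it acquires the lock, invokes a callable, and releases the lock even if the callable fails. A minimal sketch of the same buffer-guarding idea (the Buffer class and its attributes are hypothetical stand-ins for the Dataset above, not part of the original code):

from twisted.internet.defer import DeferredLock


class Buffer(object):
    def __init__(self):
        self.data = None
        self.lock = DeferredLock()

    def _append(self, chunk):
        # Runs only while the lock is held.
        if self.data is None:
            self.data = chunk
        else:
            self.data = self.data + chunk

    def append(self, chunk):
        # run() acquires the lock, calls _append(chunk), and releases
        # the lock afterwards, even if _append raises.
        return self.lock.run(self._append, chunk)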
Ejemplo n.º 46
0
class UnitLifecycle(object):
    """Manager for a unit lifecycle.

    Primarily used by the workflow interaction, to modify unit behavior
    according to the current unit workflow state and transitions.
    """

    def __init__(self, client, unit, service, unit_path, executor):
        self._client = client
        self._unit = unit
        self._service = service
        self._executor = executor
        self._unit_path = unit_path
        self._relations = {}
        self._running = False
        self._watching_relation_memberships = False
        self._watching_relation_resolved = False
        self._run_lock = DeferredLock()
        self._log = logging.getLogger("unit.lifecycle")

    def get_relation_workflow(self, relation_id):
        """Accessor to a unit relation workflow, by relation id.

        Primarily intended for and used by unit tests. Raises
        a KeyError if the relation workflow does not exist.
        """
        return self._relations[relation_id]

    @inlineCallbacks
    def install(self, fire_hooks=True):
        """Invoke the unit's install hook.
        """
        if fire_hooks:
            yield self._execute_hook("install")

    @inlineCallbacks
    def upgrade_charm(self, fire_hooks=True):
        """Invoke the unit's upgrade-charm hook.
        """
        if fire_hooks:
            yield self._execute_hook("upgrade-charm", now=True)
        # Restart hook queued hook execution.
        self._executor.start()

    @inlineCallbacks
    def start(self, fire_hooks=True):
        """Invoke the start hook, and setup relation watching.
        """
        self._log.debug("pre-start acquire, running:%s", self._running)
        yield self._run_lock.acquire()
        self._log.debug("start running, unit lifecycle")
        watches = []

        try:
            # Verify current state
            assert not self._running, "Already started"

            # Execute the start hook
            if fire_hooks:
                yield self._execute_hook("config-changed")
                yield self._execute_hook("start")

            # If we have any existing relations in memory, start them.
            if self._relations:
                self._log.debug("starting relation lifecycles")

            for workflow in self._relations.values():
                yield workflow.transition_state("up")

            # Establish a watch on the existing relations.
            if not self._watching_relation_memberships:
                self._log.debug("starting service relation watch")
                watches.append(self._service.watch_relation_states(
                    self._on_service_relation_changes))
                self._watching_relation_memberships = True

            # Establish a watch for resolved relations
            if not self._watching_relation_resolved:
                self._log.debug("starting unit relation resolved watch")
                watches.append(self._unit.watch_relation_resolved(
                    self._on_relation_resolved_changes))
                self._watching_relation_resolved = True

            # Set current status
            self._running = True
        finally:
            self._run_lock.release()

        # Give up the run lock before waiting on initial watch invocations.
        results = yield DeferredList(watches, consumeErrors=True)

        # If there's an error reraise the first one found.
        errors = [e[1] for e in results if not e[0]]
        if errors:
            returnValue(errors[0])

        self._log.debug("started unit lifecycle")

    @inlineCallbacks
    def stop(self, fire_hooks=True):
        """Stop the unit, executes the stop hook, and stops relation watching.
        """
        self._log.debug("pre-stop acquire, running:%s", self._running)
        yield self._run_lock.acquire()
        try:
            # Verify state
            assert self._running, "Already Stopped"

            # Stop relation lifecycles
            if self._relations:
                self._log.debug("stopping relation lifecycles")

            for workflow in self._relations.values():
                yield workflow.transition_state("down")

            if fire_hooks:
                yield self._execute_hook("stop")

            # Set current status
            self._running = False
        finally:
            self._run_lock.release()
        self._log.debug("stopped unit lifecycle")

    @inlineCallbacks
    def configure(self, fire_hooks=True):
        """Inform the unit that its service config has changed.
        """
        if not fire_hooks:
            returnValue(None)
        yield self._run_lock.acquire()
        try:
            # Verify State
            assert self._running, "Needs to be running."

            # Execute hook
            yield self._execute_hook("config-changed")
        finally:
            self._run_lock.release()
        self._log.debug("configured unit")

    @inlineCallbacks
    def _on_relation_resolved_changes(self, event):
        """Callback for unit relation resolved watching.

        The callback is invoked whenever the relation resolved
        settings change.
        """
        self._log.debug("relation resolved changed")
        # Acquire the run lock, and process the changes.
        yield self._run_lock.acquire()

        try:
            # If the unit lifecycle isn't running we shouldn't process
            # any relation resolutions.
            if not self._running:
                self._log.debug("stop watch relation resolved changes")
                self._watching_relation_resolved = False
                raise StopWatcher()

            self._log.info("processing relation resolved changed")
            if self._client.connected:
                yield self._process_relation_resolved_changes()
        finally:
            yield self._run_lock.release()

    @inlineCallbacks
    def _process_relation_resolved_changes(self):
        """Invoke retry transitions on relations if their not running.
        """
        relation_resolved = yield self._unit.get_relation_resolved()
        if relation_resolved is None:
            returnValue(None)
        else:
            yield self._unit.clear_relation_resolved()

        keys = set(relation_resolved).intersection(self._relations)
        for rel_id in keys:
            relation_workflow = self._relations[rel_id]
            relation_state = yield relation_workflow.get_state()
            if relation_state == "up":
                continue
            yield relation_workflow.transition_state("up")

    @inlineCallbacks
    def _on_service_relation_changes(self, old_relations, new_relations):
        """Callback for service relation watching.

        The callback is used to manage the unit relation lifecycle in
        accordance with the current relations of the service.

        @param old_relations: Previous service relations for a service. On the
               initial execution, this value is None.
        @param new_relations: Current service relations for a service.
        """
        self._log.debug(
            "services changed old:%s new:%s", old_relations, new_relations)

        # Acquire the run lock, and process the changes.
        yield self._run_lock.acquire()
        try:
            # If the lifecycle is not running, then stop the watcher
            if not self._running:
                self._log.debug("stop service-rel watcher, discarding changes")
                self._watching_relation_memberships = False
                raise StopWatcher()

            self._log.debug("processing relations changed")
            yield self._process_service_changes(old_relations, new_relations)
        finally:
            self._run_lock.release()

    @inlineCallbacks
    def _process_service_changes(self, old_relations, new_relations):
        """Add and remove unit lifecycles per the service relations Determine.
        """
        # changes relation delta of global zk state with our memory state.
        new_relations = dict([(service_relation.internal_relation_id,
                               service_relation) for
                              service_relation in new_relations])
        added = set(new_relations.keys()) - set(self._relations.keys())
        removed = set(self._relations.keys()) - set(new_relations.keys())

        # Stop and remove old ones.

        # Transitioning the workflow directly involves additional yields,
        # during which concurrent events from subsequent watch firings
        # would be executed; i.e. if the relation is broken but a
        # subsequent modify comes in for a related unit, the modify would
        # trigger a hook execution. To prevent this we stop the lifecycle
        # immediately, before executing the transition.
        # See UnitLifecycleTest.test_removed_relation_depart
        for relation_id in removed:
            yield self._relations[relation_id].lifecycle.stop()

        for relation_id in removed:
            workflow = self._relations.pop(relation_id)
            yield workflow.transition_state("departed")

        # Process new relations.
        for relation_id in added:
            service_relation = new_relations[relation_id]
            try:
                unit_relation = yield service_relation.get_unit_state(
                    self._unit)
            except UnitRelationStateNotFound:
                # This unit has not yet been assigned a unit relation state;
                # go ahead and add one.
                unit_relation = yield service_relation.add_unit_state(
                    self._unit)

            self._log.debug(
                "Starting new relation: %s", service_relation.relation_name)

            workflow = self._get_unit_relation_workflow(unit_relation,
                                                        service_relation)
            # Start it before storing it.
            yield workflow.fire_transition("start")
            self._relations[service_relation.internal_relation_id] = workflow

    def _get_unit_path(self):
        """Retrieve the root path of the unit.
        """
        return self._unit_path

    def _get_unit_relation_workflow(self, unit_relation, service_relation):

        lifecycle = UnitRelationLifecycle(self._client,
                                          self._unit.unit_name,
                                          unit_relation,
                                          service_relation.relation_name,
                                          self._get_unit_path(),
                                          self._executor)

        state_directory = os.path.abspath(os.path.join(
            self._unit_path, "../../state"))

        workflow = RelationWorkflowState(
            self._client, unit_relation, lifecycle, state_directory)

        return workflow

    @inlineCallbacks
    def _execute_hook(self, hook_name, now=False):
        """Execute the hook with the given name.

        For priority hooks, the hook is scheduled and then the
        executor is started, before waiting on the result.
        """
        unit_path = self._get_unit_path()
        hook_path = os.path.join(unit_path, "charm", "hooks", hook_name)
        socket_path = os.path.join(unit_path, HOOK_SOCKET_FILE)

        invoker = Invoker(HookContext(self._client, self._unit.unit_name),
                          None, "constant", socket_path,
                          self._unit_path, hook_log)
        if now:
            yield self._executor.run_priority_hook(invoker, hook_path)
        else:
            yield self._executor(invoker, hook_path)
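
start() above collects its watch deferreds with DeferredList(consumeErrors=True) and surfaces only the first error found. A standalone sketch of that gather-and-reraise pattern (wait_all is a hypothetical helper, not part of the lifecycle code):

from twisted.internet.defer import DeferredList, inlineCallbacks, returnValue


@inlineCallbacks
def wait_all(deferreds):
    # consumeErrors=True keeps failed deferreds from being reported as
    # unhandled errors; each result is a (success, value) pair.
    results = yield DeferredList(deferreds, consumeErrors=True)
    errors = [value for success, value in results if not success]
    if errors:
        # Each error value is a twisted.python.failure.Failure;
        # re-raise the first one.
        errors[0].raiseException()
    returnValue([value for success, value in results])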
Ejemplo n.º 47
0
class UnitRelationLifecycle(object):
    """Unit Relation Lifcycle management.

    Provides for watching related units in a relation, and executing hooks
    in response to changes. The lifecycle is driven by the workflow.

    The Unit relation lifecycle glues together a number of components.
    It controls a watcher that receives watch events from zookeeper,
    and it controls a hook scheduler which gets fed those events. When
    the scheduler wants to execute a hook, the executor is called with
    the hook path and the hook invoker.

    **Relation hook invocations do not maintain global order or
    determinism across relations**. They only maintain ordering and
    determinism within a relation. A shared scheduler across relations
    would be needed to maintain such behavior.
    """

    def __init__(self, client, unit_name, unit_relation, relation_name, unit_path, executor):
        self._client = client
        self._unit_path = unit_path
        self._relation_name = relation_name
        self._unit_relation = unit_relation
        self._executor = executor
        self._run_lock = DeferredLock()
        self._log = logging.getLogger("unit.relation.lifecycle")
        self._error_handler = None

        self._scheduler = HookScheduler(client,
                                        self._execute_change_hook,
                                        self._unit_relation,
                                        self._relation_name,
                                        unit_name=unit_name)
        self._watcher = None

    @inlineCallbacks
    def _execute_change_hook(self, context, change, hook_name=None):
        """Invoked by the contained HookScheduler, to execute a hook.

        We utilize the HookExecutor to execute the hook, if an
        error occurs, it will be reraised, unless an error handler
        is specified see ``set_hook_error_handler``.
        """
        socket_path = os.path.join(self._unit_path, HOOK_SOCKET_FILE)
        if hook_name is None:
            if change.change_type == "departed":
                hook_names = [
                    "%s-relation-departed" % self._relation_name]
            elif change.change_type == "joined":
                hook_names = [
                    "%s-relation-joined" % self._relation_name,
                    "%s-relation-changed" % self._relation_name]
            else:
                hook_names = ["%s-relation-changed" % self._relation_name]
        else:
            hook_names = [hook_name]

        invoker = RelationInvoker(
            context, change, "constant", socket_path, self._unit_path,
            hook_log)

        for hook_name in hook_names:
            hook_path = os.path.join(
                self._unit_path, "charm", "hooks", hook_name)
            yield self._run_lock.acquire()
            self._log.debug("Executing hook %s", hook_name)
            try:
                yield self._executor(invoker, hook_path)
            except Exception, e:
                yield self._run_lock.release()
                self._log.warn("Error in %s hook: %s", hook_name, e)

                if not self._error_handler:
                    raise
                self._log.info(
                    "Invoked error handler for %s hook", hook_name)
                # We can't hold the run lock, when we invoke the error
                # handler, or we get a deadlock if the handler
                # manipulates the lifecycle.
                yield self._error_handler(change, e)
            else:
                yield self._run_lock.release()
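
_execute_change_hook releases the run lock *before* invoking the error handler, because a handler that manipulates the lifecycle would otherwise deadlock on the same lock. A stripped-down sketch of that ordering (operation and error_handler are hypothetical placeholders):

from twisted.internet.defer import DeferredLock, inlineCallbacks

run_lock = DeferredLock()


@inlineCallbacks
def guarded(operation, error_handler=None):
    yield run_lock.acquire()
    try:
        yield operation()
    except Exception as e:
        # Release first: the handler may itself try to take run_lock.
        run_lock.release()
        if error_handler is None:
            raise
        yield error_handler(e)
    else:
        run_lock.release()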
Ejemplo n.º 48
0
class NotificationSource(object):
    """
    An AMQP consumer which handles messages sent over a "frontend" queue to
    set up temporary queues.  The L{get_message} method should be invoked to
    retrieve one single message from those temporary queues.

    @ivar timeout: time to wait for a message before giving up in C{get}.
    """

    # The timeout must be lower than the Apache one in front, which by default
    # is 5 minutes.
    timeout = 270

    def __init__(self, connector, prefix=None, clock=reactor):
        """
        @param connector: A callable returning a deferred which should fire
            with an opened AMQChannel. The deferred is expected to never
            errback (typically it will be fired by some code which in case
            of failure keeps retrying to connect to a broker or a cluster
            of brokers).
        @param prefix: Optional prefix for identifying the AMQP queues we
            should consume messages from.
        @param clock: An object implementing IReactorTime.
        """
        self._connector = connector
        self._prefix = prefix
        self._clock = clock
        self._channel_lock = DeferredLock()
        # Preserve compatibility by using special forms for naming when a
        # prefix is specified.
        if self._prefix is not None and len(self._prefix) != 0:
            self._tag_form = "%s.notifications-tag.%%s.%%s" % self._prefix
            self._queue_form = "%s.notifications-queue.%%s" % self._prefix
        else:
            self._tag_form = "%s.%s"
            self._queue_form = "%s"

    @inlineCallbacks
    def get(self, uuid, sequence):
        """Request the next L{Notification} for C{uuid}.

        @param uuid: The identifier of the notifications stream.
        @param sequence: Sequential number for identifying this particular
            request. This makes it possible to invoke this API more than once
            concurrently to handle the same notification. Typically only
            one notification will be actually processed and the other discarded
            as duplicates. The FrontEndAjax code makes use of this feature
            in order to get rid of dead requests. See #745708.

        If no notification is received within the number of seconds in
        L{timeout}, then the returned Deferred will errback with L{Timeout}.
        """
        # Attempt to fetch a single notification, retrying any transient error
        # until the timeout expires.
        timeout = self.timeout
        while timeout > 0:
            now = self._clock.seconds()
            channel = yield self._connector()
            try:
                notification = yield self._do(channel, uuid, sequence, timeout)
                returnValue(notification)
            except _Retriable:
                # Wait for the connection to shutdown.
                yield channel.client.disconnected.wait()
                timeout -= self._clock.seconds() - now
                continue
        raise Timeout()

    @inlineCallbacks
    def _do(self, channel, uuid, sequence, timeout):
        """Do fetch a single notification.

        If we hit a transient error, the _Retriable exception will be raised.
        """
        tag = self._tag_form % (uuid, sequence)
        try:
            yield self._check_retriable(
                channel.basic_consume, consumer_tag=tag,
                queue=self._queue_form % uuid)
        except ChannelClosed as error:
            # If the broker sent us channel-close because the queue doesn't
            # exist, raise NotFound. Otherwise just propagate.
            if error.args[0].reply_code == 404:
                # This will try to close the client cleanly (by sending 'close'
                # and waiting for 'close-ok'), but will force a connection
                # shutdown if that doesn't happen within 5 seconds (e.g because
                # the broker got shutdown exactly at this time).
                # See AMQClient.close().
                yield channel.client.close(within=5)
                raise NotFound()
            raise

        log.msg("Consuming from queue '%s'" % uuid)

        queue = yield channel.client.queue(tag)
        empty = False

        try:
            msg = yield queue.get(timeout)
        except Empty:
            empty = True
        except QueueClosed:
            # The queue has been closed, presumably because of a side effect.
            # Let's retry after reconnection.
            raise _Retriable()

        yield self._check_retriable(channel.basic_cancel, consumer_tag=tag)

        channel.client.queues.pop(tag, None)

        if empty:
            # Check for messages that arrived in the meantime
            if queue.pending:
                msg = queue.pending.pop()
            else:
                raise Timeout()

        returnValue(Notification(self, channel, msg))

    @inlineCallbacks
    def _check_retriable(self, method, **kwargs):
        """Invoke the given channel method and check for transient errors.

        @param method: A bound method of a txamqp.protocol.AMQChannel instance.
        @param kwargs: The keyword arguments to pass to the method.
        """
        # Serialize calls to channel methods: if get() gets called
        # concurrently we don't want two calls in flight at the same time,
        # because in case of a failure txamqp would errback both calls and
        # there'd be no hint about which call actually failed.
        channel = method.im_self
        yield self._channel_lock.acquire()
        try:
            if channel.closed:
                # The channel got closed, e.g. because another call to
                # NotificationSource._do() hit an error. In this case we just
                # want to retry.
                raise _Retriable()
            yield method(**kwargs)
        except ConnectionClosed as error:
            # 320 (connection-forced) and 541 (internal-error) are transient
            # errors that can be retried, the most common being 320 which
            # happens if the broker gets restarted.
            # See also https://www.rabbitmq.com/amqp-0-9-1-reference.html.
            message = error.args[0]
            if message.reply_code in (320, 541):
                raise _Retriable()
            raise
        except Closed as error:
            reason = error.args[0]
            if isinstance(reason, Failure):
                if isinstance(reason.value, TransportClosed):
                    raise _Retriable()
            raise
        finally:
            self._channel_lock.release()

    @inlineCallbacks
    def _done(self, notification, successful):
        """Confirm that a notification has been handled (successfully or not).

        @param notification: The Notification to confirm.
        @param successful: If True, then the notification has been correctly
            processed and will be deleted. If False, it will be re-queued and
            be available at the next NotificationSource.get() call for the
            same UUID.
        """
        channel = notification._channel
        if successful:
            method = channel.basic_ack
        else:
            method = partial(channel.basic_reject, requeue=True)

        yield self._channel_lock.acquire()
        try:
            yield method(delivery_tag=notification._message.delivery_tag)
        except Closed:
            # If we hit any channel or connection error, we raise an error
            # since there's no way this can be re-tried.
            raise Bounced()
        finally:
            self._channel_lock.release()
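
A sketch of how a caller might drive NotificationSource.get(). Timeout and NotFound are the exception classes raised by the class above; handle() is a hypothetical application callback, and acknowledgement goes through the (private) _done() shown above rather than any public wrapper, purely for illustration:

from twisted.internet.defer import inlineCallbacks


@inlineCallbacks
def poll(source, uuid):
    sequence = 0
    while True:
        try:
            notification = yield source.get(uuid, sequence)
        except Timeout:
            continue  # nothing within source.timeout seconds; ask again
        except NotFound:
            break     # the queue is gone; stop polling
        sequence += 1
        try:
            yield handle(notification)                # hypothetical callback
        except Exception:
            yield source._done(notification, False)   # reject: re-queue it
        else:
            yield source._done(notification, True)    # ack: delete it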
Ejemplo n.º 49
0
class CalibrationServer(LabradServer):
    name = 'DAC Calibration'

    @inlineCallbacks
    def initServer(self):
        self.IQcalsets = {}
        self.DACcalsets = {}
        print 'loading server settings...',
        self.loadServerSettings()
        print 'done.'
        yield LabradServer.initServer(self)

    def loadServerSettings(self):
        """Load configuration information from the registry."""
        d = {}
        defaults = {
            'deconvIQ': True,
            'deconvZ': True,
            'bandwidthIQ': 0.4, #original default: 0.4
            'bandwidthZ': 0.13, #original default: 0.13
            'maxfreqZ': 0.45, #optimal parameter: 10% below Nyquist frequency of dac, 0.45
            'maxvalueZ': 5.0 #optimal parameter: 5.0, from the jitter in 1/H fourier amplitudes
        }
        for key in keys.SERVERSETTINGVALUES:
            default = defaults.get(key, None)
            keyval = default
            print key, ':', keyval
            d[key] = keyval
        self.serverSettings = d

    #@inlineCallbacks
    def initContext(self, c):
        c['Loop'] = False
        c['t0'] = 0
        c['Settling'] = ([], [])
        c['Reflection'] = ([], [])        
        c['Filter'] = 0.2
        c['deconvIQ'] = self.serverSettings['deconvIQ']
        c['deconvZ'] = self.serverSettings['deconvZ']

    @inlineCallbacks
    def call_sync(self, *args, **kw):
        """Call synchronous code in a separate thread outside the twisted event loop."""
        if not hasattr(self, '_sync_lock'):
            self._sync_lock = DeferredLock()
        yield self._sync_lock.acquire()
        try:
            result = yield deferToThread(*args, **kw)
            returnValue(result)
        finally:
            self._sync_lock.release()

    @inlineCallbacks
    def getIQcalset(self, c):
        """Get an IQ calset for the board in the given context, creating it if needed."""
        if 'Board' not in c:
            raise NoBoardSelectedError()
        board = c['Board']

        if board not in self.IQcalsets:
            calset = yield self.call_sync(IQcorrector, board,
                                                       None,
                                                       errorClass=CalibrationNotFoundError,
                                                       bandwidth=self.serverSettings['bandwidthIQ'])
            self.IQcalsets[board] = calset
        returnValue(self.IQcalsets[board])

    @inlineCallbacks
    def getDACcalset(self, c):
        """Get a DAC calset for the board and DAC in the given context, creating it if needed."""
        if 'Board' not in c:
            raise NoBoardSelectedError()
        board = c['Board']

        if 'DAC' not in c:
            raise NoDACSelectedError()
        dac = c['DAC']

        if board not in self.DACcalsets:
            self.DACcalsets[board] = {}
        if dac not in self.DACcalsets[board]:
            calset = yield self.call_sync(DACcorrector, board,
                                                        dac,
                                                        None,
                                                        errorClass=CalibrationNotFoundError,
                                                        bandwidth=self.serverSettings['bandwidthZ'],
                                                        maxfreqZ=self.serverSettings['maxfreqZ'])
            self.DACcalsets[board][dac] = calset
        returnValue(self.DACcalsets[board][dac])

    @setting(1, 'Board', board=['s'], returns=['s'])
    def board(self, c, board):
        """Sets the board for which to correct the data."""
        c['Board'] = board
        return board

    @setting(10, 'Frequency', frequency=['v[GHz]'], returns=['v[GHz]'])
    def frequency(self, c, frequency):
        """Sets the microwave driving frequency for which to correct the data.

        This also implicitly selects I/Q mode for the correction.
        """
        # c['Frequency'] = float(frequency)
        c['Frequency'] = frequency['GHz']
        c['DAC'] = None
        return frequency

    @setting(11, 'Loop', loopmode=['b: Loop mode'], returns=['b'])
    def loop(self, c, loopmode=True):
        c['Loop'] = loopmode
        return loopmode

    @setting(12, 'Time Offset', t0=['v[ns]'], returns=['v[ns]'])
    def set_time_offset(self, c, t0):
        # c['t0'] = float(t0)
        c['t0'] = t0['ns']
        return t0

    @setting(13, 'deconvIQ', deconvIQ=['b'], returns=['b'])
    def set_deconvIQ(self, c, deconvIQ):
        c['deconvIQ'] = deconvIQ
        return deconvIQ

    @setting(14, 'deconvZ', deconvZ=['b'], returns=['b'])
    def set_deconvZ(self, c, deconvZ):
        c['deconvZ'] = deconvZ
        return deconvZ

    @setting(15, 'getdeconvIQ', returns=['b'])
    def get_deconvIQ(self, c):
        return c['deconvIQ']

    @setting(16, 'getdeconvZ', returns=['b'])
    def get_deconvZ(self, c):
        return c['deconvZ']

    @setting(20, 'DAC', dac=['w: DAC channel 0 or 1', 's: DAC channel'], returns=['w'])
    def dac(self, c, dac):
        """Set the DAC for which to correct the data.

        This also implicitly selects single channel mode for the correction.
        If a string is passed in, the final character is used to select the DAC,
        and must be either 'A' ('a') or 'B' ('b').
        """
        if isinstance(dac, str):
            dac = dac[-1]
        if dac in [0, '0', 'a', 'A']:
            dac = 0
        elif dac in [1, '1', 'b', 'B']:
            dac = 1
        else:
            raise NoSuchDACError()

        c['Frequency'] = None
        c['DAC'] = dac
        return dac

    @setting(30,
        'Correct IQ',
        data=['*(v, v): I/Q data', '*c: I/Q data'],
        zero_ends='b',
        returns=['(*i, *i): Dual channel DAC values'])
    def correct_iq(self, c, data, zero_ends=False):
        """Correct IQ data specified in the time domain.

        Args:
            data (list of tuple or list of complex): The time-domain IQ sequence
                to be deconvolved.
            zero_ends (boolean): If true, the first and last 4 nanoseconds will
                be set to the deconvolved zero value to ensure microwaves are off.

        Returns:
            A tuple of deconvolved I DAC values and Q DAC values.
        """

        if len(data) == 0:
            returnValue([]) # special case for empty data

        if len(data.shape) == 2:
            data = data[:,0] + 1j * data[:,1]

        calset = yield self.getIQcalset(c)
        deconv = c['deconvIQ']
        corrected = yield self.call_sync(calset.DACify, c['Frequency'],
                                                  data,
                                                  loop=c['Loop'],
                                                  zipSRAM=False,
                                                  deconv=deconv,
                                                  zeroEnds=zero_ends)
        if deconv is False:
            print 'No deconv on board ' + c['Board'] 
        returnValue(corrected)

    @setting(31,
        'Correct IQ FT',
        data=['*(v, v): I/Q data', '*c: I/Q data'],
        zero_ends='b',
        returns=['(*i, *i): Dual channel DAC values'])
    def correct_iq_ft(self, c, data, zero_ends=False):
        """Correct IQ data specified in the frequency domain.

        This allows for sub-nanosecond timing resolution.

        Args:
            data (list of tuple or list of complex): The frequency-domain IQ
                sequence to be deconvolved.
            zero_ends (boolean): If true, the first and last 4 nanoseconds will
                be set to the deconvolved zero value to ensure microwaves are off.

        Returns:
            A tuple of deconvolved I DAC values and Q DAC values.
        """
        if len(data) == 0:
            returnValue([]) # special case for empty data

        if len(data.shape) == 2:
            data = data[:,0] + 1.0j * data[:,1]

        calset = yield self.getIQcalset(c)
        deconv = c['deconvIQ']
        corrected = yield self.call_sync(calset.DACifyFT, c['Frequency'],
                                                          data,
                                                          n=len(data),
                                                          t0=c['t0'],
                                                          loop=c['Loop'],
                                                          zipSRAM=False,
                                                          deconv=deconv,
                                                          zeroEnds=zero_ends)
        if deconv is False:
            print 'No deconv on board ' + c['Board']
        returnValue(corrected)

    @setting(32,
        'Correct Analog',
        data=['*v: Single channel data'],
        average_ends='b',
        dither='b',
        returns=['*i: Single channel DAC values'])
    def correct_analog(self, c, data, average_ends=False, dither=False):
        """Correct single channel data specified in the time domain.

        Args:
            data (list of float): The time-domain sequence to be deconvolved.
            average_ends (boolean): If true, the first and last 4 nanoseconds
                will be averaged and set to the constant average value to
                ensure the DAC output is constant after the sequence ends.
            dither (boolean): If true, the sequence will be dithered by adding
                random noise to reduce quantization noise.

        Returns:
            A list of deconvolved DAC values.
        """
        if len(data) == 0:
            returnValue([]) # special case for empty data

        calset = yield self.getDACcalset(c)
        calset.setSettling(*c['Settling'])
        calset.setReflection(*c['Reflection'])
        deconv = c['deconvZ']
        corrected = yield self.call_sync(calset.DACify, data,
                                                  loop=c['Loop'],
                                                  fitRange=False,
                                                  deconv=deconv,
                                                  dither=dither,
                                                  averageEnds=average_ends)
        if deconv is False:
            print 'No deconv on board ' + c['Board']
        returnValue(corrected)

    @setting(33,
        'Correct Analog FT',
        data=['*c: Single channel data'],
        average_ends='b',
        dither='b',
        returns=['*i: Single channel DAC values'])
    def correct_analog_ft(self, c, data, average_ends=False, dither=False):
        """Correct single channel data specified in the frequency domain.

        This allows for sub-nanosecond timing resolution.

        Args:
            data (list of float): The frequency-domain sequence to be deconvolved.
            average_ends (boolean): If true, the first and last 4 nanoseconds
                will be averaged and set to the constant average value to
                ensure the DAC output is constant after the sequence ends.
            dither (boolean): If true, the sequence will be dithered by adding
                random noise to reduce quantization noise.

        Returns:
            A list of deconvolved DAC values.
        """
        if len(data) == 0:
            returnValue([]) # special case for empty data

        calset = yield self.getDACcalset(c)
        calset.setSettling(*c['Settling'])
        calset.setReflection(*c['Reflection'])
        calset.setFilter(bandwidth=c['Filter'])
        deconv = c['deconvZ']
        corrected = yield self.call_sync(calset.DACifyFT, data,
                                                          n=(len(data)-1)*2,
                                                          t0=c['t0'],
                                                          loop=c['Loop'],
                                                          fitRange=False,
                                                          deconv=deconv,
                                                          maxvalueZ=self.serverSettings['maxvalueZ'],
                                                          dither=dither,
                                                          averageEnds=average_ends)
        if deconv is False:
            print 'No deconv on board ' + c['Board']
        returnValue(corrected)

    @setting(40, 'Set Settling', rates=['*v[GHz]: settling rates'], amplitudes=['*v: settling amplitudes'])
    def setsettling(self, c, rates, amplitudes):
        """
        If a calibration can be characterized by time constants, i.e.
        the step response function is
          0                                             for t <  0
          1 + sum(amplitudes[i]*exp(-decayrates[i]*t))  for t >= 0,
        then you don't need to load the response function explicitly
        but can just give the time constants and amplitudes.
        All previously used time constants will be replaced.
        """
        c['Settling'] = (rates, amplitudes)

    @setting(41, 'Set Reflection', rates=['*v[GHz]: reflection rates'], amplitudes=['*v: reflection amplitudes'])
    def setreflection(self, c, rates, amplitudes):
        """ Correct for reflections in the line.
        Impulse response of a line reflection is H = (1 - amplitude) / (1 - amplitude * exp(-2i*pi*f/rate)).
        All previously used time constants for the reflections will be replaced.
        """
        c['Reflection'] = (rates, amplitudes)

    @setting(45, 'Set Filter', bandwidth=['v[GHz]: bandwidth'])
    def setfilter(self, c, bandwidth):
        """
        Set the lowpass filter used for deconvolution.

        bandwidth: argument passed to the lowpass filter
            function (see above)
        """
        c['Filter'] = float(bandwidth)

    @setting(50, 'Fast FFT Len', n='w')
    def fast_fft_len(self, c, n):
        """Given a sequence length n, get a new length nfft >= n which is efficient for calculating fft."""
        return fastfftlen(n)
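
call_sync() above is a reusable recipe: deferToThread moves a blocking call off the reactor thread, and the DeferredLock guarantees only one such call is in flight at a time (the calibration code is presumably not thread-safe). The same pattern as a free function, sketched under those assumptions:

from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue
from twisted.internet.threads import deferToThread

_sync_lock = DeferredLock()


@inlineCallbacks
def call_sync(func, *args, **kw):
    # Serialize: at most one thread-pool job runs at a time, so the
    # blocking code is never re-entered concurrently.
    yield _sync_lock.acquire()
    try:
        result = yield deferToThread(func, *args, **kw)
        returnValue(result)
    finally:
        _sync_lock.release()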
Ejemplo n.º 50
0
class DAC(LabradServer):
    
    name = 'DAC'
    onNewVoltage = Signal(123556, 'signal: new voltage', '(sv)')
    
    @inlineCallbacks
    def initServer(self):
        self.api_dac  = api_dac()
        self.inCommunication = DeferredLock()
        connected = self.api_dac.connectOKBoard()
        if not connected:
            raise Exception ("Could not connect to DAC")
        self.d = yield self.initializeDAC()
        self.listeners = set()     
    
    @inlineCallbacks
    def initializeDAC(self):
        '''creates dictionary for information storage'''
        d = {}
        for name,channel_number,min_voltage,vpp in [
                             ('comp1', 0, -40.0, 80.0),
                             ('comp2', 1, -40.0, 80.0),
                             ('endcap1', 2, -9.9552, 20.0),
                             ('endcap2', 3, -9.9561, 20.0),
                             ]:
            chan = dac_channel(name, channel_number, min_voltage, vpp)
            chan.voltage = yield self.getRegValue(name)
            d[name] = chan
            value = self.voltage_to_val(chan.voltage, chan.min_voltage, chan.vpp)
            yield self.do_set_voltage(channel_number, value)
        returnValue( d )
    
    @inlineCallbacks
    def getRegValue(self, name):
        yield self.client.registry.cd(['','Servers', 'DAC'], True)
        try:
            voltage = yield self.client.registry.get(name)
        except Exception:
            print '{} not found in registry'.format(name)
            voltage = 0
        returnValue(voltage)
            
    @setting(0, "Set Voltage",channel = 's', voltage = 'v[V]', returns = '')
    def setVoltage(self, c, channel, voltage):
        try:
            chan = self.d[channel]
            minim,total,channel_number = chan.min_voltage, chan.vpp, chan.channel_number
        except KeyError:
            raise Exception ("Channel {} not found".format(channel))
        voltage = voltage['V']
        value = self.voltage_to_val(voltage, minim, total)
        yield self.do_set_voltage(channel_number, value)
        chan.voltage = voltage
        self.notifyOtherListeners(c, (channel, voltage), self.onNewVoltage)
    
    @inlineCallbacks
    def do_set_voltage(self, channel_number, value):
        yield self.inCommunication.acquire()
        try:
            yield deferToThread(self.api_dac.setVoltage, channel_number, value)
            confirmation = yield deferToThread(self.api_dac.getVoltage, channel_number)
            print 'setting value', value
            if value != confirmation:
                raise Exception("Board did not set the voltage properly")
        finally:
            self.inCommunication.release()
        
    def voltage_to_val(self, voltage, minim, total, prec = 16):
        '''converts voltage of a channel to FPGA-understood sequential value'''
        value = int((voltage - minim) / total * (2 ** prec - 1))
        if not 0 <= value <= 2 ** prec - 1: raise Exception ("Voltage Out of Range")
        return value
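    # Worked example (channel values taken from the 'comp1' entry above,
    # used here purely for illustration):
    #   voltage_to_val(0.0, -40.0, 80.0) == int(40.0 / 80.0 * 65535) == 32767
    # i.e. 0 V sits at mid-scale of the 16-bit DAC range.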
           
    @setting(1, "Get Voltage", channel = 's', returns = 'v[V]')
    def getVoltage(self, c, channel):
        try:
            voltage = self.d[channel].voltage
        except KeyError:
            raise Exception ("Channel {} not found".format(channel))
        return WithUnit(voltage, 'V')
    
    @setting(2, "Get Range", channel = 's', returns = '(v[V]v[V])')
    def getRange(self, c, channel):
        try:
            chan = self.d[channel]
            minim,maxim = chan.min_voltage,chan.max_voltage
        except KeyError:
            raise Exception ("Channel {} not found".format(channel))
        return (WithUnit(minim,'V'), WithUnit(maxim, 'V'))
    
    def notifyOtherListeners(self, context, message, f):
        """
        Notifies all listeners except the one in the given context, executing function f
        """
        notified = self.listeners.copy()
        notified.remove(context.ID)
        f(message,notified)
    
    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
    
    @inlineCallbacks
    def stopServer(self):
        '''save the latest voltage information into registry'''
        try:
            yield self.client.registry.cd(['','Servers', 'DAC'], True)
            for name,channel in self.d.iteritems():
                yield self.client.registry.set(name, channel.voltage)
        except AttributeError:
            # if the dictionary doesn't exist yet (i.e. bad identification error), do nothing
            pass

    @setting(3, "Set Endcaps", voltage = 'v[V]', returns = '')
    def setEndcaps(self, c, voltage):
        for channel in ['comp1', 'comp2']:
            try:
                chan = self.d[channel]
                minim,total,channel_number = chan.min_voltage, chan.vpp, chan.channel_number
            except KeyError:
                raise Exception ("Channel {} not found".format(channel))
            voltage_value = voltage['V']
            value = self.voltage_to_val(voltage_value, minim, total)
            yield self.do_set_voltage(channel_number, value)
            chan.voltage = voltage_value
            self.notifyOtherListeners(c, (channel, voltage_value), self.onNewVoltage)
Ejemplo n.º 51
0
class Dataset(QtCore.QObject):
    
    def __init__(self, data_vault, context, dataset_location,reactor):
        super(Dataset, self).__init__()
        self.data = None
        self.accessingData = DeferredLock()
        self.reactor = reactor
        self.dataset_location = dataset_location
        self.data_vault = data_vault
        self.updateCounter = 0
        self.context = context
        self.connectDataVault()
        self.setupListeners()

    @inlineCallbacks
    def connectDataVault(self):
        yield self.data_vault.cd(self.dataset_location[0], context = self.context)
        path, dataset_name = yield self.data_vault.open(self.dataset_location[1], context = self.context)
        self.dataset_name = dataset_name

    @inlineCallbacks
    def setupListeners(self):
        yield self.data_vault.signal__data_available(11111, context = self.context)
        yield self.data_vault.addListener(listener = self.updateData, source = None, ID = 11111, context = self.context)


    @inlineCallbacks
    def openDataset(self):
        yield self.data_vault.cd(self.dataset_location[0], context = self.context)
        yield self.data_vault.open(self.dataset_location[1], context = self.context)

    @inlineCallbacks
    def getParameters(self):
        parameters = yield self.data_vault.parameters(context = self.context)
        parameterValues = []
        for parameter in parameters:
            parameterValue = yield self.data_vault.get_parameter(parameter, context = self.context)
            parameterValues.append( (parameter, parameterValue) )
        returnValue(parameterValues)

    def updateData(self, x, y):
        self.updateCounter += 1
        self.getData()

    @inlineCallbacks
    def getData(self):
        Data = yield self.data_vault.get(100, context = self.context)
        yield self.accessingData.acquire()
        try:
            if self.data is None:
                try:
                    self.data = Data.asarray
                except AttributeError:
                    self.data = Data
            else:
                try:
                    self.data = np.append(self.data, Data.asarray, 0)
                except AttributeError:
                    self.data = np.append(self.data, Data, 0)
        finally:
            self.accessingData.release()

    @inlineCallbacks
    def getLabels(self):
        labels = []
        yield self.openDataset()
        variables = yield self.data_vault.variables(context = self.context)
        for i in range(len(variables[1])):
            labels.append(variables[1][i][1] + ' - ' + self.dataset_name)
        returnValue(labels)

    @inlineCallbacks
    def disconnectDataSignal(self):
        yield self.data_vault.removeListener(listener = self.updateData, source = None, ID = 11111, context = self.context)
Ejemplo n.º 52
0
class Pulser(LabradServer):
    name = 'Pulser'
    onSwitch = Signal(611051, 'signal: switch toggled', '(ss)')
    
    def initServer(self):
        self.channelDict =  hardwareConfiguration.channelDict
        self.collectionTime = hardwareConfiguration.collectionTime
        self.collectionMode = hardwareConfiguration.collectionMode
        self.sequenceType = hardwareConfiguration.sequenceType
        self.isProgrammed = hardwareConfiguration.isProgrammed
        self.inCommunication = DeferredLock()
        self.connectOKBoard()
        self.listeners = set()

    def connectOKBoard(self):
        self.xem = None
        fp = ok.FrontPanel()
        module_count = fp.GetDeviceCount()
        print "Found {} unused modules".format(module_count)
        for i in range(module_count):
            serial = fp.GetDeviceListSerial(i)
            tmp = ok.FrontPanel()
            tmp.OpenBySerial(serial)
            id = tmp.GetDeviceID()
            if id == okDeviceID:
                self.xem = tmp
                print 'Connected to {}'.format(id)
                self.programOKBoard()
                self.initializeSettings()
                return
        print 'Could not find {}'.format(okDeviceID)
        print 'Will try again in {} seconds'.format(devicePollingPeriod)
        reactor.callLater(devicePollingPeriod, self.connectOKBoard)
    
    def programOKBoard(self):
        print 'Programming FPGA'
        prog = self.xem.ConfigureFPGA('photon.bit')
        if prog: raise("Not able to program FPGA")
        pll = ok.PLL22150()
        self.xem.GetEepromPLL22150Configuration(pll)
        pll.SetDiv1(pll.DivSrc_VCO,4)
        self.xem.SetPLL22150Configuration(pll)
    
    def initializeSettings(self):
        for channel in self.channelDict.itervalues():
            channelnumber = channel.channelnumber
            if channel.ismanual:
                state = self.cnot(channel.manualinv, channel.manualstate)
                self._setManual(channelnumber, state)
            else:
                self._setAuto(channelnumber, channel.autoinv)

    
    @setting(0, "New Sequence", returns = '')
    def newSequence(self, c):
        """
        Create New Pulse Sequence
        """
        c['sequence'] = Sequence()
    
    @setting(1, "Program Sequence", returns = '')
    def programSequence(self, c, sequence):
        """
        Programs Pulser with the current sequence.
        """
        if self.xem is None: raise Exception('Board not connected')
        sequence = c.get('sequence')
        if not sequence: raise Exception ("Please create new sequence first")
        parsedSequence = sequence.progRepresentation()
        yield self.inCommunication.acquire()
        yield deferToThread(self._programBoard, parsedSequence)    
        self.inCommunication.release()
        self.isProgrammed = True
    
    @setting(2, "Start Infinite", returns = '')
    def startInfinite(self,c):
        if not self.isProgrammed: raise Exception ("No Programmed Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self._resetSeqCounter)
        yield deferToThread(self._startInfinite)
        self.sequenceType = 'Infinite'
        self.inCommunication.release()
    
    @setting(3, "Complete Infinite Iteration", returns = '')
    def completeInfinite(self,c):
        if self.sequenceType != 'Infinite': raise Exception( "Not Running Infinite Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self._startSingle)
        self.inCommunication.release()
    
    @setting(4, "Start Single", returns = '')
    def start(self, c):
        if not self.isProgrammed: raise Exception ("No Programmed Sequence")
        yield self.inCommunication.acquire()
        yield deferToThread(self._resetSeqCounter)
        yield deferToThread(self._startSingle)
        self.sequenceType = 'One'
        self.inCommunication.release()
    
    @setting(5, 'Add TTL Pulse', channel = 's', start = 'v', duration = 'v')
    def addTTLPulse(self, c, channel, start, duration):
        """
        Add a TTL Pulse to the sequence, times are in seconds
        """
        hardwareAddr = self.channelDict.get(channel).channelnumber
        sequence = c.get('sequence')
        #simple error checking
        if hardwareAddr is None: raise Exception("Unknown Channel {}".format(channel))
        if not (MIN_SEQUENCE <= start and start + duration <= MAX_SEQUENCE): raise Exception ("Time boundaries are out of range")
        if not duration >= timeResolution: raise Exception ("Incorrect duration") 
        if not sequence: raise Exception ("Please create new sequence first")
        sequence.addTTLPulse(hardwareAddr, start, duration)
    
    @setting(6, 'Add TTL Pulses', pulses = '*(svv)')
    def addTTLPulses(self, c, pulses):
        """
        Add multiple TTL Pulses to the sequence, times are in seconds. The pulses are a list in the same format as 'add ttl pulse'.
        """
        for pulse in pulses:
            channel = pulse[0]
            start = pulse[1]
            duration = pulse[2]
            yield self.addTTLPulse(c, channel, start, duration)
    
    @setting(7, "Extend Sequence Length", timeLength = 'v')
    def extendSequenceLength(self, c, timeLength):
        """
        Optionally extends the total length of the sequence beyond the last TTL pulse.
        """
        sequence = c.get('sequence')
        if not (MIN_SEQUENCE <= timeLength <= MAX_SEQUENCE): raise Exception ("Time boundaries are out of range")
        if not sequence: raise Exception ("Please create new sequence first")
        sequence.extendSequenceLength(timeLength)
        
    
    @setting(8, "Stop Sequence")
    def stopSequence(self, c):
        """Stops any currently running  sequence"""
        yield self.inCommunication.acquire()
        yield deferToThread(self._resetRam)
        if self.sequenceType =='Infinite':
            yield deferToThread(self._stopInfinite)
        elif self.sequenceType =='One':
            yield deferToThread(self._stopSingle)
        self.inCommunication.release()
        self.sequenceType = None
    
    @setting(9, "Human Readable", returns = '*2s')
    def humanReadable(self, c):
        """
        Returns a readable form of the programmed sequence for debugging
        """
        sequence = c.get('sequence')
        if not sequence: raise Exception ("Please create new sequence first")
        ans = sequence.humanRepresentation()
        return ans.tolist()
    
    @setting(11, 'Get Channels', returns = '*s')
    def getChannels(self, c):
        """
        Returns all available channels
        """
        return self.channelDict.keys()
    
    @setting(12, 'Switch Manual', channelName = 's', state= 'b')
    def switchManual(self, c, channelName, state = None):  
        """
        Switches the given channel into manual mode. By default it will go into the last remembered state,
        but the desired state can also be passed as an argument.
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        channelNumber = channel.channelnumber
        channel.ismanual = True
        if state is not None:
            channel.manualstate = state
        else:
            state = channel.manualstate
        yield self.inCommunication.acquire()
        yield deferToThread(self._setManual, channelNumber, self.cnot(channel.manualinv, state))
        self.inCommunication.release()
        if state:
            self.notifyOtherListeners(c,(channelName,'ManualOn'), self.onSwitch)
        else:
            self.notifyOtherListeners(c,(channelName,'ManualOff'), self.onSwitch)
    
    @setting(13, 'Switch Auto', channelName = 's', invert= 'b')
    def switchAuto(self, c, channelName, invert = None):  
        """
        Switches the given channel into the automatic mode, with an optional inversion.
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        channelNumber = channel.channelnumber
        channel.ismanual = False
        if invert is not None:
            channel.autoinv = invert
        else:
            invert = channel.autoinv
        yield self.inCommunication.acquire()
        yield deferToThread(self._setAuto, channelNumber, invert)
        self.inCommunication.release()
        self.notifyOtherListeners(c,(channelName,'Auto'), self.onSwitch)

    @setting(14, 'Get State', channelName = 's', returns = '(bbbb)')
    def getState(self, c, channelName):
        """
        Returns the current state of the switch: in the form (Manual/Auto, ManualOn/Off, ManualInversionOn/Off, AutoInversionOn/Off)
        """
        if channelName not in self.channelDict.keys(): raise Exception("Incorrect Channel")
        channel = self.channelDict[channelName]
        answer = (channel.ismanual,channel.manualstate,channel.manualinv,channel.autoinv)
        return answer
    
    @setting(15, 'Wait Sequence Done', timeout = 'v', returns = 'b')
    def waitSequenceDone(self, c, timeout = 10):
        """
        Returns true if the sequence has completed within a timeout period
        """
        requestCalls = int(timeout / 0.050 ) #number of request calls
        for i in range(requestCalls):
            yield self.inCommunication.acquire()
            done = yield deferToThread(self._isSeqDone)
            self.inCommunication.release()
            if done: returnValue(True)
            yield self.wait(0.050)
        returnValue(False)
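    # A hedged generalization of the poll loop above (not in the original source;
    # assumes Twisted's inlineCallbacks/returnValue imports): poll an arbitrary
    # check function without blocking the reactor.
    @inlineCallbacks
    def pollUntilDone(self, check, timeout = 10.0, interval = 0.050):
        for i in range(int(timeout / interval)):
            done = yield check()
            if done: returnValue(True)
            yield self.wait(interval)
        returnValue(False)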
    
    @setting(21, 'Set Mode', mode = 's', returns = '')
    def setMode(self, c, mode):
        """
        Set the counting mode, either 'Normal' or 'Differential'
        In the Normal Mode, the FPGA automatically sends the counts with a preset frequency
        In the differential mode, the FPGA uses triggers the pulse sequence
        frequency and to know when the repumping light is swtiched on or off.
        """
        if mode not in self.collectionTime.keys(): raise Exception("Incorrect mode")
        self.collectionMode = mode
        countRate = self.collectionTime[mode]
        yield self.inCommunication.acquire()
        if mode == 'Normal':
            #set the mode on the device and set update time for normal mode
            yield deferToThread(self._setModeNormal)
            yield deferToThread(self._setPMTCountRate, countRate)
        elif mode == 'Differential':
            yield deferToThread(self._setModeDifferential)
        yield deferToThread(self._resetFIFONormal)
        self.inCommunication.release()
    
    @setting(22, 'Set Collection Time', time = 'v', mode = 's', returns = '')
    def setCollectTime(self, c, time, mode):
        """
        Sets how long to collect photons in either 'Normal' or 'Differential' mode of operation
        """
        time = float(time)
        if not collectionTimeRange[0]<=time<=collectionTimeRange[1]: raise Exception('incorrect collection time')
        if mode not in self.collectionTime.keys(): raise Exception("Incorrect mode")
        if mode == 'Normal':
            self.collectionTime[mode] = time
            yield self.inCommunication.acquire()
            yield deferToThread(self._resetFIFONormal)
            yield deferToThread(self._setPMTCountRate, time)
            self.inCommunication.release()
        elif mode == 'Differential':
            self.collectionTime[mode] = time
    
    @setting(23, 'Get Collection Time', returns = '(vv)')
    def getCollectTime(self, c):
        return collectionTimeRange
    
    @setting(24, 'Reset FIFO Normal', returns = '')
    def resetFIFONormal(self,c):
        """
        Resets the FIFO on board, deleting all queued counts
        """
        yield self.inCommunication.acquire()
        yield deferToThread(self._resetFIFONormal)
        self.inCommunication.release()
    
    @setting(25, 'Get PMT Counts', returns = '*(vsv)')
    def getALLCounts(self, c):
        """
        Returns the list of counts stored on the FPGA in the form (v, s1, s2), where v is the count rate in KC/SEC,
        s1 is 'ON' in Normal mode or in Differential mode with 866 on, and 'OFF' in Differential
        mode when 866 is off. s2 is the approximate time of acquisition.
        
        NOTE: for some reason FPGA ReadFromBlockPipeOut never times out, so we cannot implement requesting more packets
        than are currently stored because it may hang the device.
        """
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetAllCounts)
        self.inCommunication.release()
        returnValue(countlist)
    
    def doGetAllCounts(self):
        inFIFO = self._getNormalTotal()
        reading = self._getNormalCounts(inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf, split)
        countlist = map(self.convertKCperSec, countlist)
        countlist = self.appendTimes(countlist, time.time())
        return countlist
    
    @staticmethod
    def infoFromBuf(buf):
        #converts the received buffer into useful information
        # the most significant bit of the buffer indicates whether 866 is on or off
        count = 65536*(256*ord(buf[1])+ord(buf[0]))+(256*ord(buf[3])+ord(buf[2]))
        if count >= 2**31:
            status = 'OFF'
            count = count % 2**31
        else:
            status = 'ON'
        return [count, status]
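    # A hedged alternative to the ord() arithmetic above (not in the original
    # source; assumes 'import struct'): the FPGA sends the high 16-bit word first,
    # each word little-endian.
    @staticmethod
    def infoFromBufStruct(buf):
        high, low = struct.unpack('<2H', buf)
        count = (high << 16) | low
        if count >= 2**31:
            return [count % 2**31, 'OFF']
        return [count, 'ON']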
    
    def convertKCperSec(self, input):
        [rawCount,type] = input
        countKCperSec = float(rawCount) / self.collectionTime[self.collectionMode] / 1000.
        return [countKCperSec, type]
        
    def appendTimes(self, list, timeLast):
        #in the case that we received multiple PMT counts, uses the current time
        #and the collectionTime to guess the arrival time of the previous readings
        #i.e. ([[1,2],[2,3]], timeLast = 1.0, normalupdatetime = 0.1) ->
        #    ( [(1,2,0.9),(2,3,1.0)])
        collectionTime = self.collectionTime[self.collectionMode]
        for i in range(len(list)):
            list[-i - 1].append(timeLast - i * collectionTime)
            list[-i - 1] = tuple(list[-i - 1])
        return list
    
    def split_len(self,seq, length):
        '''useful for splitting a string in length-long pieces'''
        return [seq[i:i+length] for i in range(0, len(seq), length)]
    
    @setting(26, 'Get Collection Mode', returns = 's')
    def getMode(self, c):
        return self.collectionMode
    
    @setting(31, "Reset Timetags")
    def resetTimetags(self, c):
        """Reset the time resolved FIFO to clear any residual timetags"""
        yield self.inCommunication.acquire()
        yield deferToThread(self._resetFIFOResolved)
        self.inCommunication.release()
    
    @setting(32, "Get Timetags", returns = '*v')
    def getTimetags(self, c):
        """Get the time resolved timetags"""
        yield self.inCommunication.acquire()
        counted = yield deferToThread(self._getResolvedTotal)
        raw = yield deferToThread(self._getResolvedCounts, counted)
        self.inCommunication.release()
        arr = numpy.fromstring(raw, dtype = numpy.uint16)
        del(raw)
        arr = arr.reshape(-1,2)
        timetags = (65536 * arr[:,0] + arr[:,1]) * timeResolvedResolution
        returnValue(timetags)
    
    @setting(33, "Get TimeTag Resolution", returns = 'v')
    def getTimeTagResolution(self, c):
        return timeResolvedResolution
    
    def wait(self, seconds, result=None):
        """Returns a deferred that will be fired later"""
        d = Deferred()
        reactor.callLater(seconds, d.callback, result)
        return d
    
    def _programBoard(self, sequence):
        self.xem.WriteToBlockPipeIn(0x80, 2, sequence)
  
    def _startInfinite(self):
        self.xem.SetWireInValue(0x00,0x06,0x06)
        self.xem.UpdateWireIns()
    
    def _stopInfinite(self):
        self.xem.SetWireInValue(0x00,0x02,0x06)
        self.xem.UpdateWireIns()
        
    def _startSingle(self):
        self.xem.SetWireInValue(0x00,0x04,0x06)
        self.xem.UpdateWireIns()
    
    def _stopSingle(self):
        self.xem.SetWireInValue(0x00,0x00,0x06)
        self.xem.UpdateWireIns()
    
    def _resetRam(self):
        self.xem.ActivateTriggerIn(0x40,1)
        
    def _resetSeqCounter(self):
        self.xem.ActivateTriggerIn(0x40,0)
    
    def _resetFIFONormal(self):
        self.xem.ActivateTriggerIn(0x40,2)
    
    def _resetFIFOResolved(self):
        self.xem.ActivateTriggerIn(0x40,3)
    
    def _setModeNormal(self):
        """user selects PMT counting rate"""
        self.xem.SetWireInValue(0x00,0x00,0x01)
        self.xem.UpdateWireIns()
    
    def _setModeDifferential(self):
        """pulse sequence controls the PMT counting rate"""
        self.xem.SetWireInValue(0x00,0x01,0x01)
        self.xem.UpdateWireIns()
    
    def _isSeqDone(self):
        self.xem.SetWireInValue(0x00,0x00,0xf0)
        self.xem.UpdateWireIns()
        self.xem.UpdateWireOuts()
        done = self.xem.GetWireOutValue(0x21)
        return done
    
    def _getResolvedTotal(self):
        self.xem.UpdateWireOuts()
        counted = self.xem.GetWireOutValue(0x22)
        return counted
    
    def _getResolvedCounts(self, number):
        buf = "\x00"*(number*2)
        self.xem.ReadFromBlockPipeOut(0xa0,2,buf)
        return buf
    
    def _getNormalTotal(self):
        self.xem.SetWireInValue(0x00,0x40,0xf0)
        self.xem.UpdateWireIns()
        self.xem.UpdateWireOuts()
        done = self.xem.GetWireOutValue(0x21)
        return done
    
    def _getNormalCounts(self, number):
        buf = "\x00"* ( number * 2 )
        self.xem.ReadFromBlockPipeOut(0xa1,2,buf)
        return buf
    
    def _howManySequencesDone(self):
        self.xem.SetWireInValue(0x00,0x20,0xf0)
        self.xem.UpdateWireIns()
        self.xem.UpdateWireOuts()
        completed = self.xem.GetWireOutValue(0x21)
        return completed
    
    def _setPMTCountRate(self, time):
        #takes time in seconds
        self.xem.SetWireInValue(0x01,int(1000 * time))
        self.xem.UpdateWireIns()
        
    def _setAuto(self, channel, inversion):
        self.xem.SetWireInValue(0x02,0x00, 2**channel)
        if not inversion:
            self.xem.SetWireInValue(0x03,0x00, 2**channel)
        else:
            self.xem.SetWireInValue(0x03,2**channel, 2**channel)
        self.xem.UpdateWireIns()
    
    def _setManual(self, channel, state):
        self.xem.SetWireInValue(0x02,2**channel, 2**channel )
        if state:
            self.xem.SetWireInValue(0x03,2**channel, 2**channel)
        else:
            self.xem.SetWireInValue(0x03,0x00, 2**channel)
        self.xem.UpdateWireIns()
    
    def cnot(self, control, input):
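        # controlled-NOT: inverts 'input' when 'control' is set (boolean XOR)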
        if control:
            input = not input
        return input
    
    def notifyOtherListeners(self, context, message, f):
        """
        Notifies all listeners except the one in the given context, executing the function f on the message.
        """
        notified = self.listeners.copy()
        notified.remove(context.ID)
        f(message,notified)   
    
    def initContext(self, c):
        """Initialize a new context object."""
        self.listeners.add(c.ID)
    
    def expireContext(self, c):
        self.listeners.remove(c.ID)
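
Every hardware access in the server above is serialized through self.inCommunication, a DeferredLock, with the same acquire / deferToThread / release sequence. A minimal sketch of that pattern (names illustrative, not from the original source), hardened with try/finally so an exception in the threaded call cannot leave the lock held:

from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue
from twisted.internet.threads import deferToThread

class LockedDevice(object):
    def __init__(self, api):
        self.api = api
        self.inCommunication = DeferredLock()

    @inlineCallbacks
    def call(self, func, *args):
        # only one reactor task talks to the hardware at a time
        yield self.inCommunication.acquire()
        try:
            result = yield deferToThread(func, *args)
        finally:
            self.inCommunication.release()
        returnValue(result)

Twisted's DeferredLock.run(deferToThread, func, *args) performs the same acquire/release bookkeeping in a single call.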
Ejemplo n.º 53
0
class Dataset(QtCore.QObject):

    """Class to handle incoming data and prepare them for plotting """
    def __init__(self, parent, cxn, context, dataset, directory, datasetName, reactor):
        super(Dataset, self).__init__()
        self.accessingData = DeferredLock()
        self.parent = parent
        self.cxn = cxn
        self.context = context # context of the first dataset in the window
        self.dataset = dataset
        self.datasetName = datasetName
        self.directory = directory
        self.reactor = reactor
        self.data = None
#        self.hasPlotParameter = False
        self.cnt = 0
        self.setupDataListener(self.context)
#        self.setupFitListener(self.context)

#    @inlineCallbacks
#    def checkForPlotParameter(self):
#        self.parameters = yield self.cxn.data_vault.get_parameters(context = self.context)
#        if (self.parameters != None):
#            for (parameterName, value) in self.parameters:
#                if (str(parameterName) == 'plotLive'):
#                    self.hasPlotParameter = True
#                elif ((self.hasPlotParameter == True and str(parameterName) == 'Fit')):
#                      self.updateFit()

    @inlineCallbacks
    def getWindowParameter(self):
        try:
            value = yield self.cxn.data_vault.get_parameter('Window', context = self.context)
        except:
            value = None
        returnValue(value)

    # open dataset in order to listen for new data signals in current context
    @inlineCallbacks
    def openDataset(self, context):
        yield self.cxn.data_vault.cd(self.directory, context = context)
        yield self.cxn.data_vault.open(self.dataset, context = context)
        self.parameters = yield self.cxn.data_vault.parameters(context = context)
        self.parameterValues = []
        for parameter in self.parameters:
            parameterValue = yield self.cxn.data_vault.get_parameter(parameter, context = context)
            self.parameterValues.append(parameterValue)

#    @inlineCallbacks
#    def setupParameterListener(self, context):
#        yield self.cxn.data_vault.signal__new_parameter(66666, context = context)
#        yield self.cxn.data_vault.addListener(listener = self.updateParameter, source = None, ID = 66666, context = context)

#    # Over 60 seconds, check if the dataset has the appropriate 'plotLive' parameter
#    @inlineCallbacks
#    def listenForPlotParameter(self):
#        for i in range(20):
#            if (self.hasPlotParameter == True):
#                returnValue(self.hasPlotParameter)
##            yield deferToThread(time.sleep, .5)
#            yield self.wait(.5)
#        returnValue(self.hasPlotParameter)
#
#    def updateParameter(self, x, y):
#        self.checkForPlotParameter()

        #append whatever to self.parameters

#    # sets up the listener for new data
#    @inlineCallbacks
#    def setupFitListener(self, context):
#        yield self.cxn.data_vault.signal__new_parameter(22222, context = context)
#        yield self.cxn.data_vault.addListener(listener = self.updateFit, source = None, ID = 22222, context = context)

#    # new data signal
    @inlineCallbacks
#    def updateFit(self):
    def fit(self):
        value = yield self.cxn.data_vault.get_parameter('Fit', context = self.context)
        variables = yield self.cxn.data_vault.variables(context = self.context)
        numberDependentVariables = len(variables[1])
#       if (self.parameters != None):
        try:
            for window in self.parent.dwDict[self]:
                window.fitFromScript(self.dataset, self.directory, numberDependentVariables, value)
        except KeyError:
            print 'dwDict not created yet. Either the Fit parameter was added before data was created, or the data was added too quickly. Try adding a pause after adding all the data intended for fitting.'
    # sets up the listener for new data
    @inlineCallbacks
    def setupDataListener(self, context):
        yield self.cxn.data_vault.signal__data_available(11111, context = context)
        yield self.cxn.data_vault.addListener(listener = self.updateData, source = None, ID = 11111, context = context)
        #self.setupDeferred.callback(True)
        self.updatecounter = 0
        self.timer = self.startTimer(100)

    # new data signal
    def updateData(self,x,y):
        self.updatecounter = self.updatecounter + 1
        self.getData(self.context)
#        print 'still happening dataset'

    def timerEvent(self,evt):
        #print self.updatecounter
#        print 'in dataset'
#        if self.updatecounter < 1:
#            print 'slowing down!, less than 1 dataupdate per 100milliseconds '
        self.updatecounter = 0

    def endTimer(self):
        self.killTimer(self.timer)

    @inlineCallbacks
    def disconnectDataSignal(self):
        yield self.cxn.data_vault.removeListener(listener = self.updateData, source = None, ID = 11111, context = self.context)
#        yield self.cxn.data_vault.removeListener(listener = self.updateParameter, source = None, ID = 66666, context = self.context)

    # returns the current data
    @inlineCallbacks
    def getData(self,context):
        Data = yield self.cxn.data_vault.get(100, context = context)
        if self.data is None:
            self.data = Data
        else:
            yield self.accessingData.acquire()
            self.data = np.append(self.data, Data, 0)
            self.accessingData.release()

    @inlineCallbacks
    def emptyDataBuffer(self):
        yield self.accessingData.acquire()
        del(self.data)
        self.data = None
        self.accessingData.release()

    @inlineCallbacks
    def getYLabels(self):
        labels = []
        variables = yield self.cxn.data_vault.variables(context = self.context)
        for i in range(len(variables[1])):
            labels.append(variables[1][i][1] + ' - ' + self.datasetName)
        returnValue(labels)

    def wait(self, seconds, result=None):
        d = Deferred()
        self.reactor.callLater(seconds, d.callback, result)
        return d
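
The Dataset class above accumulates incoming data_vault chunks in self.data under a DeferredLock, and emptyDataBuffer is the matching reset step. A hedged sketch of the producer/consumer buffer this implies (class and method names are illustrative):

import numpy as np
from twisted.internet.defer import DeferredLock, inlineCallbacks, returnValue

class ChunkBuffer(object):
    def __init__(self):
        self.data = None
        self.lock = DeferredLock()

    @inlineCallbacks
    def append(self, chunk):
        yield self.lock.acquire()
        try:
            if self.data is None:
                self.data = chunk
            else:
                self.data = np.append(self.data, chunk, 0)
        finally:
            self.lock.release()

    @inlineCallbacks
    def drain(self):
        # hand the accumulated rows to the caller and reset the buffer
        yield self.lock.acquire()
        try:
            data, self.data = self.data, None
        finally:
            self.lock.release()
        returnValue(data)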
Ejemplo n.º 54
0
class SqlInode(Inode):
	"""\
	This represents an in-memory inode object.
	"""
#	__slots__ = [ \
#		'inum',          # inode number
#		'seq',           # database update tracking
#		'attrs',         # dictionary of current attributes (size etc.)
#		'updated',       # set of attributes not yet written to disk
#		'seq',           # change sequence# on-disk
#		'timestamp',     # time the attributes have been read from disk
#		'tree',          # Operations object
#		'changes',       # Range of bytes written (but not saved in a change record)
#		'cache',         # Range of bytes read from remote nodes
#		'inuse',         # open files on this inode? <0:delete after close
#		'write_timer',   # attribute write timer
#		'no_attrs',      # set of attributes which don't exist
#		'_save_timer',   # delayed inode saving
#		'_saving',       # DB object currently writing
#		'_saveq',        # delayed save
#		'no_attrs',      # cache for empty extended attributes
#		'load_lock',     # deferred queue for loading from database
#		'_nlink',        # deferred which holds the current nlink count
#		'_nlink_drop',   # timer which frees that when it times out
#		]

	@inlineCallbacks
	def _shutdown(self,db):
		"""When shutting down, flush the inode from the system."""
		if self.inum is None or self.seq is None:
			return
		yield self._save(db)
		if self.cache:
			yield self.cache._close()
		try: del self.fs.nodes[self.inum]
		except KeyError: pass

	# ___ FUSE methods ___

	def _no_nlink(self):
		self._nlink_drop = None
		self._nlink.addErrback(log.err,"no_nlink_end")
		self._nlink = None

	def _adj_nlink(self,x):
		if self._nlink is not None:
			self._nlink.addCallback(lambda n: n+x)

	def getattr(self):
		"""Read inode attributes from the database."""
		def do_nlink(db):
			if stat.S_ISDIR(self.mode): 
				d = db.DoFn("select count(*) from tree,inode where tree.parent=${inode} and tree.inode=inode.id and inode.typ='d'",inode=self.inum)
			else:
				d = db.DoFn("select count(*) from tree where inode=${inode}",inode=self.inum)
			def nlc(r):
				r = r[0]
				if stat.S_ISDIR(self.mode): 
					r += 2 ## . and ..
				self._nlink_drop = reactor.callLater(self.fs.ATTR_VALID[0], self._no_nlink)
				return r
			d.addCallback(nlc)
			d.addErrback(lambda r: log.err(r,"get nlink"))
			return d
			
		def do_getattr(nlink):
			res = {'ino':self.inum if self.inum != self.fs.root_inum else 1, 'nlink':nlink}
			for k in inode_attrs:
				res[k] = self[k]
# TODO count subdirectories
#			if stat.S_ISDIR(self.mode): 
#				res['nlink'] += 1
			res['blocks'] = (res['size']+BLOCKSIZE-1)//BLOCKSIZE
			res['blksize'] = BLOCKSIZE

			res = {'attr': res}
			res['nodeid'] = self.inum
			res['generation'] = 1 ## TODO: inodes might be recycled (depends on the database)
			res['attr_valid'] = self.fs.ATTR_VALID
			res['entry_valid'] = self.fs.ENTRY_VALID
			return res

		# if _nlink is set, it's a Deferred which has the current nlink
		# count as its value. If not, we get the value from the database.
		if self._nlink is None:
			self._nlink = self.fs.db(do_nlink, DB_RETRIES)

		d = Deferred()
		def cpy(r):
			d.callback(r)
			return r
		self._nlink.addCallback(cpy)
		d.addCallback(do_getattr)
		return d

	def setattr(self, **attrs):
		size = attrs.get('size',None)
		if size is not None:
			self.fs.record.trim(self,size)
		if size is not None and self.cache and self.size > size:
			dtrim = self.cache.trim(size)
		else:
			dtrim = None
		do_mtime = False; do_ctime = False; did_mtime = False
		for f in inode_attrs:
			if f == "ctime": continue
			v = attrs.get(f,None)
			if v is not None:
				if f == "mode":
					self[f] = stat.S_IFMT(self[f]) | stat.S_IMODE(v)
				else:
					self[f] = v
				if f == "size":
					do_mtime = True
				else:
					do_ctime = True
					if f == "mtime":
						did_mtime = True
		if do_ctime:
			self.ctime = nowtuple()
		if do_mtime and not did_mtime:
			self.mtime = nowtuple()
		return dtrim

	@inlineCallbacks
	def open(self, flags, ctx=None):
		"""Existing file."""
		yield self.fs.db(self._load, DB_RETRIES)
		if stat.S_ISDIR(self.mode):
			returnValue( IOError(errno.EISDIR) )
		f = self.fs.FileType(self,flags)
		yield f.open()
		returnValue( f )

	@inlineCallbacks
	def opendir(self, ctx=None):
		"""Existing file."""
		if not stat.S_ISDIR(self.mode):
			returnValue( IOError(errno.ENOTDIR) )
		d = self.fs.DirType(self)
		yield d.open()
		returnValue( d )

	@inlineCallbacks
	def _lookup(self, name, db):
		self.do_atime(is_dir=2)
		if name == ".":
			returnValue( self )
		elif name == "..":
			if self.inum == self.fs.root_inum:
				returnValue( self )
			try:
				inum, = yield db.DoFn("select parent from tree where inode=${inode} limit 1", inode=self.inum)
			except NoData:
				raise IOError(errno.ENOENT, "%d:%s" % (self.inum,name))
		else:
			try:
				inum, = yield db.DoFn("select inode from tree where parent=${inode} and name=${name}", inode=self.inum, name=name)
			except NoData:
				raise IOError(errno.ENOENT, "%d:%s" % (self.inum,name))
		res = SqlInode(self.fs,inum)
		yield res._load(db)
		returnValue( res )
	    
	def lookup(self, name):
		return self.fs.db(lambda db: self._lookup(name,db), 5)
			
	@inlineCallbacks
	def create(self, name, flags,mode, umask, ctx=None):
		"""New file."""
		@inlineCallbacks
		def do_create(db):
			try:
				inum, = yield db.DoFn("select inode from tree where parent=${par} and name=${name}", par=self.inum,name=name)
			except NoData:
				inode = yield self._new_inode(db, name,mode|stat.S_IFREG,ctx)
			else:
				if flags & os.O_EXCL:
					returnValue( IOError(errno.EEXIST) )
				inode = SqlInode(self.fs,inum)
				yield inode._load(db)
	
			res = self.fs.FileType(inode, flags)
			returnValue( (inode,res) )
		inode,res = yield self.fs.db(do_create, DB_RETRIES)

		# opens its own database connection and therefore must be outside
		# the sub block, otherwise it'll not see the inner transaction
		yield res.open()
		returnValue( (inode, res) )

	@property
	def typ(self):
		return mode_char[stat.S_IFMT(self.mode)]

	@inlineCallbacks
	def _new_inode(self, db, name,mode,ctx=None,rdev=None,target=None):
		"""\
			Helper to create a new named inode.
			"""
		if len(name) == 0 or len(name) > self.fs.info.namelen:
			raise IOError(errno.ENAMETOOLONG)
		now,now_ns = nowtuple()
		if rdev is None: rdev=0 # not NULL
		if target: size=len(target)
		else: size=0

		self.mtime = nowtuple()
		self.size += len(name)+1
		if stat.S_IFMT(mode) == stat.S_IFDIR:
			self._adj_nlink(1)
		def adj_size():
			self.size -= len(name)+1
			if stat.S_IFMT(mode) == stat.S_IFDIR:
				self._adj_nlink(-1)
		db.call_rolledback(adj_size)


		inum = yield db.Do("insert into inode (root,mode,uid,gid,atime,mtime,ctime,atime_ns,mtime_ns,ctime_ns,rdev,target,size,typ) values(${root},${mode},${uid},${gid},${now},${now},${now},${now_ns},${now_ns},${now_ns},${rdev},${target},${size},${typ})", root=self.fs.root_id,mode=mode, uid=ctx.uid,gid=ctx.gid, now=now,now_ns=now_ns,rdev=rdev,target=target,size=size,typ=mode_char[stat.S_IFMT(mode)])
		yield db.Do("insert into tree (inode,parent,name) values(${inode},${par},${name})", inode=inum,par=self.inum,name=name)
		db.call_committed(self.fs.rooter.d_inode,1)
		
		inode = SqlInode(self.fs,inum)
		yield inode._load(db)
		returnValue( inode )

	@inlineCallbacks
	def forget(self):
		"""\
			Drop this node: save.
			"""
		yield self.fs.db(self._save, DB_RETRIES)
		returnValue (None)
			
	@inlineCallbacks
	def unlink(self, name, ctx=None):
		yield self.fs.db(lambda db: self._unlink(name,ctx=ctx,db=db), DB_RETRIES)
		returnValue( None )

	@inlineCallbacks
	def _unlink(self, name, ctx=None, db=None):
		inode = yield self._lookup(name,db)
		if stat.S_ISDIR(inode.mode):
			returnValue( IOError(errno.EISDIR) )

		yield db.Do("delete from tree where parent=${par} and name=${name}", par=self.inum,name=name)
		cnt, = yield db.DoFn("select count(*) from tree where inode=${inode}", inode=inode.inum)
		if cnt == 0:
			if not inode.defer_delete():
				yield inode._remove(db)

		self.mtime = nowtuple()
		self._adj_nlink(-1)
		self.size -= len(name)+1
		if self.size < 0:
			log.err("Size problem, inode %s"%(self,))
			self.size = 0
		def adj_size():
			self._adj_nlink(1)
			self.size += len(name)+1
		db.call_rolledback(adj_size)
		returnValue( None )

	def rmdir(self, name, ctx=None):
		@inlineCallbacks
		def do_rmdir(db):
			inode = yield self._lookup(name,db)
			if not stat.S_ISDIR(inode.mode):
				returnValue( IOError(errno.ENOTDIR) )
			cnt, = yield db.DoFn("select count(*) from tree where parent=${inode}", inode=inode.inum)
			if cnt:
				returnValue( IOError(errno.ENOTEMPTY) )

			self._adj_nlink(-1)
			db.call_rolledback(self._adj_nlink,1)
			db.call_committed(self.fs.rooter.d_dir,-1)
			yield inode._remove(db)
		return self.fs.db(do_rmdir, DB_RETRIES)

	def symlink(self, name, target, ctx=None):
		if len(target) > self.fs.info.targetlen:
			return IOError(errno.ENAMETOOLONG,"target entry too long")
		return self.fs.db(lambda db: self._new_inode(db,name,stat.S_IFLNK|(0o755) ,ctx,target=target), DB_RETRIES)

	def link(self, oldnode,target, ctx=None):
		def do_link(db):
			if stat.S_ISDIR(oldnode.mode):
				return IOError(errno.EISDIR,"Cannot link a directory")
			return self._link(oldnode,target, ctx=ctx,db=db)
		return self.fs.db(do_link, DB_RETRIES)

	@inlineCallbacks
	def _link(self, oldnode,target, ctx=None,db=None):
		try:
			yield db.Do("insert into tree (inode,parent,name) values(${inode},${par},${name})", inode=oldnode.inum,par=self.inum,name=target)
		except Exception:
			returnValue( IOError(errno.EEXIST, "%d:%s" % (self.inum,target)) )
		self.mtime = nowtuple()

		self.size += len(target)+1
		self._adj_nlink(1)
		def adj_size():
			self.size -= len(target)+1
			self._adj_nlink(-1)
		db.call_rolledback(adj_size)

		returnValue( oldnode ) # that's what's been linked, i.e. link count +=1
			

	def mknod(self, name, mode, rdev, umask, ctx=None):
		return self.fs.db(lambda db: self._new_inode(db,name,mode,ctx,rdev), DB_RETRIES)

	def mkdir(self, name, mode,umask, ctx=None):
		@inlineCallbacks
		def do_mkdir(db):
			inode = yield self._new_inode(db,name,(mode&0o7777&~umask)|stat.S_IFDIR,ctx)
			db.call_committed(self.fs.rooter.d_dir,1)
			returnValue( inode )
		return self.fs.db(do_mkdir, DB_RETRIES)

	@inlineCallbacks
	def _remove(self,db):
		yield self._save(db) # may not be necessary; this clears the save timer

		entries = []
		def app(parent,name):
			entries.append((parent,name))
		yield db.DoSelect("select parent,name from tree where inode=${inode}", inode=self.inum, _empty=True, _callback=app)
		for p in entries:
			p,name = p
			p = SqlInode(self.fs,p)
			yield p._load(db)
			def adj_size(p):
				p.mtime = nowtuple()
				p.size -= len(name)+1
			db.call_committed(adj_size,p)
		yield db.Do("delete from tree where inode=${inode}", inode=self.inum, _empty=True)
		if self.fs.single_node or not stat.S_ISREG(self.mode):
			yield db.Do("delete from inode where id=${inode}", inode=self.inum)
			yield db.call_committed(self.fs.rooter.d_inode,-1)
			if stat.S_ISREG(self.mode):
				yield db.call_committed(self.fs.rooter.d_size,self.size,0)
		else:
			self.fs.record.delete(self)

		yield deferToThread(self._os_unlink)
		del self.fs.nodes[self.inum]
		self.inum = None
		returnValue( None )

	def _os_unlink(self):
		if self.inum is None: return
		if stat.S_ISREG(self.mode):
			try:
				os.unlink(self._file_path())
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise

	def do_atime(self, is_dir=0):
		"""\
			Rules for atime update.
			"""
		if is_dir:
			if self.fs.diratime < is_dir: return
		else:
			if not self.fs.atime: return
			if self.fs.atime == 1 and self.atime > self.mtime: return
		self.atime = nowtuple()

	def _file_path(self):
		return self.cache._file_path()


	def getxattr(self, name, ctx=None):
		@inlineCallbacks
		def do_getxattr(db):
			nid = yield self.fs.xattr_id(name,db,False)
			if nid is None:
				returnValue( IOError(errno.ENODATA) )
			try:
				val, = yield db.DoFn("select value from xattr where inode=${inode} and name=${name}", inode=self.inum,name=nid)
			except NoData:
				returnValue( IOError(errno.ENODATA) )
			returnValue( val )

		if name in self.no_attrs:
			return IOError(errno.ENODATA)
		res = self.fs.db(do_getxattr, DB_RETRIES)
		def noa(r):
			if not isinstance(r,IOError):
				return r
			if r.errno != errno.ENODATA:
				return r
			self.no_attrs.add(name)
			return r
		res.addCallback(noa)
		return res


	def setxattr(self, name, value, flags, ctx=None):
		if name in self.no_attrs:
			self.no_attrs.remove(name)
		if len(value) > self.fs.info.attrlen:
			return IOError(errno.E2BIG)

		@inlineCallbacks
		def do_setxattr(db):
			nid = yield self.fs.xattr_id(name,db,True)
			try:
				yield db.Do("update xattr set value=${value},seq=seq+1 where inode=${inode} and name=${name}", inode=self.inum,name=nid,value=value)
			except NoData:
				if flags & XATTR_REPLACE:
					returnValue( IOError(errno.ENODATA) )
				yield db.Do("insert into xattr (inode,name,value,seq) values(${inode},${name},${value},1)", inode=self.inum,name=nid,value=value)
			else: 
				if flags & XATTR_CREATE:
					returnValue( IOError(errno.EEXIST) )
			returnValue( None )
		return self.fs.db(do_setxattr, DB_RETRIES)

	def listxattrs(self, ctx=None):
		@inlineCallbacks
		def do_listxattrs(db):
			res = []
			i = yield db.DoSelect("select name from xattr where inode=${inode}", inode=self.inum, _empty=1,_store=1)
			for nid, in i:
				name = yield self.fs.xattr_name(nid,db)
				res.append(name)
			returnValue( res )
		return self.fs.db(do_listxattrs, DB_RETRIES)

	def removexattr(self, name, ctx=None):
		@inlineCallbacks
		def do_removexattr(db):
			nid = yield self.fs.xattr_id(name, db,False)
			if nid is None:
				returnValue( IOError(errno.ENODATA) )
			try:
				yield db.Do("delete from xattr where inode=${inode} and name=${name}", inode=self.inum,name=nid)
			except NoData:
				returnValue( IOError(errno.ENODATA) )
			returnValue( None )
		if name in self.no_attrs:
			return IOError(errno.ENODATA)
		self.no_attrs.add(name)
		return self.fs.db(do_removexattr, DB_RETRIES)

	def readlink(self, ctx=None):
		self.do_atime()
		return self.target

	# ___ supporting stuff ___

	def __repr__(self):
		if not self.inum:
			return "<SInode>"
		if not self.seq:
			# attributes not loaded yet, so self.typ is unavailable
			return "<SInode %d>" % (self.inum,)
		cache = self.cache
		if self.typ == "f":
			typ = ""
		else:
			typ = ":"+self.typ

		if not cache:
			cache = ""
		elif cache.known is None:
			cache = " ??"
		else:
			cache = " "+str(cache.known)
		if not self.updated:
			return "<SInode%s %d:%d%s>" % (typ,self.inum, self.seq, cache)
		return "<SInode%s %d:%d (%s)%s>" % (typ,self.inum, self.seq, " ".join(sorted(self.updated.keys())), cache)
	__str__=__repr__

	def __hash__(self):
		if self.inum:
			return self.inum
		else:
			return id(self)
	
	def __cmp__(self,other):
		if self.inum is None or other.inum is None:
			return id(self)-id(other)
		else:
			return self.inum-other.inum
	def __eq__(self,other):
		if id(self)==id(other):
			return True
		if isinstance(other,SqlInode):
			if self.inum and other.inum and self.inum == other.inum:
				raise RuntimeError("two inodes have the same ID")
		elif self.inum == other:
			return True
		return False

	def __ne__(self,other):
		if id(self)==id(other):
			return False
		if self.inum and other.inum and self.inum == other.inum:
			raise RuntimeError("two inodes have the same ID")
		return True

	def __new__(cls,filesystem,nodeid):
		self = _Inode.get(nodeid,None)
		if self is None:
			self = object.__new__(cls)
			self.inuse = None
			_Inode[nodeid] = self
		return self
	def __init__(self,filesystem,nodeid):
#		if isinstance(inum,SqlInode): return
#		if self.inum is not None:
#			assert self.inum == inum
#			return
		if getattr(self,"inuse",None) is not None: return
		super(SqlInode,self).__init__(filesystem,nodeid)
		self.seq = None
		self.attrs = None
		self.timestamp = None
		self.inuse = 0
		self.changes = Range()
		self.cache = NotKnown
		self.load_lock = DeferredLock()
		self._saving = None
		self._saveq = []
		self.no_attrs = set()
		self._save_timer = None
		self._nlink = None
		_InodeList.append(self)
		# defer anything we only need when loaded to after _load is called

	def missing(self, start,end):
		"""\
			The system has determined that some data could not be found
			anywhere.
			"""
		if not self.cache:
			return
		r = Range([(start,end)]) - self.cache.available
		self.cache.known -= r
		self.fs.changer.note(self.cache)

	@inlineCallbacks
	def _load(self, db):
		"""Load attributes from storage"""
		yield self.load_lock.acquire()
		try:
			if not self.inum:
				# probably deleted
				return

			if self.seq:
				yield self._save(db)

			d = yield db.DoFn("select * from inode where id=${inode}", inode=self.inum, _dict=True)
			if self.seq is not None and self.seq == d["seq"]:
				returnValue( None )

			self.attrs = {}
			self.updated = {} # old values

			for k in inode_xattrs:
				if k.endswith("time"):
					v = (d[k],d[k+"_ns"])
				else:
					v = d[k]
				self.attrs[k] = v
			self.seq = d["seq"]
			self.size = d["size"]
			self.timestamp = nowtuple()

			if self.cache is NotKnown:
				if self.typ == mode_char[stat.S_IFREG]:
					self.cache = Cache(self.fs,self)
					yield self.cache._load(db)
				else:
					self.cache = None

		finally:
			self.load_lock.release()
		returnValue( None )
	
	def __getitem__(self,key):
		if not self.seq:
			raise RuntimeError("inode data not loaded: "+repr(self))
			# self._load()
		return self.attrs[key]

	def __setitem__(self,key,value):
		if not self.seq:
			raise RuntimeError("inode data not loaded: "+repr(self))
			# self._load()
		if key.endswith("time"):
			assert isinstance(value,tuple)
		else:
			assert not isinstance(value,tuple)
		if self.attrs[key] != value:
			if key not in self.updated:
				self.updated[key] = self.attrs[key]
			self.attrs[key] = value
			self.fs.ichanger.note(self)

	# We can't use a DeferredLock here because the "done" callback will run at the end of the transaction,
	# but we might still need to re-enter the thing within the same transaction.
	def _save_done(self,db):
		if self._saving != db:
			trace('fs',"%s: save unlock error: %s %s",self,self._saving,db)
			raise RuntimeError("inode _save releasing")
		trace('fs',"%s: save unlock: %s",self,db)
		self._saving = None
		if self._saveq:
			self._saveq.pop(0).callback(None)
	def _up_seq(self,n):
		self.seq = n
	def _up_args(self,updated,d):
		if not self.inum: return
		for k in inode_xattrs:
			if k.endswith("time"):
				v = (d[k],d[k+"_ns"])
			else:
				v = d[k]
			if k in self.updated: # changed again meanwhile: refresh the saved old value
				self.updated[k] = v
			elif k not in updated: # not touched by this save: adopt the stored value
				self.attrs[k] = v

	def delayed_save(self):
		self._save_timer = None
		d = self.fs.db(self._save, DB_RETRIES)
		d.addErrback(lambda r: log.err(r,"delayed save"))

	@inlineCallbacks
	def _save(self, db, new_seq=None):
		"""Save this inode's attributes"""
		if not self.inum: return
		if self._save_timer:
			self._save_timer.cancel()
			self._save_timer = None
		ch = None

		if not self.updated and not self.changes:
			return
		if self._saving is not None and self._saving != db:
			d = Deferred()
			self._saveq.append(d)
			trace('fs',"%s: save lock wait",self)
			yield d
			trace('fs',"%s: save lock done",self)
			if self._saving is not None:
				raise RuntimeError("inode _save locking")
		if not self.updated and not self.changes:
			return

		if self._saving is None:
			trace('fs',"%s: save locked",self)
			self._saving = db
			do_release = True
		else:
			trace('fs',"%s: save reentered",self)
			assert self._saving is db
			do_release = False
		try:
			args = {}
			for f in self.updated.keys():
				if f.endswith('time'):
					v=self.attrs[f]
					args[f]=v[0]
					args[f+'_ns']=v[1]
				else:
					args[f]=self.attrs[f]
			updated = self.updated
			self.updated = {}

			if self.changes:
				ch = self.changes.encode()
				self.changes = Range()
			else:
				ch = None

			if args:
				try:
					if new_seq:
						raise NoData # don't even try
					yield db.Do("update inode set seq=seq+1, "+(", ".join("%s=${%s}"%(k,k) for k in args.keys()))+" where id=${inode} and seq=${seq}", inode=self.inum, seq=self.seq, **args)
				except NoData:
					try:
						d = yield db.DoFn("select for update * from inode where id=${inode}", inode=self.inum, _dict=True)
					except NoData:
						# deleted inode
						trace('conflict',"inode_deleted %s %s %s",self.inum,self.seq,",".join(sorted(updated.keys())))
						del self.fs.nodes[self.inum]
						self.inum = None
					else:
						if new_seq:
							if new_seq != d['seq']+1:
								raise NoData
						else:
							if self.seq >= d['seq']:
								raise NoData
							new_seq=d['seq']+1
						trace('fs',"%s: inode_changed seq=%s old=%s new=%s",self,self.seq,d['seq'],new_seq)
						for k,v in updated.items():
							if k in ('event','size'):
								# always use the latest event / largest file size
								if self[k] < d[k]:
									self[k] = d[k]
									del args[k]
							elif k.endswith('time'):
								# Timestamps only increase, so that's no conflict
								if self[k] < d[k]:
									self[k] = d[k]
									del args[k]
									del args[k+'_ns']
							else:
								if self[k] != d[k] and d[k] != v:
									# three-way difference. Annoy the user.
									trace('conflict',"%d: %s k=%s old=%s loc=%s rem=%s",self.inum,k,v,self[k],d[k])

						if args:
							yield db.Do("update inode set seq=${new_seq}, "+(", ".join("%s=${%s}"%(k,k) for k in args.keys()))+" where id=${inode} and seq=${seq}", inode=self.inum, seq=seq, new_seq=new_seq, **args)
							db.call_committed(self._up_seq,new_seq)
						else:
							db.call_committed(self._up_seq,d["seq"])

						# now update the rest
						db.call_committed(self._up_args,updated,d)
				else:
					db.call_committed(self._up_seq,self.seq+1)

					self.seq += 1
				if "size" in args:
					db.call_committed(self.fs.rooter.d_size,updated["size"],self.attrs["size"])

				def re_upd(self):
					for k,v in updated.items():
						if k not in self.updated:
							self.updated[k]=v
				db.call_rolledback(re_upd,self)

			if ch:
				db.call_committed(self.fs.record.change,self,ch)

		finally:
			if do_release:
				db.call_committed(self._save_done,db)
				db.call_rolledback(self._save_done,db)

		returnValue( None )

# busy-inode flag
	def set_inuse(self):
		"""Increment the inode's busy counter."""
		if self.inuse >= 0:
			self.inuse += 1
		else:
			# negative values mark "delete when closed": grow the magnitude
			self.inuse -= 1

	def clr_inuse(self):
		"""\
			Decrement the inode's busy counter,
			kill the inode if it reaches zero and the inode is marked for deletion.
			"""
		#return a Deferred if it's to be deleted
		if self.inuse < 0:
			self.inuse += 1
			if self.inuse == 0:
				return self.fs.db(self._remove, DB_RETRIES)
		elif self.inuse > 0:
			self.inuse -= 1
		else:
			raise RuntimeError("SqlInode %r counter mismatch" % (self,))
		return succeed(None)

	def defer_delete(self):
		"""Mark for deletion if in use."""
		if not self.inuse:
			return False
		if self.inuse > 0:
			self.inuse = -self.inuse
		return True
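
The _save path above avoids DeferredLock deliberately (see the comment before _save_done): its release runs at the end of a database transaction, yet the same transaction may need to re-enter the saving code. A hedged standalone sketch of the idea behind the _saving/_saveq pair, a lock keyed to an owner (illustrative, not the original implementation):

from twisted.internet.defer import Deferred, inlineCallbacks, returnValue

class OwnerLock(object):
    """A lock its current owner may re-enter; other owners queue as Deferreds."""
    def __init__(self):
        self._owner = None
        self._waiters = []

    @inlineCallbacks
    def acquire(self, owner):
        # re-check after every wakeup, mirroring the _saving test in _save
        while self._owner is not None and self._owner is not owner:
            d = Deferred()
            self._waiters.append(d)
            yield d
        self._owner = owner
        returnValue(None)

    def release(self, owner):
        # as in _save, only the outermost caller should release
        assert self._owner is owner
        self._owner = None
        if self._waiters:
            self._waiters.pop(0).callback(None)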
Ejemplo n.º 55
0
class ZipStream(object):

    def __init__(self, consumer):
        self.consumer = consumer
        assert IConsumer.implementedBy(consumer.__class__)

        self._producers = []

        self._sendingLock = DeferredLock()
        self._localHeaderLength = 0
        self._centralDirectoryLength = 0


    @inlineCallbacks
    def addProducer(self, producer):
        assert IZippyProducer.implementedBy(producer.__class__)

        size = yield producer.size()
        timestamp = yield producer.timestamp()
        crc32 = yield producer.crc32()
        key = yield producer.key()

        yield self._sendingLock.acquire()

        self._producers.append((producer, self._localHeaderLength))

        # local file header
        timestamp = dos_timestamp(timestamp)
        localHeader = struct.pack('<L5H3L2H', # format 
                                  0x04034b50, # magic (4 bytes)
                                  20, # version needed to extract (2 bytes)
                                  0, # general purpose bit flag (2 bytes)
                                  0, # compression method (2 bytes)
                                  timestamp[1], # last mod file time (2 bytes)
                                  timestamp[0], # last mod file date (2 bytes)
                                  crc32 & 0xffffffff, # CRC (4 bytes)
                                  size, # compressed size (4 bytes)
                                  size, # uncompressed size (4 bytes)
                                  len(key), # file name length (2 bytes)
                                  0, # extra field length (2 bytes)
                                 )

        localHeader += key
        self.consumer.write(localHeader)
        self._localHeaderLength += len(localHeader) + size

        # file data
        yield producer.beginProducing(self.consumer)

        self._sendingLock.release()


    @inlineCallbacks
    def centralDirectory(self):
        yield self._sendingLock.acquire()

        # file header
        for producer, offset in self._producers:
            size = yield producer.size()
            timestamp = yield producer.timestamp()
            timestamp = dos_timestamp(timestamp)
            crc32 = yield producer.crc32()
            key = yield producer.key()

            fileHeader = struct.pack('<L6H3L5H2L', # format
                                     0x02014b50, # magic (4 bytes)
                                     20, # version made by (2 bytes)
                                     20, # version needed to extract (2 bytes)
                                     0, # general purpose bit flag (2 bytes)
                                     0, # compression method (2 bytes)
                                     timestamp[1], # last mod file time (2 bytes)
                                     timestamp[0], # last mod file date (2 bytes)
                                     crc32 & 0xffffffff, # CRC (4 bytes)
                                     size, # compressed size (4 bytes)
                                     size, # uncompressed size(4 bytes)
                                     len(key), # file name length (2 bytes)
                                     0, # extra field length (2 bytes)
                                     0, # file comment length (2 bytes)
                                     0, # disk number start (2 bytes)
                                     0, # internal file attributes (2 bytes)
                                     0, # external file attributes (4 bytes)
                                     offset, # relative offset of local header (4 bytes)
                                    )

            fileHeader += key
            self._centralDirectoryLength += len(fileHeader)
            self.consumer.write(fileHeader)


        # end of central directory header
        endHeader = struct.pack('<L4H2LH', # format
                                0x06054b50, # magic (4 bytes)
                                0, # disk number (2 bytes)
                                0, # disk number with start of central directory (2 bytes)
                                len(self._producers), # total central directory entries on this disk (2 bytes)
                                len(self._producers), # total central directory entries (2 bytes)
                                self._centralDirectoryLength, # size of central directory (4 bytes)
                                self._localHeaderLength, # offset of start of central directory with respect to the starting disk number (4 bytes)
                                0, # zip file comment length (2 bytes)
                               )
        self.consumer.write(endHeader)



        self._sendingLock.release()
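
The header layouts above are fixed-size: '<L5H3L2H' packs the 30-byte fixed part of a ZIP local file header, '<L6H3L5H2L' the 46-byte fixed part of a central directory entry, and '<L4H2LH' the 22-byte end-of-central-directory record. A quick hedged sanity check of those sizes and of the magic round-trip (sample values are arbitrary):

import struct

assert struct.calcsize('<L5H3L2H') == 30    # local file header, fixed part
assert struct.calcsize('<L6H3L5H2L') == 46  # central directory entry, fixed part
assert struct.calcsize('<L4H2LH') == 22     # end-of-central-directory record

header = struct.pack('<L5H3L2H', 0x04034b50, 20, 0, 0,
                     0x6083, 0x4a21, 0xdeadbeef, 1024, 1024, 8, 0)
magic, = struct.unpack_from('<L', header)
assert magic == 0x04034b50                  # local file header signature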
Ejemplo n.º 56
0
class DACServer( LabradServer ):
    """
    DAC Server
    Used for controlling DC trap electrodes
    """
    name = SERVERNAME
    onNewUpdate = Signal(SIGNALID, 'signal: ports updated', 's')
    registryPath = [ '', 'Servers', hc.EXPNAME + SERVERNAME ]
    currentPosition = 0
    CfileName = 'None Specified'
    
    @inlineCallbacks
    def initServer( self ):
        self.api = api()
        self.inCommunication = DeferredLock()
        self.registry = self.client.registry        
        self.dacDict = dict(hc.elec_dict.items() + hc.sma_dict.items())
        print("dacDict:")
        print(len(self.dacDict.items()))
        self.queue = Queue()
        self.multipoles = hc.default_multipoles
        self.currentVoltages = {}
        self.initializeBoard()
        self.listeners = set() 
        yield self.setCalibrations()
        self.setPreviousControlFile()

    def initializeBoard(self):
        connected = self.api.connectOKBoard()
        if not connected:
            raise Exception ("FPGA Not Found")     

    @inlineCallbacks
    def setCalibrations( self ):
        ''' Go through the list of sma outs and electrodes and try to detect calibrations '''
        yield self.registry.cd(self.registryPath + ['Calibrations'], True)
        subs, keys = yield self.registry.dir()
        print 'Calibrated channels: ', subs
        for chan in self.dacDict.values():
            chan.voltageList = []
            c = [] # list of calibration coefficients in form [c0, c1, ..., cn]
            if str(chan.dacChannelNumber) in subs:
                yield self.registry.cd(self.registryPath + ['Calibrations', str(chan.dacChannelNumber)])
                dirs, coeffs = yield self.registry.dir()
                for n in range( len(coeffs) ):
                    e = yield self.registry.get( 'c'+str(n) )
                    c.append(e)
                chan.calibration = c
            else:
                (vMin, vMax) = chan.boardVoltageRange
                prec = hc.PREC_BITS
                chan.calibration = [2**(prec - 1), float(2**(prec))/(vMax - vMin) ]
    
    @inlineCallbacks
    def setPreviousControlFile( self ):
        try:
            yield self.registry.cd(self.registryPath)
            CfilePath = yield self.registry.get('MostRecentCfile')
            yield self.setMultipoleControlFile(0, CfilePath)
        except: 
            self.multipoleMatrix = {k: {j: [.1] for j in self.multipoles} for k in hc.elec_dict.keys()} # if no previous Cfile was set, set all entries to 0.1
            self.numCols = 1
            yield self.registry.cd(self.registryPath + ['None Specified'])
            yield self.setMultipoleValues(0, [(k, 0) for k in self.multipoles])
            yield self.setIndividualAnalogVoltages(0, [(s, 0) for s in hc.sma_dict.keys()])

    @inlineCallbacks
    def setPreviousVoltages( self ):
        ''' Try to set previous voltages used with current Cfile '''
        yield self.registry.cd(self.registryPath + [self.CfileName], True)
        try: self.currentPosition = yield self.registry.get('position')
        except: self.currentPosition = 0
        
        try: ms = yield self.registry.get('MultipoleSet')         
        except: ms = [(k, 0) for k in self.multipoles] # if no previous multipole values have been recorded, set them to zero. 
        yield self.setMultipoleValues(0, ms)      
        
        yield self.registry.cd(self.registryPath + [self.CfileName, 'smaVoltages'], True)
        for k in hc.sma_dict.keys():
            try: av = yield self.registry.get(k)
            except: av = 0. # if no previous voltage has been recorded, set to zero. 
            yield self.setIndividualAnalogVoltages(0, [(k, av)])

    @setting( 0, "Set Multipole Control File", CfilePath = 's')
    def setMultipoleControlFile(self, c, CfilePath):
        data = open(CfilePath)
        mults = data.readline().rstrip('\n').split(':')
        if len(mults) > 1: 
            self.multipoles = mults[1].split(',')
        else: 
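            # no multipole header line: re-open so the first line is parsed as data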
            data = open(CfilePath)
        data = genfromtxt(data)
        self.numCols = data[1].size
        if self.numCols == 1: data = [[data[i]] for i in range(data.size)]
        self.multipoleMatrix = {elec: {mult: data[int(elec) + index*len(hc.elec_dict) - 1] for index, mult in enumerate(self.multipoles)} for elec in hc.elec_dict.keys()}
        self.positionList = data[-1]

        if sys.platform.startswith('linux'): self.CfileName = CfilePath.split('/')[-1]
        elif sys.platform.startswith('win'): self.CfileName = CfilePath.split('\\')[-1]
        
        yield self.setPreviousVoltages()
        yield self.registry.cd(self.registryPath)
        yield self.registry.set('MostRecentCfile', CfilePath)

    @setting( 1, "Set Multipole Values", ms = '*(sv): dictionary of multipole values')
    def setMultipoleValues(self, c, ms):
        """
        set should be a dictionary with keys 'Ex', 'Ey', 'U2', etc.
        """
        self.multipoleSet = {m: v for (m,v) in ms}
        voltageMatrix = {}
        for e in hc.elec_dict.keys():
            voltageMatrix[e] = [0. for n in range(self.numCols)]
            for n in range(self.numCols):
                for m in self.multipoles: voltageMatrix[e][n] += self.multipoleMatrix[e][m][n] * self.multipoleSet[m]
        if self.numCols > 1:
            voltageMatrix = self.interpolateVoltageMatrix(voltageMatrix)
        self.voltageMatrix = voltageMatrix
        yield self.setVoltages(c, newPosition = self.currentPosition)

        yield self.registry.cd(self.registryPath + [self.CfileName], True)
        yield self.registry.set('MultipoleSet', ms)

    def interpolateVoltageMatrix( self, voltageMatrix ):
        # fix step size here
        numPositions = 10*(self.numCols - 1.)
        inc = (self.numCols-1)/numPositions
        partition = arange(0, (numPositions + 1) * inc, inc)
        splineFit = {elec: UniSpline(range(self.numCols) , voltageMatrix[elec], s = 0 ) for elec in hc.elec_dict.keys()}
        interpolatedVoltageMatrix = {elec: splineFit[elec](partition) for elec in hc.elec_dict.keys()}
        return interpolatedVoltageMatrix

    @inlineCallbacks
    def setVoltages(self, c, newPosition = currentPosition, writeSMAs = False):
        n = newPosition
        newVoltageSet = []
        for e in hc.elec_dict.keys():
            av = self.voltageMatrix[e][n]
            newVoltageSet.append( (e, av) )

        # if changing DAC FPGA voltage set, write sma voltages. 
        if writeSMAs: 
            for s in hc.sma_dict.keys(): newVoltageSet.append( (s, self.currentVoltages[s]) )
        yield self.setIndividualAnalogVoltages(c, newVoltageSet)
        newVoltageSet.append(newVoltageSet[len(newVoltageSet)-1])
        self.currentPosition = n

        yield self.registry.cd(self.registryPath + [self.CfileName])
        yield self.registry.set('position', self.currentPosition)

    @inlineCallbacks
    def writeToFPGA(self, c):
        yield self.resetFIFODAC()
        for i in range(len(self.queue.setDict[self.queue.currentSet])):
            v = self.queue.get()            
            yield self.setDACVoltages(v.hexRep)
            print v.channel.name, v.analogVoltage
            self.currentVoltages[v.channel.name] = v.analogVoltage
            self.notifyOtherListeners(c)

    @inlineCallbacks
    def setDACVoltages(self, stringy):
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.setDACVoltage, stringy)
        # self.api.setDACVoltage(stringy)
        self.inCommunication.release()
    
    @inlineCallbacks
    def resetFIFODAC(self):
        yield self.inCommunication.acquire()
        yield deferToThread(self.api.resetFIFODAC)
        # self.api.resetFIFODAC()
        self.inCommunication.release()            

    @setting( 2, "Set Digital Voltages", digitalVoltages = '*v', setNum = 'i', returns = '')
    def setDigitalVoltages( self, c, digitalVoltages, setNum):
        """
        Pass digitalVoltages, a list of digital voltages to update.
        Currently, there must be one for each port.
        """
        l = zip(self.dacDict.keys(), digitalVoltages)
        yield self.setIndividualDigitalVoltages(c, l, setNum)

    @setting( 3, "Set Analog Voltages", analogVoltages = '*v', setNum = 'i', returns = '')
    def setAnalogVoltages( self, c, analogVoltages, setNum):
        """
        Pass analogVoltages, a list of analog voltages to update.
        Currently, there must be one for each port.
        """
        l = zip(self.dacDict.keys(), analogVoltages)
        yield self.setIndividualAnalogVoltages(c, l)

    @setting( 4, "Set Individual Digital Voltages", digitalVoltages = '*(si)', returns = '')
    def setIndividualDigitalVoltages(self, c, digitalVoltages, setNum = 0):
        """
        Pass a list of tuples of the form:
        (portNum, newVolts)
        """
        for (port, dv) in digitalVoltages:
            self.queue.insert(Voltage(self.dacDict[port], digitalVoltage = dv))
        yield self.writeToFPGA(c)

    @setting( 5, "Set Individual Analog Voltages", analogVoltages = '*(sv)', returns = '')
    def setIndividualAnalogVoltages(self, c, analogVoltages):
        """
        Pass a list of tuples of the form:
        (portNum, newVolts)
        """
        for (port, av) in analogVoltages:
            self.queue.insert(Voltage(self.dacDict[port], analogVoltage = av))
            if self.dacDict[port].smaOutNumber:
                yield self.registry.cd(self.registryPath + [self.CfileName, 'smaVoltages'])
                yield self.registry.set(port, av)
        yield self.writeToFPGA(c)
    
    @setting( 6, "Set First Voltages")
    def setFirstVoltages(self, c):
        self.queue.reset()
        yield self.setVoltages(c, newPosition = self.currentPosition, writeSMAs = True)

    @setting( 7, "Set Next Voltages", newPosition = 'i')
    def setFutureVoltages(self, c, newPosition):
        self.queue.advance()
        yield self.setVoltages(c, newPosition, True)

    @setting( 8, "Set Next Voltages New Multipoles", multipoles = '*(sv)')
    def setNextVoltagesNewMultipoles(self, c, multipoles):
        self.queue.advance()
        yield self.setMultipoleValues(c, multipoles)

    @setting( 9, "Get Analog Voltages", returns = '*(sv)' )
    def getCurrentVoltages(self, c):
        """
        Return the current voltage
        """
        return self.currentVoltages.items()        

    @setting( 10, "Get Multipole Values",returns='*(s, v)')
    def getMultipoleValues(self, c):
        """
        Return a list of multipole voltages
        """
        return self.multipoleSet.items()

    @setting( 11, "Get Multipole Names",returns='*s')
    def getMultipoleNames(self, c):
        """
        Return a list of multipole names
        """
        return self.multipoles        

    @setting( 12, "Get Position", returns = 'i')
    def getPosition(self, c):
        return self.currentPosition

    def initContext(self, c):
        self.listeners.add(c.ID)

    def expireContext(self, c):
        self.listeners.remove(c.ID)
    
    def notifyOtherListeners(self, context):
        notified = self.listeners.copy()
        try: notified.remove(context.ID)
        except: pass
        self.onNewUpdate('Channels updated', notified)
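
interpolateVoltageMatrix above resamples each electrode's per-column voltages onto a grid ten times finer than the Cfile columns. A hedged standalone version of that step, assuming UniSpline is scipy.interpolate.UnivariateSpline imported under that name:

from numpy import arange
from scipy.interpolate import UnivariateSpline

numCols = 5                              # hypothetical number of Cfile columns
voltages = [0.0, 0.4, 1.0, 0.6, 0.2]     # one electrode's voltage per column
numPositions = 10 * (numCols - 1.)       # same step-size rule as the server
inc = (numCols - 1) / numPositions
partition = arange(0, (numPositions + 1) * inc, inc)   # 0.0, 0.1, ..., 4.0
fit = UnivariateSpline(range(numCols), voltages, s = 0)
fineVoltages = fit(partition)            # one voltage per interpolated position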
Ejemplo n.º 57
0
class CFProcessor(service.Service):
    implements(interfaces.IProcessor)

    def __init__(self, name, conf):
        _log.info("CF_INIT %s", name)
        self.name, self.conf = name, conf
        self.channel_dict = defaultdict(list)
        self.iocs = dict()
        self.client = None
        self.currentTime = getCurrentTime
        self.lock = DeferredLock()

    def startService(self):
        service.Service.startService(self)
        self.running = 1
        _log.info("CF_START")
        from channelfinder import ChannelFinderClient
        # Using the default python cf-client.
        # The url, username, and password are provided by the channelfinder._conf module.
        if self.client is None:  # For setting up mock test client
            self.client = ChannelFinderClient()
        self.clean_service()

    def stopService(self):
        service.Service.stopService(self)
        #Set channels to inactive and close connection to client
        self.running = 0
        self.clean_service()
        _log.info("CF_STOP")

    @defer.inlineCallbacks
    def commit(self, transaction_record):
        yield self.lock.acquire()
        try:
            yield deferToThread(self.__commit__, transaction_record)
        finally:
            self.lock.release()
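    # Note: the acquire / try / finally around deferToThread above is the long
    # form of DeferredLock.run(), which acquires the lock, invokes the callable,
    # and releases once the returned Deferred fires. A hedged one-line
    # equivalent of commit() (a sketch, not a drop-in from this codebase):
    #
    #     def commit(self, transaction_record):
    #         return self.lock.run(deferToThread, self.__commit__,
    #                              transaction_record)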

    def __commit__(self, TR):
        _log.debug("CF_COMMIT %s", TR.infos.items())
        pvNames = [unicode(rname, "utf-8") for rid, (rname, rtype) in TR.addrec.iteritems()]
        delrec = list(TR.delrec)
        iocName = TR.src.port
        hostName = TR.src.host
        iocid = hostName + ":" + str(iocName)
        owner = TR.infos.get('CF_USERNAME') or TR.infos.get('ENGINEER') or self.conf.get('username', 'cfstore')
        time = self.currentTime()
        if TR.initial:
            self.iocs[iocid] = {"iocname": iocName, "hostname": hostName, "owner": owner, "channelcount": 0}  # add IOC to source list
        if not TR.connected:
            delrec.extend(self.channel_dict.keys())
        for pv in pvNames:
            self.channel_dict[pv].append(iocid)  # add iocname to pvName in dict
            self.iocs[iocid]["channelcount"] += 1
        for pv in delrec:
            if iocid in self.channel_dict[pv]:
                self.channel_dict[pv].remove(iocid)
                self.iocs[iocid]["channelcount"] -= 1
                if self.iocs[iocid]['channelcount'] == 0:
                    self.iocs.pop(iocid, None)
                elif self.iocs[iocid]['channelcount'] < 0:
                    _log.error("channel count negative!")
                if len(self.channel_dict[pv]) <= 0:  # case: channel has no more iocs
                    del self.channel_dict[pv]
        poll(__updateCF__, self.client, pvNames, delrec, self.channel_dict, self.iocs, hostName, iocName, time, owner)
        dict_to_file(self.channel_dict, self.iocs, self.conf)

    def clean_service(self):
        sleep = 1
        retry_limit = 5
        owner = self.conf.get('username', 'cfstore')
        while 1:
            try:
                _log.debug("Cleaning service...")
                channels = self.client.findByArgs([('pvStatus', 'Active')])
                if channels is not None:
                    new_channels = []
                    for ch in channels or []:
                        new_channels.append(ch[u'name'])
                    if len(new_channels) > 0:
                        self.client.update(property={u'name': 'pvStatus', u'owner': owner, u'value': "Inactive"},
                                           channelNames=new_channels)
                    _log.debug("Service clean.")
                    return
            except RequestException:
                _log.exception("cleaning failed, retrying: ")

            time.sleep(min(60, sleep))
            sleep *= 1.5
            if self.running == 0 and sleep >= retry_limit:
                _log.debug("Abandoning clean.")
                return
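
clean_service above retries with a delay that grows by a factor of 1.5 per attempt, capped at 60 seconds, and abandons the loop once the service has stopped. The same policy as a stand-alone helper (a sketch; the names and the blanket except are illustrative, where the code above catches only RequestException):

import time

def retry_with_backoff(op, should_abort, first_delay=1.0, factor=1.5, max_delay=60.0):
    """Call op() until it succeeds, sleeping between failures with capped
    exponential backoff; should_abort() lets a shutting-down service bail out."""
    delay = first_delay
    while True:
        try:
            return op()
        except Exception:  # clean_service catches only RequestException here
            if should_abort():
                return None
            time.sleep(min(max_delay, delay))
            delay *= factor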
Ejemplo n.º 58
0
class UnitLifecycle(object):
    """Manager for a unit lifecycle.

    Primarily used by the workflow interaction, to modify unit behavior
    according to the current unit workflow state and transitions.

    See docs/source/internals/unit-workflow-lifecycle.rst for a brief
    discussion of some of the more interesting implementation decisions.
    """

    def __init__(self, client, unit, service, unit_dir, state_dir, executor):
        self._client = client
        self._unit = unit
        self._service = service
        self._executor = executor
        self._unit_dir = unit_dir
        self._state_dir = state_dir
        self._relations = None
        self._running = False
        self._watching_relation_memberships = False
        self._watching_relation_resolved = False
        self._run_lock = DeferredLock()
        self._log = logging.getLogger("unit.lifecycle")

    @property
    def running(self):
        return self._running

    def get_relation_workflow(self, relation_id):
        """Accessor to a unit relation workflow, by relation id.

        Primarily intended for and used by unit tests. Raises
        a KeyError if the relation workflow does not exist.
        """
        return self._relations[relation_id]

    @inlineCallbacks
    def install(self, fire_hooks=True):
        """Invoke the unit's install hook.
        """
        if fire_hooks:
            yield self._execute_hook("install")

    @inlineCallbacks
    def start(self, fire_hooks=True, start_relations=True):
        """Invoke the start hook, and setup relation watching.

        :param fire_hooks: False to skip running config-change and start hooks.
            Will not affect any relation hooks that happen to be fired as a
            consequence of starting up.

        :param start_relations: True to transition all "down" relation
            workflows to "up".
        """
        self._log.debug("pre-start acquire, running:%s", self._running)
        yield self._run_lock.acquire()
        self._log.debug("start running, unit lifecycle")
        watches = []

        try:
            if fire_hooks:
                yield self._execute_hook("config-changed")
                yield self._execute_hook("start")

            if self._relations is None:
                yield self._load_relations()

            if start_relations:
                # We actually want to transition from "down" to "up" where
                # applicable (ie a stopped unit is starting up again)
                for workflow in self._relations.values():
                    with (yield workflow.lock()):
                        state = yield workflow.get_state()
                        if state == "down":
                            yield workflow.transition_state("up")

            # Establish a watch on the existing relations.
            if not self._watching_relation_memberships:
                self._log.debug("starting service relation watch")
                watches.append(self._service.watch_relation_states(
                    self._on_service_relation_changes))
                self._watching_relation_memberships = True

            # Establish a watch for resolved relations
            if not self._watching_relation_resolved:
                self._log.debug("starting unit relation resolved watch")
                watches.append(self._unit.watch_relation_resolved(
                    self._on_relation_resolved_changes))
                self._watching_relation_resolved = True

            # Set current status
            self._running = True
        finally:
            self._run_lock.release()

        # Give up the run lock before waiting on initial watch invocations.
        results = yield DeferredList(watches, consumeErrors=True)

        # If there's an error, return the first one found.
        errors = [e[1] for e in results if not e[0]]
        if errors:
            returnValue(errors[0])

        self._log.debug("started unit lifecycle")

    @inlineCallbacks
    def stop(self, fire_hooks=True, stop_relations=True):
        """Stop the unit, executes the stop hook, and stops relation watching.

        :param fire_hooks: False to skip running stop hooks.

        :param stop_relations: True to transition all "up" relation
            workflows to "down"; when False, simply shut down relation
            lifecycles (in preparation for process shutdown, for example).
        """
        self._log.debug("pre-stop acquire, running:%s", self._running)
        yield self._run_lock.acquire()
        try:
            # Verify state
            assert self._running, "Already Stopped"

            if stop_relations:
                # We actually want to transition relation states
                # (probably because the unit workflow state is stopped/error)
                for workflow in self._relations.values():
                    with (yield workflow.lock()):
                        yield workflow.transition_state("down")
            else:
                # We just want to stop the relations from acting
                # (probably because the process is going down)
                self._log.debug("stopping relation lifecycles")
                for workflow in self._relations.values():
                    yield workflow.lifecycle.stop()

            if fire_hooks:
                yield self._execute_hook("stop")

            # Set current status
            self._running = False
        finally:
            self._run_lock.release()
        self._log.debug("stopped unit lifecycle")

    @inlineCallbacks
    def configure(self, fire_hooks=True):
        """Inform the unit that its service config has changed.
        """
        if not fire_hooks:
            returnValue(None)
        yield self._run_lock.acquire()
        try:
            # Verify State
            assert self._running, "Needs to be running."

            # Execute hook
            yield self._execute_hook("config-changed")
        finally:
            self._run_lock.release()
        self._log.debug("configured unit")

    @inlineCallbacks
    def upgrade_charm(self, fire_hooks=True, force=False):
        """Upgrade the charm and invoke the upgrade-charm hook if requested.

        :param fire_hooks: if False, *and* the actual upgrade operation is not
            necessary, skip the upgrade-charm hook. When the actual charm has
            changed during this invocation, this flag is ignored: hooks will
            always be fired.

        :param force: Boolean, if true then we're merely putting the charm into
            place on disk, not executing charm hooks.
        """
        msg = "Upgrading charm"
        if force:
            msg += " - forced"
        self._log.debug(msg)
        upgrade = _CharmUpgradeOperation(
            self._client, self._service, self._unit, self._unit_dir)
        yield self._run_lock.acquire()
        try:
            yield upgrade.prepare()

            # Executor may already be stopped if we're retrying.
            if self._executor.running:
                self._log.debug("Pausing normal hook execution")
                yield self._executor.stop()

            if upgrade.ready:
                yield upgrade.run()
                fire_hooks = True

            if fire_hooks and not force:
                yield self._execute_hook("upgrade-charm", now=True)

            # Always restart executor on success; charm upgrade operations and
            # errors are the only reasons for the executor to be stopped.
            self._log.debug("Resuming normal hook execution.")
            self._executor.start()
        finally:
            self._run_lock.release()
            upgrade.cleanup()

    @inlineCallbacks
    def _on_relation_resolved_changes(self, event):
        """Callback for unit relation resolved watching.

        The callback is invoked whenever the relation resolved
        settings change.
        """
        self._log.debug("relation resolved changed")
        # Acquire the run lock, and process the changes.
        yield self._run_lock.acquire()

        try:
            # If the unit lifecycle isn't running we shouldn't process
            # any relation resolutions.
            if not self._running:
                self._log.debug("stop watch relation resolved changes")
                self._watching_relation_resolved = False
                raise StopWatcher()

            self._log.info("processing relation resolved changed")
            if self._client.connected:
                yield self._process_relation_resolved_changes()
        finally:
            yield self._run_lock.release()
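    # The StopWatcher raised above relies on a contract with the watch
    # machinery: a callback that raises StopWatcher is deregistered rather
    # than treated as failed. A minimal sketch of that contract (the dispatch
    # loop and its names are illustrative, not juju's implementation):
    #
    #     def deliver(callbacks, event):
    #         for cb in list(callbacks):
    #             try:
    #                 cb(event)
    #             except StopWatcher:
    #                 callbacks.remove(cb)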

    @inlineCallbacks
    def _process_relation_resolved_changes(self):
        """Invoke retry transitions on relations if their not running.
        """
        relation_resolved = yield self._unit.get_relation_resolved()
        if relation_resolved is None:
            returnValue(None)
        else:
            yield self._unit.clear_relation_resolved()

        keys = set(relation_resolved).intersection(self._relations)
        for internal_rel_id in keys:
            workflow = self._relations[internal_rel_id]
            with (yield workflow.lock()):
                state = yield workflow.get_state()
                if state != "up":
                    yield workflow.transition_state("up")

    @inlineCallbacks
    def _on_service_relation_changes(self, old_relations, new_relations):
        """Callback for service relation watching.

        The callback is used to manage the unit relation lifecycle in
        accordance with the current relations of the service.

        @param old_relations: Previous service relations for a service. On the
               initial execution, this value is None.
        @param new_relations: Current service relations for a service.
        """
        self._log.debug(
            "services changed old:%s new:%s", old_relations, new_relations)

        # Acquire the run lock, and process the changes.
        yield self._run_lock.acquire()
        try:
            # If the lifecycle is not running, then stop the watcher
            if not self._running:
                self._log.debug("stop service-rel watcher, discarding changes")
                self._watching_relation_memberships = False
                raise StopWatcher()

            self._log.debug("processing relations changed")
            yield self._process_service_changes(old_relations, new_relations)
        finally:
            self._run_lock.release()

    @inlineCallbacks
    def _process_service_changes(self, old_relations, new_relations):
        """Add and remove unit lifecycles per the service relations Determine.
        """
        # Calculate delta between zookeeper state and our stored state.
        new_relations = dict(
            (service_relation.internal_relation_id, service_relation)
            for service_relation in new_relations)

        if old_relations:
            old_relations = dict(
                (service_relation.internal_relation_id, service_relation)
                for service_relation in old_relations)

        added = set(new_relations.keys()) - set(self._relations.keys())
        removed = set(self._relations.keys()) - set(new_relations.keys())
        # Could this service be a principal container?
        is_principal = not (yield self._service.is_subordinate())

        # Once we know a relation is departed, *immediately* stop running
        # its hooks. We can't really handle the case in which a hook is
        # *already* running, but we can at least make sure it doesn't run
        # any *more* hooks (which could have been queued in the past, but
        # not yet executed). This isn't *currently* an exceptionally big
        # deal, because:
        #
        # (1) The ZK state won't actually be deleted, so an inappropriate
        #     hook will still run happily.
        # (2) Even if the state is deleted, and the hook errors out, the
        #     only actual consequence is that we'll eventually run the
        #     error_depart transition rather than depart or down_depart.
        #
        # However, (1) will certainly change in the future, and (2) is not
        # necessarily a watertight guarantee.
        for relation_id in removed:
            yield self._relations[relation_id].lifecycle.stop()

        # Actually depart old relations.
        for relation_id in removed:
            workflow = self._relations.pop(relation_id)
            with (yield workflow.lock()):
                yield workflow.transition_state("departed")
            self._store_relations()

        # Process new relations.
        for relation_id in added:
            service_relation = new_relations[relation_id]
            yield self._add_relation(service_relation)
            if (is_principal and service_relation.relation_scope == "container"):
                self._add_subordinate_unit(service_relation)
            yield self._store_relations()

    @inlineCallbacks
    def _add_relation(self, service_relation):
        try:
            unit_relation = yield service_relation.get_unit_state(
                self._unit)
        except UnitRelationStateNotFound:
            # This unit has not yet been assigned a unit relation state,
            # Go ahead and add one.
            unit_relation = yield service_relation.add_unit_state(
                self._unit)

        lifecycle = UnitRelationLifecycle(
            self._client, self._unit.unit_name, unit_relation,
            service_relation.relation_ident,
            self._unit_dir, self._state_dir, self._executor)

        workflow = RelationWorkflowState(
            self._client, unit_relation, service_relation.relation_name,
            lifecycle, self._state_dir)

        self._relations[service_relation.internal_relation_id] = workflow

        with (yield workflow.lock()):
            yield workflow.synchronize()

    @inlineCallbacks
    def _do_unit_deploy(self, unit_name, machine_id, charm_dir):
        # this method exists to aid testing rather than being an
        # inline
        unit_deployer = UnitDeployer(self._client, machine_id, charm_dir)
        yield unit_deployer.start("subordinate")
        yield unit_deployer.start_service_unit(unit_name)

    @inlineCallbacks
    def _add_subordinate_unit(self, service_relation):
        """Deploy a subordinate unit for service_relation remote endpoint."""
        # Figure out the remote service state
        service_states = yield service_relation.get_service_states()
        subordinate_service = [s for s in service_states if
                               s.service_name != self._unit.service_name][0]

        # add a unit state to service (using self._unit as the
        # principal container)
        subordinate_unit = yield subordinate_service.add_unit_state(
            container=self._unit)
        machine_id = yield self._unit.get_assigned_machine_id()

        subordinate_unit_dir = os.path.dirname(self._unit_dir)
        charm_dir = os.path.join(subordinate_unit_dir,
                                 subordinate_unit.unit_name.replace(
                                     "/", "-"))
        state_dir = os.path.join(charm_dir, "state")
        if not os.path.exists(state_dir):
            os.makedirs(state_dir)

        self._log.debug("deploying %s as subordinate of %s",
                        subordinate_unit.unit_name,
                        self._unit.unit_name)
        # with the relation in place and the units added to the
        # container we can start the unit agent
        yield self._do_unit_deploy(subordinate_unit.unit_name,
                                   machine_id,
                                   charm_dir)

    @property
    def _known_relations_path(self):
        return os.path.join(
            self._state_dir, "%s.lifecycle.relations" % self._unit.internal_id)

    def _store_relations(self):
        """Store *just* enough information to recreate RelationWorkflowStates.

        Note that we don't need to store the actual states -- if we can
        reconstruct the RWS, it will be responsible for finding its own state
        -- but we *do* need to store the fact of their existence, so that we
        can still depart broken relations even if they break while we're not
        running.
        """
        state_dict = {}
        for relation_wf in self._relations.itervalues():
            state_dict.update(relation_wf.get_relation_info())
        state = yaml.dump(state_dict)
        temp_path = self._known_relations_path + "~"

        with open(temp_path, "w") as f:
            f.write(state)
        os.rename(temp_path, self._known_relations_path)

    @inlineCallbacks
    def _load_relations(self):
        """Recreate workflows for any relation we had previously stored.

        All relations (including those already departed) are stored in
        ._relations (and will be added or departed as usual); but only
        relations *not* already departed will be synchronized, to avoid
        errors caused by trying to access ZK state that may not exist any
        more.
        """
        self._relations = {}
        if not os.path.exists(self._known_relations_path):
            return

        rsm = RelationStateManager(self._client)
        relations = yield rsm.get_relations_for_service(self._service)
        relations_by_id = dict((r.internal_relation_id, r) for r in relations)

        with open(self._known_relations_path) as f:
            known_relations = yaml.load(f.read())

        for relation_id, relation_info in known_relations.items():
            if relation_id in relations_by_id:
                # The service relation's still around: set up workflow as usual
                yield self._add_relation(relations_by_id[relation_id])
            else:
                # The relation has departed. Create an *un*synchronized
                # workflow and place it in relations for detection and
                # removal (with hook-firing) in _process_service_changes.
                workflow = self._reconstruct_workflow(
                    relation_id,
                    relation_info["relation_name"],
                    relation_info["relation_scope"])
                self._relations[relation_id] = workflow

    def _reconstruct_workflow(self, relation_id, relation_ident, relation_scope):
        """Create a RelationWorkflowState which may refer to outdated state.

        This means that *if* this service has already departed the relevant
        relation, it is not safe to synchronize the resultant workflow,
        because its lifecycle may attempt to watch state that doesn't exist.

        Since synchronization is a one-time occurrence, and this method has
        only one client, this shouldn't be too hard to keep track of.
        """
        unit_relation = UnitRelationState(
            self._client, self._service.internal_id, self._unit.internal_id,
            relation_id, relation_scope)
        lifecycle = UnitRelationLifecycle(
            self._client, self._unit.unit_name, unit_relation, relation_ident,
            self._unit_dir, self._state_dir, self._executor)
        relation_name = relation_ident.split(":")[0]
        return RelationWorkflowState(
            self._client, unit_relation, relation_name, lifecycle,
            self._state_dir)

    @inlineCallbacks
    def _execute_hook(self, hook_name, now=False):
        """Execute the hook with the given name.

        For priority hooks, the hook is scheduled and the executor is
        started before waiting on the result.
        """
        hook_path = os.path.join(self._unit_dir, "charm", "hooks", hook_name)
        socket_path = os.path.join(self._unit_dir, HOOK_SOCKET_FILE)
        invoker = Invoker(
            HookContext(self._client, self._unit.unit_name), None,
            _EVIL_CONSTANT, socket_path, self._unit_dir, hook_log)
        yield invoker.start()

        if now:
            yield self._executor.run_priority_hook(invoker, hook_path)
        else:
            yield self._executor(invoker, hook_path)
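
_store_relations above writes to a temporary file and renames it over the real path, so a reader never observes a half-written relations file. The same idiom isolated as a helper (a sketch; the fsync call is an optional hardening that the code above does not perform):

import os

def atomic_write(path, data):
    """Replace path's contents so readers see either the old or the new file,
    never a partial write (rename is atomic on POSIX within one filesystem)."""
    temp_path = path + "~"
    with open(temp_path, "w") as f:
        f.write(data)
        f.flush()
        os.fsync(f.fileno())  # optional: survive a crash between write and rename
    os.rename(temp_path, path)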
Ejemplo n.º 59
0
class FreqCounterFPGA(LabradServer):
    name = 'FreqCounter'
    
    def initServer(self):
        self.collectionTime = {0:1.0,1:1.0} #default collection times in the format channel:time(sec)
        self.inCommunication = DeferredLock()
        self.connectOKBoard()
    
    def connectOKBoard(self):
        self.xem = None
        fp = ok.FrontPanel()
        module_count = fp.GetDeviceCount()
        print "Found {} unused modules".format(module_count)
        for i in range(module_count):
            serial = fp.GetDeviceListSerial(i)
            tmp = ok.FrontPanel()
            tmp.OpenBySerial(serial)
            id = tmp.GetDeviceID()
            if id == okDeviceID:
                self.xem = tmp
                print 'Connected to {}'.format(id)
                self.programOKBoard(self.xem)
                return
        print 'Could not find {}'.format(okDeviceID)
        print 'Will try again in {} seconds'.format(devicePollingPeriod)
        reactor.callLater(devicePollingPeriod, self.connectOKBoard)
    
    def programOKBoard(self, xem):
        print 'Programming FPGA'
        basepath = os.environ.get('LABRADPATH',None)
        if not basepath:
            raise Exception('Please set your LABRADPATH environment variable')
        path = os.path.join(basepath,'lattice/okfpgaservers/freqcounter.bit')
        prog = xem.ConfigureFPGA(path)
        if prog: raise Exception("Not able to program FPGA")
        pll = ok.PLL22150()
        xem.GetEepromPLL22150Configuration(pll)
        pll.SetDiv1(pll.DivSrc_VCO,4)
        xem.SetPLL22150Configuration(pll)
    
    def _resetFIFO(self, channel):
        if channel == 0:
            self.xem.ActivateTriggerIn(0x40,0)
        elif channel == 1:
            self.xem.ActivateTriggerIn(0x40,1)
        
    def _setUpdateTime(self, channel, time):
        if channel == 0:
            self.xem.SetWireInValue(0x01,int(1000 * time))
        elif channel == 1:
            self.xem.SetWireInValue(0x02,int(1000 * time))
        self.xem.UpdateWireIns()
    
    @setting(0, 'Get Channels', returns = '*w')
    def getChannels(self, c):
        """
        Get Available Channels
        """
        return self.collectionTime.keys()
       
    @setting(1, 'Set Collection Time', channel = 'w', time = 'v', returns = '')
    def setCollectTime(self, c, channel, time):
        """
        Set collection time for the given channel
        """
        time = float(time)
        if not 0.0 < time < 5.0: raise Exception('incorrect collection time')
        if channel not in self.collectionTime.keys(): raise Exception("Incorrect channel")
        self.collectionTime[channel] = time
        yield self.inCommunication.acquire()
        yield deferToThread(self._setUpdateTime, channel, time)
        self.inCommunication.release()

    @setting(2, 'Reset FIFO', channel = 'w', returns = '')
    def resetFIFO(self,c, channel):
        """
        Resets the FIFO on board, deleting all queued counts
        """
        if channel not in self.collectionTime.keys(): raise Exception("Incorrect channel")
        yield self.inCommunication.acquire()
        yield deferToThread(self._resetFIFO, channel)
        self.inCommunication.release()
    
    @setting(3, 'Get All Counts', channel = 'w', returns = '*(vv)')
    def getALLCounts(self, c, channel):
        """
        Returns the list of counts stored on the FPGA in the form (v1,v2) where v1 is the count rate in Hz
        and v2 is the approximate time of acquisition.
        
        NOTE: For some reason, the FPGA's ReadFromBlockPipeOut never times out, so requesting more packets than
        are currently stored cannot be implemented because it may hang the device.
        """
        if channel not in self.collectionTime.keys(): raise Exception("Incorrect channel")
        yield self.inCommunication.acquire()
        countlist = yield deferToThread(self.doGetAllCounts, channel)
        self.inCommunication.release()
        returnValue(countlist)
        
    def doGetAllCounts(self, channel):
        inFIFO = self._countsInFIFO(channel)
        reading = self._readCounts(channel, inFIFO)
        split = self.split_len(reading, 4)
        countlist = map(self.infoFromBuf, split)
        countlist = self.convertHz(channel, countlist)
        countlist = self.appendTimes(channel, countlist, time.time())
        return countlist
    
    def convertHz(self, channel, rawCounts):
        Hz = []
        for rawCount in rawCounts:
            Hz.append(float(rawCount) / self.collectionTime[channel])
        return Hz
        
    def appendTimes(self, channel, countList, timeLast):
        "appends the collection times to the list using the last known time"
        collectionTime = self.collectionTime[channel]
        for i in range(len(countList)):
            count = countList[-i - 1]
            countList[-i - 1] = (count, timeLast - i * collectionTime)
        print countList
        return countList
        
    def split_len(self,seq, length):
        #useful for splitting a string in length-long pieces
        return [seq[i:i+length] for i in range(0, len(seq), length)]
    
    def _countsInFIFO(self, channel):
        """
        returns how many counts are in FIFO
        """
        self.xem.UpdateWireOuts()
        if channel == 0:
            inFIFO16bit = self.xem.GetWireOutValue(0x21)
        elif channel == 1:
            inFIFO16bit = self.xem.GetWireOutValue(0x22)
        counts = inFIFO16bit / 2
        return counts
    
    def _readCounts(self, channel, number):
        """
        reads the next number of counts from the FPGA
        """
        buf = "\x00"* ( number * 4 )
        if channel == 0:
            self.xem.ReadFromBlockPipeOut(0xa0,4,buf)
        elif channel == 1:
            self.xem.ReadFromBlockPipeOut(0xa1,4,buf)
        return buf
    
    @staticmethod
    def infoFromBuf(buf):
        #converts the received buffer into useful information
        #the most significant digit of the buffer indicates whether 866 is on or off
        count = 65536*(256*ord(buf[1])+ord(buf[0]))+(256*ord(buf[3])+ord(buf[2]))
        return count
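
infoFromBuf decodes the four received bytes as two little-endian 16-bit words, with the first word most significant. Assuming that layout, the struct module gives the same result (a sketch for reference, not part of the server):

import struct

def info_from_buf(buf):
    # buf[0:2] is the high 16-bit word and buf[2:4] the low word, each
    # little-endian -- matching the ord() arithmetic above
    high, low = struct.unpack("<HH", buf[:4])
    return (high << 16) | low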