Example #1
    def make_docker_image(self):
        if self.order_obj.project.deploy_type != DOCKER:
            return None

        logger.debug('开始生成docker镜像')
        self.f.write('> 开始生成docker镜像\n')
        self.f.flush()

        minion = settings.DEPLOY.get('M_MINION')

        rets = saltapi.state_sls(
            minion, **{
                'pillar': {
                    'project': self.order_obj.project.name,
                    'env': self.order_obj.env.code,
                    'tag': self.order_obj.version
                },
                'mods': 'make_docker_image',
                'saltenv': 'deploy'
            })

        self.f.write(json.dumps(rets, indent=4))
        logger.debug('执行生成docker镜像完成')
        self.f.write('\n> 执行生成docker镜像完成\n')
        self.f.flush()

        rets = rets.get('return', [])[0].get(minion)

        for k, v in rets.items():
            if not v.get('result'):
                raise Exception('生成docker镜像失败')
Example #2
def sgdRMSPropNestorov(w0, x, y, f, grad, learning_rate=0.01,
                       batch_size=100, max_epochs=1000,
                       alpha=0.9, delta=1e-6, ro=0.9, eps=1e-6,
                       shuffle=False, rng=None):
    tm = Timer()
    n = x.shape[0]
    n_batches = get_num_batches(n, batch_size)
    w = np.copy(w0)
    v = np.zeros(len(w0), dtype=w0.dtype)  # velocity
    r = np.zeros(len(w0), dtype=w0.dtype)  # gradient accumulation variable
    epoch_losses = np.zeros(max_epochs, dtype=float)
    epoch = 0
    w_best = np.copy(w0)
    loss_best = np.inf
    if n <= batch_size:
        # no need to shuffle since all instances will be used up in one batch
        shuffle = False
    if shuffle:
        shuffled_idxs = np.arange(n)
        if rng is None:
            np.random.shuffle(shuffled_idxs)
        else:
            rng.shuffle(shuffled_idxs)
    else:
        shuffled_idxs = None
    prev_loss = np.inf
    while epoch < max_epochs:
        losses = np.zeros(n_batches, dtype=float)
        for i in range(n_batches):
            xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
            tw = w + alpha * v
            g = grad(tw, xi, yi)
            r[:] = ro * r + (1 - ro) * np.multiply(g, g)
            dw_scale = (learning_rate / (np.sqrt(delta + r)))
            v = alpha * v - np.multiply(dw_scale, g)
            w[:] = w + v
            losses[i] = f(w, xi, yi)
        loss = np.mean(losses)
        if np.isnan(loss):
            logger.debug("loss is nan")
            logger.debug("|w|=%f" % w.dot(w))
            raise ArithmeticError("loss is nan in sgd")
        epoch_losses[epoch] = loss
        if loss < loss_best:
            # pocket algorithm
            np.copyto(w_best, w)
            loss_best = loss
        epoch += 1
        if (loss < eps or np.abs(loss - prev_loss) < eps or
            avg_loss_check(epoch_losses, epoch, n=20, eps=eps)):
            break
        prev_loss = loss
    debug_log_sgd_losses("sgdRMSPropNestorov", epoch_losses, epoch, n=20, timer=tm)
    # logger.debug("epochs: %d" % epoch)
    # logger.debug("net losses:")
    # logger.debug("epoch losses:\n%s" % str(epoch_losses[0:epoch]))
    # logger.debug("best loss: %f" % loss_best)
    return w_best
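The SGD variants in this listing call helpers that are not shown (get_num_batches, get_sgd_batch, Timer, avg_loss_check, debug_log_sgd_losses). A minimal sketch of the two batching helpers, inferred only from how they are called above and not taken from the original project, might look like this:

import numpy as np

def get_num_batches(n, batch_size):
    # assumed behaviour: number of mini-batches needed to cover n instances
    return int(np.ceil(n / float(batch_size)))

def get_sgd_batch(x, y, i, batch_size, shuffled_idxs=None):
    # assumed behaviour: return the i-th mini-batch, optionally indexing
    # through a pre-shuffled permutation of the instance indexes
    start = i * batch_size
    end = min(start + batch_size, x.shape[0])
    if shuffled_idxs is None:
        idxs = np.arange(start, end)
    else:
        idxs = shuffled_idxs[start:end]
    return x[idxs], y[idxs]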
Example #3
    def append(self, test_name, **parameters):
        logger.info("Append name \"%s\" " % (test_name))
        if parameters == {}:
            obj = tests[test_name]()
        else:
            obj = tests[test_name](**parameters)

        if obj is not None:
            logger.debug("Appended name \"%s\" " % (test_name))
            self.__data[test_name + str(self.__id)] = obj
            self.__id = self.__id + 1
Example #4
def closeConnect(host, port, s):
    global messageForMPQueue
    logger.debug("closing MP: " + host + " : " + str(port))
    if s in outputs:
        outputs.remove(s)
    d = getOrBuildMPClient(host, port)
    if messageForMPQueue.has_key(d):
        q = messageForMPQueue[d]
        messageForMPQueue.pop(d)
    del deviceConnectionToIPMap[s]
    inputs.remove(s)
    s.close()
Example #5
    def __init__(self, **params):
        logger.debug("Init axis %s", params['ip'])
        vc.__init__(self, **params)
        auth = '%s:%s' % (self.user, self.password)
        auth = auth.encode('ascii')
        userAndPass = b64encode(auth).decode("ascii")
        self.__headers_auth = {'Authorization': 'Basic %s' % userAndPass}

        logger.debug("Connection string http://%s:%s@%s" %
                     (self.user, self.password, self.ip))
        try:
            self.__conn = httplib.HTTPConnection("%s" % self.ip)
        except httplib.HTTPException as e:
            logger.error("Execution failed: %s", e)
Example #6
    def run_script(self, tgt, path):
        data = {
            'client': 'local',
            'fun': 'cmd.script',
            'tgt': tgt,
            'arg': 'salt://' + path
        }
        req = self.post(data=data)
        logger.debug(req.json())

        if req.status_code != 200:
            response = {'code': req.status_code, 'detail': req.text}
        else:
            response = req.json().get('return', req.json())
        return response
Example #7
def debug_log_sgd_losses(sgd_type, losses, epoch, n=20, timer=None):
    if False:
        # disable logging -- this branch should be used in PRODUCTION
        return
    elif True:
        # minimal info
        logger.debug("[%s] epochs: %d; avg last %d losses:%f%s" %
                     (sgd_type, epoch, n, np.mean(losses[(epoch-min(n, epoch)):(epoch)]),
                      "" if timer is None else "; time: %f" % timer.elapsed()))
    else:
        # maximum info
        logger.debug("[%s] epochs: %d; avg last %d losses:%f\n%s\n%s" %
                     (sgd_type, epoch, n, np.mean(losses[(epoch-min(n, epoch)):(epoch)]),
                      str(list(losses[0:min(n, epoch)])),
                      str(list(losses[(epoch-min(n, epoch)):(epoch)]))))
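debug_log_sgd_losses expects a Timer object with an elapsed() method, which is not defined in this listing; a minimal sketch under that assumption (wall-clock seconds since construction):

import time

class Timer(object):
    def __init__(self):
        # remember when the timer was created
        self.start_time = time.time()

    def elapsed(self):
        # wall-clock seconds since construction
        return time.time() - self.start_time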
Example #8
    def download_package(self):
        logger.debug('开始下载代码')
        self.f.write('> 开始下载代码\n')
        self.f.flush()

        build_number = self.deploy_cache['build_number']

        self.set_step_cache(self.cache_name, self.deploy_cache)
        jenkins_api.download_package(self.order_obj.project.package_url,
                                     self.order_obj.project.jenkins_job,
                                     build_number)

        logger.debug('代码下载完成')
        self.f.write('> 代码下载完成\n')
        self.f.flush()
Example #9
 def state(self):
     try:
         logger.debug("%s: Check service state", self.service_name)
         FNULL = open(os.devnull, 'w')
         retcode = call("pgrep %s" % (self.service_name),
                        stdout=FNULL,
                        stderr=subprocess.STDOUT,
                        shell=True)
         if retcode == 0:
             logger.info("%s: Service running" % (self.service_name))
             return Service_State.run
         else:
             logger.info("%s: Service stopped" % (self.service_name))
             return Service_State.stop
     except OSError as e:
         logger.error("Execution failed: %s", e)
Example #10
 def get_grains_items(self, key_id):
     prefix = '/minions/{}'.format(key_id)
     try:
         req = self.get(prefix=prefix)
         logger.debug(req.json())
         data = req.json()
         response = data['return'][0][key_id]
         if req.status_code != 200:
             response = {'code': req.status_code, 'detail': '请求异常'}
         elif not response:
             response = {'code': -1, 'detail': 'minion 返回false'}
         else:
             response['code'] = req.status_code
     except Exception as e:
         logger.error(e)
         response = {'code': -1, 'detail': 'salt api返回数据异常'}
     return response
Example #11
    def check_iif_state(self, id_iff):
        logger.debug("Check iif %s from %s", id_iff, self.ip)
        res = True

        snmp_res = netsnmp.snmpget('.1.3.6.1.4.1.8691.7.19.1.9.1.1.3.' +
                                   id_iff,
                                   Version=2,
                                   Community=self.snmp_rw_pass,
                                   DestHost=self.ip)
        if snmp_res[0] == 0: res = False
        #        self.__tn = telnetlib.Telnet(self.ip)
        #        try:
        #            self.__tn.read_until("login as: ".encode(), 5)
        #        except EOFError as e:
        #            logger.error("%s: Execution failed: %s" % (self.ip, e))
        #            return
        #
        #        try:
        #            self.__tn.write(self.user.encode() + "\n".encode())
        #        except EOFError as e:
        #            logger.error("%s: Execution failed: %s" % (self.ip, e))
        #            return
        #
        #        if self.password:
        #            try:
        #                self.__tn.read_until("password: ".encode(), 5)
        #            except EOFError as e:
        #                logger.error("%s: Execution failed: %s" % (self.ip, e))
        #                return
        #
        #            try:
        #                self.__tn.write(self.password.encode() + "\n\n".encode())
        #            except EOFError as e:
        #                logger.error("%s: Execution failed: %s" % (self.ip, e))
        #
        #        request = "show interfaces ethernet %s\n" % (id_iff)
        #        self.__tn.write(request.encode())
        #        self.__tn.write("exit\n".encode())
        #        str = self.__tn.read_all().decode()
        #        if ("Enable" in str):
        #            res = True
        #        elif ("Disable" in str):
        #            res = False
        #
        return res
Example #12
    def download_package(self, package_url, name, build_number):
        URI = settings.JENKINS.get('URI')
        download_url = '{}/job/{}/{}/artifact/{}'.format(
            URI, name, build_number, package_url)
        logger.debug(download_url)
        local_filename = download_url.split('/')[-1]
        code_path = os.path.join(settings.DEPLOY.get('CODE_PATH'), 'packages')
        local_full_filename = os.path.join(code_path, local_filename)

        # with requests.get(url, stream=True,
        # auth=HTTPBasicAuth("zhoujinliang", "117c911a35acf51e428e29f3ccb363f53f")) as r:
        with requests.get(download_url, stream=True) as r:
            r.raise_for_status()
            with open(local_full_filename, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
        return local_full_filename
Example #13
def sgd(w0, x, y, f, grad, learning_rate=0.01,
        batch_size=100, max_epochs=1000, eps=1e-6, shuffle=False, rng=None):
    tm = Timer()
    n = x.shape[0]
    n_batches = get_num_batches(n, batch_size)
    w = np.copy(w0)
    epoch_losses = np.zeros(max_epochs, dtype=float)
    epoch = 0
    w_best = np.copy(w0)
    loss_best = np.inf
    if n <= batch_size:
        shuffle = False  # no need to shuffle since all instances will be used up in one batch
    if shuffle:
        shuffled_idxs = np.arange(n)
        if rng is None:
            np.random.shuffle(shuffled_idxs)
        else:
            rng.shuffle(shuffled_idxs)
    else:
        shuffled_idxs = None
    while epoch < max_epochs:
        losses = np.zeros(n_batches, dtype=float)
        for i in range(n_batches):
            xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
            if xi.shape[0] == 0:
                raise ValueError("Batch size of 0")
            g = grad(w, xi, yi)
            w -= learning_rate * g
            losses[i] = f(w, xi, yi)
            if False:
                g_norm = g.dot(g)
                if np.isnan(g_norm) or np.isinf(g_norm):
                    logger.debug("|grad|=%f, i=%d/%d, epoch:%d" % (g.dot(g), i+1, n_batches, epoch))
                    logger.debug("|w0|=%f" % w0.dot(w0))
                    raise ArithmeticError("grad is nan/inf in sgd")
        loss = np.mean(losses)
        if np.isnan(loss):
            logger.debug("loss is nan")
            logger.debug("|w|=%f" % w.dot(w))
            raise ArithmeticError("loss is nan in sgd")
        epoch_losses[epoch] = loss
        if loss < loss_best:
            # pocket algorithm
            np.copyto(w_best, w)
            loss_best = loss
        epoch += 1
        if loss < eps:
            break
    debug_log_sgd_losses("sgd", epoch_losses, epoch, n=20, timer=tm)
    # logger.debug("epochs: %d" % epoch)
    # logger.debug("net losses:")
    # logger.debug("epoch losses:\n%s" % str(epoch_losses[0:epoch]))
    # logger.debug("best loss: %f" % loss_best)
    return w_best
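As a usage sketch (not part of the original listing, and assuming the helpers used above such as get_num_batches, get_sgd_batch, debug_log_sgd_losses, Timer and logger are available), sgd only needs a loss f(w, x, y) and a gradient grad(w, x, y) with the signatures used above; for example, mini-batch least squares on synthetic data:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(500, 3)
w_true = np.array([1.0, -2.0, 0.5])
y = x.dot(w_true) + 0.01 * rng.randn(500)

def f(w, xi, yi):
    # mean squared error on the mini-batch
    r = xi.dot(w) - yi
    return np.mean(r * r)

def grad(w, xi, yi):
    # gradient of the mean squared error with respect to w
    r = xi.dot(w) - yi
    return 2.0 * xi.T.dot(r) / xi.shape[0]

w_hat = sgd(np.zeros(3), x, y, f, grad, learning_rate=0.05,
            batch_size=100, max_epochs=200, shuffle=True, rng=rng)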
Example #14
    def reject_key(self, key_id):
        data = {
            'client': 'wheel',
            'fun': 'key.reject',
            'match': key_id,
            'include_accepted': True,
            'include_denied': True
        }

        try:
            req = self.post(data=data)
            logger.debug(req.json())

            if req.status_code != 200:
                response = {'code': req.status_code, 'detail': '请求异常'}
            else:
                data = req.json()
                status = data['return'][0]['data']['success']
                # salt-api returns the members it executed on; any salt ID will return a status, so the final result still depends on numbers
                numbers = data['return'][0]['data']['return']
                if numbers and status:
                    response = {
                        'code': 200,
                        'status': status,
                        'detail': '驳回成功'
                    }
                else:
                    if not status:
                        response = {
                            'code': 200,
                            'status': status,
                            'detail': '驳回失败'
                        }
                    else:
                        response = {
                            'code': 200,
                            'status': False,
                            'detail': '未知的minion'
                        }
        except Exception as e:
            logger.error(e)
            response = {'code': -1, 'detail': 'salt api返回数据异常'}

        return response
Example #15
    def post(self, headers=None, data=None, json=None, prefix='/'):
        salt_token = cache.get('salt-token')
        logger.debug('{}:{}'.format(self.__token, self.__token_expire))
        logger.debug(salt_token)

        if not headers and data:
            headers = {
                'X-Auth-Token': self.__token,
                'Accept': 'application/json',
            }
        elif not headers and json:
            headers = {
                'X-Auth-Token': self.__token,
                'Accept': 'application/json',
                'Content-type': 'application/json'
            }

        url = '{}{}'.format(self.__url, prefix)

        # token expired, fetch a new one
        if time.time() >= self.__token_expire:
            self.get_token()
            headers['X-Auth-Token'] = self.__token

        ret = requests.post(url,
                            headers=headers,
                            data=data,
                            json=json,
                            verify=False,
                            timeout=self.__timeout)
        logger.debug(JSON.dumps(ret.json(), indent=4))
        return ret
Example #16
    def get_token(self, prefix='/login'):
        '''
        Log in and obtain a token
        '''
        data = {
            "username": self.__username,
            "password": self.__password,
            "eauth": "pam"
        }
        headers = {'Accept': 'application/json'}
        url = '{}{}'.format(self.__url, prefix)

        try:
            req = requests.post(url,
                                headers=headers,
                                data=data,
                                verify=False,
                                timeout=self.__timeout)

            logger.debug(JSON.dumps(req.json(), indent=4))

            if req.status_code != 200:
                return {'code': req.status_code, 'detail': '请求异常'}

            req = req.json()
            self.__token = req['return'][0]['token']
            self.__token_expire = req['return'][0]['expire']
            start = req['return'][0]['start']

            cache.set('salt-token', {
                'token': self.__token,
                'start': start,
                'expire': self.__token_expire
            },
                      timeout=86400)

        except Exception as e:
            logger.critical(e)
            raise e
Example #17
def sendMessageToMPClient(msg, device):
    global deviceConnectionToIPMap, outputs, messageForMPQueue
    for conn in deviceConnectionToIPMap.keys():
        addr = deviceConnectionToIPMap[conn]
        d = getOrBuildMPClient(addr[0], addr[1])
        if d.code:
            try:
                # only forward to the MP that corresponds to this device
                if device.code != d.code: continue
                q = messageForMPQueue[d]
                # if the queue is full, pop one entry first
                if q.full():
                    q.get_nowait()
                q.put_nowait(msg)
                if not conn in outputs:
                    outputs.append(conn)
                    logger.debug("MP outputs conn: " + str(len(outputs)))
            except:
                traceback.print_exc()
                logger.error("add MP msg QUEUE ERROR :" +
                             binascii.b2a_hex(msg))
                pass
Example #18
    def delete_key(self, key_id):
        data = {'client': 'wheel', 'fun': 'key.delete', 'match': key_id}

        try:
            req = self.post(data=data)
            logger.debug(req.json())

            if req.status_code != 200:
                response = {'code': req.status_code, 'detail': '请求异常'}
            else:
                data = req.json()
                status = data['return'][0]['data']['success']
                response = {'code': 200, 'status': status, 'detail': '删除key成功'}
                # key.delete returns an empty result whether or not the key exists
                if not status:
                    response['detail'] = '删除key失败'

        except Exception as e:
            logger.error(e)
            response = {'code': -1, 'detail': '删除key失败'}

        return response
Example #19
    def deploy_state_sls(self):
        logger.debug('开始执行salt SLS')
        self.f.write('> 开始执行salt SLS\n')
        self.f.flush()

        salt_id_list = [s.saltID for s in self.order_obj.get_deploy_servers]

        rets = saltapi.state_sls(
            salt_id_list, **{
                'pillar': {
                    'project': self.order_obj.project.name,
                    'order_id': str(self.order_obj.id),
                    'env': self.order_obj.env.code,
                    'devops_env': settings.ENV,
                    'tag': self.order_obj.version,
                    'private_vars': self.order_obj.get_private_vars
                },
                'mods': self.order_obj.project.name,
                'saltenv': 'deploy'
            })

        self.f.write(json.dumps(rets, indent=4))
        logger.debug('salt SLS 执行完成')
        self.f.write('\n> salt SLS 执行完成\n')
        self.f.flush()

        brief = self.__deal_with_salt_ret(rets)

        for salt_id in salt_id_list:
            if brief.get(salt_id, 'default') == 'default':
                brief[salt_id] = False

        failed_salt = [k for k, v in brief.items() if v == False]

        if failed_salt:
            raise Exception('{}执行salt sls失败'.format(','.join(failed_salt)))
Example #20
 def __new__(cls, **params):
     logger.debug("Create vc %s" % (params['name']))
     inst = device.__new__(cls, **params)
     return inst
Example #21
def sgdAdam(w0, x, y, f, grad, learning_rate=0.01,
            batch_size=100, max_epochs=1000, delta=1e-8,
            ro1=0.9, ro2=0.999, eps=1e-6,
            shuffle=False, rng=None):
    tm = Timer()
    n = x.shape[0]
    n_batches = get_num_batches(n, batch_size)
    w = np.copy(w0)
    s = np.zeros(len(w0), dtype=w0.dtype)  # first moment variable
    s_hat = np.zeros(len(w0), dtype=w0.dtype)  # first moment corrected for bias
    r = np.zeros(len(w0), dtype=w0.dtype)  # second moment variable
    r_hat = np.zeros(len(w0), dtype=w0.dtype)  # second moment corrected for bias
    t = 0  # time step
    epoch_losses = np.zeros(max_epochs, dtype=float)
    epoch = 0
    w_best = np.copy(w0)
    loss_best = np.inf
    if n <= batch_size:
        # no need to shuffle since all instances will be used up in one batch
        shuffle = False
    if shuffle:
        shuffled_idxs = np.arange(n)
        if rng is None:
            np.random.shuffle(shuffled_idxs)
        else:
            rng.shuffle(shuffled_idxs)
    else:
        shuffled_idxs = None
    prev_loss = np.inf
    while epoch < max_epochs:
        losses = np.zeros(n_batches, dtype=float)
        for i in range(n_batches):
            xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
            g = grad(w, xi, yi)
            t += 1
            s[:] = ro1 * s + (1 - ro1) * g
            r[:] = ro2 * r + (1 - ro2) * np.multiply(g, g)
            # correct bias in first moment
            s_hat[:] = (1./(1 - ro1 ** t)) * s
            # correct bias in second moment
            r_hat[:] = (1./(1 - ro2 ** t)) * r
            dw_scale = (learning_rate / (np.sqrt(delta + r_hat)))
            dw = np.multiply(dw_scale, s_hat)
            w[:] = w - dw
            losses[i] = f(w, xi, yi)
        loss = np.mean(losses)
        if np.isnan(loss):
            logger.debug("loss is nan")
            logger.debug("|w|=%f" % w.dot(w))
            raise ArithmeticError("loss is nan in sgd")
        epoch_losses[epoch] = loss
        if loss < loss_best:
            # pocket algorithm
            np.copyto(w_best, w)
            loss_best = loss
        epoch += 1
        if (loss < eps or np.abs(loss - prev_loss) < eps or
            avg_loss_check(epoch_losses, epoch, n=20, eps=eps)):
            break
        prev_loss = loss
    debug_log_sgd_losses("sgdAdam", epoch_losses, epoch, n=20, timer=tm)
    # logger.debug("epochs: %d" % epoch)
    # logger.debug("net losses:")
    # logger.debug("epoch losses:\n%s" % str(epoch_losses[0:epoch]))
    # logger.debug("best loss: %f" % loss_best)
    return w_best
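sgdRMSPropNestorov and sgdAdam also use avg_loss_check as a stopping test; it is not shown here. A plausible sketch, assuming it stops when the average of the last n epoch losses has stopped improving relative to the n epochs before that:

import numpy as np

def avg_loss_check(losses, epoch, n=20, eps=1e-6):
    # assumed behaviour: not enough history yet -> keep training
    if epoch < 2 * n:
        return False
    recent = np.mean(losses[(epoch - n):epoch])
    previous = np.mean(losses[(epoch - 2 * n):(epoch - n)])
    # stop once the moving average of the loss has flattened out
    return np.abs(previous - recent) < eps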
Example #22
def missionPlannerServer():

    global inputs, outputs, MISSION_PLANNER_S, deviceConnectionToIPMap, messageForMPQueue, logger
    logger.info("Mission Planner Server start at port:  " + str(MP_PORT))

    # main TCP entry loop: accept network connections, then dispatch them to worker threads for handling
    while True:
        # global MP_PORT
        try:
            readables, writables, exceptional = select.select(
                inputs, outputs, [], .5)
        except:
            if not (readables or writables or exceptional):
                break
        else:
            # iterate over the readable sockets
            for s in readables:
                if s is server:
                    # accept the incoming connection
                    connection, addr = s.accept()
                    connection.setblocking(0)
                    logger.info("MP connection from: " + addr[0] + ":" +
                                str(addr[1]))
                    inputs.append(connection)
                    # register the client connection
                    deviceConnectionToIPMap[connection] = addr
                    d = getOrBuildMPClient(addr[0], addr[1], connection)
                    messageForMPQueue[d] = Queue.Queue(QUEUE_SIZE)

                # otherwise a client sent data on an existing connection
                else:
                    if deviceConnectionToIPMap.has_key(s):
                        host, port = deviceConnectionToIPMap[s]
                        try:
                            data = s.recv(BUF_SIZE)
                        except socket.error, e:
                            if 10035 == e.errno:
                                continue
                            # the peer closed the connection forcibly
                            else:
                                closeConnect(host, port, s)
                        except:
                            continue
                        else:
                            # read succeeded; handle the data uploaded by the client
                            if data:
                                hexData = binascii.b2a_hex(data)
                                logger.debug("data from MP " + host + ":" +
                                             str(port) + " " + hexData)

                                # look up the connected device for this socket
                                d = getOrBuildMPClient(host, port, s)
                                d.counter += 1

                                # on the first connection, the first upstream packet may be two bytes:
                                # byte 1 is the command; 00 means "subscribe to an FC" (ignored in this version)
                                # byte 2 is the data; when byte 1 is 00 it encodes which device to follow
                                if 5 > d.counter and len(data) == 2:
                                    d.code = hexData[2:4]
                                else:
                                    devicesServer.sendMessageToDevice(
                                        d.code, data)

                                    # handle the data uploaded by the client
                                # if s not in outputs:
                                #     outputs.append(s)

                            # otherwise the client requested to close the connection
                            else:
                                closeConnect(host, port, s)
                                pass
Example #23
 def __init__(self):
     service_control.__init__(self, 'aksusbd')
     logger.debug("Init HASP service")
Example #24
 def __new__(cls, **parameters):
     logger.debug("New complex thread test")
     inst = complex.__new__(cls, **parameters)
     return inst
Example #25
def deviceServer():
    global messageForDeviceQueue, outputs, logger

    logger.info("Device Server start at port:  " + str(PORT))

    # main TCP entry loop: accept network connections, then dispatch them to worker threads for handling
    while True:
        try:
            #global  PORT
            readables, writables, exceptional = select.select(
                inputs, outputs, [], .5)
        except:
            if not (readables or writables or exceptional):
                break
        else:

            # iterate over the readable sockets
            for s in readables:
                if s is server:
                    # accept the incoming connection
                    connection, addr = s.accept()
                    connection.setblocking(0)
                    logger.info("Device connection from: " + addr[0] + ":" +
                                str(addr[1]))
                    inputs.append(connection)
                    deviceConnToHostMap[connection] = addr
                    messageForDeviceQueue[(addr[0],
                                           addr[1])] = Queue.Queue(QUEUE_SIZE)
                    getOrBuildDevice(addr[0], addr[1], connection)

                # otherwise a client sent data on an existing connection
                else:
                    host, port = deviceConnToHostMap[s]
                    try:
                        data = s.recv(BUF_SIZE)
                    except socket.error, e:
                        if 10035 == e.errno:
                            continue
                        # the peer closed the connection forcibly
                        else:
                            logger.info("closing FC:" + host)
                            closeConnection(host, port, s)
                    except:
                        continue
                    else:
                        # 0000000000000000000000000000000000000000000000000000000000000000
                        host, port = deviceConnToHostMap[s]
                        # read succeeded; handle the data uploaded by the client
                        if data:
                            d = getOrBuildDevice(host, port, s)
                            d.counter += 1
                            hexData = binascii.b2a_hex(data)

                            # if the packet is longer than 4 bytes and starts with 0xfe, take the fourth byte as the device code
                            if "-1" == d.code and len(
                                    data) > 4 and "fe" == hexData[0:2]:
                                d.code = hexData[6:8]
                                logger.debug("get the FC code:" + d.code +
                                             "  msg:" + hexData)

                            # handle the data uploaded by the device
                            # service.logServerService.receiveData(data, d)
                            # pass the message through to MissionPlanner
                            missionPlannerServer.sendMessageToMPClient(data, d)
                            logger.debug("data from FC " + host + " " +
                                         hexData)

                            # if s not in outputs:
                            #     outputs.append(s)

                        # otherwise the client requested to close the connection
                        else:
                            closeConnection(host, port, s)
Example #26
 def __init__(self, **parameters):
     self.__name = parameters['name']
     self.__run_handler = self.handler
     self.__init_state = {}
     logger.debug("Init test \"%s\"", self.__name)
Example #27
 def save_init_state(self):
     logger.debug("Save init state test \"%s\"", self.__name)
     self._runned = True
Example #28
 def return_init_state(self):
     if self._runned == False: return
     logger.debug("Return init state test \"%s\"", self.__name)
     self._runned = False
Example #29
 def __init__(self, service_name):
     self.__service_name = service_name
     self.__state = Service_State.unknown
     logger.debug("Init service object")
Example #30
                        else:
                            closeConnection(host, port, s)

            # iterate over the writable sockets
            for s in writables:
                host, port = deviceConnToHostMap[s]
                try:
                    # if a message is queued for this connection, send it down
                    msg = messageForDeviceQueue[(host, port)].get_nowait()
                except Exception, e:
                    if s in outputs:
                        outputs.remove(s)
                    pass
                else:
                    # 111111111111111111111111111111111111111111111111111111111111111111111111
                    logger.debug("sending to FC " + host + " " +
                                 binascii.b2a_hex(msg))
                    try:
                        s.sendall(msg)
                    except:
                        if s in outputs:
                            outputs.remove(s)
                        logger.error("sending to FC FAILED " + host + ":" +
                                     str(port) + " : " + binascii.b2a_hex(msg))
                        pass

            for s in exceptional:
                logger.error(" exception condition on " + str(s.getpeername()))
                if s in outputs:
                    outputs.remove(s)
                inputs.remove(s)
                s.close()