示例#1
0
文件: t1.py 项目: karitra/coxx.serfs
def ping_loop():
    """Endlessly ping the default service and print every reply.

    First asks the service for its version, then loops forever sending
    numbered ping messages, sleeping DEFAULT_SLEEP between rounds.
    Intended to run as a framework coroutine (each ``yield`` resolves a
    framework future).
    """
    echo = Service(DEFAULT_SERVICE)

    # One-off version query with a short timeout.
    channel = yield echo.enqueue('version')
    version = yield channel.rx.get(timeout=5)
    print('version: {}'.format(version))

    counter = 1
    while True:
        msg = '{}_{}'.format(DEFAULT_MESSAGE, counter)
        counter += 1

        print('sending ping message {}'.format(msg))

        # A fresh channel per ping round.
        channel = yield echo.enqueue('ping')
        yield channel.tx.write(msg)
        answer = yield channel.rx.get(timeout=10)
        print('ans {}'.format(answer))

        yield gen.sleep(DEFAULT_SLEEP)
示例#2
0
    def process_synchronous(self, cocaine_service_name, cocaine_method, data):
        """Call a Cocaine service method and block until the full result.

        Packs *data* with msgpack, enqueues it on the named service,
        waits for the reply via ``.get()``, disconnects, and returns the
        raw response.
        """
        self.log("In process_synchronous()")
        worker = Service(cocaine_service_name)
        # .get() blocks until the whole response is available.
        result = worker.enqueue(cocaine_method, msgpack.dumps(data)).get()
        worker.disconnect()
        self.log("process_synchronous() finished")
        return result
示例#3
0
    def process_asynchronous(self, cocaine_service_name, cocaine_method, data):
        """Enqueue a Cocaine method and yield its chunk generator.

        The caller drives the yielded generator to receive the response
        chunks; the service is disconnected once control returns here.
        """
        self.log("In process_asynchronous()")
        svc = Service(cocaine_service_name)

        # Hand the chunk stream to the coroutine machinery.
        yield svc.enqueue(cocaine_method, msgpack.dumps(data))

        svc.disconnect()
        self.log("process_asynchronous() finished")
示例#4
0
def raw(request, response):
    """Forward the request body to the go application, then echo back
    the value it stored.

    Writes progress messages into *response* and closes it when done.
    """
    incoming = yield request.read()
    goapp = Service("gococaine")
    storage = Service("storage")

    # Hand the raw payload to the go application; it returns a storage key.
    response.write("Send data to the  go application")
    key = yield goapp.enqueue("process", str(incoming))
    response.write("Data was processed and saved to the storage, key: %s" % key)

    # Fetch what the go app stored and relay it to the client.
    stored = yield storage.read("namespace", key)
    response.write(stored)
    response.close()
示例#5
0
def reconnect():
    """Ping loop that tolerates service failures by reconnecting.

    After an initial 10s delay, repeatedly (re)creates the service,
    sends ten messages, and on any error waits 3s before retrying with
    a fresh connection.
    """
    yield gen.sleep(10)

    while True:
        try:
            # Reconnect on every round so a broken channel is replaced.
            echo = Service(DEFAULT_SERVICE)
            channel = yield echo.enqueue('ping')

            for idx in xrange(10):
                payload = 'boo{}'.format(idx)
                yield channel.tx.write(payload)
                data = yield channel.rx.get()

        except Exception as exc:
            print('error1 {}'.format(exc))
            yield gen.sleep(3)
示例#6
0
    def start_async(self, login, power):
        """Authenticate *login* and, on success, kick off the powers job.

        On a failed login the error response is written back and the
        request is finished; otherwise process_powers() takes over the
        response.
        """
        self.log("In start_async()")
        auth = Service("login")

        login_response = yield auth.enqueue("login", msgpack.dumps(login))

        auth.disconnect()
        self.log("got login!")

        # The login service signals failure by an "error" key in its reply.
        if "error" not in login_response:
            self.log("Login '{0}' ok!".format(login))
            self.process_powers("powers", "binary_powers", power)
        else:
            self.log("Login '{0}' is invalid!".format(login))
            self.write(login_response)
            self.finish()

        self.log("Finished start_async()")
示例#7
0
    def powers_8(self):
        """Collect every chunk of the 'binary_powers' reply for input 8.

        Uses the old Cocaine coroutine convention: the first yield
        (on enqueue) returns the initial chunk, and each subsequent
        chunk is delivered into a bare ``yield`` expression until the
        framework raises ChokeEvent to mark end-of-stream.  The full
        chunk list is stored on ``self.powers_8_res``.
        """
        self.log("In powers_8()")
        service = Service("powers")

        # First yield resolves to the initial response chunk.
        chunk = yield service.enqueue("binary_powers", msgpack.dumps(8))

        chunks = [chunk]
        try:
            while True:
                # Bare yield: the framework sends the next chunk in here.
                ch = yield
                chunks.append(ch)

        except ChokeEvent as err:
            # ChokeEvent signals the stream is exhausted — not an error.
            pass

        self.powers_8_res = chunks
        service.disconnect()
        self.log("powers_8() finished")
示例#8
0
    def process_powers(self, cocaine_service_name, cocaine_method, data):
        """Stream chunks from a Cocaine service method to the HTTP client.

        Packs *data* with msgpack, enqueues it on the named service, and
        writes each received chunk to the response via write_chunk().
        Follows the old Cocaine coroutine convention: the first yield
        returns the initial chunk; later chunks arrive through bare
        ``yield`` expressions until ChokeEvent ends the stream.  Always
        disconnects the service and finishes the request.
        """
        self.log("In process_powers()")
        service = Service(cocaine_service_name)

        # First yield resolves to the initial response chunk.
        chunk = yield service.enqueue(cocaine_method, msgpack.dumps(data))

        if chunk:
            try:
                while True:
                    # Bare yield: the framework sends the next chunk in here.
                    ch = yield
                    self.log(ch)
                    self.write_chunk("{0} ".format(ch))

            except ChokeEvent as err:
                # End of stream — expected, not an error.
                pass
        else:
            self.write_chunk("no data!")

        service.disconnect()
        self.log("process_powers() finished")
        self.finish()
示例#9
0
def aggreagate(request, response):
    """Aggregate per-host parsing results by subgroup and ship them to senders.

    Reads an AggregationTask from the request, pulls each host's cached
    parsing data from storage, runs the configured aggregator app over
    per-host, per-subgroup and whole-metahost slices, then forwards the
    combined result to every configured sender service.  (Name keeps the
    historical typo "aggreagate" — callers dispatch on it.)
    """
    raw = yield request.read()
    task = AggregationTask(raw)
    logger = get_logger_adapter(task.Id)
    logger.info("task started")
    metahost = task.parsing_config.metahost
    hosts = task.hosts

    # read aggregation config passed to us
    aggcfg = task.aggregation_config
    logger.debug("aggregation config %s", aggcfg)

    logger.info("%s", hosts)
    # repack hosts by subgroups by dc
    # For example:
    # {"GROUP-DC": "hostname"} from {"DC": "hostname"}
    hosts = dict(("%s-%s" % (metahost, subgroup), v)
                 for subgroup, v in hosts.iteritems())

    result = {}

    for name, cfg in aggcfg.data.iteritems():
        mapping = {}

        logger.info("Send to %s %s" % (name, cfg['type']))
        app = cache.get(cfg['type'])
        if app is None:
            logger.info("Skip %s" % cfg['type'])
            continue

        result[name] = {}

        for subgroup, value in hosts.iteritems():
            subgroup_data = list()
            for host in value:
                # Key specification
                key = "%s;%s;%s;%s;%s" % (host, task.parsing_config_name,
                                          task.aggregation_config_name,
                                          name,
                                          task.CurrTime)
                try:
                    data = yield storage.read("combaine", key)
                    subgroup_data.append(data)
                    if cfg.get("perHost"):
                        res = yield app.enqueue("aggregate_group",
                                                msgpack.packb((task.Id, cfg, [data])))
                        result[name][host] = res
                except Exception as err:
                    # BUG FIX: only storage errors carry .code; a bare
                    # err.code raised AttributeError inside the handler
                    # for any other exception type.  code == 2 (key
                    # missing) is expected and stays silent.
                    if getattr(err, "code", None) != 2:
                        logger.error("unable to read from cache %s %s",
                                     key, err)

            mapping[subgroup] = subgroup_data
            try:
                res = yield app.enqueue("aggregate_group",
                                        msgpack.packb((task.Id, cfg, subgroup_data)))
                logger.info("name %s subgroup %s result %s",
                            name, subgroup, res)
                result[name][subgroup] = res
            except Exception as err:
                logger.error("unable to aggregte %s %s %s",
                             name, subgroup, err)

        all_data = []
        for v in mapping.itervalues():
            all_data.extend(v)
        # BUG FIX: res was read after the try even when enqueue raised on
        # its first use, causing a NameError (or silently reusing a stale
        # value from a previous iteration).  Initialize it explicitly so
        # failure records None instead of crashing.
        res = None
        try:
            res = yield app.enqueue("aggregate_group",
                                    msgpack.packb((task.Id, cfg, all_data)))
        except Exception as err:
            logger.error("unable to aggreagate all: %s %s", name, err)
        logger.info("name %s ALL %s %d" % (name, res, len(all_data)))
        result[name][metahost] = res

    # Send data to various senders
    for name, item in aggcfg.senders.iteritems():
        try:
            sender_type = item.get("type")
            if sender_type is None:
                logger.error("unable to detect sender type: %s", name)
                continue

            logger.info("Send to %s", sender_type)
            s = Service(sender_type)
            res = yield s.enqueue("send", msgpack.packb({"Config": item,
                                                         "Data": result,
                                                         "Id": task.Id}))
            logger.info("res for %s is %s", sender_type, res)
        except Exception as err:
            logger.error("unable to send to %s %s", name, err)

    logger.info("Result %s", result)
    response.write("Done")
    response.close()
示例#10
0
def aggreagate(request, response):
    """Aggregate per-host parsing results by subgroup and ship them to senders.

    Reads an AggregationTask from the request, pulls each host's cached
    parsing data from storage, runs the configured aggregator app over
    per-host, per-subgroup and whole-metahost data slices, then forwards
    the combined result to every configured sender service.  (The name's
    "aggreagate" typo is part of the public interface.)
    """
    raw = yield request.read()
    task = AggregationTask(raw)
    logger = get_logger_adapter(task.Id)
    logger.info("task started")
    metahost = task.parsing_config.metahost
    hosts = task.hosts

    # read aggregation config passed to us
    aggcfg = task.aggregation_config
    logger.debug("aggregation config %s", aggcfg)

    logger.info("%s", hosts)
    # repack hosts by subgroups by dc
    # For example:
    # {"GROUP-DC": "hostname"} from {"DC": "hostname"}
    hosts = dict(("%s-%s" % (metahost, subgroup), v)
                 for subgroup, v in hosts.iteritems())

    result = {}

    for name, cfg in aggcfg.data.iteritems():
        mapping = {}

        logger.info("Send to %s %s" % (name, cfg['type']))
        app = cache.get(cfg['type'])
        if app is None:
            logger.info("Skip %s" % cfg['type'])
            continue

        result[name] = {}

        for subgroup, value in hosts.iteritems():
            subgroup_data = list()
            for host in value:
                # Key specification
                key = "%s;%s;%s;%s;%s" % (host, task.parsing_config_name,
                                          task.aggregation_config_name, name,
                                          task.CurrTime)
                try:
                    data = yield storage.read("combaine", key)
                    subgroup_data.append(data)
                    if cfg.get("perHost"):
                        res = yield app.enqueue(
                            "aggregate_group",
                            msgpack.packb((task.Id, cfg, [data])))
                        result[name][host] = res
                except Exception as err:
                    # NOTE(review): err.code raises AttributeError for
                    # exceptions without a .code attribute (only storage
                    # errors appear to carry one) — consider getattr().
                    # code == 2 presumably means "key missing"; verify.
                    if err.code != 2:
                        logger.error("unable to read from cache %s %s", key,
                                     err)

            mapping[subgroup] = subgroup_data
            try:
                res = yield app.enqueue(
                    "aggregate_group",
                    msgpack.packb((task.Id, cfg, subgroup_data)))
                logger.info("name %s subgroup %s result %s", name, subgroup,
                            res)
                result[name][subgroup] = res
            except Exception as err:
                logger.error("unable to aggregte %s %s %s", name, subgroup,
                             err)

        all_data = []
        for v in mapping.itervalues():
            all_data.extend(v)
        try:
            res = yield app.enqueue("aggregate_group",
                                    msgpack.packb((task.Id, cfg, all_data)))
        except Exception as err:
            logger.error("unable to aggreagate all: %s %s", name, err)
        # NOTE(review): if the enqueue above raised on its first use, res
        # is unbound here (NameError) or stale from a prior iteration.
        logger.info("name %s ALL %s %d" % (name, res, len(all_data)))
        result[name][metahost] = res

    # Send data to various senders
    for name, item in aggcfg.senders.iteritems():
        try:
            sender_type = item.get("type")
            if sender_type is None:
                logger.error("unable to detect sender type: %s", name)
                continue

            logger.info("Send to %s", sender_type)
            s = Service(sender_type)
            res = yield s.enqueue(
                "send",
                msgpack.packb({
                    "Config": item,
                    "Data": result,
                    "Id": task.Id
                }))
            logger.info("res for %s is %s", sender_type, res)
        except Exception as err:
            logger.error("unable to send to %s %s", name, err)

    logger.info("Result %s", result)
    response.write("Done")
    response.close()