Example no. 1
    def create(self,
               name,
               host_id,
               start_port=0,
               user_id="",
               consensus_plugin=CONSENSUS_PLUGINS[0],
               consensus_mode=CONSENSUS_MODES[0],
               size=CLUSTER_SIZES[0]):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism

        :param name: name of the cluster
        :param host_id: id of the host to run the cluster on
        :param start_port: first service port for the cluster; generated
         automatically if not given
        :param user_id: user_id of the cluster if it is applied at creation
        :param consensus_plugin: type of the consensus plugin
        :param consensus_mode: mode of the consensus plugin
        :param size: size of the cluster, int type
        :return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, consensus={}/{}, "
                    "size={}".format(name, host_id, consensus_plugin,
                                     consensus_mode, size))

        h = self.host_handler.get_active_host_by_id(host_id)
        if not h:
            return None

        if len(h.get("clusters")) >= h.get("capacity"):
            logger.warning("host {} is full already".format(host_id))
            return None

        daemon_url = h.get("daemon_url")
        logger.debug("daemon_url={}".format(daemon_url))

        if start_port <= 0:
            ports = self.find_free_start_ports(host_id, 1)
            if not ports:
                logger.warning("No free port is found")
                return None
            start_port = ports[0]

        peer_mapped_ports, ca_mapped_ports, mapped_ports = {}, {}, {}
        for k, v in PEER_SERVICE_PORTS.items():
            peer_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
        for k, v in CA_SERVICE_PORTS.items():
            ca_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port

        mapped_ports.update(peer_mapped_ports)
        mapped_ports.update(ca_mapped_ports)

        c = {
            'id': '',
            'name': name,
            'user_id': user_id or SYS_CREATOR,  # avoid applied
            'host_id': host_id,
            'daemon_url': daemon_url,
            'consensus_plugin': consensus_plugin,
            'consensus_mode': consensus_mode,
            'create_ts': datetime.datetime.now(),
            'apply_ts': '',
            'release_ts': '',
            'duration': '',
            'mapped_ports': mapped_ports,
            'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            'size': size,
            'containers': [],
            'status': 'running',
            'health': ''
        }
        uuid = self.col_active.insert_one(c).inserted_id  # object type
        cid = str(uuid)
        self.col_active.update_one({"_id": uuid}, {"$set": {"id": cid}})
        # try to add one cluster to host
        h = self.host_handler.db_update_one({"id": host_id},
                                            {"$addToSet": {
                                                "clusters": cid
                                            }})
        if not h or len(h.get("clusters")) > h.get("capacity"):
            self.col_active.delete_one({"id": cid})
            self.host_handler.db_update_one({"id": host_id},
                                            {"$pull": {
                                                "clusters": cid
                                            }})
            return None

        # from now on, we should be safe

        # start compose project, failed then clean and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[h.get('type')]\
            .create(cid, mapped_ports, h, user_id=user_id,
                    consensus_plugin=consensus_plugin,
                    consensus_mode=consensus_mode, size=size)
        if not containers:
            logger.warning(
                "failed to start cluster={}, then delete".format(name))
            self.delete(id=cid, record=False, forced=True)
            return None

        peer_host_ip = self._get_service_ip(cid, 'vp0')
        ca_host_ip = self._get_service_ip(cid, 'membersrvc')
        # no api_url, then clean and return
        if not peer_host_ip:  # not valid api_url
            logger.error("Error to find peer host url, cleanup")
            self.delete(id=cid, record=False, forced=True)
            return None

        service_urls = {}
        for k, v in peer_mapped_ports.items():
            service_urls[k] = "{}:{}".format(peer_host_ip, v)

        for k, v in ca_mapped_ports.items():
            service_urls[k] = "{}:{}".format(ca_host_ip, v)

        # update api_url, container, and user_id field
        self.db_update_one({"id": cid}, {
            "$set": {
                "containers": containers,
                "user_id": user_id,
                'api_url': service_urls['rest'],
                'service_url': service_urls
            }
        })

        def check_health_work(cid):
            time.sleep(5)
            self.refresh_health(cid)

        t = Thread(target=check_health_work, args=(cid, ))
        t.start()

        logger.info("Create cluster OK, id={}".format(cid))
        return cid
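
Note on the port arithmetic above: every peer and CA service port is shifted by the same offset, so the peer REST port lands exactly on start_port and the other services keep their relative spacing. Below is a minimal sketch of that mapping, using hypothetical PEER_SERVICE_PORTS / CA_SERVICE_PORTS values (the real constants are defined elsewhere in the project):

# Hypothetical service-port constants, for illustration only; the real
# values come from the project's configuration module.
PEER_SERVICE_PORTS = {'rest': 7050, 'grpc': 7051, 'cli': 7052, 'event': 7053}
CA_SERVICE_PORTS = {'ecap': 7054}


def map_ports(start_port):
    """Shift every service port so the peer REST port lands on start_port."""
    offset = start_port - PEER_SERVICE_PORTS['rest']
    peer_mapped = {k: v + offset for k, v in PEER_SERVICE_PORTS.items()}
    ca_mapped = {k: v + offset for k, v in CA_SERVICE_PORTS.items()}
    return peer_mapped, ca_mapped


peer, ca = map_ports(10000)
print(peer)  # {'rest': 10000, 'grpc': 10001, 'cli': 10002, 'event': 10003}
print(ca)    # {'ecap': 10004}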
Example no. 2
    def create(self, name, host_id, start_port=0, user_id="",
               consensus_plugin=CONSENSUS_PLUGINS[0],
               consensus_mode=CONSENSUS_MODES[0], size=CLUSTER_SIZES[0]):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism

        :param name: name of the cluster
        :param host_id: id of the host to run the cluster on
        :param start_port: first service port for the cluster; generated
         automatically if not given
        :param user_id: user_id of the cluster if it is applied at creation
        :param consensus_plugin: type of the consensus plugin
        :param consensus_mode: mode of the consensus plugin
        :param size: size of the cluster, int type
        :return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, consensus={}/{}, "
                    "size={}".format(name, host_id, consensus_plugin,
                                     consensus_mode, size))

        h = self.host_handler.get_active_host_by_id(host_id)
        if not h:
            return None

        if len(h.get("clusters")) >= h.get("capacity"):
            logger.warning("host {} is full already".format(host_id))
            return None

        daemon_url = h.get("daemon_url")
        logger.debug("daemon_url={}".format(daemon_url))

        if start_port <= 0:
            ports = self.find_free_start_ports(host_id, 1)
            if not ports:
                logger.warning("No free port is found")
                return None
            start_port = ports[0]

        peer_mapped_ports, ca_mapped_ports, mapped_ports = {}, {}, {}
        for k, v in PEER_SERVICE_PORTS.items():
            peer_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
        for k, v in CA_SERVICE_PORTS.items():
            ca_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port

        mapped_ports.update(peer_mapped_ports)
        mapped_ports.update(ca_mapped_ports)

        c = {
            'id': '',
            'name': name,
            'user_id': user_id or SYS_CREATOR,  # avoid applied
            'host_id': host_id,
            'daemon_url': daemon_url,
            'consensus_plugin': consensus_plugin,
            'consensus_mode': consensus_mode,
            'create_ts': datetime.datetime.now(),
            'apply_ts': '',
            'release_ts': '',
            'duration': '',
            'mapped_ports': mapped_ports,
            'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            'size': size,
            'containers': [],
            'status': 'running',
            'health': ''
        }
        uuid = self.col_active.insert_one(c).inserted_id  # object type
        cid = str(uuid)
        self.col_active.update_one({"_id": uuid}, {"$set": {"id": cid}})
        # try to add one cluster to host
        h = self.host_handler.db_update_one(
            {"id": host_id}, {"$addToSet": {"clusters": cid}})
        if not h or len(h.get("clusters")) > h.get("capacity"):
            self.col_active.delete_one({"id": cid})
            self.host_handler.db_update_one({"id": host_id},
                                            {"$pull": {"clusters": cid}})
            return None

        # from now on, we should be safe

        # start compose project, failed then clean and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = compose_up(
            name=cid, mapped_ports=mapped_ports, host=h,
            consensus_plugin=consensus_plugin, consensus_mode=consensus_mode,
            cluster_size=size)
        if not containers or len(containers) != size:
            logger.warning("failed containers={}, then delete cluster".format(
                containers))
            self.delete(id=cid, record=False, forced=True)
            return None

        peer_host_ip = self._get_service_ip(cid, 'vp0')
        ca_host_ip = self._get_service_ip(cid, 'membersrvc')
        # no api_url, then clean and return
        if not peer_host_ip:  # not valid api_url
            logger.error("Error to find peer host url, cleanup")
            self.delete(id=cid, record=False, forced=True)
            return None

        service_urls = {}
        for k, v in peer_mapped_ports.items():
            service_urls[k] = "{}:{}".format(peer_host_ip, v)

        for k, v in ca_mapped_ports.items():
            service_urls[k] = "{}:{}".format(ca_host_ip, v)

        # update api_url, container, and user_id field
        self.db_update_one(
            {"id": cid},
            {"$set": {"containers": containers, "user_id": user_id,
                      'api_url': service_urls['rest'],
                      'service_url': service_urls}})

        def check_health_work(cid):
            time.sleep(5)
            self.refresh_health(cid)
        t = Thread(target=check_health_work, args=(cid,))
        t.start()

        logger.info("Create cluster OK, id={}".format(cid))
        return cid
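
Both variants above write the cluster record first, reserve a slot on the host with $addToSet, and then re-check capacity, undoing both writes if the host turned out to be full. Below is a minimal in-memory sketch of that reserve-then-roll-back flow, with plain dicts standing in for the MongoDB collections (names and helpers here are illustrative only):

import uuid

# Plain dicts standing in for the MongoDB host/cluster collections.
hosts = {'host-1': {'capacity': 1, 'clusters': []}}
clusters = {}


def create_cluster(host_id, name):
    host = hosts.get(host_id)
    if not host or len(host['clusters']) >= host['capacity']:
        return None                      # host missing or already full

    cid = str(uuid.uuid4())
    clusters[cid] = {'id': cid, 'name': name, 'host_id': host_id}

    # Reserve a slot on the host, then re-check capacity; the DB version
    # does the reservation with $addToSet and compares against capacity.
    host['clusters'].append(cid)
    if len(host['clusters']) > host['capacity']:
        clusters.pop(cid, None)          # roll back the cluster record
        host['clusters'].remove(cid)     # release the reservation ($pull)
        return None
    return cid


print(create_cluster('host-1', 'c0'))    # a fresh cluster id
print(create_cluster('host-1', 'c1'))    # None: capacity of 1 is exhausted

In the handler, the over-capacity check runs against the host document returned by db_update_one, which is why the comparison after the reservation uses > while the pre-check uses >=.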
Example no. 3
    def create(self,
               name,
               host_id,
               network_type,
               config,
               start_port=0,
               user_id=""):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism
        Args:

            name: name of the cluster
            host_id: id of the host to run the cluster on
            network_type: type of the network
            config: network configuration
            start_port: first service port for the cluster; generated
             automatically if not given
            user_id: user_id of the cluster if it is applied at creation

        return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, config={}".format(
            name, host_id, network_type, config.get_data()))

        worker = self.host_handler.get_active_host_by_id(host_id)
        if not worker:
            return None

        if len(worker.get("clusters")) >= worker.get("capacity"):
            logger.warning("host {} is already full".format(host_id))
            return None

        worker_api = worker.get("worker_api")
        logger.debug("worker_api={}".format(worker_api))

        if start_port <= 0:
            ports = self.find_free_start_ports(host_id, 1)
            if not ports:
                logger.warning("No free port is found")
                return None
            start_port = ports[0]

        peer_mapped_ports, ca_mapped_ports, mapped_ports = {}, {}, {}
        for k, v in PEER_SERVICE_PORTS.items():
            peer_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
        for k, v in CA_SERVICE_PORTS.items():
            ca_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port

        mapped_ports.update(peer_mapped_ports)
        mapped_ports.update(ca_mapped_ports)

        net = {  # net is a blockchain network instance
            'id': '',
            'name': name,
            'user_id': user_id or SYS_CREATOR,  # avoid applied
            'host_id': host_id,
            'worker_api': worker_api,
            'network_type': network_type,  # e.g., fabric-1.0
            'create_ts': datetime.datetime.now(),
            'apply_ts': '',
            'release_ts': '',
            'status': 'running',
            'containers': [],
            'duration': '',
            'health': ''
        }
        if network_type == NETWORK_TYPE_FABRIC_V1:  # TODO: fabric v1.0
            net.update({
                'mapped_ports': mapped_ports,
                'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            })
        elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:  # fabric v0.6
            net.update({
                'mapped_ports': mapped_ports,
                'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            })

        net.update(config.get_data())

        uuid = self.col_active.insert_one(net).inserted_id  # object type
        cid = str(uuid)
        self.col_active.update_one({"_id": uuid}, {"$set": {"id": cid}})
        # try to start one cluster at the host
        worker = self.host_handler.db_update_one(
            {"id": host_id}, {"$addToSet": {
                "clusters": cid
            }})
        if not worker or len(worker.get("clusters")) > worker.get("capacity"):
            self.col_active.delete_one({"id": cid})
            self.host_handler.db_update_one({"id": host_id},
                                            {"$pull": {
                                                "clusters": cid
                                            }})
            return None

        # from now on, we should be safe

        # start compose project, failed then clean and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[worker.get('type')]\
            .create(cid, mapped_ports, worker, user_id=user_id,
                    network_type=network_type, config=config)
        if not containers:
            logger.warning(
                "failed to start cluster={}, then delete".format(name))
            self.delete(id=cid, record=False, forced=True)
            return None

        peer_host_ip = self._get_service_ip(cid, 'vp0')
        ca_host_ip = self._get_service_ip(cid, 'membersrvc')
        # no api_url, then clean and return
        if not peer_host_ip:  # not valid api_url
            logger.error("Error to find peer host url, cleanup")
            self.delete(id=cid, record=False, forced=True)
            return None

        service_urls = {}
        for k, v in peer_mapped_ports.items():
            service_urls[k] = "{}:{}".format(peer_host_ip, v)

        for k, v in ca_mapped_ports.items():
            service_urls[k] = "{}:{}".format(ca_host_ip, v)

        # update api_url, container, and user_id field
        self.db_update_one({"id": cid}, {
            "$set": {
                "containers": containers,
                "user_id": user_id,
                'api_url': service_urls['rest'],
                'service_url': service_urls
            }
        })

        def check_health_work(cid):
            time.sleep(5)
            self.refresh_health(cid)

        t = Thread(target=check_health_work, args=(cid, ))
        t.start()

        logger.info("Create cluster OK, id={}".format(cid))
        return cid
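
The health check at the end of each variant is intentionally kept off the request path: a throwaway thread sleeps briefly so the containers can come up, then refreshes the cluster's health record. Below is a small sketch of that fire-and-forget pattern, with a stub refresh_health in place of the handler's method (the daemon flag is an addition in this sketch, not part of the original code):

import time
from threading import Thread


def refresh_health(cluster_id):
    # Stub: the real handler polls the peers and updates the DB record.
    print("refreshing health of cluster {}".format(cluster_id))


def schedule_health_check(cluster_id, delay=5):
    def work():
        time.sleep(delay)          # give the containers time to come up
        refresh_health(cluster_id)

    t = Thread(target=work, daemon=True)   # daemon flag added in this sketch
    t.start()
    return t


t = schedule_health_check("abc123", delay=1)
t.join()  # the demo waits; the handler itself never joins the thread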
Example no. 4
    def create(self, name, host_id, config, start_port=0,
               user_id=""):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism
        Args:

            name: name of the cluster
            host_id: id of the host to run the cluster on
            config: network configuration
            start_port: first service port for the cluster; generated
             automatically if not given
            user_id: user_id of the cluster if it is applied at creation

        return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, config={}, start_port={}, "
                    "user_id={}".format(name, host_id, config.get_data(),
                                        start_port, user_id))

        worker = self.host_handler.get_active_host_by_id(host_id)
        if not worker:
            return None

        if len(worker.get("clusters")) >= worker.get("capacity"):
            logger.warning("host {} is already full".format(host_id))
            return None

        worker_api = worker.get("worker_api")
        logger.debug("worker_api={}".format(worker_api))

        if start_port <= 0:
            ports = self.find_free_start_ports(host_id, 1)
            if not ports:
                logger.warning("No free port is found")
                return None
            start_port = ports[0]

        peer_mapped_ports, ca_mapped_ports, mapped_ports = {}, {}, {}
        for k, v in PEER_SERVICE_PORTS.items():
            peer_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
        for k, v in CA_SERVICE_PORTS.items():
            ca_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port

        mapped_ports.update(peer_mapped_ports)
        mapped_ports.update(ca_mapped_ports)
        logger.debug("mapped_ports={}".format(mapped_ports))

        network_type = config['network_type']
        net = {  # net is a blockchain network instance
            'id': '',
            'name': name,
            'user_id': user_id or SYS_CREATOR,  # avoid applied
            'host_id': host_id,
            'worker_api': worker_api,
            'network_type': network_type,  # e.g., fabric-1.0
            'create_ts': datetime.datetime.now(),
            'apply_ts': '',
            'release_ts': '',
            'status': 'running',
            'containers': [],
            'duration': '',
            'health': ''
        }
        if network_type == NETWORK_TYPE_FABRIC_V1:  # TODO: fabric v1.0
            net.update({
                'mapped_ports': mapped_ports,
                'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            })
        elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:  # fabric v0.6
            net.update({
                'mapped_ports': mapped_ports,
                'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            })

        net.update(config.get_data())

        uuid = self.col_active.insert_one(net).inserted_id  # object type
        cid = str(uuid)
        self.col_active.update_one({"_id": uuid}, {"$set": {"id": cid}})
        # try to start one cluster at the host
        worker = self.host_handler.db_update_one(
            {"id": host_id}, {"$addToSet": {"clusters": cid}})
        if not worker or len(worker.get("clusters")) > worker.get("capacity"):
            self.col_active.delete_one({"id": cid})
            self.host_handler.db_update_one({"id": host_id},
                                            {"$pull": {"clusters": cid}})
            return None

        # from now on, we should be safe

        # start compose project, failed then clean and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[worker.get('type')]\
            .create(cid, mapped_ports, worker, config=config, user_id=user_id)
        if not containers:
            logger.warning("failed to start cluster={}, then delete"
                           .format(name))
            self.delete(id=cid, record=False, forced=True)
            return None

        access_peer, access_ca = '', ''
        if network_type == NETWORK_TYPE_FABRIC_V1:  # fabric v1.0
            access_peer = 'peer0.org1.example.com'
            access_ca = 'ca.example.com'
        elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:  # fabric v0.6
            access_peer = 'vp0'
            access_ca = 'membersrvc'

        peer_host_ip = self._get_service_ip(cid, access_peer)
        ca_host_ip = self._get_service_ip(cid, access_ca)
        # no api_url, then clean and return
        if not peer_host_ip:  # not valid api_url
            logger.error("Error to find peer host url, cleanup")
            self.delete(id=cid, record=False, forced=True)
            return None

        service_urls = {}
        for k, v in peer_mapped_ports.items():
            service_urls[k] = "{}:{}".format(peer_host_ip, v)

        for k, v in ca_mapped_ports.items():
            service_urls[k] = "{}:{}".format(ca_host_ip, v)

        # update api_url, container, and user_id field
        self.db_update_one(
            {"id": cid},
            {"$set": {"containers": containers, "user_id": user_id,
                      'api_url': service_urls['rest'],
                      'service_url': service_urls}})

        def check_health_work(cid):
            time.sleep(5)
            self.refresh_health(cid)

        t = Thread(target=check_health_work, args=(cid,))
        t.start()

        logger.info("Create cluster OK, id={}".format(cid))
        return cid
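
Compared with the earlier variants, this one resolves the access points by network type instead of hard-coding 'vp0' and 'membersrvc', then joins the resolved IP with each mapped port to build service_url. Below is a compact sketch of that assembly, using hypothetical network-type constants and a resolve_ip callable standing in for _get_service_ip:

# Hypothetical constants standing in for the project's real definitions.
NETWORK_TYPE_FABRIC_V1 = 'fabric-1.0'
NETWORK_TYPE_FABRIC_PRE_V1 = 'fabric-0.6'

ACCESS_NAMES = {
    NETWORK_TYPE_FABRIC_V1: ('peer0.org1.example.com', 'ca.example.com'),
    NETWORK_TYPE_FABRIC_PRE_V1: ('vp0', 'membersrvc'),
}


def build_service_urls(network_type, resolve_ip,
                       peer_mapped_ports, ca_mapped_ports):
    """resolve_ip(service_name) -> host IP, like _get_service_ip(cid, name)."""
    access_peer, access_ca = ACCESS_NAMES[network_type]
    peer_ip, ca_ip = resolve_ip(access_peer), resolve_ip(access_ca)
    if not peer_ip:
        return None  # mirrors the clean-up-and-return path above
    urls = {k: "{}:{}".format(peer_ip, v)
            for k, v in peer_mapped_ports.items()}
    urls.update({k: "{}:{}".format(ca_ip, v)
                 for k, v in ca_mapped_ports.items()})
    return urls


# Demo: every service name resolves to the same host IP.
print(build_service_urls(NETWORK_TYPE_FABRIC_PRE_V1, lambda name: '10.0.0.5',
                         {'rest': 10000, 'grpc': 10001}, {'ecap': 10004}))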