Code Example #1
File: cluster.py  Project: zhmz1326/cello
    def db_update_one(self, filter, operations, after=True, col="active"):
        """
        Update the data into the active db

        :param filter: Which instance to update, e.g., {"id": "xxx"}
        :param operations: data to update to db, e.g., {"status": "running"}
        :param after: return AFTER or BEFORE
        :param col: collection to operate on
        :return: The updated host json dict
        """
        state = CLUSTER_STATE.active.name if col == "active" \
            else CLUSTER_STATE.released.name
        filter.update({
            "state": state
        })
        logger.info("filter {} operations {}".format(filter, operations))
        kwargs = dict(('set__' + k, v) for (k, v) in operations.items())
        for k, v in kwargs.items():
            logger.info("k {} v {}".format(k, v))
        try:
            ClusterModel.objects(id=filter.get("id")).update(
                upsert=True,
                **kwargs
            )
            doc = ClusterModel.objects.get(id=filter.get("id"))
        except Exception as exc:
            logger.info("exception {}".format(exc.message))
            return None
        return doc
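
A minimal standalone sketch of the key transformation inside db_update_one: each field in the operations dict is prefixed with set__ so that mongoengine's update() applies it as a $set. No database is needed to see the mapping; the field values below are hypothetical, and the call pattern follows the create() examples further down, which pass a plain field-to-value dict.

# Illustrates the operations -> kwargs mapping used by db_update_one above.
operations = {"user_id": "u1", "api_url": "tcp://10.0.0.5:7050"}
kwargs = dict(("set__" + k, v) for k, v in operations.items())
print(kwargs)
# {'set__user_id': 'u1', 'set__api_url': 'tcp://10.0.0.5:7050'}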
Code Example #2
File: host.py  Project: zhoudaqing/cello
    def clean(self, id):
        """
        Clean a host's free clusters.

        :param id: host id
        :return: True or False
        """
        logger.debug("clean host with id = {}".format(id))
        host = self.get_by_id(id)
        if not host:
            return False
        clusters = ClusterModel.objects(host=host)
        if host.status != "active":
            return False

        if len(clusters) <= 0:
            return True

        host = self.db_set_by_id(id, **{"autofill": False})
        schedulable_status = host.schedulable
        if schedulable_status:
            host = self.db_set_by_id(id, **{"schedulable": False})

        for cluster_item in clusters:
            cid = str(cluster_item.id)
            t = Thread(target=cluster.cluster_handler.delete, args=(cid,))
            t.start()
            time.sleep(0.2)

        if schedulable_status:
            self.db_set_by_id(id, **{"schedulable": schedulable_status})

        return True
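
The cluster teardown above is staggered onto worker threads. A runnable sketch of just that pattern, with the real cluster.cluster_handler.delete call replaced by a stub and hypothetical cluster ids:

import time
from threading import Thread

def delete_cluster(cid):
    # stand-in for cluster.cluster_handler.delete(cid)
    print("deleting cluster", cid)

for cid in ("c1", "c2", "c3"):      # hypothetical cluster ids
    t = Thread(target=delete_cluster, args=(cid,))
    t.start()
    time.sleep(0.2)                 # small stagger between launches, as in clean()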
Code Example #3
File: host.py  Project: zhmz1326/cello
    def update(self, id, d):
        """ Update a host's property

        TODO: may check when changing host type

        :param id: id of the host
        :param d: dict to use as updated values
        :return: serialized result or obj
        """
        logger.debug("Get a host with id=" + id)
        h_old = self.get_by_id(id)
        if not h_old:
            logger.warning("No host found with id=" + id)
            return {}

        if h_old.get("status") == "pending":
            return {}

        if "worker_api" in d and not d["worker_api"].startswith("tcp://"):
            d["worker_api"] = "tcp://" + d["worker_api"]

        if "capacity" in d:
            d["capacity"] = int(d["capacity"])
        if d["capacity"] < ClusterModel.objects(host=h_old).count():
            logger.warning("Cannot set cap smaller than running clusters")
            return {}
        if "log_server" in d and "://" not in d["log_server"]:
            d["log_server"] = "udp://" + d["log_server"]
        if "log_type" in d and d["log_type"] == CLUSTER_LOG_TYPES[0]:
            d["log_server"] = ""
        self.db_set_by_id(id, **d)
        h_new = self.get_by_id(id)
        return self._schema(h_new)
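
A standalone sketch of the input normalization that update() performs before writing to the database; the values are hypothetical and the database call is omitted.

# Mirrors the worker_api / capacity / log_server handling in update() above.
d = {"worker_api": "192.168.1.10:2375", "capacity": "5",
     "log_server": "192.168.1.20:5000"}

if "worker_api" in d and not d["worker_api"].startswith("tcp://"):
    d["worker_api"] = "tcp://" + d["worker_api"]   # ensure tcp:// scheme
if "capacity" in d:
    d["capacity"] = int(d["capacity"])             # form values arrive as strings
if "log_server" in d and "://" not in d["log_server"]:
    d["log_server"] = "udp://" + d["log_server"]   # default to udp transport

print(d)
# {'worker_api': 'tcp://192.168.1.10:2375', 'capacity': 5,
#  'log_server': 'udp://192.168.1.20:5000'}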
Code Example #4
File: host.py  Project: zhoudaqing/cello
    def reset(self, id):
        """
        Reset a host's worker environment.

        :param id: host id
        :return: True or False
        """
        logger.debug("clean host with id = {}".format(id))
        host = self.get_by_id(id)
        if not host:
            logger.warning("Cannot find a resettable host with id={}".format(id))
            return False
        host_type = host.type
        return self.host_agents[host_type].reset(host_type, host.worker_api)
Code Example #5
File: host_api.py  Project: zhoudaqing/cello
def host_actions():
    logger.info("/host_op, method=" + r.method)
    request_debug(r, logger)

    host_id, action = r.form.get('id'), r.form.get('action')
    if not host_id or not action:
        error_msg = "host POST without enough data"
        logger.warning(error_msg)
        return make_fail_resp(error=error_msg,
                              data=r.form)
    else:
        if action == "fillup":
            if host_handler.fillup(host_id):
                logger.debug("fillup successfully")
                return make_ok_resp()
            else:
                error_msg = "Failed to fillup the host."
                logger.warning(error_msg)
                return make_fail_resp(error=error_msg, data=r.form)
        elif action == "clean":
            if host_handler.clean(host_id):
                logger.debug("clean successfully")
                return make_ok_resp()
            else:
                error_msg = "Failed to clean the host."
                logger.warning(error_msg)
                return make_fail_resp(error=error_msg, data=r.form)
        elif action == "reset":
            if host_handler.reset(host_id):
                logger.debug("reset successfully")
                try:
                    host_model = HostModel.objects.get(id=host_id)
                    clusters = ClusterModel.objects(host=host_model)
                    for cluster_item in clusters:
                        cluster_item.delete()
                except Exception:
                    pass
                return make_ok_resp()
            else:
                error_msg = "Failed to reset the host."
                logger.warning(error_msg)
                return make_fail_resp(error=error_msg, data=r.form)

    error_msg = "unknown host action={}".format(action)
    logger.warning(error_msg)
    return make_fail_resp(error=error_msg, data=r.form)
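
A hypothetical client call for the handler above. The log line suggests the route is /host_op, but the blueprint prefix is not shown in this snippet, so the URL below is an assumption; only the id and action form fields come from the code.

import requests

resp = requests.post(
    "http://localhost:8080/host_op",                    # assumed mount point
    data={"id": "5a1b2c3d4e5f", "action": "fillup"},    # read via r.form in host_actions()
)
print(resp.status_code, resp.text)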
Code Example #6
File: host.py  Project: zhoudaqing/cello
    def fillup(self, id):
        """
        Fill up a host with clusters to its capacity limit

        :param id: host id
        :return: True or False
        """
        logger.debug("Try fillup host {}".format(id))
        host = self.get_by_id(id)
        if not host:
            return False
        if host.status != "active":
            logger.warning("host {} is not active".format(id))
            return False
        clusters = ClusterModel.objects(host=host)
        num_new = host.capacity - len(clusters)
        if num_new <= 0:
            logger.warning("host {} already full".format(id))
            return True

        free_ports = cluster.cluster_handler.find_free_start_ports(id, num_new)
        logger.debug("Free_ports = {}".format(free_ports))

        def create_cluster_work(start_port):
            cluster_name = "{}_{}".format(
                host.name,
                int((start_port - CLUSTER_PORT_START) / CLUSTER_PORT_STEP))
            consensus_plugin = CONSENSUS_PLUGIN_SOLO
            cluster_size = random.choice(NETWORK_SIZE_FABRIC_V1)
            config = FabricV1NetworkConfig(
                consensus_plugin=consensus_plugin,
                size=cluster_size)
            cid = cluster.cluster_handler.create(name=cluster_name,
                                                 host_id=id, config=config,
                                                 start_port=start_port)
            if cid:
                logger.debug("Create cluster {} with id={}".format(
                    cluster_name, cid))
            else:
                logger.warning("Create cluster failed")
        for p in free_ports:
            t = Thread(target=create_cluster_work, args=(p,))
            t.start()
            time.sleep(0.2)

        return True
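
A standalone sketch of the cluster-name numbering used in create_cluster_work(): the index is how many port steps the start port sits above the base port. The constant values below are placeholders; the real ones come from cello's configuration.

CLUSTER_PORT_START = 7050   # placeholder value
CLUSTER_PORT_STEP = 100     # placeholder value

host_name = "host1"
for start_port in (7050, 7150, 7250):
    index = int((start_port - CLUSTER_PORT_START) / CLUSTER_PORT_STEP)
    print("{}_{}".format(host_name, index))
# host1_0
# host1_1
# host1_2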
Code Example #7
File: host.py  Project: silkchain/cello
    def fillup(self, id):
        """
        Fill up a host with clusters to its capacity limit

        :param id: host id
        :return: True or False
        """
        logger.debug("Try fillup host {}".format(id))
        host = self.get_by_id(id)
        if not host:
            return False
        if host.status != "active":
            logger.warning("host {} is not active".format(id))
            return False
        clusters = ClusterModel.objects(host=host)
        num_new = host.capacity - len(clusters)
        if num_new <= 0:
            logger.warning("host {} already full".format(id))
            return True

        free_ports = cluster.cluster_handler.find_free_start_ports(id, num_new)
        logger.debug("Free_ports = {}".format(free_ports))

        def create_cluster_work(start_port):
            cluster_name = "{}_{}".format(
                host.name,
                int((start_port - CLUSTER_PORT_START) / CLUSTER_PORT_STEP))
            cluster_size = random.choice(NETWORK_SIZE_FABRIC_V1)
            config = FabricV1NetworkConfig(
                consensus_plugin=CONSENSUS_PLUGIN_SOLO,
                size=cluster_size)
            cid = cluster.cluster_handler.create(name=cluster_name,
                                                 host_id=id, config=config,
                                                 start_port=start_port)
            if cid:
                logger.debug("Create cluster {} with id={}".format(
                    cluster_name, cid))
            else:
                logger.warning("Create cluster failed")
        for p in free_ports:
            t = Thread(target=create_cluster_work, args=(p,))
            t.start()
            time.sleep(0.2)

        return True
Code Example #8
def host_actions():
    logger.info("/host_op, method=" + r.method)
    request_debug(r, logger)

    host_id, action = r.form.get('id'), r.form.get('action')
    if not host_id or not action:
        error_msg = "host POST without enough data"
        logger.warning(error_msg)
        return make_fail_resp(error=error_msg, data=r.form)
    else:
        if action == "fillup":
            if host_handler.fillup(host_id):
                logger.debug("fillup successfully")
                return make_ok_resp()
            else:
                error_msg = "Failed to fillup the host."
                logger.warning(error_msg)
                return make_fail_resp(error=error_msg, data=r.form)
        elif action == "clean":
            if host_handler.clean(host_id):
                logger.debug("clean successfully")
                return make_ok_resp()
            else:
                error_msg = "Failed to clean the host."
                logger.warning(error_msg)
                return make_fail_resp(error=error_msg, data=r.form)
        elif action == "reset":
            if host_handler.reset(host_id):
                logger.debug("reset successfully")
                try:
                    host_model = HostModel.objects.get(id=host_id)
                    clusters = ClusterModel.objects(host=host_model)
                    for cluster_item in clusters:
                        cluster_item.delete()
                except Exception:
                    pass
                return make_ok_resp()
            else:
                error_msg = "Failed to reset the host."
                logger.warning(error_msg)
                return make_fail_resp(error=error_msg, data=r.form)

    error_msg = "unknown host action={}".format(action)
    logger.warning(error_msg)
    return make_fail_resp(error=error_msg, data=r.form)
Code Example #9
    def list(self, filter_data=None, col_name="active"):
        """ List clusters with given criteria

        :param filter_data: dict with the filter properties
        :param col_name: which state collection to list from
        :return: list of serialized doc
        """
        filter_data = filter_data or {}
        result = []
        if col_name in [e.name for e in CLUSTER_STATE]:
            logger.debug("List all {} clusters".format(col_name))
            filter_data.update({
                "state": col_name
            })
            clusters = ClusterModel.objects(__raw__=filter_data)
            result = self._schema(clusters, many=True)
        else:
            logger.warning("Unknown cluster col_name=" + col_name)
        return result
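
A standalone sketch of how list() builds its raw Mongo query: the caller's filter is merged with the state implied by col_name before the __raw__ lookup. The values here are hypothetical and the mongoengine call is left as a comment.

filter_data = {"user_id": "u1"}      # hypothetical caller filter
col_name = "active"

filter_data.update({"state": col_name})
print(filter_data)                   # {'user_id': 'u1', 'state': 'active'}
# ClusterModel.objects(__raw__=filter_data) would then run this dict as a
# raw MongoDB query document.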
Code Example #10
File: cluster.py  Project: zimuxin/cello
    def _get_cluster_info(self, cid, config):
        cluster = ClusterModel.objects.get(id=cid)

        cluster_name = cluster.name
        kube_config = KubernetesOperation()._get_config_from_params(
            cluster.host.k8s_param)

        clusters_exists = ClusterModel.objects(host=cluster.host)
        ports_index = [
            service.port
            for service in ServicePort.objects(cluster__in=clusters_exists)
        ]

        nfsServer_ip = cluster.host.k8s_param.get('K8SNfsServer')
        consensus = config['consensus_plugin']

        return cluster, cluster_name, kube_config, ports_index, \
            nfsServer_ip, consensus
Code Example #11
File: cluster.py  Project: zhmz1326/cello
    def list(self, filter_data=None, col_name="active"):
        """ List clusters with given criteria

        :param filter_data: dict with the filter properties
        :param col_name: which state collection to list from
        :return: list of serialized doc
        """
        filter_data = filter_data or {}
        result = []
        if col_name in [e.name for e in CLUSTER_STATE]:
            logger.debug("List all {} clusters".format(col_name))
            filter_data.update({
                "state": col_name
            })
            clusters = ClusterModel.objects(__raw__=filter_data)
            result = self._schema(clusters, many=True)
        else:
            logger.warning("Unknown cluster col_name=" + col_name)
        return result
Code Example #12
    def find_free_start_ports(self, host_id, number):
        """ Find the first available port for a new cluster api

        This is NOT lock-free. Should keep simple, fast and safe!

        Check existing cluster records in the host, find available one.

        :param host_id: id of the host
        :param number: Number of ports to get
        :return: The port list, e.g., [7050, 7150, ...]
        """
        logger.debug("Find {} start ports for host {}".format(number, host_id))
        if number <= 0:
            logger.warning("number {} <= 0".format(number))
            return []
        host = self.host_handler.get_by_id(host_id)
        if not host:
            logger.warning("Cannot find host with id={}", host_id)
            return ""

        clusters_exists = ClusterModel.objects(host=host)
        # clusters_valid = list(filter(lambda c: c.get("service_url"),
        #                              clusters_exists))
        # ports_existed = list(map(
        #     lambda c: int(c["service_url"]["rest"].split(":")[-1]),
        #     clusters_valid))
        ports_existed = [
            service.port
            for service in ServicePort.objects(cluster__in=clusters_exists)
        ]

        logger.debug("The ports existed: {}".format(ports_existed))
        if len(ports_existed) + number >= 1000:
            logger.warning("Too much ports are already in used.")
            return []
        candidates = [
            CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
            for i in range(len(ports_existed) + number)
        ]

        result = list(filter(lambda x: x not in ports_existed, candidates))

        logger.debug("Free ports are {}".format(result[:number]))
        return result[:number]
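
The port selection above reduces to a small pure function: generate len(existing) + number candidates stepping from the base port, drop those already taken, and keep the first `number` of them. A runnable sketch with placeholder constants:

CLUSTER_PORT_START = 7050   # placeholder; cello defines the real value
CLUSTER_PORT_STEP = 100     # placeholder; cello defines the real value

def pick_free_start_ports(ports_existed, number):
    # Enough candidates so that `number` remain free even if every
    # existing port collides with one of them.
    candidates = [CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
                  for i in range(len(ports_existed) + number)]
    free = [p for p in candidates if p not in ports_existed]
    return free[:number]

print(pick_free_start_ports([7050, 7250], 2))   # [7150, 7350]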
Code Example #13
File: cluster.py  Project: zhmz1326/cello
    def find_free_start_ports(self, host_id, number):
        """ Find the first available port for a new cluster api

        This is NOT lock-free. Should keep simple, fast and safe!

        Check existing cluster records in the host, find available one.

        :param host_id: id of the host
        :param number: Number of ports to get
        :return: The port list, e.g., [7050, 7150, ...]
        """
        logger.debug("Find {} start ports for host {}".format(number, host_id))
        if number <= 0:
            logger.warning("number {} <= 0".format(number))
            return []
        host = self.host_handler.get_by_id(host_id)
        if not host:
            logger.warning("Cannot find host with id={}", host_id)
            return ""

        clusters_exists = ClusterModel.objects(host=host)
        # clusters_valid = list(filter(lambda c: c.get("service_url"),
        #                              clusters_exists))
        # ports_existed = list(map(
        #     lambda c: int(c["service_url"]["rest"].split(":")[-1]),
        #     clusters_valid))
        ports_existed = [service.port for service in
                         ServicePort.objects(cluster__in=clusters_exists)]

        logger.debug("The ports existed: {}".format(ports_existed))
        if len(ports_existed) + number >= 1000:
            logger.warning("Too much ports are already in used.")
            return []
        candidates = [CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
                      for i in range(len(ports_existed) + number)]

        result = list(filter(lambda x: x not in ports_existed, candidates))

        logger.debug("Free ports are {}".format(result[:number]))
        return result[:number]
Code Example #14
    def delete(self, id):
        """ Delete a host instance

        :param id: id of the host to delete
        :return:
        """
        logger.debug("Delete a host with id={0}".format(id))

        try:
            h = HostModel.objects.get(id=id)
        except Exception:
            logger.warning("Cannot delete non-existed host")
            return False

        host_type = h.type

        if ClusterModel.objects(host=h).count():
            logger.warning("Host type not found.")
            return False

        elif (host_type == WORKER_TYPE_DOCKER or
              host_type == WORKER_TYPE_SWARM):
            self.host_agents[host_type].delete(h.worker_api)

        elif host_type == WORKER_TYPE_VSPHERE:
            if h.status == "pending":
                return False
            vmuuid = h.vcparam[utils.VMUUID]
            vcip = h.vcparam[utils.VCIP]
            vcusername = h.vcparam[utils.VCUSERNAME]
            vcpwd = h.vcparam[utils.VCPWD]
            vcport = h.vcparam[utils.VCPORT]
            self.host_agents[host_type].delete(vmuuid,
                                               vcip,
                                               vcusername,
                                               vcpwd,
                                               vcport)
        h.delete()
        return True
Code Example #15
File: host.py  Project: zlpmetyou/taiyi_baas
    def update(self, id, d):
        """ Update a host's property

        TODO: may check when changing host type

        :param id: id of the host
        :param d: dict to use as updated values
        :return: serialized result or obj
        """
        logger.debug("Get a host with id=" + id)
        h_old = self.get_by_id(id)
        if not h_old:
            logger.warning("No host found with id=" + id)
            return {}

        if h_old.status == "pending":
            return {}

        if "worker_api" in d:
            if not d["worker_api"].startswith("tcp://"):
                d["worker_api"] = "tcp://" + d["worker_api"]

        if "capacity" in d:
            d["capacity"] = int(d["capacity"])
            if d["capacity"] < ClusterModel.objects(host=h_old).count():
                logger.warning("Cannot set cap smaller than running clusters")
                return {}
        if "log_server" in d and "://" not in d["log_server"]:
            d["log_server"] = "udp://" + d["log_server"]
        if "log_type" in d and d["log_type"] == CLUSTER_LOG_TYPES[0]:
            d["log_server"] = ""
        if "autofill" in d:
            d["autofill"] = d["autofill"] == "on"
        if "schedulable" in d:
            d["schedulable"] = d["schedulable"] == "on"
        self.db_set_by_id(id, **d)
        h_new = self.get_by_id(id)
        return self._schema(h_new)
Code Example #16
File: host.py  Project: ricardolousada/cello
    def delete(self, id):
        """ Delete a host instance

        :param id: id of the host to delete
        :return:
        """
        logger.debug("Delete a host with id={0}".format(id))

        try:
            h = HostModel.objects.get(id=id)
        except Exception:
            logger.warning("Cannot delete non-existed host")
            return False

        host_type = h.type

        if ClusterModel.objects(host=h).count():
            logger.warning("Host type not found.")
            return False

        elif (host_type == WORKER_TYPE_DOCKER or
              host_type == WORKER_TYPE_SWARM):
            self.host_agents[host_type].delete(h.worker_api)

        # elif host_type == WORKER_TYPE_VSPHERE:
        #     if h.status == "pending":
        #         return False
        #     vc_params = self.get_vc_params_by_id(id)
        #     vm_params = self.get_vm_params_by_id(id)
        #     logger.info(vc_params)
        #     self.host_agents[host_type].delete(vm_params.get(VMUUID),
        #                                        h.worker_api,
        #                                        vc_params.get(VCUSERNAME),
        #                                        vc_params.get(VCPWD),
        #                                        vc_params.get(VCPORT))
        h.delete()
        return True
Code Example #17
File: host.py  Project: zhoudaqing/cello
    def delete(self, id):
        """ Delete a host instance

        :param id: id of the host to delete
        :return:
        """
        logger.debug("Delete a host with id={0}".format(id))

        try:
            h = HostModel.objects.get(id=id)
        except Exception:
            logger.warning("Cannot delete non-existed host")
            return False

        host_type = h.type

        if ClusterModel.objects(host=h).count():
            logger.warning("Host type not found.")
            return False

        elif (host_type == WORKER_TYPE_DOCKER or
              host_type == WORKER_TYPE_SWARM):
            self.host_agents[host_type].delete(h.worker_api)

        # elif host_type == WORKER_TYPE_VSPHERE:
        #     if h.status == "pending":
        #         return False
        #     vc_params = self.get_vc_params_by_id(id)
        #     vm_params = self.get_vm_params_by_id(id)
        #     logger.info(vc_params)
        #     self.host_agents[host_type].delete(vm_params.get(VMUUID),
        #                                        h.worker_api,
        #                                        vc_params.get(VCUSERNAME),
        #                                        vc_params.get(VCPWD),
        #                                        vc_params.get(VCPORT))
        h.delete()
        return True
Code Example #18
File: clusters.py  Project: zlpmetyou/taiyi_baas
    def post(self):
        user = utils._get_user()
        user = user.dbUser

        body = cluster_create_parser.parse_args()
        cluster_name = body.get('cluster_name')
        network_type = body.get('network_type')
        consensus_plugin = body.get('consensus_plugin')
        domain = body.get('domain')
        channel = body.get('channel')
        orderer = body.get('orderer')
        peer_orgs = body.get('peer_orgs')
        orderer_org = body.get('orderer_org')

        if not all([cluster_name, network_type, peer_orgs, channel, domain]):
            error_msg = "参数缺失"
            logger.warning(error_msg)
            return make_fail_resp(error=error_msg, data=body)

        host_id = ''
        try:
            hosts = HostModel.objects.all()
            for host in hosts:
                if len(host.clusters) < host.capacity:
                    host_id = host.id
                    break
        except Exception as e:
            logger.error(e)
            return make_fail_resp(error='db error')

        if not host_id:
            return {'stat': 401, 'msg': '服务器已满,请联系客服'}  # "server is full, please contact support"

        if network_type not in NETWORK_TYPES:
            error_msg = "Unknown network_type={}".format(network_type)
            logger.warning(error_msg)
            return make_fail_resp(error=error_msg)

        peer_org_list = []

        orderer_org = {
            'name': 'orderer',
            'domain': domain,
            'orderers': ['orderer']
        }

        for peer_org in peer_orgs:
            peer_org = demjson.decode(peer_org)
            if not all([peer_org.get('name'), peer_org.get('peers')]):
                return make_fail_resp(error='参数缺失', data=body)  # "missing parameters"
            peer_org['peers'] = [
                'peer{}'.format(i) for i in range(int(peer_org['peers']))
            ]
            peer_org['domain'] = domain
            peer_org['ca'] = 'ca'
            peer_org_list.append(peer_org)

        cluster = ClusterModel.objects(name=cluster_name)
        if cluster:
            return make_fail_resp(error='链名重复')  # "duplicate chain name"

        config = FabricNetworkConfig(consensus_plugin=consensus_plugin,
                                     network_type=network_type,
                                     channel=channel,
                                     orderer_org=orderer_org,
                                     peer_orgs=peer_org_list)
        config.domain = domain
        if not config.validate():
            return make_fail_resp(error="config not validated",
                                  data=config.get_data())

        if cluster_handler.create(name=cluster_name,
                                  host_id=host_id,
                                  config=config,
                                  user=user):
            logger.debug("cluster POST successfully")
            return make_ok_resp()
        else:
            logger.debug("cluster creation failed using handlder")
            return make_fail_resp(
                error="Failed to create cluster {}".format(cluster_name))
Code Example #19
    def create(self, name, host_id, config, start_port=0, user_id=""):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism
        Args:

            name: name of the cluster
            host_id: id of the host URL
            config: network configuration
            start_port: first service port for cluster, will generate
             if not given
            user_id: user to whom the cluster is applied at creation, if given

        return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, config={}, start_port={}, "
                    "user_id={}".format(name, host_id, config.get_data(),
                                        start_port, user_id))

        worker = self.host_handler.get_active_host_by_id(host_id)
        if not worker:
            logger.error("Cannot find available host to create new network")
            return None

        if ClusterModel.objects(host=worker).count() >= worker.capacity:
            logger.warning("host {} is already full".format(host_id))
            return None

        peer_num = int(config.get_data().get("size", 4))
        ca_num = 2 if peer_num > 1 else 1

        cid = uuid4().hex
        mapped_ports, peer_ports, ca_ports, orderer_ports = \
            self.gen_ports_mapping(peer_num, ca_num, start_port, host_id)
        if not mapped_ports:
            logger.error("mapped_ports={}".format(mapped_ports))
            return None

        env_mapped_ports = dict(
            ((k + '_port').upper(), str(v)) for (k, v) in mapped_ports.items())

        network_type = config['network_type']
        net = {  # net is a blockchain network instance
            'id': cid,
            'name': name,
            'user_id': user_id,
            'worker_api': worker.worker_api,
            'network_type': network_type,  # e.g., fabric-1.0
            'env': env_mapped_ports,
            'status': NETWORK_STATUS_CREATING,
            'mapped_ports': mapped_ports,
            'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
        }
        net.update(config.get_data())

        # try to start one cluster at the host
        cluster = ClusterModel(**net)
        cluster.host = worker
        cluster.save()
        # start cluster creation asynchronously for better user experience.
        t = Thread(target=self._create_cluster,
                   args=(cluster, cid, mapped_ports, worker, config, user_id,
                         peer_ports, ca_ports, orderer_ports))
        t.start()
        return cid
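
A standalone sketch of the env_mapped_ports construction in create(): every mapped port name is upper-cased, given a _PORT suffix, and stringified so it can be passed as container environment variables. Port names and numbers below are hypothetical.

mapped_ports = {"rest": 7050, "grpc": 7051}
env_mapped_ports = dict(
    ((k + "_port").upper(), str(v)) for (k, v) in mapped_ports.items())
print(env_mapped_ports)   # {'REST_PORT': '7050', 'GRPC_PORT': '7051'}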
Code Example #20
File: cluster.py  Project: zjsun/cello
    def create(self, name, host_id, config, start_port=0,
               user_id=SYS_USER):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism
        Args:

            name: name of the cluster
            host_id: id of the host URL
            config: network configuration
            start_port: first service port for cluster, will generate
             if not given
            user_id: user to whom the cluster is applied at creation, if given

        return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, config={}, start_port={}, "
                    "user_id={}".format(name, host_id, config.get_data(),
                                        start_port, user_id))
        size = int(config.get_data().get("size", 4))

        worker = self.host_handler.get_active_host_by_id(host_id)
        if not worker:
            return None

        if worker.type == WORKER_TYPE_VSPHERE:
            vm_params = self.host_handler.get_vm_params_by_id(host_id)
            docker_daemon = vm_params.get(VMIP) + ":2375"
            worker.update({"worker_api": "tcp://" + docker_daemon})
            logger.info(worker)

        if ClusterModel.objects(host=worker).count() >= worker.capacity:
            logger.warning("host {} is already full".format(host_id))
            return None

        worker_api = worker.worker_api
        logger.debug("worker_api={}".format(worker_api))

        ca_num = 1
        request_port_num = \
            len(ORDERER_SERVICE_PORTS.items()) + \
            len(ca_service_ports.items()) * ca_num + \
            size * (len(peer_service_ports.items()))
        logger.debug("request port number {}".format(request_port_num))

        if start_port <= 0:
            ports = self.find_free_start_ports(host_id, request_port_num)
            if not ports:
                logger.warning("No free port is found")
                return None
        else:
            ports = [i for i in
                     range(start_port, start_port + request_port_num)]

        logger.debug("ports {}".format(ports))
        peers_ports, ca_mapped_ports, orderer_service_ports,\
            explorer_mapped_port, mapped_ports = \
            {}, {}, {}, {}, {}

        if size > 1:
            org_num_list = [1, 2]
            peer_num_end = int(size / 2)
        else:
            org_num_list = [1]
            peer_num_end = 1

        logger.debug("org num list {} peer_num_end {}".
                     format(org_num_list, peer_num_end))

        pos = 0
        for org_num in org_num_list:
            for peer_num in range(0, peer_num_end):
                for k, v in peer_service_ports.items():
                    peers_ports[k.format(peer_num, org_num)] = ports[pos]
                    logger.debug("pos {}".format(pos))
                    pos += 1
        # for org_num in org_num_list:
        for k, v in ca_service_ports.items():
            ca_mapped_ports[k.format(1)] = ports[pos]
            logger.debug("pos={}".format(pos))
            pos += 1
        for k, v in ORDERER_SERVICE_PORTS.items():
            orderer_service_ports[k] = ports[pos]
            logger.debug("pos={}".format(pos))
            pos += 1

        for k, v in EXPLORER_PORT.items():
            explorer_mapped_port[k] = \
                v - PEER_SERVICE_PORTS['rest'] + start_port

        mapped_ports.update(peers_ports)
        mapped_ports.update(ca_mapped_ports)
        mapped_ports.update(orderer_service_ports)
        mapped_ports.update(explorer_mapped_port)
        env_mapped_ports = dict(((k + '_port').upper(),
                                 str(v)) for (k, v) in mapped_ports.items())

        network_type = config['network_type']
        cid = uuid4().hex
        net = {  # net is a blockchain network instance
            'id': cid,
            'name': name,
            'user_id': user_id or SYS_CREATOR,  # avoid applied
            'worker_api': worker_api,
            'network_type': network_type,  # e.g., fabric-1.0
            'env': env_mapped_ports
        }
        if network_type == NETWORK_TYPE_FABRIC_V1:  # TODO: fabric v1.0
            net.update({
                'mapped_ports': mapped_ports,
                'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            })
        elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:  # fabric v0.6
            net.update({
                'mapped_ports': mapped_ports,
                'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            })

        net.update(config.get_data())

        # try to start one cluster at the host
        cluster = ClusterModel(**net)
        cluster.host = worker
        cluster.save()

        # from now on, we should be safe

        # start compose project, failed then clean and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[worker.type] \
            .create(cid, mapped_ports, self.host_handler.schema(worker),
                    config=config, user_id=user_id)
        if not containers:
            logger.warning("failed to start cluster={}, then delete"
                           .format(name))
            self.delete(id=cid, record=False, forced=True)
            return None
        for k, v in containers.items():
            container = Container(id=v, name=k, cluster=cluster)
            container.save()

        access_peer, access_ca = '', ''
        if network_type == NETWORK_TYPE_FABRIC_V1:  # fabric v1.0
            access_peer = 'peer0.org1.example.com'
            access_ca = 'ca.example.com'
            # access_explorer = 'explorer'
        elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:  # fabric v0.6
            access_peer = 'vp0'
            access_ca = 'membersrvc'

        peer_host_ip = self._get_service_ip(cid, access_peer)
        ca_host_ip = self._get_service_ip(cid, access_ca)
        # explorer_host_ip = self._get_service_ip(cid, access_explorer)
        # no api_url, then clean and return
        if not peer_host_ip:  # not valid api_url
            logger.error("Error to find peer host url, cleanup")
            self.delete(id=cid, record=False, forced=True)
            return None

        service_urls = {}
        for k, v in peers_ports.items():
            service_urls[k] = "{}:{}".format(peer_host_ip, v)

        for k, v in ca_mapped_ports.items():
            service_urls[k] = "{}:{}".format(ca_host_ip, v)

        for k, v in orderer_service_ports.items():
            service_urls[k] = "{}:{}".format(ca_host_ip, v)

        for k, v in service_urls.items():
            service_port = ServicePort(name=k, ip=v.split(":")[0],
                                       port=int(v.split(":")[1]),
                                       cluster=cluster)
            service_port.save()

        # update api_url, container, and user_id field
        self.db_update_one(
            {"id": cid},
            {
                "user_id": user_id,
                'api_url': service_urls.get('rest', ""),
                'service_url': service_urls
            }
        )

        def check_health_work(cid):
            time.sleep(5)
            self.refresh_health(cid)

        t = Thread(target=check_health_work, args=(cid,))
        t.start()

        logger.info("Create cluster OK, id={}".format(cid))
        return cid
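
A standalone sketch of the service_urls handling near the end of create(): each "ip:port" string is split back into the ip and integer port stored on a ServicePort record. The record save is replaced by a print and the values are hypothetical.

service_urls = {"rest": "10.0.0.5:7050", "grpc": "10.0.0.5:7051"}
for name, url in service_urls.items():
    ip, port = url.split(":")[0], int(url.split(":")[1])
    print(name, ip, port)      # stands in for ServicePort(...).save()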
Code Example #21
File: cluster.py  Project: zoe27/cello
    def create(self, name, host_id, config, start_port=0,
               user_id=""):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism
        Args:

            name: name of the cluster
            host_id: id of the host URL
            config: network configuration
            start_port: first service port for cluster, will generate
             if not given
            user_id: user to whom the cluster is applied at creation, if given

        return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, config={}, start_port={}, "
                    "user_id={}".format(name, host_id, config.get_data(),
                                        start_port, user_id))

        worker = self.host_handler.get_active_host_by_id(host_id)
        if not worker:
            logger.error("Cannot find available host to create new network")
            return None

        if ClusterModel.objects(host=worker).count() >= worker.capacity:
            logger.warning("host {} is already full".format(host_id))
            return None

        if worker.type == WORKER_TYPE_VSPHERE:
            vm_params = self.host_handler.get_vm_params_by_id(host_id)
            docker_daemon = vm_params.get(VMIP) + ":2375"
            worker.update({"worker_api": "tcp://" + docker_daemon})
            logger.info(worker)

        peer_num = int(config.get_data().get("size", 4))
        ca_num = 2 if peer_num > 1 else 1

        cid = uuid4().hex
        mapped_ports, peer_ports, ca_ports, orderer_ports, explorer_ports = \
            self.gen_ports_mapping(peer_num, ca_num, start_port, host_id)
        if not mapped_ports:
            logger.error("mapped_ports={}".format(mapped_ports))
            return None

        env_mapped_ports = dict(((k + '_port').upper(),
                                 str(v)) for (k, v) in mapped_ports.items())

        network_type = config['network_type']
        net = {  # net is a blockchain network instance
            'id': cid,
            'name': name,
            'user_id': user_id,
            'worker_api': worker.worker_api,
            'network_type': network_type,  # e.g., fabric-1.0
            'env': env_mapped_ports,
            'status': NETWORK_STATUS_CREATING,
            'mapped_ports': mapped_ports,
            'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
        }
        net.update(config.get_data())

        # try to start one cluster at the host
        cluster = ClusterModel(**net)
        cluster.host = worker
        cluster.save()

        # start compose project, failed then clean and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[worker.type] \
            .create(cid, mapped_ports, self.host_handler.schema(worker),
                    config=config, user_id=user_id)
        if not containers:
            logger.warning("failed to start cluster={}, then delete"
                           .format(name))
            self.delete(id=cid, record=False, forced=True)
            return None

        # creation done, update the container table in db
        for k, v in containers.items():
            container = Container(id=v, name=k, cluster=cluster)
            container.save()

        # service urls can only be calculated after service is created
        service_urls = self.gen_service_urls(cid, peer_ports, ca_ports,
                                             orderer_ports, explorer_ports)
        # update the service port table in db
        for k, v in service_urls.items():
            service_port = ServicePort(name=k, ip=v.split(":")[0],
                                       port=int(v.split(":")[1]),
                                       cluster=cluster)
            service_port.save()

        # update api_url, container, user_id and status
        self.db_update_one(
            {"id": cid},
            {
                "user_id": user_id,
                'api_url': service_urls.get('rest', ""),
                'service_url': service_urls,
                'status': NETWORK_STATUS_RUNNING
            }
        )

        def check_health_work(cid):
            time.sleep(5)
            self.refresh_health(cid)
        t = Thread(target=check_health_work, args=(cid,))
        t.start()

        host = HostModel.objects.get(id=host_id)
        host.update(add_to_set__clusters=[cid])
        logger.info("Create cluster OK, id={}".format(cid))
        return cid