Example #1
    def start(self, name, worker_api, mapped_ports, log_type, log_level,
              log_server, config):
        try:
            cluster, cluster_name, kube_config, ports_index, nfsServer_ip, \
                consensus = self._get_cluster_info(name, config)

            operation = K8sClusterOperation(kube_config)
            containers = operation.start_cluster(cluster_name, ports_index,
                                                 nfsServer_ip, consensus)

            if not containers:
                logger.warning(
                    "failed to start cluster={}, stopping it".format(
                        cluster_name))
                operation.stop_cluster(cluster_name, ports_index, nfsServer_ip,
                                       consensus)
                return None

            service_urls = self.get_services_urls(name)
            # Update the service port table in db
            for k, v in service_urls.items():
                service_port = ServicePort(name=k,
                                           ip=v.split(":")[0],
                                           port=int(v.split(":")[1]),
                                           cluster=cluster)
                service_port.save()
            for k, v in containers.items():
                container = Container(id=v, name=k, cluster=cluster)
                container.save()

        except Exception as e:
            logger.error("Failed to start Kubernetes Cluster: {}".format(e))
            return None
        return containers
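The start flow above keeps the database in sync with what the agent returns: every "ip:port" service URL becomes a ServicePort row and every container id a Container row. A minimal, self-contained sketch of that bookkeeping step, with stand-in dictionaries (the key/value shapes are assumptions based on how the example consumes them):

# Stand-in data shaped like the example's service_urls and containers.
service_urls = {"peer0_org1_grpc": "10.0.0.5:30001",
                "ca_org1_ecap": "10.0.0.5:30054"}
containers = {"peer0-org1": "a1b2c3d4"}

# Split each "ip:port" once from the right, since ServicePort stores
# a separate ip string and an integer port.
service_rows = [{"name": name,
                 "ip": url.rsplit(":", 1)[0],
                 "port": int(url.rsplit(":", 1)[1])}
                for name, url in service_urls.items()]
container_rows = [{"id": v, "name": k} for k, v in containers.items()]
print(service_rows)
print(container_rows)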
Example #2
    def start(self, name, worker_api, mapped_ports, log_type, log_level,
              log_server, config):
        try:
            cluster, cluster_name, kube_config, ports_index, nfsServer_ip, \
                consensus = self._get_cluster_info(name, config)

            operation = K8sClusterOperation(kube_config)
            cluster_name = self.trim_cluster_name(cluster_name)
            containers = operation.start_cluster(cluster_name, ports_index,
                                                 nfsServer_ip, consensus,
                                                 config.get('network_type'))

            if not containers:
                logger.warning("failed to start cluster={}, stop it again."
                               .format(cluster_name))
                operation.stop_cluster(cluster_name, ports_index,
                                       nfsServer_ip, consensus,
                                       config.get('network_type'))
                return None

            service_urls = self.get_services_urls(name)
            # Update the service port table in db
            for k, v in service_urls.items():
                service_port = ServicePort(name=k, ip=v.split(":")[0],
                                           port=int(v.split(":")[1]),
                                           cluster=cluster)
                service_port.save()
            for k, v in containers.items():
                container = Container(id=v, name=k, cluster=cluster)
                container.save()

        except Exception as e:
            logger.error("Failed to start Kubernetes Cluster: {}".format(e))
            return None
        return containers
Example #3
    def _create_cluster(self, cluster, cid, mapped_ports, worker, config,
                        user_id, peer_ports, ca_ports, orderer_ports,
                        explorer_ports):
        # start compose project; if it fails, clean up and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[worker.type] \
            .create(cid, mapped_ports, self.host_handler.schema(worker),
                    config=config, user_id=user_id)
        if not containers:
            logger.warning("failed to start cluster={}, then delete"
                           .format(cluster.name))
            self.delete(id=cid, record=False, forced=True)
            return None

        # creation done, update the container table in db
        for k, v in containers.items():
            container = Container(id=v, name=k, cluster=cluster)
            container.save()

        # service urls can only be calculated after service is created
        if worker.type == WORKER_TYPE_K8S:
            service_urls = self.cluster_agents[worker.type]\
                               .get_services_urls(cid)
        else:
            service_urls = self.gen_service_urls(cid, peer_ports, ca_ports,
                                                 orderer_ports, explorer_ports)
        # update the service port table in db
        for k, v in service_urls.items():
            service_port = ServicePort(name=k, ip=v.split(":")[0],
                                       port=int(v.split(":")[1]),
                                       cluster=cluster)
            service_port.save()

        # update api_url, container, user_id and status
        self.db_update_one(
            {"id": cid},
            {
                "user_id": user_id,
                'api_url': service_urls.get('rest', ""),
                'service_url': service_urls,
                'status': NETWORK_STATUS_RUNNING
            }
        )

        def check_health_work(cid):
            time.sleep(60)
            self.refresh_health(cid)
        t = Thread(target=check_health_work, args=(cid,))
        t.start()

        host = HostModel.objects.get(id=worker.id)
        host.update(add_to_set__clusters=[cid])
        logger.info("Create cluster OK, id={}".format(cid))
Example #4
    def _create_cluster(self, cluster, cid, mapped_ports, worker, config,
                        user_id, peer_ports, ca_ports, orderer_ports):
        # start compose project; if it fails, clean up and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[worker.type] \
            .create(cid, mapped_ports, self.host_handler.schema(worker),
                    config=config, user_id=user_id)
        if not containers:
            logger.warning("failed to start cluster={}, then delete"
                           .format(cluster.name))
            self.delete(id=cid, record=False, forced=True)
            return None

        # creation done, update the container table in db
        for k, v in containers.items():
            container = Container(id=v, name=k, cluster=cluster.as_pointer)
            container.save()

        # service urls can only be calculated after service is created
        if worker.type == WORKER_TYPE_K8S:
            service_urls = self.cluster_agents[worker.type]\
                               .get_services_urls(cid)
        else:
            service_urls = self.gen_service_urls(cid, peer_ports,
                                                 ca_ports, orderer_ports)
        # update the service port table in db
        for k, v in service_urls.items():
            service_port = ServicePort(name=k, ip=v.split(":")[0],
                                       port=int(v.split(":")[1]),
                                       cluster=cluster.as_pointer)
            service_port.save()

        # update api_url, container, user_id and status
        self.db_update_one(
            {"id": cid},
            {
                "user_id": user_id,
                'api_url': service_urls.get('rest', ""),
                'service_url': service_urls,
                'status': NETWORK_STATUS_RUNNING
            }
        )

        def check_health_work(cid):
            time.sleep(60)
            self.refresh_health(cid)
        t = Thread(target=check_health_work, args=(cid,))
        t.start()

        # host = HostModel.Query.get(id=worker.id)
        # host.update(add_to_set__clusters=[cid])
        logger.info("Create cluster OK, id={}".format(cid))
Example #5
    def _get_cluster_info(self, cid, config):
        cluster = ClusterModel.objects.get(id=cid)

        cluster_name = cluster.name
        kube_config = KubernetesOperation()._get_config_from_params(
            cluster.host.k8s_param)

        clusters_exists = ClusterModel.objects(host=cluster.host)
        ports_index = [
            service.port
            for service in ServicePort.objects(cluster__in=clusters_exists)
        ]

        nfsServer_ip = cluster.host.k8s_param.get('K8SNfsServer')
        consensus = config['consensus_plugin']

        return cluster, cluster_name, kube_config, ports_index, \
            nfsServer_ip, consensus
Example #6
    def find_free_start_ports(self, host_id, number):
        """ Find the first available port for a new cluster api

        This is NOT lock-free. Should keep simple, fast and safe!

        Check existing cluster records in the host, find available one.

        :param host_id: id of the host
        :param number: Number of ports to get
        :return: The port list, e.g., [7050, 7150, ...]
        """
        logger.debug("Find {} start ports for host {}".format(number, host_id))
        if number <= 0:
            logger.warning("number {} <= 0".format(number))
            return []
        host = self.host_handler.get_by_id(host_id)
        if not host:
            logger.warning("Cannot find host with id={}".format(host_id))
            return []

        clusters_exists = ClusterModel.objects(host=host)
        ports_existed = [
            service.port
            for service in ServicePort.objects(cluster__in=clusters_exists)
        ]

        logger.debug("The ports existed: {}".format(ports_existed))
        if len(ports_existed) + number >= 1000:
            logger.warning("Too many ports are already in use.")
            return []
        candidates = [
            CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
            for i in range(len(ports_existed) + number)
        ]

        result = list(filter(lambda x: x not in ports_existed, candidates))

        logger.debug("Free ports are {}".format(result[:number]))
        return result[:number]
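The selection logic above is plain arithmetic: candidate ports form a sequence starting at CLUSTER_PORT_START with step CLUSTER_PORT_STEP, and generating len(ports_existed) + number candidates guarantees at least number free ones, provided existing ports were allocated from the same sequence. A pure-Python sketch (the two constant values are made up for illustration):

CLUSTER_PORT_START = 7050
CLUSTER_PORT_STEP = 100

def find_free(existing, number):
    # over-generate candidates so that, even if every existing port
    # collides with one candidate, `number` of them remain free
    candidates = [CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
                  for i in range(len(existing) + number)]
    return [p for p in candidates if p not in existing][:number]

print(find_free({7050, 7250}, 3))  # -> [7150, 7350, 7450]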
Example #7
    def stop(self, name, worker_api, mapped_ports, log_type, log_level,
             log_server, config):
        try:
            cluster, cluster_name, kube_config, ports_index, nfsServer_ip, \
                consensus = self._get_cluster_info(name, config)

            operation = K8sClusterOperation(kube_config)
            operation.stop_cluster(cluster_name, ports_index, nfsServer_ip,
                                   consensus)

            cluster_ports = ServicePort.objects(cluster=cluster)
            for port in cluster_ports:
                port.delete()
            cluster_containers = Container.objects(cluster=cluster)
            for container in cluster_containers:
                container.delete()

        except Exception as e:
            logger.error("Failed to stop Kubernetes Cluster: {}".format(e))
            return False
        return True
Example #8
    def find_free_start_ports(self, host_id, number):
        """ Find the first available port for a new cluster api

        This is NOT lock-free. Should keep simple, fast and safe!

        Check existing cluster records in the host, find available one.

        :param host_id: id of the host
        :param number: Number of ports to get
        :return: The port list, e.g., [7050, 7150, ...]
        """
        logger.debug("Find {} start ports for host {}".format(number, host_id))
        if number <= 0:
            logger.warning("number {} <= 0".format(number))
            return []
        host = self.host_handler.get_by_id(host_id)
        if not host:
            logger.warning("Cannot find host with id={}".format(host_id))
            return []

        clusters_exists = ClusterModel.objects(host=host)
        ports_existed = [service.port for service in
                         ServicePort.objects(cluster__in=clusters_exists)]

        logger.debug("The ports existed: {}".format(ports_existed))
        if len(ports_existed) + number >= 1000:
            logger.warning("Too many ports are already in use.")
            return []
        candidates = [CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
                      for i in range(len(ports_existed) + number)]

        result = list(filter(lambda x: x not in ports_existed, candidates))

        logger.debug("Free ports are {}".format(result[:number]))
        return result[:number]
Example #9
    def delete(self, cid, worker_api, config):
        try:
            cluster, cluster_name, kube_config, ports_index, nfsServer_ip,\
                consensus = self._get_cluster_info(cid, config)

            operation = K8sClusterOperation(kube_config)
            operation.delete_cluster(cluster_name, ports_index, nfsServer_ip,
                                     consensus)

            # delete ports for clusters
            cluster_ports = ServicePort.objects(cluster=cluster)
            for port in cluster_ports:
                port.delete()
            cluster_containers = Container.objects(cluster=cluster)
            for container in cluster_containers:
                container.delete()

        except Exception as e:
            logger.error("Failed to delete Kubernetes Cluster: {}".format(e))
            return False
        return True
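Since iterating an empty queryset is already a no-op, the truthiness guards around those loops were redundant, and the cleanup can also be collapsed into queryset-level bulk deletes. A self-contained mongoengine sketch, assuming models shaped like the ones used above (the field sets are guesses, and connect() needs a reachable MongoDB):

from mongoengine import (connect, Document, StringField, IntField,
                         ReferenceField)

connect("cello_demo")  # assumes a local MongoDB instance

class Cluster(Document):
    name = StringField()

class ServicePort(Document):
    name = StringField()
    ip = StringField()
    port = IntField()
    cluster = ReferenceField(Cluster)

class Container(Document):
    name = StringField()
    cluster = ReferenceField(Cluster)

def cleanup(cluster):
    # one bulk remove per collection instead of per-document deletes
    ServicePort.objects(cluster=cluster).delete()
    Container.objects(cluster=cluster).delete()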
Example #10
    def create(self, name, domain, ca, peers, cluster, user):

        host = cluster.host
        peer_services = []
        if not isinstance(peers, list):
            peers = ['peer{}'.format(_) for _ in range(int(peers))]
        for peer in peers:
            for k in peer_service_ports:
                peer_services.append(k.format(peer, name))
        ca_service = ca_service_ports.format(ca, name)
        map_ports, ca_port, peer_ports = self.generate_ports_mapping(
            peer_services, ca_service, host)
        if not map_ports:
            logger.error("mapped_ports={}".format(map_ports))
            return None
        cluster.update(add_to_set__ports=list(map_ports.values()))

        # TODO: extract this into a helper method
        # worker_api looks like "tcp://<ip>:<port>"; grab the ip part
        host_ip = host.worker_api.split(':')[1][2:]
        original_orgs = OrgModel.objects(cluster=cluster,
                                         org_type='peer').all().count()
        new_org = OrgModel(name=name,
                           alias='org{}'.format(original_orgs + 1),
                           domain=domain,
                           org_type='peer',
                           cluster=cluster)
        new_org.save()
        channels = ChannelModel.objects(cluster=cluster)
        channels[0].update(add_to_set__orgs=[new_org])
        for pos, peer in enumerate(peers):
            node = Node(
                name=peer,
                alias='peer{}'.format(pos),
                node_type='peer',
                ip=IP_MAPPINGS[host_ip],
                org=new_org,
                ports={
                    'grpc':
                    map_ports.get("{}_{}_grpc".format(peer, new_org.name)),
                    'event':
                    map_ports.get("{}_{}_event".format(peer, new_org.name))
                })
            node.save()

        ca_node = Node(name=ca,
                       alias='ca',
                       ip=IP_MAPPINGS[host_ip],
                       node_type='ca',
                       org=new_org,
                       ports={
                           'ecap':
                           map_ports.get('{}_{}_ecap'.format(ca, new_org.name))
                       })
        ca_node.save()
        compose_file_handler = ComposeHandler(cluster)
        compose_file = compose_file_handler.add_org(new_org)
        if not compose_file:
            logger.error('generate config file failed')
            new_org.delete()
            return None
        containers = self.compose_up(cluster=cluster,
                                     host=host,
                                     file=compose_file)
        config_path = BLOCKCHAIN_CONFIG_FILES_PATH
        cluster_info = compose_file_handler.generate_chaincode_config()

        if not containers:
            logger.warning("failed to create container")
            return {}
        else:
            from tasks import send_cluster_info
            time.sleep(20)
            send_cluster_info(config_path=config_path,
                              cid=str(cluster.id),
                              cluster_info=cluster_info,
                              user_id=user)

            for k, v in containers.items():
                container = Container(id=v, name=k, cluster=cluster)
                container.save()

            service_urls = cluster.service_url

            for k, v in service_urls.items():
                service_port = ServicePort(name=k,
                                           ip=v.split(":")[0],
                                           port=int(v.split(":")[1]),
                                           cluster=cluster)
                service_port.save()
            logger.debug("Created containers={}".format(containers))
            return containers
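The TODO comment in this example asks for a helper around host.worker_api.split(':')[1][2:], which extracts the host part of a URL such as "tcp://192.168.1.10:2375". A sketch of such a helper using only the standard library (the function name is made up):

from urllib.parse import urlparse

def worker_api_host(worker_api):
    # equivalent to worker_api.split(':')[1][2:], but readable and
    # tolerant of URLs without an explicit port
    return urlparse(worker_api).hostname

assert worker_api_host("tcp://192.168.1.10:2375") == "192.168.1.10"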
Example #11
    def create(self, name, host_id, config, start_port=0,
               user_id=SYS_USER):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism
        Args:

            name: name of the cluster
            host_id: id of the host to run the cluster on
            config: network configuration
            start_port: first service port for the cluster; generated
             if not given
            user_id: id of the user the cluster is assigned to

        return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, config={}, start_port={}, "
                    "user_id={}".format(name, host_id, config.get_data(),
                                        start_port, user_id))
        size = int(config.get_data().get("size", 4))

        worker = self.host_handler.get_active_host_by_id(host_id)
        if not worker:
            return None

        if worker.type == WORKER_TYPE_VSPHERE:
            vm_params = self.host_handler.get_vm_params_by_id(host_id)
            docker_daemon = vm_params.get(VMIP) + ":2375"
            worker.update({"worker_api": "tcp://" + docker_daemon})
            logger.info(worker)

        if ClusterModel.objects(host=worker).count() >= worker.capacity:
            logger.warning("host {} is already full".format(host_id))
            return None

        worker_api = worker.worker_api
        logger.debug("worker_api={}".format(worker_api))

        ca_num = 1
        request_port_num = \
            len(ORDERER_SERVICE_PORTS.items()) + \
            len(ca_service_ports.items()) * ca_num + \
            size * (len(peer_service_ports.items()))
        logger.debug("request port number {}".format(request_port_num))

        if start_port <= 0:
            ports = self.find_free_start_ports(host_id, request_port_num)
            if not ports:
                logger.warning("No free port is found")
                return None
        else:
            ports = [i for i in
                     range(start_port, start_port + request_port_num)]

        logger.debug("ports {}".format(ports))
        peers_ports, ca_mapped_ports, orderer_service_ports,\
            explorer_mapped_port, mapped_ports = \
            {}, {}, {}, {}, {}

        if size > 1:
            org_num_list = [1, 2]
            peer_num_end = int(size / 2)
        else:
            org_num_list = [1]
            peer_num_end = 1

        logger.debug("org num list {} peer_num_end {}".
                     format(org_num_list, peer_num_end))

        pos = 0
        for org_num in org_num_list:
            for peer_num in range(0, peer_num_end):
                for k, v in peer_service_ports.items():
                    peers_ports[k.format(peer_num, org_num)] = ports[pos]
                    logger.debug("pos {}".format(pos))
                    pos += 1
        for k, v in ca_service_ports.items():
            ca_mapped_ports[k.format(1)] = ports[pos]
            logger.debug("pos={}".format(pos))
            pos += 1
        for k, v in ORDERER_SERVICE_PORTS.items():
            orderer_service_ports[k] = ports[pos]
            logger.debug("pos={}".format(pos))
            pos += 1

        for k, v in EXPLORER_PORT.items():
            explorer_mapped_port[k] = \
                v - PEER_SERVICE_PORTS['rest'] + start_port

        mapped_ports.update(peers_ports)
        mapped_ports.update(ca_mapped_ports)
        mapped_ports.update(orderer_service_ports)
        mapped_ports.update(explorer_mapped_port)
        env_mapped_ports = dict(((k + '_port').upper(),
                                 str(v)) for (k, v) in mapped_ports.items())

        network_type = config['network_type']
        cid = uuid4().hex
        net = {  # net is a blockchain network instance
            'id': cid,
            'name': name,
            'user_id': user_id or SYS_CREATOR,  # avoid applied
            'worker_api': worker_api,
            'network_type': network_type,  # e.g., fabric-1.0
            'env': env_mapped_ports
        }
        # fabric v1.0 and fabric v0.6 need the same fields here
        if network_type in (NETWORK_TYPE_FABRIC_V1,
                            NETWORK_TYPE_FABRIC_PRE_V1):
            net.update({
                'mapped_ports': mapped_ports,
                'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
            })

        net.update(config.get_data())

        # try to start one cluster at the host
        cluster = ClusterModel(**net)
        cluster.host = worker
        cluster.save()

        # from now on, we should be safe

        # start compose project; if it fails, clean up and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[worker.type] \
            .create(cid, mapped_ports, self.host_handler.schema(worker),
                    config=config, user_id=user_id)
        if not containers:
            logger.warning("failed to start cluster={}, then delete"
                           .format(name))
            self.delete(id=cid, record=False, forced=True)
            return None
        for k, v in containers.items():
            container = Container(id=v, name=k, cluster=cluster)
            container.save()

        access_peer, access_ca = '', ''
        if network_type == NETWORK_TYPE_FABRIC_V1:  # fabric v1.0
            access_peer = 'peer0.org1.example.com'
            access_ca = 'ca.example.com'
            # access_explorer = 'explorer'
        elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:  # fabric v0.6
            access_peer = 'vp0'
            access_ca = 'membersrvc'

        peer_host_ip = self._get_service_ip(cid, access_peer)
        ca_host_ip = self._get_service_ip(cid, access_ca)
        # explorer_host_ip = self._get_service_ip(cid, access_explorer)
        # no api_url, then clean and return
        if not peer_host_ip:  # not valid api_url
            logger.error("Error to find peer host url, cleanup")
            self.delete(id=cid, record=False, forced=True)
            return None

        service_urls = {}
        for k, v in peers_ports.items():
            service_urls[k] = "{}:{}".format(peer_host_ip, v)

        for k, v in ca_mapped_ports.items():
            service_urls[k] = "{}:{}".format(ca_host_ip, v)

        for k, v in orderer_service_ports.items():
            service_urls[k] = "{}:{}".format(ca_host_ip, v)

        for k, v in service_urls.items():
            service_port = ServicePort(name=k, ip=v.split(":")[0],
                                       port=int(v.split(":")[1]),
                                       cluster=cluster)
            service_port.save()

        # update api_url, container, and user_id field
        self.db_update_one(
            {"id": cid},
            {
                "user_id": user_id,
                'api_url': service_urls.get('rest', ""),
                'service_url': service_urls
            }
        )

        def check_health_work(cid):
            time.sleep(5)
            self.refresh_health(cid)

        t = Thread(target=check_health_work, args=(cid,))
        t.start()

        logger.info("Create cluster OK, id={}".format(cid))
        return cid
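One detail worth isolating from the long method above is the env_mapped_ports expression, which turns every mapped service port into an upper-cased *_PORT environment variable with a string value, the form a compose environment expects. A standalone sketch with made-up keys:

mapped_ports = {"peer0_org1_grpc": 7050, "ca_ecap": 7054}
env_mapped_ports = {(k + "_port").upper(): str(v)
                    for k, v in mapped_ports.items()}
print(env_mapped_ports)
# {'PEER0_ORG1_GRPC_PORT': '7050', 'CA_ECAP_PORT': '7054'}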
Example #12
    def _create_cluster(self, cluster, cid, mapped_ports, worker, config,
                        user, peer_ports, ca_ports, orderer_ports):
        # start compose project; if it fails, clean up and return
        logger.debug("Start compose project with name={}".format(cid))
        config_path = BLOCKCHAIN_CONFIG_FILES_PATH
        logger.info('config_path:{}'.format(config_path))
        compose_file_handler = ComposeHandler(cluster)
        compose_file_handler.create_cluster()

        containers = self.cluster_agents[worker.host_type] \
            .create(cid, mapped_ports, self.host_handler.schema(worker),
                    config=config, user_id=user)
        if not containers:
            logger.warning("failed to start cluster={}, then delete"
                           .format(cluster.name))
            self.delete(id=cid, record=False, forced=True)
            worker.update(pull__clusters=cid)

            cluster.delete()
            return None

        # creation done, update the container table in db
        for k, v in containers.items():
            container = Container(id=v, name=k, cluster=cluster)
            container.save()

        service_urls = ClusterModel.objects.get(id=cid).service_url

        for k, v in service_urls.items():
            service_port = ServicePort(name=k, ip=v.split(":")[0],
                                       port=int(v.split(":")[1]),
                                       cluster=cluster)
            service_port.save()

        pos = 0
        cluster = None
        peer_join_stat = False

        while pos <= 5:
            try:
                cluster = ClusterModel.objects.get(id=cid)
            except Exception as e:
                logger.error(e)
            if cluster and cluster.status == 'running':
                peer_join_stat = True
                break
            time.sleep(2)
            pos += 1

        if self.refresh_health(cid) and peer_join_stat:
            cluster_info = compose_file_handler.generate_chaincode_config()
            logger.info('{}'.format(cluster_info))
            from tasks import send_cluster_info
            time.sleep(10)
            send_cluster_info(config_path=config_path, cid=cid,
                              cluster_info=cluster_info,
                              user_id=str(user.id))
            # org = OrgModel.objects.get(cluster=cluster,org_type='peer')
            # body = {
            #     "BlockchainSign": str(cluster.id),
            #     "ChannelId": config.channel,
            #     "OrgId": str(org.alias),
            #     "UserId": str(user.id)
            # }
            # logger.info('add user info:{}'.format(body))
            # if not send_new_user_info(str(user.id), body=body):
            #     return {'stat': 400, 'msg': 'failed to add user'}
            host_obj = HostModel.objects.get(id=worker.id)
            host_obj.update(add_to_set__clusters=[cid])
            logger.info("Create cluster OK, id={}".format(cid))
            return True
        else:
            logger.info('peer join channel failed')
            return False
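The while loop in the middle of this method is a bounded poll: re-read the cluster record up to six times, two seconds apart, until its status reaches 'running'. A generic, self-contained version of that pattern, with fetch_status standing in for the ClusterModel lookup:

import time

def wait_for_status(fetch_status, wanted="running", attempts=6, delay=2):
    # re-check up to `attempts` times, sleeping between tries
    for _ in range(attempts):
        if fetch_status() == wanted:
            return True
        time.sleep(delay)
    return False

statuses = iter(["creating", "creating", "running"])
print(wait_for_status(lambda: next(statuses), delay=0))  # True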
Example #13
    def create(self, name, host_id, config, start_port=0,
               user_id=""):
        """ Create a cluster based on given data

        TODO: maybe need other id generation mechanism
        Args:

            name: name of the cluster
            host_id: id of the host to run the cluster on
            config: network configuration
            start_port: first service port for the cluster; generated
             if not given
            user_id: id of the user the cluster is assigned to

        return: Id of the created cluster or None
        """
        logger.info("Create cluster {}, host_id={}, config={}, start_port={}, "
                    "user_id={}".format(name, host_id, config.get_data(),
                                        start_port, user_id))

        worker = self.host_handler.get_active_host_by_id(host_id)
        if not worker:
            logger.error("Cannot find available host to create new network")
            return None

        if ClusterModel.objects(host=worker).count() >= worker.capacity:
            logger.warning("host {} is already full".format(host_id))
            return None

        if worker.type == WORKER_TYPE_VSPHERE:
            vm_params = self.host_handler.get_vm_params_by_id(host_id)
            docker_daemon = vm_params.get(VMIP) + ":2375"
            worker.update({"worker_api": "tcp://" + docker_daemon})
            logger.info(worker)

        peer_num = int(config.get_data().get("size", 4))
        ca_num = 2 if peer_num > 1 else 1

        cid = uuid4().hex
        mapped_ports, peer_ports, ca_ports, orderer_ports, explorer_ports = \
            self.gen_ports_mapping(peer_num, ca_num, start_port, host_id)
        if not mapped_ports:
            logger.error("mapped_ports={}".format(mapped_ports))
            return None

        env_mapped_ports = dict(((k + '_port').upper(),
                                 str(v)) for (k, v) in mapped_ports.items())

        network_type = config['network_type']
        net = {  # net is a blockchain network instance
            'id': cid,
            'name': name,
            'user_id': user_id,
            'worker_api': worker.worker_api,
            'network_type': network_type,  # e.g., fabric-1.0
            'env': env_mapped_ports,
            'status': NETWORK_STATUS_CREATING,
            'mapped_ports': mapped_ports,
            'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
        }
        net.update(config.get_data())

        # try to start one cluster at the host
        cluster = ClusterModel(**net)
        cluster.host = worker
        cluster.save()

        # start compose project; if it fails, clean up and return
        logger.debug("Start compose project with name={}".format(cid))
        containers = self.cluster_agents[worker.type] \
            .create(cid, mapped_ports, self.host_handler.schema(worker),
                    config=config, user_id=user_id)
        if not containers:
            logger.warning("failed to start cluster={}, then delete"
                           .format(name))
            self.delete(id=cid, record=False, forced=True)
            return None

        # creation done, update the container table in db
        for k, v in containers.items():
            container = Container(id=v, name=k, cluster=cluster)
            container.save()

        # service urls can only be calculated after service is created
        service_urls = self.gen_service_urls(cid, peer_ports, ca_ports,
                                             orderer_ports, explorer_ports)
        # update the service port table in db
        for k, v in service_urls.items():
            service_port = ServicePort(name=k, ip=v.split(":")[0],
                                       port=int(v.split(":")[1]),
                                       cluster=cluster)
            service_port.save()

        # update api_url, container, user_id and status
        self.db_update_one(
            {"id": cid},
            {
                "user_id": user_id,
                'api_url': service_urls.get('rest', ""),
                'service_url': service_urls,
                'status': NETWORK_STATUS_RUNNING
            }
        )

        def check_health_work(cid):
            time.sleep(5)
            self.refresh_health(cid)
        t = Thread(target=check_health_work, args=(cid,))
        t.start()

        host = HostModel.objects.get(id=host_id)
        host.update(add_to_set__clusters=[cid])
        logger.info("Create cluster OK, id={}".format(cid))
        return cid