def create(self, cid, mapped_ports, host, user_id="",
           fabric_version=FABRIC_VERSION[0],
           consensus_plugin=CONSENSUS_PLUGINS[0],
           consensus_mode=CONSENSUS_MODES[0],
           size=CLUSTER_SIZES[0]):
    """ Start the compose project for a cluster on the given host.

    TODO: maybe need other id generation mechanism

    :param cid: id of the cluster, used as the compose project name
    :param mapped_ports: mapping of service name to exposed host port
    :param host: host object on which to start the cluster
    :param user_id: user_id of the cluster if start to be applied
    :param fabric_version: fabric images version
    :param consensus_plugin: type of the consensus plugin
    :param consensus_mode: mode of the consensus
    :param size: size of the cluster, int type
    :return: list of created containers, or [] on failure
    """
    # from now on, we should be safe
    # start compose project, failed then clean and return
    logger.debug("Start compose project with name={}".format(cid))
    containers = compose_up(
        name=cid, mapped_ports=mapped_ports, host=host,
        fabric_version=fabric_version,
        consensus_plugin=consensus_plugin,
        consensus_mode=consensus_mode,
        cluster_size=size)
    # a partial start (fewer containers than requested) also counts as failure
    if not containers or len(containers) != size:
        logger.warning("failed to create cluster, with container={}"
                       .format(containers))
        return []
    else:
        return containers
def create(self, cid, mapped_ports, host, config, user_id=""):
    """ Start the compose project for a cluster on the given host.

    TODO: maybe need other id generation mechanism

    :param cid: id of the cluster, used as the compose project name
    :param mapped_ports: mapping of service name to exposed host port
    :param host: host object on which to start the cluster
    :param config: network config of the cluster
    :param user_id: user_id of the cluster if start to be applied
    :return: containers created by compose_up, or {} on failure
    """
    # from now on, we should be safe
    # start compose project, failed then clean and return
    logger.debug("Start compose project with name={}".format(cid))
    containers = compose_up(name=cid, mapped_ports=mapped_ports,
                            host=host, config=config)
    if not containers:
        logger.warning(
            "failed to create cluster, with container={}".format(
                containers))
        return {}
    else:
        logger.debug("Created containers={}".format(containers))
        return containers
def create(self, cid, mapped_ports, host, config, user_id=""):
    """ Start the compose project for a cluster on the given host.

    TODO: maybe need other id generation mechanism

    :param cid: id of the cluster, used as the compose project name
    :param mapped_ports: mapping of service name to exposed host port
    :param host: host object on which to start the cluster
    :param config: network config of the cluster
    :param user_id: user_id of the cluster if start to be applied
    :return: containers created by compose_up, or {} on failure
    """
    # from now on, we should be safe
    # start compose project, failed then clean and return
    logger.debug("Start compose project with name={}".format(cid))
    containers = compose_up(name=cid, mapped_ports=mapped_ports,
                            host=host, config=config)
    if not containers:
        logger.warning("failed to create cluster, with container={}"
                       .format(containers))
        return {}
    else:
        logger.debug("Created containers={}".format(containers))
        return containers
def create(self, cid, mapped_ports, host, config, user_id=""):
    """ Start the compose project for a cluster on the given host.

    :param cid: id of the cluster, used as the compose project name
    :param mapped_ports: mapping of service name to exposed host port
    :param host: host object on which to start the cluster
    :param config: network config of the cluster
    :param user_id: user_id of the cluster if start to be applied
    :return: containers created by compose_up, or [] on failure
    """
    logger.debug("Start compose project with name={}".format(cid))
    containers = compose_up(name=cid, mapped_ports=mapped_ports,
                            host=host, config=config)
    if not containers:
        # NOTE(review): sibling create() variants return {} on failure
        # instead of []; callers should rely only on falsiness here
        logger.warning("failed to create cluster, with container={}"
                       .format(containers))
        return []
    return containers
def create(self, name, host_id, start_port=0, user_id="",
           consensus_plugin=CONSENSUS_PLUGINS[0],
           consensus_mode=CONSENSUS_MODES[0], size=CLUSTER_SIZES[0]):
    """ Create a cluster based on given data

    Registers the cluster in the active collection, reserves a slot on
    the host, starts the compose project, records the service URLs and
    kicks off a delayed background health check.

    TODO: maybe need other id generation mechanism

    :param name: name of the cluster
    :param host_id: id of the host URL
    :param start_port: first service port for cluster, will generate
     if not given
    :param user_id: user_id of the cluster if start to be applied
    :param consensus_plugin: type of the consensus type
    :param consensus_mode: mode of the consensus
    :param size: size of the cluster, int type
    :return: Id of the created cluster or None
    """
    logger.info("Create cluster {}, host_id={}, consensus={}/{}, "
                "size={}".format(name, host_id, consensus_plugin,
                                 consensus_mode, size))

    # reject when the host is missing, inactive, or already at capacity
    h = self.host_handler.get_active_host_by_id(host_id)
    if not h:
        return None

    if len(h.get("clusters")) >= h.get("capacity"):
        logger.warning("host {} is full already".format(host_id))
        return None

    daemon_url = h.get("daemon_url")
    logger.debug("daemon_url={}".format(daemon_url))

    # auto-pick a free starting port when the caller gave none (<= 0)
    if start_port <= 0:
        ports = self.find_free_start_ports(host_id, 1)
        if not ports:
            logger.warning("No free port is found")
            return None
        start_port = ports[0]

    # Shift every service port so that the 'rest' peer port lands on
    # start_port; all other ports keep their relative offsets.
    # NOTE(review): CA ports are also offset against
    # PEER_SERVICE_PORTS['rest'] (not a CA base port) — presumably
    # intentional so both groups share one contiguous range; confirm.
    peer_mapped_ports, ca_mapped_ports, mapped_ports = {}, {}, {}
    for k, v in PEER_SERVICE_PORTS.items():
        peer_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
    for k, v in CA_SERVICE_PORTS.items():
        ca_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
    mapped_ports.update(peer_mapped_ports)
    mapped_ports.update(ca_mapped_ports)

    # initial cluster document; 'id' is filled in after insertion
    c = {
        'id': '',
        'name': name,
        'user_id': user_id or SYS_CREATOR,  # avoid applied
        'host_id': host_id,
        'daemon_url': daemon_url,
        'consensus_plugin': consensus_plugin,
        'consensus_mode': consensus_mode,
        'create_ts': datetime.datetime.now(),
        'apply_ts': '',
        'release_ts': '',
        'duration': '',
        'mapped_ports': mapped_ports,
        'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
        'size': size,
        'containers': [],
        'status': 'running',
        'health': ''
    }
    # the Mongo ObjectId doubles as the cluster id (stringified below)
    uuid = self.col_active.insert_one(c).inserted_id  # object type
    cid = str(uuid)
    self.col_active.update_one({"_id": uuid}, {"$set": {"id": cid}})
    # try to add one cluster to host
    h = self.host_handler.db_update_one({"id": host_id}, {"$addToSet": {
        "clusters": cid
    }})
    # re-check capacity after the update to catch concurrent creations;
    # roll back both the cluster doc and the host membership on overflow
    if not h or len(h.get("clusters")) > h.get("capacity"):
        self.col_active.delete_one({"id": cid})
        self.host_handler.db_update_one({"id": host_id}, {"$pull": {
            "clusters": cid
        }})
        return None

    # from now on, we should be safe
    # start compose project, failed then clean and return
    logger.debug("Start compose project with name={}".format(cid))
    containers = compose_up(name=cid, mapped_ports=mapped_ports, host=h,
                            consensus_plugin=consensus_plugin,
                            consensus_mode=consensus_mode,
                            cluster_size=size)
    # a partial start (fewer containers than requested) is a failure too
    if not containers or len(containers) != size:
        logger.warning(
            "failed containers={}, then delete cluster".format(containers))
        self.delete(id=cid, record=False, forced=True)
        return None

    # resolve the IPs of the lead peer and the membership service
    peer_host_ip = self._get_service_ip(cid, 'vp0')
    ca_host_ip = self._get_service_ip(cid, 'membersrvc')

    # no api_url, then clean and return
    if not peer_host_ip:  # not valid api_url
        logger.error("Error to find peer host url, cleanup")
        self.delete(id=cid, record=False, forced=True)
        return None

    # build "<ip>:<port>" endpoints for every mapped service
    service_urls = {}
    for k, v in peer_mapped_ports.items():
        service_urls[k] = "{}:{}".format(peer_host_ip, v)
    for k, v in ca_mapped_ports.items():
        service_urls[k] = "{}:{}".format(ca_host_ip, v)

    # update api_url, container, and user_id field
    self.db_update_one({"id": cid}, {
        "$set": {
            "containers": containers,
            "user_id": user_id,
            'api_url': service_urls['rest'],
            'service_url': service_urls
        }
    })

    # delayed health refresh in the background so create() returns fast
    def check_health_work(cid):
        time.sleep(5)
        self.refresh_health(cid)

    t = Thread(target=check_health_work, args=(cid, ))
    t.start()

    logger.info("Create cluster OK, id={}".format(cid))
    return cid
def create(self, name, host_id, start_port=0, user_id="",
           consensus_plugin=CONSENSUS_PLUGINS[0],
           consensus_mode=CONSENSUS_MODES[0], size=CLUSTER_SIZES[0]):
    """ Create a cluster based on given data

    Registers the cluster in the active collection, reserves a slot on
    the host, starts the compose project, records the service URLs and
    kicks off a delayed background health check.

    TODO: maybe need other id generation mechanism

    :param name: name of the cluster
    :param host_id: id of the host URL
    :param start_port: first service port for cluster, will generate
     if not given
    :param user_id: user_id of the cluster if start to be applied
    :param consensus_plugin: type of the consensus type
    :param consensus_mode: mode of the consensus
    :param size: size of the cluster, int type
    :return: Id of the created cluster or None
    """
    logger.info("Create cluster {}, host_id={}, consensus={}/{}, "
                "size={}".format(name, host_id, consensus_plugin,
                                 consensus_mode, size))

    # reject when the host is missing, inactive, or already at capacity
    h = self.host_handler.get_active_host_by_id(host_id)
    if not h:
        return None

    if len(h.get("clusters")) >= h.get("capacity"):
        logger.warning("host {} is full already".format(host_id))
        return None

    daemon_url = h.get("daemon_url")
    logger.debug("daemon_url={}".format(daemon_url))

    # auto-pick a free starting port when the caller gave none (<= 0)
    if start_port <= 0:
        ports = self.find_free_start_ports(host_id, 1)
        if not ports:
            logger.warning("No free port is found")
            return None
        start_port = ports[0]

    # Shift every service port so that the 'rest' peer port lands on
    # start_port; all other ports keep their relative offsets.
    # NOTE(review): CA ports are also offset against
    # PEER_SERVICE_PORTS['rest'] (not a CA base port) — presumably
    # intentional so both groups share one contiguous range; confirm.
    peer_mapped_ports, ca_mapped_ports, mapped_ports = {}, {}, {}
    for k, v in PEER_SERVICE_PORTS.items():
        peer_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
    for k, v in CA_SERVICE_PORTS.items():
        ca_mapped_ports[k] = v - PEER_SERVICE_PORTS['rest'] + start_port
    mapped_ports.update(peer_mapped_ports)
    mapped_ports.update(ca_mapped_ports)

    # initial cluster document; 'id' is filled in after insertion
    c = {
        'id': '',
        'name': name,
        'user_id': user_id or SYS_CREATOR,  # avoid applied
        'host_id': host_id,
        'daemon_url': daemon_url,
        'consensus_plugin': consensus_plugin,
        'consensus_mode': consensus_mode,
        'create_ts': datetime.datetime.now(),
        'apply_ts': '',
        'release_ts': '',
        'duration': '',
        'mapped_ports': mapped_ports,
        'service_url': {},  # e.g., {rest: xxx:7050, grpc: xxx:7051}
        'size': size,
        'containers': [],
        'status': 'running',
        'health': ''
    }
    # the Mongo ObjectId doubles as the cluster id (stringified below)
    uuid = self.col_active.insert_one(c).inserted_id  # object type
    cid = str(uuid)
    self.col_active.update_one({"_id": uuid}, {"$set": {"id": cid}})
    # try to add one cluster to host
    h = self.host_handler.db_update_one(
        {"id": host_id}, {"$addToSet": {"clusters": cid}})
    # re-check capacity after the update to catch concurrent creations;
    # roll back both the cluster doc and the host membership on overflow
    if not h or len(h.get("clusters")) > h.get("capacity"):
        self.col_active.delete_one({"id": cid})
        self.host_handler.db_update_one({"id": host_id},
                                        {"$pull": {"clusters": cid}})
        return None

    # from now on, we should be safe
    # start compose project, failed then clean and return
    logger.debug("Start compose project with name={}".format(cid))
    containers = compose_up(
        name=cid, mapped_ports=mapped_ports, host=h,
        consensus_plugin=consensus_plugin, consensus_mode=consensus_mode,
        cluster_size=size)
    # a partial start (fewer containers than requested) is a failure too
    if not containers or len(containers) != size:
        logger.warning("failed containers={}, then delete cluster".format(
            containers))
        self.delete(id=cid, record=False, forced=True)
        return None

    # resolve the IPs of the lead peer and the membership service
    peer_host_ip = self._get_service_ip(cid, 'vp0')
    ca_host_ip = self._get_service_ip(cid, 'membersrvc')

    # no api_url, then clean and return
    if not peer_host_ip:  # not valid api_url
        logger.error("Error to find peer host url, cleanup")
        self.delete(id=cid, record=False, forced=True)
        return None

    # build "<ip>:<port>" endpoints for every mapped service
    service_urls = {}
    for k, v in peer_mapped_ports.items():
        service_urls[k] = "{}:{}".format(peer_host_ip, v)
    for k, v in ca_mapped_ports.items():
        service_urls[k] = "{}:{}".format(ca_host_ip, v)

    # update api_url, container, and user_id field
    self.db_update_one(
        {"id": cid},
        {"$set": {"containers": containers, "user_id": user_id,
                  'api_url': service_urls['rest'],
                  'service_url': service_urls}})

    # delayed health refresh in the background so create() returns fast
    def check_health_work(cid):
        time.sleep(5)
        self.refresh_health(cid)

    t = Thread(target=check_health_work, args=(cid,))
    t.start()

    logger.info("Create cluster OK, id={}".format(cid))
    return cid