def reset(self, cluster_id, record=False):
    """Force-reset a chain: delete it, then recreate it with the same
    configuration.

    :param cluster_id: id of the cluster to reset
    :param record: whether to record the deletion into the released db
    :return: True on success, False otherwise
    """
    c = self.get_by_id(cluster_id)
    if not c:  # guard: nothing found, avoid AttributeError on None
        logger.warning("No cluster found with id={}".format(cluster_id))
        return False
    logger.debug("Run recreate_work in background thread")
    cluster_name, host_id, network_type = \
        c.get("name"), c.get("host_id"), c.get("network_type")

    if not self.delete(cluster_id, record=record, forced=True):
        logger.warning("Delete cluster failed with id=" + cluster_id)
        return False

    # All fabric v1.x types build the same config; only the network_type
    # marker differs (plain v1.0 keeps the config's default).
    if network_type not in (NETWORK_TYPE_FABRIC_V1,
                            NETWORK_TYPE_FABRIC_V1_1,
                            NETWORK_TYPE_FABRIC_V1_2):
        return False
    config = FabricV1NetworkConfig(
        consensus_plugin=c.get('consensus_plugin'),
        size=c.get('size'))
    if network_type != NETWORK_TYPE_FABRIC_V1:
        config.network_type = network_type

    if not self.create(name=cluster_name, host_id=host_id, config=config):
        logger.warning("Fail to recreate cluster {}".format(cluster_name))
        return False
    return True
def restart(self, cluster_id):
    """Restart a cluster.

    :param cluster_id: id of cluster to restart
    :return: Bool — True when the agent restart and db update succeed
    """
    c = self.get_by_id(cluster_id)
    if not c:
        logger.warning('No cluster found with id={}'.format(cluster_id))
        return False
    h_id = c.get('host_id')
    h = self.host_handler.get_active_host_by_id(h_id)
    if not h:
        logger.warning('No host found with id={}'.format(h_id))
        return False

    network_type = c.get('network_type')
    if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
        config = FabricPreNetworkConfig(
            consensus_plugin=c.get('consensus_plugin'),
            consensus_mode=c.get('consensus_mode'),
            size=c.get('size'))
    elif network_type in (NETWORK_TYPE_FABRIC_V1,
                          NETWORK_TYPE_FABRIC_V1_1,
                          NETWORK_TYPE_FABRIC_V1_2):
        # v1.x types share the config shape; tag only the non-default ones
        config = FabricV1NetworkConfig(
            consensus_plugin=c.get('consensus_plugin'),
            size=c.get('size'))
        if network_type != NETWORK_TYPE_FABRIC_V1:
            config.network_type = network_type
    else:
        return False

    result = self.cluster_agents[h.type].restart(
        name=cluster_id,
        worker_api=h.worker_api,
        mapped_ports=c.get('mapped_ports', PEER_SERVICE_PORTS),
        log_type=h.log_type,
        log_level=h.log_level,
        log_server='',
        config=config,
    )
    if not result:
        return False
    if h.type == WORKER_TYPE_K8S:
        # k8s exposes per-service urls; record them alongside the status
        service_urls = self.cluster_agents[h.type] \
            .get_services_urls(cluster_id)
        self.db_update_one({"id": cluster_id}, {
            'status': 'running',
            'api_url': service_urls.get('rest', ""),
            'service_url': service_urls
        })
    else:
        self.db_update_one({"id": cluster_id}, {'status': 'running'})
    return True
def cluster_create():
    """Create a cluster on a host.

    POST /cluster
    {
        name: xxx,
        host_id: xxx,
        network_type=fabric-0.6,
        consensus_plugin: pbft,
        consensus_mode: batch,
        size: 4,
    }

    :return: response object
    """
    logger.info("/cluster action=" + r.method)
    request_debug(r, logger)
    if r.content_type.startswith("application/json"):
        # silent=True returns None on a parse failure; fall back to {}
        # instead of letting dict(None) raise TypeError
        body = r.get_json(force=True, silent=True) or {}
    else:
        body = r.form
    # use .get() so an absent key reports "not enough data"
    # instead of raising KeyError
    if not body.get("name") or not body.get("host_id") or \
            not body.get("network_type"):
        error_msg = "cluster post without enough data"
        logger.warning(error_msg)
        return make_fail_resp(error=error_msg, data=body)

    name, host_id, network_type, size = \
        body['name'], body['host_id'], \
        body['network_type'], int(body['size'])

    if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
        # TODO: deprecated soon
        config = FabricPreNetworkConfig(
            consensus_plugin=body['consensus_plugin'],
            consensus_mode=body['consensus_mode'],
            size=size)
    elif network_type == NETWORK_TYPE_FABRIC_V1:
        config = FabricV1NetworkConfig(
            consensus_plugin=body['consensus_plugin'],
            size=size)
    elif network_type == NETWORK_TYPE_FABRIC_V1_1:
        config = FabricV1NetworkConfig(
            consensus_plugin=body['consensus_plugin'],
            size=size)
        config.network_type = NETWORK_TYPE_FABRIC_V1_1
    else:
        error_msg = "Unknown network_type={}".format(network_type)
        logger.warning(error_msg)
        # surface the reason to the client instead of an empty failure
        return make_fail_resp(error=error_msg)

    if not config.validate():
        return make_fail_resp(error="config not validated",
                              data=config.get_data())

    if cluster_handler.create(name=name, host_id=host_id, config=config):
        logger.debug("cluster POST successfully")
        return make_ok_resp(code=CODE_CREATED)
    else:
        logger.debug("cluster creation failed using handlder")
        return make_fail_resp(
            error="Failed to create cluster {}".format(name))
def stop(self, cluster_id):
    """Stop a running cluster.

    :param cluster_id: id of cluster to stop
    :return: Bool
    """
    cluster = self.get_by_id(cluster_id)
    if not cluster:
        logger.warning('No cluster found with id={}'.format(cluster_id))
        return False
    host_id = cluster.get('host_id')
    host = self.host_handler.get_active_host_by_id(host_id)
    if not host:
        logger.warning('No host found with id={}'.format(host_id))
        return False

    net_type = cluster.get('network_type')
    if net_type == NETWORK_TYPE_FABRIC_PRE_V1:
        config = FabricPreNetworkConfig(
            consensus_plugin=cluster.get('consensus_plugin'),
            consensus_mode=cluster.get('consensus_mode'),
            size=cluster.get('size'))
    elif net_type in (NETWORK_TYPE_FABRIC_V1, NETWORK_TYPE_FABRIC_V1_1):
        config = FabricV1NetworkConfig(
            consensus_plugin=cluster.get('consensus_plugin'),
            size=cluster.get('size'))
        if net_type == NETWORK_TYPE_FABRIC_V1_1:
            config.network_type = NETWORK_TYPE_FABRIC_V1_1
    else:
        return False

    stopped = self.cluster_agents[host.type].stop(
        name=cluster_id,
        worker_api=host.worker_api,
        mapped_ports=cluster.get('mapped_ports', PEER_SERVICE_PORTS),
        log_type=host.log_type,
        log_level=host.log_level,
        log_server='',
        config=config,
    )
    if not stopped:
        return False
    self.db_update_one({"id": cluster_id},
                       {'status': 'stopped', 'health': ''})
    return True
def reset(self, cluster_id, record=False):
    """Force-reset a chain: delete it, then recreate it from the stored
    network config.

    :param cluster_id: id of the cluster to reset
    :param record: whether to record the deletion into the released db
    :return: True on success, False on failure, or a dict with
             'msg'/'stat' when the network_type is unknown
    """
    c = self.get_by_id(cluster_id)
    if not c:  # guard: nothing to reset
        logger.warning("No cluster found with id={}".format(cluster_id))
        return False
    logger.debug("Run recreate_work in background thread")
    cluster_name, host_id, network_type = \
        c.get("name"), c.get("host_id"), c.get("network_type")
    config = c.get('config')

    if not self.delete(cluster_id, record=record, forced=True):
        logger.warning("Delete cluster failed with id=" + cluster_id)
        return False

    if network_type not in NETWORK_TYPES:
        error_msg = "Unknown network_type={}".format(network_type)
        logger.warning(error_msg)
        return {'msg': error_msg, 'stat': 400}

    if not config:
        # without the stored config we cannot rebuild the network
        logger.warning(
            "No config stored for cluster {}".format(cluster_id))
        return False
    new_config = FabricV1NetworkConfig(
        consensus_plugin=config.get('consensus_plugin'),
        peer=config.get('peer'),
        orderer=config.get('orderer'),
        ca=config.get('ca'),
        network_type=config.get('network_type')
    )
    if not self.create(name=cluster_name, host_id=host_id,
                       config=new_config):
        logger.warning("Fail to recreate cluster {}".format(cluster_name))
        return False
    return True
def cluster_create():
    """Create a cluster on a host.

    POST /cluster
    {
        name: xxx,
        host_id: xxx,
        network_type=fabric-0.6,
        consensus_plugin: pbft,
        consensus_mode: batch,
        size: 4,
    }

    :return: response object
    """
    logger.info("/cluster action=" + r.method)
    request_debug(r, logger)
    # use .get() so an absent key reports "not enough data"
    # instead of raising KeyError
    if not r.form.get("name") or not r.form.get("host_id") or \
            not r.form.get("network_type"):
        error_msg = "cluster post without enough data"
        logger.warning(error_msg)
        return make_fail_resp(error=error_msg, data=r.form)

    name, host_id, network_type = \
        r.form['name'], r.form['host_id'], r.form['network_type']

    if network_type == NETWORK_TYPE_FABRIC_PRE_V1:
        config = FabricPreNetworkConfig(
            consensus_plugin=r.form['consensus_plugin'],
            consensus_mode=r.form['consensus_mode'],
            size=r.form['size'])
    elif network_type == NETWORK_TYPE_FABRIC_V1:
        config = FabricV1NetworkConfig(
            size=r.form['size'])  # TODO: add more variables
    else:
        error_msg = "Unknown network_type={}".format(network_type)
        logger.warning(error_msg)
        # surface the reason to the client instead of an empty failure
        return make_fail_resp(error=error_msg)

    if not config.validate():
        return make_fail_resp(error="config not validated",
                              data=config.get_data())

    if cluster_handler.create(name=name, host_id=host_id,
                              network_type=network_type, config=config):
        logger.debug("cluster POST successfully")
        return make_ok_resp(code=CODE_CREATED)
    else:
        logger.debug("cluster creation failed using handlder")
        return make_fail_resp(
            error="Failed to create cluster {}".format(name))
def create_cluster_work(start_port):
    """Create one cluster starting at *start_port* on the enclosing host.

    The cluster name combines the host name with the port-slot index; the
    size is drawn at random from the allowed fabric-v1 sizes.
    """
    slot = int((start_port - CLUSTER_PORT_START) / CLUSTER_PORT_STEP)
    cluster_name = "{}_{}".format(host.name, slot)
    config = FabricV1NetworkConfig(
        consensus_plugin=CONSENSUS_PLUGIN_SOLO,
        size=random.choice(NETWORK_SIZE_FABRIC_V1))
    cid = cluster.cluster_handler.create(
        name=cluster_name, host_id=id, config=config,
        start_port=start_port)
    if not cid:
        logger.warning("Create cluster failed")
    else:
        logger.debug("Create cluster {} with id={}".format(
            cluster_name, cid))
def restart(self, cluster_id):
    """Restart a cluster.

    :param cluster_id: id of cluster to restart
    :return: Bool
    """
    cluster = self.get_by_id(cluster_id)
    if not cluster:
        logger.warning('No cluster found with id={}'.format(cluster_id))
        return False
    host_id = cluster.get('host_id')
    host = self.host_handler.get_active_host_by_id(host_id)
    if not host:
        logger.warning('No host found with id={}'.format(host_id))
        return False

    net_type = cluster.get('network_type')
    if net_type == NETWORK_TYPE_FABRIC_PRE_V1:
        config = FabricPreNetworkConfig(
            consensus_plugin=cluster.get('consensus_plugin'),
            consensus_mode=cluster.get('consensus_mode'),
            size=cluster.get('size'))
    elif net_type == NETWORK_TYPE_FABRIC_V1:
        config = FabricV1NetworkConfig(size=cluster.get('size'))
    else:
        return False

    ok = self.cluster_agents[host.get('type')].restart(
        name=cluster_id,
        worker_api=host.get('worker_api'),
        mapped_ports=cluster.get('mapped_ports', PEER_SERVICE_PORTS),
        network_type=cluster.get('network_type'),
        log_type=host.get('log_type'),
        log_level=host.get('log_level'),
        log_server='',
        config=config,
    )
    if not ok:
        return False
    self.db_update_one({"id": cluster_id},
                       {"$set": {'status': 'running'}})
    return True
def delete(self, id, record=False, forced=False):
    """ Delete a cluster instance

    Clean containers, remove db entry. Only operate on active host.

    :param id: id of the cluster to delete
    :param record: Whether to record into the released collections
    :param forced: Whether to removing user-using cluster, for release
    :return:
    """
    logger.debug("Delete cluster: id={}, forced={}".format(id, forced))

    try:
        cluster = ClusterModel.objects.get(id=id)
    except Exception:
        logger.warning("Cannot find cluster {}".format(id))
        return False
    # mark deleting first and fetch the pre-update document (after=False),
    # so the original user_id is still readable from `c`
    c = self.db_update_one({"id": id},
                           {"status": NETWORK_STATUS_DELETING},
                           after=False)
    # we are safe from occasional applying now
    user_id = c.user_id  # original user_id
    if not forced and user_id != "":
        # not forced, and chain is used by normal user, then no process
        logger.warning("Cannot delete cluster {} by "
                       "user {}".format(id, user_id))
        # restore ownership — the deleting status set above is rolled back
        # by re-asserting the user binding
        cluster.update(set__user_id=user_id, upsert=True)
        return False
    else:
        cluster.update(set__status=NETWORK_STATUS_DELETING, upsert=True)
    # older records may miss these fields; fall back to pre-v1 defaults
    host_id, worker_api, network_type, consensus_plugin, cluster_size = \
        str(c.host.id), c.worker_api, \
        c.network_type if c.network_type else NETWORK_TYPE_FABRIC_PRE_V1, \
        c.consensus_plugin if c.consensus_plugin else \
        CONSENSUS_PLUGINS_FABRIC_V1[0], \
        c.size if c.size else NETWORK_SIZE_FABRIC_PRE_V1[0]
    # port = api_url.split(":")[-1] or CLUSTER_PORT_START
    h = self.host_handler.get_active_host_by_id(host_id)
    if not h:
        logger.warning("Host {} inactive".format(host_id))
        cluster.update(set__user_id=user_id, upsert=True)
        return False

    # rebuild the network config the agent needs to tear containers down;
    # v1.1/v1.2 share the v1 config with an explicit network_type marker
    if network_type == NETWORK_TYPE_FABRIC_V1:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
                                       size=cluster_size)
    elif network_type == NETWORK_TYPE_FABRIC_V1_1:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
                                       size=cluster_size)
        config.network_type = NETWORK_TYPE_FABRIC_V1_1
    elif network_type == NETWORK_TYPE_FABRIC_V1_2:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
                                       size=cluster_size)
        config.network_type = NETWORK_TYPE_FABRIC_V1_2
    elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:
        config = FabricPreNetworkConfig(consensus_plugin=consensus_plugin,
                                        consensus_mode='',
                                        size=cluster_size)
    else:
        return False

    config.update({"env": cluster.env})

    # delegate the actual container/compose teardown to the host's agent
    delete_result = self.cluster_agents[h.type].delete(
        id, worker_api, config)
    if not delete_result:
        logger.warning("Error to run compose clean work")
        # teardown failed — give the cluster back to its user
        cluster.update(set__user_id=user_id, upsert=True)
        return False

    # remove cluster info from host
    logger.info("remove cluster from host, cluster:{}".format(id))
    h.update(pull__clusters=id)
    c.delete()
    return True
def delete(self, id, record=False, forced=False):
    """ Delete a cluster instance

    Clean containers, remove db entry. Only operate on active host.

    :param id: id of the cluster to delete
    :param record: Whether to record into the released collections
    :param forced: Whether to removing user-using cluster, for release
    :return:
    """
    logger.debug("Delete cluster: id={}, forced={}".format(id, forced))

    # atomically claim the cluster for the deleter and read the PRE-update
    # document (after=False), so the original user_id is still visible
    c = self.db_update_one({"id": id},
                           {"$set": {"user_id": SYS_DELETER}},
                           after=False)
    if not c:
        logger.warning("Cannot find cluster {}".format(id))
        return False

    # we are safe from occasional applying now
    user_id = c.get("user_id")  # original user_id
    if not forced and user_id != "" and not user_id.startswith(SYS_USER):
        # not forced, and chain is used by normal user, then no process
        logger.warning("Cannot delete cluster {} by "
                       "user {}".format(id, user_id))
        # roll back the deleter claim made above
        self.col_active.update_one({"id": id},
                                   {"$set": {"user_id": user_id}})
        return False

    # 0. forced
    # 1. user_id == SYS_DELETER or ""
    # Then, add deleting flag to the db, and start deleting
    if not user_id.startswith(SYS_DELETER):
        # keep the original user id appended to the deleter prefix so it
        # can be restored on failure / recorded on release
        self.col_active.update_one(
            {"id": id},
            {"$set": {"user_id": SYS_DELETER + user_id}})

    # older records may miss these fields; fall back to pre-v1 defaults
    host_id, worker_api, network_type, consensus_plugin, cluster_size = \
        c.get("host_id"), c.get("worker_api"), \
        c.get("network_type", NETWORK_TYPE_FABRIC_PRE_V1), \
        c.get("consensus_plugin", CONSENSUS_PLUGINS_FABRIC_V1[0]), \
        c.get("size", NETWORK_SIZE_FABRIC_PRE_V1[0])
    # port = api_url.split(":")[-1] or CLUSTER_PORT_START
    h = self.host_handler.get_active_host_by_id(host_id)
    if not h:
        logger.warning("Host {} inactive".format(host_id))
        # host unusable — restore the original ownership
        self.col_active.update_one({"id": id},
                                   {"$set": {"user_id": user_id}})
        return False

    # rebuild the network config the agent needs for teardown
    if network_type == NETWORK_TYPE_FABRIC_V1:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin,
                                       size=cluster_size)
    elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:
        config = FabricPreNetworkConfig(consensus_plugin=consensus_plugin,
                                        consensus_mode='',
                                        size=cluster_size)
    else:
        return False

    if not self.cluster_agents[h.get('type')]\
            .delete(id, worker_api, network_type, config):
        logger.warning("Error to run compose clean work")
        # teardown failed — give the cluster back to its user
        self.col_active.update_one({"id": id},
                                   {"$set": {"user_id": user_id}})
        return False

    # detach the cluster from its host and drop the active record
    self.host_handler.db_update_one({"id": c.get("host_id")},
                                    {"$pull": {"clusters": id}})
    self.col_active.delete_one({"id": id})

    if record:  # record original c into release collection
        logger.debug("Record the cluster info into released collection")
        c["release_ts"] = datetime.datetime.now()
        c["duration"] = str(c["release_ts"] - c["apply_ts"])
        # seems mongo reject timedelta type
        if user_id.startswith(SYS_DELETER):
            # strip the deleter prefix to recover the real user id
            c["user_id"] = user_id[len(SYS_DELETER):]
        self.col_released.insert_one(c)
    return True
def delete(self, id, record=False, forced=True):
    """ Delete a cluster instance

    Clean containers, remove db entry. Only operate on active host.

    :param id: id of the cluster to delete
    :param record: Whether to record into the released collections
    :param forced: Whether to removing user-using cluster, for release
    :return:
    """
    logger.debug("Delete cluster: id={}, forced={}".format(id, forced))

    try:
        cluster = ClusterModel.objects.get(id=id)
    except Exception:
        logger.warning("Cannot find cluster {}".format(id))
        return False

    user_id = str(cluster.user.id)
    # resolve the channel alias tied to this cluster
    channel = ChannelModel.objects.get(cluster=cluster)
    channel = channel.alias
    logger.info('delete channel :{}'.format(channel))
    # local import — presumably avoids a circular import at module load;
    # the task is invoked synchronously here, not via .delay()
    from tasks import send_delete_cluster
    # send_delete_cluster.delay(body={'BlockchainSign': id})
    send_delete_cluster(body={'BlockchainSign': id, "ChannelId": channel},
                        user_id=user_id)

    # mark deleting and fetch the pre-update document (after=False)
    c = self.db_update_one({"id": id},
                           {"status": NETWORK_STATUS_DELETING},
                           after=False)
    # we are safe from occasional applying now
    # user_id = c.user_id  # original user_id
    if not forced:
        # not forced, and chain is used by normal user, then no process
        logger.warning("Cannot delete cluster {} by "
                       "user {}".format(id, user_id))
        cluster.update(
            set__user_id=user_id,
            upsert=True
        )
        return False
    else:
        cluster.update(set__status=NETWORK_STATUS_DELETING, upsert=True)

    # older records may miss these fields; fall back to pre-v1 defaults
    host_id, worker_api, network_type, consensus_plugin = \
        str(c.host.id), c.host.worker_api, \
        c.network_type if c.network_type else NETWORK_TYPE_FABRIC_PRE_V1, \
        c.consensus_plugin if c.consensus_plugin else \
        CONSENSUS_PLUGINS_FABRIC_V1[0]
    # port = api_url.split(":")[-1] or CLUSTER_PORT_START
    cluster_size = ''
    h = self.host_handler.get_active_host_by_id(host_id)
    if not h:
        logger.warning("Host {} inactive".format(host_id))
        cluster.update(set__user_id=user_id, upsert=True)
        return False

    # rebuild the network config the agent needs for teardown;
    # v1.1/v1.2 share the v1 config with an explicit network_type marker
    if network_type == NETWORK_TYPE_FABRIC_V1:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin)
    elif network_type == NETWORK_TYPE_FABRIC_V1_1:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin)
        config.network_type = NETWORK_TYPE_FABRIC_V1_1
    elif network_type == NETWORK_TYPE_FABRIC_V1_2:
        config = FabricV1NetworkConfig(consensus_plugin=consensus_plugin)
        config.network_type = NETWORK_TYPE_FABRIC_V1_2
    elif network_type == NETWORK_TYPE_FABRIC_PRE_V1:
        config = FabricPreNetworkConfig(consensus_plugin=consensus_plugin,
                                        consensus_mode='',
                                        size=cluster_size)
    else:
        return False

    config.update({
        "env": cluster.env
    })
    # delegate the container teardown to the host's agent
    delete_result = self.cluster_agents[h.host_type].delete(id,
                                                            worker_api,
                                                            config)
    # remove the on-disk config directory regardless of agent outcome
    config_path = BLOCKCHAIN_CONFIG_FILES_PATH + '/{}'.format(id)
    if os.path.exists(config_path):
        shutil.rmtree(config_path)
    if not delete_result:
        logger.warning("Error to run compose clean work")
        cluster.update(set__user_id=user_id, upsert=True)
        return False

    # remove cluster info from host
    logger.info("remove cluster from host, cluster:{}".format(id))
    h.update(pull__clusters=id)

    # drop per-org users with roles 2/3 that belonged to this cluster
    # (presumably cluster-scoped operator accounts — verify role meaning)
    orgs = OrgModel.objects(cluster=cluster)
    for org in orgs:
        org_users = org.users
        for user in org_users:
            if int(user.role) in [2, 3]:
                user.delete()
    c.delete()
    return True