def delete(self, network):
    """ Delete a blockchain network instance.

    Clean up containers and remove the db entry. Only operates on active hosts.

    :param network: the network object to delete
    :return:
    """
    logger.debug("Delete cluster: id={}".format(network.id))
    network.update(set__status='deleting')
    net_id = network.id
    try:
        # self.host_agents[host.type].delete(network)
        # remove cluster info from host
        logger.info("remove network from host, network:{}".format(
            network.id))

        # if an org references this network, remove the reference
        for org_id in network.peer_orgs:
            peer_org = org_handler().schema(
                org_handler().get_by_id(org_id))
            host_id = peer_org['host_id']
            host_handler.refresh_status(host_id)
            host = host_handler.get_active_host_by_id(host_id)
            host.update(pull__clusters=network.id)
            self.host_agents[host.type].delete_peer_org(
                peer_org, host, net_id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)

        for org_id in network.orderer_orgs:
            orderer_org = org_handler().schema(
                org_handler().get_by_id(org_id))
            host_id = orderer_org['host_id']
            host_handler.refresh_status(host_id)
            host = host_handler.get_active_host_by_id(host_id)
            consensus_type = network.consensus_type
            host.update(pull__clusters=network.id)
            self.host_agents[host.type].delete_orderer_org(
                orderer_org, consensus_type, host, net_id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)

        # delete the network-related data from the Userdashboard mongo
        self.userdashboard_mongo_delete(network.id)
        network.delete()
        filepath = '{}{}'.format(CELLO_MASTER_FABRIC_DIR, network.id)
        os.system('rm -rf {}'.format(filepath))
        return
    except Exception as e:
        logger.error("remove network {} from host failed".format(network.id))
        network.update(set__status='error')
        raise e
def remove_network(self, network):
    try:
        network.update(set__status='deleting')

        # remove cluster info from host
        logger.info("remove network from host, network:{}".format(
            network.id))

        # if an org references this network, remove the reference
        for org_id in network.peer_orgs:
            peer_org = org_handler().schema(
                org_handler().get_by_id(org_id))
            host_id = peer_org['host_id']
            host = host_handler.get_active_host_by_id(host_id)
            host.update(pull__clusters=network.id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)

        for org_id in network.orderer_orgs:
            orderer_org = org_handler().schema(
                org_handler().get_by_id(org_id))
            host_id = orderer_org['host_id']
            host = host_handler.get_active_host_by_id(host_id)
            host.update(pull__clusters=network.id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)

        # delete the network-related data from the Userdashboard mongo
        # self.userdashboard_mongo_delete(network.id)
        network.delete()
        filepath = '{}{}'.format(CELLO_MASTER_FABRIC_DIR, network.id)
        os.system('rm -rf {}'.format(filepath))
    except Exception as e:
        logger.error("network remove failed for {}".format(e))
        raise e
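# The two delete paths above shell out with `rm -rf` to clean the network's
# on-disk artifacts. Below is a minimal sketch of the same cleanup step using
# only the standard library; CELLO_MASTER_FABRIC_DIR and the directory layout
# are taken from the code above, and the helper name is illustrative, not part
# of the project.
import shutil


def _remove_network_dir(net_id):
    """Remove '<CELLO_MASTER_FABRIC_DIR><net_id>'; missing dirs are ignored."""
    filepath = '{}{}'.format(CELLO_MASTER_FABRIC_DIR, net_id)
    shutil.rmtree(filepath, ignore_errors=True)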
def _update_network(self, network_config, request_host_ports):
    net_id = network_config['id']
    network = modelv2.BlockchainNetwork.objects.get(id=net_id)
    try:
        # self.host_agents[host.type].update(network_config, request_host_ports)
        # # service urls can only be calculated after service is created
        # if host.type == WORKER_TYPE_K8S:
        #     service_urls = self.host_agents[host.type] \
        #         .get_services_urls(net_id)
        # else:
        #     service_urls = self.gen_service_urls(net_id)
        net_id = network_config['id']
        net_name = network_config['name']
        couchdb_enabled = False
        if network_config['db_type'] == 'couchdb':
            couchdb_enabled = True
        fabric_version = fabric_image_version[
            network_config['fabric_version']]

        portid = []
        portid.append(0)
        for peer_org in network_config['peer_org_dicts']:
            host_id = peer_org['host_id']
            peer_num = peer_org['peerNum']
            host = host_handler.get_active_host_by_id(host_id)
            host.update(add_to_set__clusters=[net_id])
            self.host_agents[host.type].create_peer_org(
                peer_org, couchdb_enabled, host, net_id, net_name,
                fabric_version, request_host_ports, portid, peer_num)

        network.update(set__status='running')

        for peer_org in network_config['peer_org_dicts']:
            org_obj = modelv2.Organization.objects.get(id=peer_org['id'])
            org_obj.update(set__network=network)
        for orderer_org in network_config['orderer_org_dicts']:
            org_obj = modelv2.Organization.objects.get(
                id=orderer_org['id'])
            org_obj.update(set__network=network)

        logger.info("Update network OK, id={}".format(net_id))
    except Exception as e:
        logger.error("network {} update failed for {}".format(net_id, e))
        # will not call self.delete(network) in case of nested exception
        # self.delete(network)
        raise e
def _create_network(self, network_config, request_host_ports):
    net_id = network_config['id']
    network = modelv2.BlockchainNetwork.objects.get(id=net_id)
    try:
        # self.host_agents[host.type].create(network_config, request_host_ports)
        # # service urls can only be calculated after service is created
        # if host.type == WORKER_TYPE_K8S:
        #     service_urls = self.host_agents[host.type] \
        #         .get_services_urls(net_id)
        # else:
        #     service_urls = self.gen_service_urls(net_id)
        net_id = network_config['id']
        net_name = network_config['name']
        couchdb_enabled = False
        if network_config['db_type'] == 'couchdb':
            couchdb_enabled = True
        fabric_version = fabric_image_version[
            network_config['fabric_version']]
        consensus_type = network_config['consensus_type']

        portid = []
        portid.append(0)
        for orderer_org in network_config['orderer_org_dicts']:
            host_id = orderer_org['host_id']
            host_handler.refresh_status(host_id)
            host = host_handler.get_active_host_by_id(host_id)
            host.update(add_to_set__clusters=[net_id])
            self.host_agents[host.type].create_orderer_org(
                orderer_org, consensus_type, host, net_id, net_name,
                fabric_version, request_host_ports, portid)

        time.sleep(5)

        for peer_org in network_config['peer_org_dicts']:
            host_id = peer_org['host_id']
            peer_num = peer_org['peerNum']
            host_handler.refresh_status(host_id)
            host = host_handler.get_active_host_by_id(host_id)
            host.update(add_to_set__clusters=[net_id])
            self.host_agents[host.type].create_peer_org(
                peer_org, couchdb_enabled, host, net_id, net_name,
                fabric_version, request_host_ports, portid, peer_num)

        network.update(set__status='running')

        # Modified by zsh: so that organizations can still be operated on while
        # the network is being created, the step that attaches the network to
        # each organization was moved earlier.
        # for peer_org in network_config['peer_org_dicts']:
        #     org_obj = modelv2.Organization.objects.get(id=peer_org['id'])
        #     org_obj.update(set__network=network)
        # for orderer_org in network_config['orderer_org_dicts']:
        #     org_obj = modelv2.Organization.objects.get(id=orderer_org['id'])
        #     org_obj.update(set__network=network)

        logger.info("Create network OK, id={}".format(net_id))

        def check_health_work(network):
            time.sleep(180)
            self.refresh_health(network)

        t = Thread(target=check_health_work, args=(network, ))
        t.start()
    except Exception as e:
        logger.error("network {} create failed for {}".format(net_id, e))
        # will not call self.delete(network) in case of nested exception
        self.delete(network)
        raise e
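# An illustrative shape of the `network_config` dict consumed by
# _create_network/_update_network above, inferred from the keys they read.
# Values are placeholders, and any org-dict fields beyond those read here are
# an assumption rather than the project's full schema:
example_network_config = {
    'id': '<net-id>',
    'name': 'testnet',
    'db_type': 'couchdb',        # any other value disables CouchDB here
    'fabric_version': 'v1.4',    # must be a key of fabric_image_version
    'consensus_type': 'kafka',
    'orderer_org_dicts': [
        {'id': '<org-id>', 'host_id': '<host-id>'},
    ],
    'peer_org_dicts': [
        {'id': '<org-id>', 'host_id': '<host-id>', 'name': 'org1',
         'domain': 'example.com', 'peerNum': 2},
    ],
}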
def blockchain_network_create():
    request_debug(r, logger)

    # add operating log
    cur_time = datetime.datetime.utcnow()
    opName = sys._getframe().f_code.co_name
    opObject = "Network"
    operator = "admin"
    op_log_handler = OperatorLogHandler()

    if r.content_type.startswith("application/json"):
        body = dict(r.get_json(force=True, silent=True))['blockchain_network']
        opDetails = body
    else:
        error_msg = "request header content-type is not supported, use application/json"
        op_log_handler.record_operating_log(opDate=cur_time,
                                            opName=opName,
                                            opObject=opObject,
                                            resCode=400,
                                            operator=operator,
                                            errorMsg=error_msg)
        raise UnsupportedMediaType(msg=error_msg)

    name = body.get('name', None)
    orderer_orgs = body.get('orderer_orgs', None)
    peer_orgs = body.get('peer_orgs', None)
    host_id = body.get('host_id', None)
    if name is None or orderer_orgs is None or peer_orgs is None or host_id is None:
        error_msg = "name, orderer(peer)_orgs and host_id must be provided in request body"
        op_log_handler.record_operating_log(opDate=cur_time,
                                            opName=opName,
                                            opObject=opObject,
                                            resCode=400,
                                            operator=operator,
                                            errorMsg=error_msg,
                                            opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    description = body.get('description', "")
    fabric_version = body.get('fabric_version', None)
    if fabric_version is None or (fabric_version != 'v1.1' and fabric_version != 'v1.4'):
        error_msg = "Only fabric v1.1 and v1.4 are supported for now"
        op_log_handler.record_operating_log(opDate=cur_time,
                                            opName=opName,
                                            opObject=opObject,
                                            resCode=400,
                                            operator=operator,
                                            errorMsg=error_msg,
                                            opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    consensus_type = body.get('consensus_type', None)
    if consensus_type is None:
        consensus_type = 'kafka'
    elif consensus_type not in ['kafka', 'solo']:
        error_msg = 'consensus type {} is not supported'.format(consensus_type)
        op_log_handler.record_operating_log(opDate=cur_time,
                                            opName=opName,
                                            opObject=opObject,
                                            resCode=400,
                                            operator=operator,
                                            errorMsg=error_msg,
                                            opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    id = uuid4().hex
    host = host_handler.get_active_host_by_id(host_id)
    if not host:
        error_msg = "Cannot find an available host to create the new network"
        logger.error(error_msg)
        op_log_handler.record_operating_log(opDate=cur_time,
                                            opName=opName,
                                            opObject=opObject,
                                            resCode=500,
                                            operator=operator,
                                            errorMsg=error_msg,
                                            opDetails=opDetails)
        raise InternalServerError(msg=error_msg)

    network_handler = BlockchainNetworkHandler()
    try:
        network = network_handler.create(id=id,
                                         name=name,
                                         description=description,
                                         fabric_version=fabric_version,
                                         orderer_orgs=orderer_orgs,
                                         peer_orgs=peer_orgs,
                                         host=host,
                                         consensus_type=consensus_type,
                                         create_ts=cur_time)
        op_log_handler.record_operating_log(opDate=cur_time,
                                            opName=opName,
                                            opObject=opObject,
                                            resCode=200,
                                            operator=operator,
                                            opDetails=opDetails)
        return make_ok_gaea_resp(resource='blockchain_network', result=network)
    except Exception as e:
        error_msg = "blockchain_network create failed {}".format(e)
        logger.error(error_msg)
        op_log_handler.record_operating_log(opDate=cur_time,
                                            opName=opName,
                                            opObject=opObject,
                                            resCode=500,
                                            operator=operator,
                                            errorMsg=error_msg,
                                            opDetails=opDetails)
        raise InternalServerError(msg=error_msg)
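# A minimal example of the JSON body blockchain_network_create() expects, based
# on the fields it reads above (all values are placeholders):
example_blockchain_network_request = {
    'blockchain_network': {
        'name': 'testnet',
        'description': 'demo network',
        'fabric_version': 'v1.4',        # only v1.1 and v1.4 pass validation
        'consensus_type': 'kafka',       # 'kafka' (default) or 'solo'
        'orderer_orgs': ['<orderer-org-id>'],
        'peer_orgs': ['<peer-org-id>'],
        'host_id': '<host-id>',
    }
}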
def create_peer_org(self, peer_org, couchdb_enabled, host, net_id, net_name,
                    fabric_version, request_host_ports, portid, peer_num):
    service_names = []
    couchdb_service_names = []
    index = portid[0]
    services_dict = {}
    org_name = peer_org['name']
    org_domain = peer_org['domain']
    peer_num_all = int(peer_org['peerNum'])
    exist_peer_num = 0
    if peer_num_all != peer_num:
        exist_peer_num = peer_num_all - peer_num
    container_service_ip = host.worker_api.split(':')[1][2:]

    composefile_dict = {
        'version': '3.2',
        'networks': {'celloNet': None},
        'services': {}
    }
    net_dir = CELLO_MASTER_FABRIC_DIR + net_id

    for i in range(int(peer_org['peerNum'])):
        if exist_peer_num > i:
            continue
        peer_name = 'peer{}'.format(i)
        if couchdb_enabled is True:
            peer_seq = ['couchdb', peer_name, org_name, org_domain]
            couchdb_service_name = '.'.join(peer_seq)
            service_names.append(couchdb_service_name)
            couchdb_service_names.append(couchdb_service_name)
            couch_host_port = request_host_ports[index]
            index = index + 1
            couchdb_service_dict = self._construct_couchdb_docker_service(
                net_id, couchdb_service_name, fabric_version, couch_host_port)
            services_dict.update(couchdb_service_dict)
            couchdb_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=couch_host_port,
                service_name=couchdb_service_name,
                service_type='couchdb',
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            couchdb_service_endpoint.save()

        peer_seq = [peer_name, org_name, org_domain]
        peer_service_name = '.'.join(peer_seq)
        service_names.append(peer_service_name)
        host_ports = [
            request_host_ports[index],
            request_host_ports[index + 1]
        ]
        index = index + 2
        peer_service_dict = self._construct_peer_docker_service(
            net_id, org_name, org_domain, peer_name, fabric_version,
            host_ports, couchdb_enabled)
        services_dict.update(peer_service_dict)
        for i in range(len(host_ports)):
            peer_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=host_ports[i],
                service_name=peer_service_name,
                service_type='peer',
                org_name=org_name,
                peer_port_proto=PEER_PORT_GRPC if i == 0 else PEER_PORT_CCLISTEN,
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            peer_service_endpoint.save()

    if exist_peer_num == 0:
        ca_service_name = '.'.join(['ca', org_name, org_domain])
        service_names.append(ca_service_name)
        org_full_domain = '.'.join([org_name, org_domain])
        pk_path = '{net_dir}/crypto-config/peerOrganizations/{org_dir}/ca/'. \
            format(net_dir=net_dir, org_dir=org_full_domain)
        ca_key_file = self._get_ca_private_key(pk_path)
        host_port = request_host_ports[index]
        index = index + 1
        ca_service_dict = self._construct_ca_docker_service(
            net_id, org_name, org_domain, ca_key_file, fabric_version,
            host_port)
        services_dict.update(ca_service_dict)
        ca_service_endpoint = modelv2.ServiceEndpoint(
            id=uuid4().hex,
            service_ip=container_service_ip,
            service_port=host_port,
            service_name=ca_service_name,
            service_type='ca',
            org_name=org_name,
            network=modelv2.BlockchainNetwork.objects.get(id=net_id))
        ca_service_endpoint.save()

    host_id = peer_org['host_id']
    host = host_handler.get_active_host_by_id(host_id)
    composefile_dict['services'].update(services_dict)

    deploy_dir = '{}/deploy/'.format(net_dir)
    if not os.path.exists(deploy_dir):
        os.makedirs(deploy_dir)
    # back up the existing compose file before writing the new services
    if os.path.exists('{}/docker-compose.yaml'.format(deploy_dir)):
        shutil.copy('{}/docker-compose.yaml'.format(deploy_dir),
                    '{}/docker-compose-back.yaml'.format(deploy_dir))

    # write a compose file containing only the new services and bring them up
    composefile_back = '{}/docker-compose.yaml'.format(deploy_dir)
    with open(composefile_back, 'w') as f:
        yaml.dump(composefile_dict, f)

    project = compose_get_project(project_dir=deploy_dir,
                                  host=host.worker_api,
                                  project_name=net_id[:12])
    containers = project.up(detached=True, timeout=5)

    # restore the backup, then merge the new services into the full compose file
    if os.path.exists('{}/docker-compose-back.yaml'.format(deploy_dir)):
        shutil.copy('{}/docker-compose-back.yaml'.format(deploy_dir),
                    '{}/docker-compose.yaml'.format(deploy_dir))

    composefile = '{}/docker-compose.yaml'.format(deploy_dir)
    with open(composefile) as f:
        compose_file_base = yaml.load(f)
    compose_file_base['services'].update(services_dict)
    with open(composefile, 'w') as f:
        yaml.dump(compose_file_base, f)

    portid[0] = index
    return containers
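# A hedged sketch of what the `_get_ca_private_key(pk_path)` helper used above
# might do: Fabric's cryptogen places the CA's private key, named with an `_sk`
# suffix, inside the org's ca/ directory. The function name and behavior below
# are an illustrative assumption, not the project's actual implementation.
import os


def _find_ca_private_key(pk_path):
    """Return the filename of the first '*_sk' key found under pk_path."""
    for name in os.listdir(pk_path):
        if name.endswith('_sk'):
            return name
    raise FileNotFoundError(
        'no CA private key (*_sk) found in {}'.format(pk_path))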
def organization_create():
    request_debug(r, logger)

    # add operating log
    cur_time = datetime.datetime.utcnow()
    # get current func name
    opName = sys._getframe().f_code.co_name
    opObject = "Organization"
    operator = "admin"
    opResult = {}
    op_log_handler = OperatorLogHandler()

    if r.content_type.startswith("application/json"):
        body = dict(r.get_json(force=True, silent=True))['organization']
        opDetails = body
    else:
        error_msg = "request header content-type is not supported, use application/json"
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 400
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time,
                              opName=opName,
                              opObject=opObject,
                              opResult=opResult,
                              operator=operator)
        raise UnsupportedMediaType(error_msg)

    name = body.get('name', None)
    type = body.get('type', None)
    domain = body.get('domain', None)

    organizations = list(org_handler().list())
    org_names = []
    for org in organizations:
        org_names.append(org['name'])
    if name in org_names:
        error_msg = "an organization with the same name already exists, please use another name"
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 400
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time,
                              opName=opName,
                              opObject=opObject,
                              opResult=opResult,
                              operator=operator,
                              opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    if name is None or name == '':
        error_msg = "name is required and not allowed to be ''"
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 400
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time,
                              opName=opName,
                              opObject=opObject,
                              opResult=opResult,
                              operator=operator,
                              opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    # if there is '_' in the name or ID of genesis.block, the orderer can't
    # start up, saying "failed: error converting config to map: Illegal characters in key: [Group]"
    if '_' in name:
        error_msg = "'_' is not allowed in name or ID"
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 400
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time,
                              opName=opName,
                              opObject=opObject,
                              opResult=opResult,
                              operator=operator,
                              opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    if domain is None or domain == '':
        error_msg = "domain is required and not allowed to be ''"
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 400
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time,
                              opName=opName,
                              opObject=opObject,
                              opResult=opResult,
                              operator=operator,
                              opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    if type is None:
        error_msg = "type is required"
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 400
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time,
                              opName=opName,
                              opObject=opObject,
                              opResult=opResult,
                              operator=operator,
                              opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    if body['type'] not in ['peer', 'orderer']:
        error_msg = "only peer or orderer type is supported"
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 400
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time,
                              opName=opName,
                              opObject=opObject,
                              opResult=opResult,
                              operator=operator,
                              opDetails=opDetails)
        raise BadRequest(msg=error_msg)

    ordererHostnames = body.get('ordererHostnames', None)
    peerNum = body.get('peerNum', None)
    ca = body.get('ca', {})
    host_id = body.get('host_id', None)
    host = host_handler.get_active_host_by_id(host_id)
    id = uuid4().hex
    description = body.get('description', "")

    if body['type'] == 'peer':
        if ordererHostnames is not None:
            error_msg = "peer type organizations don't need ordererHostnames"
            opResult['resDes'] = "ERROR"
            opResult['resCode'] = 400
            opResult['errorMsg'] = error_msg
            op_log_handler.create(opDate=cur_time,
                                  opName=opName,
                                  opObject=opObject,
                                  opResult=opResult,
                                  operator=operator,
                                  opDetails=opDetails)
            raise BadRequest(msg=error_msg)
        if peerNum is None:
            peerNum = 2
        try:
            result = org_handler().create(id=id,
                                          name=name,
                                          description=description,
                                          type=type,
                                          domain=domain,
                                          peerNum=int(peerNum),
                                          ca=ca,
                                          host=host,
                                          ordererHostnames=[])
            opResult['resDes'] = "OK"
            opResult['resCode'] = 200
            opResult['errorMsg'] = ''
            op_log_handler.create(opDate=cur_time,
                                  opName=opName,
                                  opObject=opObject,
                                  opResult=opResult,
                                  operator=operator,
                                  opDetails=opDetails)
            return make_ok_my_resp('organization', result)
        except Exception as e:
            error_msg = "internal server error"
            opResult['resDes'] = "ERROR"
            opResult['resCode'] = 500
            opResult['errorMsg'] = error_msg
            op_log_handler.create(opDate=cur_time,
                                  opName=opName,
                                  opObject=opObject,
                                  opResult=opResult,
                                  operator=operator,
                                  opDetails=opDetails)
            raise InternalServerError()
    else:
        if peerNum is not None:
            error_msg = "orderer type organizations don't need peers"
            opResult['resDes'] = "ERROR"
            opResult['resCode'] = 400
            opResult['errorMsg'] = error_msg
            op_log_handler.create(opDate=cur_time,
                                  opName=opName,
                                  opObject=opObject,
                                  opResult=opResult,
                                  operator=operator,
                                  opDetails=opDetails)
            raise BadRequest(msg=error_msg)
        # userNum = body.get('userNum', None)
        try:
            result = org_handler().create(id=id,
                                          name=name,
                                          description=description,
                                          type=type,
                                          domain=domain,
                                          ca=ca,
                                          peerNum=0,
                                          host=host,
                                          ordererHostnames=ordererHostnames)
            opResult['resDes'] = "OK"
            opResult['resCode'] = 200
            opResult['errorMsg'] = ''
            op_log_handler.create(opDate=cur_time,
                                  opName=opName,
                                  opObject=opObject,
                                  opResult=opResult,
                                  operator=operator,
                                  opDetails=opDetails)
            return make_ok_my_resp('organization', result)
        except Exception as e:
            error_msg = "internal server error"
            op_log_handler.record_operating_log(opDate=cur_time,
                                                opName=opName,
                                                opObject=opObject,
                                                resCode=500,
                                                operator=operator,
                                                errorMsg=error_msg,
                                                opDetails=opDetails)
            raise InternalServerError()
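# Minimal examples of the JSON bodies organization_create() expects, based on
# the fields it reads above (all values are placeholders):
example_peer_org_request = {
    'organization': {
        'name': 'org1',                    # '_' is rejected
        'type': 'peer',
        'domain': 'example.com',
        'peerNum': 2,                      # peer orgs only; defaults to 2
        'ca': {},
        'host_id': '<host-id>',
        'description': 'demo peer org',
    }
}

example_orderer_org_request = {
    'organization': {
        'name': 'ordererorg',
        'type': 'orderer',
        'domain': 'example.com',
        'ordererHostnames': ['orderer0'],  # orderer orgs only
        'ca': {},
        'host_id': '<host-id>',
    }
}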