def sys_channelOrderer_update(self, blockchain_network_id, orderer_org_dicts, request_host_ports):
    """Push the network's system-channel orderer data to the user-dashboard.

    Gathers the network's current endpoint list plus the full schema of every
    organization in the network, and POSTs them together with the new orderer
    org dicts and the reserved host ports to the dashboard's
    sys_channel_orderer REST endpoint.
    """
    service_object = self.get_endpoints_list(blockchain_network_id)
    # NOTE(review): get_by_networkid is invoked unbound with `self` as the
    # first positional argument — confirm this is intentional.
    organizations_object = org_handler.get_by_networkid(self, blockchain_network_id)
    organizations = [
        org_handler().schema(org_handler().get_by_id(each['id']))
        for each in organizations_object
    ]
    body = {
        "sysChannel": {
            "service_object": service_object,
            "organizations_object": organizations,
            "orderer_org_dicts": orderer_org_dicts,
            "request_host_ports": request_host_ports
        }
    }
    headers = {"Content-Type": "application/json"}
    rest_api = 'http://user-dashboard:8081/v2/sys_channel_orderer/{}'.format(
        blockchain_network_id)
    res = requests.post(rest_api, data=json.dumps(body), headers=headers)
    if res.status_code == 200:
        print("update syschannel from order success")
    return
def createyamlforneworgs(self, id, peer_orgs):
    """Generate per-org crypto/configtx material for new peer orgs (fabric v1.1 tools).

    For each org id in ``peer_orgs`` a scratch directory
    ``<network_path>/<org_id>`` is created, the crypto-config/configtx yaml
    files are dumped there (docker or k8s variant depending on the host type),
    the org's MSP definition is exported to
    ``../channel-artifacts/<org>.json`` via configtxgen, and the scratch
    directory is removed on success.

    :param id: blockchain network id
    :param peer_orgs: list of peer organization ids to process
    :return: serialized network model
    :raises Exception: when certificate/MSP generation fails
    """
    ins = modelv2.BlockchainNetwork.objects.get(id=id)
    host = ins.host
    consensus_type = ins.consensus_type
    fabric_version = ins.fabric_version
    peer_org_dicts = []
    orderer_org_dicts = []
    filepath = file_define.commad_create_path(id)
    print("filepath = {}".format(filepath))
    for org_id in peer_orgs:
        peer_org_dict = org_handler().schema(org_handler().get_by_id(org_id))
        peer_org_dicts.append(peer_org_dict)
        # scratch dir for this org under the network's working directory
        fileorgpath = '{}/{}'.format(filepath, org_id)
        os.system('mkdir -p {}/crypto-config/peerOrganizations/'.format(fileorgpath))
        if host.type == 'docker':
            file_define.dump_crypto_config_yaml_file(fileorgpath, peer_org_dicts,
                                                     orderer_org_dicts)
            file_define.dump_configtx_yaml_file(fileorgpath, consensus_type,
                                                peer_org_dicts, orderer_org_dicts,
                                                fabric_version)
        else:
            file_define.dump_crypto_config_yaml_file_k8s(fileorgpath, peer_org_dicts,
                                                         orderer_org_dicts)
            file_define.dump_configtx_yaml_file_k8s(fileorgpath, consensus_type,
                                                    peer_org_dicts, orderer_org_dicts,
                                                    fabric_version)
        origin_dir = os.getcwd()
        try:
            os.chdir(fileorgpath)
            print(os.getcwd())
            # NOTE(review): this export only affects a throw-away subshell and
            # does not change this process's environment — confirm it is needed.
            os.system("export FABRIC_CFG_PATH=$PWD")
            mspid = '{}MSP'.format(peer_org_dict['name'][0:1].upper()
                                   + peer_org_dict['name'][1:])
            orgname = peer_org_dict['name']
            org_domain = peer_org_dict['domain']
            orgdir = '{}.{}'.format(orgname, org_domain)
            os.system('cp -r {}/crypto-config/peerOrganizations/{} '
                      '{}/crypto-config/peerOrganizations/'.format(
                          filepath, orgdir, fileorgpath))
            call("/opt/fabric_tools/v1_1/configtxgen -printOrg %s > ../channel-artifacts/%s.json"
                 % (mspid, orgname), shell=True)
        except Exception as e:
            # chain the original error instead of discarding it (was a bare except)
            raise Exception('create certificate or genesis block failed!') from e
        finally:
            # always restore the working directory, even on failure
            os.chdir(origin_dir)
        # scratch dir is only cleaned up on success (matches original behavior)
        os.system('rm -r {}'.format(fileorgpath))
    return self._schema(ins)
def delete(self, network):
    """Delete a blockchain network instance.

    Tears down every peer and orderer org on its active host, detaches the
    orgs from the network, removes the dashboard mongo data, deletes the
    model and wipes the network's on-disk material.

    :param network: BlockchainNetwork model instance to delete
    :raises Exception: re-raised after the network status is set to 'error'
    """
    logger.debug("Delete cluster: id={}".format(network.id))
    network.update(set__status='deleting')
    net_id = network.id
    try:
        logger.info("remove network from host, network:{}".format(
            network.id))
        # tear down peer orgs and detach them from this network
        for org_id in network.peer_orgs:
            peer_org = org_handler().schema(
                org_handler().get_by_id(org_id))
            host_id = peer_org['host_id']
            host_handler.refresh_status(host_id)
            host = host_handler.get_active_host_by_id(host_id)
            host.update(pull__clusters=network.id)
            self.host_agents[host.type].delete_peer_org(
                peer_org, host, net_id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)
        # tear down orderer orgs likewise (delete_orderer_org also needs
        # the network's consensus type)
        for org_id in network.orderer_orgs:
            orderer_org = org_handler().schema(
                org_handler().get_by_id(org_id))
            host_id = orderer_org['host_id']
            host_handler.refresh_status(host_id)
            host = host_handler.get_active_host_by_id(host_id)
            consensus_type = network.consensus_type
            host.update(pull__clusters=network.id)
            self.host_agents[host.type].delete_orderer_org(
                orderer_org, consensus_type, host, net_id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)
        # drop this network's data from the user-dashboard mongo
        self.userdashboard_mongo_delete(network.id)
        network.delete()
        filepath = '{}{}'.format(CELLO_MASTER_FABRIC_DIR, network.id)
        os.system('rm -rf {}'.format(filepath))
        return
    except Exception as e:
        # failure path now logged at error level (was logger.info)
        logger.error("remove network {} fail from host".format(network.id))
        network.update(set__status='error')
        raise e
def organization_query(organization_id):
    """Fetch a single organization by id.

    :param organization_id: id of the organization to look up
    :return: gaea-style OK response with the organization schema
    :raises NotFound: when the lookup fails or no organization matches
    """
    try:
        request_debug(r, logger)
        result = org_handler().schema(org_handler().get_by_id(organization_id))
    except Exception:
        # the original bare except wrapped the whole body, which also swallowed
        # the more specific NotFound raised below and replaced its message —
        # only the lookup itself is guarded now
        raise NotFound(msg='get organization failed')
    logger.debug(result)
    if result:
        return make_ok_gaea_resp(resource='organization', result=result)
    error_msg = "organization not found with id=" + organization_id
    logger.warning(error_msg)
    raise NotFound(msg=error_msg)
def organization_list():
    """List organizations, optionally filtered by request query args.

    :return: gaea-style OK response with the organization list
    :raises NotFound: when the underlying query fails
    """
    logger.info("/organization_list method=" + r.method)
    try:
        request_debug(r, logger)
        # every query-string argument becomes a filter key
        col_filter = {key: r.args.get(key) for key in r.args}
        items = list(org_handler().list(filter_data=col_filter))
    except Exception:
        # narrowed from a bare except; error mapping preserved
        raise NotFound(msg='get organizations failed')
    return make_ok_gaea_resp(resource='organizations', result=items)
def remove_network(self, network):
    """Detach all orgs from *network*, drop the model and wipe its files.

    Unlike the full delete path this does not call the host agents; it only
    cleans up database references (host cluster lists, org network links)
    and the on-disk network directory.
    """
    try:
        network.update(set__status='deleting')
        logger.info("remove network from host, network:{}".format(
            network.id))
        # peer and orderer orgs receive identical cleanup treatment here,
        # so both id lists are walked with one loop
        for org_id in list(network.peer_orgs) + list(network.orderer_orgs):
            org_dict = org_handler().schema(
                org_handler().get_by_id(org_id))
            active_host = host_handler.get_active_host_by_id(org_dict['host_id'])
            active_host.update(pull__clusters=network.id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)
        network.delete()
        network_dir = '{}{}'.format(CELLO_MASTER_FABRIC_DIR, network.id)
        os.system('rm -rf {}'.format(network_dir))
    except Exception as e:
        logger.error("network remove failed for {}".format(e))
        raise e
def organization_update(organization_id):
    """Update an organization's peer count.

    :param organization_id: id of the organization to update
    :return: OK response with the updated organization, or a failure response
    """
    request_debug(r, logger)
    if r.content_type.startswith("application/json"):
        body = dict(r.get_json(force=True, silent=True))
    else:
        body = r.form
    id = organization_id
    peerNum = body["peerNum"]
    # NOTE(review): the sibling organization_update() passes a dict of fields
    # to org_handler().update; here the raw peerNum is passed — confirm the
    # handler supports both call shapes.
    result = org_handler().update(id, peerNum)
    if result:
        logger.debug("organization PUT successfully")
        return make_ok_my_resp(resource='organization', result=result)
    else:
        # was result.get("name") on a falsy result, which raises
        # AttributeError when result is None — report the known id instead
        error_msg = "Failed to update organization {}".format(organization_id)
        logger.warning(error_msg)
        return make_fail_resp(error=error_msg)
def organization_update():
    """Update an organization from a JSON/form body that carries its id.

    Body must contain ``id``; every other key is passed through to the
    organization handler as an update field.

    :return: empty OK response, or a failure response
    :raises BadRequest: when the body carries no ``id``
    """
    request_debug(r, logger)
    if r.content_type.startswith("application/json"):
        body = dict(r.get_json(force=True, silent=True))
    else:
        body = r.form
    if "id" not in body:
        error_msg = "organization PUT without enough data"
        logger.warning(error_msg)
        raise BadRequest(msg=error_msg)
    id = body["id"]
    # all non-id keys become update fields
    d = {k: body.get(k) for k in body if k != "id"}
    result = org_handler().update(id, d)
    if result:
        logger.debug("organization PUT successfully")
        return make_ok_resp()
    # was result.get("name") on a falsy result, which raises AttributeError
    # when result is None — report the known id instead
    error_msg = "Failed to update organization {}".format(id)
    logger.warning(error_msg)
    return make_fail_resp(error=error_msg)
def createyamlforneworgs(self, id, peer_orgs, orderer_orgs):
    """Export MSP definitions for newly added orgs (fabric v1.4 tools).

    For each peer org (and optionally the first orderer org) a scratch
    directory ``<network_path>/<org_id>`` is created, the org's existing
    crypto material and the network configtx.yaml are copied in, the MSP
    definition is written to ``../channel-artifacts/<org>.json`` with
    configtxgen, and the scratch directory is removed on success.

    :param id: blockchain network id
    :param peer_orgs: list of peer organization ids
    :param orderer_orgs: list of orderer org ids, or None; only element 0 is used
    :return: serialized network model
    :raises Exception: when MSP generation fails
    """
    ins = modelv2.BlockchainNetwork.objects.get(id=id)
    filepath = file_define.commad_create_path(id)
    print("filepath = {}".format(filepath))
    for org_id in peer_orgs:
        peer_org_dict = org_handler().schema(
            org_handler().get_by_id(org_id))
        fileorgpath = '{}/{}'.format(filepath, org_id)
        os.system('mkdir -p {}/crypto-config/peerOrganizations/'.format(
            fileorgpath))
        origin_dir = os.getcwd()
        try:
            os.chdir(fileorgpath)
            print(os.getcwd())
            # NOTE(review): affects only a throw-away subshell, not this process
            os.system("export FABRIC_CFG_PATH=$PWD")
            mspid = '{}MSP'.format(peer_org_dict['name'][0:1].upper()
                                   + peer_org_dict['name'][1:])
            orgname = peer_org_dict['name']
            org_domain = peer_org_dict['domain']
            orgdir = '{}.{}'.format(orgname, org_domain)
            os.system(
                'cp -r {}/crypto-config/peerOrganizations/{} {}/crypto-config/peerOrganizations/'
                .format(filepath, orgdir, fileorgpath))
            os.system('cp -r {}/configtx.yaml {}/'.format(
                filepath, fileorgpath))
            call(
                "/opt/fabric_tools/v1_4/configtxgen -printOrg %s > ../channel-artifacts/%s.json"
                % (mspid, orgname), shell=True)
        except Exception as e:
            # chain the original error instead of discarding it (was a bare except)
            raise Exception('create certificate or genesis block failed!') from e
        finally:
            # always restore the working directory, even on failure
            os.chdir(origin_dir)
        os.system('rm -r {}'.format(fileorgpath))
    if orderer_orgs is not None:
        org_id = orderer_orgs[0]
        orderer_org_dict = org_handler().schema(
            org_handler().get_by_id(org_id))
        fileorgpath = '{}/{}'.format(filepath, org_id)
        os.system('mkdir -p {}/crypto-config/ordererOrganizations/'.format(
            fileorgpath))
        origin_dir = os.getcwd()
        try:
            os.chdir(fileorgpath)
            print(os.getcwd())
            os.system("export FABRIC_CFG_PATH=$PWD")
            # orderer MSP ids use the <Name>Org suffix, not <Name>MSP
            mspid = '{}Org'.format(orderer_org_dict['name'][0:1].upper()
                                   + orderer_org_dict['name'][1:])
            orgname = orderer_org_dict['name']
            org_domain = orderer_org_dict['domain']
            orgdir = '{}'.format(org_domain)
            os.system(
                'cp -r {}/crypto-config/ordererOrganizations/{} {}/crypto-config/ordererOrganizations/'
                .format(filepath, orgdir, fileorgpath))
            os.system('cp -r {}/configtx.yaml {}/'.format(
                filepath, fileorgpath))
            call(
                "/opt/fabric_tools/v1_4/configtxgen -printOrg %s > ../channel-artifacts/%s.json"
                % (mspid, orgname), shell=True)
        except Exception as e:
            raise Exception('create certificate or genesis block failed!') from e
        finally:
            os.chdir(origin_dir)
        os.system('rm -r {}'.format(fileorgpath))
    return self._schema(ins)
def addpeertonetwork(self, id, peer_org, peers_num):
    """Add ``peers_num`` new peers to organization ``peer_org`` in network ``id``.

    Reserves host ports for the new peer (and couchdb) containers, extends
    the crypto material with ``cryptogen extend``, records the CA secret-key
    file name on the org dict, and kicks off the asynchronous network update.

    :param id: blockchain network id
    :param peer_org: id of the peer organization to extend
    :param peers_num: number of peers to add
    :return: serialized network model
    :raises Exception: port exhaustion, crypto extension or CA key lookup failure
    """
    ins = modelv2.BlockchainNetwork.objects.get(id=id)
    host = ins.host
    fabric_version = ins.fabric_version
    name = ins.name
    peer_org_dict = org_handler().schema(org_handler().get_by_id(peer_org))
    db_type = ins.db_type
    couchdb_enabled = (db_type == 'couchdb')
    # reserve host ports for the new peers (plus couchdb if enabled)
    peer_num = peers_num
    peer_org_dict['peerNum'] += peers_num
    if couchdb_enabled:
        request_host_port_num = peer_num * PEER_NODE_HOSTPORT_NUM + \
            peer_num * COUCHDB_NODE_HOSTPORT_NUM
    else:
        request_host_port_num = peer_num * PEER_NODE_HOSTPORT_NUM
    request_host_ports = self.find_free_start_ports(
        request_host_port_num, host)
    if len(request_host_ports) != request_host_port_num:
        error_msg = "no enough ports for network service containers"
        logger.error(error_msg)
        raise Exception(error_msg)
    # network working directory under FABRIC_DIR
    filepath = file_define.commad_create_path(id)
    print("filepath = {}".format(filepath))
    # extend crypto-config.yaml with the additional peers
    file_define.update_crypto_file_for_addpeers(filepath, peer_org_dict,
                                                peers_num)
    origin_dir = os.getcwd()
    try:
        os.chdir(filepath)
        print(os.getcwd())
        # generate certificates for the added peers only
        call(
            "/opt/fabric_tools/v1_4/cryptogen extend --config=%s/crypto-config.yaml"
            % filepath, shell=True)
    except Exception as e:
        # chain the original error instead of discarding it (was a bare except)
        raise Exception('create certificate or genesis block failed!') from e
    finally:
        # always restore the working directory, even on failure
        os.chdir(origin_dir)
    try:
        sk_file = ''
        org_name = peer_org_dict['name']
        org_domain = peer_org_dict['domain']
        org_fullDomain_name = '.'.join([org_name, org_domain])
        ca_dir = '/opt/fabric/{net_dir}/crypto-config/peerOrganizations/{org_fullDomain_name}/ca/'.\
            format(net_dir=id, org_fullDomain_name=org_fullDomain_name)
        # the CA private key is the single file ending in "_sk"
        for f in os.listdir(ca_dir):
            if f.endswith("_sk"):
                sk_file = f
        peer_org_dict['sk_file'] = sk_file
    except Exception as e:
        raise Exception('create_userdashboard failed!.') from e
    # network models only hold org ids; pass the details explicitly
    network_config = {
        'id': id,
        'name': name,
        'fabric_version': fabric_version,
        'peer_org_dict': peer_org_dict,
        'peer_num': peers_num,
        'db_type': db_type
    }
    t = Thread(target=self._update_network_for_addpeers,
               args=(network_config, request_host_ports))
    t.start()
    return self._schema(ins)
def addorgtonetwork(self, id, peer_orgs, orderer_orgs):
    """Add new peer and/or orderer organizations to an existing network.

    Reserves host ports for the new services, extends the network's
    crypto/configtx material, generates the new orgs' MSP definitions,
    updates the system channel and the network model, and kicks off the
    asynchronous network update thread.

    :param id: blockchain network id
    :param peer_orgs: list of new peer org ids, or None
    :param orderer_orgs: new orderer org id(s), or None
    :return: serialized network model
    :raises Exception: port exhaustion or material generation failure
    """
    ins = modelv2.BlockchainNetwork.objects.get(id=id)
    host = ins.host
    consensus_type = ins.consensus_type
    fabric_version = ins.fabric_version
    name = ins.name
    peer_org_dicts = []
    orderer_org_dicts = []
    peer_orgs_temp = ins.peer_orgs
    orderer_orgs_temp = ins.orderer_orgs
    if peer_orgs is not None:
        for org_id in peer_orgs:
            peer_org_dict = org_handler().schema(
                org_handler().get_by_id(org_id))
            peer_org_dicts.append(peer_org_dict)
            peer_orgs_temp.append(org_id)
    if orderer_orgs is not None:
        # NOTE(review): orderer_orgs is used here as a single org id, while
        # createyamlforneworgs() indexes it as a list (orderer_orgs[0]) —
        # confirm the expected shape with the callers.
        org_id = orderer_orgs
        orderer_org_dict = org_handler().schema(
            org_handler().get_by_id(org_id))
        orderer_org_dicts.append(orderer_org_dict)
        orderer_orgs_temp.append(org_id)
    db_type = ins.db_type
    couchdb_enabled = (db_type == 'couchdb')
    # compute how many host ports the new services need
    peer_org_num = len(peer_org_dicts)
    peer_num = 0
    orderer_num = 0
    for org in peer_org_dicts:
        peer_num += org['peerNum']
    for org in orderer_org_dicts:
        orderer_num += len(org['ordererHostnames'])
    if couchdb_enabled:
        request_host_port_num = peer_org_num * CA_NODE_HOSTPORT_NUM + \
            peer_num * PEER_NODE_HOSTPORT_NUM + \
            peer_num * COUCHDB_NODE_HOSTPORT_NUM + \
            orderer_num * ORDERER_NODE_HOSTPORT_NUM
    else:
        request_host_port_num = peer_org_num * CA_NODE_HOSTPORT_NUM + \
            peer_num * PEER_NODE_HOSTPORT_NUM + \
            orderer_num * ORDERER_NODE_HOSTPORT_NUM
    request_host_ports = self.find_free_start_ports(
        request_host_port_num, host)
    if len(request_host_ports) != request_host_port_num:
        error_msg = "no enough ports for network service containers"
        logger.error(error_msg)
        raise Exception(error_msg)
    # network working directory under FABRIC_DIR
    filepath = file_define.commad_create_path(id)
    print("filepath = {}".format(filepath))
    # extend crypto-config.yaml / configtx.yaml with the new orgs
    file_define.update_crypto_config_yaml_file(filepath, peer_org_dicts,
                                               orderer_org_dicts)
    file_define.update_dump_configtx_yaml_file(filepath, peer_org_dicts,
                                               orderer_org_dicts,
                                               request_host_ports)
    origin_dir = os.getcwd()
    try:
        os.chdir(filepath)
        print(os.getcwd())
        # generate certificates for the added orgs only
        call(
            "/opt/fabric_tools/v1_4/cryptogen extend --config=%s/crypto-config.yaml"
            % filepath, shell=True)
    except Exception as e:
        # chain the original error instead of discarding it (was a bare except)
        raise Exception('create certificate or genesis block failed!') from e
    finally:
        # always restore the working directory, even on failure
        os.chdir(origin_dir)
    self.createyamlforneworgs(id, peer_orgs, orderer_orgs)
    self.sys_channelInfo_update(id, peer_org_dicts)
    ins.update(set__peer_orgs=peer_orgs_temp)
    self.sys_channelOrderer_update(id, orderer_org_dicts, request_host_ports)
    ins.update(set__orderer_orgs=orderer_orgs_temp)
    try:
        # create fabric-ca-server-config.yaml for the new peer orgs
        file_define.fabric_ca_config_files(id, fabric_version,
                                           CELLO_MASTER_FABRIC_DIR,
                                           peer_org_dicts)
    except Exception as e:
        raise Exception('create fabric_ca_config_files failed!.') from e
    # network models only hold org ids; pass the details explicitly
    network_config = {
        'id': id,
        'name': name,
        'fabric_version': fabric_version,
        'orderer_org_dicts': orderer_org_dicts,
        'peer_org_dicts': peer_org_dicts,
        'consensus_type': consensus_type,
        'db_type': db_type,
        'host': host
    }
    t = Thread(target=self._update_network,
               args=(network_config, request_host_ports))
    t.start()
    return self._schema(ins)
def create(self, id, name, description, fabric_version, orderer_orgs,
           peer_orgs, host, consensus_type, db_type, create_ts):
    """Create a new blockchain network.

    Validates that every org is still unattached and orderer domains are
    unique, persists the network model, reserves host ports, generates the
    crypto material and genesis block, writes the CA config files and kicks
    off the asynchronous network creation thread.

    :return: serialized network model
    :raises Exception: validation, port-exhaustion or material-generation failure
    """
    peer_org_dicts = []
    orderer_org_dicts = []
    for org_id in peer_orgs:
        peer_org_dict = org_handler().schema(
            org_handler().get_by_id(org_id))
        # a non-empty blockchain_network_id means the org already belongs
        # to another network
        if peer_org_dict['blockchain_network_id']:
            error_msg = ': this org has been added by another network!'
            raise Exception(error_msg)
        peer_org_dicts.append(peer_org_dict)
    for org_id in orderer_orgs:
        orderer_org_dict = org_handler().schema(
            org_handler().get_by_id(org_id))
        if orderer_org_dict['blockchain_network_id']:
            error_msg = ': this org has been added by another network!'
            raise Exception(error_msg)
        orderer_org_dicts.append(orderer_org_dict)
    network = modelv2.BlockchainNetwork(id=id,
                                        name=name,
                                        description=description,
                                        fabric_version=fabric_version,
                                        orderer_orgs=orderer_orgs,
                                        peer_orgs=peer_orgs,
                                        host=host,
                                        consensus_type=consensus_type,
                                        db_type=db_type,
                                        create_ts=create_ts,
                                        status="creating")
    network.save()
    # orderer orgs inside one network must have distinct domains
    order_orgs_domain = []
    for each in orderer_org_dicts:
        if each['domain'] not in order_orgs_domain:
            order_orgs_domain.append(each['domain'])
        else:
            network.delete()
            error_msg = ': orderer\'s domain in one network can not be same!'
            raise Exception(error_msg)
    couchdb_enabled = (db_type == 'couchdb')
    peer_org_num = len(peer_org_dicts)
    peer_num = 0
    orderer_num = 0
    # attach the network reference to each org up front
    # (moved here from _create_network, per the original comment)
    for org in peer_org_dicts:
        peer_num += org['peerNum']
        org_obj = modelv2.Organization.objects.get(id=org['id'])
        org_obj.update(set__network=network)
    for org in orderer_org_dicts:
        orderer_num += len(org['ordererHostnames'])
        org_obj = modelv2.Organization.objects.get(id=org['id'])
        org_obj.update(set__network=network)
    # reserve host ports for every service container
    if couchdb_enabled:
        request_host_port_num = peer_org_num * CA_NODE_HOSTPORT_NUM + \
            peer_num * PEER_NODE_HOSTPORT_NUM + \
            peer_num * COUCHDB_NODE_HOSTPORT_NUM + \
            orderer_num * ORDERER_NODE_HOSTPORT_NUM
    else:
        request_host_port_num = peer_org_num * CA_NODE_HOSTPORT_NUM + \
            peer_num * PEER_NODE_HOSTPORT_NUM + \
            orderer_num * ORDERER_NODE_HOSTPORT_NUM
    request_host_ports = self.find_free_start_ports(
        request_host_port_num, host)
    if len(request_host_ports) != request_host_port_num:
        error_msg = "no enough ports for network service containers"
        logger.error(error_msg)
        raise Exception(error_msg)
    logger.info(
        " before function file_define.commad_create_path,and path is")
    # create public.key / private.key if missing
    isExist = file_define.creat_secret_key_files()
    if not isExist:
        logger.error(
            " after function file_define.creat_secret_key_files, and it is {} "
            .format(isExist))
    # create filepath with network_id at path FABRIC_DIR
    filepath = file_define.commad_create_path(id)
    print("filepath = {}".format(filepath))
    logger.info(
        " after function file_define.commad_create_path,and path is {}".
        format(filepath))
    # create crypto-config.yaml / configtx.yaml at filepath
    file_define.dump_crypto_config_yaml_file(filepath, peer_org_dicts,
                                             orderer_org_dicts)
    file_define.dump_configtx_yaml_file(filepath, consensus_type,
                                        peer_org_dicts, orderer_org_dicts,
                                        fabric_version, request_host_ports)
    blockGenesis_filepath = '{}{}/channel-artifacts'.format(
        CELLO_MASTER_FABRIC_DIR, id)
    try:
        os.system('mkdir -p {}'.format(blockGenesis_filepath))
    except Exception:
        # deliberately best-effort (original swallowed this silently),
        # but at least log the failure now
        logger.error('blockGenesis_filepath file create failed.')
    try:
        fabric_version_dir = fabric_version.replace('.', '_')
        # NOTE(review): the working directory is intentionally left at
        # filepath afterwards (the chdir back is commented out upstream) —
        # confirm downstream code relies on this.
        os.chdir(filepath)
        # create certificates
        call([
            "/opt/fabric_tools/{}/cryptogen".format(fabric_version_dir),
            "generate", "--config=./crypto-config.yaml"
        ])
        # create genesis.block
        call([
            "/opt/fabric_tools/{}/configtxgen".format(fabric_version_dir),
            "-profile", "TwoOrgsOrdererGenesis", "-outputBlock",
            "./channel-artifacts/genesis.block"
        ])
        # copy genesis.block where the k8s orderer nodes expect it
        shutil.copy(
            '{}/genesis.block'.format(blockGenesis_filepath),
            '{}{}/crypto-config/ordererOrganizations/'.format(
                CELLO_MASTER_FABRIC_DIR, id))
    except Exception as e:
        self.remove_network(network)
        # chain the original error instead of discarding it
        raise Exception('create certificate or genesis block failed!') from e
    try:
        # create fabric-ca-server-config.yaml files
        file_define.fabric_ca_config_files(id, fabric_version,
                                           CELLO_MASTER_FABRIC_DIR,
                                           peer_org_dicts)
    except Exception as e:
        self.remove_network(network)
        raise Exception('create fabric_ca_config_files failed!.') from e
    # network models only hold org ids; pass the details explicitly
    network_config = {
        'id': id,
        'name': name,
        'fabric_version': fabric_version,
        'orderer_org_dicts': orderer_org_dicts,
        'peer_org_dicts': peer_org_dicts,
        'consensus_type': consensus_type,
        'db_type': db_type,
        'host': host
    }
    t = Thread(target=self._create_network,
               args=(network_config, request_host_ports))
    t.start()
    return self._schema(network)
def _log_org_create_failure(op_log_handler, cur_time, op_name, op_object,
                            operator, op_result, error_msg, res_code,
                            op_details=None):
    """Record one failed organization_create attempt in the operating log.

    Mutates *op_result* in place (same dict the caller keeps), matching the
    original inline behavior.
    """
    op_result['resDes'] = "ERROR"
    op_result['resCode'] = res_code
    op_result['errorMsg'] = error_msg
    if op_details is None:
        op_log_handler.create(opDate=cur_time, opName=op_name,
                              opObject=op_object, opResult=op_result,
                              operator=operator)
    else:
        op_log_handler.create(opDate=cur_time, opName=op_name,
                              opObject=op_object, opResult=op_result,
                              operator=operator, opDetails=op_details)


def organization_create():
    """Create a peer or orderer organization from the JSON request body.

    Validates the payload (name/type/domain required, no '_' in name,
    type-specific constraints), records every attempt in the operating log,
    and returns a gaea-style OK response with the created organization.

    :raises UnsupportedMediaType: non-JSON request body
    :raises BadRequest: validation failure
    :raises InternalServerError: creation failure in the handler
    """
    request_debug(r, logger)
    # operating-log bookkeeping
    cur_time = datetime.datetime.utcnow()
    opName = sys._getframe().f_code.co_name  # current function name
    opObject = "Organization"
    operator = "admin"
    opResult = {}
    op_log_handler = OperatorLogHandler()
    if r.content_type.startswith("application/json"):
        body = dict(r.get_json(force=True, silent=True))['organization']
        opDetails = body
    else:
        error_msg = "request header content-type is not supported, use application/json"
        _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                operator, opResult, error_msg, 400)
        raise UnsupportedMediaType(error_msg)
    name = body.get('name', None)
    type = body.get('type', None)
    domain = body.get('domain', None)
    if name is None or name == '':
        error_msg = "name is required and not allowed to be ''"
        _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                operator, opResult, error_msg, 400, opDetails)
        raise BadRequest(msg=error_msg)
    # '_' in the name/ID breaks orderer startup with
    # "failed: error converting config to map: Illegal characters in key: [Group]"
    if '_' in name:
        error_msg = '_ is not allowed in name or ID'
        _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                operator, opResult, error_msg, 400, opDetails)
        raise BadRequest(msg=error_msg)
    if domain is None or domain == '':
        error_msg = "domain is required and not allowed to be ''"
        _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                operator, opResult, error_msg, 400, opDetails)
        raise BadRequest(msg=error_msg)
    if type is None:
        error_msg = "type is required"
        _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                operator, opResult, error_msg, 400, opDetails)
        raise BadRequest(msg=error_msg)
    if body['type'] not in ['peer', 'orderer']:
        error_msg = "only peer or orderer type is supported"
        _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                operator, opResult, error_msg, 400, opDetails)
        raise BadRequest(msg=error_msg)
    ordererHostnames = body.get('ordererHostnames', None)
    peerNum = body.get('peerNum', None)
    ca = body.get('ca', {})
    id = uuid4().hex
    description = body.get('description', "")
    if body['type'] == 'peer':
        if ordererHostnames is not None:
            error_msg = "peer type organizations don't need ordererHostnames"
            _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                    operator, opResult, error_msg, 400, opDetails)
            raise BadRequest(msg=error_msg)
        if peerNum is None:
            peerNum = 2  # default peer count when the caller does not specify one
        try:
            result = org_handler().create(id=id,
                                          name=name,
                                          description=description,
                                          type=type,
                                          domain=domain,
                                          peerNum=int(peerNum),
                                          ca=ca,
                                          ordererHostnames=[])
            opResult['resDes'] = "OK"
            opResult['resCode'] = 200
            opResult['errorMsg'] = ''
            op_log_handler.create(opDate=cur_time, opName=opName,
                                  opObject=opObject, opResult=opResult,
                                  operator=operator, opDetails=opDetails)
            return make_ok_gaea_resp('organization', result)
        except Exception:
            error_msg = "internal server error"
            _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                    operator, opResult, error_msg, 500, opDetails)
            raise InternalServerError()
    else:
        if peerNum is not None:
            error_msg = "orderer type organizations don't need peers"
            _log_org_create_failure(op_log_handler, cur_time, opName, opObject,
                                    operator, opResult, error_msg, 400, opDetails)
            raise BadRequest(msg=error_msg)
        try:
            result = org_handler().create(id=id,
                                          name=name,
                                          description=description,
                                          type=type,
                                          domain=domain,
                                          ca=ca,
                                          peerNum=0,
                                          ordererHostnames=ordererHostnames)
            opResult['resDes'] = "OK"
            opResult['resCode'] = 200
            opResult['errorMsg'] = ''
            op_log_handler.create(opDate=cur_time, opName=opName,
                                  opObject=opObject, opResult=opResult,
                                  operator=operator, opDetails=opDetails)
            return make_ok_gaea_resp('organization', result)
        except Exception:
            error_msg = "internal server error"
            # NOTE(review): this path used record_operating_log() while every
            # other path used create(); kept as-is — confirm which API is intended.
            op_log_handler.record_operating_log(opDate=cur_time,
                                                opName=opName,
                                                opObject=opObject,
                                                resCode=500,
                                                operator=operator,
                                                errorMsg=error_msg,
                                                opDetails=opDetails)
            raise InternalServerError()
def organization_delete(organization_id):
    """Delete an organization by id, refusing when it is attached to a network.

    Every outcome (forbidden, not found, success, failure) is recorded in the
    operating log before the response/exception is produced.

    :param organization_id: id of the organization to delete
    :raises Forbidden: organization still referenced by a network
    :raises NotFound: no such organization
    :raises InternalServerError: handler delete failed
    """
    request_debug(r, logger)
    request_data = org_handler().get_by_id(organization_id)
    # operating-log bookkeeping
    cur_time = datetime.datetime.utcnow()
    opName = sys._getframe().f_code.co_name
    opObject = "Organization"
    operator = "admin"
    opResult = {}
    opDetails = {}
    opDetails['organization_id'] = organization_id
    op_log_handler = OperatorLogHandler()
    if request_data is not None and "id" in request_data:
        if request_data.network is not None:
            error_msg = "network has created, organization {} is forbidden to delete.".format(
                organization_id)
            logger.warning(error_msg)
            opResult['resDes'] = "ERROR"
            opResult['resCode'] = 500
            opResult['errorMsg'] = error_msg
            # opDetails added for consistency with every other log entry here
            op_log_handler.create(opDate=cur_time, opName=opName,
                                  opObject=opObject, opResult=opResult,
                                  operator=operator, opDetails=opDetails)
            raise Forbidden(msg=error_msg)
    else:
        error_msg = "organization delete without enough data"
        logger.warning(error_msg)
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 404
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time, opName=opName,
                              opObject=opObject, opResult=opResult,
                              operator=operator, opDetails=opDetails)
        raise NotFound(msg=error_msg)
    # fixed copy-paste log message (said "host delete")
    logger.debug("organization delete with id={0}".format(organization_id))
    if org_handler().delete(id=organization_id):
        opResult['resDes'] = "OK"
        opResult['resCode'] = 200
        opResult['errorMsg'] = ''
        op_log_handler.create(opDate=cur_time, opName=opName,
                              opObject=opObject, opResult=opResult,
                              operator=operator, opDetails=opDetails)
        return make_ok_gaea_resp(resource='delete success!', result={})
    else:
        error_msg = "Failed to delete organization {}".format(organization_id)
        logger.warning(error_msg)
        opResult['resDes'] = "ERROR"
        opResult['resCode'] = 500
        opResult['errorMsg'] = error_msg
        op_log_handler.create(opDate=cur_time, opName=opName,
                              opObject=opObject, opResult=opResult,
                              operator=operator, opDetails=opDetails)
        raise InternalServerError(msg=error_msg)
def delete(self, network):
    """Tear down a network's k8s resources via namespace deletion plus PV cleanup.

    Deleting the namespace removes every namespaced resource in one shot;
    this relies on the API server running with
    '--admission-control=NamespaceLifecycle'. Persistent volumes are
    cluster-scoped, so each org's pv.yaml (and the kafka PVs) are deleted
    explicitly afterwards.

    :param network: BlockchainNetwork model instance being deleted
    """
    net_id = network.id
    host = network.host
    # build a python client against the network's k8s host
    kube_config = self._build_kube_config(host)
    operation = K8sNetworkOperation(kube_config)
    peer_org_names = []
    orderer_org_names = []
    for org_id in network.peer_orgs:
        peer_org_dict = org_handler().schema(
            org_handler().get_by_id(org_id))
        peer_org_names.append(peer_org_dict['name'])
    for org_id in network.orderer_orgs:
        orderer_org_dict = org_handler().schema(
            org_handler().get_by_id(org_id))
        orderer_org_names.append(orderer_org_dict['name'])
    deploy_dir = '/opt/fabric/{}/deploy'.format(net_id)
    # deleting the namespace cascades to all namespaced resources
    # NOTE(review): yaml.load_all without an explicit Loader is unsafe on
    # untrusted input; these files are locally generated, but consider
    # yaml.safe_load_all.
    with open('{deploy_dir}/namespace.yaml'.format(
            deploy_dir=deploy_dir)) as f:
        resources = yaml.load_all(f)
        operation.delete_k8s_resource(resources)
    # persistent volumes are not namespaced — delete them per org
    for peer_org in peer_org_names:
        peer_dir = '{deploy_dir}/{org_name}'.format(deploy_dir=deploy_dir,
                                                    org_name=peer_org)
        with open('{}/pv.yaml'.format(peer_dir)) as f:
            resources = yaml.load_all(f)
            operation.delete_k8s_resource(resources)
    for orderer_org in orderer_org_names:
        orderer_dir = '{deploy_dir}/{org_name}'.format(
            deploy_dir=deploy_dir, org_name=orderer_org)
        with open('{}/pv.yaml'.format(orderer_dir)) as f:
            resources = yaml.load_all(f)
            operation.delete_k8s_resource(resources)
    # NOTE(review): kafka_pv.yaml is opened unconditionally although the
    # original comment said "if consensus_type is kafka" — this raises
    # FileNotFoundError for non-kafka networks unless the file always exists.
    with open('{deploy_dir}/kafka_pv.yaml'.format(
            deploy_dir=deploy_dir)) as f:
        resources = yaml.load_all(f)
        operation.delete_k8s_resource(resources)