def update(self, network_config, request_host_ports):
    """Extend an already-running fabric network with newly requested
    orderer, couchdb, peer and ca services via docker-compose.

    :param network_config: network description dict ('id', 'host',
        'fabric_version', 'orderer_org_dicts', 'peer_org_dicts', ...)
    :param request_host_ports: pre-allocated host ports, consumed
        sequentially through ``index``
    :return: the containers started by ``project.up``
    """
    # Only ensure network_files_dir exists; assume that if no exception
    # was raised during crypto generation, everything is OK.
    # The commented check below is wrong because network_files_dir is on
    # the worker node, not on the master:
    # network_files_dir = CELLO_WORKER_FABRIC_DIR + net_id
    # if not os.path.isdir(network_files_dir):
    #     raise IOError("blockchain network crypto-config \
    #                    and genesis block couldn't be found")
    net_id = network_config['id']  # network id 0-12 byte used as name prefix
    net_dir = CELLO_MASTER_FABRIC_DIR + net_id
    host = network_config['host']
    composefile_dict = {
        'version': '3.2',
        'networks': {'celloNet': None},
        'services': {}
    }
    # a valid worker_api in db looks like "tcp://x.x.x.x:2375"
    container_service_ip = host.worker_api.split(':')[1][2:]
    service_names = []
    orderer_service_names = []
    couchdb_service_names = []
    services_dict = {}
    index = 0

    # 1) orderer services
    for orderer_org in network_config['orderer_org_dicts']:
        orderer_domain = orderer_org['domain']
        org_name = orderer_org['name']
        for hostname in orderer_org['ordererHostnames']:
            orderer_service_name = '.'.join([hostname, orderer_domain])
            service_names.append(orderer_service_name)
            orderer_service_names.append(orderer_service_name)
            host_port = request_host_ports[index]
            index += 1
            services_dict.update(
                self._construct_orderer_docker_service(
                    net_id, org_name, orderer_domain, hostname,
                    network_config['fabric_version'], host_port))
            # Save the orderer service endpoint to db. If the container
            # fails to run, the network is deleted and the corresponding
            # endpoints are deleted automatically with it.
            orderer_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=host_port,
                service_name=orderer_service_name,
                service_type='orderer',
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            orderer_service_endpoint.save()

    # 2) couchdb services, one per peer
    for peer_org in network_config['peer_org_dicts']:
        org_name = peer_org['name']
        org_domain = peer_org['domain']
        for i in range(int(peer_org['peerNum'])):
            peer_name = 'peer{}'.format(i)
            couchdb_service_name = '.'.join(
                ['couchdb', peer_name, org_name, org_domain])
            # original appended couchdb_service_name to service_names
            # twice; append once
            service_names.append(couchdb_service_name)
            couchdb_service_names.append(couchdb_service_name)
            couch_host_port = request_host_ports[index]
            index += 1
            services_dict.update(
                self._construct_couchdb_docker_service(
                    net_id, couchdb_service_name,
                    network_config['fabric_version'], couch_host_port))
            couchdb_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=couch_host_port,
                service_name=couchdb_service_name,
                service_type='couchdb',
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            couchdb_service_endpoint.save()

    # 3) peer and ca services; every peer starts after all orderers and
    # couchdbs (loop-invariant, hoisted out of the loops)
    depends_on = orderer_service_names + couchdb_service_names
    for peer_org in network_config['peer_org_dicts']:
        org_name = peer_org['name']
        org_domain = peer_org['domain']
        for i in range(int(peer_org['peerNum'])):
            peer_name = 'peer{}'.format(i)
            peer_service_name = '.'.join([peer_name, org_name, org_domain])
            service_names.append(peer_service_name)
            host_ports = [
                request_host_ports[index],
                request_host_ports[index + 1]
            ]
            index += 2
            services_dict.update(
                self._construct_peer_docker_service(
                    net_id, org_name, org_domain, peer_name,
                    network_config['fabric_version'], host_ports,
                    depends_on))
            # first host port is grpc, second is the event port
            for port_idx in range(len(host_ports)):
                peer_service_endpoint = modelv2.ServiceEndpoint(
                    id=uuid4().hex,
                    service_ip=container_service_ip,
                    service_port=host_ports[port_idx],
                    service_name=peer_service_name,
                    service_type='peer',
                    peer_port_proto=PEER_PORT_GRPC
                    if port_idx == 0 else PEER_PORT_EVENT,
                    network=modelv2.BlockchainNetwork.objects.get(
                        id=net_id))
                peer_service_endpoint.save()

        # one ca service per peer org
        ca_service_name = '.'.join(['ca', org_name, org_domain])
        service_names.append(ca_service_name)
        org_full_domain = '.'.join([org_name, org_domain])
        pk_path = '{net_dir}/crypto-config/peerOrganizations/{org_dir}/ca/'.\
            format(net_dir=net_dir, org_dir=org_full_domain)
        ca_key_file = self._get_ca_private_key(pk_path)
        host_port = request_host_ports[index]
        index += 1
        services_dict.update(
            self._construct_ca_docker_service(
                net_id, org_name, org_domain, ca_key_file,
                network_config['fabric_version'], host_port))
        ca_service_endpoint = modelv2.ServiceEndpoint(
            id=uuid4().hex,
            service_ip=container_service_ip,
            service_port=host_port,
            service_name=ca_service_name,
            service_type='ca',
            network=modelv2.BlockchainNetwork.objects.get(id=net_id))
        ca_service_endpoint.save()

    # TODO: first test 'solo'
    # if network_config['consensus_type'] == 'kafka':
    #     pass
    composefile_dict['services'].update(services_dict)
    deploy_dir = '{}/deploy/'.format(net_dir)
    composefile = '{}/docker-compose.yaml'.format(deploy_dir)
    backupfile = '{}/docker-compose-back.yaml'.format(deploy_dir)
    # Back up the current compose file so it can be restored after the
    # new services are up (shutil.copy replaces the fragile
    # os.system('cp ...') of the original; guarded like create_peer_org).
    if os.path.exists(composefile):
        shutil.copy(composefile, backupfile)
    # the compose file handed to `project.up` contains only the NEW
    # services, so only those containers are (re)created
    with open(composefile, 'w') as f:
        yaml.dump(composefile_dict, f)
    project = compose_get_project(project_dir=deploy_dir,
                                  host=host.worker_api,
                                  project_name=net_id[:12])
    containers = project.up(detached=True, timeout=5)
    # restore the original compose file, then merge the new services in,
    # so the file on disk describes the whole network again
    if os.path.exists(backupfile):
        shutil.copy(backupfile, composefile)
    with open(composefile) as f:
        # safe_load: no arbitrary object construction (yaml.load was
        # deprecated without an explicit Loader)
        base_compose = yaml.safe_load(f)
    merged_services = base_compose['services']
    merged_services.update(services_dict)
    composefile_dict_back = {
        'version': '3.2',
        'networks': {'celloNet': None},
        'services': merged_services
    }
    with open(composefile, 'w') as f:
        yaml.dump(composefile_dict_back, f)
    return containers
def create_orderer_org(self, orderer_org, consensus_type, host, net_id,
                       net_name, fabric_version, request_host_ports,
                       portid):
    """Deploy one orderer organization of a fabric network on kubernetes.

    Renders the org's k8s yaml (pv + one deployment per orderer
    hostname), applies it through the k8s client, and records one
    ServiceEndpoint per orderer in the db.

    :param orderer_org: org dict with 'name', 'domain', 'ordererHostnames'
    :param consensus_type: 'kafka' also deploys the kafka/zookeeper stack
    :param host: host model providing k8s_param and clusters
    :param portid: single-element list used as an in/out cursor into
        ``request_host_ports``; updated before returning
    """
    index = portid[0]
    orderer_org_names = []
    deploy_dir = '/opt/fabric/{}/deploy'.format(net_id)
    if not os.path.exists(deploy_dir):
        os.mkdir(deploy_dir)
    # construct python client to communicate with k8s
    kube_config = self._build_kube_config(host)
    operation = K8sNetworkOperation(kube_config)

    def _apply(path):
        # deploy every yaml document contained in the file at `path`
        with open(path) as f:
            # safe_load_all: no arbitrary object construction
            operation.deploy_k8s_resource(yaml.safe_load_all(f))

    node_vip = host.k8s_param.get('K8SNodeVip')
    # the original compared "node_vip is ''" (identity, not equality);
    # truthiness also covers the None case
    if not node_vip:
        node_vip = operation.get_one_availabe_node_ip()
    if not node_vip:
        raise Exception("No ready nodes in this k8s cluster")
    nfs_server = host.k8s_param.get('K8SNfsServer')
    orderer_template = getTemplate("orderer1_4.yaml")
    pv_template = getTemplate("pv.yaml")
    # has this network already been deployed on this cluster?
    exist = any(cluster == net_id for cluster in host.clusters)
    if not exist:
        # first create the namespace for this network
        # (one network == one k8s namespace)
        namespace_template = getTemplate("namespace.yaml")
        namespace_file = '{deploy_dir}/namespace.yaml'.format(
            deploy_dir=deploy_dir)
        render(namespace_template, namespace_file, networkName=net_name)
        _apply(namespace_file)
        # kafka support: the kafka/zookeeper stack is shared by the whole
        # network, so it is only created on first deploy.
        # NOTE(review): indentation in the source was ambiguous here;
        # creating the stack twice would fail on the already-existing
        # data dirs and k8s resources, so it is kept inside the
        # first-deploy branch - confirm against upstream.
        if consensus_type == 'kafka':
            kafka_template = getTemplate("kafka.yaml")
            zookeeper_template = getTemplate("zookeeper.yaml")
            zookeeper_pvc_template = getTemplate("zookeeper_pvc.yaml")
            zookeeper_pv_template = getTemplate("zookeeper_pv.yaml")
            kafka_pvc_template = getTemplate("kafka_pvc.yaml")
            kafka_pv_template = getTemplate("kafka_pv.yaml")
            zookeeper_pv_deploy_file = '{}/zookeeper_pv.yaml'.format(
                deploy_dir)
            zookeeper_pvc_deploy_file = '{}/zookeeper_pvc.yaml'.format(
                deploy_dir)
            kafka_pv_deploy_file = '{}/kafka_pv.yaml'.format(deploy_dir)
            kafka_pvc_deploy_file = '{}/kafka_pvc.yaml'.format(deploy_dir)
            kafka_deploy_file = '{}/kafka.yaml'.format(deploy_dir)
            zookeeper_deploy_file = '{}/zookeeper.yaml'.format(deploy_dir)
            # data dirs backing the kafka/zookeeper persistent volumes
            for i in range(KAFKA_NODE_NUM):
                os.makedirs('/opt/fabric/{}/data/kafka-{}'.format(
                    net_id, i))
            for i in range(ZOOKEEPER_NODE_NUM):
                os.makedirs('/opt/fabric/{}/data/zoo-{}'.format(
                    net_id, i))
            render(zookeeper_pv_template, zookeeper_pv_deploy_file,
                   path='/{}/data'.format(net_id),
                   networkName=net_name,
                   nfsServer=nfs_server)
            render(kafka_pv_template, kafka_pv_deploy_file,
                   path='/{}/data'.format(net_id),
                   networkName=net_name,
                   nfsServer=nfs_server)
            render(kafka_pvc_template, kafka_pvc_deploy_file,
                   networkName=net_name)
            render(zookeeper_pvc_template, zookeeper_pvc_deploy_file,
                   networkName=net_name)
            render(zookeeper_template, zookeeper_deploy_file,
                   networkName=net_name)
            render(kafka_template, kafka_deploy_file,
                   networkName=net_name)
            _apply(kafka_pv_deploy_file)
            _apply(kafka_pvc_deploy_file)
            _apply(zookeeper_pv_deploy_file)
            _apply(zookeeper_pvc_deploy_file)
            _apply(zookeeper_deploy_file)
            # kafka depends on zookeeper being up
            time.sleep(5)
            _apply(kafka_deploy_file)
            time.sleep(10)

    orderer_domain = orderer_org['domain']
    org_name = orderer_org['name']
    orderer_org_names.append(org_name)
    org_deploydir = '{deploy_dir}/{org_name}'.format(
        deploy_dir=deploy_dir, org_name=org_name)
    # guarded: the original unconditional os.mkdir crashed on re-run
    if not os.path.exists(org_deploydir):
        os.mkdir(org_deploydir)
    orderer_pv_file = '{org_deploydir}/pv.yaml'.format(
        org_deploydir=org_deploydir)
    org_data_path = '/opt/fabric/{}/data/{}'.format(net_id, org_name)
    # This differs from fabric_on_kubernetes: a network may own more
    # than one orderer org, so the credential path is not org-scoped.
    render(pv_template, orderer_pv_file,
           networkName=net_name,
           credentialPV=org_name + '-credentialpv',
           dataPV=org_name + '-datapv',
           credentialPath='/{net_id}/crypto-config/ordererOrganizations/'.
           format(net_id=net_id),
           dataPath='/{net_id}/data/{org_name}'.
           format(net_id=net_id, org_name=org_name),
           nfsServer=nfs_server)
    for hostname in orderer_org['ordererHostnames']:
        host_port = request_host_ports[index]
        orderer_service_name = '.'.join([hostname, orderer_domain])
        host_deploy_file = \
            '{org_deploydir}/deploy_{orderer_service_name}.yaml'.format(
                org_deploydir=org_deploydir,
                orderer_service_name=orderer_service_name)
        k8s_orderer_name = '{}-{}'.format(hostname, org_name)
        # the template expects the listen port as a quoted string
        temp_port = '"' + str(host_port) + '"'
        os.makedirs('{}/{}'.format(org_data_path, orderer_service_name))
        render(orderer_template, host_deploy_file,
               networkName=net_name,
               orgDomain=orderer_domain,
               ordererSvcName=k8s_orderer_name,
               podName=k8s_orderer_name,
               fabVersion=fabric_version,
               localMSPID='{}MSP'.format(org_name[0:1].upper() +
                                         org_name[1:]),
               mspPath='{orderer_domain}/orderers/{orderer_service_name}/msp'.
               format(orderer_domain=orderer_domain,
                      orderer_service_name=orderer_service_name),
               tlsPath='{orderer_domain}/orderers/{orderer_service_name}/tls'.
               format(orderer_domain=orderer_domain,
                      orderer_service_name=orderer_service_name),
               ordererDataPath=orderer_service_name,
               credentialPV=org_name + '-credentialpv',
               dataPV=org_name + '-datapv',
               ordererID=hostname,
               listenPort=temp_port,
               containerPort=host_port,
               targetPort=host_port,
               port=host_port,
               nodePort=host_port)
        # Save the orderer service endpoint to db. If the pod fails to
        # run, the network is deleted and its endpoints are deleted
        # automatically with it.
        orderer_service_endpoint = modelv2.ServiceEndpoint(
            id=uuid4().hex,
            service_ip=node_vip,
            service_port=host_port,
            service_name=orderer_service_name,
            service_type='orderer',
            org_name=org_name,
            network=modelv2.BlockchainNetwork.objects.get(id=net_id))
        orderer_service_endpoint.save()
        index += 1
    # deploy the org pv first, then every rendered orderer deployment
    _apply('{}/pv.yaml'.format(org_deploydir))
    for deploy_file in os.listdir(org_deploydir):
        if deploy_file.startswith('deploy_'):
            _apply('{}/{}'.format(org_deploydir, deploy_file))
    portid[0] = index
def create_peer_org(self, peer_org, couchdb_enabled, host, net_id, net_name,
                    fabric_version, request_host_ports, portid, peer_num):
    """Deploy one peer organization (ca + peers, optionally couchdbs)
    of a fabric network on kubernetes.

    :param peer_org: org dict with 'name', 'domain', 'peerNum'
    :param couchdb_enabled: whether each peer gets a couchdb state db
    :param host: host model providing k8s_param and clusters
    :param portid: single-element list used as an in/out cursor into
        ``request_host_ports``; updated before returning
    :param peer_num: number of requested peers (unused here; the loop is
        driven by peer_org['peerNum'])
    """
    index = portid[0]
    peer_org_names = []
    net_dir = CELLO_MASTER_FABRIC_DIR + net_id
    deploy_dir = '/opt/fabric/{}/deploy'.format(net_id)
    if not os.path.exists(deploy_dir):
        os.mkdir(deploy_dir)
    # construct python client to communicate with k8s
    kube_config = self._build_kube_config(host)
    operation = K8sNetworkOperation(kube_config)

    def _apply(path):
        # deploy every yaml document contained in the file at `path`
        with open(path) as f:
            # safe_load_all: no arbitrary object construction
            operation.deploy_k8s_resource(yaml.safe_load_all(f))

    node_vip = host.k8s_param.get('K8SNodeVip')
    # the original compared "node_vip is ''" (identity, not equality);
    # truthiness also covers the None case
    if not node_vip:
        node_vip = operation.get_one_availabe_node_ip()
    if not node_vip:
        raise Exception("No ready nodes in this k8s cluster")
    nfs_server = host.k8s_param.get('K8SNfsServer')
    # pick the peer template matching the state-db choice; only fabric
    # 1.4.2 templates exist for this path
    peer_template = None
    if couchdb_enabled is True and fabric_version == '1.4.2':
        peer_template = getTemplate("peer_couchdb.yaml")
    if couchdb_enabled is False and fabric_version == '1.4.2':
        peer_template = getTemplate("peer1_4.yaml")
    pv_template = getTemplate("pv.yaml")
    namespace_template = getTemplate("namespace.yaml")
    ca_template = getTemplate("ca.yaml")
    # one network == one k8s namespace
    namespace_file = '{deploy_dir}/namespace.yaml'.format(
        deploy_dir=deploy_dir)
    render(namespace_template, namespace_file, networkName=net_name)
    # create the namespace only when this network is new to the cluster
    exist = any(cluster == net_id for cluster in host.clusters)
    if not exist:
        _apply(namespace_file)

    org_name = peer_org['name']
    peer_org_names.append(org_name)
    org_domain = peer_org['domain']
    org_fullDomain_name = '.'.join([org_name, org_domain])
    org_deploydir = '{deploy_dir}/{org_name}'.format(
        deploy_dir=deploy_dir, org_name=org_name)
    org_data_path = '/opt/fabric/{}/data/{}'.format(net_id, org_name)
    if not os.path.exists(org_deploydir):
        os.mkdir(org_deploydir)
    org_pv_file = '{org_deploydir}/pv.yaml'.format(
        org_deploydir=org_deploydir)
    # This differs from fabric_on_kubernetes: a network may own more
    # than one org, so the credential path is scoped by the org's full
    # domain.
    render(pv_template, org_pv_file,
           networkName=net_name,
           credentialPV=org_name + '-credentialpv',
           dataPV=org_name + '-datapv',
           credentialPath='/{net_id}/crypto-config/peerOrganizations/'
                          '{org_fullDomain_name}/'.format(
                              net_id=net_id,
                              org_fullDomain_name=org_fullDomain_name),
           dataPath='/{net_id}/data/{org_name}'.format(
               net_id=net_id, org_name=org_name),
           nfsServer=nfs_server)
    _apply(org_pv_file)

    # ---- ca service ----
    org_ca_file = '{org_deploydir}/ca.yaml'.format(
        org_deploydir=org_deploydir)
    host_port = request_host_ports[index]
    ca_service_name = '.'.join(['ca', org_name, org_domain])
    k8s_ca_name = 'ca-{}'.format(org_name)
    sk_file = ''
    ca_dir = ('{net_dir}/crypto-config/peerOrganizations/'
              '{org_fullDomain_name}/ca/').format(
                  net_dir=net_dir,
                  org_fullDomain_name=org_fullDomain_name)
    # find out the ca secret key (renamed from `f` to avoid shadowing
    # the file-handle convention used elsewhere in this method)
    for fname in os.listdir(ca_dir):
        if fname.endswith("_sk"):
            sk_file = fname
    cert_file = ('/etc/hyperledger/fabric-ca-server-config/'
                 'ca.{}-cert.pem').format(org_fullDomain_name)
    key_file = '/etc/hyperledger/fabric-ca-server-config/{}'.format(
        sk_file)
    command = ("'fabric-ca-server start -b admin:adminpw -d --config "
               "/etc/hyperledger/fabric-ca-server-config/"
               "fabric-ca-server-config.yaml'")
    if not os.path.exists('{}/ca'.format(org_data_path)):
        os.makedirs('{}/ca'.format(org_data_path))
    render(ca_template, org_ca_file,
           command=command,
           networkName=net_name,
           orgDomain=org_fullDomain_name,
           caSvcName=k8s_ca_name,
           podName=k8s_ca_name,
           fabVersion=fabric_version,
           tlsCert=cert_file,
           tlsKey=key_file,
           caPath='ca/',
           caDataPath='ca/',
           credentialPV=org_name + '-credentialpv',
           dataPV=org_name + '-datapv',
           nodePort=host_port)
    ca_service_endpoint = modelv2.ServiceEndpoint(
        id=uuid4().hex,
        service_ip=node_vip,
        service_port=host_port,
        service_name=ca_service_name,
        service_type='ca',
        org_name=org_name,
        network=modelv2.BlockchainNetwork.objects.get(id=net_id))
    ca_service_endpoint.save()
    index += 1
    _apply('{}/ca.yaml'.format(org_deploydir))

    # ---- peers (optionally with a couchdb each) ----
    for i in range(int(peer_org['peerNum'])):
        peer_name = 'peer{}'.format(i)
        peer_service_name = '.'.join([peer_name, org_name, org_domain])
        k8s_peer_name = '{}-{}'.format(peer_name, org_name)
        peer_deploy_file = \
            '{org_deploydir}/deploy_{peer_service_name}.yaml'.format(
                org_deploydir=org_deploydir,
                peer_service_name=peer_service_name)
        if not os.path.exists('{}/{}'.format(org_data_path,
                                             peer_service_name)):
            os.makedirs('{}/{}'.format(org_data_path, peer_service_name))
        if couchdb_enabled is True:
            couchdb_service_name = 'couchdb.{peer_service_name}'.format(
                peer_service_name=peer_service_name)
            if not os.path.exists('{}/{}'.format(org_data_path,
                                                 couchdb_service_name)):
                os.makedirs('{}/{}'.format(org_data_path,
                                           couchdb_service_name))
            # two peer ports + one couchdb port
            host_ports = [
                request_host_ports[index],
                request_host_ports[index + 1],
                request_host_ports[index + 2]
            ]
            index = index + 3
            render(peer_template, peer_deploy_file,
                   networkName=net_name,
                   orgDomain=org_fullDomain_name,
                   peerSvcName=k8s_peer_name,
                   podName=k8s_peer_name,
                   fabVersion=fabric_version,
                   peerID=peer_name,
                   corePeerID=k8s_peer_name,
                   peerAddress='{}:7051'.format(k8s_peer_name),
                   localMSPID='{}MSP'.format(org_name[0:1].upper() +
                                             org_name[1:]),
                   mspPath='peers/{}/msp'.format(peer_service_name),
                   tlsPath='peers/{}/tls'.format(peer_service_name),
                   dataPath=peer_service_name,
                   couchDataPath=couchdb_service_name,
                   credentialPV=org_name + '-credentialpv',
                   dataPV=org_name + '-datapv',
                   nodePort1=host_ports[0],
                   nodePort2=host_ports[1],
                   nodePort3=host_ports[2])
            couchdb_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=node_vip,
                service_port=host_ports[2],
                service_name=couchdb_service_name,
                service_type='couchdb',
                org_name=org_name,
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            couchdb_service_endpoint.save()
        else:
            host_ports = [
                request_host_ports[index],
                request_host_ports[index + 1]
            ]
            index = index + 2
            render(peer_template, peer_deploy_file,
                   networkName=net_name,
                   orgDomain=org_fullDomain_name,
                   peerSvcName=k8s_peer_name,
                   podName=k8s_peer_name,
                   fabVersion=fabric_version,
                   peerID=peer_name,
                   corePeerID=k8s_peer_name,
                   peerAddress='{}:7051'.format(k8s_peer_name),
                   localMSPID='{}MSP'.format(org_name[0:1].upper() +
                                             org_name[1:]),
                   mspPath='peers/{}/msp'.format(peer_service_name),
                   tlsPath='peers/{}/tls'.format(peer_service_name),
                   dataPath=peer_service_name,
                   credentialPV=org_name + '-credentialpv',
                   dataPV=org_name + '-datapv',
                   nodePort1=host_ports[0],
                   nodePort2=host_ports[1])
        # Only peer0 used to get persistent storage; commented out for
        # now, so no persistence at all:
        # if peer_service_name.split('.')[0][-1] == '0':
        #     peer0DataPath(peer_deploy_file, peer_service_name)
        # the first two host ports map onto the fabric 1.4 peer protocols
        # (loop var renamed from `i` to avoid shadowing the peer index)
        for port_idx in range(2):
            peer_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=node_vip,
                service_port=host_ports[port_idx],
                service_name=peer_service_name,
                service_type='peer',
                org_name=org_name,
                peer_port_proto=fabric_peer_proto_14[port_idx],
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            peer_service_endpoint.save()
        _apply(peer_deploy_file)
    portid[0] = index
def create_orderer_org(self, orderer_org, consensus_type, host, net_id,
                       net_name, fabric_version, request_host_ports,
                       portid):
    """Create the docker-compose services for a single orderer org and
    bring them up on the network's worker host.

    :param orderer_org: org dict with 'name', 'domain', 'ordererHostnames'
    :param portid: single-element list used as an in/out cursor into
        ``request_host_ports``; updated before returning
    :return: the containers started by ``project.up``
    """
    port_cursor = portid[0]
    all_service_names = []
    orderer_names = []
    compose_services = {}
    # worker_api in db looks like "tcp://x.x.x.x:2375" -> take the ip
    endpoint_ip = host.worker_api.split(':')[1][2:]
    compose_doc = {
        'version': '3.2',
        'networks': {'celloNet': None},
        'services': {}
    }
    net_dir = CELLO_MASTER_FABRIC_DIR + net_id
    for hostname in orderer_org['ordererHostnames']:
        domain = orderer_org['domain']
        svc_name = '.'.join([hostname, domain])
        all_service_names.append(svc_name)
        orderer_names.append(svc_name)
        org_name = orderer_org['name']
        port = request_host_ports[port_cursor]
        port_cursor = port_cursor + 1
        compose_services.update(
            self._construct_orderer_docker_service(
                net_id, org_name, domain, hostname, fabric_version, port))
        # Persist the orderer endpoint. Should the container fail to
        # start, the network is deleted and the endpoints referencing it
        # are removed automatically.
        endpoint = modelv2.ServiceEndpoint(
            id=uuid4().hex,
            service_ip=endpoint_ip,
            service_port=port,
            service_name=svc_name,
            service_type='orderer',
            org_name=org_name,
            network=modelv2.BlockchainNetwork.objects.get(id=net_id))
        endpoint.save()
    compose_doc['services'].update(compose_services)
    deploy_dir = '{}/deploy/'.format(net_dir)
    if not os.path.exists(deploy_dir):
        os.makedirs(deploy_dir)
    compose_path = '{}/docker-compose.yaml'.format(deploy_dir)
    with open(compose_path, 'w') as f:
        yaml.dump(compose_doc, f)
    project = compose_get_project(project_dir=deploy_dir,
                                  host=host.worker_api,
                                  project_name=net_id[:12])
    containers = project.up(detached=True, timeout=5)
    portid[0] = port_cursor
    return containers
def create_peer_org(self, peer_org, couchdb_enabled, host, net_id, net_name,
                    fabric_version, request_host_ports, portid, peer_num):
    """Create docker-compose services for one peer org (peers,
    optionally couchdbs, and - for a brand-new org - its ca), and bring
    the containers up on the org's host.

    :param peer_org: org dict with 'name', 'domain', 'peerNum', 'host_id'
    :param couchdb_enabled: whether each peer gets a couchdb state db
    :param portid: single-element list, in/out cursor into
        ``request_host_ports``
    :param peer_num: number of NEW peers; if peerNum is larger, the
        first (peerNum - peer_num) peers already exist and are skipped
    :return: the containers started by ``project.up``
    """
    index = portid[0]
    services_dict = {}
    org_name = peer_org['name']
    org_domain = peer_org['domain']
    peer_num_all = int(peer_org['peerNum'])
    # when the org already exists, only the trailing peer_num peers are new
    exist_peer_num = 0
    if peer_num_all != peer_num:
        exist_peer_num = peer_num_all - peer_num
    # worker_api in db looks like "tcp://x.x.x.x:2375"
    # NOTE(review): this ip is taken from the `host` argument, while the
    # containers are started on the host looked up from
    # peer_org['host_id'] below - confirm both are the same host.
    container_service_ip = host.worker_api.split(':')[1][2:]
    composefile_dict = {
        'version': '3.2',
        'networks': {'celloNet': None},
        'services': {}
    }
    net_dir = CELLO_MASTER_FABRIC_DIR + net_id
    for i in range(peer_num_all):
        if exist_peer_num > i:
            continue  # peer already deployed in a previous call
        peer_name = 'peer{}'.format(i)
        if couchdb_enabled is True:
            couchdb_service_name = '.'.join(
                ['couchdb', peer_name, org_name, org_domain])
            couch_host_port = request_host_ports[index]
            index += 1
            services_dict.update(
                self._construct_couchdb_docker_service(
                    net_id, couchdb_service_name, fabric_version,
                    couch_host_port))
            couchdb_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=couch_host_port,
                service_name=couchdb_service_name,
                service_type='couchdb',
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            couchdb_service_endpoint.save()
        peer_service_name = '.'.join([peer_name, org_name, org_domain])
        host_ports = [
            request_host_ports[index],
            request_host_ports[index + 1]
        ]
        index += 2
        services_dict.update(
            self._construct_peer_docker_service(
                net_id, org_name, org_domain, peer_name, fabric_version,
                host_ports, couchdb_enabled))
        # first port is grpc, second the chaincode listen port
        # (loop var renamed from `i` to avoid shadowing the peer index)
        for port_idx in range(len(host_ports)):
            peer_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=host_ports[port_idx],
                service_name=peer_service_name,
                service_type='peer',
                org_name=org_name,
                peer_port_proto=PEER_PORT_GRPC
                if port_idx == 0 else PEER_PORT_CCLISTEN,
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            peer_service_endpoint.save()
    if exist_peer_num == 0:
        # brand-new org: also create its ca service
        ca_service_name = '.'.join(['ca', org_name, org_domain])
        org_full_domain = '.'.join([org_name, org_domain])
        pk_path = '{net_dir}/crypto-config/peerOrganizations/{org_dir}/ca/'. \
            format(net_dir=net_dir, org_dir=org_full_domain)
        ca_key_file = self._get_ca_private_key(pk_path)
        host_port = request_host_ports[index]
        index += 1
        services_dict.update(
            self._construct_ca_docker_service(
                net_id, org_name, org_domain, ca_key_file, fabric_version,
                host_port))
        ca_service_endpoint = modelv2.ServiceEndpoint(
            id=uuid4().hex,
            service_ip=container_service_ip,
            service_port=host_port,
            service_name=ca_service_name,
            service_type='ca',
            org_name=org_name,
            network=modelv2.BlockchainNetwork.objects.get(id=net_id))
        ca_service_endpoint.save()
    # the org's containers run on the host recorded for the org
    host_id = peer_org['host_id']
    host = host_handler.get_active_host_by_id(host_id)
    composefile_dict['services'].update(services_dict)
    deploy_dir = '{}/deploy/'.format(net_dir)
    if not os.path.exists(deploy_dir):
        os.makedirs(deploy_dir)
    compose_path = '{}/docker-compose.yaml'.format(deploy_dir)
    backup_path = '{}/docker-compose-back.yaml'.format(deploy_dir)
    # back up the existing compose file; the file handed to project.up
    # contains only the NEW services
    if os.path.exists(compose_path):
        shutil.copy(compose_path, backup_path)
    with open(compose_path, 'w') as f:
        yaml.dump(composefile_dict, f)
    project = compose_get_project(project_dir=deploy_dir,
                                  host=host.worker_api,
                                  project_name=net_id[:12])
    containers = project.up(detached=True, timeout=5)
    # restore the backup, then merge the new services in, so the file on
    # disk describes the whole network again
    if os.path.exists(backup_path):
        shutil.copy(backup_path, compose_path)
    with open(compose_path) as f:
        # safe_load: no arbitrary object construction (yaml.load was
        # deprecated without an explicit Loader)
        compose_file_base = yaml.safe_load(f)
    compose_file_base['services'].update(services_dict)
    with open(compose_path, 'w') as f:
        yaml.dump(compose_file_base, f)
    portid[0] = index
    return containers
def create(self, network_config, request_host_ports):
    """Deploy a new fabric network onto a kubernetes cluster.

    Renders k8s manifests (namespace, per-org PVs, optional kafka/zookeeper,
    one deployment per orderer / ca / peer) into a per-network deploy
    directory, records a ServiceEndpoint row in the db for every exposed
    service, then submits the manifests to the cluster in dependency order
    (namespace -> kafka -> orderer orgs -> peer orgs).

    :param network_config: dict describing the network; keys read here:
        id, name, host, fabric_version, consensus_type,
        orderer_org_dicts, peer_org_dicts
    :param request_host_ports: pre-allocated node ports, consumed
        sequentially: one per orderer hostname, then per peer org one for
        the ca followed by 2 per peer (3 per peer when couchdb is enabled)
    :raises Exception: when no ready node ip can be determined
    """
    net_id = network_config['id']  # use network id 0-12 byte as name prefix
    net_name = network_config['name']
    net_dir = CELLO_MASTER_FABRIC_DIR + net_id
    host = network_config['host']
    fabric_version = fabric_image_version[network_config['fabric_version']]
    couchdb_enabled = False  # NOTE(review): couchdb support is hard-wired off

    # begin to construct python client to communicate with k8s
    kube_config = self._build_kube_config(host)
    operation = K8sNetworkOperation(kube_config)

    node_vip = host.k8s_param.get('K8SNodeVip')
    # BUGFIX: was "node_vip is ''" -- identity comparison against a string
    # literal is implementation-dependent (and a SyntaxWarning on modern
    # Python); use truthiness instead.
    if not node_vip:
        node_vip = operation.get_one_availabe_node_ip()
        if not node_vip:
            raise Exception("No ready nodes in this k8s cluster")
    nfs_server = host.k8s_param.get('K8SNfsServer')

    # select manifest templates according to fabric version
    namespace_template = getTemplate("namespace.yaml")
    ca_template = getTemplate("ca.yaml")
    peer_template = None
    orderer_template = None
    if fabric_version == '1.4.0':
        peer_template = getTemplate("peer1_4.yaml")
        orderer_template = getTemplate("orderer1_4.yaml")
    elif fabric_version == '1.1.0':
        peer_template = getTemplate("peer.yaml")
        orderer_template = getTemplate("orderer.yaml")
    # BUGFIX: the couchdb peer template used to be chosen *before* the
    # version check above and was therefore always overwritten; the couchdb
    # variant must take precedence when enabled.
    if couchdb_enabled:
        peer_template = getTemplate("peer_couchdb.yaml")
    pv_template = getTemplate("pv.yaml")

    deploy_dir = '/opt/fabric/{}/deploy'.format(net_id)
    os.mkdir(deploy_dir)

    # one network one k8s namespace
    namespace_file = '{deploy_dir}/namespace.yaml'.format(deploy_dir=deploy_dir)
    render(namespace_template, namespace_file, networkName=net_name)

    # kafka support
    if network_config['consensus_type'] == 'kafka':
        kafka_template = getTemplate("kafka.yaml")
        zookeeper_template = getTemplate("zookeeper.yaml")
        kafka_pvc_template = getTemplate("kafka_pvc.yaml")
        kafka_pv_template = getTemplate("kafka_pv.yaml")
        kafka_pv_deploy_file = '{}/kafka_pv.yaml'.format(deploy_dir)
        kafka_pvc_deploy_file = '{}/kafka_pvc.yaml'.format(deploy_dir)
        kafka_deploy_file = '{}/kafka.yaml'.format(deploy_dir)
        zookeeper_deploy_file = '{}/zookeeper.yaml'.format(deploy_dir)
        # one data directory per kafka broker on the nfs-backed path
        for broker in range(KAFKA_NODE_NUM):
            kafka_node_datadir = '/opt/fabric/{}/data/kafka-{}'.format(
                net_id, broker)
            os.makedirs(kafka_node_datadir)
        render(kafka_pv_template, kafka_pv_deploy_file,
               path='/{}/data'.format(net_id),
               networkName=net_name,
               nfsServer=nfs_server)
        render(kafka_pvc_template, kafka_pvc_deploy_file,
               networkName=net_name)
        render(zookeeper_template, zookeeper_deploy_file,
               networkName=net_name)
        render(kafka_template, kafka_deploy_file, networkName=net_name)

    index = 0  # cursor into request_host_ports
    orderer_org_names = []
    peer_org_names = []

    # ---- render orderer orgs: one pv manifest per org, one deployment
    # ---- manifest per orderer hostname
    for orderer_org in network_config['orderer_org_dicts']:
        orderer_domain = orderer_org['domain']
        org_name = orderer_org['name']
        orderer_org_names.append(org_name)
        org_deploydir = '{deploy_dir}/{org_name}'.format(
            deploy_dir=deploy_dir, org_name=org_name)
        os.mkdir(org_deploydir)
        orderer_pv_file = '{org_deploydir}/pv.yaml'.format(
            org_deploydir=org_deploydir)
        org_data_path = '/opt/fabric/{}/data/{}'.format(net_id, org_name)
        render(pv_template, orderer_pv_file,
               networkName=net_name,
               credentialPV=org_name + '-credentialpv',
               dataPV=org_name + '-datapv',
               # this is different from fabric_on_kubernetes, because it is
               # possible that a network owns more than one orderer org
               credentialPath='/{net_id}/crypto-config/ordererOrganizations/'.
               format(net_id=net_id),
               dataPath='/{net_id}/data/{org_name}'.format(
                   net_id=net_id, org_name=org_name),
               nfsServer=nfs_server)
        for hostname in orderer_org['ordererHostnames']:
            host_port = request_host_ports[index]
            orderer_service_name = '.'.join([hostname, orderer_domain])
            host_deploy_file = \
                '{org_deploydir}/deploy_{orderer_service_name}.yaml'.format(
                    org_deploydir=org_deploydir,
                    orderer_service_name=orderer_service_name)
            # k8s object names may not contain dots
            k8s_orderer_name = '{}-{}'.format(hostname, org_name)
            os.makedirs('{}/{}'.format(org_data_path, orderer_service_name))
            render(orderer_template, host_deploy_file,
                   networkName=net_name,
                   orgDomain=orderer_domain,
                   ordererSvcName=k8s_orderer_name,
                   podName=k8s_orderer_name,
                   fabVersion=fabric_version,
                   localMSPID='{}MSP'.format(
                       org_name[0:1].upper() + org_name[1:]),
                   mspPath='{orderer_domain}/orderers/'
                           '{orderer_service_name}/msp'.format(
                               orderer_domain=orderer_domain,
                               orderer_service_name=orderer_service_name),
                   tlsPath='{orderer_domain}/orderers/'
                           '{orderer_service_name}/tls'.format(
                               orderer_domain=orderer_domain,
                               orderer_service_name=orderer_service_name),
                   ordererDataPath=orderer_service_name,
                   credentialPV=org_name + '-credentialpv',
                   dataPV=org_name + '-datapv',
                   ordererID=hostname,
                   nodePort=host_port)
            # save orderer service endpoint to db
            # if container run failed, then delete network
            # according to reference, corresponding service endpoint
            # would be delete automatically
            orderer_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=node_vip,
                service_port=host_port,
                service_name=orderer_service_name,
                service_type='orderer',
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            orderer_service_endpoint.save()
            index += 1

    # ---- render peer orgs: pv + ca + peers (+ optional couchdb sidecars)
    for peer_org in network_config['peer_org_dicts']:
        org_name = peer_org['name']
        peer_org_names.append(org_name)
        org_domain = peer_org['domain']
        org_full_domain = '.'.join([org_name, org_domain])
        org_deploydir = '{deploy_dir}/{org_name}'.format(
            deploy_dir=deploy_dir, org_name=org_name)
        os.mkdir(org_deploydir)
        org_data_path = '/opt/fabric/{}/data/{}'.format(net_id, org_name)
        org_pv_file = '{org_deploydir}/pv.yaml'.format(
            org_deploydir=org_deploydir)
        render(pv_template, org_pv_file,
               networkName=net_name,
               credentialPV=org_name + '-credentialpv',
               dataPV=org_name + '-datapv',
               credentialPath='/{net_id}/crypto-config/peerOrganizations/'
                              '{org_fullDomain_name}/'.format(
                                  net_id=net_id,
                                  org_fullDomain_name=org_full_domain),
               dataPath='/{net_id}/data/{org_name}'.format(
                   net_id=net_id, org_name=org_name),
               nfsServer=nfs_server)

        # -- ca service for the org
        org_ca_file = '{org_deploydir}/ca.yaml'.format(
            org_deploydir=org_deploydir)
        host_port = request_host_ports[index]
        ca_service_name = '.'.join(['ca', org_name, org_domain])
        k8s_ca_name = 'ca-{}'.format(org_name)
        sk_file = ''
        ca_dir = '{net_dir}/crypto-config/peerOrganizations/' \
                 '{org_fullDomain_name}/ca/'.format(
                     net_dir=net_dir, org_fullDomain_name=org_full_domain)
        for f in os.listdir(ca_dir):  # find out sk!
            if f.endswith("_sk"):
                sk_file = f
        cert_file = '/etc/hyperledger/fabric-ca-server-config/' \
                    'ca.{}-cert.pem'.format(org_full_domain)
        key_file = '/etc/hyperledger/fabric-ca-server-config/{}'.format(
            sk_file)
        command = "'fabric-ca-server start -b admin:adminpw -d --config /etc/hyperledger/fabric-ca-server-config/fabric-ca-server-config.yaml'"
        os.makedirs('{}/ca'.format(org_data_path))
        render(ca_template, org_ca_file,
               command=command,
               networkName=net_name,
               orgDomain=org_full_domain,
               caSvcName=k8s_ca_name,
               podName=k8s_ca_name,
               fabVersion=fabric_version,
               tlsCert=cert_file,
               tlsKey=key_file,
               caPath='ca/',
               caDataPath='ca/',
               credentialPV=org_name + '-credentialpv',
               dataPV=org_name + '-datapv',
               nodePort=host_port)
        ca_service_endpoint = modelv2.ServiceEndpoint(
            id=uuid4().hex,
            service_ip=node_vip,
            service_port=host_port,
            service_name=ca_service_name,
            service_type='ca',
            network=modelv2.BlockchainNetwork.objects.get(id=net_id))
        ca_service_endpoint.save()
        index += 1

        # -- peer services for the org
        for i in range(int(peer_org['peerNum'])):
            peer_name = 'peer{}'.format(i)
            peer_service_name = '.'.join([peer_name, org_name, org_domain])
            # 2 node ports per peer (grpc + event/cclisten),
            # plus 1 for couchdb when enabled
            if couchdb_enabled:
                host_ports = [
                    request_host_ports[index],
                    request_host_ports[index + 1],
                    request_host_ports[index + 2]
                ]
                index += 3
            else:
                host_ports = [
                    request_host_ports[index],
                    request_host_ports[index + 1]
                ]
                index += 2
            k8s_peer_name = '{}-{}'.format(peer_name, org_name)
            peer_deploy_file = \
                '{org_deploydir}/deploy_{peer_service_name}.yaml'.format(
                    org_deploydir=org_deploydir,
                    peer_service_name=peer_service_name)
            os.makedirs('{}/{}'.format(org_data_path, peer_service_name))
            if couchdb_enabled:
                couchdb_template = getTemplate("couchdb.yaml")
                couchdb_service_name = 'couchdb.{peer_service_name}'.format(
                    peer_service_name=peer_service_name)
                couchdb_deploy_file = \
                    '{org_deploydir}/deploy_{couchdb_service_name}.yaml'.format(
                        org_deploydir=org_deploydir,
                        couchdb_service_name=couchdb_service_name)
                os.makedirs('{}/{}'.format(org_data_path,
                                           couchdb_service_name))
                render(couchdb_template, couchdb_deploy_file,
                       networkName=net_name,
                       peerName=k8s_peer_name,
                       dataPath=couchdb_service_name,
                       dataPV=org_name + '-datapv',
                       nodePort1=host_ports[2])
                couchdb_service_endpoint = modelv2.ServiceEndpoint(
                    id=uuid4().hex,
                    service_ip=node_vip,
                    service_port=host_ports[2],
                    service_name=couchdb_service_name,
                    service_type='couchdb',
                    network=modelv2.BlockchainNetwork.objects.get(id=net_id))
                couchdb_service_endpoint.save()
            render(peer_template, peer_deploy_file,
                   networkName=net_name,
                   orgDomain=org_full_domain,
                   peerSvcName=k8s_peer_name,
                   podName=k8s_peer_name,
                   fabVersion=fabric_version,
                   peerID=peer_name,
                   corePeerID=k8s_peer_name,
                   peerAddress='{}:7051'.format(k8s_peer_name),
                   localMSPID='{}MSP'.format(
                       org_name[0:1].upper() + org_name[1:]),
                   mspPath='peers/{}/msp'.format(peer_service_name),
                   tlsPath='peers/{}/tls'.format(peer_service_name),
                   dataPath=peer_service_name,
                   credentialPV=org_name + '-credentialpv',
                   dataPV=org_name + '-datapv',
                   nodePort1=host_ports[0],
                   nodePort2=host_ports[1])
            # record both exposed peer ports with their version-specific
            # protocol labels.
            # FIX: the inner index used to reuse "i", shadowing the peer
            # loop variable above; renamed to proto_idx.
            if fabric_version == '1.4.0':
                for proto_idx in range(2):
                    peer_service_endpoint = modelv2.ServiceEndpoint(
                        id=uuid4().hex,
                        service_ip=node_vip,
                        service_port=host_ports[proto_idx],
                        service_name=peer_service_name,
                        service_type='peer',
                        peer_port_proto=fabric_peer_proto_14[proto_idx],
                        network=modelv2.BlockchainNetwork.objects.get(
                            id=net_id))
                    peer_service_endpoint.save()
            elif fabric_version == '1.1.0':
                for proto_idx in range(2):
                    peer_service_endpoint = modelv2.ServiceEndpoint(
                        id=uuid4().hex,
                        service_ip=node_vip,
                        service_port=host_ports[proto_idx],
                        service_name=peer_service_name,
                        service_type='peer',
                        peer_port_proto=fabric_peer_proto_11[proto_idx],
                        network=modelv2.BlockchainNetwork.objects.get(
                            id=net_id))
                    peer_service_endpoint.save()

    # ---- submit rendered manifests to the cluster, in dependency order.
    # NOTE(review): yaml.load_all without an explicit Loader is deprecated
    # in PyYAML >= 5.1; the manifests are rendered locally (trusted input),
    # but consider yaml.safe_load_all.
    # first create namespace for this network
    with open('{deploy_dir}/namespace.yaml'.format(
            deploy_dir=deploy_dir)) as f:
        resources = yaml.load_all(f)
        operation.deploy_k8s_resource(resources)

    # if consensus_type is kafka
    if network_config['consensus_type'] == 'kafka':
        with open('{}/kafka_pv.yaml'.format(deploy_dir)) as f:
            resources = yaml.load_all(f)
            operation.deploy_k8s_resource(resources)
        with open('{}/kafka_pvc.yaml'.format(deploy_dir)) as f:
            resources = yaml.load_all(f)
            operation.deploy_k8s_resource(resources)
        with open('{}/zookeeper.yaml'.format(deploy_dir)) as f:
            resources = yaml.load_all(f)
            operation.deploy_k8s_resource(resources)
        with open('{}/kafka.yaml'.format(deploy_dir)) as f:
            resources = yaml.load_all(f)
            operation.deploy_k8s_resource(resources)

    # Then deploy oderer org, first pv, then orderer service
    for orderer_org in orderer_org_names:
        orderer_dir = '{deploy_dir}/{org_name}/'.format(
            deploy_dir=deploy_dir, org_name=orderer_org)
        with open('{}/pv.yaml'.format(orderer_dir)) as f:
            resources = yaml.load_all(f)
            operation.deploy_k8s_resource(resources)
        for deploy_file in os.listdir(orderer_dir):
            if deploy_file.startswith('deploy_'):
                with open('{}/{}'.format(orderer_dir, deploy_file)) as f:
                    resources = yaml.load_all(f)
                    operation.deploy_k8s_resource(resources)

    # At last deploy peer org, first pv, then ca, then peer service
    for peer_org in peer_org_names:
        peer_dir = '{deploy_dir}/{org_name}/'.format(
            deploy_dir=deploy_dir, org_name=peer_org)
        with open('{}/pv.yaml'.format(peer_dir)) as f:
            resources = yaml.load_all(f)
            operation.deploy_k8s_resource(resources)
        with open('{}/ca.yaml'.format(peer_dir)) as f:
            resources = yaml.load_all(f)
            operation.deploy_k8s_resource(resources)
        for deploy_file in os.listdir(peer_dir):
            if deploy_file.startswith('deploy_'):
                with open('{}/{}'.format(peer_dir, deploy_file)) as f:
                    resources = yaml.load_all(f)
                    operation.deploy_k8s_resource(resources)