Example #1
0
def get_project(path):
    """Return the docker-compose project for the compose file at *path*.

    :param path: path of the docker-compose file
    :return: compose project object
    """
    # Lazy %-style args defer formatting until the record is actually emitted.
    logging.debug('get project %s', path)
    # Dict literal instead of dict([(k, v)]) — clearer and avoids the
    # intermediate list (flake8-comprehensions C406).
    config_path = get_config_path_from_options({'--file': path})
    project = compose_get_project(config_path)
    return project
Example #2
0
def get_project(path):
    """Return the docker-compose project for the compose file at *path*.

    :param path: path of the docker-compose file
    :return: compose project object
    """
    # Lazy %-style args defer formatting until the record is actually emitted.
    logging.debug('get project %s', path)
    # Dict literal instead of dict([(k, v)]) — clearer and avoids the
    # intermediate list (flake8-comprehensions C406).
    config_path = get_config_path_from_options({'--file': path})
    project = compose_get_project(config_path)
    return project
Example #3
0
def get_project(path):
    """Return the docker-compose project for the compose file at *path*.

    :param path: path of the docker-compose file
    :return: compose project object
    """
    # Lazy %-style args defer formatting until the record is actually emitted
    # (also avoids a TypeError if *path* is ever not a str).
    logging.debug('get project %s', path)
    config_path = get_config_path(path)
    project = compose_get_project(config_path)
    return project
Example #4
0
def get_project(path):
    """Return the docker-compose project for the compose file at *path*.

    :param path: path of the docker-compose file
    :return: compose project object
    """
    # Lazy %-style args defer formatting until the record is actually emitted
    # (also avoids a TypeError if *path* is ever not a str).
    logging.debug('get project %s', path)
    config_path = get_config_path(path)
    project = compose_get_project(config_path)
    return project
Example #5
0
def get_project(path):
    """Return the docker-compose project rooted at directory *path*.

    :param path: project directory holding the compose file and .env
    :return: compose project object
    """
    # Lazy %-style args defer formatting until the record is actually emitted.
    logging.debug('get project %s', path)
    environment = Environment.from_env_file(path)
    config_path = get_config_path_from_options(path, dict(), environment)
    project = compose_get_project(path, config_path)
    return project
Example #6
0
def get_project(path, project_name=None):
    """Build the compose project at *path*, targeting the swarm scheduler.

    :param path: project directory holding the compose file and .env
    :param project_name: optional explicit compose project name
    :return: compose project object bound to the swarm scheduling host
    """
    env = Environment.from_env_file(path)
    cfg_path = get_config_path_from_options(path, dict(), env)
    swarm_host = '{0}:{1}'.format(config.swarm_scheduling_host,
                                  config.swarm_scheduling_port)
    return compose_get_project(path, cfg_path, project_name=project_name,
                               host=swarm_host)
Example #7
0
def get_project(path):
    """Return the docker-compose project rooted at directory *path*.

    :param path: project directory holding the compose file and .env
    :return: compose project object
    """
    # Lazy %-style args defer formatting until the record is actually emitted.
    logging.debug('get project %s', path)
    environment = Environment.from_env_file(path)
    config_path = get_config_path_from_options(path, dict(), environment)
    project = compose_get_project(path, config_path)
    return project
Example #8
0
def get_project(template_path):
    """Load the compose project described by the template at *template_path*.

    :param template_path: directory of the compose template file
    :return: compose project object
    """
    env = Environment.from_env_file(template_path)
    cfg_path = compose_get_config_path_from_options(template_path, dict(),
                                                    env)
    return compose_get_project(template_path, cfg_path)
Example #9
0
def get_project(template_path):
    """Load the compose project described by the template at *template_path*.

    :param template_path: directory of the compose template file
    :return: compose project object
    """
    env = Environment.from_env_file(template_path)
    cfg_path = compose_get_config_path_from_options(template_path, dict(),
                                                    env)
    return compose_get_project(template_path, cfg_path)
Example #10
0
    def delete_orderer_org(self, orderer_org, consensus_type, host, net_id):
        """Tear down the compose-managed containers of network *net_id*.

        No-op (with an info log) when the network has no compose file, i.e.
        nothing was ever deployed.
        """
        compose_dir = CELLO_MASTER_FABRIC_DIR + net_id + '/deploy'
        compose_file = '{}/docker-compose.yaml'.format(compose_dir)

        if not os.path.exists(compose_file):
            logger.info("network {} has no container running".format(net_id))
            return

        compose_project = compose_get_project(project_dir=compose_dir,
                                              host=host.worker_api,
                                              project_name=net_id[:12])
        compose_project.down(ImageType_none, True)
Example #11
0
    def create(self, *args, **kwargs):
        """Bring up the compose project for this deployment (detached).

        Exports COMPOSE_PROJECT_NAME / DOCKER_HOST into the process
        environment so docker-compose targets the right daemon, then runs
        ``project.up``; failures are logged rather than raised.
        """
        os.environ.update({
            "COMPOSE_PROJECT_NAME": self._project_name,
            "DOCKER_HOST": self._docker_host,
        })

        compose_project = compose_get_project(self._compose_file_path)
        try:
            compose_project.up(detached=True, timeout=10)
        except Exception as e:
            LOG.error(str(e))
Example #12
0
    def delete(self, *args, **kwargs):
        """Stop the compose project, remove its containers, clean networks.

        Exports COMPOSE_PROJECT_NAME / DOCKER_HOST so docker-compose targets
        the right daemon; failures are logged rather than raised.
        """
        os.environ.update({
            "COMPOSE_PROJECT_NAME": self._project_name,
            "DOCKER_HOST": self._docker_host,
        })

        compose_project = compose_get_project(self._compose_file_path)
        try:
            compose_project.stop(timeout=10)
            compose_project.remove_stopped(one_off=OneOffFilter.include,
                                           force=True)
            self._clean_network()
        except Exception as e:
            LOG.error(str(e))
Example #13
0
    def delete(self, network):
        """Stop and remove the compose-managed containers of *network*.

        No-op (with an info log) when the network has no compose file.
        """
        net_id = network.id
        host = network.host
        compose_dir = CELLO_MASTER_FABRIC_DIR + net_id + '/deploy'
        compose_file = '{}/docker-compose.yaml'.format(compose_dir)

        if not os.path.exists(compose_file):
            logger.info("network {} has no container running".format(net_id))
            return

        compose_project = compose_get_project(project_dir=compose_dir,
                                              host=host.worker_api,
                                              project_name=net_id[:12])
        # project.down() only touches what docker-compose.yaml declares, so
        # chaincode containers/images (created outside compose) may be left
        # behind and need separate cleanup.
        compose_project.down(ImageType_none, True)
Example #14
0
    def update(self, network_config, request_host_ports):
        """Add the orderer/couchdb/peer/ca services described in
        *network_config* to an existing network's compose deployment and
        bring them up.

        A ServiceEndpoint row is saved for every exposed port as it is
        allocated, so endpoints exist in the DB even if the later
        ``project.up`` fails (cleanup on failure is handled elsewhere).

        :param network_config: dict with 'id', 'host', 'fabric_version',
            'orderer_org_dicts' and 'peer_org_dicts'
        :param request_host_ports: list of host ports, consumed in order
        :return: containers started by ``project.up``
        """
        # only ensure network_files_dir is exist
        # suppose if no exception is raised during crypto generation,
        # then everything is OK.
        # below code is wrong, cause the network_files_dir is on worker node
        # network_files_dir = CELLO_WORKER_FABRIC_DIR + net_id
        # if not os.path.isdir(network_files_dir):
        #     raise IOError("blockchain network crypto-config \
        #                   and genesis block couldn't be found")

        net_id = network_config[
            'id']  # use network id 0-12 byte as name prefix
        net_dir = CELLO_MASTER_FABRIC_DIR + net_id
        host = network_config['host']
        composefile_dict = {
            'version': '3.2',
            'networks': {
                'celloNet': None
            },
            'services': {}
        }
        # valid worker_ip in db is like "tcp://x.x.x.x:2375"
        container_service_ip = host.worker_api.split(':')[1][2:]

        service_names = []
        orderer_service_names = []
        couchdb_service_names = []
        # add for k8s (namespace) and docker deploy
        orderer_service_deploy_names = []
        sevices_dict = {}
        # running cursor into request_host_ports
        index = 0
        for orderer_org in network_config['orderer_org_dicts']:
            for hostname in orderer_org['ordererHostnames']:
                orderer_domain = orderer_org['domain']
                orderer_service_name = '.'.join([hostname, orderer_domain])
                service_names.append(orderer_service_name)
                orderer_service_names.append(orderer_service_name)
                org_name = orderer_org['name']
                host_port = request_host_ports[index]
                index = index + 1
                orderer_service_dict = self._construct_orderer_docker_service(net_id, org_name, orderer_domain, hostname,\
                                                                              network_config['fabric_version'], \
                                                                              host_port)
                sevices_dict.update(orderer_service_dict)

                # save orderer service endpoint to db
                # if container run failed, then delete network
                # according to reference, corresponding service endpoint
                # would be delete automatically
                orderer_service_endpoint = modelv2.ServiceEndpoint(
                    id=uuid4().hex,
                    service_ip=container_service_ip,
                    service_port=host_port,
                    service_name=orderer_service_name,
                    service_type='orderer',
                    network=modelv2.BlockchainNetwork.objects.get(id=net_id))
                orderer_service_endpoint.save()

        for peer_org in network_config['peer_org_dicts']:
            org_name = peer_org['name']
            org_domain = peer_org['domain']
            for i in range(int(peer_org['peerNum'])):
                peer_name = 'peer{}'.format(i)
                peer_seq = ['couchdb', peer_name, org_name, org_domain]
                couchdb_service_name = '.'.join(peer_seq)
                # (previously appended twice by accident — once is enough)
                service_names.append(couchdb_service_name)
                couchdb_service_names.append(couchdb_service_name)
                couch_host_port = request_host_ports[index]
                index = index + 1
                couchdb_service_dict = self._construct_couchdb_docker_service(net_id, couchdb_service_name, \
                                                                              network_config['fabric_version'],
                                                                              couch_host_port)
                sevices_dict.update(couchdb_service_dict)
                peer_service_endpoint = modelv2.ServiceEndpoint(
                    id=uuid4().hex,
                    service_ip=container_service_ip,
                    service_port=couch_host_port,
                    service_name=couchdb_service_name,
                    service_type='couchdb',
                    network=modelv2.BlockchainNetwork.objects.get(id=net_id))
                peer_service_endpoint.save()
        for peer_org in network_config['peer_org_dicts']:
            org_name = peer_org['name']
            org_domain = peer_org['domain']
            for i in range(int(peer_org['peerNum'])):
                peer_name = 'peer{}'.format(i)
                peer_seq = [peer_name, org_name, org_domain]
                peer_service_name = '.'.join(peer_seq)
                service_names.append(peer_service_name)
                host_ports = [
                    request_host_ports[index], request_host_ports[index + 1]
                ]
                index = index + 2
                depends_on = orderer_service_names + couchdb_service_names
                peer_service_dict = self._construct_peer_docker_service(net_id, org_name, org_domain, peer_name,\
                                                                              network_config['fabric_version'], \
                                                                              host_ports, depends_on)
                sevices_dict.update(peer_service_dict)
                # distinct loop variable: the old code reused ``i`` here,
                # shadowing the per-peer index above
                for port_idx, peer_port in enumerate(host_ports):
                    peer_service_endpoint = modelv2.ServiceEndpoint(
                        id=uuid4().hex,
                        service_ip=container_service_ip,
                        service_port=peer_port,
                        service_name=peer_service_name,
                        service_type='peer',
                        # first mapped port is gRPC, second the event hub
                        peer_port_proto=PEER_PORT_GRPC
                        if port_idx == 0 else PEER_PORT_EVENT,
                        network=modelv2.BlockchainNetwork.objects.get(
                            id=net_id))
                    peer_service_endpoint.save()

            ca_service_name = '.'.join(['ca', org_name, org_domain])
            service_names.append(ca_service_name)
            org_full_domain = '.'.join([org_name, org_domain])
            pk_path = '{net_dir}/crypto-config/peerOrganizations/{org_dir}/ca/'.\
                format(net_dir=net_dir, org_dir=org_full_domain)
            ca_key_file = self._get_ca_private_key(pk_path)
            host_port = request_host_ports[index]
            index = index + 1
            ca_service_dict = self._construct_ca_docker_service(net_id, org_name, org_domain, ca_key_file,\
                                        network_config['fabric_version'],
                                        host_port)
            sevices_dict.update(ca_service_dict)
            ca_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=host_port,
                service_name=ca_service_name,
                service_type='ca',
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            ca_service_endpoint.save()

        # TODO: first test 'solo'
        # if network_config['consensus_type'] == 'kafka':
        #     pass

        composefile_dict['services'].update(sevices_dict)
        deploy_dir = '{}/deploy/'.format(net_dir)

        # keep a backup of the current compose file; only the NEW services
        # are written to docker-compose.yaml so project.up starts just them
        os.system(
            'cp {}/docker-compose.yaml {}/docker-compose-back.yaml'.format(
                deploy_dir, deploy_dir))

        composefile = '{}/docker-compose.yaml'.format(deploy_dir)

        with open(composefile, 'w') as f:
            yaml.dump(composefile_dict, f)

        project = compose_get_project(project_dir=deploy_dir,
                                      host=host.worker_api,
                                      project_name=net_id[:12])

        containers = project.up(detached=True, timeout=5)

        # restore the original compose file, then merge the new services in
        os.system(
            'cp {}/docker-compose-back.yaml {}/docker-compose.yaml'.format(
                deploy_dir, deploy_dir))

        composefile_back = '{}/docker-compose.yaml'.format(deploy_dir)

        # yaml.load() without an explicit Loader is deprecated and a
        # TypeError on PyYAML >= 6; safe_load handles the plain mappings
        # produced by yaml.dump above. ``with`` also guarantees the close.
        with open(composefile_back) as f:
            yamlcomposefile = yaml.safe_load(f)

        services_dict = yamlcomposefile['services']

        services_dict.update(sevices_dict)

        composefile_dict_back = {
            'version': '3.2',
            'networks': {
                'celloNet': None
            },
            'services': {}
        }

        composefile_dict_back['services'].update(services_dict)

        with open(composefile_back, 'w') as f:
            yaml.dump(composefile_dict_back, f)

        return containers
Example #15
0
    def create_orderer_org(self, orderer_org, consensus_type, host, net_id,
                           net_name, fabric_version, request_host_ports,
                           portid):
        """Generate and start the orderer services for one orderer org.

        Writes a fresh docker-compose.yaml under the network's deploy dir,
        saves a ServiceEndpoint DB row per orderer, and runs ``project.up``.

        :param orderer_org: dict with 'name', 'domain', 'ordererHostnames'
        :param consensus_type: unused here — TODO confirm callers rely on it
        :param host: host object; worker_api like "tcp://x.x.x.x:2375"
        :param net_id: network id; first 12 chars become the project name
        :param net_name: unused here — presumably kept for interface parity
        :param fabric_version: fabric image version for the services
        :param request_host_ports: list of host ports, consumed in order
        :param portid: one-element list; in/out cursor into request_host_ports
        :return: containers started by ``project.up``
        """
        service_names = []
        orderer_service_names = []
        # resume port allocation where the previous call stopped
        index = portid[0]
        sevices_dict = {}
        # worker_api is "tcp://x.x.x.x:2375" -> take the bare IP
        container_service_ip = host.worker_api.split(':')[1][2:]
        composefile_dict = {
            'version': '3.2',
            'networks': {
                'celloNet': None
            },
            'services': {}
        }
        net_dir = CELLO_MASTER_FABRIC_DIR + net_id
        for hostname in orderer_org['ordererHostnames']:
            orderer_domain = orderer_org['domain']
            orderer_service_name = '.'.join([hostname, orderer_domain])
            service_names.append(orderer_service_name)
            orderer_service_names.append(orderer_service_name)
            org_name = orderer_org['name']
            host_port = request_host_ports[index]
            index = index + 1
            orderer_service_dict = self._construct_orderer_docker_service(net_id, org_name, orderer_domain, hostname, \
                                                                          fabric_version, \
                                                                          host_port)
            sevices_dict.update(orderer_service_dict)

            # save orderer service endpoint to db
            # if container run failed, then delete network
            # according to reference, corresponding service endpoint
            # would be delete automatically
            orderer_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=host_port,
                service_name=orderer_service_name,
                service_type='orderer',
                org_name=org_name,
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            orderer_service_endpoint.save()

        composefile_dict['services'].update(sevices_dict)
        deploy_dir = '{}/deploy/'.format(net_dir)
        if not os.path.exists(deploy_dir):
            os.makedirs(deploy_dir)
        composefile = '{}/docker-compose.yaml'.format(deploy_dir)

        with open(composefile, 'w') as f:
            yaml.dump(composefile_dict, f)

        project = compose_get_project(project_dir=deploy_dir,
                                      host=host.worker_api,
                                      project_name=net_id[:12])

        containers = project.up(detached=True, timeout=5)

        # hand the updated port cursor back to the caller
        portid[0] = index

        return containers
Example #16
0
 def get_project(self, path):
     """Load the docker-compose project rooted at directory *path*."""
     env = Environment.from_env_file(path)
     cfg_path = get_config_path_from_options(path, dict(), env)
     return compose_get_project(path, cfg_path)
Example #17
0
    def create_peer_org(self, peer_org, couchdb_enabled, host, net_id,
                        net_name, fabric_version, request_host_ports, portid,
                        peer_num):
        """Generate and start the peer (plus optional couchdb and ca)
        services for one peer org, merging them into the network's
        docker-compose.yaml.

        When *peer_num* differs from the org's total peerNum, the first
        ``peerNum - peer_num`` peers are treated as already existing and
        skipped; the ca is only created when no peers existed before.

        :param peer_org: dict with 'name', 'domain', 'peerNum', 'host_id'
        :param couchdb_enabled: whether each peer gets a couchdb sidecar
        :param host: host object; worker_api like "tcp://x.x.x.x:2375"
        :param net_id: network id; first 12 chars become the project name
        :param net_name: unused here — presumably kept for interface parity
        :param fabric_version: fabric image version for the services
        :param request_host_ports: list of host ports, consumed in order
        :param portid: one-element list; in/out cursor into request_host_ports
        :param peer_num: number of NEW peers to create for this org
        :return: containers started by ``project.up``
        """
        service_names = []
        couchdb_service_names = []
        # resume port allocation where the previous call stopped
        index = portid[0]
        sevices_dict = {}
        org_name = peer_org['name']
        org_domain = peer_org['domain']
        peer_num_all = int(peer_org['peerNum'])
        exist_peer_num = 0
        if peer_num_all != peer_num:
            exist_peer_num = peer_num_all - peer_num
        # worker_api is "tcp://x.x.x.x:2375" -> take the bare IP
        container_service_ip = host.worker_api.split(':')[1][2:]
        composefile_dict = {
            'version': '3.2',
            'networks': {
                'celloNet': None
            },
            'services': {}
        }
        net_dir = CELLO_MASTER_FABRIC_DIR + net_id
        for i in range(int(peer_org['peerNum'])):
            # skip peers that already exist from a previous deployment
            if exist_peer_num > i:
                continue
            peer_name = 'peer{}'.format(i)
            if couchdb_enabled is True:
                peer_seq = ['couchdb', peer_name, org_name, org_domain]
                couchdb_service_name = '.'.join(peer_seq)
                # (previously appended twice by accident — once is enough)
                service_names.append(couchdb_service_name)
                couchdb_service_names.append(couchdb_service_name)
                couch_host_port = request_host_ports[index]
                index = index + 1
                couchdb_service_dict = self._construct_couchdb_docker_service(net_id, couchdb_service_name, \
                                                                              fabric_version,
                                                                              couch_host_port)
                sevices_dict.update(couchdb_service_dict)
                couchdb_service_endpoint = modelv2.ServiceEndpoint(
                    id=uuid4().hex,
                    service_ip=container_service_ip,
                    service_port=couch_host_port,
                    service_name=couchdb_service_name,
                    service_type='couchdb',
                    network=modelv2.BlockchainNetwork.objects.get(id=net_id))
                couchdb_service_endpoint.save()
            peer_seq = [peer_name, org_name, org_domain]
            peer_service_name = '.'.join(peer_seq)
            service_names.append(peer_service_name)
            host_ports = [
                request_host_ports[index], request_host_ports[index + 1]
            ]
            index = index + 2
            peer_service_dict = self._construct_peer_docker_service(net_id, org_name, org_domain, peer_name, \
                                                                    fabric_version, \
                                                                    host_ports, couchdb_enabled)
            sevices_dict.update(peer_service_dict)
            # distinct loop variable: the old code reused ``i`` here,
            # shadowing the per-peer index above
            for port_idx, peer_port in enumerate(host_ports):
                peer_service_endpoint = modelv2.ServiceEndpoint(
                    id=uuid4().hex,
                    service_ip=container_service_ip,
                    service_port=peer_port,
                    service_name=peer_service_name,
                    service_type='peer',
                    org_name=org_name,
                    # first mapped port is gRPC, second the chaincode listener
                    peer_port_proto=PEER_PORT_GRPC
                    if port_idx == 0 else PEER_PORT_CCLISTEN,
                    network=modelv2.BlockchainNetwork.objects.get(id=net_id))
                peer_service_endpoint.save()

        if exist_peer_num == 0:
            # brand-new org: also create its ca service
            ca_service_name = '.'.join(['ca', org_name, org_domain])
            service_names.append(ca_service_name)
            org_full_domain = '.'.join([org_name, org_domain])
            pk_path = '{net_dir}/crypto-config/peerOrganizations/{org_dir}/ca/'. \
                format(net_dir=net_dir, org_dir=org_full_domain)
            ca_key_file = self._get_ca_private_key(pk_path)
            host_port = request_host_ports[index]
            index = index + 1
            ca_service_dict = self._construct_ca_docker_service(net_id, org_name, org_domain, ca_key_file, \
                                                                fabric_version,
                                                                host_port)
            sevices_dict.update(ca_service_dict)
            ca_service_endpoint = modelv2.ServiceEndpoint(
                id=uuid4().hex,
                service_ip=container_service_ip,
                service_port=host_port,
                service_name=ca_service_name,
                service_type='ca',
                org_name=org_name,
                network=modelv2.BlockchainNetwork.objects.get(id=net_id))
            ca_service_endpoint.save()
        # re-resolve the host this peer org is bound to — NOTE(review): may
        # differ from the *host* argument used above; confirm intent
        host_id = peer_org['host_id']
        host = host_handler.get_active_host_by_id(host_id)

        composefile_dict['services'].update(sevices_dict)
        deploy_dir = '{}/deploy/'.format(net_dir)
        if not os.path.exists(deploy_dir):
            os.makedirs(deploy_dir)

        # back up the current compose file; only the NEW services are
        # written to docker-compose.yaml so project.up starts just them
        if os.path.exists('{}/docker-compose.yaml'.format(deploy_dir)):
            shutil.copy('{}/docker-compose.yaml'.format(deploy_dir),
                        '{}/docker-compose-back.yaml'.format(deploy_dir))

        composefile_back = '{}/docker-compose.yaml'.format(deploy_dir)
        with open(composefile_back, 'w') as f:
            yaml.dump(composefile_dict, f)

        project = compose_get_project(project_dir=deploy_dir,
                                      host=host.worker_api,
                                      project_name=net_id[:12])

        containers = project.up(detached=True, timeout=5)

        # restore the original compose file, then merge the new services in
        if os.path.exists('{}/docker-compose-back.yaml'.format(deploy_dir)):
            shutil.copy('{}/docker-compose-back.yaml'.format(deploy_dir),
                        '{}/docker-compose.yaml'.format(deploy_dir))

        composefile = '{}/docker-compose.yaml'.format(deploy_dir)
        # yaml.load() without an explicit Loader is deprecated and a
        # TypeError on PyYAML >= 6; safe_load handles the plain mappings
        # produced by yaml.dump above. ``with`` also guarantees the close.
        with open(composefile) as f:
            compose_file_base = yaml.safe_load(f)
        compose_file_base['services'].update(sevices_dict)
        with open(composefile, 'w') as f:
            yaml.dump(compose_file_base, f)

        # hand the updated port cursor back to the caller
        portid[0] = index

        return containers