Example no. 1
def format_ports(ports):
    service_ports_formatted = []
    for port in ports:
        port_published = port['published']
        port_target = port['target']
        formatter_string = DoubleQuotedScalarString(f"{port_published}")

        if port_target:
            formatter_string = DoubleQuotedScalarString(f"{port_published}:{port_target}")

        service_ports_formatted.append(formatter_string)
    
    return service_ports_formatted
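
A minimal usage sketch (imports shown explicitly; the port values are illustrative, and the expected output is given in comments):

import sys
from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import DoubleQuotedScalarString

ports = [{'published': 8080, 'target': 80},
         {'published': 9000, 'target': None}]
yaml = YAML()
yaml.dump({'ports': format_ports(ports)}, sys.stdout)
# ports:
# - "8080:80"
# - "9000"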
Example no. 2
def generate_dc(services, volumes, networks, secrets, configs, version="3", return_format='yaml'):
    if return_format == 'yaml':
        s = io.StringIO()
        ret_yaml = YAML()
        ret_yaml.indent(mapping=2, sequence=4, offset=2)
        ret_yaml.explicit_start = True
        specified_version = get_version(version)
        base_version = int(specified_version)

        if services:
            if base_version in [2, 3]:
                ret_yaml.dump({'version': DoubleQuotedScalarString(specified_version)}, s)
                ret_yaml.explicit_start = False
                s.write('\n')
                services_formatted = format_services_version_three(specified_version, services, volumes, networks)
                ret_yaml.dump({'services': services_formatted}, s, transform=sequence_indent_four)

            if base_version == 1:
                ret_yaml.dump({'version': DoubleQuotedScalarString(specified_version)}, s)
                ret_yaml.explicit_start = False
                s.write('\n')
                services_formatted = format_services_version_one(specified_version, services, volumes, networks)
                ret_yaml.dump(services_formatted, s, transform=sequence_indent_one)
            
            s.write('\n')

        if base_version in [3, 2]:
            if networks:
                networks_formatted = format_networks_top_level(networks, version)
                ret_yaml.dump({'networks': networks_formatted}, s)
                s.write('\n')

        if volumes:
            volumes_formatted = format_volumes_top_level(volumes, version)
            ret_yaml.dump({'volumes': volumes_formatted}, s)
            s.write('\n')
        
        if secrets:
            ret_yaml.dump({'secrets': secrets}, s)
            s.write('\n')

        if configs:
            ret_yaml.dump({'configs': configs}, s)
            s.write('\n')

        s.seek(0)

        return s
Example no. 3
    def stories_to_yaml(self,
                        story_steps: List[StoryStep],
                        is_test_story: bool = False) -> Dict[Text, Any]:
        """Converts a sequence of story steps into yaml format.

        Args:
            story_steps: Original story steps to be converted to the YAML.
            is_test_story: `True` if the story is an end-to-end conversation test story.
        """
        from rasa.shared.utils.validation import KEY_TRAINING_DATA_FORMAT_VERSION

        self._is_test_story = is_test_story

        stories = []
        rules = []
        for story_step in story_steps:
            if isinstance(story_step, RuleStep):
                rules.append(self.process_rule_step(story_step))
            else:
                stories.append(self.process_story_step(story_step))

        result = OrderedDict()
        result[KEY_TRAINING_DATA_FORMAT_VERSION] = DoubleQuotedScalarString(
            LATEST_TRAINING_DATA_FORMAT_VERSION)

        if stories:
            result[KEY_STORIES] = stories
        if rules:
            result[KEY_RULES] = rules

        return result
Example no. 4
    def read_xml(self):
        namespaces = {
            str(x[0]) if x[0] != "" else "atom": x[1]
            for _, x in ET.iterparse(self.opt["input_xml"],
                                     events=['start-ns'])
        }

        for k, v in namespaces.items():
            if k == "atom":
                k = ""
            ET.register_namespace(k, v)
        tree = ET.parse(self.opt["input_xml"])
        self.filters_xml = tree.getroot()
        for e in self.filters_xml.iter('*'):
            if e.text is not None:
                e.text = e.text.strip()
            if e.tail is not None:
                e.tail = e.tail.strip()

        filter_list = []
        for e in self.filters_xml.findall("./atom:entry", namespaces):
            properties = {}
            for p in e.findall("./apps:property", namespaces):
                name = p.get("name")
                value = p.get("value")
                properties[name] = DoubleQuotedScalarString(value)
            if "size" not in properties:
                for noneed in ["sizeOperator", "sizeUnit"]:
                    if noneed in properties:
                        del properties[noneed]
            filter_list.append(properties)

        self.filters = {"namespaces": namespaces, "filter": filter_list}
Example no. 5
def update_config(beat, path):
    # Read the beat config (e.g. winlogbeat)
    with open(path + beat + '.yml', 'r') as config_doc:
        loaded_config = yaml.load(config_doc)

    try:
        # Create logstash hostname:port string and insert into the config map
        # (ipAddr and port are module-level values set elsewhere)
        hostName = DoubleQuotedScalarString(ipAddr + ":" + port)
        print(f"\tSetting logstash host IP: {hostName} -->", end=' ')
        # Remove the default host and add the new hostname
        loaded_config['output.logstash']['hosts'].pop(0)
        loaded_config['output.logstash']['hosts'].insert(0, hostName)
        print("Success")
    except (KeyError, AttributeError, TypeError):
        print("Failed")
        print(f"\t{beat} config does not have logstash enabled")

    # Remove the output.elasticsearch section if it isn't commented out
    loaded_config.pop('output.elasticsearch', None)
    print("\tElasticsearch output removed")

    # Output the corrected config to screen, can also dump to a file
    # yaml.dump(loaded_config, sys.stdout)
    try:
        with open("deploy\\" + beat + "\\" + beat + '.yml', 'w') as newConfigFile:
            yaml.dump(loaded_config, newConfigFile)
    except FileNotFoundError:
        # Not using this beat
        pass
Example no. 6
    def dump(
        self,
        target: Union[Text, Path, ruamel_yaml.StringIO],
        story_steps: List[StoryStep],
    ) -> None:
        """Writes Story steps into a target file/stream.

        Args:
            target: name of the target file/stream to write the YAML to.
            story_steps: Original story steps to be converted to the YAML.
        """
        from rasa.validator import KEY_TRAINING_DATA_FORMAT_VERSION

        self.target = target

        stories = []
        for story_step in story_steps:
            processed_story_step = self.process_story_step(story_step)
            if processed_story_step:
                stories.append(processed_story_step)

        result = OrderedDict()
        result[KEY_TRAINING_DATA_FORMAT_VERSION] = DoubleQuotedScalarString(
            LATEST_TRAINING_DATA_FORMAT_VERSION)
        result[KEY_STORIES] = stories

        io_utils.write_yaml(result, self.target, True)
Example no. 7
def FSlist(l):  # convert list into flow-style (default is block style)
    from ruamel.yaml.comments import CommentedSeq

    double_quoted_list = [DoubleQuotedScalarString(x) for x in l]
    cs = CommentedSeq(double_quoted_list)
    cs.fa.set_flow_style()
    return cs
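
A quick sketch of the effect (assuming YAML and DoubleQuotedScalarString are imported as above): the sequence is emitted inline, flow-style, with every item double-quoted.

import sys
from ruamel.yaml import YAML

yaml = YAML()
yaml.dump({'args': FSlist(['a', 'b'])}, sys.stdout)
# args: ["a", "b"]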
Example no. 8
def format_quotes(s):
    # Contains single quotes: strip them and emit a single-quoted scalar
    if '\'' in s:
        return SingleQuotedScalarString(s.replace("'", ''))
    # Contains double quotes: strip them and emit a double-quoted scalar
    if '"' in s:
        return DoubleQuotedScalarString(s.replace('"', ''))
    # No quotes at all: default to a single-quoted scalar
    return SingleQuotedScalarString(s)
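
A short demonstration (a sketch assuming the scalar-string classes are imported): conflicting quote characters are stripped, and plain strings default to single-quoting.

import sys
from ruamel.yaml import YAML

yaml = YAML()
yaml.dump({'a': format_quotes('plain'),
           'b': format_quotes('say "hi"')}, sys.stdout)
# a: 'plain'
# b: "say hi"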
Example no. 9
    def training_data_to_dict(
            cls, training_data: "TrainingData") -> Optional[OrderedDict]:
        """Represents NLU training data to a dict/list structure ready to be
        serialized as YAML.

        Args:
            training_data: `TrainingData` to convert.

        Returns:
            `OrderedDict` containing all training data.
        """
        from rasa.shared.utils.validation import KEY_TRAINING_DATA_FORMAT_VERSION
        from ruamel.yaml.scalarstring import DoubleQuotedScalarString

        nlu_items = []
        nlu_items.extend(cls.process_intents(training_data))
        nlu_items.extend(cls.process_synonyms(training_data))
        nlu_items.extend(cls.process_regexes(training_data))
        nlu_items.extend(cls.process_lookup_tables(training_data))

        if not any([nlu_items, training_data.responses]):
            return None

        result = OrderedDict()
        result[KEY_TRAINING_DATA_FORMAT_VERSION] = DoubleQuotedScalarString(
            LATEST_TRAINING_DATA_FORMAT_VERSION)

        if nlu_items:
            result[KEY_NLU] = nlu_items

        if training_data.responses:
            result[KEY_RESPONSES] = training_data.responses

        return result
Example no. 10
    def dump(self, target: Union[Text, Path, StringIO],
             training_data: "TrainingData") -> None:
        """Writes training data into a file in a YAML format.

        Args:
            target: Name of the target object to write the YAML to.
            training_data: TrainingData object.
        """
        from rasa.validator import KEY_TRAINING_DATA_FORMAT_VERSION
        from ruamel.yaml.scalarstring import DoubleQuotedScalarString

        nlu_items = []
        nlu_items.extend(self.process_intents(training_data))
        nlu_items.extend(self.process_synonyms(training_data))
        nlu_items.extend(self.process_regexes(training_data))
        nlu_items.extend(self.process_lookup_tables(training_data))

        result = OrderedDict()
        result[KEY_TRAINING_DATA_FORMAT_VERSION] = DoubleQuotedScalarString(
            LATEST_TRAINING_DATA_FORMAT_VERSION)

        if nlu_items:
            result[KEY_NLU] = nlu_items

        if training_data.responses:
            result[KEY_RESPONSES] = training_data.responses

        io_utils.write_yaml(result, target, True)
Example no. 11
def rewrite_role(role_path):
    cfgs = {
        'defaults': Acfg(J(role_path, 'defaults/main.yml')),
        'tasks': Acfg(J(role_path, 'tasks/main.yml')),
        'vars': Acfg(J(role_path, 'vars/main.yml')),
    }
    role_name = os.path.split(role_path)[-1]
    prefix = role_name
    if role_name.endswith('_vars'):
        prefix = prefix[:-5]
    if not prefix.startswith('corpusops'):
        prefix = 'corpusops_{0}'.format(prefix)
    if not prefix.endswith('_'):
        prefix += '_'
    # transfers vars from default flatten to default dict
    dsetup = False
    rvars = cfgs['vars'].data
    dvars = cfgs['defaults'].data
    for v in [a for a in dvars]:
        val = dvars[v]
        if v.startswith(prefix):
            fv = prefix.join((v.split(prefix)[1:]))
            if fv in registry_knobs:
                continue
            dsetup = True
            ddata = dvars.setdefault('___{0}'.format(prefix[:-1]), {})
            ddata[fv] = val
            dvars.pop(v)
            rvars.setdefault(
                '_{0}vars'.format(prefix),
                DoubleQuotedScalarString(
                    ("{{% set prefix = '{0}' %}}"
                     "{{% set snamespaced, vars ="
                     " vars|copsf_scoped_registry(prefix) %}}"
                     "{{{{ snamespaced | to_json}}}}").format(prefix)))
            rvars.setdefault(
                '{0}vars'.format(prefix),
                DoubleQuotedScalarString('{{{{_{0}vars}}}}'.format(prefix)))
            rvars.setdefault(
                v,
                DoubleQuotedScalarString('{{{{{0}vars.{1}}}}}'.format(
                    prefix, fv)))
    if dsetup:
        cfgs['defaults'].write()
        cfgs['vars'].write()
Example no. 12
def rewrite_quiz_to_new_structure(quiz_data):
    # Questions are keyed by their 1-based number inside each list entry
    for q_nr, question in enumerate(quiz_data['questions'], start=1):
        if question.get(q_nr):
            options_array = []
            for index, options in enumerate(question[q_nr]['mp_choice_options']):
                option_obj = []
                for options_key, options_value in options.items():
                    option_obj.append({
                        DoubleQuotedScalarString(options_key):
                        DoubleQuotedScalarString(options_value)
                    })
                options_array.append({index: option_obj})
            question[q_nr]['mp_choice_options'] = options_array
    return quiz_data
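
A small illustration of the transformation (hypothetical quiz data; the key names are taken from the code above):

quiz = {'questions': [
    {1: {'mp_choice_options': [{'option': 'Yes', 'feedback': 'Correct'}]}},
]}
print(rewrite_quiz_to_new_structure(quiz))
# {'questions': [{1: {'mp_choice_options': [{0: [{'option': 'Yes'},
#                                                {'feedback': 'Correct'}]}]}}]}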
Example no. 13
    def _config(self):
        """
        Configures ejabberd server.
        :return: None
        """
        self._find_dirs_if_needed()

        # Resolve the hostname before it is first used below
        if self.hostname is None and self.config is not None:
            self.hostname = self.config.hostname

        config_file = os.path.join(self._config_dir, 'ejabberd.yml')
        config_file_backup = os.path.join(self._config_dir,
                                          'ejabberd.yml.backup')

        # Backup the config file. If config is present, use that one
        if os.path.exists(config_file_backup):
            shutil.copy(config_file_backup, config_file)
        else:
            shutil.copy(config_file, config_file_backup)

        with open(config_file) as fh:
            config_data = fh.read()
        config_yml = ruamel.yaml.round_trip_load(config_data,
                                                 preserve_quotes=True)

        # virtual host setup
        config_yml['hosts'] = [DoubleQuotedScalarString(self.hostname)]

        # external authentication setup
        ext_auth_path = os.path.join(self._extauth_path, 'external_cloud.py')
        config_yml['auth_method'] = SingleQuotedScalarString('external')
        config_yml['extauth_cache'] = 0
        config_yml['extauth_program'] = DoubleQuotedScalarString(
            '%s -t ejabberd -s %s -u %s' %
            (ext_auth_path, self.extauth_token, self.extauth_endpoint))

        # add admin user - from NextCloud
        util.setpath(config_yml, ['acl', 'admin', 'user'],
                     [DoubleQuotedScalarString('admin@%s' % self.hostname)])

        with open(config_file, 'w') as fh:
            new_config = ruamel.yaml.round_trip_dump(config_yml)
            fh.write(new_config)

        self._create_cert_files()
Example no. 14
def main(mgmt_prefix):
    """
    Execution starts here.
    """

    # Create an IPv6 network object to test subnet containment later
    mgmt_net = IPv6Network(mgmt_prefix)

    # Create netmiko SSH connection handler to access the device
    conn = Netmiko(
        host="192.0.2.1",
        username="******",
        password="******",
        device_type="cisco_ios",
    )

    # Should use "show bgp ipv6 unicast", but the Genie parser has a bug:
    # https://github.com/CiscoTestAutomation/genieparser/issues/362
    resp = conn.send_command("show bgp all", use_genie=True)
    v6_rte = resp["vrf"]["default"]["address_family"]["ipv6 unicast"]["routes"]

    # Initialize Ansible YAML inventory dictionary
    ansible_inv = {"all": {"children": {"remotes": {"hosts": {}}}}}

    # Iterate over all collected BGP prefixes
    for index, prefix in enumerate(v6_rte.keys()):

        # Create an IPv6 network representing the specific prefix
        prefix_net = IPv6Network(prefix.lower())

        # Test for subnet containment and for /128 mask
        if prefix_net.subnet_of(mgmt_net) and prefix.endswith("/128"):

            # Assemble inventory item and update inventory dict
            prefix_str = DoubleQuotedScalarString(prefix_net.network_address)
            ansible_inv["all"]["children"]["remotes"]["hosts"].update(
                {f"node_{index + 1}": {
                    "ansible_host": prefix_str
                }})
            print(prefix_str)

    # Close connection when finished
    conn.disconnect()

    # Instantiate the YAML object, preserving quotes and
    # using explicit start (---) and end (...) markers
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.explicit_start = True
    yaml.explicit_end = True

    # Dump the Ansible inventory to a new file for use later
    with open("bgp_hosts.yml", "w") as handle:
        yaml.dump(ansible_inv, handle)
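
A hypothetical entry point (the prefix below is an illustrative documentation-range value, not taken from the original):

if __name__ == "__main__":
    main("2001:db8:0:ff::/64")  # hypothetical management prefix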
Example no. 15
def main():
    """ Main function """
    conf = read_config()
    token = conf['deploy']

    args = parse_args()

    file_yaml = get_gitlab_yaml(PATH, token)
    print(file_yaml['section']['subsection']['field'])
    for domain in args.domains:
        if not domain.startswith("https://"):
            domain = "https://{}".format(domain)
        file_yaml['section']['subsection']['field'].append(
            DoubleQuotedScalarString(domain))
    r = update_gitlab_yaml(PATH, token, file_yaml)  #pylint: disable=invalid-name
    # print(r.text)
    print(r)
Example no. 16
class Test_enums_YAMLValueFormats():
    """Tests for the YAMLValueFormats enumeration."""
    def test_get_names(self):
        assert YAMLValueFormats.get_names() == [
            "BARE",
            "BOOLEAN",
            "DEFAULT",
            "DQUOTE",
            "FLOAT",
            "FOLDED",
            "INT",
            "LITERAL",
            "SQUOTE",
        ]

    @pytest.mark.parametrize("input,output", [
        ("BARE", YAMLValueFormats.BARE),
        ("BOOLEAN", YAMLValueFormats.BOOLEAN),
        ("DEFAULT", YAMLValueFormats.DEFAULT),
        ("DQUOTE", YAMLValueFormats.DQUOTE),
        ("FLOAT", YAMLValueFormats.FLOAT),
        ("FOLDED", YAMLValueFormats.FOLDED),
        ("INT", YAMLValueFormats.INT),
        ("LITERAL", YAMLValueFormats.LITERAL),
        ("SQUOTE", YAMLValueFormats.SQUOTE),
    ])
    def test_from_str(self, input, output):
        assert output == YAMLValueFormats.from_str(input)

    def test_from_str_nameerror(self):
        with pytest.raises(NameError):
            YAMLValueFormats.from_str("NO SUCH NAME")

    @pytest.mark.parametrize("input,output", [
        (FoldedScalarString(""), YAMLValueFormats.FOLDED),
        (LiteralScalarString(""), YAMLValueFormats.LITERAL),
        (DoubleQuotedScalarString(''), YAMLValueFormats.DQUOTE),
        (SingleQuotedScalarString(""), YAMLValueFormats.SQUOTE),
        (PlainScalarString(""), YAMLValueFormats.BARE),
        (ScalarBoolean(False), YAMLValueFormats.BOOLEAN),
        (ScalarFloat(1.01), YAMLValueFormats.FLOAT),
        (ScalarInt(10), YAMLValueFormats.INT),
        (None, YAMLValueFormats.DEFAULT),
    ])
    def test_from_node(self, input, output):
        assert output == YAMLValueFormats.from_node(input)
Example no. 17
def _assemble_new_domain(domain_file: Path, new_forms: Dict[Text, Any],
                         new_slots: Dict[Text, Any]) -> Dict[Text, Any]:
    original_content = rasa.shared.utils.io.read_yaml(
        rasa.shared.utils.io.read_file(domain_file))
    new_domain: Dict[Text, Any] = {}
    for key, value in original_content.items():
        if key == KEY_SLOTS:
            new_domain.update({key: new_slots})
        elif key == KEY_FORMS:
            new_domain.update({key: new_forms})
        elif key == "version":
            new_domain.update({
                key:
                DoubleQuotedScalarString(LATEST_TRAINING_DATA_FORMAT_VERSION)
            })
        else:
            new_domain.update({key: value})
    return new_domain
Example no. 18
    def stories_to_yaml(self, story_steps: List[StoryStep]) -> Dict[Text, Any]:
        """Converts a sequence of story steps into yaml format.

        Args:
            story_steps: Original story steps to be converted to the YAML.
        """
        from rasa.shared.utils.validation import KEY_TRAINING_DATA_FORMAT_VERSION

        stories = []
        for story_step in story_steps:
            processed_story_step = self.process_story_step(story_step)
            stories.append(processed_story_step)

        result = OrderedDict()
        result[KEY_TRAINING_DATA_FORMAT_VERSION] = DoubleQuotedScalarString(
            LATEST_TRAINING_DATA_FORMAT_VERSION)

        result[KEY_STORIES] = stories
        return result
Example no. 19
def to_ruamel_format(element):
    """
        Make a given element to the proper format so it can be properly written in a YAML file

        @param element: String, integer or dictionary that needs to be stored in a YAML file
        @return: Properly formatted element
    """
    # If it's a dictionary or OrderedDict
    if isinstance(element, dict):
        # This is the equivalent of an OrderedDict
        commented_map = CommentedMap()
        # Call recursively this function on values of the dictionary
        for key, value in element.items():
            commented_map[key] = to_ruamel_format(value)
        return commented_map
    # If it is a literal quote pair (an explicitly "empty" string)
    elif element == "\"\"" or element == "\'\'":
        return DoubleQuotedScalarString('')
    # Otherwise just return the element as it is
    return element
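
A brief sketch of the behaviour (assuming CommentedMap and DoubleQuotedScalarString are imported as in the function): nested dicts become CommentedMaps and literal quote-pair markers become empty double-quoted scalars.

import sys
from ruamel.yaml import YAML

yaml = YAML()
yaml.dump(to_ruamel_format({'name': 'demo',
                            'empty': '""',
                            'nested': {'x': 1}}), sys.stdout)
# name: demo
# empty: ""
# nested:
#   x: 1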
Example no. 20
def replace_quoted_strings(data: Any, options: QuotedStringOptions) -> Any:
    quote_cast = (
        SingleQuotedScalarString
        if options.quote_type == SINGLE
        else DoubleQuotedScalarString
    )
    patterns = options.extra_required + options.extra_allowed
    # entries must be replaced in place by key/index; items() or plain list iteration won't work here
    if isinstance(data, CommentedMap):
        for key in data.keys():
            data[key] = replace_quoted_strings(data[key], options)
    elif isinstance(data, CommentedSeq):
        for indx in range(len(data)):
            data[indx] = replace_quoted_strings(data[indx], options)
    elif isinstance(data, str):
        if (
            options.required == ONLY_WHEN_NEEDED
            and isinstance(data, (SingleQuotedScalarString, DoubleQuotedScalarString))
            and not _quotes_are_needed(data)
            and all(pattern.search(data) is None for pattern in patterns)
        ):
            return PlainScalarString(data)
        elif (
            options.required == TRUE
            or _quotes_are_needed(data)
            or any(
                pattern.search(data) is not None for pattern in options.extra_required
            )
        ) and not isinstance(
            data, (SingleQuotedScalarString, DoubleQuotedScalarString)
        ):
            return quote_cast(data)
        elif options.quote_type == SINGLE and isinstance(
            data, DoubleQuotedScalarString
        ):
            return SingleQuotedScalarString(data)
        elif options.quote_type == DOUBLE and isinstance(
            data, SingleQuotedScalarString
        ):
            return DoubleQuotedScalarString(data)
    return data
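
Note the branch order here: when required is ONLY_WHEN_NEEDED, a string that is already quoted but needs no quoting (and matches neither extra_required nor extra_allowed) is demoted to a plain scalar; an unquoted string that needs quoting (or matches an extra_required pattern) is promoted with quote_cast; and the final two branches merely swap the quote style of already-quoted strings to match quote_type.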
Example no. 21
def _volumes_template(tosca_model):
    """ Generate the docker-compose template for the TOSCA volumes. """

    volumes = {}
    for volume in tosca_model.volumes:

        logger.debug('Translating the volume node [{0}]'.format(volume.name))

        volumes[volume.name] = None

        if volume.driver_opt is not None:
            volumes[volume.name] = {
                'driver_opts':
                [DoubleQuotedScalarString(e) for e in volume.driver_opt]
            }

        # TODO external=True|False?
        # what if the volume is created outside compose?
        # make an option --ext-volumes to use ext. volumes (if they exist)?
        # TODO what about labels?
        # TODO what about name?

    return volumes
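
A minimal sketch with a stand-in model object (SimpleNamespace is purely illustrative; the real tosca_model comes from the TOSCA parser, and the module's logger is assumed to be configured):

from types import SimpleNamespace

fake_model = SimpleNamespace(volumes=[
    SimpleNamespace(name='data', driver_opt=['type=nfs']),
    SimpleNamespace(name='scratch', driver_opt=None),
])
print(_volumes_template(fake_model))
# {'data': {'driver_opts': ['type=nfs']}, 'scratch': None}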
Example no. 22
def gen_inputs_column_and_rows(parquet_file, table_name=''):
    global sess
    if sess is None:
        sess = pyspark.sql.SparkSession(pyspark.SparkContext())
    dataframe = sess.read.parquet(parquet_file)
    hdfs_schema = dataframe.schema
    schema = [
        DoubleQuotedScalarString(to_column_str(f)) for f in hdfs_schema.fields
    ]

    table = yaml.load(INPUT_TEMPLATE, Loader=RoundTripLoader)

    if table_name:
        table['name'] = table_name

    table['columns'] = schema

    data_set = []
    row_cnt = random.randint(1, 10)
    for _ in range(row_cnt):
        data_set.append(random_row(hdfs_schema))

    table['rows'] = [list(map(to_string, row)) for row in data_set]
    return table
Example no. 23
with open(filename) as f:
    all = yaml.load(f)
    for t in ("presubmits", "postsubmits", "periodics"):
        if t not in filename:
            continue
        for repo in all[t]:
            jobs = []
            for job in all[t][repo]:
                jobs.append(job)
                if "labels" in job and "ci-operator.openshift.io/prowgen-controlled" in job[
                        "labels"] and job["labels"][
                            "ci-operator.openshift.io/prowgen-controlled"] == "true":
                    build01_job = copy.deepcopy(job)
                    build01_job["name"] += "-migrated"
                    build01_job['context'] += "-migrated"
                    build01_job[
                        'cluster'] = 'ci/api-build01-ci-devcluster-openshift-com:6443'
                    del build01_job["labels"][
                        "ci-operator.openshift.io/prowgen-controlled"]
                    build01_job["labels"][
                        "ci-operator.openshift.io/semantics-ignored"] = DoubleQuotedScalarString(
                            "true")
                    build01_job['optional'] = True
                    build01_job['skip_report'] = True
                    jobs.append(build01_job)
            #jobs = sorted(jobs, key=lambda job: job['name'])
            all[t][repo] = jobs

with open(sys.argv[1], 'w') as f:
    yaml.dump(all, f)
Example no. 24
def generate_configtx(_network_config: NetworkConfiguration,
                      _orgs=2,
                      _orderers=3,
                      _kafka_brokers=3,
                      _consortium="WebConsortium",
                      _domain="dredev.de",
                      _blocksize=10,
                      _timeout=1):
    """
    This Function will generate a configtx.yaml file in the current working dir.
    :param _network_config: Configuration Structure for Network affairs like ports or dns names
    :param _orgs: Number of Organizations to configure (default 2)
    :param _orderers: Number of Orderers to configure (default 3)
    :param _kafka_brokers: (Optional) if Kafka = true, Number of Kafka Nodes to configure (default 3)
    :param _consortium: Generic Consortium name (default "WebConsortium")
    :param _domain: Domain name of the network (default "dredev.de")
    :param _blocksize: Configurable amount of transactions per block
    :param _timeout: Block Timeout, influencing block generation
    """

    yaml_new = ruamel.yaml.YAML()

    orgs = [
        Orga(org="",
             domain="ordererOrganizations",
             orgmsp="OrdererMSP",
             ap=False)
    ]  # Default one orderer!
    # Create Orga objects for an object-oriented approach; otherwise this would be too tedious to maintain
    for i in range(_orgs):
        orgs.append(
            Orga(org=f"org{i + 1}.",
                 domain="peerOrganizations",
                 orgmsp=f"Org{i + 1}MSP",
                 ap=True))

    orga_list = []

    # Configure the Access Policies
    for org in orgs:
        print(bcolors.WARNING + f"       [*] Configuring Org {org.org_msp}")
        org_policies = {
            "Readers": {
                "Type": "Signature",
                "Rule": DoubleQuotedScalarString(f"OR('{org.org_msp}.member')")
            },
            "Writers": {
                "Type": "Signature",
                "Rule": DoubleQuotedScalarString(f"OR('{org.org_msp}.member')")
            },
            "Admins": {
                "Type": 'Signature',
                "Rule": DoubleQuotedScalarString(f"OR('{org.org_msp}.admin')")
            },
            "Endorsement": {
                "Type": "Signature",
                "Rule": DoubleQuotedScalarString(f"OR('{org.org_msp}.member')")
            }
        }
        orderer_org = {
            "Name": f"{org.org_msp}",
            "ID": f"{org.org_msp}",
            "MSPDir": f"crypto-config/{org.domain}/{org.org}{_domain}/msp",
            "Policies": org_policies
        }
        if org.anchor_peer:
            orderer_org.update({
                "AnchorPeers": [{
                    "Host": f"peer0.{org.org}{_domain}",
                    "Port": 7051
                }]
            })

        orga_list.append(orderer_org)
        print(bcolors.OKGREEN +
              f"       [+] Configuring for Org {org.org_msp} COMPLETE")
    print(bcolors.WARNING + "       [*] Configuring Capabilities")
    channel_capabilities = {"V2_0": True}
    orderer_capabilities = {"V2_0": True}
    app_capabilities = {"V2_0": True}

    capabilities = {
        "Channel": channel_capabilities,
        "Orderer": orderer_capabilities,
        "Application": app_capabilities
    }
    print(bcolors.OKGREEN + "       [+] Configuring Capabilities COMPLETE")

    print(bcolors.WARNING + "       [*] Configuring App Permissions")
    application = {
        "ACLs": {
            "_lifecycle/CheckCommitReadiness": "/Channel/Application/Writers",

            # ACL policy for _lifecycle's "CommitChaincodeDefinition" function
            "_lifecycle/CommitChaincodeDefinition":
            "/Channel/Application/Writers",

            # ACL policy for _lifecycle's "QueryChaincodeDefinition" function
            "_lifecycle/QueryChaincodeDefinition":
            "/Channel/Application/Readers",

            # ACL policy for _lifecycle's "QueryChaincodeDefinitions" function
            "_lifecycle/QueryChaincodeDefinitions":
            "/Channel/Application/Readers",

            # ---Lifecycle System Chaincode (lscc) function to policy mapping for access control---#
            # ACL policy for lscc's "getid" function
            "lscc/ChaincodeExists": "/Channel/Application/Readers",

            # ACL policy for lscc's "getdepspec" function
            "lscc/GetDeploymentSpec": "/Channel/Application/Readers",

            # ACL policy for lscc's "getccdata" function
            "lscc/GetChaincodeData": "/Channel/Application/Readers",

            # ACL Policy for lscc's "getchaincodes" function
            "lscc/GetInstantiatedChaincodes": "/Channel/Application/Readers",

            # ---Query System Chaincode (qscc) function to policy mapping for access control---#

            # ACL policy for qscc's "GetChainInfo" function
            "qscc/GetChainInfo": "/Channel/Application/Readers",

            # ACL policy for qscc's "GetBlockByNumber" function
            "qscc/GetBlockByNumber": "/Channel/Application/Readers",

            # ACL policy for qscc's  "GetBlockByHash" function
            "qscc/GetBlockByHash": "/Channel/Application/Readers",

            # ACL policy for qscc's "GetTransactionByID" function
            "qscc/GetTransactionByID": "/Channel/Application/Readers",

            # ACL policy for qscc's "GetBlockByTxID" function
            "qscc/GetBlockByTxID": "/Channel/Application/Readers",

            # ---Configuration System Chaincode (cscc) function to policy mapping for access control---#

            # ACL policy for cscc's "GetConfigBlock" function
            "cscc/GetConfigBlock": "/Channel/Application/Readers",

            # ACL policy for cscc's "GetConfigTree" function
            "cscc/GetConfigTree": "/Channel/Application/Readers",

            # ACL policy for cscc's "SimulateConfigTreeUpdate" function
            "cscc/SimulateConfigTreeUpdate": "/Channel/Application/Readers",

            # ---Miscellaneous peer function to policy mapping for access control---#

            # ACL policy for invoking chaincodes on peer
            "peer/Propose": "/Channel/Application/Writers",

            # ACL policy for chaincode to chaincode invocation
            "peer/ChaincodeToChaincode": "/Channel/Application/Readers",

            # ---Events resource to policy mapping for access control---#

            # ACL policy for sending block events
            "event/Block": "/Channel/Application/Readers",

            # ACL policy for sending filtered block events
            "event/FilteredBlock": "/Channel/Application/Readers",
        },
        "Organizations": None,
        "Policies": {
            "LifecycleEndorsement": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Endorsement"),
            },
            "Endorsement": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Endorsement"),
            },
            "Readers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Readers")
            },
            "Writers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Writers")
            },
            "Admins": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Admins")
            }
        },
        "Capabilities": {
            "<<": app_capabilities
        }
    }
    print(bcolors.OKGREEN + "       [+] Configuring App Permissions COMPLETE")
    orderer_addresses = []
    orderer_addresses_with_port = []
    kafka_list = []
    for i in range(_orderers):
        orderer_addresses.append(f"orderer{i + 1}.{_domain}")
        orderer_addresses_with_port.append(f"orderer{i + 1}.{_domain}:7050")

    for i in range(_kafka_brokers):
        kafka_list.append(f"kafka{i}:9092")

    print(bcolors.WARNING + "       [*] Generating Orderer Config")

    orderer = {
        "Addresses": orderer_addresses_with_port,
        # Batch Timeout: The amount of time to wait before creating a batch.
        "BatchTimeout": f"{_timeout}s",
        "BatchSize": {
            "MaxMessageCount": _blocksize,
            "AbsoluteMaxBytes": "10 MB",
            "PreferredMaxBytes": "2 MB",
        },
        "MaxChannels": 0,
        "Organizations": None,
        "Policies": {
            "Readers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Readers")
            },
            "Writers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Writers")
            },
            "Admins": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Admins")
            },
            "BlockValidation": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Writers")
            }
        },
        "Capabilities": {
            "<<": orderer_capabilities
        }
    }
    if _network_config.ordering_service == "kafka":
        # Use Kafka Ordering
        orderer.update({"OrdererType": "kafka"})
        orderer.update({"Kafka": {"Brokers": kafka_list}})
    else:
        # Use Raft Ordering
        orderer.update({"OrdererType": "etcdraft"})
        raft_config = {
            "Consenters": [{
                "Host":
                addr,
                "Port":
                _network_config.orderer_defport,
                "ClientTLSCert":
                f"crypto-config/ordererOrganizations/{_domain}/orderers/{addr}/tls/server.crt",
                "ServerTLSCert":
                f"crypto-config/ordererOrganizations/{_domain}/orderers/{addr}/tls/server.crt"
            } for addr in orderer_addresses]
        }
        orderer.update({"EtcdRaft": raft_config})

    print(bcolors.OKGREEN + "       [+] Generating Orderer Config COMPLETE")
    print(bcolors.WARNING + "       [*] Generating Channel Config")
    channel = {
        "Policies": {
            "Readers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Readers"),
            },
            "Writers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Writers"),
            },
            "Admins": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Admins")
            }
        },
        "Capabilities": {
            "<<": channel_capabilities,
        }
    }
    print(bcolors.OKGREEN + "       [*] Generating Channel Config COMPLETE")

    ord_list = []
    for i in range(_orderers):
        ord_list.append(f"orderer{i + 1}.{_domain}:7050")

    print(bcolors.WARNING + "       [*] Generating Profiles")
    orderer_p = {
        "<<": orderer,
        "Addresses": ord_list,
        "Organizations": [orga_list[0]],
        "Capabilities": {
            "<<": orderer_capabilities,
        }
    }
    if _network_config.ordering_service == "kafka":
        orderer_p.update({"OrdererType": "kafka"})
    else:
        orderer_p.update({"OrdererType": "etcdraft"})
        orderer_p.update({"EtcdRaft": raft_config})

    profiles = {
        "OrdererDefault": {
            "<<": channel,
            "Capabilities": {
                "<<": channel_capabilities
            },
            "Orderer": orderer_p,
            "Consortiums": {
                _consortium: {
                    "Organizations": orga_list[1:]
                }
            }
        },
        "MainChannel": {
            "<<": channel,
            "Consortium": _consortium,
            "Application": {
                "<<": application,
                "Organizations": orga_list[1:]
            },
            "Capabilities": {
                "<<": app_capabilities
            }
        }
    }
    print(bcolors.OKGREEN + "       [+] Generating Profiles COMPLETE")
    print(bcolors.OKBLUE + "    [*] Generating Final Object")
    final = {
        "Organizations": orga_list,
        "Capabilities": capabilities,
        "Application": application,
        "Orderer": orderer,
        "Channel": channel,
        "Profiles": profiles
    }
    print(bcolors.OKBLUE + "    [+] Generating Final Object COMPLETE")

    with open("configtx.yaml", "w") as f:
        yaml_new.dump(final, f, transform=tr)

    print(bcolors.HEADER + "========================================")
    print(">>> configtx.yaml has been dumped!")
    print("========================================")
Example no. 25
# (preamble reconstructed; argument names inferred from the usage below)
import argparse
import os

import ruamel.yaml
from ruamel.yaml.scalarstring import DoubleQuotedScalarString

parser = argparse.ArgumentParser()
parser.add_argument('input_file_path', type=str,
                    help='Path to the input YAML file')
parser.add_argument('key_to_explode', type=str,
                    help='Key of the sequence to explode')
parser.add_argument('output_dir_path',
                    type=str,
                    help='Path to the output directory')

args = parser.parse_args()

yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True

input_file_path = args.input_file_path
output_dir_path = args.output_dir_path

file_name = os.path.basename(input_file_path)

with open(input_file_path) as fp:
    data = yaml.load(fp)

sequence = data[args.key_to_explode]

for item in sequence:
    print(item)
    item_dir_path = os.path.join(output_dir_path, item)

    if not os.path.exists(item_dir_path):
        os.makedirs(item_dir_path)

    data[args.key_to_explode] = [DoubleQuotedScalarString(item)]

    output_file_path = os.path.join(item_dir_path, file_name)
    with open(output_file_path, 'w') as fp:
        yaml.dump(data, fp)
Example no. 26
def generate_core():

    yaml_new = ruamel.yaml.YAML()
    print(bcolors.WARNING + "     [*] Generating Peer Core")
    peer = {
        "id": "peer",
        "networkId": "byfn",
        "listenAddress": "0.0.0.0:7051",
        "address": "0.0.0.0:7051",
        "addressAutoDetect": False,
        "keepalive": {
            "interval": "7200s",
            "timeout": "20s",
            "minInterval": "60s",
            "client": {
                "interval": "60s",
                "timeout": "20s",
            },
            "deliveryClient": {
                "interval": "60s",
                "timeout": "20s"
            }
        },
        "gossip": {
            "bootstrap": "127.0.0.1:7051",
            "useLeaderElection": True,
            "orgLeader": False,
            "membershipTrackerInterval": "5s",
            "endpoint": None,
            "maxBlockCountToStore": 100,
            "maxPropagationBurstLatency": "10ms",
            "maxPropagationBurstSize": 10,
            "propagateIterations": 1,
            "propagatePeerNum": 3,
            "pullInterval": "4s",
            "pullPeerNum": 3,
            "requestStateInfoInterval": "4s",
            # Determines frequency of pushing state info messages to peers(unit: second)
            "publishStateInfoInterval": "4s",
            # Maximum time a stateInfo message is kept until expired
            "stateInfoRetentionInterval": None,
            # Time from startup certificates are included in Alive messages(unit: second)
            "publishCertPeriod": "10s",
            # Should we skip verifying block messages or not (currently not in use)
            "skipBlockVerification": False,
            # Dial timeout(unit: second)
            "dialTimeout": "3s",
            # Connection timeout(unit: second)
            "connTimeout": "2s",
            # Buffer size of received messages
            "recvBuffSize": 20,
            # Buffer size of sending messages
            "sendBuffSize": 200,
            # Time to wait before pull engine processes incoming digests (unit: second)
            # Should be slightly smaller than requestWaitTime
            "digestWaitTime": "1s",
            # Time to wait before pull engine removes incoming nonce (unit: milliseconds)
            # Should be slightly bigger than digestWaitTime
            "requestWaitTime": "1500ms",
            # Time to wait before pull engine ends pull (unit: second)
            "responseWaitTime": "2s",
            # Alive check interval(unit: second)
            "aliveTimeInterval": "5s",
            # Alive expiration timeout(unit: second)
            "aliveExpirationTimeout": "25s",
            # Reconnect interval(unit: second)
            "reconnectInterval": "25s",
            # This is an endpoint that is published to peers outside of the organization.
            # If this isn't set, the peer will not be known to other organizations.
            "externalEndpoint": None,
            # Leader election service configuration
            "election": {
                # Longest time peer waits for stable membership during leader election startup (unit: second)
                "startupGracePeriod": "15s",
                # Interval gossip membership samples to check its stability (unit: second)
                "membershipSampleInterval": "1s",
                # Time passes since last declaration message before peer decides to perform leader election (unit: second)
                "leaderAliveThreshold": "10s",
                # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second)
                "leaderElectionDuration": "5s"
            },
            "pvtData": {
                "pullRetryThreshold": "60s",
                "transientstoreMaxBlockRetention": 1000,
                "pushAckTimeout": "3s",
                "btlPullMargin": 10,
                "reconcileBatchSize": 10,
                "reconcileSleepInterval": "1m",
                "reconciliationEnabled": True,
                "skipPullingInvalidTransactionsDuringCommit": False,
            },
            "state": {
                "enabled": True,
                "checkInterval": "10s",
                "responseTimeout": "3s",
                "batchSize": 10,
                "blockBufferSize": 100,
                "maxRetries": 3
            },
        },
        "tls": {
            "enabled": False,
            "clientAuthRequired": False,
            "cert": {
                "file": "tls/server.crt",
            },
            "key": {
                "file": "tls/server.key",
            },
            "rootcert": {
                "file": "tls/ca.crt",
            },
            "clientRootCAs": {
                "files": ["tls/ca.crt"]
            },
            "clientKey": {
                "file": None
            },
            "clientCert": {
                "file": None
            }
        },
        "authentication": {
            "timewindow": "15m"
        },
        "fileSystemPath": "/var/hyperledger/production",
        "BCCSP": {
            "Default": "SW",
            "SW": {
                "Hash": "SHA2",
                "Security": 256,
                "FileKeyStore": {
                    "KeyStore": None,
                },
            },
            "PKCS11": {
                "Library": None,
                "Label": None,
                "Pin": None,
                "Hash": None,
                "Security": None
            }
        },
        "mspConfigPath": "msp",
        "localMspId": "SampleOrg",
        "client": {
            "connTimeout": "3s"
        },
        "deliveryclient": {
            "reconnectTotalTimeThreshold": "3600s",
            "connTimeout": "3s",
            "reConnectBackoffThreshold": "3600s",
            "addressOverrides": None,
        },
        "localMspType": "bccsp",
        "profile": {
            "enabled": False,
            "listenAddress": "0.0.0.0:6060"
        },
        "handlers": {
            "authFilters": [
                {
                    "name": "DefaultAuth"
                },
                {
                    "name": "ExpirationCheck"
                },
            ],
            "decorators": [{
                "name": "DefaultDecorator"
            }],
            "endorsers": {
                "escc": {
                    "name": "DefaultEndorsement",
                    "library": None,
                }
            },
            "validators": {
                "vscc": {
                    "name": "DefaultValidation",
                    "library": None,
                }
            }
        },
        "validatorPoolSize": None,
        "discovery": {
            "enabled": True,
            "authCacheEnabled": True,
            "authCacheMaxSize": 1000,
            "authCachePurgeRetentionRatio": 0.75,
            "orgMembersAllowedAccess": False,
        },
        "limits": {
            "concurrency": {
                "qscc": 5000,
            }
        }
    }
    print(bcolors.OKGREEN + "     [+] Generating Peer Core COMPLETE")

    print(bcolors.WARNING + "     [*] Generating VM Core ")
    vm = {
        "endpoint": "unix:///var/run/docker.sock",
        "docker": {
            "tls": {
                "enabled": False,
                "ca": {
                    "file": "docker/ca.crt",
                },
                "cert": {
                    "file": "docker/tls.crt",
                },
                "key": {
                    "file": "docker/tls.key",
                },
            },
            "attachStdout": False,
            "hostConfig": {
                "NetworkMode": "host",
                "Dns": None,
                "LogConfig": {
                    "Type": "json-file",
                    "Config": {
                        "max-size": DoubleQuotedScalarString("50m"),
                        "max-file": DoubleQuotedScalarString("5")
                    }
                },
                "Memory": 2147483648
            }
        }
    }

    print(bcolors.OKGREEN + "     [+] Generating VM Core COMPLETE")

    print(bcolors.WARNING + "     [*] Generating Chaincode Core ")
    chaincode = {
        "id": {
            "path": None,
            "name": None,
        },
        "builder": "$(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION)",
        "pull": False,
        "golang": {
            "runtime": "$(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION)",
            "dynamicLink": False,
        },
        "java": {
            "runtime": "$(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION)",
        },
        "node": {
            "runtime": "$(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION)",
        },
        "externalBuilders": [],
        "installTimeout": "300s",
        "startuptimeout": "300s",
        "executetimeout": "30s",
        "mode": "net",
        "keepalive": 0,
        "system": {
            "_lifecycle": "enable",
            "cscc": "enable",
            "lscc": "enable",
            "escc": "enable",
            "vscc": "enable",
            "qscc": "enable",
        },
        "logging": {
            "level":
            "info",
            "shim":
            "warning",
            "format":
            ruamel.yaml.scalarstring.SingleQuotedScalarString(
                '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}'
            )
        }
    }

    print(bcolors.OKGREEN + "     [+] Generating Chaincode Core COMPLETE")

    print(bcolors.WARNING + "     [*] Generating Ledger Core ")
    ledger = {
        "blockchain": None,
        "state": {
            "stateDatabase": "goleveldb",
            "totalQueryLimit": 100000,
            "couchDBConfig": {
                "couchDBAddress": "127.0.0.1:5984",
                "username": None,
                "password": None,
                "maxRetries": 3,
                "maxRetriesOnStartup": 12,
                "requestTimeout": "35s",
                "internalQueryLimit": 1000,
                "maxBatchUpdateSize": 1000,
                "warmIndexesAfterNBlocks": 1,
                "createGlobalChangesDB": False,
                "cacheSize": 64,
            }
        },
        "history": {
            "enableHistoryDatabase": True,
        },
        "pvtdataStore": {
            "collElgProcMaxDbBatchSize": 5000,
            "collElgProcDbBatchesInterval": 1000
        }
    }

    print(bcolors.OKGREEN + "     [+] Generating Ledger Core COMPLETE")
    print(bcolors.WARNING + "     [*] Generating Operations Core ")
    operations = {
        "listenAddress": "127.0.0.1:9443",
        "tls": {
            "enabled": False,
            "cert": {
                "file": None,
            },
            "key": {
                "file": None,
            },
            "clientAuthRequired": False,
            "clientRootCAs": {
                "files": []
            }
        }
    }
    print(bcolors.OKGREEN + "     [+] Generating Operations Core COMPLETE")
    print(bcolors.WARNING + "     [*] Generating Metrics Core ")

    metrics = {
        "provider": "disabled",
        "statsd": {
            "network": "udp",
            "address": "127.0.0.1:8125",
            "writeInterval": "10s",
            "prefix": None
        }
    }
    print(bcolors.OKGREEN + "     [*] Generating Metrics Core COMPLETE")

    print(bcolors.OKBLUE + "======= Generating final Structure =======")
    final = {
        "peer": peer,
        "vm": vm,
        "chaincode": chaincode,
        "ledger": ledger,
        "operations": operations,
        "metrics": metrics
    }

    # yaml_new.dump(final, sys.stdout)
    with open("core.yaml", "w") as f:
        yaml_new.dump(final, f)
    print(bcolors.HEADER + "========================================")
    print(">>> core.yaml has been dumped!")
    print("========================================")
Example no. 27
def generate_configtx(_orgs=2,
                      _orderers=3,
                      _kafka_brokers=4,
                      _consortium="WebConsortium",
                      _domain="dredev.de",
                      _blocksize=10,
                      _timeout=1):
    yaml_new = ruamel.yaml.YAML()

    orgs = [
        Orga(org="",
             domain="ordererOrganizations",
             orgmsp="OrdererMSP",
             ap=False)
    ]  # Default one orderer!
    for i in range(_orgs):
        orgs.append(
            Orga(org="org{}.".format(i + 1),
                 domain="peerOrganizations",
                 orgmsp="Org{}MSP".format(i + 1),
                 ap=True))

    orga_list = []
    for org in orgs:
        print(bcolors.WARNING +
              "       [*] Configuring Org {}".format(org.org_msp))
        org_policies = {
            "Readers": {
                "Type":
                "Signature",
                "Rule":
                DoubleQuotedScalarString("OR('{}.member')".format(org.org_msp))
            },
            "Writers": {
                "Type":
                "Signature",
                "Rule":
                DoubleQuotedScalarString("OR('{}.member')".format(org.org_msp))
            },
            "Admins": {
                "Type":
                'Signature',
                "Rule":
                DoubleQuotedScalarString("OR('{}.admin')".format(org.org_msp))
            },
            "Endorsement": {
                "Type":
                "Signature",
                "Rule":
                DoubleQuotedScalarString("OR('{}.member')".format(org.org_msp))
            }
        }
        orderer_org = {
            "Name":
            "{}".format(org.org_msp),
            "ID":
            "{}".format(org.org_msp),
            "MSPDir":
            "crypto-config/{}/{}{}/msp".format(org.domain, org.org, _domain),
            "Policies":
            org_policies
        }
        if org.anchor_peer:
            orderer_org.update({
                "AnchorPeers": [{
                    "Host": "peer0.{}{}".format(org.org, _domain),
                    "Port": 7051
                }]
            })

        orga_list.append(orderer_org)
        print(bcolors.OKGREEN +
              "       [+] Configuring for Org {} COMPLETE".format(org.org_msp))
    print(bcolors.WARNING + "       [*] Configuring Capabilities")
    channel_capabilities = {"V2_0": True}
    orderer_capabilities = {"V2_0": True}
    app_capabilities = {"V2_0": True}

    capabilities = {
        "Channel": channel_capabilities,
        "Orderer": orderer_capabilities,
        "Application": app_capabilities
    }
    print(bcolors.OKGREEN + "       [+] Configuring Capabilities COMPLETE")

    print(bcolors.WARNING + "       [*] Configuring App Permissions")
    application = {
        "ACLs": {
            "_lifecycle/CheckCommitReadiness": "/Channel/Application/Writers",

            # ACL policy for _lifecycle's "CommitChaincodeDefinition" function
            "_lifecycle/CommitChaincodeDefinition":
            "/Channel/Application/Writers",

            # ACL policy for _lifecycle's "QueryChaincodeDefinition" function
            "_lifecycle/QueryChaincodeDefinition":
            "/Channel/Application/Readers",

            # ACL policy for _lifecycle's "QueryChaincodeDefinitions" function
            "_lifecycle/QueryChaincodeDefinitions":
            "/Channel/Application/Readers",

            # ---Lifecycle System Chaincode (lscc) function to policy mapping for access control---#
            # ACL policy for lscc's "getid" function
            "lscc/ChaincodeExists": "/Channel/Application/Readers",

            # ACL policy for lscc's "getdepspec" function
            "lscc/GetDeploymentSpec": "/Channel/Application/Readers",

            # ACL policy for lscc's "getccdata" function
            "lscc/GetChaincodeData": "/Channel/Application/Readers",

            # ACL Policy for lscc's "getchaincodes" function
            "lscc/GetInstantiatedChaincodes": "/Channel/Application/Readers",

            # ---Query System Chaincode (qscc) function to policy mapping for access control---#

            # ACL policy for qscc's "GetChainInfo" function
            "qscc/GetChainInfo": "/Channel/Application/Readers",

            # ACL policy for qscc's "GetBlockByNumber" function
            "qscc/GetBlockByNumber": "/Channel/Application/Readers",

            # ACL policy for qscc's  "GetBlockByHash" function
            "qscc/GetBlockByHash": "/Channel/Application/Readers",

            # ACL policy for qscc's "GetTransactionByID" function
            "qscc/GetTransactionByID": "/Channel/Application/Readers",

            # ACL policy for qscc's "GetBlockByTxID" function
            "qscc/GetBlockByTxID": "/Channel/Application/Readers",

            # ---Configuration System Chaincode (cscc) function to policy mapping for access control---#

            # ACL policy for cscc's "GetConfigBlock" function
            "cscc/GetConfigBlock": "/Channel/Application/Readers",

            # ACL policy for cscc's "GetConfigTree" function
            "cscc/GetConfigTree": "/Channel/Application/Readers",

            # ACL policy for cscc's "SimulateConfigTreeUpdate" function
            "cscc/SimulateConfigTreeUpdate": "/Channel/Application/Readers",

            # ---Miscellaneous peer function to policy mapping for access control---#

            # ACL policy for invoking chaincodes on peer
            "peer/Propose": "/Channel/Application/Writers",

            # ACL policy for chaincode to chaincode invocation
            "peer/ChaincodeToChaincode": "/Channel/Application/Readers",

            # ---Events resource to policy mapping for access control---#

            # ACL policy for sending block events
            "event/Block": "/Channel/Application/Readers",

            # ACL policy for sending filtered block events
            "event/FilteredBlock": "/Channel/Application/Readers",
        },
        "Organizations": None,
        "Policies": {
            "LifecycleEndorsement": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Endorsement"),
            },
            "Endorsement": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Endorsement"),
            },
            "Readers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Readers")
            },
            "Writers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Writers")
            },
            "Admins": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Admins")
            }
        },
        "Capabilities": {
            "<<": app_capabilities
        }
    }
    print(bcolors.OKGREEN + "       [+] Configuring App Permissions COMPLETE")
    orderer_addresses = []
    kafka_list = []
    for i in range(_orderers):
        orderer_addresses.append("orderer{}.{}:7050".format(i + 1, _domain))

    for i in range(_kafka_brokers):
        kafka_list.append("kafka{}:9092".format(i))

    print(bcolors.WARNING + "       [*] Generating Orderer Config")
    orderer = {
        "OrdererType": "kafka",
        "Addresses": orderer_addresses,

        # Batch Timeout: The amount of time to wait before creating a batch.
        "BatchTimeout": "{}s".format(_timeout),
        "BatchSize": {
            "MaxMessageCount": _blocksize,
            "AbsoluteMaxBytes": "10 MB",
            "PreferredMaxBytes": "2 MB",
        },
        "MaxChannels": 0,
        "Kafka": {
            "Brokers": kafka_list
        },
        "Organizations": None,
        "Policies": {
            "Readers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Readers")
            },
            "Writers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Writers")
            },
            "Admins": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Admins")
            },
            "BlockValidation": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Writers")
            }
        },
        "Capabilities": {
            "<<": orderer_capabilities
        }
    }

    print(bcolors.OKGREEN + "       [+] Generating Orderer Config COMPLETE")
    print(bcolors.WARNING + "       [*] Generating Channel Config")
    channel = {
        "Policies": {
            "Readers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Readers"),
            },
            "Writers": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("ANY Writers"),
            },
            "Admins": {
                "Type": "ImplicitMeta",
                "Rule": DoubleQuotedScalarString("MAJORITY Admins")
            }
        },
        "Capabilities": {
            "<<": channel_capabilities,
        }
    }
    print(bcolors.OKGREEN + "       [*] Generating Channel Config COMPLETE")

    # Reuse the orderer addresses that were already assembled above.
    ord_list = list(orderer_addresses)

    print(bcolors.WARNING + "       [*] Generating Profiles")
    profiles = {
        "OrdererDefault": {
            "<<": channel,
            "Capabilities": {
                "<<": channel_capabilities
            },
            "Orderer": {
                "<<": orderer,
                "OrdererType": "kafka",
                "Addresses": ord_list,
                "Organizations": [orga_list[0]],
                "Capabilities": {
                    "<<": orderer_capabilities,
                }
            },
            "Consortiums": {
                _consortium: {
                    "Organizations": orga_list[1:]
                }
            }
        },
        "MainChannel": {
            "<<": channel,
            "Consortium": _consortium,
            "Application": {
                "<<": application,
                "Organizations": orga_list[1:]
            },
            "Capabilities": {
                "<<": app_capabilities
            }
        }
    }
    print(bcolors.OKGREEN + "       [+] Generating Profiles COMPLETE")
    print(bcolors.OKBLUE + "    [*] Generating Final Object")
    final = {
        "Organizations": orga_list,
        "Capabilities": capabilities,
        "Application": application,
        "Orderer": orderer,
        "Channel": channel,
        "Profiles": profiles
    }
    print(bcolors.OKBLUE + "    [+] Generating Final Object COMPLETE")

    f = open("configtx.yaml", "w")
    yaml_new.dump(final, f, transform=tr)

    print(bcolors.HEADER + "========================================")
    print(">>> configtx.yaml has been dumped!")
    print("========================================")
def create_connection_profile(_peers, _orgs, _orderers, _domain):
    new_yaml = ruamel.yaml.YAML()
    orderer_list = [
        "orderer{}.{}".format(i + 1, _domain) for i in range(_orderers)
    ]
    print(bcolors.WARNING + "[*] Creating Connection Profile")
    peer_list = {}
    print(bcolors.WARNING + "   [*] Create Peer List")
    for peer in range(_peers):
        for org in range(_orgs):
            peer_list.update({
                "peer{}.org{}.{}".format(peer, org + 1, _domain): {
                    "endorsingPeer": True,
                    "chaincodeQuery": True,
                    "ledgerQuery": True,
                    "eventSource": True,
                }
            })
    print(bcolors.OKGREEN + "   [+] Peer List COMPLETE")
    print(bcolors.WARNING + "   [*] Create Channel List")

    channels = {"mychannel": {"orderers": orderer_list, "peers": peer_list}}
    print(bcolors.OKGREEN + "   [+] Channel List COMPLETE")
    print(bcolors.WARNING + "   [*] Create Organization List")
    organiz = {}
    for org in range(_orgs):
        peers_ls = [
            "peer{}.org{}.{}".format(i, org + 1, _domain)
            for i in range(_peers)
        ]
        organiz.update({
            "Org{}".format(org + 1): {
                "mspid": "Org{}MSP".format(org + 1),
                "peers": peers_ls,
                "certificateAuthorities":
                ["ca.org{}.{}".format(org + 1, _domain)]
            }
        })
    print(bcolors.OKGREEN + "   [+] Organization List COMPLETE")
    print(bcolors.WARNING + "   [*] Create Orderer List")

    ordes = {}
    for i, orderer in enumerate(orderer_list):
        ordes.update({
            orderer: {
                "url": "grpc://localhost:{}".format(orderer_defport + 1000 * i),
                "grpcOptions": {
                    "ssl-target-name-override": orderer
                }
            }
        })
    print(bcolors.OKGREEN + "   [+] Orderer List COMPLETE")
    print(bcolors.WARNING + "   [*] Create Detail Peer List")

    peer_ls = {}
    for peer in range(_peers):
        for org in range(_orgs):
            peer_ls.update({
                "peer{}.org{}.{}".format(peer, org + 1, _domain): {
                    "url":
                    "grpc://localhost:{}".format(
                        peer_defport + 1000 * ((_peers * org) + peer),
                        peer_defport),
                    "grpcOptions": {
                        "ssl-target-name-override":
                        "peer{}.org{}.{}".format(peer, org + 1, _domain),
                        "request-timeout":
                        120001
                    }
                }
            })
    print(bcolors.OKGREEN + "   [+] Detail Peer List COMPLETE")
    print(bcolors.WARNING + "   [*] Create Detail CA List")

    ca_ls = {}
    for org in range(_orgs):
        ca_ls.update({
            "ca.org{}.{}".format(org + 1, _domain): {
                "url": "http://localhost:{}".format(ca_defport + 1000 * org),
                "httpOptions": {
                    "verify": False,
                },
                "registrar": [{
                    "enrollId": "admin",
                    "enrollSecret": "adminpw"
                }],
                "caName": "ca.org{}.{}".format(org + 1, _domain)
            }
        })
    print(bcolors.OKGREEN + "   [+] Detail CA List COMPLETE")
    print(bcolors.OKBLUE + "======= Generating final Structure =======")

    final = {
        "name": DoubleQuotedScalarString("{}-peer.{}-org.{}-orderers.{}".format(
            _peers, _orgs, _orderers, _domain)),
        "x-type": DoubleQuotedScalarString("hlfv2"),
        "description": DoubleQuotedScalarString("Connection profile"),
        "version": DoubleQuotedScalarString("1.0"),
        "channels": channels,
        "organizations": organiz,
        "orderers": ordes,
        "peers": peer_ls,
        "certificateAuthorities": ca_ls
    }
    print(bcolors.OKBLUE + "======= Final Structure COMPLETE =======")

    f = open("connection_profile.yaml", "w")
    new_yaml.dump(final, f)
    f.close()
    print(bcolors.OKGREEN + "[+] Connection Profile Created")
Esempio n. 29
0
# http://stackoverflow.com/questions/39262556/preserve-quotes-and-also-add-data-with-quotes-in-ruamel

import sys

import ruamel.yaml
from ruamel.yaml.scalarstring import SingleQuotedScalarString, DoubleQuotedScalarString

yaml = ruamel.yaml.YAML()  # round-trip mode, preserves existing quoting
hiera = {}  # in the linked answer, `hiera` is a mapping loaded from an existing YAML file

hiera['foo'] = SingleQuotedScalarString('bar')
hiera['bar'] = DoubleQuotedScalarString('baz')

yaml.dump(hiera, sys.stdout)

# This adds the following at the end of every YAML file the mapping is dumped to:
# foo: 'bar'
# bar: "baz"
Esempio n. 30
0
import enum
import pathlib
import sys

import ruamel.yaml
from ruamel.yaml.scalarstring import SingleQuotedScalarString, DoubleQuotedScalarString

yaml = ruamel.yaml.YAML()  # round-trip mode, preserves existing quoting


class Quotes(enum.Enum):
    # Minimal stand-in for the enum defined elsewhere in the script.
    single = "single"
    double = "double"


# `args`, `key`, `value` and `quotes` come from the script's argparse setup,
# and `getFromDict`/`setInDict` are its nested-dict helpers; both are elided
# here (a sketch of the helpers follows after this snippet).
yaml_file = None

if args.yaml_file:
    yaml_file = pathlib.Path(args.yaml_file)
    file = yaml.load(yaml_file)
else:
    file = yaml.load(sys.stdin)

existing_value = getFromDict(file, key[:-1])[key[-1]]

if yaml_file:
    print("Existing key", key, "has value", existing_value)

if existing_value != value:
    if quotes == Quotes.single:
        setInDict(file, key,
                  [SingleQuotedScalarString(v) for v in value])
    else:
        setInDict(file, key,
                  [DoubleQuotedScalarString(v) for v in value])
elif yaml_file:
    print("Key has not changed")

if yaml_file:
    yaml.dump(file, yaml_file)
else:
    yaml.dump(file, sys.stdout)
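
# The nested-dict helpers used above are not shown in this snippet. A common
# implementation (an assumption here, not the original code) walks the key
# path with functools.reduce:
import functools
import operator


def getFromDict(data, key_path):
    """Return the value at a nested key path, e.g. ['a', 'b'] -> data['a']['b']."""
    return functools.reduce(operator.getitem, key_path, data)


def setInDict(data, key_path, new_value):
    """Set the value at a nested key path, leaving the rest of the mapping intact."""
    getFromDict(data, key_path[:-1])[key_path[-1]] = new_value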