def format_quotes(s):
    if '\'' in s:
        return SingleQuotedScalarString(s.replace("'", ''))
    if '"' in s:
        return DoubleQuotedScalarString(s.replace('"', ''))
    return SingleQuotedScalarString(s)
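# Usage sketch for format_quotes, assuming only that ruamel.yaml is
# installed: the wrapper types control how each scalar is quoted when the
# structure is dumped.
import sys
from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import (DoubleQuotedScalarString,
                                      SingleQuotedScalarString)

yaml = YAML()
yaml.dump({'a': format_quotes("it's"), 'b': format_quotes('say "hi"')},
          sys.stdout)
# "it's" has its apostrophe stripped and is emitted single-quoted:
#   a: 'its'
# 'say "hi"' has the double quotes stripped and is emitted double-quoted:
#   b: "say hi"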
def load_build_config(self, filename):
    print('opening file {}'.format(filename))
    dictname = filename.read()
    # NOTE: eval() executes the file contents as Python, so the
    # configuration file must come from a trusted source.
    dictname = eval(dictname)
    self.ssh_private = SingleQuotedScalarString(dictname['ssh_private'])
    self.ssh_public = dictname['ssh_public']
    self.ip_master = dictname['ip_master']
    self.ip_repo = dictname['ip_repo']
    self.timezone = dictname['timezone']
    self.jenkins_user = dictname['jenkins_user']
    self.jenkins_password = dictname['jenkins_password']
    self.repo_hostkey = dictname['repo_hostkey']
    self.gpg_key_id = dictname['gpg_key_id']
    self.gpg_private_key = SingleQuotedScalarString(
        dictname['gpg_private_key'])
    self.gpg_public_key = SingleQuotedScalarString(
        dictname['gpg_public_key'])
    self.architectures = dictname['architectures']
    self.distros = dictname['distros']
    self.git_fetch_ssh_id = dictname['git_fetch_ssh_id']
    all_ros_distros = [item['ros'] for item in self.distros]
    # creating a dict enables removing duplicated entries.
    self.ros_distros = sorted(list(dict.fromkeys(all_ros_distros)))
    return True
def __init__(self,
             id: str = None,
             description: str = None,
             account_root_directory: str = None,
             oxnote_home_folder: str = None,
             application_data_folder: str = None,
             url_scheme: str = SingleQuotedScalarString('https'),
             url_host: str = None,
             url_port: str = SingleQuotedScalarString('443'),
             url_uri: str = None,
             username: str = None,
             password: str = None,
             context_id: str = None,
             user_id: str = None,
             enabled: bool = False,
             drive_quota: int = 0):
    self._id = id
    self._description = description
    self._account_root_directory = account_root_directory
    self._oxnote_home_folder = oxnote_home_folder
    self._application_data_folder = application_data_folder
    self._url_scheme = url_scheme
    self._url_host = url_host
    self._url_port = url_port
    self._url_uri = url_uri
    self._username = username
    self._password = password
    self._context_id = context_id
    self._user_id = user_id
    self._enabled = enabled
    self._drive_quota = drive_quota
def __update_synchronization_state(
        self, directory: str, action: DriveExtendedAction,
        synchronization_state: typing.Dict) -> typing.Dict:
    '''
    .. todo:: Account for the situation that a file has been moved and
       its content has changed.
    '''
    if action.version and action.new_version:
        del synchronization_state['files']['original_versions'][
            '/{}'.format(directory)][action.version.name]
        synchronization_state['files']['original_versions']['/{}'.format(
            directory)][
                action.new_version.name] = SingleQuotedScalarString(
                    action.new_version.checksum)
    elif action.new_version:
        synchronization_state['files']['original_versions']['/{}'.format(
            directory)][
                action.new_version.name] = SingleQuotedScalarString(
                    action.new_version.checksum)
    elif action.version:
        del synchronization_state['files']['original_versions'][
            '/{}'.format(directory)][action.version.name]
    self.__save_synchronization_state(synchronization_state)
    return synchronization_state
def apply_config(self, buildfarm_config_root_folder):
    custom_yaml_config = self.to_yaml()
    yaml = YAML()
    yaml.preserve_quotes = True
    buildfarm_config_files = [
        Path("hiera/hieradata/common.yaml"),
        Path("hiera/hieradata/buildfarm_role/repo.yaml"),
        Path("hiera/hieradata/buildfarm_role/agent.yaml"),
        Path("hiera/hieradata/buildfarm_role/master.yaml"),
    ]
    for buildfarm_config_file in buildfarm_config_files:
        print('Loading file %s' % buildfarm_config_file)
        with open(str(buildfarm_config_root_folder / buildfarm_config_file),
                  'r') as bcfile:
            hiera_yaml = yaml.load(bcfile)
        for hiera_key in hiera_yaml.keys():
            if hiera_key in custom_yaml_config.keys():
                print('Substituting field %s in file %s' %
                      (hiera_key, str(buildfarm_config_file)))
                hiera_yaml[hiera_key] = custom_yaml_config[hiera_key]
        with open(str(buildfarm_config_root_folder / buildfarm_config_file),
                  'w') as bcfile:
            yaml.dump(hiera_yaml, bcfile)

    # The fields credentials::git-fetch-ssh::XXX are not in the original
    # yaml configuration, so they need to be added separately.
    # FIXME: here we are adding the raw passphrase, but it should be the
    # hash instead.
    if self.git_fetch_ssh_private_key:
        yaml_str = {
            'credentials::git-fetch-ssh::username':
                SingleQuotedScalarString(self.git_fetch_ssh_username),
            'credentials::git-fetch-ssh::id': self.git_fetch_ssh_id,
            'credentials::git-fetch-ssh::passphrase':
                SingleQuotedScalarString(self.git_fetch_ssh_passphrase),
            'credentials::git-fetch-ssh::private_key':
                PreservedScalarString(self.git_fetch_ssh_private_key),
        }
        with open(
                str(buildfarm_config_root_folder /
                    Path("hiera/hieradata/buildfarm_role/master.yaml")),
                'r',
        ) as master_file:
            master_yaml = yaml.load(master_file)
        # Add the new keys (update)
        master_yaml.update(yaml_str)
        with open(
                str(buildfarm_config_root_folder /
                    Path("hiera/hieradata/buildfarm_role/master.yaml")),
                'w',
        ) as master_file:
            yaml.dump(master_yaml, master_file)
def generate_action(self, unit, count, regex, exclude=False):
    action = {
        'action': 'delete_indices',
        'description': 'auto-generated',
        'options': {
            'ignore_empty_list': True,
            'continue_if_exception': False,
            'timeout_override': os.getenv('CURATOR_TIMEOUT', 300)
        },
        'filters': [{
            'filtertype': 'pattern',
            'kind': 'regex',
            'value': SingleQuotedScalarString(regex),
            'exclude': exclude
        }, {
            'filtertype': 'age',
            'source': 'name',
            'direction': 'older',
            'timestring': '%Y.%m.%d',
            'unit': unit,
            'unit_count': count
        }]
    }
    return action
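# Dumping such an action shows why the regex is wrapped: the single quotes
# keep characters like '^', '*' and '$' as one literal YAML string. A
# self-contained sketch with a hypothetical index pattern:
import sys
from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import SingleQuotedScalarString

filters = [{
    'filtertype': 'pattern',
    'kind': 'regex',
    'value': SingleQuotedScalarString(r'^logstash-.*$'),
    'exclude': False,
}]
YAML().dump({'filters': filters}, sys.stdout)
# filters:
# - filtertype: pattern
#   kind: regex
#   value: '^logstash-.*$'
#   exclude: false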
def build_images(prefix, images, tag=None, commit_range=None, push=False,
                 chart_version=None):
    """Build a collection of docker images

    Args:
        prefix (str): the prefix to add to images
        images (dict): dict of image-specs from chartpress.yml
        tag (str):
            Specific tag to use instead of the last modified commit.
            If unspecified the tag for each image will be the hash of
            the last commit to modify the image's files.
        commit_range (str):
            The range of commits to consider, e.g. for building in CI.
            If an image hasn't changed in the given range, it will not
            be rebuilt.
        push (bool): Whether to push the resulting images (default: False).
        chart_version (str):
            The chart version, included as a prefix on image tags if
            `tag` is not specified.
    """
    value_modifications = {}
    for name, options in images.items():
        image_path = options.get('contextPath', os.path.join('images', name))
        image_tag = tag
        # include chartpress.yaml itself as it can contain build args and
        # similar that influence the image that would be built
        paths = list(options.get('paths', [])) + [image_path,
                                                  'chartpress.yaml']
        last_commit = last_modified_commit(*paths)
        if tag is None:
            if chart_version:
                image_tag = "{}-{}".format(chart_version, last_commit)
            else:
                image_tag = last_commit
        image_name = prefix + name
        image_spec = '{}:{}'.format(image_name, image_tag)
        value_modifications[options['valuesPath']] = {
            'repository': image_name,
            'tag': SingleQuotedScalarString(image_tag),
        }
        if tag is None and commit_range and not path_touched(
                *paths, commit_range=commit_range):
            print(f"Skipping {name}, not touched in {commit_range}")
            continue
        template_namespace = {
            'LAST_COMMIT': last_commit,
            'TAG': image_tag,
        }
        build_args = render_build_args(options, template_namespace)
        build_image(image_path, image_spec, build_args,
                    options.get('dockerfilePath'))
        if push:
            subprocess.check_call(['docker', 'push', image_spec])
    return value_modifications
def fix_sexadecimal_numbers(value):
    import re

    # Raw string avoids the invalid-escape warning for '\d' on Python 3.
    SEXADECIMAL_NUMBER = r'(?P<left>\d+):(?P<right>\d+)'
    match = re.match(SEXADECIMAL_NUMBER, value)
    if not match or int(match.group('left')) > 60 or int(
            match.group('right')) > 60:
        return value
    return SingleQuotedScalarString('{0}:{1}'.format(
        match.group('left'), match.group('right')))
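# Quick check of fix_sexadecimal_numbers (runnable alongside the function
# above, with SingleQuotedScalarString imported): values that YAML 1.1
# loaders would read as base-60 ("sexagesimal") integers come back
# single-quoted, anything else passes through unchanged.
from ruamel.yaml.scalarstring import SingleQuotedScalarString

print(type(fix_sexadecimal_numbers('5:30')).__name__)
# SingleQuotedScalarString  -> dumps as '5:30' instead of the integer 330
print(type(fix_sexadecimal_numbers('99:99')).__name__)
# str  -> out of range, returned as-is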
def walk_tree(base):
    if isinstance(base, MutableMapping):
        for k in base:
            v = base[k]  # type: Text
            if isinstance(v, string_types):
                if '\n' in v:
                    base[k] = preserve_literal(v)
                elif '${' in v or ':' in v:
                    base[k] = SingleQuotedScalarString(v)
            else:
                walk_tree(v)
    elif isinstance(base, MutableSequence):
        for idx, elem in enumerate(base):
            if isinstance(elem, string_types):
                if '\n' in elem:
                    base[idx] = preserve_literal(elem)
                elif '${' in elem or ':' in elem:
                    base[idx] = SingleQuotedScalarString(elem)
            else:
                walk_tree(elem)
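# walk_tree depends on two helpers that are not part of this listing
# (string_types, typically from six, and a preserve_literal wrapper); a
# self-contained sketch with both assumed helpers inlined:
import sys
from collections.abc import MutableMapping, MutableSequence

from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import (LiteralScalarString,
                                      SingleQuotedScalarString)

string_types = (str,)  # stand-in for six.string_types on Python 3


def preserve_literal(s):
    # Assumed helper: emit multi-line strings in literal block style (|).
    return LiteralScalarString(s.replace('\r\n', '\n').replace('\r', '\n'))


data = {'cmd': 'echo ${HOME}', 'script': 'line1\nline2\n', 'plain': 'ok'}
walk_tree(data)
YAML().dump(data, sys.stdout)
# cmd: 'echo ${HOME}'      <- contains '${', so single-quoted
# script: |                <- contains a newline, so literal block
#   line1
#   line2
# plain: ok                <- untouched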
def add_committer(self, commiter_change: CommitterChange) -> None:
    """Add committer action.

    All provided data is formatted properly and added to the INFO.yaml
    file's 'committers' section.

    Args:
        commiter_change (CommitterChange): Change to be added

    """
    self._info["committers"].append(
        {
            key: SingleQuotedScalarString(value)
            for key, value in commiter_change.addition_change.items()
        }
    )
def add_tsc_change(self, tsc_change: TscChange) -> None:
    """Add a Technical Steering Committee entry.

    All actions need to be confirmed by the TSC. That entry proves that
    the TSC was informed of and approved the change.

    Args:
        tsc_change (TscChange): TSC change object.

    """
    self._info["tsc"]["changes"].append(
        {
            key: SingleQuotedScalarString(value)
            for key, value in tsc_change.tsc_change.items()
        }
    )
class Test_enums_YAMLValueFormats():
    """Tests for the YAMLValueFormats enumeration."""

    def test_get_names(self):
        assert YAMLValueFormats.get_names() == [
            "BARE",
            "BOOLEAN",
            "DEFAULT",
            "DQUOTE",
            "FLOAT",
            "FOLDED",
            "INT",
            "LITERAL",
            "SQUOTE",
        ]

    @pytest.mark.parametrize("input,output", [
        ("BARE", YAMLValueFormats.BARE),
        ("BOOLEAN", YAMLValueFormats.BOOLEAN),
        ("DEFAULT", YAMLValueFormats.DEFAULT),
        ("DQUOTE", YAMLValueFormats.DQUOTE),
        ("FLOAT", YAMLValueFormats.FLOAT),
        ("FOLDED", YAMLValueFormats.FOLDED),
        ("INT", YAMLValueFormats.INT),
        ("LITERAL", YAMLValueFormats.LITERAL),
        ("SQUOTE", YAMLValueFormats.SQUOTE),
    ])
    def test_from_str(self, input, output):
        assert output == YAMLValueFormats.from_str(input)

    def test_from_str_nameerror(self):
        with pytest.raises(NameError):
            YAMLValueFormats.from_str("NO SUCH NAME")

    @pytest.mark.parametrize("input,output", [
        (FoldedScalarString(""), YAMLValueFormats.FOLDED),
        (LiteralScalarString(""), YAMLValueFormats.LITERAL),
        (DoubleQuotedScalarString(''), YAMLValueFormats.DQUOTE),
        (SingleQuotedScalarString(""), YAMLValueFormats.SQUOTE),
        (PlainScalarString(""), YAMLValueFormats.BARE),
        (ScalarBoolean(False), YAMLValueFormats.BOOLEAN),
        (ScalarFloat(1.01), YAMLValueFormats.FLOAT),
        (ScalarInt(10), YAMLValueFormats.INT),
        (None, YAMLValueFormats.DEFAULT),
    ])
    def test_from_node(self, input, output):
        assert output == YAMLValueFormats.from_node(input)
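# from_node above maps ruamel node types back to format names; the sketch
# below shows the same kind of isinstance dispatch using only ruamel's
# public scalar types (an illustrative mapping, not yamlpath's actual
# implementation).
from ruamel.yaml.scalarbool import ScalarBoolean
from ruamel.yaml.scalarfloat import ScalarFloat
from ruamel.yaml.scalarint import ScalarInt
from ruamel.yaml.scalarstring import (DoubleQuotedScalarString,
                                      FoldedScalarString,
                                      LiteralScalarString,
                                      PlainScalarString,
                                      SingleQuotedScalarString)


def describe_node(node):
    # Subclasses must be tested before their bases (all the string formats
    # are str subclasses).
    for node_type, label in (
            (FoldedScalarString, "FOLDED"),
            (LiteralScalarString, "LITERAL"),
            (DoubleQuotedScalarString, "DQUOTE"),
            (SingleQuotedScalarString, "SQUOTE"),
            (PlainScalarString, "BARE"),
            (ScalarBoolean, "BOOLEAN"),
            (ScalarFloat, "FLOAT"),
            (ScalarInt, "INT")):
        if isinstance(node, node_type):
            return label
    return "DEFAULT"


assert describe_node(SingleQuotedScalarString("x")) == "SQUOTE"
assert describe_node(None) == "DEFAULT"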
def _config(self):
    """
    Configures ejabberd server
    :return:
    """
    self._find_dirs_if_needed()
    config_file = os.path.join(self._config_dir, 'ejabberd.yml')
    config_file_backup = os.path.join(self._config_dir,
                                      'ejabberd.yml.backup')
    # Back up the config file. If a backup already exists, restore from it.
    if os.path.exists(config_file_backup):
        shutil.copy(config_file_backup, config_file)
    else:
        shutil.copy(config_file, config_file_backup)

    config_data = open(config_file).read()
    config_yml = ruamel.yaml.round_trip_load(config_data,
                                             preserve_quotes=True)

    # virtual host setup
    config_yml['hosts'] = [DoubleQuotedScalarString(self.hostname)]

    # external authentication setup
    ext_auth_path = os.path.join(self._extauth_path, 'external_cloud.py')
    config_yml['auth_method'] = SingleQuotedScalarString('external')
    config_yml['extauth_cache'] = 0
    config_yml['extauth_program'] = DoubleQuotedScalarString(
        '%s -t ejabberd -s %s -u %s' %
        (ext_auth_path, self.extauth_token, self.extauth_endpoint))

    # add admin user - from NextCloud
    if self.hostname is None and self.config is not None:
        self.hostname = self.config.hostname
    util.setpath(config_yml, ['acl', 'admin', 'user'],
                 [DoubleQuotedScalarString('admin@%s' % self.hostname)])

    with open(config_file, 'w') as fh:
        new_config = ruamel.yaml.round_trip_dump(config_yml)
        fh.write(new_config)
    self._create_cert_files()
def __generate_preview(self):
    logger.debug('Generating preview document for {}'.format(self._file))
    synchronization_state = self.__load_synchronization_state()
    if self.filename in synchronization_state[
            'preview_files'] and os.path.isfile(
                synchronization_state['preview_files'][self.filename]):
        os.remove(synchronization_state['preview_files'][self.filename])
    preview_filename = os.path.join(
        self.account.get_oxnote_home_folder_path(),
        Note.generate_normalized_filename(
            self._title, 'pdf', True,
            self.account.get_oxnote_home_folder_path()))
    synchronization_state['preview_files'][
        self.filename] = SingleQuotedScalarString(preview_filename)
    pdf_printer = QPrinter(QPrinter.ScreenResolution)
    pdf_printer.setOutputFormat(QPrinter.PdfFormat)
    pdf_printer.setPaperSize(QPrinter.Letter)
    pdf_printer.setPageMargins(0.56, 0.56, 0.56, 0.56, QPrinter.Inch)
    pdf_printer.setOutputFileName(preview_filename)
    pdf_paper_size = QSizeF()
    pdf_paper_size.setWidth(pdf_printer.width())
    pdf_paper_size.setHeight(pdf_printer.height())
    pdf_document: QTextDocument = QTextDocument()
    pdf_cursor: QTextCursor = QTextCursor(pdf_document)
    pdf_cursor.movePosition(QTextCursor.Start)
    pdf_cursor.insertHtml('<h1>{}</h1><hr><br>'.format(self._title))
    pdf_cursor.movePosition(QTextCursor.End)
    pdf_cursor.insertHtml(self.html_content)
    pdf_document.setMetaInformation(QTextDocument.DocumentTitle, self._title)
    pdf_document.setPageSize(pdf_paper_size)
    pdf_document.print(pdf_printer)
    self.__save_synchronization_state(synchronization_state)
def replace_quoted_strings(data: Any, options: QuotedStringOptions) -> Any:
    quote_cast = (
        SingleQuotedScalarString
        if options.quote_type == SINGLE
        else DoubleQuotedScalarString
    )
    patterns = options.extra_required + options.extra_allowed

    # it won't work with items() or iterating through it like a list
    if isinstance(data, CommentedMap):
        for key in data.keys():
            data[key] = replace_quoted_strings(data[key], options)
    elif isinstance(data, CommentedSeq):
        for indx in range(len(data)):
            data[indx] = replace_quoted_strings(data[indx], options)
    elif isinstance(data, str):
        if (
            options.required == ONLY_WHEN_NEEDED
            and isinstance(
                data, (SingleQuotedScalarString, DoubleQuotedScalarString)
            )
            and not _quotes_are_needed(data)
            and all(pattern.search(data) is None for pattern in patterns)
        ):
            return PlainScalarString(data)
        elif (
            options.required == TRUE
            or _quotes_are_needed(data)
            or any(
                pattern.search(data) is not None
                for pattern in options.extra_required
            )
        ) and not isinstance(
            data, (SingleQuotedScalarString, DoubleQuotedScalarString)
        ):
            return quote_cast(data)
        elif options.quote_type == SINGLE and isinstance(
            data, DoubleQuotedScalarString
        ):
            return SingleQuotedScalarString(data)
        elif options.quote_type == DOUBLE and isinstance(
            data, SingleQuotedScalarString
        ):
            return DoubleQuotedScalarString(data)
    return data
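# The last two branches cover the common case: converting between quote
# styles on a round-tripped document. A stripped-down sketch of just that
# conversion, without the options object, runnable on its own:
import sys
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.scalarstring import (DoubleQuotedScalarString,
                                      SingleQuotedScalarString)


def double_to_single(data):
    # Recursively rewrite double-quoted scalars as single-quoted ones,
    # leaving plain scalars alone.
    if isinstance(data, CommentedMap):
        for key in data:
            data[key] = double_to_single(data[key])
    elif isinstance(data, CommentedSeq):
        for idx in range(len(data)):
            data[idx] = double_to_single(data[idx])
    elif isinstance(data, DoubleQuotedScalarString):
        return SingleQuotedScalarString(data)
    return data


yaml = YAML()
yaml.preserve_quotes = True
doc = yaml.load('a: "one"\nb: [plain, "two"]\n')
yaml.dump(double_to_single(doc), sys.stdout)
# a: 'one'
# b: [plain, 'two']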
def __init__(self,
             intent_name: str,
             domain: str = None,
             intent_description: str = None,
             scenario_id: str = None,
             sample_utterances: list[str] = None,
             opening_utterances: list[str] = None,
             slots: list[SlotProvider] = None,
             script: list[Turn] = None,
             parent_intent: str = None,
             locale: str = 'en-US'):
    self.name = intent_name
    self.domain = domain
    self.locale = locale
    self.description = intent_description
    self.scenario_id = SingleQuotedScalarString(scenario_id)
    self.sample_utterances = sample_utterances
    self.opening_utterances = opening_utterances
    self.slots = slots
    self.script = script
    self.parent_intent = parent_intent
def url_scheme(self):
    return SingleQuotedScalarString(
        'https') if not self._url_scheme else self._url_scheme
def as_dict(self) -> typing.Dict:
    account_as_dict = dict()
    account_as_dict['description'] = SingleQuotedScalarString(
        self.description)
    account_as_dict['account_root_directory'] = SingleQuotedScalarString(
        self.account_root_directory)
    account_as_dict['folder'] = dict()
    account_as_dict['folder'][
        'oxnote_home_folder'] = SingleQuotedScalarString(
            self.oxnote_home_folder)
    account_as_dict['folder'][
        'application_data_folder'] = SingleQuotedScalarString(
            self.application_data_folder)
    account_as_dict['url'] = dict()
    account_as_dict['url']['scheme'] = SingleQuotedScalarString(
        self.url_scheme)
    account_as_dict['url']['host'] = SingleQuotedScalarString(self.url_host)
    account_as_dict['url']['port'] = self.url_port
    account_as_dict['url']['uri'] = SingleQuotedScalarString(self.url_uri)
    account_as_dict['username'] = SingleQuotedScalarString(self.username)
    account_as_dict['password'] = SingleQuotedScalarString(self.password)
    account_as_dict['context_id'] = SingleQuotedScalarString(
        self.context_id)
    account_as_dict['user_id'] = SingleQuotedScalarString(self.user_id)
    account_as_dict['enabled'] = self.enabled
    account_as_dict['drive_quota'] = self.drive_quota
    return account_as_dict
def single(value):
    return SingleQuotedScalarString(value)
def generate_docker_compose(_network_config: NetworkConfiguration, _orderers,
                            _orgs, _peers, _domain, _kafka_nodes=2):
    """
    This function will create a docker-compose.yaml file within the current
    workdir.
    :param _network_config: The Network Configuration structure, containing
           Ports and stuff
    :param _orderers: the number of orderers to configure
    :param _orgs: the number of organizations to configure
    :param _peers: the number of peers to configure
    :param _domain: the domain of the channel
    :param _kafka_nodes: (Optional) the number of kafka nodes, if kafka
           ordering is enabled
    """
    yaml_new = ruamel.yaml.YAML()
    services = {}  # Docker Compose Services
    all_node_containers = []

    print(bcolors.OKBLUE + "======== Creating CAs ========")
    for i in range(_orgs):
        print(bcolors.WARNING + f" [*] Generating CA for org{i + 1}")
        ca = {
            "image": "hyperledger/fabric-ca:1.4",
            "environment": [
                "FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server",
                f"FABRIC_CA_SERVER_CA_NAME=ca.org{i+1}.{_domain}",
                f"FABRIC_CA_SERVER_CA_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org{i+1}.{_domain}-cert.pem",
                "FABRIC_CA_SERVER_CA_KEYFILE=/etc/hyperledger/fabric-ca-server-config/priv_sk",
                "FABRIC_CA_SERVER_TLS_ENABLED=true",
                f"FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/"
                f"ca.org{i+1}.{_domain}-cert.pem",
                "FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/priv_sk"
            ],
            "ports": [f"{_network_config.ca_defport + i * 1000}:{_network_config.ca_defport}"],
            "command": f"sh -c 'fabric-ca-server start "
                       f"--ca.certfile /etc/hyperledger/fabric-ca-server-config/ca.org{i + 1}.{_domain}-cert.pem "
                       f"--ca.keyfile /etc/hyperledger/fabric-ca-server-config/priv_sk -b admin:adminpw -d'",
            "volumes": [
                f"./crypto-config/peerOrganizations/org{i + 1}.{_domain}/ca/:/etc/hyperledger/fabric-ca-server-config"
            ],
            "container_name": f"ca.org{i+1}.{_domain}",
            "networks": [
                _network_config.network_name
            ]
        }
        services.update({f"ca.org{i + 1}.{_domain}": ca})
        print(bcolors.OKGREEN + f" [+] Generating CA for org{i + 1} COMPLETE")

    if _network_config.ordering_service == "kafka":
        print(bcolors.OKBLUE + "======== Creating Zookeeper ========")
        zookeepers = []
        zoo_servers = ""
        zoo_connect = ""
        for i in range(_kafka_nodes):
            zoo_servers += f"server.{i + 1}=zookeeper{i + 1}:2888:3888 "
            zoo_connect += f"zookeeper{i + 1}:2181,"
            zookeepers.append(f"zookeeper{i + 1}")
        zoo_servers = zoo_servers[:-1]
        zoo_connect = zoo_connect[:-1]
        for i in range(_kafka_nodes):
            print(bcolors.WARNING + f" [*] Generating Zookeeper{i + 1}")
            zoo = {
                "image": "hyperledger/fabric-zookeeper",
                "container_name": f"zookeeper{i + 1}",
                "restart": "always",
                "environment": [
                    f"ZOO_MY_ID={i + 1}",
                    "ZOO_SERVERS=" + zoo_servers
                ],
                "ports": [
                    2181,
                    2888,
                    3888,
                ],
                "networks": [
                    _network_config.network_name
                ]
            }
            services.update({f"zookeeper{i + 1}": zoo})
            print(bcolors.OKGREEN + f" [+] Zookeeper{i + 1} complete")

        print(bcolors.OKBLUE + "======== Creating Kafka Brokers ========")
        for i in range(_kafka_nodes):
            print(bcolors.WARNING + f" [*] Generating Kafka{i}")
            ka = {
                "image": "hyperledger/fabric-kafka",
                "container_name": f"kafka{i}",
                # restart: always
                "environment": [
                    f"KAFKA_ADVERTISED_HOST_NAME=kafka{i}",
                    "KAFKA_ADVERTISED_PORT=9092",
                    f"KAFKA_BROKER_ID={i}",
                    "KAFKA_MESSAGE_MAX_BYTES=103809024",  # 99 * 1024 * 1024 B
                    "KAFKA_REPLICA_FETCH_MAX_BYTES=103809024",  # 99 * 1024 * 1024 B
                    "KAFKA_UNCLEAN_LEADER_ELECTION_ENABLE=false",
                    "KAFKA_NUM_REPLICA_FETCHERS=1",
                    f"KAFKA_DEFAULT_REPLICATION_FACTOR={i+1}",
                    "KAFKA_ZOOKEEPER_CONNECT=" + zoo_connect
                ],
                "ports": [
                    9092
                ],
                "depends_on": zookeepers,
                "networks": [
                    _network_config.network_name
                ]
            }
            services.update({f"kafka{i}": ka})
            print(bcolors.OKGREEN + f" [+] Kafka{i} Completed")

    print(bcolors.OKBLUE + "======= Generating Orderers =======")
    if _network_config.ordering_service == "kafka":
        kafka_brokers = ""
        kafka_broker_list = []
        for i in range(_kafka_nodes):
            kafka_brokers += f"kafka{i}:9092,"
            kafka_broker_list.append(f"kafka{i}")
        kafka_brokers = kafka_brokers[:-1]
    orderer_str = ""
    for i in range(_orderers):
        print(bcolors.WARNING + f" [*] Generating Orderer{i + 1}")
        env = [
            f"ORDERER_HOST=orderer{i + 1}.{_domain}",
            "ORDERER_GENERAL_LOGLEVEL=debug",
            "ORDERER_GENERAL_LISTENADDRESS=0.0.0.0",
            f"ORDERER_GENERAL_LISTENPORT={_network_config.orderer_defport}",
            "ORDERER_GENERAL_GENESISMETHOD=file",
            "ORDERER_GENERAL_GENESISFILE=/etc/hyperledger/configtx/genesis.block",
            "ORDERER_GENERAL_LOCALMSPID=OrdererMSP",
            "ORDERER_GENERAL_LOCALMSPDIR=/etc/hyperledger/msp/orderer/msp",
            "CONFIGTX_ORDERER_BATCHTIMEOUT=1s",
            "ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/etc/hyperledger/orderer/tls/server.crt",
            "ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/etc/hyperledger/orderer/tls/server.key",
            "ORDERER_GENERAL_CLUSTER_ROOTCAS=[/etc/hyperledger/orderer/tls/ca.crt]",
            "ORDERER_ABSOLUTEMAXBYTES=10 MB",
            "ORDERER_PREFERREDMAXBYTES=512 KB"
        ]
        if _network_config.ordering_service == "kafka":
            env.extend([
                "ORDERER_GENERAL_TLS_ENABLED=true",
                "ORDERER_GENERAL_TLS_PRIVATEKEY=/etc/hyperledger/orderer/tls/server.key",
                "ORDERER_GENERAL_TLS_CERTIFICATE=/etc/hyperledger/orderer/tls/server.crt",
                "ORDERER_GENERAL_TLS_ROOTCAS=[/etc/hyperledger/orderer/tls/ca.crt]",
                # Kafka Orderer Type
                "CONFIGTX_ORDERER_ORDERERTYPE=kafka",
                f"CONFIGTX_ORDERER_KAFKA_BROKERS=[{kafka_brokers}]",
                "ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s",
                "ORDERER_KAFKA_RETRY_SHORTTOTAL=30s",
                "ORDERER_KAFKA_VERBOSE=true",
            ])
        else:
            env.extend([
                "ORDERER_GENERAL_TLS_ENABLED=true",
                "ORDERER_GENERAL_TLS_PRIVATEKEY=/etc/hyperledger/orderer/tls/server.key",
                "ORDERER_GENERAL_TLS_CERTIFICATE=/etc/hyperledger/orderer/tls/server.crt",
                "ORDERER_GENERAL_TLS_ROOTCAS=[/etc/hyperledger/orderer/tls/ca.crt]",
                # Raft Orderer Type
                "CONFIGTX_ORDERER_ORDERERTYPE=etcdraft"
            ])
        order = {
            "container_name": f"orderer{i + 1}.{_domain}",
            "image": "hyperledger/fabric-orderer:2.0",
            "environment": env,
            "working_dir": "/opt/gopath/src/github.com/hyperledger/fabric/orderer",
            "command": "orderer",
            "ports": [
                f"{_network_config.orderer_defport + i * 1000}:{_network_config.orderer_defport}"  # e.g. 8050:7050
            ],
            "volumes": [
                "./config/:/etc/hyperledger/configtx",
                "./config/genesis.block:/etc/hyperledger/orderer/orderer.genesis.block",
                f"./crypto-config/ordererOrganizations/{_domain}/orderers/orderer{i+1}.{_domain}/:"
                f"/etc/hyperledger/msp/orderer",
                f"./crypto-config/ordererOrganizations/{_domain}/orderers/orderer{i + 1}.{_domain}/msp:"
                f"/etc/hyperledger/orderer/msp",
                f"./crypto-config/ordererOrganizations/{_domain}/orderers/orderer{i + 1}.{_domain}/tls/:"
                "/etc/hyperledger/orderer/tls",
            ],
            "networks": [
                _network_config.network_name
            ],
        }
        if _network_config.ordering_service == "kafka":
            order.update({"depends_on": kafka_broker_list})
        services.update({f"orderer{i + 1}.{_domain}": order})
        all_node_containers.append(f"orderer{i + 1}.{_domain}")
        if i == 0:
            # Enabling TLS: with Raft we need to specify only one single
            # orderer, so we choose the first one. Additionally, since we
            # call the peer tool from external, the override of the orderer
            # TLS name is mandatory for certificate verification.
            orderer_str += (
                f"-o localhost:{_network_config.orderer_defport + i * 1000} "
                f"--tls "
                f"--ordererTLSHostnameOverride orderer{i+1}.dredev.de "
                f"--cafile=./crypto-config/ordererOrganizations/{_domain}/orderers/orderer{i+1}.{_domain}"
                f"/msp/tlscacerts/tlsca.{_domain}-cert.pem")
        print(bcolors.OKGREEN + f" [+] Orderer{i + 1} COMPLETE")
    os.environ["ORDERERS"] = orderer_str

    print(bcolors.OKBLUE + "======= Generating Peers for Organizations =======")
    peer_addresses = ""
    basepath = os.getcwd() + "/crypto-config"
    for org in range(_orgs):
        print(bcolors.WARNING + f" [*] Generating org{org + 1}.{_domain}")
        for peer in range(_peers):
            peer_addresses += (
                f"--peerAddresses "
                f"localhost:{_network_config.peer_defport + 1000 * ((_peers * org) + peer)} "
                f"--tlsRootCertFiles {basepath}/peerOrganizations/org{org + 1}.{_domain}/"
                f"peers/peer{peer}.org{org + 1}.{_domain}/tls/ca.crt ")
            print(bcolors.WARNING + f" [+] Generating peer{peer}.org{org + 1}.{_domain}")
            pe = {
                "container_name": f"peer{peer}.org{org + 1}.{_domain}",
                "image": "hyperledger/fabric-peer:2.0",
                "environment": [
                    "CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock",
                    "CORE_LOGGING_PEER=debug",
                    "CORE_CHAINCODE_LOGGING_LEVEL=DEBUG",
                    f"CORE_PEER_ID=peer{peer}.org{org + 1}.{_domain}",
                    f"CORE_PEER_ADDRESS=peer{peer}.org{org + 1}.{_domain}:{_network_config.peer_defport}",
                    f"CORE_PEER_LOCALMSPID=Org{org + 1}MSP",
                    f"CORE_PEER_CHAINCODEADDRESS=peer{peer}.org{org + 1}.{_domain}:{_network_config.peer_defport+1}",
                    f"CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:{_network_config.peer_defport+1}",
                    "CORE_PEER_MSPCONFIGPATH=/etc/hyperledger/msp/peer/",
                    "CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=${COMPOSE_PROJECT_NAME}_" + _network_config.network_name,
                    "CORE_LEDGER_STATE_STATEDATABASE=CouchDB",
                    f"CORE_LEDGER_STATE_COUCHDBCONFIG_COUCHDBADDRESS=couchdb{peer}.org{org + 1}.{_domain}:"
                    f"{_network_config.couchdb_defport}",
                    # The CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME and
                    # CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD provide the
                    # credentials for the ledger to connect to CouchDB. The
                    # username and password must match the ones set for the
                    # associated CouchDB. (Values are redacted as "******"
                    # in this listing.)
                    "CORE_LEDGER_STATE_COUCHDBCONFIG_USERNAME=******",
                    "CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD=******",
                    "CORE_PEER_TLS_ENABLED=true",
                    "CORE_PEER_GOSSIP_USELEADERELECTION=true",
                    "CORE_PEER_GOSSIP_ORGLEADER=false",
                    f"CORE_PEER_GOSSIP_BOOTSTRAP=peer{peer}.org{org + 1}.{_domain}:{_network_config.peer_defport}",
                    f"CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer{peer}.org{org + 1}.{_domain}:{_network_config.peer_defport}",
                    "CORE_PEER_PROFILE_ENABLED=true",
                    "CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt",
                    "CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key",
                    "CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt"
                ],
                "working_dir": "/opt/gopath/src/github.com/hyperledger/fabric",
                "command": "peer node start",
                "ports": [
                    f"{_network_config.peer_defport + 1000*((_peers * org) + peer)}:{_network_config.peer_defport}",
                    # f"{(_network_config.peer_defport+1)+1000*((_peers * org) + peer)}:{_network_config.peer_defport+1}",
                    # f"{(_network_config.peer_defport+2) + 1000*((_peers*org) + peer)}:{_network_config.peer_defport+2}",
                ],
                "volumes": [
                    "/var/run/:/host/var/run/",
                    f"./crypto-config/peerOrganizations/org{org + 1}.{_domain}/peers/peer{peer}.org{org + 1}.{_domain}/"
                    f"msp:/etc/hyperledger/msp/peer",
                    f"./crypto-config/peerOrganizations/org{org + 1}.{_domain}/peers/peer{peer}.org{org + 1}.{_domain}/"
                    f"tls:/etc/hyperledger/fabric/tls",
                    f"./crypto-config/peerOrganizations/org{org + 1}.{_domain}/users:/etc/hyperledger/msp/users",
                    "./config:/etc/hyperledger/configtx"
                ],
                "depends_on": [
                    f"couchdb{peer}.org{org + 1}.{_domain}"
                ],
                "networks": [
                    _network_config.network_name
                ]
            }
            services.update({f"peer{peer}.org{org + 1}.{_domain}": pe})
            all_node_containers.append(f"peer{peer}.org{org + 1}.{_domain}")
            print(bcolors.OKGREEN + f" [+] peer{peer}.org{org + 1}.{_domain} COMPLETE")
            print(bcolors.WARNING + f" [*] Generating couchdb{peer}.org{org + 1}.{_domain}")
            cdb = {
                "container_name": f"couchdb{peer}.org{org + 1}.{_domain}",
                "image": "hyperledger/fabric-couchdb",
                # Populate COUCHDB_USER and COUCHDB_PASSWORD to set an admin
                # user and password for CouchDB. This prevents CouchDB from
                # operating in an "Admin Party" mode. (Values are redacted
                # as "******" in this listing.)
                "environment": [
                    "COUCHDB_USER=******",
                    "COUCHDB_PASSWORD=******"
                ],
                "ports": [
                    f"{_network_config.couchdb_defport + 1000*((_peers*org)+peer)}:{_network_config.couchdb_defport}"
                ],
                "networks": [
                    _network_config.network_name
                ]
            }
            services.update({f"couchdb{peer}.org{org + 1}.{_domain}": cdb})
            all_node_containers.append(f"couchdb{peer}.org{org + 1}.{_domain}")
            print(bcolors.OKGREEN + f" [+] couchdb{peer}.org{org + 1}.{_domain} COMPLETE")
        print(bcolors.OKGREEN + f" [+] .org{org + 1}.{_domain} COMPLETE")
    os.environ["PEER_CON_PARAMS"] = peer_addresses

    print(bcolors.OKBLUE + "======= Generating CLI =======")
    print(bcolors.WARNING + " [*] CLI Generation started")
    cli = {
        "container_name": "cli",
        "image": "hyperledger/fabric-tools",
        "tty": True,
        # stdin_open: true
        "environment": [
            "GOPATH=/opt/gopath",
            "CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock",
            "FABRIC_LOGGING_SPEC=DEBUG",
            "CORE_PEER_ID=cli",
            f"CORE_PEER_ADDRESS=peer0.org1.{_domain}:{_network_config.peer_defport}",
            "CORE_PEER_LOCALMSPID=Org1MSP",
            f"CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/"
            f"org1.{_domain}/users/Admin@org1.{_domain}/msp",
            "CORE_PEER_TLS_ENABLED=true",
            f"CORE_PEER_TLS_CERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/"
            f"org1.{_domain}/peers/peer0.org1.{_domain}/tls/server.crt",
            f"CORE_PEER_TLS_KEY_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/"
            f"org1.{_domain}/peers/peer0.org1.{_domain}/tls/server.key",
            f"CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/"
            f"org1.{_domain}/peers/peer0.org1.{_domain}/tls/ca.crt",
            "CORE_CHAINCODE_KEEPALIVE=10"
        ],
        "working_dir": "/opt/gopath/src/github.com/hyperledger/fabric/peer",
        "command": "/bin/bash",
        "volumes": [
            "/var/run/:/host/var/run/",
            "./chaincodes/java:/opt/gopath/src/github.com/chaincodes/java",
            "./crypto-config:/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/",
            "./config:/etc/hyperledger/configtx"
        ],
        "networks": [
            _network_config.network_name
        ],
        "depends_on": all_node_containers
    }
    services.update({"cli": cli})
    print(bcolors.OKGREEN + " [+] CLI Generation COMPLETE")

    print(bcolors.OKBLUE + "======= Generating final Structure =======")
    final = {
        "version": SingleQuotedScalarString("2"),
        "networks": {
            _network_config.network_name: None
        },
        "services": services
    }
    # yaml_new.dump(final, sys.stdout)
    f = open("docker-compose.yaml", "w")
    yaml_new.dump(final, f)
    print(bcolors.HEADER + "========================================")
    print(">>> docker-compose.yaml has been dumped!")
    print("========================================")
def build_images(prefix, images, tag=None, push=False, force_push=False,
                 force_build=False, skip_build=False, long=False):
    """Build a collection of docker images

    Args:
        prefix (str): the prefix to add to image names
        images (dict): dict of image-specs from chartpress.yaml
        tag (str):
            Specific tag to use instead of the last modified commit.
            If unspecified the tag for each image will be the hash of
            the last commit to modify the image's files.
        push (bool): Whether to push the resulting images (default: False).
        force_push (bool):
            Whether to push the built images even if they already exist
            in the image registry (default: False).
        force_build (bool):
            To build even if the image is available locally or remotely
            already.
        skip_build (bool):
            Whether to skip the actual image build (only updates tags).
        long (bool):
            Whether to include the generated tag's build suffix even when
            the commit has a tag. Setting long to true could be useful if
            you have two build pipelines, one for commits and one for
            tags, and want to avoid generating conflicting build
            artifacts.

            Example 1:
            - long=False: 0.9.0
            - long=True:  0.9.0-n000.hasdf1234

            Example 2:
            - long=False: 0.9.0-n004.hsdfg2345
            - long=True:  0.9.0-n004.hsdfg2345
    """
    values_file_modifications = {}
    for name, options in images.items():
        # include chartpress.yaml in the image paths to inspect as
        # chartpress.yaml can contain build args influencing the image
        all_image_paths = _get_all_image_paths(name, options)

        # decide a tag string
        if tag is None:
            tag = _get_identifier_from_paths(*all_image_paths, long=long)

        image_name = options.get('imageName', prefix + name)

        # update values_file_modifications to return
        values_path_list = options.get('valuesPath', [])
        if isinstance(values_path_list, str):
            values_path_list = [values_path_list]
        for values_path in values_path_list:
            values_file_modifications[values_path] = {
                'repository': image_name,
                'tag': SingleQuotedScalarString(tag),
            }

        if skip_build:
            continue

        image_spec = f'{image_name}:{tag}'

        # build image
        if force_build or _image_needs_building(image_spec):
            build_image(
                image_spec,
                _get_image_build_context_path(name, options),
                dockerfile_path=_get_image_dockerfile_path(name, options),
                build_args=_get_image_build_args(
                    options,
                    {
                        'LAST_COMMIT':
                            _get_latest_commit_tagged_or_modifying_paths(
                                *all_image_paths, echo=False),
                        'TAG': tag,
                    },
                ),
            )
        else:
            _log(f"Skipping build for {image_spec}, it already exists")

        # push image
        if push or force_push:
            if force_push or _image_needs_pushing(image_spec):
                _check_call(['docker', 'push', image_spec])
            else:
                _log(f"Skipping push for {image_spec}, already on registry")

    return values_file_modifications
meths_range = mews.get_used_range()
meth_columns, *meth_values = meths_range.values

data = {}
for row in df.itertuples(name='row'):
    if row.IntentName not in data:
        data[row.IntentName] = {
            'intent_name': row.IntentName,
            'intent_description': row.IntentDescription,
            'domain': 'Media_Cable' if 'M' in row.IntentID else 'Finance',
            'scenario_id': SingleQuotedScalarString(
                f"202012{row.IntentID}".replace(' ', '')),
            'opening_utterances': [
                utterance for utterance in [
                    row.Customer1, row.Customer2, row.Customer3,
                    row.Customer4, row.Customer5, row.Customer6,
                    row.Customer7, row.Customer8
                ] if utterance
            ],
            'script': [
                Turn(
                    **{
                        'agent': False,
                        'sample_response': False,
                        'slot_to_elicit': False,
                        'intent_to_elicit': row.IntentName,
                        'confirm_intent': False,
                        'assume_intent': False,
def to_yaml(self):
    self.ubuntu_distros = [x['ubuntu'] for x in self.distros]
    self.ubuntu_distros = list(set(self.ubuntu_distros))
    self.ubuntu_distros.sort()

    ubuntu_building_config = """\
[ubuntu_building]
architectures: %s
distros: %s
repository_path: /var/repos/ubuntu/building
signing_key: %s
upstream_config: /home/jenkins-agent/reprepro_config
""" % (
        ' '.join(self.architectures),
        ' '.join(self.ubuntu_distros),
        self.gpg_key_id,
    )
    ubuntu_testing_config = """\
[ubuntu_testing]
architectures: %s
distros: %s
repository_path: /var/repos/ubuntu/testing
signing_key: %s
upstream_config: /home/jenkins-agent/reprepro_config
""" % (
        ' '.join(self.architectures),
        ' '.join(self.ubuntu_distros),
        self.gpg_key_id,
    )
    ubuntu_main_config = """\
[ubuntu_main]
architectures: %s
distros: %s
repository_path: /var/repos/ubuntu/main
signing_key: %s
upstream_config: /home/jenkins-agent/reprepro_config
""" % (
        ' '.join(self.architectures),
        ' '.join(self.ubuntu_distros),
        self.gpg_key_id,
    )
    reprepro_config_content = """\
name: ros_bootstrap
method: http://repos.ros.org/repos/ros_bootstrap
suites: [%s]
component: main
architectures: [%s]
verify_release: blindtrust
""" % (
        ', '.join(self.ubuntu_distros),
        ', '.join(self.architectures),
    )
    yaml_str = {
        'master::ip': self.ip_master,
        'repo::ip': self.ip_repo,
        'timezone': SingleQuotedScalarString(self.timezone),
        'ssh_keys': {
            SingleQuotedScalarString(self.ssh_name): {
                'key': SingleQuotedScalarString(self.ssh_public),
                'type': self.ssh_type,
                'user': '******',  # redacted in this listing
                'require': SingleQuotedScalarString('User[jenkins-agent]'),
            }
        },
        'jenkins::slave::ui_user': self.jenkins_user,
        'jenkins::slave::ui_pass':
            SingleQuotedScalarString(self.jenkins_password),
        'user::admin::name': self.jenkins_user,
        'user::admin::password_hash': '#jbcrypt:' + PreservedScalarString(
            hashpw(self.jenkins_password.encode('UTF-8'),
                   gensalt(10, prefix=b"2a")).decode('UTF-8')),
        'jenkins::private_ssh_key': PreservedScalarString(self.ssh_private),
        'ssh_host_keys': {
            'repo': SingleQuotedScalarString(self.repo_hostkey)
        },
        'jenkins-agent::gpg_key_id': self.gpg_key_id,
        'jenkins-agent::gpg_private_key':
            PreservedScalarString(self.gpg_private_key),
        'jenkins-agent::gpg_public_key':
            PreservedScalarString(self.gpg_public_key),
        'jenkins-agent::reprepro_updater_config':
            ubuntu_building_config + "\n" + ubuntu_testing_config + "\n" +
            ubuntu_main_config,
        'jenkins-agent::reprepro_config': {
            SingleQuotedScalarString(
                '/home/jenkins-agent/reprepro_config/ros_bootstrap.yaml'): {
                'ensure': SingleQuotedScalarString('present'),
                'content': PreservedScalarString(reprepro_config_content),
            }
        },
    }
    # If an additional host key was defined, add it to the configuration.
    if self.git_fetch_hostkey:
        yaml_str['ssh_host_keys'] = {
            'ssh_host_keys': {
                'repo': SingleQuotedScalarString(self.repo_hostkey),
                self.git_fetch_hostkey.split()[0]:
                    SingleQuotedScalarString(self.git_fetch_hostkey),
            }
        }
    return yaml_str
# http://stackoverflow.com/questions/39262556/preserve-quotes-and-also-add-data-with-quotes-in-ruamel
from ruamel.yaml.scalarstring import SingleQuotedScalarString, DoubleQuotedScalarString

hiera['foo'] = SingleQuotedScalarString('bar')
hiera['bar'] = DoubleQuotedScalarString('baz')
# This will add the following at the end of every YAML file:
#   foo: 'bar'
#   bar: "baz"
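# A complete round trip built on the same idea; preserve_quotes keeps the
# quoting of scalars already in the file, while the two new keys take their
# quoting from the wrapper types (the one-line document is illustrative):
import sys
from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import SingleQuotedScalarString, DoubleQuotedScalarString

yaml = YAML()
yaml.preserve_quotes = True
hiera = yaml.load("existing: 'untouched'\n")
hiera['foo'] = SingleQuotedScalarString('bar')
hiera['bar'] = DoubleQuotedScalarString('baz')
yaml.dump(hiera, sys.stdout)
# existing: 'untouched'
# foo: 'bar'
# bar: "baz"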
yaml_file = None
if args.yaml_file:
    yaml_file = pathlib.Path(args.yaml_file)
    file = yaml.load(yaml_file)
else:
    file = yaml.load(sys.stdin)

existing_value = getFromDict(file, key[:-1])[key[-1]]
if yaml_file:
    print("Existing key", key, "has value", existing_value)
if existing_value != value:
    if quotes == Quotes.single:
        setInDict(
            file, key,
            list(map(lambda value: SingleQuotedScalarString(value), value)))
    else:
        setInDict(
            file, key,
            list(map(lambda value: DoubleQuotedScalarString(value), value)))
elif yaml_file:
    print("Key has not changed")

if yaml_file:
    yaml.dump(file, yaml_file)
else:
    yaml.dump(file, sys.stdout)
def url_port(self):
    return SingleQuotedScalarString(
        '443') if not self._url_port else self._url_port
def extract_to_package_format(self) -> int:
    """Extracts the self.yml_path into several files according to the
    Demisto standard of the package format.

    Returns:
        int. status code for the operation.
    """
    print("Starting migration of: {} to dir: {}".format(self.yml_path,
                                                        self.dest_path))
    arg_path = self.dest_path
    output_path = os.path.abspath(self.dest_path)
    os.makedirs(output_path, exist_ok=True)
    base_name = os.path.basename(output_path)
    yml_type = self.get_yml_type()
    code_file = "{}/{}.py".format(output_path, base_name)
    self.extract_code(code_file)
    self.extract_image("{}/{}_image.png".format(output_path, base_name))
    self.extract_long_description("{}/{}_description.md".format(output_path,
                                                                base_name))
    yaml_out = "{}/{}.yml".format(output_path, base_name)
    print("Creating yml file: {} ...".format(yaml_out))
    ryaml = YAML()
    ryaml.preserve_quotes = True
    with open(self.yml_path, 'r') as yf:
        yaml_obj = ryaml.load(yf)
    script_obj = yaml_obj
    if yml_type == INTEGRATION:
        script_obj = yaml_obj['script']
        del yaml_obj['image']
        if 'detaileddescription' in yaml_obj:
            del yaml_obj['detaileddescription']
    if script_obj['type'] != 'python':
        print('Script is not of type "python". Found type: {}. '
              'Nothing to do.'.format(script_obj['type']))
        return 1
    script_obj['script'] = SingleQuotedScalarString('')
    with open(yaml_out, 'w') as yf:
        ryaml.dump(yaml_obj, yf)
    print("Running autopep8 on file: {} ...".format(code_file))
    try:
        subprocess.call(["autopep8", "-i", "--max-line-length", "130",
                         code_file])
    except FileNotFoundError:
        print_color("autopep8 skipped! It doesn't seem you have autopep8 installed.\n"
                    "Make sure to install it with: pip install autopep8.\n"
                    "Then run: autopep8 -i {}".format(code_file),
                    LOG_COLORS.YELLOW)
    print("Detecting python version and setting up pipenv files ...")
    docker = get_docker_images(script_obj)[0]
    py_ver = get_python_version(docker, self.config.log_verbose)
    pip_env_dir = get_pipenv_dir(py_ver, self.config.envs_dirs_base)
    print("Copying pipenv files from: {}".format(pip_env_dir))
    shutil.copy("{}/Pipfile".format(pip_env_dir), output_path)
    shutil.copy("{}/Pipfile.lock".format(pip_env_dir), output_path)
    try:
        subprocess.call(["pipenv", "install", "--dev"], cwd=output_path)
        print("Installing all py requirements from docker: [{}] into "
              "pipenv".format(docker))
        requirements = subprocess.check_output(
            ["docker", "run", "--rm", docker, "pip", "freeze",
             "--disable-pip-version-check"],
            universal_newlines=True, stderr=subprocess.DEVNULL).strip()
        fp = tempfile.NamedTemporaryFile(delete=False)
        fp.write(requirements.encode('utf-8'))
        fp.close()
        try:
            subprocess.check_call(["pipenv", "install", "-r", fp.name],
                                  cwd=output_path)
        except Exception:
            print_color("Failed installing requirements in pipenv.\n "
                        "Please try installing manually after extract ends\n",
                        LOG_COLORS.RED)
        os.unlink(fp.name)
        print("Installing flake8 for linting")
        subprocess.call(["pipenv", "install", "--dev", "flake8"],
                        cwd=output_path)
    except FileNotFoundError:
        print_color("pipenv install skipped! It doesn't seem you have pipenv installed.\n"
                    "Make sure to install it with: pip3 install pipenv.\n"
                    "Then run in the package dir: pipenv install --dev",
                    LOG_COLORS.YELLOW)
    # check if there is a changelog
    yml_changelog = os.path.splitext(self.yml_path)[0] + '_CHANGELOG.md'
    changelog = arg_path + '/CHANGELOG.md'
    if os.path.exists(yml_changelog):
        shutil.copy(yml_changelog, changelog)
    else:
        with open(changelog, 'wt', encoding='utf-8') as changelog_file:
            changelog_file.write("## [Unreleased]\n-\n")
    print_color("\nCompleted: setting up package: {}\n".format(arg_path),
                LOG_COLORS.GREEN)
    print("Next steps: \n",
          "* Install additional py packages for unit testing (if needed): "
          "cd {}; pipenv install <package>\n".format(arg_path),
          "* Create unit tests\n",
          "* Check linting and unit tests by running: "
          "./Tests/scripts/pkg_dev_test_tasks.py -d {}\n".format(arg_path),
          "* When ready rm from git the source yml and add the new package:\n",
          " git rm {}\n".format(self.yml_path),
          " git add {}\n".format(arg_path),
          sep='')
    return 0
# ssh_keys:
#   [email protected]:
#     type: 'ssh-rsa'
#     key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSWvVUwYDFbFEoYPSQAJBLwuWUVnfQG0tVSKWNK7Q+Pt4BWoK4qW9oaZs6vKQLSEwHhXsygu6JsggT+pzbTQ4PCbcEZqNyBo3X5D+tvc1mVqYB+oL0aTyFB+gt6/RZxsPF4J3ihDHNQXG6gIZ5SI21u5gkRnWWz3E9XRkhHOPDnjwkpTqo6lnNVVsGUhVRTEy2G9rvwHA6K8ZxSEwhPzFS2Wv9HgEng22ojJ3MSDrrBDa/FBxSsZhieajdEPev73qUB9od3YNbzyMbiXdHmk7GFlckxEi9twT+vOrZdOxJVZylSPFqIDr1V5buk9mPyav9wN3ntqGAqj42pXdmNaNd [email protected]'
from ruamel.yaml.scalarstring import SingleQuotedScalarString

# Email addresses are redacted in this listing.
team_keys = {
    '*****@*****.**':
        'AAAAB3NzaC1yc2EAAAADAQABAAABAQDSWvVUwYDFbFEoYPSQAJBLwuWUVnfQG0tVSKWNK7Q+Pt4BWoK4qW9oaZs6vKQLSEwHhXsygu6JsggT+pzbTQ4PCbcEZqNyBo3X5D+tvc1mVqYB+oL0aTyFB+gt6/RZxsPF4J3ihDHNQXG6gIZ5SI21u5gkRnWWz3E9XRkhHOPDnjwkpTqo6lnNVVsGUhVRTEy2G9rvwHA6K8ZxSEwhPzFS2Wv9HgEng22ojJ3MSDrrBDa/FBxSsZhieajdEPev73qUB9od3YNbzyMbiXdHmk7GFlckxEi9twT+vOrZdOxJVZylSPFqIDr1V5buk9mPyav9wN3ntqGAqj42pXdmNaNd',
    '*****@*****.**':
        'AAAAB3NzaC1yc2EAAAADAQABAAABAQDqNKFOy4kYtMEPLEqAPR5crpLNiIkxxtdA4+MGMJbocQZNmBulWfbgeWbYnqE/IsjTUY8TWDjc34vHVxEuOb1vKP4Qn56vTWGmxyQXRDZbscO/UwQ8NHquFggDKK+xWg9v/VVCp1gPrz95DO7qjihZK3uqOZCfssdrMFI8dE/swZeaXBTmsPf2mj9FaI2SOuahdNd1wCMs64Fqhs7rwsk5O8I4L83Or8ttFXjmTELhjn3bs6odZYmtb0jiOlohJrQ/+IsOcJ4qwHtC96fQhDkH+YIC6FoaThn5ZFBlOkRoIpe49DFv/kbGQNufLrIZSGwq9dnnphfclWbXmy0G1IbF',
}

if ('profile::base::users' in hiera
        and 'ec2-user' in hiera['profile::base::users']
        and 'ssh_keys' in hiera['profile::base::users']['ec2-user']):
    try:
        for email, key in team_keys.items():
            if email not in hiera['profile::base::users']['ec2-user'][
                    'ssh_keys']:
                hiera['profile::base::users']['ec2-user']['ssh_keys'][
                    email] = {
                        'type': SingleQuotedScalarString('ssh-rsa'),
                        'key': SingleQuotedScalarString(key),
                    }
    except Exception:
        e = sys.exc_info()[0]
        print("Got %s when executing %s for %s" % (e, code_file, f))
def build_images(
    prefix,
    images,
    tag=None,
    commit_range=None,
    push=False,
    chart_version=None,
    skip_build=False,
    tag_prefix="",
    tag_latest=False,
):
    """Build a collection of docker images

    Args:
        prefix (str): the prefix to add to images
        images (dict): dict of image-specs from chartpress.yml
        tag (str):
            Specific tag to use instead of the last modified commit.
            If unspecified the tag for each image will be the hash of
            the last commit to modify the image's files.
        commit_range (str):
            The range of commits to consider, e.g. for building in CI.
            If an image hasn't changed in the given range, it will not
            be rebuilt.
        push (bool): Whether to push the resulting images (default: False).
        chart_version (str):
            The chart version, included as a prefix on image tags if
            `tag` is not specified.
        skip_build (bool):
            Whether to skip the actual image build (only updates tags).
        tag_prefix (str):
            An optional prefix on all image tags (in front of
            chart_version or tag).
        tag_latest (bool):
            Whether to also tag images with the latest tag.
    """
    value_modifications = {}
    for name, options in images.items():
        image_path = options.get("contextPath", os.path.join("images", name))
        image_tag = tag
        # include chartpress.yaml itself as it can contain build args and
        # similar that influence the image that would be built
        paths = list(options.get("paths", [])) + [image_path,
                                                  "chartpress.yaml"]
        last_commit = last_modified_commit(*paths)
        if tag is None:
            if chart_version:
                image_tag = "{}-{}".format(chart_version, last_commit)
            else:
                image_tag = last_commit
        image_name = prefix + name
        image_tag = tag_prefix + image_tag
        image_spec = "{}:{}".format(image_name, image_tag)
        image_aliases = []
        if tag_latest:
            image_aliases = ["{}:latest".format(image_name)]

        value_modifications[options["valuesPath"]] = {
            "repository": image_name,
            "tag": SingleQuotedScalarString(image_tag),
        }

        if skip_build:
            continue

        template_namespace = {
            "LAST_COMMIT": last_commit,
            "TAG": image_tag,
        }

        if tag or image_needs_building(image_spec):
            build_args = render_build_args(options, template_namespace)
            build_image(
                image_path,
                image_spec,
                build_args,
                options.get("dockerfilePath"),
                image_aliases,
            )
        else:
            print(f"Skipping build for {image_spec}, it already exists")

        if push:
            if tag or image_needs_pushing(image_spec):
                check_call(["docker", "push", image_spec])
                for alias in image_aliases:
                    check_call(["docker", "push", alias])
            else:
                print(f"Skipping push for {image_spec}, already on registry")

    return value_modifications
def extract_to_package_format(self) -> int:
    """Extracts the self.input yml file into several files according to the
    Demisto standard of the package format.

    Returns:
        int. status code for the operation.
    """
    try:
        output_path = self.get_output_path()
    except ValueError as ex:
        print_error(str(ex))
        return 1
    self.print_logs("Starting migration of: {} to dir: {}".format(
        self.input, output_path), log_color=LOG_COLORS.NATIVE)
    os.makedirs(output_path, exist_ok=True)
    base_name = os.path.basename(output_path) if not self.base_name \
        else self.base_name
    code_file = "{}/{}".format(output_path, base_name)
    self.extract_code(code_file)
    script = self.yml_data['script']
    lang_type: str = script['type'] \
        if self.file_type == 'integration' else self.yml_data['type']
    code_file = f"{code_file}{TYPE_TO_EXTENSION[lang_type]}"
    self.extract_image("{}/{}_image.png".format(output_path, base_name))
    self.extract_long_description("{}/{}_description.md".format(output_path,
                                                                base_name))
    yaml_out = "{}/{}.yml".format(output_path, base_name)
    self.print_logs("Creating yml file: {} ...".format(yaml_out),
                    log_color=LOG_COLORS.NATIVE)
    ryaml = YAML()
    ryaml.preserve_quotes = True
    with open(self.input, 'r') as yf:
        yaml_obj = ryaml.load(yf)
    script_obj = yaml_obj
    if self.file_type == 'integration':
        script_obj = yaml_obj['script']
        if 'image' in yaml_obj:
            del yaml_obj['image']
        if 'detaileddescription' in yaml_obj:
            del yaml_obj['detaileddescription']
    script_obj['script'] = SingleQuotedScalarString('')
    code_type = script_obj['type']
    if code_type == TYPE_PWSH and not yaml_obj.get('fromversion'):
        self.print_logs("Setting fromversion for PowerShell to: 5.5.0",
                        log_color=LOG_COLORS.NATIVE)
        yaml_obj['fromversion'] = "5.5.0"
    with open(yaml_out, 'w') as yf:
        ryaml.dump(yaml_obj, yf)
    # check if there is a README and if found, set found_readme to True
    found_readme = False
    if self.readme:
        yml_readme = os.path.splitext(self.input)[0] + '_README.md'
        readme = output_path + '/README.md'
        if os.path.exists(yml_readme):
            found_readme = True
            self.print_logs(f"Copying {yml_readme} to {readme}",
                            log_color=LOG_COLORS.NATIVE)
            shutil.copy(yml_readme, readme)
        else:
            # open an empty file
            with open(readme, 'w'):
                pass
    # Python code formatting and dev env setup
    if code_type == TYPE_PYTHON:
        if self.basic_fmt:
            self.print_logs("Running autopep8 on file: {} ...".format(
                code_file), log_color=LOG_COLORS.NATIVE)
            try:
                subprocess.call(["autopep8", "-i", "--max-line-length",
                                 "130", code_file])
            except FileNotFoundError:
                self.print_logs(
                    "autopep8 skipped! It doesn't seem you have autopep8 installed.\n"
                    "Make sure to install it with: pip install autopep8.\n"
                    "Then run: autopep8 -i {}".format(code_file),
                    LOG_COLORS.YELLOW)
        if self.pipenv:
            if self.basic_fmt:
                self.print_logs("Running isort on file: {} ...".format(
                    code_file), LOG_COLORS.NATIVE)
                try:
                    subprocess.call(["isort", code_file])
                except FileNotFoundError:
                    self.print_logs(
                        "isort skipped! It doesn't seem you have isort installed.\n"
                        "Make sure to install it with: pip install isort.\n"
                        "Then run: isort {}".format(code_file),
                        LOG_COLORS.YELLOW)
            self.print_logs("Detecting python version and setting up pipenv "
                            "files ...", log_color=LOG_COLORS.NATIVE)
            docker = get_all_docker_images(script_obj)[0]
            py_ver = get_python_version(docker, self.config.log_verbose)
            pip_env_dir = get_pipenv_dir(py_ver, self.config.envs_dirs_base)
            self.print_logs("Copying pipenv files from: {}".format(
                pip_env_dir), log_color=LOG_COLORS.NATIVE)
            shutil.copy("{}/Pipfile".format(pip_env_dir), output_path)
            shutil.copy("{}/Pipfile.lock".format(pip_env_dir), output_path)
            env = os.environ.copy()
            env["PIPENV_IGNORE_VIRTUALENVS"] = "1"
            try:
                subprocess.call(["pipenv", "install", "--dev"],
                                cwd=output_path, env=env)
                self.print_logs("Installing all py requirements from docker: "
                                "[{}] into pipenv".format(docker),
                                LOG_COLORS.NATIVE)
                requirements = get_pip_requirements(docker)
                fp = tempfile.NamedTemporaryFile(delete=False)
                fp.write(requirements.encode('utf-8'))
                fp.close()
                try:
                    subprocess.check_call(["pipenv", "install", "-r",
                                           fp.name], cwd=output_path, env=env)
                except Exception:
                    self.print_logs(
                        "Failed installing requirements in pipenv.\n "
                        "Please try installing manually after extract ends\n",
                        LOG_COLORS.RED)
                os.unlink(fp.name)
                self.print_logs("Installing flake8 for linting",
                                log_color=LOG_COLORS.NATIVE)
                subprocess.call(["pipenv", "install", "--dev", "flake8"],
                                cwd=output_path, env=env)
            except FileNotFoundError as err:
                self.print_logs(
                    "pipenv install skipped! It doesn't seem you have pipenv installed.\n"
                    "Make sure to install it with: pip3 install pipenv.\n"
                    f"Then run in the package dir: pipenv install --dev\n"
                    f".Err: {err}", LOG_COLORS.YELLOW)
            arg_path = os.path.relpath(output_path)
            self.print_logs("\nCompleted: setting up package: {}\n".format(
                arg_path), LOG_COLORS.GREEN)
            next_steps: str = (
                "Next steps: \n"
                "* Install additional py packages for unit testing "
                "(if needed): cd {}; pipenv install <package>\n".format(
                    arg_path) if code_type == TYPE_PYTHON else '')
            next_steps += (
                "* Create unit tests\n"
                "* Check linting and unit tests by running: "
                "demisto-sdk lint -i {}\n".format(arg_path))
            next_steps += (
                "* When ready, remove from git the old yml and/or README "
                "and add the new package:\n"
                "  git rm {}\n".format(self.input))
            if found_readme:
                next_steps += "  git rm {}\n".format(
                    os.path.splitext(self.input)[0] + '_README.md')
            next_steps += "  git add {}\n".format(arg_path)
            self.print_logs(next_steps, log_color=LOG_COLORS.NATIVE)
        else:
            self.print_logs("Skipping pipenv and requirements installation "
                            "- Note: no Pipfile will be created",
                            log_color=LOG_COLORS.YELLOW)
    self.print_logs(f"Finished splitting the yml file - you can find the "
                    f"split results here: {output_path}",
                    log_color=LOG_COLORS.GREEN)
    return 0