def _get_cert_key_from_property(self, cert_property, cert_type):
    """Gets the cert & key from dataprov info

    :type cert_property: CertProperty
    :type cert_type: str
    """
    cert, priv_key, pub_key = None, None, None

    # A cert property that fails validation cannot be used at all.
    if not cert_property.validate():
        raise RuntimeError(cert_type.title() + " certificate params are invalid! Please check config file.")
    logger.info('Initialization with dataprov. These fields might not be used in final output if overridden')

    # Load the private key (and derive its public key) when a readable path is configured.
    priv_path = cert_property.priv_path
    if priv_path and c_path.validate_file(priv_path):
        logger.info('Using a predefined ' + cert_type + ' private key from: ' + priv_path)
        with open(priv_path, 'rb') as key_file:
            priv_key = key_file.read()
        # Same conversion pipeline for both key types; only the module differs.
        crypto = ecdsa_functions if self.using_ecdsa else rsa_functions
        priv_key = crypto.get_key_in_format(priv_key, utils.FORMAT_PEM)
        pub_key = crypto.get_public_key_from_private(priv_key)

    # Load the certificate when a readable path is configured.
    cert_path = cert_property.cert_path
    if cert_path and c_path.validate_file(cert_path):
        logger.info('Using a predefined ' + cert_type + ' certificate from: ' + cert_path)
        with open(cert_path, 'rb') as cert_file:
            cert = cert_file.read()
        cert = cert_functions.get_cert_in_format(cert, utils.FORMAT_PEM)

    return cert, priv_key, pub_key
def validate(self, signing, *args):
    """Validate the CASS signer keystore paths, when configured.

    :returns: (retval, error_str) -- retval is False when any configured
        keystore path does not point to a readable file.
    """
    assert (isinstance(signing, complex_signing))
    retval, error_str = True, ''

    # Keystore rules apply only when cass_signer_attributes is present.
    cass_signer_attr = signing.get_signer_attributes().get_cass_signer_attributes()
    if cass_signer_attr is not None:
        server = cass_signer_attr.get_server()
        if server:
            self.trust_keystore = server.get_trust_keystore()
            if c_path.validate_file(self.trust_keystore) is False:
                retval = False
                error_str += '\n trust_keystore is invalid, path = %s' % self.trust_keystore
        self.keystore_file = cass_signer_attr.get_user_identity().get_keystore_file()
        # keystore_file is optional; validate only when it is set.
        if self.keystore_file and c_path.validate_file(self.keystore_file) is False:
            retval = False
            error_str += '\n keystore_file is invalid, path = %s' % self.keystore_file
    return retval, error_str
def validate(self, signing, *args):
    """Validate signing configuration paths.

    Checks the OpenSSL config input paths and, when CASS signer attributes
    are present, the trust_keystore / keystore_file paths.

    :returns: (retval, error_str) -- retval is False when any path is invalid;
        error_str aggregates one message per invalid path.
    """
    assert (isinstance(signing, complex_signing))
    # NOTE(review): default_attr is only referenced by the disabled debug rule below.
    default_attr = signing.get_default_attributes()
    retval = True
    error_str = ''
    ''' self.debug = debug '''
    # debug rule (disabled)
    # self.debug = default_attr.get_debug()
    # if (self.debug is None) or (not int(self.debug, 16)):
    #     retval = False
    #     error_str += '\n debug is not set: %s' % self.debug
    # Signing path rules: the OpenSSL config inputs must all exist.
    openssl_config_inputs = signing.get_signer_attributes(
    ).get_local_signer_attributes().get_openssl_config_inputs()
    self.attestation_certificate_extensions_path = openssl_config_inputs.get_attestation_certificate_extensions_path(
    )
    self.ca_certificate_extensions_path = openssl_config_inputs.get_ca_certificate_extensions_path(
    )
    self.openssl_configfile_path = openssl_config_inputs.get_openssl_configfile_path(
    )
    if c_path.validate_file(
            self.attestation_certificate_extensions_path) is False:
        retval = False
        # NOTE(review): "atteststation" typo kept -- this text is runtime output.
        error_str += '\n atteststation_certificate_extensions_path is invalid, path = %s' % self.attestation_certificate_extensions_path
    if c_path.validate_file(self.ca_certificate_extensions_path) is False:
        retval = False
        error_str += '\n ca_certificate_extensions_path is invalid, path = %s' % self.ca_certificate_extensions_path
    if c_path.validate_file(self.openssl_configfile_path) is False:
        retval = False
        error_str += '\n openssl_configfile_path is invalid, path = %s' % self.openssl_configfile_path
    # Keystore paths from cass_signer_attributes, when configured.
    cass_signer_attr = signing.get_signer_attributes(
    ).get_cass_signer_attributes()
    if cass_signer_attr is not None:
        # keystore rule: they must all exist
        if cass_signer_attr.get_server():
            self.trust_keystore = cass_signer_attr.get_server(
            ).get_trust_keystore()
            if c_path.validate_file(self.trust_keystore) is False:
                retval = False
                error_str += '\n trust_keystore is invalid, path = %s' % self.trust_keystore
        self.keystore_file = cass_signer_attr.get_user_identity(
        ).get_keystore_file()
        # keystore_file is optional; validate only when set.
        if self.keystore_file and c_path.validate_file(
                self.keystore_file) is False:
            retval = False
            error_str += '\n keystore_file is invalid, path = %s' % self.keystore_file
    return retval, error_str
def discover(self):
    '''Searches for the openssl binary in:

    #. The environment using the openssl tag
    #. Prepackaged binary folder
    #. Current path
    #. System path

    :returns str: Path to the openssl binary.
    '''
    module_name = BINARY_NAME.title()
    filenames = bin_names(BINARY_NAME)
    module = ModuleNotFound
    for filename in filenames:
        # Using the environment
        if OPENSSL_ENV_DIR_TAG in os.environ:
            env_module = c_path.join(os.environ[OPENSSL_ENV_DIR_TAG], filename)
            if not c_path.validate_file(env_module):
                logger.debug2(
                    module_name + ': File from environment does not exist at - ' + env_module)
            elif not self.is_supported_version(env_module):
                logger.debug2(
                    module_name + ': File from environment is not the correct version - ' + env_module)
            else:
                module = env_module
                logger.debug2(module_name + ': Found from environment at - ' + env_module)
                break
        # Searching in prepacked dir, current dir and system paths
        else:
            folder = packaged_bin_folder
            modules_found = c_path.which(filename, paths=[folder])
            for module_found in modules_found:
                if not self.is_supported_version(module_found):
                    continue
                module = module_found
                # Export the packaged openssl.cnf for the discovered binary.
                conf_file = c_path.join(folder, OPENSSL_CONF_FILE)
                if c_path.validate_file(conf_file):
                    os.environ[OPENSSL_ENV_CONF_TAG] = conf_file
                logger.debug2(module_name + ': Found at - ' + module)
                # NOTE(review): this break exits only the inner loop; the outer
                # loop keeps scanning the remaining filenames, so the for-else
                # "Not Found" log can fire even after a module was found. The
                # sibling discover() adds an explicit outer-loop break -- confirm
                # whether that fix is intended here too.
                break
    else:
        logger.debug2(module_name + ': Not Found')
    # Log if permissions are not correct
    if module != ModuleNotFound and not os.access(module, os.X_OK):
        logger.error(module_name + ': Cannot execute. Missing execution permission.')
    return module
def discover(self):
    """Searches for the openssl binary in:

    #. The environment using the openssl tag
    #. Prepackaged binary folder
    #. Current path
    #. System path

    :returns str: Path to the openssl binary.
    """
    module_name = BINARY_NAME.title()
    filenames = bin_names(BINARY_NAME)
    module = ModuleNotFound
    logger.debug2('module_name: ' + str(module_name) + ', filenames: ' + str(filenames))
    for filename in filenames:
        # Using the environment
        if OPENSSL_ENV_DIR_TAG in os.environ:
            logger.debug2(str(OPENSSL_ENV_DIR_TAG) + ' tag found in environment')
            env_module = c_path.join(os.environ[OPENSSL_ENV_DIR_TAG], filename)
            logger.debug2('Looking for: ' + str(env_module))
            if not c_path.validate_file(env_module):
                logger.warning(module_name + ': File from environment does not exist at - ' + env_module)
            elif not self.is_supported_version(env_module):
                logger.warning(module_name + ': File from environment is not the correct version - ' + env_module)
            else:
                module = env_module
                logger.debug2(module_name + ': Found from environment at - ' + env_module)
                break
        # Searching in prepacked dir, current dir and system paths
        else:
            folder = packaged_bin_folder
            logger.debug2('Looking for: ' + str(filename) + ' in folder: ' + str(folder))
            for module_found in c_path.which_generator(filename, paths=[folder], find_one=False):
                if not self.is_supported_version(module_found):
                    logger.debug2('Incorrect version: ' + str(module_found))
                    continue
                module = module_found
                # Export the packaged openssl.cnf for the discovered binary.
                conf_file = c_path.join(folder, OPENSSL_CONF_FILE)
                if c_path.validate_file(conf_file):
                    os.environ[OPENSSL_ENV_CONF_TAG] = conf_file
                logger.debug2(module_name + ': Found at - ' + module)
                break
        # Check if module is found -- exits the outer loop once any filename
        # variant yielded a usable module.
        if module != ModuleNotFound:
            break
    else:
        logger.error(module_name + ': Not Found')
    # Log if permissions are not correct
    if module != ModuleNotFound and not os.access(module, os.X_OK):
        logger.error(module_name + ': Cannot execute. Missing execution permission.')
    return module
def __init__(self, image_path, img_config_parser, parsegen_config, authority,
             sign_id=None, gen_multi_image=False, multi_image_path=None):
    """Stager for a single image, optionally with Multi-Image Sign & Integrity support.

    :param str image_path: Path to the image to stage; must be a readable file.
    :param img_config_parser: Image config parser (ConfigParser).
    :param parsegen_config: Parsegen config parser (ParsegenCfgParser).
    :param authority: Signing authority; selects the Multi-Image sign id.
    :param str sign_id: Optional explicit sign id for the image.
    :param bool gen_multi_image: Create an imageinfo for a to-be-generated
        Multi-Image Signing & Integrity image.
    :param multi_image_path: Path to an existing Multi-Image image, if any.
    :raises RuntimeError: when an image path is not a readable file.
    """
    # Local import -- presumably avoids a circular import; TODO confirm.
    from sectools.features.isc.parsegen.config.parser import ParsegenCfgParser
    assert isinstance(image_path, str)
    assert isinstance(img_config_parser, ConfigParser)
    assert isinstance(parsegen_config, ParsegenCfgParser)
    if sign_id is not None:
        assert isinstance(sign_id, str)

    # Initialize the BaseStager
    BaseStager.__init__(self, authority)

    # Validate that the image path exists
    image_path = c_path.normalize(image_path)
    if not c_path.validate_file(image_path):
        raise RuntimeError('No read access to the image path: ' + image_path)

    # Put the image info object into the list
    imageinfo = self._create_imageinfo(img_config_parser, parsegen_config,
                                       sign_id, image_path, False)
    self._image_info_list.append(imageinfo)

    # Validate that the Multi-Image Sign & Integrity image path exists
    if multi_image_path is not None:
        multi_image_path = c_path.normalize(multi_image_path)
        if not c_path.validate_file(multi_image_path):
            raise RuntimeError('No read access to the ' + multi_image_string() +
                               ' image path: ' + multi_image_path)
        # Set sign id to Multi-Image image's sign id
        sign_id = MULTI_IMAGE_SIGN_ID[authority]
        # Set the Multi-Image Signing & Integrity image's imageinfo
        multi_image_imageinfo = self._create_imageinfo(
            img_config_parser, parsegen_config, sign_id, multi_image_path, False)
        self._multi_image_imageinfo_dict[
            multi_image_imageinfo.chipset] = multi_image_imageinfo
    # Create image info object for to-be-created Multi-Image Signing and Integrity image
    elif gen_multi_image:
        # Set sign id to Multi-Image Sign & Integrity image's sign id
        sign_id = MULTI_IMAGE_SIGN_ID[authority]
        # Set the Multi-Image Signing & Integrity image's imageinfo
        multi_image_imageinfo = self._create_imageinfo(
            img_config_parser, parsegen_config, sign_id, None, gen_multi_image)
        self._multi_image_imageinfo_dict[
            multi_image_imageinfo.chipset] = multi_image_imageinfo
def validate(self, signing, *args):
    """Validate signing configuration paths.

    Checks the OpenSSL config input paths and, when CASS signer attributes
    are present, the trust_keystore / keystore_file paths.

    :returns: (retval, error_str) -- retval is False when any path is invalid;
        error_str aggregates one message per invalid path.
    """
    assert(isinstance(signing, complex_signing))
    # NOTE(review): default_attr is only referenced by the disabled debug rule below.
    default_attr = signing.get_default_attributes()
    retval = True
    error_str = ''
    ''' self.debug = debug '''
    # debug rule (disabled)
    # self.debug = default_attr.get_debug()
    # if (self.debug is None) or (not int(self.debug, 16)):
    #     retval = False
    #     error_str += '\n debug is not set: %s' % self.debug
    # Signing path rules: the OpenSSL config inputs must all exist.
    openssl_config_inputs = signing.get_signer_attributes().get_local_signer_attributes().get_openssl_config_inputs()
    self.attestation_certificate_extensions_path = openssl_config_inputs.get_attestation_certificate_extensions_path()
    self.ca_certificate_extensions_path = openssl_config_inputs.get_ca_certificate_extensions_path()
    self.openssl_configfile_path = openssl_config_inputs.get_openssl_configfile_path()
    if c_path.validate_file(self.attestation_certificate_extensions_path) is False:
        retval = False
        # NOTE(review): "atteststation" typo kept -- this text is runtime output.
        error_str += '\n atteststation_certificate_extensions_path is invalid, path = %s' % self.attestation_certificate_extensions_path
    if c_path.validate_file(self.ca_certificate_extensions_path) is False:
        retval = False
        error_str += '\n ca_certificate_extensions_path is invalid, path = %s' % self.ca_certificate_extensions_path
    if c_path.validate_file(self.openssl_configfile_path) is False:
        retval = False
        error_str += '\n openssl_configfile_path is invalid, path = %s' % self.openssl_configfile_path
    # Keystore paths from cass_signer_attributes, when configured.
    cass_signer_attr = signing.get_signer_attributes().get_cass_signer_attributes()
    if cass_signer_attr is not None:
        # keystore rule: they must all exist
        if cass_signer_attr.get_server():
            self.trust_keystore = cass_signer_attr.get_server().get_trust_keystore()
            if c_path.validate_file(self.trust_keystore) is False:
                retval = False
                error_str += '\n trust_keystore is invalid, path = %s' % self.trust_keystore
        self.keystore_file = cass_signer_attr.get_user_identity().get_keystore_file()
        # keystore_file is optional; validate only when set.
        if self.keystore_file and c_path.validate_file(self.keystore_file) is False:
            retval = False
            error_str += '\n keystore_file is invalid, path = %s' % self.keystore_file
    return retval, error_str
def _post_process(self, image, pil_splitter_path, meta_build_path):
    ''' Replacement tags in postsign commands for images. '''
    # Tags substituted inside each postsign command string.
    TAG_OUTPUT_DIR = '$(OUTPUT_DIR)'
    TAG_PIL_SPLITTER = '$(PIL_SPLITTER)'
    TAG_IMAGE_FILE = '$(IMAGE_FILE)'
    REPL_META_PATH = '$(META_BUILD)'

    if image.post_process_commands:
        # Ensure pil splitter is available
        if pil_splitter_path.find(REPL_META_PATH) != -1:
            if not meta_build_path:
                raise RuntimeError('Metabuild path is not available for pil splitter')
            pil_splitter_path = pil_splitter_path.replace(REPL_META_PATH, meta_build_path)
        # NOTE(review): reconstructed with this check at the outer level so the
        # splitter path is validated whether or not the META_BUILD tag was
        # substituted -- confirm against the original layout.
        if not c_path.validate_file(pil_splitter_path):
            raise RuntimeError('Cannot access pil splitter at: ' + pil_splitter_path)

        # Run all the commands
        # NOTE(review): split() tokenizes on whitespace, so a command containing
        # spaces is broken into fragments -- confirm commands are single tokens
        # or whether a different delimiter was intended.
        for cmd in [c.strip() for c in image.post_process_commands.split()]:
            # Perform any needed replacements
            cmd = cmd.replace(TAG_OUTPUT_DIR, os.path.dirname(image.image_under_operation))
            cmd = cmd.replace(TAG_PIL_SPLITTER, pil_splitter_path)
            cmd = cmd.replace(TAG_IMAGE_FILE, image.image_under_operation)
            logger.info('Running postsign command: ' + cmd)
            # NOTE(review): os.system runs the command through the shell; inputs
            # come from config, not untrusted users -- flagging, not changing.
            err = os.system(cmd)
            logger.info('Result: ' + str(err))
def __init__(self, image_path, img_config_parser, parsegen_config, authority,
             sign_id=None, crypto_params=None, imageinfo_class=None):
    """Stager for a single IoT image path.

    :param str image_path: Path to the image to stage; must be a readable file.
    :param img_config_parser: Image config parser (ConfigParser).
    :param parsegen_config: Parsegen config parser (ParsegenCfgParser).
    :param authority: Signing authority, forwarded to the base stager.
    :param str sign_id: Optional explicit sign id for the image.
    :param dict crypto_params: Optional crypto parameters forwarded to the
        imageinfo factory. Defaults to an empty dict.
    :param imageinfo_class: Optional imageinfo class override.
    :raises RuntimeError: when the image path is not a readable file.
    """
    # BUGFIX: the original used a mutable default argument (crypto_params={}),
    # which is shared across calls; use a None sentinel instead.
    if crypto_params is None:
        crypto_params = {}
    assert isinstance(image_path, str)
    assert isinstance(img_config_parser, ConfigParser)
    assert isinstance(parsegen_config, ParsegenCfgParser)
    if sign_id is not None:
        assert isinstance(sign_id, str)

    # Initialize the BaseStager
    super(ImagePathsStagerIot, self).__init__(authority)

    # Validate that the image path exists
    image_path = c_path.normalize(image_path)
    if not c_path.validate_file(image_path):
        raise RuntimeError('No read access to the image path: ' + image_path)

    # Put the image info object into the list
    imageinfo = self._create_imageinfo(img_config_parser, parsegen_config,
                                       sign_id, image_path,
                                       crypto_params=crypto_params,
                                       imageinfo_class=imageinfo_class)
    self._image_info_list.append(imageinfo)
def secdat(self, secdat_path):
    """Set the secdat file path, normalizing it and verifying read access.

    :raises RuntimeError: when a non-empty path is not a readable file.
    """
    if secdat_path:
        normalized = c_path.normalize(secdat_path)
        if not c_path.validate_file(normalized):
            raise RuntimeError('Cannot access the secdat file: ' + normalized)
        secdat_path = normalized
    self._secdat = secdat_path
    logger.info('Secdat path is set to: ' + str(secdat_path))
def chipset(self, chipset):
    """Select the chipset for the workspace.

    Updates the workspace, config paths and output dir, and picks up an
    existing secdat for the chipset if one is present.

    :raises RuntimeError: when the workspace has not been set yet.
    """
    # A workspace is a prerequisite for chipset selection.
    if self._workspace_dir_obj is None:
        raise RuntimeError('Please set workspace before setting the chipset.')

    # Log the switch when a chipset was already selected.
    try:
        current = self.chipset
        if current:
            logger.note('Switching chipset from "' + current + '" to "' + chipset + '"')
    except Exception:
        # No chipset selected yet; nothing to log.
        pass

    # Update the workspace and derived configuration for the new chipset.
    self._update_workspace(chipset)
    self.set_config_paths(*self._workspace_dir_obj.get_chipset_config_paths(chipset))
    self.output_dir = c_path.join(os.path.dirname(self._workspace_dir_obj.config_dir), 'secdat/' + chipset)

    # Reuse a previously generated secdat if it exists.
    existing_secdat = c_path.join(self.output_dir, 'sec.dat')
    self.secdat = existing_secdat if c_path.validate_file(existing_secdat) else None
def _get_config_path(self, chipset_dir):
    """Returns the config found in the chipset dir matching the naming
    conventions. If the config file is not found in the dir, None is returned.

    :param str chipset_dir: The directory in which to look for config path.
    :returns: config_file
    :rtype: (str)
    """
    config = None
    chipset_from_dir_name = os.path.basename(chipset_dir)
    for entry in os.listdir(chipset_dir):
        path = c_path.join(chipset_dir, entry)
        # Only readable files whose names match the XML naming pattern qualify.
        if c_path.validate_file(path) and re.match(defines.XML_NAME_REGEX, entry):
            # Extract the chipset from the file
            try:
                chipset_from_file = ConfigParser.get_chipset_from_file(path)
            except Exception as e:
                logger.warning('Skipping file: ' + entry + '\n'
                               '    ' + 'Failed to load the file: ' + str(e))
                continue
            # Check the naming conventions
            # NOTE(review): if several files match, the last one wins silently.
            if chipset_from_file == chipset_from_dir_name:
                config = path
            else:
                logger.warning('Skipping file: ' + entry + '\n'
                               '    ' + 'Chipset from file: "' + chipset_from_file +
                               '" does not match chipset from dir name: "' +
                               chipset_from_dir_name + '"')
        else:
            logger.debug2('Skipping file: ' + entry + '\n'
                          '    ' + 'Name does not match any of the naming convention patters')
    logger.debug2('Config path found for chipset_dir: ' + chipset_dir + '\n'
                  '    ' + 'config: ' + str(config))
    return config
def __init__(self, image_path, img_config_parser, sign_id=None):
    """Stager for a single image path.

    Validates the path, resolves the sign id and builds the ImageInfo.

    :raises RuntimeError: when the image path is not a readable file.
    """
    assert isinstance(image_path, str)
    assert isinstance(img_config_parser, ConfigParser)
    if sign_id is not None:
        assert isinstance(sign_id, str)

    # Initialize the BaseStager
    BaseStager.__init__(self)

    # The image must exist and be readable.
    image_path = c_path.normalize(image_path)
    if not c_path.validate_file(image_path):
        raise RuntimeError('No read access to the image path: ' + image_path)

    # Resolve the sign id (derived from the file name when not given) and
    # fetch its config block.
    sign_id = self._get_sign_id(img_config_parser, os.path.basename(image_path), sign_id)
    img_config_block = img_config_parser.get_config_for_sign_id(sign_id)

    # Build the single image info object for this stager.
    image_info = ImageInfo(image_path, sign_id, img_config_block, img_config_parser)

    # Destination name defaults to the source name unless the config overrides it.
    image_info.dest_image.image_name = image_info.src_image.image_name
    if img_config_block.output_file_name is not None:
        image_info.dest_image.image_name = img_config_block.output_file_name

    self._image_info_list.append(image_info)
def secimage_config(self, config_path):
    """Set the secimage config path after normalizing it and checking read access.

    :raises RuntimeError: when the path is not a readable file.
    """
    normalized = c_path.normalize(config_path)
    if not c_path.validate_file(normalized):
        raise RuntimeError(
            'Secimage config is inaccessible at given path: ' + str(normalized))
    self._secimage_config = normalized
def _execute_pilsplit(env, pilsplitter_target_base_dir, install_file_name,
                      sectools_builder_output, msmid_jtagid_dict, build_policy,
                      pilsplitter_path):
    """Run the pilsplit post-process step for each enabled sec-image policy.

    :returns: list of results from _execute_postprocess_policy, one per policy
        (empty when the pil splitter script is not present).
    """
    util = BuilderUtil(env)
    # Expand environment variables and resolve to real paths before use.
    pilsplitter_target_base_dir = os.path.realpath(util.envsubst(pilsplitter_target_base_dir))
    pilsplitter_path = os.path.realpath(util.envsubst(pilsplitter_path))
    install_file_name = util.envsubst(install_file_name)
    rt_list = []
    # NOTE(review): reconstructed so the policy loop runs only when the pil
    # splitter script exists and was loaded -- confirm against the original layout.
    if c_path.validate_file(pilsplitter_path):
        util.loadToolScript(pilsplitter_path)
        for i, sec_image_policy in enumerate(build_policy.enabled_sec_image_policies):
            rtt = _execute_postprocess_policy(env,
                                              sectools_install_base_dir=pilsplitter_target_base_dir,
                                              install_file_name=install_file_name,
                                              sectools_builder_output=sectools_builder_output[i],
                                              msmid_jtagid_dict=msmid_jtagid_dict,
                                              install_policies=build_policy.enabled_install_policies,
                                              sec_image_policy=sec_image_policy,
                                              postprocess=POSTPROCESS_PILSPLIT)
            rt_list.append(rtt)
    return rt_list
def query_data_prov(self, level1_namespace, level2_namespace, level3_namespace, asset_tag):
    """Query the DataProvisioner for an asset and return its file object.

    :raises RuntimeError: when config.xml is missing, the asset tag is absent
        from config.xml, or the asset file itself does not exist.
    """
    result = self.data_prov.query(level1_namespace, level2_namespace, level3_namespace)[0]

    # config.xml must exist for the queried namespace.
    if not result.config:
        raise RuntimeError('DataProvisioner: ' + c_path.join(result.path, 'config.xml') + ' is not found')

    # config.xml must define the requested asset tag.
    try:
        asset_file_name = str(getattr(result.config.METACONFIG, asset_tag))
    except Exception:
        raise RuntimeError('DataProvisioner: ' + asset_tag + ' is not found in config.xml')

    # The asset file itself must exist on disk.
    asset_file_path = os.path.join(result.path, asset_file_name)
    if not c_path.validate_file(asset_file_path):
        raise RuntimeError('DataProvisioner: ' + asset_file_path + ' is not found')

    self.log_once(asset_tag + " = " + asset_file_path)
    logger.debug("DataProvisioner config: " + str(result.config))
    return result.files[asset_file_name]
def _execute_pilsplit(env, pilsplitter_target_base_dir, install_file_name,
                      sectools_builder_output, build_policy, pilsplitter_path):
    """Run the pilsplit post-process step for each enabled sec-image policy.

    :returns: list of results from _execute_postprocess_policy, one per policy
        (empty when the pil splitter script is not present).
    """
    util = BuilderUtil(env)
    # Expand environment variables and resolve to real paths before use.
    pilsplitter_target_base_dir = os.path.realpath(
        util.envsubst(pilsplitter_target_base_dir))
    pilsplitter_path = os.path.realpath(util.envsubst(pilsplitter_path))
    install_file_name = util.envsubst(install_file_name)
    rt_list = []
    # NOTE(review): reconstructed so the policy loop runs only when the pil
    # splitter script exists and was loaded -- confirm against the original layout.
    if c_path.validate_file(pilsplitter_path):
        util.loadToolScript(pilsplitter_path)
        for i, sec_image_policy in enumerate(
                build_policy.enabled_sec_image_policies):
            rtt = _execute_postprocess_policy(
                env,
                sectools_install_base_dir=pilsplitter_target_base_dir,
                install_file_name=install_file_name,
                sectools_builder_output=sectools_builder_output[i],
                install_policies=build_policy.enabled_install_policies,
                sec_image_policy=sec_image_policy,
                postprocess=POSTPROCESS_PILSPLIT)
            rt_list.append(rtt)
    return rt_list
def _get_config_path(self, chipset_dir):
    """Returns the config found in the chipset dir matching the naming
    conventions. If the config file is not found in the dir, None is returned.

    :param str chipset_dir: The directory in which to look for config path.
    :returns: config_file
    :rtype: (str)
    """
    config = None
    chipset_from_dir_name = os.path.basename(chipset_dir)
    for entry in os.listdir(chipset_dir):
        path = c_path.join(chipset_dir, entry)
        # Only readable files with the expected XML name suffix qualify.
        if c_path.validate_file(path) and entry.endswith(defines.XML_NAME_ENDING):
            # Extract the chipset from the file
            try:
                chipset_from_file = ConfigParser.get_chipset_from_file(path)
            except Exception as e:
                logger.warning('Skipping file: ' + entry + '\n'
                               '    ' + 'Failed to load the file: ' + str(e))
                continue
            # Check the naming conventions
            # NOTE(review): if several files match, the last one wins silently.
            if chipset_from_file == chipset_from_dir_name:
                config = path
            else:
                logger.warning('Skipping file: ' + entry + '\n'
                               '    ' + 'Chipset from file: "' + chipset_from_file +
                               '" does not match chipset from dir name: "' +
                               chipset_from_dir_name + '"')
        else:
            logger.debug2('Skipping file: ' + entry + '\n'
                          '    ' + 'Name does not match any of the naming convention patters')
    logger.debug2('Config path found for chipset_dir: ' + chipset_dir + '\n'
                  '    ' + 'config: ' + str(config))
    return config
def __init__(self, image_path, img_config_parser, sign_id=None):
    """Stager for a single image path.

    :param str image_path: Path to the image to stage; must be a readable file.
    :param img_config_parser: Image config parser (ConfigParser).
    :param str sign_id: Optional explicit sign id; derived from the file name
        when not given.
    :raises RuntimeError: when the image path is not a readable file.
    """
    assert isinstance(image_path, str)
    assert isinstance(img_config_parser, ConfigParser)
    if sign_id is not None:
        assert isinstance(sign_id, str)

    # Initialize the BaseStager
    BaseStager.__init__(self)

    # Validate that the image path exists
    image_path = c_path.normalize(image_path)
    if not c_path.validate_file(image_path):
        raise RuntimeError('No read access to the image path: ' + image_path)

    # Validate the sign_id
    sign_id = self._get_sign_id(img_config_parser, os.path.basename(image_path), sign_id)

    # Get the config block for the sign id
    img_config_block = img_config_parser.get_config_for_sign_id(sign_id)

    # Create the one image info object
    image_info = ImageInfo(image_path, sign_id, img_config_block, img_config_parser)
    # Destination name defaults to the source name.
    image_info.dest_image.image_name = image_info.src_image.image_name

    # Check if the dest image name should be overriden
    if img_config_block.output_file_name is not None:
        image_info.dest_image.image_name = img_config_block.output_file_name

    # Put the image info object into the list
    self._image_info_list.append(image_info)
def discover(self):
    """Searches for the ecies binary in the predefined packaged path.

    :returns str: Path to the ecies binary.
    """
    module_name = BINARY_NAME.title()
    # Primary names first, then the alternates.
    candidates = bin_names(BINARY_NAME)
    candidates += alternate_bin_names(BINARY_NAME)
    module = ModuleNotFound
    for candidate in candidates:
        candidate_path = c_path.join(packaged_bin_folder, candidate)
        if c_path.validate_file(candidate_path):
            module = candidate_path
            logger.debug2(module_name + ': Found at - ' + module)
            break
    else:
        logger.debug2(module_name + ': Not Found')
    # Warn when the binary exists but is not executable.
    if module != ModuleNotFound and not os.access(module, os.X_OK):
        logger.error(module_name + ': Cannot execute. Missing execution permission.')
    return module
def dbgp_config(self, config_path):
    """Set the debugpolicy config: validates the path and builds a ConfigParser.

    :raises RuntimeError: when the path is not a readable file.
    """
    normalized = c_path.normalize(config_path)
    if not c_path.validate_file(normalized):
        raise RuntimeError(
            'debugpolicy config is inaccessible at given path: ' + str(normalized))
    self._config_parser = ConfigParser(normalized)
def query_data_path(self, T1_namespace, T2_namespace, T3_namespace, asset_tag):
    """Query the DataProvisioner for an asset file path, injecting the MRC
    index into the asset file name before each period.

    :returns: Normalized path to the asset file.
    :raises RuntimeError: when config.xml, the asset tag, or the asset file
        is missing.
    """
    query_result = self.data_prov.query(T1_namespace, T2_namespace, T3_namespace)[0]

    # Check if asset folder has been found correctly.
    if not query_result.config:
        raise RuntimeError('DataProvisioner: ' + c_path.join(query_result.path, 'config.xml') + ' is not found')

    # Check if config.xml is valid.
    try:
        asset_file_name = str(getattr(query_result.config.METACONFIG, asset_tag))
    except Exception:
        raise RuntimeError('DataProvisioner: ' + asset_tag + ' is not found in config.xml')
    # BUGFIX: removed the dead "return 'ERROR'" that followed the try/except;
    # it could short-circuit the rest of the lookup and returned a magic string
    # no caller of this raise-based API should see.

    # Inject the mrc_index into file string before the period.
    # BUGFIX: string.replace() is the removed Python-2 'string' module function;
    # the equivalent str method replaces every '.' identically.
    asset_file_name = asset_file_name.replace('.', str(self.cert_mrc_index) + '.')

    # Check if asset file exists.
    asset_file_path = os.path.join(query_result.path, asset_file_name)
    asset_file_path = os.path.normpath(asset_file_path)
    if not c_path.validate_file(asset_file_path):
        raise RuntimeError('DataProvisioner: ' + asset_file_path + ' is not found')

    self.log_once(asset_tag + " = " + asset_file_path)
    logger.debug("DataProvisioner config: " + str(query_result.config))
    return asset_file_path
def validate_file(path, var_name, error_list):
    """Append an error to error_list when path is set but not a readable file.

    Empty/None paths are silently accepted.
    """
    if not path:
        return
    normalized = c_path.normalize(path)
    if not c_path.validate_file(normalized):
        error_list.append(
            "Provided {0} \"{1}\" is not a valid file or does not have read access."
            .format(var_name, normalized))
def _post_process(self, image, pil_splitter_path, meta_build_path):
    ''' Replacement tags in postsign commands for images. '''
    # Tags substituted inside each postsign command string.
    TAG_OUTPUT_DIR = '$(OUTPUT_DIR)'
    TAG_PIL_SPLITTER = '$(PIL_SPLITTER)'
    TAG_IMAGE_FILE = '$(IMAGE_FILE)'
    REPL_META_PATH = '$(META_BUILD)'

    # Built-in pil split of the produced image, when requested.
    if image.pil_split:
        image_file = image.image_under_operation
        self._pil_split(image_file, os.path.splitext(image_file)[0])

    if image.post_process_commands:
        # Ensure pil splitter is available
        if pil_splitter_path.find(REPL_META_PATH) != -1:
            if not meta_build_path:
                raise RuntimeError('Metabuild path is not available for pil splitter')
            pil_splitter_path = pil_splitter_path.replace(REPL_META_PATH, meta_build_path)
        # NOTE(review): reconstructed with this check at the outer level so the
        # splitter path is validated whether or not the META_BUILD tag was
        # substituted -- confirm against the original layout.
        if not c_path.validate_file(pil_splitter_path):
            raise RuntimeError('Cannot access pil splitter at: ' + pil_splitter_path)

        # Run all the commands
        # NOTE(review): split() tokenizes on whitespace, so a command containing
        # spaces is broken into fragments -- confirm commands are single tokens
        # or whether a different delimiter was intended.
        for cmd in [c.strip() for c in image.post_process_commands.split()]:
            # Perform any needed replacements
            cmd = cmd.replace(TAG_OUTPUT_DIR, os.path.dirname(image.image_under_operation))
            cmd = cmd.replace(TAG_PIL_SPLITTER, pil_splitter_path)
            cmd = cmd.replace(TAG_IMAGE_FILE, image.image_under_operation)
            logger.info('Running postsign command: ' + cmd)
            # NOTE(review): os.system runs the command through the shell; inputs
            # come from config, not untrusted users -- flagging, not changing.
            err = os.system(cmd)
            logger.info('Result: ' + str(err))
def token_driver_home(self, token_driver_home_config):
    """Resolve the token driver path for the current OS and validate it exists.

    :raises ConfigError: when the resolved path is not a readable file.
    """
    # Pick the per-OS path from the config object.
    if sys.platform.startswith('linux'):
        driver_home = token_driver_home_config.linux
    else:
        driver_home = token_driver_home_config.windows
    self._token_driver_home = driver_home
    if c_path.validate_file(driver_home) is False:
        raise ConfigError(self.MESG_TOKEN_DRIVER_INVALID.format(driver_home))
def get_metabuild_log(self):
    """Assemble the combined meta-build log from the individual step logs.

    Concatenates the UFC copy, regeneration and build-loading logs (when
    present) into a single file next to self.log and returns its path.

    :returns str: Path to the assembled meta build log file.
    """
    data = []
    logdir = os.path.dirname(self.log)
    ufc_log = c_path.join(logdir, UFC_LOG)
    regenerate_log = c_path.join(logdir, REGENERATE_BUID_LOG)
    buildloading_log = c_path.join(logdir, FASTBOOT_COMPLETE_LOG)
    metabuild_log = c_path.join(logdir, META_BUILD_LOG)
    # remove meta build log from previous session
    if c_path.validate_file(metabuild_log):
        os.remove(metabuild_log)
    data.append('Meta build log:\n')
    # Step 1: meta build copy log (optional).
    if c_path.validate_file(ufc_log):
        data.append(SECTION_BREAK)
        data.append('Step 1: UltraFastCopy meta build')
        data.append('Meta Build: ' + self.meta_build_path)
        data.append(SECTION_BREAK)
        data.append(load_data_from_file(ufc_log))
    else:
        data.append('meta build copy skipped')
        data.append(SECTION_BREAK)
    # Step 2: meta build regeneration log (optional).
    if c_path.validate_file(regenerate_log):
        data.append(SECTION_BREAK)
        data.append('Step 2: Meta build regeneration')
        data.append('Meta Build: ' + logdir)
        data.append('Image Build:' + self.image_build_path)
        data.append(SECTION_BREAK)
        data.append(load_data_from_file(regenerate_log))
    else:
        data.append('meta build regeneration skipped')
        data.append(SECTION_BREAK)
    # Step 3: build loading log (optional; note there is no "skipped" marker
    # for this step, unlike steps 1 and 2).
    if c_path.validate_file(buildloading_log):
        data.append(SECTION_BREAK)
        data.append('Step 3: Meta build loading')
        data.append('Meta Build: ' + logdir)
        data.append(SECTION_BREAK)
        data.append(load_data_from_file(buildloading_log))
    store_data_to_file(metabuild_log, "\n".join(data))
    return metabuild_log
def validate_input_file(self, args):
    """Ensure an input file was specified and is readable.

    :raises RuntimeError: when no input file was given or it cannot be read.
    """
    input_file = args.input_file
    if input_file is None:
        raise RuntimeError("Please specify an input file")
    if not c_path.validate_file(input_file):
        raise RuntimeError(
            "input file {0} could not be opened for reading".format(input_file))
    logger.debug(
        "input file {0} has been validated successfully".format(input_file))
def log_contains(self, string):
    """Return True if the tool log file exists and contains the given string.

    :param str string: Substring to search for in the log.
    :rtype: bool
    """
    log_path = os.path.join(self.args.output_dir, TOOL_NAME + "_log.txt")
    if not c_path.validate_file(log_path):
        return False
    log_data = c_misc.load_data_from_file(log_path)
    # BUGFIX: the original used "log_data.find(string) > 0", which missed a
    # match at offset 0 (find returns 0 there, and 0 > 0 is False).
    return string in log_data
def __init__(self, path, pf, package=None):
    """Record the path/platform pair and resolve the package.

    An explicit package path is validated for read access; otherwise one is
    located via the platform's package glob.

    :raises RuntimeError: when the explicit package path is not readable.
    """
    self.path = path
    self.pf = pf
    if package is None:
        # No explicit package: locate one via the platform's glob pattern.
        self.package = self.find_package(pf.package_glob)
    else:
        if not c_path.validate_file(package):
            raise RuntimeError('Cannot access: ' + package)
        self.package = package
def set_input_file(self, i_file):
    """(str) Path to the input debugpolicy elf file. """
    elf_path = c_path.normalize(i_file)
    if not c_path.validate_file(elf_path):
        raise RuntimeError(
            'debugpolicy elf file is inaccessible at given path: ' + str(elf_path))
    # Record both the containing directory and the single-file list.
    self._input_files_base = os.path.dirname(elf_path)
    self._input_files_list = [elf_path]
def c_validate(self):
    """Validates the command line args provided by the user.

    :raises: RuntimeError if any error occurs.
    """
    args = self.parsed_args
    err = []

    # Check the input files
    if not args.image_file:
        err.append('Provide an image_file for processing.')
    if args.chipset is None:
        err.append('Provide chipset to process the image.')
    if args.integrity_check or args.sign or args.validate:
        # Validate the output image resulting from all operations
        args.validate = True
    else:
        err.append('Specify one or more operations to perform.')

    # Check root certificate and key
    if args.sign:
        if not args.root_cert:
            err.append('Provide a root certificate file.')
        if not args.root_key:
            err.append('Provide a root private key file.')

    # Check and sanitize any paths for read access
    # NOTE(review): read access is checked only when signing; image_file is not
    # validated for integrity-check/validate-only runs -- confirm intentional.
    for path in ['image_file', 'root_cert', 'root_key']:
        path_val = getattr(args, path, None)
        # the file path is normalized earlier as part of its callback
        if args.sign:
            if getattr(args, path) and not c_path.validate_file(path_val):
                err.append('Cannot access %s at: %s' % (path, path_val))

    # Check and sanitize paths for write access
    for path in ['output-dir']:
        path_val = getattr(args, path.replace('-', '_'), None)
        try:
            c_path.create_dir(path_val)
        except Exception as e:
            err.append('Cannot write at: %s\n Error: %s' % (path_val, e))

    # Raise error if any
    if err:
        if len(err) > 1:
            # Number the errors when there is more than one.
            err = [(' ' + str(idx + 1) + '. ' + error) for idx, error in enumerate(err)]
            err = 'Please check the command line args:\n\n' + '\n'.join(err)
        else:
            err = err[0]
        raise RuntimeError(err)
def _get_config_paths(self, chipset_dir):
    """Returns a tuple of the configs found in the chipset dir matching the
    naming conventions. If any of the config files is not found in the dir,
    its value is returned as None.

    :param str chipset_dir: The directory in which to look for config paths.
    :returns: (oem_config_path, qc_config_path, ui_config_path, user_config_path)
    :rtype: (tuple(str))
    """
    oem, qc, ui, user = None, None, None, None
    chipset_from_dir_name = os.path.basename(chipset_dir)
    for entry in os.listdir(chipset_dir):
        path = c_path.join(chipset_dir, entry)
        if c_path.validate_file(path):
            # Extract the chipset from the file
            try:
                chipset_from_file = ConfigParser.get_chipset_from_file(
                    path)
            except Exception as e:
                logger.debug2('Skipping file: ' + entry + '\n'
                              '    ' + 'Failed to load the file: ' + str(e))
                continue
            # Check the naming conventions; the filename suffix selects which
            # slot (oem/qc/ui/user) the path is assigned to.
            if chipset_from_file == chipset_from_dir_name:
                if entry.endswith(defines.XML_NAME_ENDING_OEM):
                    oem = path
                elif entry.endswith(defines.XML_NAME_ENDING_QC):
                    qc = path
                elif entry.endswith(defines.XML_NAME_ENDING_UI):
                    ui = path
                elif entry.endswith(defines.XML_NAME_ENDING_USER):
                    user = path
                else:
                    logger.debug2(
                        'Skipping file: ' + entry + '\n'
                        '    ' + 'Name does not match any of the naming convention patters'
                    )
            else:
                logger.debug2('Skipping file: ' + entry + '\n'
                              '    ' + 'Chipset from file: "' + chipset_from_file +
                              '" does not match chipset from dir name: "' +
                              chipset_from_dir_name + '"')
    logger.debug2('Config paths found for chipset_dir: ' + chipset_dir + '\n'
                  '    ' + 'oem: ' + str(oem) + '\n'
                  '    ' + 'qc: ' + str(qc) + '\n'
                  '    ' + 'ui: ' + str(ui) + '\n'
                  '    ' + 'user: ' + str(user))
    return oem, qc, ui, user
def _validate_config(self, cert_config, general_properties, openssl_config_file_paths):
    """Validates the openssl file paths and root-cert settings.

    :raises ConfigError: if any openssl config file is missing, the root
        cert count is out of range, or the multirootcert index exceeds it.
    """
    # Each required openssl file is paired with its error message template.
    file_checks = (
        (openssl_config_file_paths.attestation_certificate_extensions_path,
         "Attestation certificate extensions path is invalid: {0}"),
        (openssl_config_file_paths.ca_certificate_extensions_path,
         "CA certificate extensions path is invalid: {0}"),
        (openssl_config_file_paths.openssl_configfile_path,
         "Openssl config file path is invalid: {0}"),
    )
    for file_path, message in file_checks:
        # Mirror the original explicit 'is False' comparison exactly.
        if c_path.validate_file(file_path) is False:
            raise ConfigError(message.format(file_path))

    num_roots = general_properties.num_root_certs
    if num_roots == 0:
        raise ConfigError("Number of root certificates cannot be set zero")
    if num_roots > 16:
        raise ConfigError("Number of root certificates cannot be more than 16")

    multirootcert = cert_config.multirootcert
    if multirootcert and multirootcert.index >= num_roots:
        raise ConfigError("Multirootcert index {0} must be smaller than the number of root certs {1}"
                          .format(multirootcert.index, num_roots))
def _validate_config(self, cert_config, general_properties, openssl_config_file_paths):
    """Validates openssl config file paths and root certificate settings.

    :raises ConfigError: on any missing file or invalid root-cert config.
    """
    def ensure_file(path, template):
        # Keep the explicit 'is False' test used throughout this module.
        if c_path.validate_file(path) is False:
            raise ConfigError(template.format(path))

    paths = openssl_config_file_paths
    ensure_file(paths.attestation_certificate_extensions_path,
                "Attestation certificate extensions path is invalid: {0}")
    ensure_file(paths.ca_certificate_extensions_path,
                "CA certificate extensions path is invalid: {0}")
    ensure_file(paths.openssl_configfile_path,
                "Openssl config file path is invalid: {0}")

    root_count = general_properties.num_root_certs
    if root_count == 0:
        raise ConfigError("Number of root certificates cannot be set zero")
    if root_count > 16:
        raise ConfigError("Number of root certificates cannot be more than 16")

    multi = cert_config.multirootcert
    if multi and multi.index >= root_count:
        raise ConfigError(
            "Multirootcert index {0} must be smaller than the number of root certs {1}"
            .format(multi.index, root_count))
def __init__(self, image_path, img_config_parser, parsegen_config, authority, sign_id=None):
    """Sets up the stager with a single image taken from the given path.

    The path is normalized and verified for read access before the image
    info object is created and queued.
    """
    from sectools.features.isc.parsegen.config.parser import ParsegenCfgParser

    # Type sanity checks on the arguments.
    assert isinstance(image_path, str)
    assert isinstance(img_config_parser, ConfigParser)
    assert isinstance(parsegen_config, ParsegenCfgParser)
    if sign_id is not None:
        assert isinstance(sign_id, str)

    # Initialize the BaseStager
    BaseStager.__init__(self, authority)

    # Normalize and validate that the image path exists
    normalized_path = c_path.normalize(image_path)
    if not c_path.validate_file(normalized_path):
        raise RuntimeError('No read access to the image path: ' + normalized_path)

    # Create the image info object and put it into the list
    image_info = self._create_imageinfo(img_config_parser, parsegen_config,
                                        sign_id, normalized_path)
    self._image_info_list.append(image_info)
def generatesigned(chipset, output_dir, sig_package, sign_id=None, imagefile=None, metabuild=None, verbose=False, debug=False, quiet=False):
    """Returns the signed image file.

    Runs secimage twice: once (quietly, unless verbose) to resolve where
    the signature package is expected, then again after copying the
    package into place to produce the signed image.

    :returns: (retcode, errstr, signed_image, expected_path)
    """
    retcode, errstr = 0, ''
    signed_image, expected_path = '', ''

    # First pass: resolve the image info list to locate the expected
    # signature package path.
    image_list = launch_secimage(chipset=chipset, output_dir=output_dir,
                                 sign_id=sign_id, imagefile=imagefile,
                                 metabuild=metabuild, signer=SIGNER_REMOTE,
                                 sign=True, verbose=verbose, debug=debug,
                                 quiet=not verbose)

    # Copy the zip to where it is expected in the output directory.
    expected_package = SecimageRemoteClientSigner.get_signature_package_path(image_list[0])
    if sig_package != expected_package:
        c_path.create_dir(os.path.dirname(expected_package))
        copied, copy_err = copyFile(sig_package, expected_package)
        if not copied:
            raise RuntimeError(copy_err)

    try:
        # Second pass: actual signing run with the caller's quiet setting.
        image_list = launch_secimage(chipset=chipset, output_dir=output_dir,
                                     sign_id=sign_id, imagefile=imagefile,
                                     metabuild=metabuild, signer=SIGNER_REMOTE,
                                     sign=True, verbose=verbose, debug=debug,
                                     quiet=quiet)

        # Verify the signed image was generated
        signed_image = image_list[0].dest_image.image_path
        if not c_path.validate_file(signed_image):
            retcode = 1
            errstr = 'Failed to generate the signed image. ' + str(image_list[0].status.sign.error)
        else:
            expected_path = image_list[0].src_image.image_path
    except Exception as e:
        retcode = 1
        errstr = 'Exception occurred while running secimage. Exception - ' + str(e)

    return retcode, errstr, signed_image, expected_path