def _validate_tokens(config):
    '''Validates and hashes the tokens.

    Resolves each token's 'value' field (random placeholder or hex string)
    to an integer of 'token_size' bits, then appends a cSHAKE128-hashed
    companion token named '<name>Hashed' for each one.

    Raises:
        ValueError: if the configured token size is not byte aligned.
    '''
    config.setdefault('token_size', 128)
    config['token_size'] = check_int(config['token_size'])

    # The size needs to be byte aligned since the value is serialized to
    # bytes below before hashing.
    # Bug fix: the original message formatted token['name'], but 'token' is
    # only bound inside the loop further down, so reaching this raise
    # produced a NameError instead of the intended ValueError.
    if config['token_size'] % 8:
        raise ValueError('Size of token {} must be byte aligned'.format(
            config['token_size']))

    num_bytes = config['token_size'] // 8

    hashed_tokens = []
    for token in config['tokens']:
        random_or_hexvalue(token, 'value', config['token_size'])
        hashed_token = OrderedDict()
        hashed_token['name'] = token['name'] + 'Hashed'
        data = token['value'].to_bytes(num_bytes, byteorder='big')
        # Custom string chosen for life cycle KMAC App interface
        custom = 'LC_CTRL'.encode('UTF-8')
        hashobj = cSHAKE128.new(data=data, custom=custom)
        hashed_token['value'] = int.from_bytes(hashobj.read(num_bytes),
                                               byteorder='big')
        hashed_tokens.append(hashed_token)

    config['tokens'] += hashed_tokens
def _validate_item(item):
    '''Validates an item within a partition and normalizes its fields.'''
    # Fill in defaults for absent keys before type coercion.
    for key, default in (("name", "unknown_name"),
                         ("size", "0"),
                         ("isdigest", "false"),
                         ("ismubi", "false")):
        item.setdefault(key, default)

    # Coerce the raw string fields into their proper types.
    item["isdigest"] = check_bool(item["isdigest"])
    item["ismubi"] = check_bool(item["ismubi"])
    item["size"] = check_int(item["size"])
    width = item["size"] * 8

    if not item["ismubi"]:
        # Non-mubi: generate a random constant to be used when the
        # partition has not been initialized yet or is in an error state.
        random_or_hexvalue(item, "inv_default", width)
        return

    # Mubi values only come in specific widths.
    if not is_width_valid(width):
        raise RuntimeError("Mubi value {} has invalid width".format(
            item["name"]))
    # Interpret the default as a boolean and encode it as a mubi constant.
    item.setdefault("inv_default", "false")
    item["inv_default"] = check_bool(item["inv_default"])
    item["inv_default"] = mubi_value_as_int(item["inv_default"], width)
def _validate_item(item):
    '''Validates an item within a partition.

    Fills in default fields, coerces 'size' to an integer, and resolves
    'inv_default' to a (random or hex) constant of size * 8 bits.
    '''
    item.setdefault("name", "unknown_name")
    item.setdefault("size", "0")
    item.setdefault("isdigest", "false")
    # Make sure this has integer type.
    item["size"] = check_int(item["size"])
    # Generate random constant to be used when partition has
    # not been initialized yet or when it is in error state.
    # item["size"] is already an int here; the original re-ran check_int
    # on it redundantly.
    random_or_hexvalue(item, "inv_default", item["size"] * 8)
def merge_item_data(self, part, item):
    '''This validates and merges the item data into the memory map dict.

    Looks up the item in the memory map via its partition/item name,
    resolves its value (mubi-encoded boolean or random/hex constant),
    stores it into the memory map entry, and logs a hexdump of the value.

    Raises:
        RuntimeError: if the named item does not exist in the memory map.
    '''
    item.setdefault('name', 'unknown_name')
    mmap_item = self.get_item(part['name'], item['name'])
    if mmap_item is None:
        raise RuntimeError('Item {} does not exist'.format(item['name']))

    item_size = mmap_item['size']
    item_width = item_size * 8
    # if needed, resolve the mubi value first
    if mmap_item['ismubi']:
        # Build log annotations like "mubi ... kMultiBitBool8True".
        mubi_str = "mubi "
        mubi_val_str = " kMultiBitBool{}".format(item_width)
        item.setdefault("value", "false")
        item["value"] = check_bool(item["value"])
        mubi_val_str += "True" if item["value"] else "False"
        # Encode the boolean as a multi-bit constant of item_width bits.
        item["value"] = mubi_value_as_int(item["value"], item_width)
    else:
        mubi_str = ""
        mubi_val_str = ""
        item.setdefault('value', '0x0')
        item["value"] = check_int(item["value"])
        random_or_hexvalue(item, 'value', item_width)

    mmap_item['value'] = item['value']

    log.info('> Adding {}item {} with size {}B and value{}:'.format(
        mubi_str, item['name'], item_size, mubi_val_str))
    # Zero-padded hex string, two characters per byte.
    fmt_str = '{:0' + str(item_size * 2) + 'x}'
    value_str = fmt_str.format(item['value'])
    bytes_per_line = 8
    j = 0
    while value_str:
        # Print out max 64bit per line
        line_str = ''
        for k in range(bytes_per_line):
            # Consume the string from the least-significant end, one byte
            # (two hex chars) at a time; num_chars may be 0 once exhausted.
            num_chars = min(len(value_str), 2)
            line_str += value_str[-num_chars:]
            if k < bytes_per_line - 1:
                line_str += ' '
            value_str = value_str[:len(value_str) - num_chars]
        log.info(' {:06x}: '.format(j) + line_str)
        j += bytes_per_line

    # Key accounting
    item_check = item.copy()
    del item_check['name']
    del item_check['value']
    _check_unused_keys(item_check, 'in item {}'.format(item['name']))
def _validate_tokens(config):
    '''Validates and hashes the tokens.'''
    config.setdefault('token_size', 128)
    config['token_size'] = check_int(config['token_size'])
    token_size = config['token_size']

    # For every token, resolve its value and append a "Hashed" companion.
    derived = []
    for tok in config['tokens']:
        random_or_hexvalue(tok, 'value', token_size)
        # TODO: plug in PRESENT-based hashing algo or KMAC
        hashed = OrderedDict()
        hashed['name'] = tok['name'] + 'Hashed'
        hashed['value'] = tok['value']
        derived.append(hashed)

    config['tokens'] += derived
def _validate_scrambling(scr):
    '''Validates the scrambling configuration entry.'''
    # Normalize the size fields (given in bytes) to integers.
    for field, default in (("key_size", "16"),
                           ("iv_size", "8"),
                           ("cnst_size", "16")):
        scr.setdefault(field, default)
        scr[field] = check_int(scr[field])

    if "keys" not in scr:
        log.error("Missing key configuration.")
        exit(1)
    if "digests" not in scr:
        log.error("Missing digest configuration.")
        exit(1)

    key_width = scr["key_size"] * 8
    for key in scr["keys"]:
        key.setdefault("name", "unknown_key_name")
        key.setdefault("value", "<random>")
        random_or_hexvalue(key, "value", key_width)

    iv_width = scr["iv_size"] * 8
    cnst_width = scr["cnst_size"] * 8
    for dig in scr["digests"]:
        dig.setdefault("name", "unknown_key_name")
        dig.setdefault("iv_value", "<random>")
        dig.setdefault("cnst_value", "<random>")
        random_or_hexvalue(dig, "iv_value", iv_width)
        random_or_hexvalue(dig, "cnst_value", cnst_width)
def merge_item_data(self, part, item):
    '''This validates and merges the item data into the memory map dict.

    Looks up the item in the memory map via its partition/item name,
    resolves its value (random placeholder or hex string), stores it into
    the memory map entry, and logs a hexdump of the value.

    Raises:
        RuntimeError: if the named item does not exist in the memory map.
    '''
    item.setdefault('name', 'unknown_name')
    item.setdefault('value', '0x0')
    mmap_item = self.get_item(part['name'], item['name'])
    if mmap_item is None:
        raise RuntimeError('Item {} does not exist'.format(item['name']))

    item_size = mmap_item['size']
    random_or_hexvalue(item, 'value', item_size * 8)
    mmap_item['value'] = item['value']

    log.info('> Adding item {} with size {}B and value:'.format(
        item['name'], item_size))
    # Zero-padded hex string, two characters per byte.
    fmt_str = '{:0' + str(item_size * 2) + 'x}'
    value_str = fmt_str.format(item['value'])
    bytes_per_line = 8
    j = 0
    while value_str:
        # Print out max 64bit per line
        line_str = ''
        for k in range(bytes_per_line):
            # Consume the string from the least-significant end, one byte
            # (two hex chars) at a time; num_chars may be 0 once exhausted.
            num_chars = min(len(value_str), 2)
            line_str += value_str[-num_chars:]
            if k < bytes_per_line - 1:
                line_str += ' '
            value_str = value_str[:len(value_str) - num_chars]
        log.info(' {:06x}: '.format(j) + line_str)
        j += bytes_per_line

    # Key accounting
    item_check = item.copy()
    del item_check['name']
    del item_check['value']
    _check_unused_keys(item_check, 'in item {}'.format(item['name']))
def _validate_mmap(config):
    '''Validate the memory map configuration.

    Walks all partitions and their items, validating each, appending a
    digest item to partitions that request one, and checking that items
    fit into their partition and that all partitions fit into the OTP.
    Exits with an error on any space violation.
    '''
    # Get valid key names.
    key_names = []
    for key in config["scrambling"]["keys"]:
        key_names.append(key["name"])

    offset = 0
    num_part = 0
    for part in config["partitions"]:
        num_part += 1
        _validate_part(part, offset, key_names)

        # Loop over items within a partition
        for item in part["items"]:
            _validate_item(item, offset)
            log.info("> Item {} at offset {} with size {}".format(
                item["name"], offset, item["size"]))
            offset += check_int(item["size"])

        # Place digest at the end of a partition.
        if part["sw_digest"] or part["hw_digest"]:
            part["items"].append({
                "name":
                part["name"] + DIGEST_SUFFIX,
                "size":
                DIGEST_SIZE,
                "offset":
                check_int(part["offset"]) + check_int(part["size"]) -
                DIGEST_SIZE,
                "isdigest":
                "True",
                "inv_default":
                "<random>"
            })
            # Randomize the digest default.
            random_or_hexvalue(part["items"][-1], "inv_default",
                               DIGEST_SIZE * 8)
            log.info("> Adding digest {} at offset {} with size {}".format(
                part["name"] + DIGEST_SUFFIX, offset, DIGEST_SIZE))
            offset += DIGEST_SIZE

        # check offsets and size
        if offset > check_int(part["offset"]) + check_int(part["size"]):
            log.error("Not enough space in partitition "
                      "{} to accommodate all items. Bytes available "
                      "= {}, bytes requested = {}".format(
                          part["name"], part["size"],
                          offset - part["offset"]))
            exit(1)
        offset = check_int(part["offset"]) + check_int(part["size"])

    if offset > config["otp"]["size"]:
        # Bug fix: the original passed the values as extra lazy arguments
        # to log.error with '{}' placeholders; logging uses %-style lazy
        # formatting, so the sizes were never substituted into the message.
        log.error("OTP is not big enough to store all partitions. "
                  "Bytes available {}, bytes required {}".format(
                      config["otp"]["size"], offset))
        exit(1)

    log.info("Total number of partitions: {}".format(num_part))
    log.info("Bytes available in OTP: {}".format(config["otp"]["size"]))
    log.info("Bytes required for partitions: {}".format(offset))
def _validate_mmap(config):
    '''Validate the memory map configuration.

    Validates all partitions and items, distributes unallocated space,
    assigns offsets, appends digest items where requested, and enforces
    uniqueness, alignment and size constraints.

    Returns:
        A dict mapping partition name -> {'index': int, 'items': dict},
        where the inner dict maps item name -> item index.

    Raises:
        RuntimeError: on any structural, naming, alignment or size error.
    '''
    # Get valid key names.
    key_names = []
    for key in config["scrambling"]["keys"]:
        key_names.append(key["name"])

    if not isinstance(config['partitions'], list):
        raise RuntimeError('the "partitions" key must contain a list')

    # validate inputs before use
    allocated = 0
    for part in config["partitions"]:
        _validate_part(part, key_names)
        allocated += part['size']

    # distribute unallocated bits
    _dist_unused(config, allocated)

    # Determine offsets and generation dicts
    offset = 0
    part_dict = {}
    for j, part in enumerate(config["partitions"]):
        if part['name'] in part_dict:
            raise RuntimeError('Partition name {} is not unique'.format(
                part['name']))
        part['offset'] = offset
        if check_int(part['offset']) % SCRAMBLE_BLOCK_WIDTH:
            raise RuntimeError(
                "Partition {} offset must be 64bit aligned".format(
                    part['name']))
        log.info("Partition {} at offset {} size {}".format(
            part["name"], part["offset"], part["size"]))

        # Loop over items within a partition
        item_dict = {}
        for k, item in enumerate(part["items"]):
            if item['name'] in item_dict:
                raise RuntimeError('Item name {} is not unique'.format(
                    item['name']))
            item['offset'] = offset
            log.info("> Item {} at offset {} with size {}".format(
                item["name"], offset, item["size"]))
            offset += check_int(item["size"])
            item_dict[item['name']] = k

        # Place digest at the end of a partition.
        if part["sw_digest"] or part["hw_digest"]:
            digest_name = part["name"] + DIGEST_SUFFIX
            if digest_name in item_dict:
                raise RuntimeError(
                    'Digest name {} is not unique'.format(digest_name))
            item_dict[digest_name] = len(part["items"])
            part["items"].append({
                "name":
                digest_name,
                "size":
                DIGEST_SIZE,
                "offset":
                check_int(part["offset"]) + check_int(part["size"]) -
                DIGEST_SIZE,
                "ismubi":
                False,
                "isdigest":
                True,
                "inv_default":
                "<random>"
            })
            # Randomize the digest default.
            random_or_hexvalue(part["items"][-1], "inv_default",
                               DIGEST_SIZE * 8)

            # We always place the digest into the last 64bit word
            # of a partition.
            canonical_offset = (check_int(part["offset"]) +
                                check_int(part["size"]) - DIGEST_SIZE)
            if offset > canonical_offset:
                raise RuntimeError(
                    "Not enough space in partitition "
                    "{} to accommodate a digest. Bytes available "
                    "= {}, bytes allocated to items = {}".format(
                        part["name"], part["size"],
                        offset - part["offset"]))
            offset = canonical_offset
            log.info("> Adding digest {} at offset {} with size {}".format(
                digest_name, offset, DIGEST_SIZE))
            offset += DIGEST_SIZE

        # check offsets and size
        if offset > check_int(part["offset"]) + check_int(part["size"]):
            raise RuntimeError("Not enough space in partitition "
                               "{} to accommodate all items. Bytes available "
                               "= {}, bytes allocated to items = {}".format(
                                   part["name"], part["size"],
                                   offset - part["offset"]))
        offset = check_int(part["offset"]) + check_int(part["size"])
        part_dict.setdefault(part['name'], {'index': j, 'items': item_dict})

    if offset > config["otp"]["size"]:
        # Bug fix: the original passed the values as extra positional
        # arguments to RuntimeError, so the '{}' placeholders in the
        # message were never substituted. Format explicitly instead.
        raise RuntimeError(
            "OTP is not big enough to store all partitions. "
            "Bytes available {}, bytes required {}".format(
                config["otp"]["size"], offset))

    log.info("Total number of partitions: {}".format(
        len(config["partitions"])))
    log.info("Bytes available in OTP: {}".format(config["otp"]["size"]))
    log.info("Bytes required for partitions: {}".format(offset))

    # return the partition/item index dict
    return part_dict
def __init__(self, config):
    '''The constructor validates the configuration dict.

    Checks required keys ('seed', 'secded', 'tokens'), normalizes the
    numeric fields, seeds the RNG for reproducibility, validates the
    SECDED ECC matrix, appends a placeholder-hashed copy of each token,
    generates the life cycle state encoding words, and logs a Hamming
    distance histogram of the result.
    '''
    log.info('')
    log.info('Generate life cycle state')
    log.info('')
    if 'seed' not in config:
        log.error('Missing seed in configuration')
        exit(1)
    if 'secded' not in config:
        log.error('Missing secded configuration')
        exit(1)
    if 'tokens' not in config:
        log.error('Missing token configuration')
        exit(1)

    config['secded'].setdefault('data_width', 0)
    config['secded'].setdefault('ecc_width', 0)
    config['secded'].setdefault('ecc_matrix', [[]])
    config.setdefault('num_ab_words', 0)
    config.setdefault('num_cd_words', 0)
    config.setdefault('num_ef_words', 0)
    config.setdefault('min_hw', 0)
    config.setdefault('max_hw', 0)
    config.setdefault('min_hd', 0)
    config.setdefault('token_size', 128)

    config['seed'] = check_int(config['seed'])
    log.info('Seed: {0:x}'.format(config['seed']))
    log.info('')

    # Re-initialize with seed to make results reproducible.
    random.seed(LC_SEED_DIVERSIFIER + int(config['seed']))

    config['secded']['data_width'] = check_int(
        config['secded']['data_width'])
    config['secded']['ecc_width'] = check_int(
        config['secded']['ecc_width'])
    total_width = config['secded']['data_width'] + config['secded'][
        'ecc_width']

    config['num_ab_words'] = check_int(config['num_ab_words'])
    config['num_cd_words'] = check_int(config['num_cd_words'])
    config['num_ef_words'] = check_int(config['num_ef_words'])
    config['min_hw'] = check_int(config['min_hw'])
    config['max_hw'] = check_int(config['max_hw'])
    config['min_hd'] = check_int(config['min_hd'])
    config['token_size'] = check_int(config['token_size'])

    # NOTE(review): duplicate of the total_width computation above;
    # harmless but redundant.
    total_width = config['secded']['data_width'] + config['secded'][
        'ecc_width']

    # The Hamming weight window must lie within the word width and be
    # non-empty.
    if config['min_hw'] >= total_width or \
       config['max_hw'] > total_width or \
       config['min_hw'] >= config['max_hw']:
        log.error('Hamming weight constraints are inconsistent.')
        exit(1)

    # The HW window must be wide enough to accommodate the minimum HD.
    if config['max_hw'] - config['min_hw'] + 1 < config['min_hd']:
        log.error('Hamming distance constraint is inconsistent.')
        exit(1)

    # One matrix row per ECC bit.
    if config['secded']['ecc_width'] != len(
            config['secded']['ecc_matrix']):
        log.error('ECC matrix does not have correct number of rows')
        exit(1)

    log.info('SECDED Matrix:')
    # Validate each fanin index and normalize it to an int in place.
    for i, l in enumerate(config['secded']['ecc_matrix']):
        log.info('ECC Bit {} Fanin: {}'.format(i, l))
        for j, e in enumerate(l):
            e = check_int(e)
            if e < 0 or e >= total_width:
                log.error('ECC bit position is out of bounds')
                exit(1)
            config['secded']['ecc_matrix'][i][j] = e

    log.info('')

    # Resolve token values and append a "Hashed" companion for each.
    hashed_tokens = []
    for token in config['tokens']:
        random_or_hexvalue(token, 'value', config['token_size'])
        hashed_token = OrderedDict()
        hashed_token['name'] = token['name'] + 'Hashed'
        # TODO: plug in PRESENT-based hashing algo or KMAC
        hashed_token['value'] = token['value']
        hashed_tokens.append(hashed_token)
    config['tokens'] += hashed_tokens

    self.config = config

    # Generate new encoding words
    # NOTE(review): self.gen is expected to be initialized elsewhere
    # (not visible in this chunk) — confirm before relying on it here.
    word_types = ['ab_words', 'cd_words', 'ef_words']
    existing_words = []
    for w in word_types:
        while len(self.gen[w]) < self.config['num_' + w]:
            new_word = _get_new_state_word_pair(self.config,
                                                existing_words)
            self.gen[w].append(new_word)

    # Validate words (this must not fail at this point).
    _validate_words(self.config, existing_words)

    # Print out HD histogram
    self.gen['stats'] = hd_histogram(existing_words)

    log.info('')
    log.info('Hamming distance histogram:')
    log.info('')
    for bar in self.gen['stats']["bars"]:
        log.info(bar)
    log.info('')
    log.info('Minimum HD: {}'.format(self.gen['stats']['min_hd']))
    log.info('Maximum HD: {}'.format(self.gen['stats']['max_hd']))
    log.info('Minimum HW: {}'.format(self.gen['stats']['min_hw']))
    log.info('Maximum HW: {}'.format(self.gen['stats']['max_hw']))
    log.info('')
    log.info('Successfully generated life cycle state.')
    log.info('')