Esempio n. 1
0
def _decrypt_keyfile(keypath, password):
    """Read an encrypted key file from disk and return the decrypted key.

    keypath: path to the encrypted private-key file.
    password: passphrase used by securesystemslib to decrypt the key.
    """
    with open(keypath, 'rb') as file_object:
        encrypted_key = file_object.read().decode('utf-8')

    return securesystemslib.keys.decrypt_key(encrypted_key, password)


def publish_registry(parsed_arguments):
    """Sign Snapshot and Timestamp and publish the staged metadata.

    Loads the repository at REPO_DIR, decrypts the snapshot and timestamp
    private keys from the keystore, writes all dirty metadata (with a
    snapshot Merkle tree), and replaces the live metadata directory with
    the freshly staged one.
    """
    registry = repo_tool.load_repository(REPO_DIR)

    # NOTE(review): the passwords appear redacted ('******') in this
    # source -- confirm how they are supplied in the real deployment.
    snapshot_private = _decrypt_keyfile(
        os.path.join(KEYSTORE_DIR, SNAPSHOT_KEY_NAME), '******')
    timestamp_private = _decrypt_keyfile(
        os.path.join(KEYSTORE_DIR, TIMESTAMP_KEY_NAME), '******')

    registry.snapshot.load_signing_key(snapshot_private)
    registry.timestamp.load_signing_key(timestamp_private)

    registry.writeall(snapshot_merkle=True)

    # Replace the live metadata directory with the staged one.
    staged_dir = os.path.join(REPO_DIR, STAGED_METADATA_DIR)
    live_dir = os.path.join(REPO_DIR, METADATA_DIR)

    shutil.rmtree(live_dir, ignore_errors=True)
    shutil.copytree(staged_dir, live_dir)
def delete_conf(file):
    """Remove *file* from the top-level Targets role and cut a new release.

    The targets roleinfo is edited directly through tuf.roledb (the
    repository tool's API does not yet expose this -- see NOTE below),
    then Targets, Snapshot and Timestamp are re-signed and the staged
    metadata is written to the live repository.
    """
    repository = repo_tool.load_repository(os.path.join(os.getcwd(), REPO_DIR))
    repo_name = repository._repository_name

    # NOTE: Editing tuf.roledb directly is a stopgap that will be
    # replaced once the repository tool's API is refactored.
    targets_info = tuf.roledb.get_roleinfo('targets', repo_name)
    del targets_info['paths'][file]
    tuf.roledb.update_roleinfo('targets',
                               targets_info,
                               mark_role_as_dirty=True,
                               repository_name=repo_name)

    # Re-sign Targets, then the top-level Snapshot and Timestamp roles,
    # to produce a consistent new release.
    keystore = os.path.join(os.getcwd(), KEYSTORE_DIR)
    for role_name, key_name in (('targets', TARGETS_KEY_NAME),
                                ('snapshot', SNAPSHOT_KEY_NAME),
                                ('timestamp', TIMESTAMP_KEY_NAME)):
        private_key = import_privatekey_from_file(
            os.path.join(keystore, key_name), PW)
        getattr(repository, role_name).load_signing_key(private_key)

    root_info = tuf.roledb.get_roleinfo('root', repo_name)
    repository.writeall(consistent_snapshot=root_info['consistent_snapshot'])
    write_to_live_repo()
def load_manifests(arguments):
    """Add the target files listed in one or more manifests to a role.

    Each manifest in ``arguments.manifest`` is resolved against
    ``arguments.manifest_dir`` (or the current working directory when no
    directory is given) and the resulting file paths are added to
    ``arguments.role``'s 'paths' in the role database, after which the
    updated targets metadata is written.

    Raises:
        tuf.exceptions.Error: if --manifest was not supplied.
    """
    repo = repo_tool.load_repository(
        os.path.join(arguments.path, tuf.scripts.repo.REPO_DIR))
    if not arguments.manifest_dir:
        dirs = [os.getcwd()]
    else:
        dirs = arguments.manifest_dir

    # BUG FIX: the exception was previously instantiated but never
    # raised, so a missing --manifest silently fell through and produced
    # an empty update instead of an error.
    if not arguments.manifest:
        raise tuf.exceptions.Error("--Manifest must be specified with "
                                   "--load-manifest")
    roleinfo = tuf.roledb.get_roleinfo(arguments.role,
                                       repository_name=repo._repository_name)
    newfiles = []
    for manifest in arguments.manifest:
        newfiles += load_manifest(manifest, dirs, arguments)
    for filename in newfiles:
        roleinfo['paths'].update({filename: {}})
    tuf.roledb.update_roleinfo(arguments.role,
                               roleinfo,
                               mark_role_as_dirty=True,
                               repository_name=repo._repository_name)

    tuf.scripts.repo.write_updated_targets(arguments, repo)
def update_timestamp(arguments):
    """Update the expiration of the role named by the CLI arguments.

    The role's expiration is set from the parsed expiry arguments, the
    role is marked dirty, re-signed with either the key given via --sign
    or the role's default private key, and the repository is written and
    moved to the live metadata directory.
    """
    new_expiration = get_expiry(arguments)

    repo = repo_tool.load_repository(
        os.path.join(arguments.path, tuf.scripts.repo.REPO_DIR))
    role = get_arg_repo_role(repo, arguments)

    role.expiration = new_expiration
    repo.mark_dirty([arguments.role])

    # Prefer an explicitly supplied signing key over the role's default.
    if arguments.sign is None:
        keypath = get_role_privatekey(role, arguments)
    else:
        keypath = arguments.sign

    role.load_signing_key(
        tuf.scripts.repo.import_privatekey_from_file(keypath, arguments.pw))

    root_info = tuf.roledb.get_roleinfo('root', repo._repository_name)
    repo.writeall(consistent_snapshot=root_info['consistent_snapshot'])
    tuf.scripts.repo.write_to_live_repo(arguments)
Esempio n. 5
0
  def test_root_rotation_full(self):
    """Test that a client whose root is outdated by multiple versions and who
    has none of the latest nor next-to-latest root keys can still update and
    does so by incrementally verifying all roots until the most recent one. """
    # Load initial repository with 1.root.json == root.json, signed by "root"
    # key. This is the root.json that is already on the client.
    repository = repo_tool.load_repository(self.repository_directory)

    # 1st rotation: 1.root.json --> 2.root.json
    # 2.root.json will be signed by previous "root" key and by new "root2" key
    repository.root.load_signing_key(self.role_keys['root']['private'])
    repository.root.add_verification_key(self.role_keys['root2']['public'])
    repository.root.load_signing_key(self.role_keys['root2']['private'])
    repository.writeall()

    # 2nd rotation: 2.root.json --> 3.root.json
    # 3.root.json will be signed by previous "root2" key and by new "root3" key
    # (the original "root" key is unloaded and removed so it neither signs nor
    # counts towards any threshold from here on).
    repository.root.unload_signing_key(self.role_keys['root']['private'])
    repository.root.remove_verification_key(self.role_keys['root']['public'])
    repository.root.add_verification_key(self.role_keys['root3']['public'])
    repository.root.load_signing_key(self.role_keys['root3']['private'])
    repository.writeall()

    # Move staged metadata to "live" metadata
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Update on client 1.root.json --> 2.root.json --> 3.root.json
    # refresh() must chain-verify each intermediate root in turn.
    self.repository_updater.refresh()

    # Assert that client updated to the latest root from the repository
    self.assertTrue(filecmp.cmp(
      os.path.join(self.repository_directory, 'metadata', '3.root.json'),
      os.path.join(self.client_metadata_current, 'root.json')))
    def test_root_rotation(self):
        """Writing root must fail while fewer signing keys are loaded than
        root's threshold requires, and succeed once the threshold is met."""
        repository = repo_tool.load_repository(self.repository_directory)
        repository.root.threshold = 2

        repository.snapshot.load_signing_key(
            self.role_keys['snapshot']['private'])
        repository.timestamp.load_signing_key(
            self.role_keys['timestamp']['private'])

        # Errors, not enough signing keys to satisfy root's threshold.
        self.assertRaises(tuf.exceptions.UnsignedMetadataError,
                          repository.writeall)

        # Load a second signing key ('role1') so the threshold of 2 is met.
        repository.root.add_verification_key(self.role_keys['role1']['public'])
        repository.root.load_signing_key(self.role_keys['root']['private'])
        repository.root.load_signing_key(self.role_keys['role1']['private'])
        repository.writeall()

        # Add a third verification/signing key and raise the threshold to 3.
        repository.root.add_verification_key(
            self.role_keys['snapshot']['public'])
        repository.root.load_signing_key(self.role_keys['snapshot']['private'])
        repository.root.threshold = 3
        repository.writeall()

        # Move the staged metadata to the "live" metadata.
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))
        # The client must be able to follow both rotations.
        self.repository_updater.refresh()
Esempio n. 7
0
def add_verification_key(parsed_arguments):
  """Add one or more public keys to a top-level role's verification keys.

  Each path in ``parsed_arguments.pubkeys`` is imported and added to the
  role named by ``parsed_arguments.role``.  Root metadata is then
  re-written (without bumping its version number) and the staged
  metadata is moved to the live metadata directory.

  Raises:
      tuf.exceptions.Error: if --pubkeys is missing, or the given role
          is not one of the top-level roles.
  """
  if not parsed_arguments.pubkeys:
    raise tuf.exceptions.Error('--pubkeys must be given with --trust.')

  # Validate the role once, up front: the check is loop-invariant, so
  # re-testing it for every public key (as before) was redundant.
  if parsed_arguments.role not in ('root', 'targets', 'snapshot', 'timestamp'):
    raise tuf.exceptions.Error('The given --role is not a top-level role.')

  repository = repo_tool.load_repository(
      os.path.join(parsed_arguments.path, REPO_DIR))

  for keypath in parsed_arguments.pubkeys:
    imported_pubkey = import_publickey_from_file(keypath)
    # The validated role name doubles as the repository attribute name
    # (repository.root, repository.targets, ...), so dispatch directly.
    getattr(repository, parsed_arguments.role).add_verification_key(
        imported_pubkey)

  consistent_snapshot = tuf.roledb.get_roleinfo('root',
      repository._repository_name)['consistent_snapshot']
  repository.write('root', consistent_snapshot=consistent_snapshot,
      increment_version_number=False)

  # Move staged metadata directory to "live" metadata directory.
  write_to_live_repo(parsed_arguments)
Esempio n. 8
0
    def build(self, root_pub_path, targets_pub_path, timestamp_pub_path):
        """
        Create or update the repo.

        :param root_pub_path: path where the public root key lives
        :type root_pub_path: str
        :param targets_pub_path: path where the public targets key lives
        :type targets_pub_path: str
        :param timestamp_pub_path: path where the public timestamp key lives
        :type timestamp_pub_path: str
        """
        # Reuse an existing, non-empty repository directory; otherwise
        # start a fresh one.
        if exists(self._repo_path) and listdir(self._repo_path) != []:
            repository = load_repository(self._repo_path)
        else:
            repository = create_new_repository(self._repo_path)

        pub_root_key = import_rsa_publickey_from_file(root_pub_path)
        repository.root.add_verification_key(pub_root_key)
        repository.root.load_signing_key(self._key)
        repository.root.expiration = (datetime.datetime.now() +
                                      datetime.timedelta(days=EXPIRATION_DAYS))

        # NOTE(review): Snapshot reuses the Targets public key here --
        # confirm this key sharing is intended.
        pub_target_key = import_rsa_publickey_from_file(targets_pub_path)
        repository.targets.add_verification_key(pub_target_key)
        repository.snapshot.add_verification_key(pub_target_key)
        repository.targets.compressions = ["gz"]
        repository.snapshot.compressions = ["gz"]

        pub_timestamp_key = import_rsa_publickey_from_file(timestamp_pub_path)
        repository.timestamp.add_verification_key(pub_timestamp_key)

        # BUG FIX: a bare 'except: pass' previously swallowed *all*
        # exceptions, including SystemExit and KeyboardInterrupt.  The
        # partial write is best-effort by design (signature thresholds
        # may not be met yet), so ignore only ordinary exceptions.
        try:
            repository.write_partial()
        except Exception:
            pass
Esempio n. 9
0
  def test_root_rotation_discard_untrusted_version(self):
    """Test that client discards root.json version that failed the
    signature verification """
    repository = repo_tool.load_repository(self.repository_directory)

    # Rotate the root key without signing with the previous version key 'root'
    repository.root.remove_verification_key(self.role_keys['root']['public'])
    repository.root.add_verification_key(self.role_keys['root2']['public'])
    repository.root.load_signing_key(self.role_keys['root2']['private'])

    # 2.root.json
    repository.writeall()

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                  os.path.join(self.repository_directory, 'metadata'))

    # Refresh on the client side should fail because 2.root.json is not signed
    # with a threshold of previous keys
    with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
      self.repository_updater.refresh()

    # Every mirror must have failed on 2.root.json with a bad signature.
    for mirror_url, mirror_error in cm.exception.mirror_errors.items():
      self.assertTrue(mirror_url.endswith('/2.root.json'))
      self.assertTrue(isinstance(mirror_error,
          securesystemslib.exceptions.BadSignatureError))

    # Assert that the current 'root.json' on the client side is the trusted one
    # and 2.root.json is discarded
    self.assertTrue(filecmp.cmp(
        os.path.join(self.repository_directory, 'metadata', '1.root.json'),
        os.path.join(self.client_metadata_current, 'root.json')))
    def test_verify_root_with_duplicate_current_keyids(self):
        """
        Each root file is signed by the current root threshold of keys as well
        as the previous root threshold of keys. In each case, a keyid must
        only count once towards the threshold. Test that the new root
        signature verification implemented in _verify_root_self_signed()
        only counts one signature per keyid towards the threshold.
        """
        # Load repository with root.json == 1.root.json (available on client)
        # Signing key: "root", Threshold: 1
        repository = repo_tool.load_repository(self.repository_directory)

        # Add an additional signing key and bump the threshold to 2
        repository.root.load_signing_key(self.role_keys['root']['private'])
        repository.root.add_verification_key(self.role_keys['root2']['public'])
        repository.root.load_signing_key(self.role_keys['root2']['private'])
        repository.root.threshold = 2
        repository.writeall()

        # Move staged metadata to "live" metadata
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # Modify 2.root.json and list two signatures with the same keyid
        root2_path_live = os.path.join(self.repository_directory, 'metadata',
                                       '2.root.json')
        root2 = securesystemslib.util.load_json_file(root2_path_live)

        # Duplicate the first signature so the file carries two identical
        # keyids -- a naive count would then satisfy the threshold of 2.
        signatures = []
        signatures.append(root2['signatures'][0])
        signatures.append(root2['signatures'][0])

        root2['signatures'] = signatures

        # Write the tampered metadata back over the live 2.root.json.
        root2_fobj = tempfile.TemporaryFile()
        root2_fobj.write(tuf.repository_lib._get_written_metadata(root2))
        securesystemslib.util.persist_temp_file(root2_fobj, root2_path_live)

        # Update 1.root.json -> 2.root.json
        # Signature verification with new keys should fail because the threshold
        # can only be met by two signatures with the same keyid
        with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
            self.repository_updater.refresh()

        for mirror_url, mirror_error in six.iteritems(
                cm.exception.mirror_errors):
            self.assertTrue(mirror_url.endswith('/2.root.json'))
            self.assertTrue(
                isinstance(mirror_error,
                           securesystemslib.exceptions.BadSignatureError))

        # Assert that the current 'root.json' on the client side is the verified one
        self.assertTrue(
            filecmp.cmp(
                os.path.join(self.repository_directory, 'metadata',
                             '1.root.json'),
                os.path.join(self.client_metadata_current, 'root.json')))
    def test_root_rotation_missing_keys(self):
        """A 2.root.json that was written without meeting the previous root
        threshold must be rejected by the client as a bad signature."""
        repository = repo_tool.load_repository(self.repository_directory)

        # A partially written root.json (threshold = 2, and signed with only 1 key)
        # causes an invalid root chain later.
        repository.root.threshold = 2
        repository.root.load_signing_key(self.role_keys['root']['private'])
        repository.snapshot.load_signing_key(
            self.role_keys['snapshot']['private'])
        repository.timestamp.load_signing_key(
            self.role_keys['timestamp']['private'])

        # write() (unlike writeall()) tolerates the unmet threshold.
        repository.write('root')
        repository.write('snapshot')
        repository.write('timestamp')

        # Move the staged metadata to the "live" metadata.
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # Create a new, valid root.json.
        # Still not valid, because it is not written with a threshold of 2
        # previous keys
        repository.root.add_verification_key(self.role_keys['role1']['public'])
        repository.root.load_signing_key(self.role_keys['role1']['private'])

        repository.writeall()

        repository.root.add_verification_key(
            self.role_keys['snapshot']['public'])
        repository.root.load_signing_key(self.role_keys['snapshot']['private'])
        repository.root.threshold = 3
        repository.writeall()

        # Move the staged metadata to the "live" metadata.
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # The client must reject 2.root.json during the chained update.
        with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
            self.repository_updater.refresh()

        for mirror_url, mirror_error in six.iteritems(
                cm.exception.mirror_errors):
            self.assertTrue(mirror_url.endswith('/2.root.json'))
            self.assertTrue(
                isinstance(mirror_error,
                           securesystemslib.exceptions.BadSignatureError))

        # Assert that the current 'root.json' on the client side is the verified one
        self.assertTrue(
            filecmp.cmp(
                os.path.join(self.repository_directory, 'metadata',
                             '1.root.json'),
                os.path.join(self.client_metadata_current, 'root.json')))
  def test_root_rotation_unmet_threshold(self):
    """A new root version that does not carry a threshold of signatures
    from the *previous* root keys must cause the client refresh to fail."""
    repository = repo_tool.load_repository(self.repository_directory)

    # Add verification keys
    repository.root.add_verification_key(self.role_keys['root']['public'])
    repository.root.add_verification_key(self.role_keys['role1']['public'])

    repository.targets.add_verification_key(self.role_keys['targets']['public'])
    repository.snapshot.add_verification_key(self.role_keys['snapshot']['public'])
    repository.timestamp.add_verification_key(self.role_keys['timestamp']['public'])

    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])

    # Add signing keys
    repository.root.load_signing_key(self.role_keys['root']['private'])
    repository.root.load_signing_key(self.role_keys['role1']['private'])

    # Set root threshold
    repository.root.threshold = 2
    repository.writeall()

    # Unload Root's previous signing keys to ensure that these keys are not
    # used by mistake.
    repository.root.unload_signing_key(self.role_keys['role1']['private'])
    repository.root.unload_signing_key(self.role_keys['root']['private'])

    # Add new verification key
    repository.root.add_verification_key(self.role_keys['snapshot']['public'])

    # Remove one of the original signing keys
    repository.root.remove_verification_key(self.role_keys['role1']['public'])

    # Set the threshold for the new Root file, but note that the previous
    # threshold of 2 must still be met.
    repository.root.threshold = 1

    repository.root.load_signing_key(self.role_keys['role1']['private'])
    repository.root.load_signing_key(self.role_keys['snapshot']['private'])

    repository.snapshot.load_signing_key(self.role_keys['snapshot']['private'])
    repository.timestamp.load_signing_key(self.role_keys['timestamp']['private'])

    # We use write() rather than writeall() because the latter should fail due
    # to the missing self.role_keys['root'] signature.
    repository.write('root', increment_version_number=True)
    repository.write('snapshot', increment_version_number=True)
    repository.write('timestamp', increment_version_number=True)

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # The following refresh should fail because root must be signed by the
    # previous self.role_keys['root'] key, which wasn't loaded.
    self.assertRaises(tuf.exceptions.NoWorkingMirrorError,
        self.repository_updater.refresh)
Esempio n. 13
0
  def test_generate_snapshot_metadata(self):
    """generate_snapshot_metadata() must return metadata matching
    SNAPSHOT_SCHEMA, skip unknown file extensions, and raise FormatError
    for improperly formatted arguments."""
    # Test normal case.
    temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
    original_repository_path = os.path.join('repository_data',
                                            'repository')
    repository_directory = os.path.join(temporary_directory, 'repository')
    shutil.copytree(original_repository_path, repository_directory)
    metadata_directory = os.path.join(repository_directory,
                                      repo_lib.METADATA_STAGED_DIRECTORY_NAME)
    targets_directory = os.path.join(repository_directory, repo_lib.TARGETS_DIRECTORY_NAME)
    targets_filename = os.path.join(metadata_directory,
                                    repo_lib.TARGETS_FILENAME)
    version = 1
    expiration_date = '1985-10-21T13:20:00Z'

    # Load a valid repository so that top-level roles exist in roledb and
    # generate_snapshot_metadata() has roles to specify in snapshot metadata.
    storage_backend = securesystemslib.storage.FilesystemBackend()
    repository = repo_tool.Repository(repository_directory, metadata_directory,
                                      targets_directory, storage_backend)

    repository_junk = repo_tool.load_repository(repository_directory)

    # For testing purposes, store an invalid metadata file in the metadata directory
    # to verify that it isn't loaded by generate_snapshot_metadata().  Unknown
    # metadata file extensions should be ignored.
    invalid_metadata_file = os.path.join(metadata_directory, 'role_file.xml')
    with open(invalid_metadata_file, 'w') as file_object:
      file_object.write('bad extension on metadata file')

    # NOTE(review): this overwrites the full path computed above with the
    # bare role filename -- presumably generate_snapshot_metadata()
    # expects the base name, making the earlier assignment unused; confirm.
    targets_filename = 'targets'

    snapshot_metadata = \
      repo_lib.generate_snapshot_metadata(metadata_directory, version,
                                          expiration_date,
                                          targets_filename,
                                          storage_backend,
                                          consistent_snapshot=False)
    self.assertTrue(tuf.formats.SNAPSHOT_SCHEMA.matches(snapshot_metadata))


    # Test improperly formatted arguments.
    self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata,
                      3, version, expiration_date,
                      targets_filename, consistent_snapshot=False, storage_backend=storage_backend)
    self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata,
                      metadata_directory, '3', expiration_date,
                      targets_filename, storage_backend, consistent_snapshot=False)
    self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata,
                      metadata_directory, version, '3',
                      targets_filename, storage_backend, consistent_snapshot=False)
    self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata,
                      metadata_directory, version, expiration_date,
                      3, storage_backend, consistent_snapshot=False)
    # NOTE(review): here 3 sits in the storage_backend position and the
    # real backend lands in the next positional slot -- looks deliberate
    # (invalid storage_backend), but confirm the intended argument order.
    self.assertRaises(securesystemslib.exceptions.FormatError, repo_lib.generate_snapshot_metadata,
                      metadata_directory, version, expiration_date,
                      targets_filename, 3, storage_backend)
    def test_targets_key_revocation(self):
        """Revoke the Targets key (replacing it with Timestamp's key) and
        verify the client picks up the new key set on refresh."""
        # First verify that the Targets role is properly signed.  Calling
        # refresh() should not raise an exception.
        self.repository_updater.refresh()

        # There should only be one key for Targets.  Store the keyid to later
        # verify that it has been revoked.
        targets_roleinfo = tuf.roledb.get_roleinfo('targets',
                                                   self.repository_name)
        targets_keyid = targets_roleinfo['keyids']
        self.assertEqual(len(targets_keyid), 1)

        # Remove 'targets_keyid' and add a new key.  Verify that the client
        # detects the removal and addition of keys to the Targets role.
        repository = repo_tool.load_repository(self.repository_directory)
        repository.targets.remove_verification_key(
            self.role_keys['targets']['public'])
        repository.targets.add_verification_key(
            self.role_keys['timestamp']['public'])

        # Root, Snapshot, and Timestamp must be rewritten.  Root must be written
        # because the timestamp key has changed; Snapshot, because  Root has
        # changed, and Timestamp because it must sign its metadata with a new key.
        repository.root.load_signing_key(self.role_keys['root']['private'])
        # Note: we added Timestamp's key to the Targets role.
        repository.targets.load_signing_key(
            self.role_keys['timestamp']['private'])
        repository.snapshot.load_signing_key(
            self.role_keys['snapshot']['private'])
        repository.timestamp.load_signing_key(
            self.role_keys['timestamp']['private'])
        repository.writeall()

        # Move the staged metadata to the "live" metadata.
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # The client performs a refresh of top-level metadata to get the latest
        # changes.
        self.repository_updater.refresh()

        # Verify that the client is able to recognize that a new set of keys have
        # been added to the Targets role.
        # First, has 'targets_keyid' been removed?
        targets_roleinfo = tuf.roledb.get_roleinfo('targets',
                                                   self.repository_name)
        self.assertTrue(targets_keyid not in targets_roleinfo['keyids'])

        # Second, is Targets's new key correct?  The new key should be
        # Timestamp's.
        self.assertEqual(len(targets_roleinfo['keyids']), 1)
        timestamp_roleinfo = tuf.roledb.get_roleinfo('timestamp',
                                                     self.repository_name)
        self.assertEqual(targets_roleinfo['keyids'],
                         timestamp_roleinfo['keyids'])
Esempio n. 15
0
  def test_verify_root_with_current_keyids_and_threshold(self):
    """
    Each root file is signed by the current root threshold of keys as well
    as the previous root threshold of keys. Test that a root file which is
    not 'self-signed' with the current root threshold of keys causes the
    update to fail.
    """
    # Load repository with root.json == 1.root.json (available on client)
    # Signing key: "root", Threshold: 1
    repository = repo_tool.load_repository(self.repository_directory)

    # Rotate keys and update root: 1.root.json --> 2.root.json
    # Signing key: "root" (previous) and "root2" (current)
    # Threshold (for both): 1
    repository.root.load_signing_key(self.role_keys['root']['private'])
    repository.root.add_verification_key(self.role_keys['root2']['public'])
    repository.root.load_signing_key(self.role_keys['root2']['private'])
    # Remove the previous "root" key from the list of current
    # verification keys
    repository.root.remove_verification_key(self.role_keys['root']['public'])
    repository.writeall()

    # Move staged metadata to "live" metadata
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
        os.path.join(self.repository_directory, 'metadata'))

    # Intercept 2.root.json and tamper with the "root2" (current) key
    # signature, replacing it with same-length garbage.
    root2_path_live = os.path.join(
        self.repository_directory, 'metadata', '2.root.json')
    root2 = securesystemslib.util.load_json_file(root2_path_live)

    for idx, sig in enumerate(root2['signatures']):
      if sig['keyid'] == self.role_keys['root2']['public']['keyid']:
        sig_len = len(root2['signatures'][idx]['sig'])
        root2['signatures'][idx]['sig'] = "deadbeef".ljust(sig_len, '0')

    # (Fixed local-variable typo: was 'roo2_fobj'.)
    root2_fobj = tempfile.TemporaryFile()
    root2_fobj.write(tuf.repository_lib._get_written_metadata(root2))
    securesystemslib.util.persist_temp_file(root2_fobj, root2_path_live)

    # Update 1.root.json -> 2.root.json
    # Signature verification with the current keys should fail because we
    # replaced the "root2" signature with garbage.
    with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
      self.repository_updater.refresh()

    for mirror_url, mirror_error in cm.exception.mirror_errors.items():
      self.assertTrue(mirror_url.endswith('/2.root.json'))
      self.assertTrue(isinstance(mirror_error,
          securesystemslib.exceptions.BadSignatureError))

    # Assert that the current 'root.json' on the client side is the verified one
    self.assertTrue(filecmp.cmp(
      os.path.join(self.repository_directory, 'metadata', '1.root.json'),
      os.path.join(self.client_metadata_current, 'root.json')))
Esempio n. 16
0
def add_targets(parsed_arguments):
    """
    Add the target files listed in --add to the repository's targets
    directory and to the metadata of the role given by --role, then write
    a new release and move the staged metadata to the live directory.

    Side effects: copies files under <path>/<REPO_DIR>/targets, rewrites
    staged metadata on disk, and (unless the role is delegated) calls
    write_to_live_repo().
    """
    repo_targets_path = os.path.join(parsed_arguments.path, REPO_DIR,
                                     'targets')
    repository = repo_tool.load_repository(
        os.path.join(parsed_arguments.path, REPO_DIR))

    # Copy the target files in --path to the repo directory, and
    # add them to Targets metadata.  Make sure to also copy & add files
    # in directories (and subdirectories, if --recursive is True).
    for target_path in parsed_arguments.add:
        if os.path.isdir(target_path):
            for sub_target_path in repository.get_filepaths_in_directory(
                    target_path, parsed_arguments.recursive):
                add_target_to_repo(parsed_arguments, sub_target_path,
                                   repo_targets_path, repository)

        else:
            add_target_to_repo(parsed_arguments, target_path,
                               repo_targets_path, repository)

    # Honor the repository's existing consistent-snapshot setting when
    # writing metadata below.
    consistent_snapshot = tuf.roledb.get_roleinfo(
        'root', repository._repository_name)['consistent_snapshot']

    if parsed_arguments.role == 'targets':
        # Load the top-level, non-root, keys to make a new release.
        targets_private = import_privatekey_from_file(
            os.path.join(parsed_arguments.path, KEYSTORE_DIR,
                         TARGETS_KEY_NAME), parsed_arguments.targets_pw)
        repository.targets.load_signing_key(targets_private)

    # A delegated (non-top-level) role: write only that role's metadata and
    # return early -- Snapshot/Timestamp are not refreshed in this case.
    elif parsed_arguments.role not in ('root', 'snapshot', 'timestamp'):
        repository.write(parsed_arguments.role,
                         consistent_snapshot=consistent_snapshot,
                         increment_version_number=True)
        return

    # Update the required top-level roles, Snapshot and Timestamp, to make a new
    # release.  Automatically making a new release can be disabled via
    # --no_release.
    if not parsed_arguments.no_release:
        snapshot_private = import_privatekey_from_file(
            os.path.join(parsed_arguments.path, KEYSTORE_DIR,
                         SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw)
        timestamp_private = import_privatekey_from_file(
            os.path.join(parsed_arguments.path, KEYSTORE_DIR,
                         TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)

        repository.snapshot.load_signing_key(snapshot_private)
        repository.timestamp.load_signing_key(timestamp_private)

    repository.writeall(consistent_snapshot=consistent_snapshot)

    # Move staged metadata directory to "live" metadata directory.
    write_to_live_repo(parsed_arguments)
Esempio n. 17
0
    def list_targets(self):
        """
        Return the names of every target defined in the local TUF metadata.

        :returns: List of target names
        :rtype: list
        """
        from tuf.repository_tool import load_repository

        repo = load_repository(self._master_repo_dir)
        #  pylint: disable=no-member
        target_paths = repo.targets.target_files
        return [target.lstrip(path.sep) for target in target_paths]
    def test_root_rotation_max(self):
        """Test that client does not rotate beyond a configured upper bound,
        i.e. `current_version + MAX_NUMBER_ROOT_ROTATIONS`."""
        # NOTE: The nature of below root changes is irrelevant. Here we only want
        # the client to update but not beyond a configured upper bound.

        # 1.root.json --> 2.root.json (add root2 and root3 keys)
        repository = repo_tool.load_repository(self.repository_directory)
        repository.root.load_signing_key(self.role_keys['root']['private'])
        repository.root.add_verification_key(self.role_keys['root2']['public'])
        repository.root.load_signing_key(self.role_keys['root2']['private'])
        repository.root.add_verification_key(self.role_keys['root3']['public'])
        repository.root.load_signing_key(self.role_keys['root3']['private'])
        repository.writeall()

        # 2.root.json --> 3.root.json (change threshold)
        repository.root.threshold = 2
        repository.writeall()

        # 3.root.json --> 4.root.json (change threshold again)
        repository.root.threshold = 3
        repository.writeall()

        # Move staged metadata to "live" metadata
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # Assert that repo indeed has "4.root.json" and that it's the latest root
        self.assertTrue(
            filecmp.cmp(
                os.path.join(self.repository_directory, 'metadata',
                             '4.root.json'),
                os.path.join(self.repository_directory, 'metadata',
                             'root.json')))

        # Lower max root rotation cap so that client stops updating early.
        # tuf.settings is module-global state, hence the backup/restore below
        # so other tests are unaffected.
        max_rotation_backup = tuf.settings.MAX_NUMBER_ROOT_ROTATIONS
        tuf.settings.MAX_NUMBER_ROOT_ROTATIONS = 2

        # Update on client 1.root.json --> 2.root.json --> 3.root.json,
        # but stop before updating to 4.root.json
        self.repository_updater.refresh()

        # Assert that the client indeed only updated until 3.root.json
        self.assertTrue(
            filecmp.cmp(
                os.path.join(self.repository_directory, 'metadata',
                             '3.root.json'),
                os.path.join(self.client_metadata_current, 'root.json')))

        # reset the global rotation cap
        tuf.settings.MAX_NUMBER_ROOT_ROTATIONS = max_rotation_backup
Esempio n. 19
0
    def list_targets(self):
        """
        Return the names of all the targets defined in the local TUF metadata.

        :returns: List of target names
        :rtype: list
        """
        from tuf.repository_tool import load_repository

        repository = load_repository(self._master_repo_dir)
        #  pylint: disable=no-member
        names = []
        for target_file in repository.targets.target_files:
            names.append(target_file.lstrip(path.sep))
        return names
Esempio n. 20
0
    def test_root_rotation_missing_keys(self):
        """Verify that the client rejects a '2.root.json' written without a
        threshold of root signatures, raising BadSignatureError per mirror."""
        repository = repo_tool.load_repository(self.repository_directory)

        # A partially written root.json (threshold = 1, and not signed in this
        # case) causes an invalid root chain later.
        repository.snapshot.load_signing_key(
            self.role_keys['snapshot']['private'])
        repository.timestamp.load_signing_key(
            self.role_keys['timestamp']['private'])
        repository.write('root')
        repository.write('snapshot')
        repository.write('timestamp')

        # Move the staged metadata to the "live" metadata.
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # Create a new, valid root.json.
        repository.root.threshold = 2
        repository.root.add_verification_key(self.role_keys['role1']['public'])
        repository.root.load_signing_key(self.role_keys['root']['private'])
        repository.root.load_signing_key(self.role_keys['role1']['private'])

        repository.writeall()

        repository.root.add_verification_key(
            self.role_keys['snapshot']['public'])
        repository.root.load_signing_key(self.role_keys['snapshot']['private'])
        repository.root.threshold = 3
        repository.writeall()

        # Move the staged metadata to the "live" metadata.
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # BUGFIX: the previous try/except passed vacuously when refresh()
        # succeeded.  The refresh MUST fail; assertRaises enforces that, and
        # matches the style of the sibling rotation tests.
        with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
            self.repository_updater.refresh()

        for mirror_url, mirror_error in six.iteritems(
                cm.exception.mirror_errors):
            url_prefix = self.repository_mirrors['mirror1']['url_prefix']
            url_file = os.path.join(url_prefix, 'metadata', '2.root.json')

            # Verify that '2.root.json' is the culprit.
            self.assertEqual(url_file.replace('\\', '/'), mirror_url)
            self.assertTrue(
                isinstance(mirror_error,
                           securesystemslib.exceptions.BadSignatureError))
Esempio n. 21
0
    def build(self):
        """
        Generate snapshot.json[.gz] and targets.json[.gz]
        """
        self._repo = load_repository(self._repo_path)
        self._load_targets()

        ttl = datetime.timedelta(days=EXPIRATION_DAYS)

        self._repo.targets.load_signing_key(self._key)
        self._repo.snapshot.load_signing_key(self._key)
        self._repo.targets.compressions = ["gz"]
        self._repo.snapshot.compressions = ["gz"]
        self._repo.snapshot.expiration = datetime.datetime.now() + ttl
        self._repo.targets.expiration = datetime.datetime.now() + ttl
        self._repo.write_partial()
Esempio n. 22
0
def sign_role(parsed_arguments):
    """
    Sign the role named in --sign with the (encrypted) key at the given
    keypath, prompting for its password.  A delegated role additionally
    triggers re-signing of 'targets' and versioned writes of both.
    """
    registry = repo_tool.load_repository(REPO_DIR)

    role = parsed_arguments.sign[0]
    keypath = parsed_arguments.sign[1]

    password = securesystemslib.interface.get_password(
        'Enter a password for the encrypted key (' + repr(keypath) + '): ',
        confirm=False)

    with open(keypath, 'rb') as key_file:
        encrypted_key = key_file.read().decode('utf-8')

    role_privatekey = securesystemslib.keys.decrypt_key(encrypted_key,
                                                        password)

    top_level = {
        'targets': registry.targets,
        'root': registry.root,
        'snapshot': registry.snapshot,
        'timestamp': registry.timestamp,
    }

    if role in top_level:
        top_level[role].load_signing_key(role_privatekey)

    else:
        # A delegated role: also load the top-level targets key, since the
        # delegating 'targets' metadata must be re-signed as well.
        registry.targets(role).load_signing_key(role_privatekey)

        targets_keypath = os.path.join(KEYSTORE_DIR, TARGETS_KEY_NAME)
        password = securesystemslib.interface.get_password(
            'Enter a password for the encrypted key ('
            + repr(targets_keypath) + '): ',
            confirm=False)

        with open(targets_keypath, 'rb') as key_file:
            encrypted_key = key_file.read().decode('utf-8')
        targets_privatekey = securesystemslib.keys.decrypt_key(
            encrypted_key, password)

        registry.targets.load_signing_key(targets_privatekey)

        registry.write(role, increment_version_number=True)
        registry.write('targets', increment_version_number=True)

    # write the role that was signed. a call to --publish will write the
    # top-level metadata
    registry.writeall(snapshot_merkle=True)
    def test_root_rotation_unmet_new_threshold(self):
        """Test that client detects a root.json version that is not signed
        by a current threshold of signatures."""
        repository = repo_tool.load_repository(self.repository_directory)

        # Create a new, valid root.json.
        repository.root.threshold = 2
        repository.root.load_signing_key(self.role_keys['root']['private'])
        repository.root.add_verification_key(self.role_keys['root2']['public'])
        repository.root.load_signing_key(self.role_keys['root2']['private'])

        repository.writeall()

        # Increase the threshold and add a new verification key without
        # actually loading the signing key
        repository.root.threshold = 3
        repository.root.add_verification_key(self.role_keys['root3']['public'])

        # writeall fails as expected since the third signature is missing
        self.assertRaises(tuf.exceptions.UnsignedMetadataError,
                          repository.writeall)
        # write an invalid '3.root.json' as partially signed
        repository.write('root')

        # Move the staged metadata to the "live" metadata.
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # The following refresh should fail because root must be signed by the
        # current self.role_keys['root3'] key, which wasn't loaded.
        with self.assertRaises(tuf.exceptions.NoWorkingMirrorError) as cm:
            self.repository_updater.refresh()

        # Every mirror must have failed on '3.root.json' with a signature
        # verification error.
        for mirror_url, mirror_error in six.iteritems(
                cm.exception.mirror_errors):
            self.assertTrue(mirror_url.endswith('/3.root.json'))
            self.assertTrue(
                isinstance(mirror_error,
                           securesystemslib.exceptions.BadSignatureError))

        # Assert that the current 'root.json' on the client side is the verified one
        self.assertTrue(
            filecmp.cmp(
                os.path.join(self.repository_directory, 'metadata',
                             '2.root.json'),
                os.path.join(self.client_metadata_current, 'root.json')))
Esempio n. 24
0
    def get_expirations(self):
        """
        Return the expiration dates of the TUF metadata.

        :returns: A dictionary containing `datetime <https://docs.python.org/2/library/datetime.html#datetime.datetime>`_ values for the keys ``root``, ``targets``, ``snapshot`` and ``timestamp``.
        :rtype: dict
        """
        from tuf.repository_tool import load_repository

        repository = load_repository(self._master_repo_dir)
        # pylint: disable=no-member
        expirations = {}
        for role_name in ('root', 'targets', 'snapshot', 'timestamp'):
            expirations[role_name] = getattr(repository, role_name).expiration
        return expirations
Esempio n. 25
0
    def get_expirations(self):
        """
        Return the expiration dates of the TUF metadata.

        :returns: A dictionary containing `datetime <https://docs.python.org/2/library/datetime.html#datetime.datetime>`_ values for the keys ``root``, ``targets``, ``snapshot`` and ``timestamp``.
        :rtype: dict
        """
        from tuf.repository_tool import load_repository

        repository = load_repository(self._master_repo_dir)
        # pylint: disable=no-member
        roles = ('root', 'targets', 'snapshot', 'timestamp')
        return {name: getattr(repository, name).expiration for name in roles}
Esempio n. 26
0
    def test_generate_timestamp_metadata(self):
        # Test normal case: generate timestamp metadata from a copied fixture
        # repository and validate it against TIMESTAMP_SCHEMA.
        repository_name = 'test_repository'
        temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
        original_repository_path = os.path.join('repository_data',
                                                'repository')
        repository_directory = os.path.join(temporary_directory, 'repository')
        shutil.copytree(original_repository_path, repository_directory)
        metadata_directory = os.path.join(
            repository_directory, repo_lib.METADATA_STAGED_DIRECTORY_NAME)
        targets_directory = os.path.join(repository_directory,
                                         repo_lib.TARGETS_DIRECTORY_NAME)

        snapshot_filename = os.path.join(metadata_directory,
                                         repo_lib.SNAPSHOT_FILENAME)

        # Set valid generate_timestamp_metadata() arguments.
        version = 1
        expiration_date = '1985-10-21T13:20:00Z'

        # Load a valid repository so that top-level roles exist in roledb and
        # generate_snapshot_metadata() has roles to specify in snapshot metadata.
        # NOTE(review): 'repository' and 'repository_junk' are intentionally
        # unused locals -- constructing/loading them populates the roledb
        # state that generate_timestamp_metadata() reads.
        repository = repo_tool.Repository(repository_directory,
                                          metadata_directory,
                                          targets_directory, repository_name)

        repository_junk = repo_tool.load_repository(repository_directory,
                                                    repository_name)

        timestamp_metadata = repo_lib.generate_timestamp_metadata(
            snapshot_filename, version, expiration_date, repository_name)
        self.assertTrue(
            tuf.formats.TIMESTAMP_SCHEMA.matches(timestamp_metadata))

        # Test improperly formatted arguments: each call swaps one valid
        # argument for a value of the wrong type/format and must raise.
        self.assertRaises(securesystemslib.exceptions.FormatError,
                          repo_lib.generate_timestamp_metadata, 3, version,
                          expiration_date, repository_name)
        self.assertRaises(securesystemslib.exceptions.FormatError,
                          repo_lib.generate_timestamp_metadata,
                          snapshot_filename, '3', expiration_date,
                          repository_name)
        self.assertRaises(securesystemslib.exceptions.FormatError,
                          repo_lib.generate_timestamp_metadata,
                          snapshot_filename, version, '3', repository_name)
Esempio n. 27
0
def remove_verification_key(parsed_arguments):
    """
    Remove (distrust) the public keys given via --pubkeys from the top-level
    role given by --role, rewrite root (without bumping its version), and
    move the staged metadata to the live metadata directory.

    Raises tuf.exceptions.Error if --pubkeys is missing or --role is not a
    top-level role.
    """
    if not parsed_arguments.pubkeys:
        raise tuf.exceptions.Error('--pubkeys must be given with --distrust.')

    # Validate --role once, up front: the check is loop-invariant, and
    # performing it inside the per-key loop needlessly imported a key
    # before failing.
    if parsed_arguments.role not in ('root', 'targets', 'snapshot',
                                     'timestamp'):
        raise tuf.exceptions.Error('The given --role is not a top-level role.')

    repository = repo_tool.load_repository(
        os.path.join(parsed_arguments.path, REPO_DIR))

    # Resolve the role object once instead of re-branching per key.
    role = getattr(repository, parsed_arguments.role)

    for keypath in parsed_arguments.pubkeys:
        imported_pubkey = import_publickey_from_file(keypath)

        try:
            role.remove_verification_key(imported_pubkey)

        # It is assumed remove_verification_key() only raises
        # securesystemslib.exceptions.Error and
        # securesystemslib.exceptions.FormatError, and the latter is not
        # raised because a valid key should have been returned by
        # import_publickey_from_file().
        except securesystemslib.exceptions.Error:
            print(repr(keypath) + ' is not a trusted key.  Skipping.')

    consistent_snapshot = tuf.roledb.get_roleinfo(
        'root', repository._repository_name)['consistent_snapshot']
    # Root is rewritten without incrementing the version number.
    repository.write('root',
                     consistent_snapshot=consistent_snapshot,
                     increment_version_number=False)

    # Move staged metadata directory to "live" metadata directory.
    write_to_live_repo(parsed_arguments)
Esempio n. 28
0
    def build(self):
        """
        Generate snapshot.json[.gz] and targets.json[.gz]
        """
        self._repo = load_repository(self._repo_path)
        self._load_targets()

        # Sign, compress and set a fresh expiration for both roles, then
        # write whatever metadata is ready.
        for role in (self._repo.targets, self._repo.snapshot):
            role.load_signing_key(self._key)
            role.compressions = ["gz"]
            role.expiration = (datetime.datetime.now() +
                               datetime.timedelta(days=EXPIRATION_DAYS))

        self._repo.write_partial()
Esempio n. 29
0
def revoke(parsed_arguments):
    """
    Revoke the delegatee given by --delegatee from the role given by --role
    (either 'targets' or a delegated role), re-sign the affected metadata,
    and write a new release to the live metadata directory.
    """
    repository = repo_tool.load_repository(
        os.path.join(parsed_arguments.path, REPO_DIR))

    if parsed_arguments.role == 'targets':
        repository.targets.revoke(parsed_arguments.delegatee)

        targets_private = import_privatekey_from_file(
            os.path.join(parsed_arguments.path, KEYSTORE_DIR,
                         TARGETS_KEY_NAME), parsed_arguments.targets_pw)

        repository.targets.load_signing_key(targets_private)

    # A non-top-level role.
    else:
        repository.targets(parsed_arguments.role).revoke(
            parsed_arguments.delegatee)

        # The delegated role is re-signed with the key given via --sign.
        role_privatekey = import_privatekey_from_file(parsed_arguments.sign)

        repository.targets(
            parsed_arguments.role).load_signing_key(role_privatekey)

    # Update the required top-level roles, Snapshot and Timestamp, to make a new
    # release.  Automatically making a new release can be disabled via
    # --no_release.
    if not parsed_arguments.no_release:
        snapshot_private = import_privatekey_from_file(
            os.path.join(parsed_arguments.path, KEYSTORE_DIR,
                         SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw)
        timestamp_private = import_privatekey_from_file(
            os.path.join(parsed_arguments.path, KEYSTORE_DIR,
                         TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)

        repository.snapshot.load_signing_key(snapshot_private)
        repository.timestamp.load_signing_key(timestamp_private)

    # Preserve the repository's consistent-snapshot setting when writing.
    consistent_snapshot = tuf.roledb.get_roleinfo(
        'root', repository._repository_name)['consistent_snapshot']
    repository.writeall(consistent_snapshot=consistent_snapshot)

    # Move staged metadata directory to "live" metadata directory.
    write_to_live_repo(parsed_arguments)
    def test_with_tuf(self):
        # The same scenario outlined in test_without_tuf() is followed here, except
        # with a TUF client.  The TUF client performs a refresh of top-level
        # metadata, which also includes 'timestamp.json'.

        # Modify the timestamp file on the remote repository.  'timestamp.json'
        # must be properly updated and signed with 'repository_tool.py', otherwise
        # the client will reject it as invalid metadata.  The resulting
        # 'timestamp.json' should be valid metadata, but expired (as intended).
        repository = repo_tool.load_repository(self.repository_directory)

        key_file = os.path.join(self.keystore_directory, 'timestamp_key')
        timestamp_private = repo_tool.import_rsa_privatekey_from_file(
            key_file, 'password')

        repository.timestamp.load_signing_key(timestamp_private)

        # expire in 1 second.
        datetime_object = tuf.formats.unix_timestamp_to_datetime(
            int(time.time() + 1))
        repository.timestamp.expiration = datetime_object
        repository.write()

        # Move the staged metadata to the "live" metadata.
        shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
        shutil.copytree(
            os.path.join(self.repository_directory, 'metadata.staged'),
            os.path.join(self.repository_directory, 'metadata'))

        # Verify that the TUF client detects outdated metadata and refuses to
        # continue the update process.  Sleep for at least 2 seconds to ensure
        # 'repository.timestamp.expiration' is reached.
        time.sleep(2)
        try:
            self.repository_updater.refresh()

        except tuf.NoWorkingMirrorError as e:
            for mirror_url, mirror_error in six.iteritems(e.mirror_errors):
                self.assertTrue(
                    isinstance(mirror_error, tuf.ExpiredMetadataError))

        else:
            # BUGFIX: previously, a successful refresh() made this test pass
            # vacuously.  The expired timestamp must be rejected.
            self.fail('refresh() unexpectedly accepted expired metadata.')
Esempio n. 31
0
def add_target(parsed_arguments):
    """
    Register the file given in --add with a delegated role, sign that role
    with the (encrypted) key at the given keypath, and write the registry
    with merkle snapshots.
    """
    registry = repo_tool.load_repository(REPO_DIR)

    role = parsed_arguments.add[0]
    filepath = parsed_arguments.add[1]
    keypath = parsed_arguments.add[2]

    registry.targets(role).add_target(filepath)

    prompt = ('Enter a password for the encrypted key ('
              + repr(keypath) + '): ')
    password = securesystemslib.interface.get_password(prompt, confirm=False)

    signing_key = repo_tool.import_ed25519_privatekey_from_file(
        keypath, password=password)
    registry.targets(role).load_signing_key(signing_key)

    registry.writeall(snapshot_merkle=True)
Esempio n. 32
0
def remove_targets(parsed_arguments):
    """
    Remove the target files matching the glob patterns in --remove from the
    Targets metadata (or the role given by --role), re-sign, and write a new
    release to the live metadata directory.
    """
    repository = repo_tool.load_repository(
        os.path.join(parsed_arguments.path, REPO_DIR))

    # Remove target files from the Targets metadata (or the role specified in
    # --role) that match the glob patterns specified in --remove.
    remove_target_files_from_metadata(parsed_arguments, repository)

    # Examples of how the --pw command-line option is interpreted:
    # repo.py --init': parsed_arguments.pw = 'pw'
    # repo.py --init --pw my_password: parsed_arguments.pw = 'my_password'
    # repo.py --init --pw: The user is prompted for a password, as follows:
    # NOTE(review): the prompted value stored in parsed_arguments.pw is not
    # read again inside this function (the key loads below use the *_pw
    # options) -- confirm whether a downstream helper relies on it.
    if not parsed_arguments.pw:
        parsed_arguments.pw = securesystemslib.interface.get_password(
            prompt='Enter a password for the top-level role keys: ',
            confirm=True)

    targets_private = import_privatekey_from_file(
        os.path.join(parsed_arguments.path, KEYSTORE_DIR, TARGETS_KEY_NAME),
        parsed_arguments.targets_pw)
    repository.targets.load_signing_key(targets_private)

    # Load the top-level keys for Snapshot and Timestamp to make a new release.
    # Automatically making a new release can be disabled via --no_release.
    if not parsed_arguments.no_release:
        snapshot_private = import_privatekey_from_file(
            os.path.join(parsed_arguments.path, KEYSTORE_DIR,
                         SNAPSHOT_KEY_NAME), parsed_arguments.snapshot_pw)
        timestamp_private = import_privatekey_from_file(
            os.path.join(parsed_arguments.path, KEYSTORE_DIR,
                         TIMESTAMP_KEY_NAME), parsed_arguments.timestamp_pw)

        repository.snapshot.load_signing_key(snapshot_private)
        repository.timestamp.load_signing_key(timestamp_private)

    # Preserve the repository's consistent-snapshot setting when writing.
    consistent_snapshot = tuf.roledb.get_roleinfo(
        'root', repository._repository_name)['consistent_snapshot']
    repository.writeall(consistent_snapshot=consistent_snapshot)

    # Move staged metadata directory to "live" metadata directory.
    write_to_live_repo(parsed_arguments)
Esempio n. 33
0
def add_target(repo_dir, target):
    """
    Add 'target' to the Targets metadata of the TUF repository located at
    'repo_dir'/tufrepo, then re-sign all top-level roles and write new
    metadata with consistent snapshots enabled.

    NOTE: this changes the process working directory to 'repo_dir' and does
    not restore it afterwards.

    :param repo_dir: Directory containing the 'tufrepo' repository.
    :param target: Path of the target file to add to the Targets role.
    """
    os.chdir(repo_dir)
    repository = rt.load_repository('tufrepo')

    # Only the private halves of the keys are needed here for signing; the
    # public halves were added as verification keys when the repository was
    # created, so they are deliberately discarded.
    (_, private_root_key) = loadkey('root')
    (_, private_targets_key) = loadkey('targets')
    (_, private_snapshots_key) = loadkey('snapshot')
    (_, private_timestamps_key) = loadkey('timestamp')

    repository.root.load_signing_key(private_root_key)
    repository.targets.load_signing_key(private_targets_key)
    repository.snapshot.load_signing_key(private_snapshots_key)
    repository.timestamp.load_signing_key(private_timestamps_key)

    repository.status()
    repository.targets.add_targets([target])

    # Mark all top-level roles dirty so that writeall() regenerates each of
    # them, then write consistent-snapshot metadata.
    repository.mark_dirty(['root', 'snapshot', 'targets', 'timestamp'])
    repository.writeall(consistent_snapshot=True)
Esempio n. 34
0
    def reset_keys(self,
                   root_key_password=None,
                   targets_key_password=None,
                   snapshot_key_password=None,
                   timestamp_key_password=None):
        """
        Re-sign the TUF metadata for the repository.

        Call this if you've generated new root or metadata keys (because one
        of the keys has been compromised, for example) but you don't want to
        delete the repository and start again.

        :param root_key_password: Password to use for decrypting the TUF root private key. You'll be prompted for one if you don't supply it.
        :type root_key_password: str

        :param targets_key_password: Password to use for decrypting the TUF targets private key. You'll be prompted for one if you don't supply it.
        :type targets_key_password: str

        :param snapshot_key_password: Password to use for decrypting the TUF snapshot private key. You'll be prompted for one if you don't supply it.
        :type snapshot_key_password: str

        :param timestamp_key_password: Password to use for decrypting the TUF timestamp private key. You'll be prompted for one if you don't supply it.
        :type timestamp_key_password: str
        """
        from tuf.repository_tool import load_repository
        # Load repository object
        repository = load_repository(self._master_repo_dir)
        #  pylint: disable=no-member
        # Remove the existing verification keys from each top-level role...
        _remove_keys(repository.root)
        _remove_keys(repository.targets)
        _remove_keys(repository.snapshot)
        _remove_keys(repository.timestamp)
        # ...then re-add keys and re-sign the metadata via the shared helper
        # (which uses the supplied passwords for the private keys).
        self._add_metadata(repository,
                           root_key_password,
                           targets_key_password,
                           snapshot_key_password,
                           timestamp_key_password)
    def test_generate_snapshot_metadata(self):
        """Check generate_snapshot_metadata() output and its argument checks."""
        # Normal case: copy the pristine test repository into a scratch
        # directory so modifications never touch the originals.
        scratch_dir = tempfile.mkdtemp(dir=self.temporary_directory)
        pristine_repo = os.path.join("repository_data", "repository")
        repo_dir = os.path.join(scratch_dir, "repository")
        shutil.copytree(pristine_repo, repo_dir)

        metadata_dir = os.path.join(repo_dir, repo_lib.METADATA_STAGED_DIRECTORY_NAME)
        targets_dir = os.path.join(repo_dir, repo_lib.TARGETS_DIRECTORY_NAME)
        root_filename = os.path.join(metadata_dir, repo_lib.ROOT_FILENAME)
        targets_filename = os.path.join(metadata_dir, repo_lib.TARGETS_FILENAME)
        version = 1
        expiration_date = "1985-10-21T13:20:00Z"

        # Load a valid repository so that top-level roles exist in roledb and
        # generate_snapshot_metadata() has roles to specify in snapshot metadata.
        repository = repo_tool.Repository(repo_dir, metadata_dir, targets_dir)
        repository_junk = repo_tool.load_repository(repo_dir)

        # The role names (not the paths built above) are what the generator
        # expects for the filename arguments.
        root_filename = "root"
        targets_filename = "targets"
        snapshot_metadata = repo_lib.generate_snapshot_metadata(
            metadata_dir, version, expiration_date, root_filename,
            targets_filename, consistent_snapshot=False)
        self.assertTrue(tuf.formats.SNAPSHOT_SCHEMA.matches(snapshot_metadata))

        # Improperly formatted arguments: substitute a wrongly-typed value for
        # each positional argument in turn and expect tuf.FormatError.
        good_args = (metadata_dir, version, expiration_date,
                     root_filename, targets_filename)
        bad_values = (3, "3", "3", 3, 3)
        for position, bad_value in enumerate(bad_values):
            args = list(good_args)
            args[position] = bad_value
            self.assertRaises(tuf.FormatError,
                              repo_lib.generate_snapshot_metadata,
                              *args,
                              consistent_snapshot=False)

        # consistent_snapshot itself must be a boolean.
        self.assertRaises(tuf.FormatError,
                          repo_lib.generate_snapshot_metadata,
                          *good_args,
                          3)
Esempio n. 36
0
  def test_without_tuf(self):
    # Scenario:
    # 'timestamp.json' specifies the latest version of the repository files.
    # A client should only accept the same version number (specified in the
    # file) of the metadata, or greater.  A version number less than the one
    # currently trusted should be rejected.  A non-TUF client may use a
    # different mechanism for determining versions of metadata, but version
    # numbers are used in this integration because that is what TUF uses.
    #
    # Modify the repository's 'timestamp.json' so that a new version is
    # generated and accepted by the client, and back up the previous version.
    # The previous version is then returned the next time the client requests
    # an update.  A non-TUF client (without a way to detect older versions of
    # metadata, and thus updates) is expected to download older metadata and
    # outdated files.  Verify that the older version of 'timestamp.json' is
    # downloaded by the non-TUF client.

    # Backup the current version of 'timestamp'.  It will be used as the
    # outdated version returned to the client.  The repository tool removes
    # obsolete metadata, so do *not* save the backup version in the
    # repository's metadata directory.
    timestamp_path = os.path.join(self.repository_directory, 'metadata',
                                  'timestamp.json')
    backup_timestamp = os.path.join(self.repository_directory,
                                    'timestamp.json.backup')
    shutil.copy(timestamp_path, backup_timestamp)

    # The fileinfo (length and hashes) of the previous version is saved to
    # verify that it is indeed accepted later by the non-TUF client.
    length, hashes = tuf.util.get_file_details(backup_timestamp)
    previous_fileinfo = tuf.formats.make_fileinfo(length, hashes)

    # Modify the timestamp file on the remote repository: re-sign it with the
    # timestamp key so that a new, valid version is produced.
    repository = repo_tool.load_repository(self.repository_directory)
    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    timestamp_private = repo_tool.import_rsa_privatekey_from_file(key_file,
                                                                  'password')
    repository.timestamp.load_signing_key(timestamp_private)

    # Set an arbitrary expiration so that the repository tool generates a new
    # version.
    repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 12, 12)
    repository.write()

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Save the fileinfo of the new version generated to verify that it is
    # saved by the client.
    length, hashes = tuf.util.get_file_details(timestamp_path)
    new_fileinfo = tuf.formats.make_fileinfo(length, hashes)

    url_prefix = self.repository_mirrors['mirror1']['url_prefix']
    url_file = os.path.join(url_prefix, 'metadata', 'timestamp.json')
    client_timestamp_path = os.path.join(self.client_directory, 'metadata',
                                         'current', 'timestamp.json')

    # A non-TUF client simply fetches 'timestamp.json' directly, without any
    # version or signature checks.
    six.moves.urllib.request.urlretrieve(url_file, client_timestamp_path)

    length, hashes = tuf.util.get_file_details(client_timestamp_path)
    download_fileinfo = tuf.formats.make_fileinfo(length, hashes)

    # Verify 'download_fileinfo' is equal to the new version.
    self.assertEqual(download_fileinfo, new_fileinfo)

    # Restore the previous version of 'timestamp.json' on the remote repository
    # and verify that the non-TUF client downloads it (expected, but not ideal).
    shutil.move(backup_timestamp, timestamp_path)

    six.moves.urllib.request.urlretrieve(url_file, client_timestamp_path)

    length, hashes = tuf.util.get_file_details(client_timestamp_path)
    download_fileinfo = tuf.formats.make_fileinfo(length, hashes)

    # Verify 'download_fileinfo' is equal to the previous version.
    self.assertEqual(download_fileinfo, previous_fileinfo)
    self.assertNotEqual(download_fileinfo, new_fileinfo)
Esempio n. 37
0
  def test_with_tuf(self):
    # The same scenario outlined in test_without_tuf() is followed here, except
    # with a TUF client (scenario description provided in the opening comment
    # block of that test case.)  The TUF client performs a refresh of top-level
    # metadata, which also includes 'timestamp.json'.

    # Backup the current version of 'timestamp'.  It will be used as the
    # outdated version returned to the client.  The repository tool removes
    # obsolete metadata, so do *not* save the backup version in the
    # repository's metadata directory.
    timestamp_path = os.path.join(self.repository_directory, 'metadata',
                                  'timestamp.json')
    backup_timestamp = os.path.join(self.repository_directory,
                                    'timestamp.json.backup')
    shutil.copy(timestamp_path, backup_timestamp)

    # The fileinfo of the previous version is saved.
    # NOTE(review): 'previous_fileinfo' is not used later in this test; it
    # appears to be kept only for parity with test_without_tuf() — confirm.
    length, hashes = tuf.util.get_file_details(backup_timestamp)
    previous_fileinfo = tuf.formats.make_fileinfo(length, hashes)

    # Modify the timestamp file on the remote repository: re-sign it with the
    # timestamp key so that a new, valid version is produced.
    repository = repo_tool.load_repository(self.repository_directory)
    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    timestamp_private = repo_tool.import_rsa_privatekey_from_file(key_file,
                                                                  'password')
    repository.timestamp.load_signing_key(timestamp_private)

    # Set an arbitrary expiration so that the repository tool generates a new
    # version.
    repository.timestamp.expiration = datetime.datetime(2030, 1, 1, 12, 12)
    repository.write()

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Save the fileinfo of the new version generated to verify that it is
    # saved by the client.
    length, hashes = tuf.util.get_file_details(timestamp_path)
    new_fileinfo = tuf.formats.make_fileinfo(length, hashes)

    # Refresh top-level metadata, including 'timestamp.json'.  Installation of
    # new version of 'timestamp.json' is expected.
    self.repository_updater.refresh()

    client_timestamp_path = os.path.join(self.client_directory, 'metadata',
                                         'current', 'timestamp.json')
    length, hashes = tuf.util.get_file_details(client_timestamp_path)
    download_fileinfo = tuf.formats.make_fileinfo(length, hashes)

    # Verify 'download_fileinfo' is equal to the new version.
    self.assertEqual(download_fileinfo, new_fileinfo)

    # Restore the previous version of 'timestamp.json' on the remote
    # repository, so that it now serves outdated (replayed) metadata.
    shutil.move(backup_timestamp, timestamp_path)
    logger.info('Moving the timestamp.json backup to the current version.')

    # Verify that the TUF client detects replayed metadata and refuses to
    # continue the update process.
    try:
      self.repository_updater.refresh()

    # Verify that the specific 'tuf.ReplayedMetadataError' is raised by each
    # mirror.
    except tuf.NoWorkingMirrorError as exception:
      for mirror_url, mirror_error in six.iteritems(exception.mirror_errors):
        url_prefix = self.repository_mirrors['mirror1']['url_prefix']
        url_file = os.path.join(url_prefix, 'metadata', 'timestamp.json')

        # Verify that 'timestamp.json' is the culprit.
        self.assertEqual(url_file, mirror_url)
        self.assertTrue(isinstance(mirror_error, tuf.ReplayedMetadataError))

    else:
      self.fail('TUF did not prevent a replay attack.')
Esempio n. 38
0
  def test_with_tuf(self):
    # Scenario:
    # An attacker tries to trick the client into installing files indicated by
    # a previous release of its corresponding metadata.  The outdated metadata
    # is properly named and was previously valid, but is no longer current
    # according to the latest 'snapshot.json' role.  Generate a new snapshot of
    # the repository after modifying a target file of 'role1.json'.

    # Backup 'role1.json' (the delegated role to be updated, and then inserted
    # again for the mix-and-match attack.)
    role1_path = os.path.join(self.repository_directory, 'metadata', 'targets',
                                  'role1.json')
    backup_role1 = os.path.join(self.repository_directory, 'role1.json.backup')
    shutil.copy(role1_path, backup_role1)

    # Backup 'file3.txt', specified by 'role1.json'.
    file3_path = os.path.join(self.repository_directory, 'targets', 'file3.txt')
    shutil.copy(file3_path, file3_path + '.backup')

    # Re-generate the required metadata on the remote repository.  The affected
    # metadata must be properly updated and signed with 'repository_tool.py',
    # otherwise the client will reject them as invalid metadata.  The resulting
    # metadata should be valid metadata.
    repository = repo_tool.load_repository(self.repository_directory)

    # Load the signing keys so that newly generated metadata is properly signed.
    timestamp_keyfile = os.path.join(self.keystore_directory, 'timestamp_key')
    role1_keyfile = os.path.join(self.keystore_directory, 'delegation_key')
    snapshot_keyfile = os.path.join(self.keystore_directory, 'snapshot_key')
    timestamp_private = \
      repo_tool.import_rsa_privatekey_from_file(timestamp_keyfile, 'password')
    role1_private = \
      repo_tool.import_rsa_privatekey_from_file(role1_keyfile, 'password')
    snapshot_private = \
      repo_tool.import_rsa_privatekey_from_file(snapshot_keyfile, 'password')

    repository.targets('role1').load_signing_key(role1_private)
    repository.snapshot.load_signing_key(snapshot_private)
    repository.timestamp.load_signing_key(timestamp_private)

    # Modify a 'role1.json' target file, and add it to its metadata so that a
    # new version is generated.
    # NOTE(review): the written text mentions 'role2', but 'file3.txt' belongs
    # to 'role1'; the exact content appears arbitrary for this test — confirm.
    with open(file3_path, 'wt') as file_object:
      file_object.write('This is role2\'s target file.')
    repository.targets('role1').add_target(file3_path)

    repository.write()

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Insert the previously valid 'role1.json'.  The TUF client should reject it.
    shutil.move(backup_role1, role1_path)

    # Verify that the TUF client detects unexpected metadata (previously valid,
    # but not up-to-date with the latest snapshot of the repository) and refuses
    # to continue the update process.
    # Refresh top-level metadata so that the client is aware of the latest
    # snapshot of the repository.
    self.repository_updater.refresh()

    try:
      self.repository_updater.targets_of_role('targets/role1')

    # Verify that the specific 'tuf.BadVersionNumberError' exception is raised
    # by each mirror.
    except tuf.NoWorkingMirrorError as exception:
      for mirror_url, mirror_error in six.iteritems(exception.mirror_errors):
        url_prefix = self.repository_mirrors['mirror1']['url_prefix']
        url_file = os.path.join(url_prefix, 'metadata', 'targets', 'role1.json')

        # Verify that 'role1.json' is the culprit.
        self.assertEqual(url_file, mirror_url)
        self.assertTrue(isinstance(mirror_error, tuf.BadVersionNumberError))

    else:
      self.fail('TUF did not prevent a mix-and-match attack.')
Esempio n. 39
0
  def test_with_tuf(self):
    # Two tests are conducted here.
    #
    # Test 1: If we find that the timestamp acquired from a mirror indicates
    #         that there is no new snapshot file, and our current snapshot
    #         file is expired, is it recognized as such?
    # Test 2: If an expired timestamp is downloaded, is it recognized as such?


    # Test 1 Begin:
    #
    # Addresses this issue: https://github.com/theupdateframework/tuf/issues/322
    #
    # If time has passed and our snapshot or targets role is expired, and
    # the mirror whose timestamp we fetched doesn't indicate the existence of a
    # new snapshot version, we still need to check that it's expired and notify
    # the software update system / application / user. This test creates that
    # scenario. The correct behavior is to raise an exception.
    #
    # Background: Expiration checks (updater._ensure_not_expired) were
    # previously conducted when the metadata file was downloaded. If no new
    # metadata file was downloaded, no expiry check would occur. In particular,
    # while root was checked for expiration at the beginning of each
    # updater.refresh() cycle, and timestamp was always checked because it was
    # always fetched, snapshot and targets were never checked if the user did
    # not receive evidence that they had changed. This bug allowed a class of
    # freeze attacks.
    # That bug was fixed and this test tests that fix going forward.

    # Modify the timestamp file on the remote repository.  'timestamp.json'
    # must be properly updated and signed with 'repository_tool.py', otherwise
    # the client will reject it as invalid metadata.

    # Load the repository
    repository = repo_tool.load_repository(self.repository_directory)

    # Load the timestamp and snapshot keys, since we will be signing a new
    # timestamp and a new snapshot file.
    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    timestamp_private = repo_tool.import_rsa_privatekey_from_file(key_file,
                                                                  'password')
    repository.timestamp.load_signing_key(timestamp_private)
    key_file = os.path.join(self.keystore_directory, 'snapshot_key')
    snapshot_private = repo_tool.import_rsa_privatekey_from_file(key_file,
                                                                  'password')
    repository.snapshot.load_signing_key(snapshot_private)

    # Expire snapshot in 8s. This should be far enough into the future that we
    # haven't reached it before the first refresh validates timestamp expiry.
    # We want a successful refresh before expiry, then a second refresh after
    # expiry (which we then expect to raise an exception due to expired
    # metadata).
    expiry_time = time.time() + 8
    datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))

    repository.snapshot.expiration = datetime_object

    # Now write to the repository.
    repository.write()

    # And move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Refresh metadata on the client. For this refresh, all data is not expired.
    logger.info('Test: Refreshing #1 - Initial metadata refresh occurring.')
    self.repository_updater.refresh()
    logger.info('Test: Refreshed #1 - Initial metadata refresh completed '
                'successfully. Now sleeping until snapshot metadata expires.')

    # Sleep until expiry_time ('repository.snapshot.expiration')
    time.sleep(max(0, expiry_time - time.time()))

    logger.info('Test: Refreshing #2 - Now trying to refresh again after local'
      ' snapshot expiry.')
    try:
      self.repository_updater.refresh() # We expect this to fail!

    except tuf.ExpiredMetadataError:
      logger.info('Test: Refresh #2 - failed as expected. Expired local'
                  ' snapshot case generated a tuf.ExpiredMetadataError'
                  ' exception as expected. Test pass.')

    # Only tuf.ExpiredMetadataError is expected here.  A NoWorkingMirrorError
    # would indicate something else in this case - an unavailable repo, for
    # example.
    else:
      self.fail('TUF failed to detect expired stale snapshot metadata. Freeze'
        ' attack successful.')




    # Test 2 Begin:
    #
    # 'timestamp.json' specifies the latest version of the repository files.
    # A client should only accept the same version of this file up to a certain
    # point, or else it cannot detect that new files are available for download.
    # Modify the repository's 'timestamp.json' so that it is about to expire,
    # copy it over the to client, wait a moment until it expires, and attempt to
    # re-fetch the same expired version.

    # The same scenario as in test_without_tuf() is followed here, except with
    # a TUF client. The TUF client performs a refresh of top-level metadata,
    # which includes 'timestamp.json', and should detect a freeze attack if
    # the repository serves an outdated 'timestamp.json'.

    # Modify the timestamp file on the remote repository.  'timestamp.json'
    # must be properly updated and signed with 'repository_tool.py', otherwise
    # the client will reject it as invalid metadata.  The resulting
    # 'timestamp.json' should be valid metadata, but expired (as intended).
    repository = repo_tool.load_repository(self.repository_directory)

    key_file = os.path.join(self.keystore_directory, 'timestamp_key')
    timestamp_private = repo_tool.import_rsa_privatekey_from_file(key_file,
                                                                  'password')

    repository.timestamp.load_signing_key(timestamp_private)

    # Set timestamp metadata to expire soon.
    # We cannot set the timestamp expiration with
    # 'repository.timestamp.expiration = ...' with already-expired timestamp
    # metadata because of consistency checks that occur during that assignment.
    expiry_time = time.time() + 1
    datetime_object = tuf.formats.unix_timestamp_to_datetime(int(expiry_time))
    repository.timestamp.expiration = datetime_object
    repository.write()

    # Move the staged metadata to the "live" metadata.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Wait just long enough for the timestamp metadata (which is now both on
    # the repository and on the client) to expire.
    time.sleep(max(0, expiry_time - time.time()))

    # Try to refresh top-level metadata on the client. Since we're already past
    # 'repository.timestamp.expiration', the TUF client is expected to detect
    # that timestamp metadata is outdated and refuse to continue the update
    # process.
    try:
      self.repository_updater.refresh() # We expect NoWorkingMirrorError.

    except tuf.NoWorkingMirrorError as e:
      # NoWorkingMirrorError indicates that we did not find valid, unexpired
      # metadata at any mirror. That exception class preserves the errors from
      # each mirror. We now assert that for each mirror, the particular error
      # detected was that metadata was expired (the timestamp we manually
      # expired).
      for mirror_url, mirror_error in six.iteritems(e.mirror_errors):
        self.assertTrue(isinstance(mirror_error, tuf.ExpiredMetadataError))

    else:
      self.fail('TUF failed to detect expired, stale timestamp metadata.'
        ' Freeze attack successful.')
Esempio n. 40
0
    def push_metadata(self,
                      targets_key_password=None,
                      snapshot_key_password=None,
                      timestamp_key_password=None,
                      progress=None):
        """
        Upload local TUF metadata to the repository.

        The TUF metadata consists of a list of targets (which were uploaded by
        :meth:`push_target`), a snapshot of the state of the metadata (list of
        hashes), a timestamp and a list of public keys.

        This function signs the metadata except for the list of public keys,
        so you'll need to supply the password to the respective private keys.

        The list of public keys was signed (along with the rest of the metadata)
        with the root private key when you called :meth:`create_metadata`
        (or :meth:`reset_keys`).

        :param targets_key_password: Password to use for decrypting the TUF targets private key. You'll be prompted for one if you don't supply it.
        :type targets_key_password: str

        :param snapshot_key_password: Password to use for decrypting the TUF snapshot private key. You'll be prompted for one if you don't supply it.
        :type snapshot_key_password: str

        :param timestamp_key_password: Password to use for decrypting the TUF timestamp private key. You'll be prompted for one if you don't supply it.
        :type timestamp_key_password: str

        :param progress: Optional function to call as the upload progresses. The function will be called with the hash of the content of the file currently being uploaded, the blob just read from the file and the total size of the file.
        :type progress: function(dgst, chunk, total)
        """
        from tuf.repository_tool import load_repository, \
                                        Repository, \
                                        import_rsa_privatekey_from_file
        # Load repository object
        repository = load_repository(self._master_repo_dir)
        #  pylint: disable=no-member

        # Update targets: replace the current target list with the files found
        # in the master targets directory, with any consistent-snapshot digest
        # prefix stripped from their names.
        repository.targets.clear_targets()
        repository.targets.add_targets([
            _strip_consistent_target_digest(f)
            for f in Repository.get_filepaths_in_directory(self._master_targets_dir)])

        # Update expirations
        repository.targets.expiration = datetime.now() + self._targets_lifetime
        repository.snapshot.expiration = datetime.now() + self._snapshot_lifetime
        repository.timestamp.expiration = datetime.now() + self._timestamp_lifetime

        # Load the signing keys.  import_rsa_privatekey_from_file prompts for
        # a password when None is passed, hence the notices printed first.

        # Load targets key
        if targets_key_password is None:
            print('importing targets key...')
        private_targets_key = import_rsa_privatekey_from_file(
            self._targets_key_file,
            targets_key_password)
        repository.targets.load_signing_key(private_targets_key)

        # Load snapshot key
        if snapshot_key_password is None:
            print('importing snapshot key...')
        private_snapshot_key = import_rsa_privatekey_from_file(
            self._snapshot_key_file,
            snapshot_key_password)
        repository.snapshot.load_signing_key(private_snapshot_key)

        # Load timestamp key
        if timestamp_key_password is None:
            print('importing timestamp key...')
        private_timestamp_key = import_rsa_privatekey_from_file(
            self._timestamp_key_file,
            timestamp_key_password)
        repository.timestamp.load_signing_key(private_timestamp_key)

        # Write new (consistent-snapshot) metadata to the staged directory.
        repository.write(consistent_snapshot=True)

        # Upload root.json and timestamp.json without hash prefix
        for f in ['root.json', 'timestamp.json']:
            dgst = self._dxf.push_blob(path.join(self._master_staged_dir, f),
                                       progress)
            self._dxf.set_alias(f, dgst)

        # Upload consistent snapshot versions of current metadata files...
        # first load timestamp.json
        with open(path.join(self._master_staged_dir, 'timestamp.json'), 'rb') as f:
            timestamp_data = f.read()
        # hash of content is timestamp prefix
        timestamp_cs = hash_bytes(timestamp_data) + '.timestamp.json'
        files = [timestamp_cs]
        # parse timestamp data to obtain the snapshot prefix
        timestamp = json.loads(timestamp_data.decode('utf-8'))
        snapshot_cs = timestamp['signed']['meta']['snapshot.json']['hashes']['sha256'] + '.snapshot.json'
        files.append(snapshot_cs)
        # load the prefixed snapshot.json to obtain the targets and root
        # prefixes
        with open(path.join(self._master_staged_dir, snapshot_cs), 'rb') as f:
            snapshot_data = f.read()
        snapshot = json.loads(snapshot_data.decode('utf-8'))
        targets_cs = snapshot['signed']['meta']['targets.json']['hashes']['sha256'] + '.targets.json'
        files.append(targets_cs)
        root_cs = snapshot['signed']['meta']['root.json']['hashes']['sha256'] + '.root.json'
        files.append(root_cs)
        # Upload the hash-prefixed metadata; these names are content-addressed
        # already, so no alias is set (the returned digest is not needed).
        for f in files:
            self._dxf.push_blob(path.join(self._master_staged_dir, f),
                                progress)
Esempio n. 41
0
  def setUp(self):
    """Create a fresh copy of the test repository fixtures, add a large
    target file, re-sign the top-level metadata, and construct the client
    updater the test cases will exercise."""
    # The parent class performs its own per-test initialization.
    unittest_toolbox.Modified_TestCase.setUp(self)

    # Work on disposable copies of the pristine fixture data so that any
    # modification a test makes is confined to this test's temp directory.
    # The 'repository_data' directory is expected to exist in 'tuf/tests/'.
    fixtures_root = os.path.join(os.getcwd(), 'repository_data')
    temp_root = self.make_temp_directory(directory=self.temporary_directory)

    # Save references to the often-needed directories; test cases use these
    # to reach metadata, target files, and keys.
    self.repository_directory = os.path.join(temp_root, 'repository')
    self.client_directory = os.path.join(temp_root, 'client')
    self.keystore_directory = os.path.join(temp_root, 'keystore')

    # Duplicate each fixture directory into the temporary workspace.
    for subdir, destination in (('repository', self.repository_directory),
                                ('client', self.client_directory),
                                ('keystore', self.keystore_directory)):
      shutil.copytree(os.path.join(fixtures_root, subdir), destination)

    # The slow retrieval server, in mode 2 (1 byte per second), will only
    # sleep for a total of (target file size) seconds, so the target must
    # hold comfortably more than 'tuf.conf.SLOW_START_GRACE_PERIOD' bytes
    # to trigger a slow-retrieval error.
    extra_bytes = 8
    total_bytes = tuf.conf.SLOW_START_GRACE_PERIOD + extra_bytes

    repository = repo_tool.load_repository(self.repository_directory)
    file1_filepath = os.path.join(self.repository_directory, 'targets',
                                  'file1.txt')
    with open(file1_filepath, 'wb') as file_object:
      file_object.write(('a' * total_bytes).encode('utf-8'))

    # Import the role signing keys and re-sign so the enlarged target is
    # covered by fresh metadata.
    private_keys = {}
    for role in ('timestamp', 'snapshot', 'targets'):
      keypath = os.path.join(self.keystore_directory, role + '_key')
      private_keys[role] = repo_tool.import_rsa_privatekey_from_file(
          keypath, 'password')

    repository.targets.load_signing_key(private_keys['targets'])
    repository.snapshot.load_signing_key(private_keys['snapshot'])
    repository.timestamp.load_signing_key(private_keys['timestamp'])

    repository.write()

    # Promote the staged metadata to the "live" metadata directory.
    shutil.rmtree(os.path.join(self.repository_directory, 'metadata'))
    shutil.copytree(os.path.join(self.repository_directory, 'metadata.staged'),
                    os.path.join(self.repository_directory, 'metadata'))

    # Derive the url prefix required by the 'tuf/client/updater.py' updater:
    # 'path/to/tmp/repository' -> 'localhost:8001/tmp/repository'.
    repository_basepath = self.repository_directory[len(os.getcwd()):]
    url_prefix = ('http://localhost:' + str(self.SERVER_PORT)
                  + repository_basepath)

    # Point the updater framework at the temporary client copy of the
    # original repository files.
    tuf.conf.repository_directory = self.client_directory
    self.repository_mirrors = {'mirror1': {'url_prefix': url_prefix,
                                           'metadata_path': 'metadata',
                                           'targets_path': 'targets',
                                           'confined_target_dirs': ['']}}

    # The client updater the test cases use to refresh metadata, fetch
    # target files, etc.
    self.repository_updater = updater.Updater('test_repository',
                                              self.repository_mirrors)