def test_cmd_remove(self):
    """Pod removal is refused while extra refs exist, and succeeds after."""
    config_path = self.test_repo_path / 'sample-config'
    jsons.dump_dataobject(self.sample_config, config_path)
    # Repo starts empty: no pod dirs, nothing in graveyard or tmp.
    self.assertEqual(self.list_pod_dir_paths(), [])
    self.assertEqual(list(pods._get_graveyard_path().iterdir()), [])
    self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
    self.create_pod_dir(self.sample_pod_id, self.sample_config)
    # A freshly-created pod dir carries exactly one reference.
    self.assertEqual(pods._get_ref_count(self.sample_pod_dir_path), 1)
    self.assertEqual(self.list_pod_dir_paths(), [self.sample_pod_id])
    self.assertEqual(list(pods._get_graveyard_path().iterdir()), [])
    self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
    # Add an external ref; the count rises to 2 and removal must fail.
    ref_path = self.test_repo_path / 'ref'
    pods.cmd_add_ref(self.sample_pod_id, ref_path)
    with unittest.mock.patch(pods.__name__ + '.scripts'):
        with self.assertRaisesRegex(
            AssertionError, r'expect x <= 1, not 2'
        ):
            pods.cmd_remove(self.sample_pod_id)
    # The failed removal must leave all state untouched.
    self.assertEqual(pods._get_ref_count(self.sample_pod_dir_path), 2)
    self.assertEqual(self.list_pod_dir_paths(), [self.sample_pod_id])
    self.assertEqual(list(pods._get_graveyard_path().iterdir()), [])
    self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
    self.mock_journals.remove_journal_dir.assert_not_called()
    # Drop the extra ref; removal should now succeed and clean up fully.
    ref_path.unlink()
    with unittest.mock.patch(pods.__name__ + '.scripts'):
        pods.cmd_remove(self.sample_pod_id)
    self.assertEqual(self.list_pod_dir_paths(), [])
    self.assertEqual(list(pods._get_graveyard_path().iterdir()), [])
    self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
    self.mock_journals.remove_journal_dir.assert_called_once_with(
        self.sample_pod_id
    )
def test_check_invariants(self):
    """Installing a second xar bundle with a duplicate label name is rejected."""
    b1_path = self.test_bundle_dir_path / 'bundle-1'
    b2_path = self.test_bundle_dir_path / 'bundle-2'
    # Lay out two bundle dirs, each with a deploy instruction and an
    # (empty) image file.
    for path, deploy_instruction in (
        (b1_path, self.XAR_DEPLOY_INSTRUCTION),
        (b2_path, self.XAR_DEPLOY_INSTRUCTION_2),
    ):
        path.mkdir()
        jsons.dump_dataobject(
            deploy_instruction,
            path / models.BUNDLE_DEPLOY_INSTRUCTION_FILENAME,
        )
        (path / models.XAR_BUNDLE_IMAGE_FILENAME).touch()
    ops_dirs = xar_ops_dirs.make_ops_dirs()
    with ops_dirs.listing_ops_dirs() as actual:
        self.assertEqual(actual, [])
    self.assertTrue(ops_dirs.install(b1_path))
    with ops_dirs.listing_ops_dirs() as actual:
        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0].metadata, self.XAR_METADATA)
    # The second bundle reuses the first one's xar label name, which
    # violates the uniqueness invariant.
    with self.assertRaisesRegex(
        AssertionError,
        r'expect unique xar label name:',
    ):
        ops_dirs.install(b2_path)
    # The failed install must not disturb the already-installed ops dir.
    with ops_dirs.listing_ops_dirs() as actual:
        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0].metadata, self.XAR_METADATA)
def make_bundle_dir(self):
    """Write the deploy instruction file and wrap the dir as a NullBundleDir."""
    bundle_path = self.test_bundle_dir_path
    instruction_path = bundle_path / models.BUNDLE_DEPLOY_INSTRUCTION_FILENAME
    jsons.dump_dataobject(self.DEPLOY_INSTRUCTION, instruction_path)
    return NullBundleDir(bundle_path)
def install(self, bundle_dir, target_ops_dir_path):
    """Install a xar bundle (zipapp or container xar) into this ops dir.

    Returns True on success.
    """
    del target_ops_dir_path  # Unused.
    ASSERT.isinstance(bundle_dir, XarBundleDir)
    label_version = (bundle_dir.label, bundle_dir.version)
    # Write metadata before anything else so that uninstall may roll
    # back a partially-completed install.
    LOG.info('xars install: metadata: %s %s', *label_version)
    metadata = models.XarMetadata(
        label=bundle_dir.label,
        version=bundle_dir.version,
        image=bundle_dir.deploy_instruction.image,
    )
    jsons.dump_dataobject(metadata, self.metadata_path)
    bases.set_file_attrs(self.metadata_path)
    # Sanity check of the just-written metadata file.
    ASSERT.equal(self.label, bundle_dir.label)
    ASSERT.equal(self.version, bundle_dir.version)
    if not bundle_dir.deploy_instruction.is_zipapp():
        LOG.info('xars install: xar: %s %s', *label_version)
        ctr_scripts.ctr_import_image(bundle_dir.image_path)
        ctr_scripts.ctr_install_xar(
            bundle_dir.deploy_instruction.name,
            bundle_dir.deploy_instruction.exec_relpath,
            bundle_dir.deploy_instruction.image,
        )
    else:
        LOG.info('xars install: zipapp: %s %s', *label_version)
        bases.copy_exec(bundle_dir.zipapp_path, self.zipapp_target_path)
    return True
def make_zipapp_bundle(self):
    """Create a minimal zipapp bundle dir and return it as a XarBundleDir."""
    bundle_path = self.test_bundle_dir_path
    jsons.dump_dataobject(
        self.ZIPAPP_DEPLOY_INSTRUCTION,
        bundle_path / models.BUNDLE_DEPLOY_INSTRUCTION_FILENAME,
    )
    # An empty placeholder stands in for the real zipapp file.
    (bundle_path / models.XAR_BUNDLE_ZIPAPP_FILENAME).touch()
    return xar_ops_dirs.XarBundleDir(bundle_path)
def create_image_dir(image_id, metadata):
    """Create an image dir with the given metadata and an empty rootfs."""
    dir_path = images.get_image_dir_path(image_id)
    dir_path.mkdir()
    metadata_path = images._get_metadata_path(dir_path)
    jsons.dump_dataobject(metadata, metadata_path)
    images.get_rootfs_path(dir_path).mkdir()
def create_image_dir(image_id, metadata, exec_relpath):
    """Create an image dir whose rootfs contains an empty file at exec_relpath."""
    dir_path = images.get_image_dir_path(image_id)
    dir_path.mkdir()
    jsons.dump_dataobject(metadata, images._get_metadata_path(dir_path))
    rootfs_path = images.get_rootfs_path(dir_path)
    exec_path = rootfs_path / exec_relpath
    exec_path.parent.mkdir(parents=True)
    exec_path.touch()
def make_bundle_dir(self):
    """Lay out a pod bundle dir (deploy instruction, image, volume) for tests."""
    bundle_path = self.test_bundle_dir_path
    jsons.dump_dataobject(
        self.DEPLOY_INSTRUCTION,
        bundle_path / models.BUNDLE_DEPLOY_INSTRUCTION_FILENAME,
    )
    # Touch empty placeholder image and volume files, creating parents.
    for relpath in (self.BUNDLE_IMAGE_RELPATH, self.BUNDLE_VOLUME_RELPATH):
        file_path = bundle_path / relpath
        file_path.parent.mkdir(parents=True)
        file_path.touch()
    return pod_ops_dirs.PodBundleDir(bundle_path)
def test_cmd_prepare(self):
    """cmd_prepare creates the pod dir and leaves it not exclusively locked."""
    config_path = self.test_repo_path / 'sample-config'
    jsons.dump_dataobject(self.sample_config, config_path)
    # Provide an image dir for every image the config references.
    for i, image in enumerate(self.sample_config.images):
        self.create_image_dir(
            self.make_image_id(i + 1),
            images.ImageMetadata(name=image.name, version=image.version),
        )
    self.assertEqual(self.list_pod_dir_paths(), [])
    self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
    with unittest.mock.patch.multiple(
        pods.__name__,
        scripts=unittest.mock.DEFAULT,
        # We don't have a valid base image, and so we can't really
        # call ``builders.generate_unit_file``, etc.
        builders=unittest.mock.DEFAULT,
        _generate_hostname=unittest.mock.DEFAULT,
    ):
        pods.cmd_prepare(self.sample_pod_id, config_path)
    # The pod dir now exists and no tmp residue is left behind.
    self.assertEqual(self.list_pod_dir_paths(), [self.sample_pod_id])
    self.assertEqual(list(pods._get_tmp_path().iterdir()), [])
    self.assertFalse(self.check_exclusive(self.sample_pod_dir_path))
def install(self, bundle_dir, target_ops_dir_path):
    """Install a pod bundle into this ops dir.

    Steps, in order: write metadata, extract volumes, import images,
    assign tokens, prepare pods (with refs), then install systemd units.
    Returns True on success.
    """
    ASSERT.isinstance(bundle_dir, PodBundleDir)
    log_args = (bundle_dir.label, bundle_dir.version)
    # Make metadata first so that uninstall may roll back properly.
    LOG.debug('pods install: metadata: %s %s', *log_args)
    metadata, groups = self._make_metadata(bundle_dir.deploy_instruction)
    jsons.dump_dataobject(metadata, self.metadata_path)
    bases.set_file_attrs(self.metadata_path)
    # Sanity check of the just-written metadata file.
    ASSERT.equal(self.label, bundle_dir.label)
    ASSERT.equal(self.version, bundle_dir.version)
    ASSERT.equal(self.metadata, metadata)
    LOG.debug(
        'pods install: pod ids: %s %s: %s', *log_args, ', '.join(groups)
    )
    LOG.debug('pods install: volumes: %s %s', *log_args)
    bases.make_dir(self.volumes_dir_path)
    for volume, volume_path in bundle_dir.iter_volumes():
        volume_dir_path = self.volumes_dir_path / volume.name
        LOG.debug('pods: extract: %s -> %s', volume_path, volume_dir_path)
        # Each volume dir must not pre-exist; extraction keeps the
        # ownership and permissions recorded in the tarball.
        bases.make_dir(ASSERT.not_predicate(volume_dir_path, Path.exists))
        scripts.tar_extract(
            volume_path,
            directory=volume_dir_path,
            extra_args=(
                '--same-owner',
                '--same-permissions',
            ),
        )
    LOG.debug('pods install: images: %s %s', *log_args)
    for _, image_path in bundle_dir.iter_images():
        ctr_scripts.ctr_import_image(image_path)
    LOG.debug('pods install: tokens: %s %s', *log_args)
    # assignments: pod_id -> {alias: assigned token value}.
    assignments = {}
    with tokens.make_tokens_database().writing() as active_tokens:
        for pod_id in groups:
            assignments[pod_id] = {
                alias: active_tokens.assign(token_name, pod_id, alias)
                for alias, token_name in
                bundle_dir.deploy_instruction.token_names.items()
            }
    envs = ops_envs.load()
    LOG.debug('pods install: prepare pods: %s %s', *log_args)
    bases.make_dir(self.refs_dir_path)
    for pod_id, group in groups.items():
        pod_config = self._make_pod_config(
            bundle_dir.deploy_instruction,
            target_ops_dir_path,
            systemds.make_envs(
                pod_id,
                self.metadata,
                group.envs,
                envs,
                assignments[pod_id],
            ),
        )
        # The pod config only needs to exist for the duration of
        # ``ctr_prepare_pod``; write it to a temporary file.
        with tempfile.NamedTemporaryFile() as config_tempfile:
            config_path = Path(config_tempfile.name)
            jsons.dump_dataobject(pod_config, config_path)
            ctr_scripts.ctr_prepare_pod(pod_id, config_path)
            ctr_scripts.ctr_add_ref_to_pod(
                pod_id, self.refs_dir_path / pod_id
            )
    LOG.debug('pods install: systemd units: %s %s', *log_args)
    # units: (pod_id, unit name) -> unit, for lookup per unit config.
    units = {
        (pod_id, unit.name): unit
        for pod_id, group in groups.items()
        for unit in group.units
    }
    for config in self.metadata.systemd_unit_configs:
        systemds.install(
            config,
            self.metadata,
            groups[config.pod_id],
            units[config.pod_id, config.name],
            envs,
            assignments[config.pod_id],
        )
    systemds.daemon_reload()
    return True
def _write_orig_config(config, pod_dir_path):
    """Persist the original pod config into the pod dir."""
    orig_config_path = _get_orig_config_path(pod_dir_path)
    jsons.dump_dataobject(config, orig_config_path)
def save(self, path):
    """Serialize this dataobject to ``path`` via ``jsons.dump_dataobject``."""
    jsons.dump_dataobject(self, path)
def dump(self, path):
    """Serialize this dataobject to ``path`` via ``jsons.dump_dataobject``."""
    jsons.dump_dataobject(self, path)
def _write_metadata(metadata, image_dir_path):
    """Persist image metadata into the image dir's metadata file."""
    metadata_path = _get_metadata_path(image_dir_path)
    jsons.dump_dataobject(metadata, metadata_path)