def setUp(self):
    """Prepare an isolated functional-test environment.

    Allocates free ports (keeping their sockets) for the API and
    registry servers, stages config/data files under <test_dir>/etc,
    and instantiates the server wrappers without launching them.
    """
    super(FunctionalTest, self).setUp()
    self.test_dir = self.useFixture(fixtures.TempDir()).path

    self.api_protocol = 'http'
    self.api_port, api_sock = test_utils.get_unused_port_and_socket()
    self.registry_port, registry_sock = (
        test_utils.get_unused_port_and_socket())

    # Stage the data files the servers expect next to their config.
    etc_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(etc_dir)
    for data_file in ('schema-image.json', 'policy.json',
                      'property-protections.conf'):
        self.copy_data_file(data_file, etc_dir)
    self.property_file = os.path.join(etc_dir, 'property-protections.conf')
    self.policy_file = os.path.join(etc_dir, 'policy.json')

    self.api_server = ApiServer(self.test_dir, self.api_port,
                                self.policy_file, sock=api_sock)
    self.registry_server = RegistryServer(self.test_dir,
                                          self.registry_port,
                                          sock=registry_sock)
    self.scrubber_daemon = ScrubberDaemon(self.test_dir)

    self.pid_files = [srv.pid_file for srv in (self.api_server,
                                               self.registry_server,
                                               self.scrubber_daemon)]
    self.files_to_destroy = []
    self.launched_servers = []
def schedule_delete_from_backend(uri, options, context, image_id, **kwargs):
    """
    Given a uri and a time, schedule the deletion of an image.

    If delayed delete is disabled, delete the image data immediately
    and mark the image 'deleted'.  Otherwise write a queue file named
    after the image id into the scrubber data directory and mark the
    image 'pending_delete'.

    :param uri: location of the image data in the backend store
    :param options: mapping of configuration options
    :param context: request context used for registry updates
    :param image_id: opaque image identifier
    :raises exception.Duplicate: if the image is already queued
    """
    use_delay = config.get_option(options, 'delayed_delete', type='bool',
                                  default=False)
    if not use_delay:
        registry.update_image_metadata(context, image_id,
                                       {'status': 'deleted'})
        try:
            return delete_from_backend(uri, **kwargs)
        except (UnsupportedBackend, exception.NotFound):
            msg = _("Failed to delete image from store (%(uri)s).") % locals()
            logger.error(msg)
            # The image was already marked 'deleted' above; do NOT fall
            # through to the delayed-deletion queueing below, which would
            # flip the status back to 'pending_delete'.
            return

    datadir = config.get_option(options, 'scrubber_datadir')
    scrub_time = config.get_option(options, 'scrub_time', type='int',
                                   default=0)
    delete_time = time.time() + scrub_time
    file_path = os.path.join(datadir, str(image_id))
    utils.safe_mkdirs(datadir)

    if os.path.exists(file_path):
        msg = _("Image id %(image_id)s already queued for delete") % {
            'image_id': image_id}
        raise exception.Duplicate(msg)

    with open(file_path, 'w') as f:
        f.write('\n'.join([uri, str(int(delete_time))]))
    # 0o600 (owner read/write only) -- the py2-only literal '0600' is a
    # SyntaxError on Python 3.
    os.chmod(file_path, 0o600)
    # Record the scheduled deletion time in the file timestamps as well.
    os.utime(file_path, (delete_time, delete_time))

    registry.update_image_metadata(context, image_id,
                                   {'status': 'pending_delete'})
def __init__(self):
    """Load scrubber settings from CONF and set up registry access."""
    self.datadir = CONF.scrubber_datadir
    self.cleanup = CONF.cleanup_scrubber
    self.cleanup_time = CONF.cleanup_scrubber_time
    # configs for registry API store auth
    self.admin_user = CONF.admin_user
    self.admin_tenant = CONF.admin_tenant_name

    host, port = CONF.registry_host, CONF.registry_port

    conf_summary = {'datadir': self.datadir,
                    'cleanup': self.cleanup,
                    'cleanup_time': self.cleanup_time,
                    'registry_host': host,
                    'registry_port': port}
    LOG.info(_("Initializing scrubber with conf: %s") % conf_summary)

    registry.configure_registry_client()
    registry.configure_registry_admin_creds()
    self.registry = registry.get_registry_client(context.RequestContext())

    utils.safe_mkdirs(self.datadir)
def setUp(self):
    """Build a fake 'import' task plus mocked repos and a file store."""
    super(TestImportTask, self).setUp()

    self.work_dir = os.path.join(self.test_dir, 'work_dir')
    utils.safe_mkdirs(self.work_dir)
    self.config(work_dir=self.work_dir, group='task')

    # Collaborators are mocked; only the task/image factories are real.
    self.context = mock.MagicMock()
    self.img_repo = mock.MagicMock()
    self.task_repo = mock.MagicMock()

    self.gateway = gateway.Gateway()
    self.task_factory = domain.TaskFactory()
    self.img_factory = self.gateway.get_image_factory(self.context)
    self.image = self.img_factory.new_image(image_id=UUID1,
                                            disk_format='raw',
                                            container_format='bare')

    task_input = {
        "import_from": "http://cloud.foo/image.raw",
        "import_from_format": "raw",
        "image_properties": {'disk_format': 'qcow2',
                             'container_format': 'bare'},
    }
    self.task_type = 'import'
    self.task = self.task_factory.new_task(
        self.task_type, TENANT1,
        task_time_to_live=CONF.task.task_time_to_live,
        task_input=task_input)

    glance_store.register_opts(CONF)
    self.config(default_store='file',
                stores=['file', 'http'],
                filesystem_store_datadir=self.test_dir,
                group='glance_store')
    self.config(conversion_format='qcow2', group='taskflow_executor')
    glance_store.create_stores(CONF)
def setUp(self):
    """Create a throwaway test dir, ports, config files and servers."""
    self.test_id, self.test_dir = test_utils.get_isolated_test_env()

    self.api_protocol = 'http'
    self.api_port = get_unused_port()
    self.registry_port = get_unused_port()

    etc_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(etc_dir)
    for data_file in ('schema-image.json', 'schema-access.json',
                      'policy.json'):
        self.copy_data_file(data_file, etc_dir)
    self.policy_file = os.path.join(etc_dir, 'policy.json')

    self.api_server = ApiServer(self.test_dir, self.api_port,
                                self.registry_port, self.policy_file)
    self.registry_server = RegistryServer(self.test_dir, self.registry_port)
    self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.registry_port)

    self.pid_files = [srv.pid_file for srv in (self.api_server,
                                               self.registry_server,
                                               self.scrubber_daemon)]
    self.files_to_destroy = []
    self.log_files = []
def __init__(self, store_api):
    """Initialize the scrubber: datadir, registry clients, admin context."""
    conf_summary = {'scrubber_datadir': CONF.scrubber_datadir,
                    'cleanup': CONF.cleanup_scrubber,
                    'cleanup_time': CONF.cleanup_scrubber_time,
                    'registry_host': CONF.registry_host,
                    'registry_port': CONF.registry_port}
    LOG.info(_LI("Initializing scrubber with configuration: %s") %
             six.text_type(conf_summary))

    utils.safe_mkdirs(CONF.scrubber_datadir)

    self.store_api = store_api

    registry.configure_registry_client()
    registry.configure_registry_admin_creds()
    self.registry = registry.get_registry_client(context.RequestContext())

    # Here we create a request context with credentials to support
    # delayed delete when using multi-tenant backend storage
    self.admin_context = context.RequestContext(
        user=CONF.admin_user,
        tenant=CONF.admin_tenant_name,
        auth_token=self.registry.auth_token)

    (self.file_queue, self.db_queue) = get_scrub_queues()
def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs):
    """
    Given a uri and a time, schedule the deletion of an image.

    With delayed delete disabled the image data is removed immediately
    and the image marked 'deleted'; otherwise a queue file named after
    the image id is written into the scrubber data directory and the
    image is marked 'pending_delete'.

    :param uri: location of the image data in the backend store
    :param conf: configuration object carrying the delete options
    :param context: request context used for registry updates
    :param image_id: opaque image identifier
    :raises exception.Duplicate: if the image is already queued
    """
    conf.register_opts(delete_opts)
    if not conf.delayed_delete:
        registry.update_image_metadata(context, image_id,
                                       {'status': 'deleted'})
        try:
            return delete_from_backend(uri, **kwargs)
        except (UnsupportedBackend,
                exception.StoreDeleteNotSupported,
                exception.NotFound):
            exc_type = sys.exc_info()[0].__name__
            msg = (_("Failed to delete image at %s from store (%s)") %
                   (uri, exc_type))
            logger.error(msg)
        finally:
            # avoid falling through to the delayed deletion logic
            return

    datadir = get_scrubber_datadir(conf)
    delete_time = time.time() + conf.scrub_time
    file_path = os.path.join(datadir, str(image_id))
    utils.safe_mkdirs(datadir)

    if os.path.exists(file_path):
        msg = _("Image id %(image_id)s already queued for delete") % {
            'image_id': image_id}
        raise exception.Duplicate(msg)

    with open(file_path, 'w') as f:
        f.write('\n'.join([uri, str(int(delete_time))]))
    # 0o600 (owner read/write only) -- the py2-only literal '0600' is a
    # SyntaxError on Python 3.
    os.chmod(file_path, 0o600)
    os.utime(file_path, (delete_time, delete_time))

    registry.update_image_metadata(context, image_id,
                                   {'status': 'pending_delete'})
def __init__(self, conf, **local_conf):
    """Register scrubber opts on ``conf`` and wire up registry + stores."""
    self.conf = conf
    self.conf.register_opts(self.opts)

    self.datadir = store.get_scrubber_datadir(conf)
    self.cleanup = self.conf.cleanup_scrubber
    self.cleanup_time = self.conf.cleanup_scrubber_time
    host, port = registry.get_registry_addr(conf)

    conf_summary = {'datadir': self.datadir,
                    'cleanup': self.cleanup,
                    'cleanup_time': self.cleanup_time,
                    'registry_host': host,
                    'registry_port': port}
    logger.info(_("Initializing scrubber with conf: %s") % conf_summary)

    self.registry = client.RegistryClient(host, port)

    utils.safe_mkdirs(self.datadir)

    store.create_stores(conf)
def write_conf(self, **kwargs):
    """
    Writes the configuration file for the server to its intended
    destination.  Returns the name of the configuration file.

    Keyword arguments override this server object's attributes when
    interpolating ``conf_base``.

    :raises RuntimeError: if the subclass did not set ``conf_base``
    """
    # Reuse an already-written config file.
    if self.conf_file_name:
        return self.conf_file_name
    if not self.conf_base:
        raise RuntimeError("Subclass did not populate config_base!")

    conf_override = self.__dict__.copy()
    if kwargs:
        conf_override.update(**kwargs)

    # A config file to use just for this test...we don't want
    # to trample on currently-running Glance servers, now do we?
    conf_dir = os.path.join(self.test_dir, 'etc')
    conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name)
    utils.safe_mkdirs(conf_dir)
    # Text mode ('w', not 'wb'): the interpolated config is a str, and
    # writing str to a binary-mode file raises TypeError on Python 3.
    with open(conf_filepath, 'w') as conf_file:
        conf_file.write(self.conf_base % conf_override)
        conf_file.flush()
        self.conf_file_name = conf_file.name

    return self.conf_file_name
def setUp(self):
    """Point glance-manage at a sqlite database inside the test dir."""
    super(TestGlanceManage, self).setUp()

    etc_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(etc_dir)
    self.conf_filepath = os.path.join(etc_dir, 'glance-manage.conf')
    self.db_filepath = os.path.join(self.test_dir, 'tests.sqlite')
    self.connection = 'sql_connection = sqlite:///%s' % self.db_filepath
def get_isolated_test_env():
    """
    Return a (test_id, test_dir) pair unique to an isolated test
    environment, creating test_dir as a side effect.
    """
    # NOTE(review): random ids can collide across concurrent runs --
    # acceptable for these tests, but worth confirming if reused.
    env_id = random.randint(0, 100000)
    env_dir = os.path.join("/", "tmp", "test.%d" % env_id)
    utils.safe_mkdirs(env_dir)
    return env_id, env_dir
def _make_cache_directory_if_needed(self):
    """Creates main cache directory along with incomplete subdirectory"""
    # NOTE(sirp): making the incomplete_path will have the effect of
    # creating the main cache path directory as well
    for path in (self.incomplete_path, self.invalid_path,
                 self.prefetch_path, self.prefetching_path):
        cutils.safe_mkdirs(path)
def get_isolated_test_env():
    """
    Return a (test_id, test_dir) pair unique to an isolated test
    environment, creating test_dir as a side effect.

    The base directory defaults to /tmp but can be redirected via the
    GLANCE_TEST_TMP_DIR environment variable.
    """
    env_id = random.randint(0, 100000)
    base_dir = os.getenv('GLANCE_TEST_TMP_DIR', '/tmp')
    env_dir = os.path.join(base_dir, "test.%d" % env_id)
    utils.safe_mkdirs(env_dir)
    return env_id, env_dir
def set_paths(self):
    """
    Creates all necessary directories under the base cache directory
    """
    self.base_dir = self.options.get('image_cache_dir')
    self.incomplete_dir = os.path.join(self.base_dir, 'incomplete')
    self.invalid_dir = os.path.join(self.base_dir, 'invalid')
    self.queue_dir = os.path.join(self.base_dir, 'queue')

    for subdir in (self.incomplete_dir, self.invalid_dir, self.queue_dir):
        utils.safe_mkdirs(subdir)
def create_database(self):
    """Create database if required for this server.

    Writes a minimal glance-manage config pointing at this server's
    sql_connection and runs ``glance-manage db_sync``.
    """
    if not self.needs_database:
        return
    conf_dir = os.path.join(self.test_dir, "etc")
    utils.safe_mkdirs(conf_dir)
    conf_filepath = os.path.join(conf_dir, "glance-manage.conf")

    # Text mode ('w', not 'wb'): the content is str, and writing str to
    # a binary-mode file raises TypeError on Python 3.
    with open(conf_filepath, "w") as conf_file:
        conf_file.write("[DEFAULT]\n")
        conf_file.write("sql_connection = %s" % self.sql_connection)
        conf_file.flush()

    cmd = "bin/glance-manage --config-file %s db_sync" % conf_filepath
    execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env,
            expect_exit=True)
def setUp(self): super(BaseTestCase, self).setUp() # NOTE(bcwaldon): parse_args has to be called to register certain # command-line options - specifically we need config_dir for # the following policy tests config.parse_args(args=[]) self.addCleanup(CONF.reset) self.stubs = stubout.StubOutForTesting() self.stubs.Set(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True) self.test_dir = self.useFixture(fixtures.TempDir()).path self.conf_dir = os.path.join(self.test_dir, 'etc') utils.safe_mkdirs(self.conf_dir) self.set_policy()
def create_database(self):
    """Create database if required for this server.

    Writes a minimal glance-manage config pointing at this server's
    sql_connection and runs ``glance-manage db_sync``.
    """
    if not self.needs_database:
        return
    conf_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(conf_dir)
    conf_filepath = os.path.join(conf_dir, 'glance-manage.conf')

    # Text mode ('w', not 'wb'): the content is str, and writing str to
    # a binary-mode file raises TypeError on Python 3.
    with open(conf_filepath, 'w') as conf_file:
        conf_file.write('[DEFAULT]\n')
        conf_file.write('sql_connection = %s' % self.sql_connection)
        conf_file.flush()

    cmd = ('bin/glance-manage --config-file %s db_sync' % conf_filepath)
    execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env,
            expect_exit=True)
def schedule_delayed_delete_from_backend(uri, image_id, **kwargs):
    """Given a uri, schedule the deletion of an image.

    Writes a queue file named after the image id into the scrubber data
    directory, containing the uri and the earliest deletion timestamp.

    :raises exception.Duplicate: if the image is already queued
    """
    datadir = CONF.scrubber_datadir
    delete_time = time.time() + CONF.scrub_time
    file_path = os.path.join(datadir, str(image_id))
    utils.safe_mkdirs(datadir)

    if os.path.exists(file_path):
        msg = _("Image id %(image_id)s already queued for delete") % {
            'image_id': image_id}
        raise exception.Duplicate(msg)

    with open(file_path, 'w') as f:
        f.write('\n'.join([uri, str(int(delete_time))]))
    # 0o600 (owner read/write only) -- the py2-only literal '0600' is a
    # SyntaxError on Python 3.
    os.chmod(file_path, 0o600)
    # Record the scheduled deletion time in the file timestamps as well.
    os.utime(file_path, (delete_time, delete_time))
def schedule_delayed_delete_from_backend(uri, image_id, **kwargs):
    """Given a uri, schedule the deletion of an image."""
    datadir = CONF.scrubber_datadir
    queue_file = os.path.join(datadir, str(image_id))
    delete_time = time.time() + CONF.scrub_time
    utils.safe_mkdirs(datadir)

    if os.path.exists(queue_file):
        raise exception.Duplicate(
            _("Image id %(image_id)s already queued for delete") %
            {"image_id": image_id})

    # Encrypt the location before persisting it when a key is configured.
    key = CONF.metadata_encryption_key
    if key is not None:
        uri = crypt.urlsafe_encrypt(key, uri, 64)
    with open(queue_file, "w") as f:
        f.write("\n".join([uri, str(int(delete_time))]))
    os.chmod(queue_file, 0o600)
    os.utime(queue_file, (delete_time, delete_time))
def __init__(self):
    """Pull scrubber settings from CONF and connect to the registry."""
    self.datadir = CONF.scrubber_datadir
    self.cleanup = CONF.cleanup_scrubber
    self.cleanup_time = CONF.cleanup_scrubber_time
    host, port = CONF.registry_host, CONF.registry_port

    conf_summary = {'datadir': self.datadir,
                    'cleanup': self.cleanup,
                    'cleanup_time': self.cleanup_time,
                    'registry_host': host,
                    'registry_port': port}
    LOG.info(_("Initializing scrubber with conf: %s") % conf_summary)

    self.registry = client.RegistryClient(host, port)

    utils.safe_mkdirs(self.datadir)

    store.create_stores()
def __init__(self, store_api):
    """Initialize the scrubber: log config, ensure datadir, set up queues."""
    conf_summary = {'scrubber_datadir': CONF.scrubber_datadir,
                    'cleanup': CONF.cleanup_scrubber,
                    'cleanup_time': CONF.cleanup_scrubber_time,
                    'registry_host': CONF.registry_host,
                    'registry_port': CONF.registry_port}
    LOG.info(_("Initializing scrubber with configuration: %s") %
             unicode(conf_summary))

    utils.safe_mkdirs(CONF.scrubber_datadir)

    self.store_api = store_api

    registry.configure_registry_client()
    registry.configure_registry_admin_creds()
    self.registry = registry.get_registry_client(context.RequestContext())

    (self.file_queue, self.db_queue) = get_scrub_queues()
def setUp(self):
    """Create an isolated /tmp test dir and unstarted server wrappers."""
    self.test_id = random.randint(0, 100000)
    self.test_dir = os.path.join("/", "tmp", "test.%d" % self.test_id)
    utils.safe_mkdirs(self.test_dir)

    self.api_protocol = "http"
    self.api_port = get_unused_port()
    self.registry_port = get_unused_port()

    self.api_server = ApiServer(self.test_dir, self.api_port,
                                self.registry_port)
    self.registry_server = RegistryServer(self.test_dir, self.registry_port)
    self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.registry_port)

    self.pid_files = [srv.pid_file for srv in (self.api_server,
                                               self.registry_server,
                                               self.scrubber_daemon)]
    self.files_to_destroy = []
def __init__(self, options):
    """Initialize the scrubber from an ``options`` mapping.

    Reads the data directory, cleanup flag and registry address from
    ``options``, connects a registry client, ensures the data directory
    exists, and initializes the backend stores.
    """
    logger.info(_("Initializing scrubber with options: %s") % options)
    self.options = options
    self.datadir = config.get_option(options, 'scrubber_datadir')
    self.cleanup = config.get_option(options, 'cleanup_scrubber',
                                     type='bool', default=False)
    host = config.get_option(options, 'registry_host')
    port = config.get_option(options, 'registry_port', type='int')
    self.registry = client.RegistryClient(host, port)
    utils.safe_mkdirs(self.datadir)
    if self.cleanup:
        # NOTE(review): cleanup_time only exists when cleanup is enabled;
        # callers must check self.cleanup before reading it.
        self.cleanup_time = config.get_option(options,
                                              'cleanup_scrubber_time',
                                              type='int', default=86400)
    store.create_stores(options)
def write_conf(self, **kwargs):
    """
    Writes the configuration file for the server to its intended
    destination.  Returns the name of the configuration file and
    the over-ridden config content (may be useful for populating
    error messages).

    :raises RuntimeError: if the subclass did not set ``conf_base``
    """
    if not self.conf_base:
        raise RuntimeError("Subclass did not populate config_base!")

    conf_override = self.__dict__.copy()
    if kwargs:
        conf_override.update(**kwargs)

    # A config file and paste.ini to use just for this test...we don't want
    # to trample on currently-running Glance servers, now do we?
    conf_dir = os.path.join(self.test_dir, 'etc')
    conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name)
    if os.path.exists(conf_filepath):
        os.unlink(conf_filepath)
    paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini")
    if os.path.exists(paste_conf_filepath):
        os.unlink(paste_conf_filepath)
    utils.safe_mkdirs(conf_dir)

    def override_conf(filepath, overridden):
        # Text mode ('w', not 'wb'): the interpolated config is str;
        # binary mode raises TypeError under Python 3.
        with open(filepath, 'w') as conf_file:
            conf_file.write(overridden)
            conf_file.flush()
            return conf_file.name

    overridden_core = self.conf_base % conf_override
    self.conf_file_name = override_conf(conf_filepath, overridden_core)

    overridden_paste = ''
    if self.paste_conf_base:
        overridden_paste = self.paste_conf_base % conf_override
        override_conf(paste_conf_filepath, overridden_paste)

    overridden = ('==Core config==\n%s\n==Paste config==\n%s' %
                  (overridden_core, overridden_paste))

    return self.conf_file_name, overridden
def write_conf(self, **kwargs):
    """
    Write this server's config file (and paste.ini) under the test dir.

    Returns (config file name, over-ridden config content); the latter
    is handy for populating error messages.
    """
    if not self.conf_base:
        raise RuntimeError("Subclass did not populate config_base!")

    conf_override = self.__dict__.copy()
    if kwargs:
        conf_override.update(**kwargs)

    # A private config file and paste.ini for this test only, so we
    # never trample on currently-running Glance servers.
    conf_dir = os.path.join(self.test_dir, 'etc')
    conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name)
    paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini")
    for stale in (conf_filepath, paste_conf_filepath):
        if os.path.exists(stale):
            os.unlink(stale)
    utils.safe_mkdirs(conf_dir)

    def _write(filepath, contents):
        # Persist the interpolated config, returning the file name.
        with open(filepath, 'w') as conf_file:
            conf_file.write(contents)
            conf_file.flush()
            return conf_file.name

    overridden_core = self.conf_base % conf_override
    self.conf_file_name = _write(conf_filepath, overridden_core)

    overridden_paste = ''
    if self.paste_conf_base:
        overridden_paste = self.paste_conf_base % conf_override
        _write(paste_conf_filepath, overridden_paste)

    overridden = ('==Core config==\n%s\n==Paste config==\n%s' %
                  (overridden_core, overridden_paste))

    return self.conf_file_name, overridden
def setUp(self):
    """Stage config/data files and build (unstarted) Glance servers."""
    super(FunctionalTest, self).setUp()
    self.test_dir = self.useFixture(fixtures.TempDir()).path

    self.api_protocol = 'http'
    self.api_port, api_sock = test_utils.get_unused_port_and_socket()
    self.registry_port, reg_sock = test_utils.get_unused_port_and_socket()

    # NOTE: Scrubber is enabled by default for the functional tests.
    # Please disable it by explicitly setting 'self.include_scrubber' to
    # False in the test SetUps that do not require Scrubber to run.
    self.include_scrubber = True

    self.tracecmd = tracecmd_osmap.get(platform.system())

    etc_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(etc_dir)
    for data_file in ('schema-image.json', 'policy.json',
                      'property-protections.conf',
                      'property-protections-policies.conf'):
        self.copy_data_file(data_file, etc_dir)

    self.property_file_roles = os.path.join(etc_dir,
                                            'property-protections.conf')
    self.property_file_policies = os.path.join(
        etc_dir, 'property-protections-policies.conf')
    self.policy_file = os.path.join(etc_dir, 'policy.json')

    self.api_server = ApiServer(self.test_dir, self.api_port,
                                self.policy_file, sock=api_sock)
    self.registry_server = RegistryServer(self.test_dir, self.registry_port,
                                          self.policy_file, sock=reg_sock)
    self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file)

    self.pid_files = [srv.pid_file for srv in (self.api_server,
                                               self.registry_server,
                                               self.scrubber_daemon)]
    self.files_to_destroy = []
    self.launched_servers = []
def create_database(self):
    """Create database if required for this server.

    The first run syncs a fresh sqlite DB via glance-manage, then caches
    a copy in a temp file (path exported via GLANCE_DB_TEST_SQLITE_FILE)
    so later tests copy the cached DB instead of re-syncing.
    """
    if not self.needs_database:
        return
    conf_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(conf_dir)
    conf_filepath = os.path.join(conf_dir, 'glance-manage.conf')

    # Text mode ('w', not 'wb'): the content is str, and writing str to
    # a binary-mode file raises TypeError on Python 3.
    with open(conf_filepath, 'w') as conf_file:
        conf_file.write('[DEFAULT]\n')
        conf_file.write('sql_connection = %s' % self.sql_connection)
        conf_file.flush()

    glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
    if glance_db_env in os.environ:
        # use the empty db created and cached as a tempfile
        # instead of spending the time creating a new one
        db_location = os.environ[glance_db_env]
        os.system('cp %s %s/tests.sqlite'
                  % (db_location, self.test_dir))
    else:
        cmd = ('%s -m glance.cmd.manage --config-file %s db sync' %
               (sys.executable, conf_filepath))
        execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env,
                expect_exit=True)

        # copy the clean db to a temp location so that it
        # can be reused for future tests
        (osf, db_location) = tempfile.mkstemp()
        os.close(osf)
        os.system('cp %s/tests.sqlite %s'
                  % (self.test_dir, db_location))
        os.environ[glance_db_env] = db_location

        # cleanup the temp file when the test suite is
        # complete
        def _delete_cached_db():
            try:
                os.remove(os.environ[glance_db_env])
            except Exception:
                glance_tests.logger.exception(
                    "Error cleaning up the file %s" %
                    os.environ[glance_db_env])

        atexit.register(_delete_cached_db)
def setUp(self):
    """Configure file/http stores and build a fake 'import' task."""
    super(TestConvertImageTask, self).setUp()

    glance_store.register_opts(CONF)
    self.config(default_store='file',
                stores=['file', 'http'],
                filesystem_store_datadir=self.test_dir,
                group="glance_store")
    self.config(output_format='qcow2', group='image_conversion')
    glance_store.create_stores(CONF)

    self.work_dir = os.path.join(self.test_dir, 'work_dir')
    utils.safe_mkdirs(self.work_dir)
    self.config(work_dir=self.work_dir, group='task')

    # Repos and context are mocked; the factories are the real thing.
    self.context = mock.MagicMock()
    self.img_repo = mock.MagicMock()
    self.task_repo = mock.MagicMock()
    self.image_id = UUID1

    self.gateway = gateway.Gateway()
    self.task_factory = domain.TaskFactory()
    self.img_factory = self.gateway.get_image_factory(self.context)
    self.image = self.img_factory.new_image(image_id=self.image_id,
                                            disk_format='raw',
                                            container_format='bare')

    task_input = {
        "import_from": "http://cloud.foo/image.raw",
        "import_from_format": "raw",
        "image_properties": {'disk_format': 'raw',
                             'container_format': 'bare'},
    }
    self.task_type = 'import'
    self.task = self.task_factory.new_task(
        self.task_type, TENANT1,
        task_time_to_live=CONF.task.task_time_to_live,
        task_input=task_input)
def create_database(self):
    """Create database if required for this server"""
    # The synced sqlite DB is cached in a temp file whose path is
    # exported via GLANCE_DB_TEST_SQLITE_FILE, so subsequent tests copy
    # it instead of paying for another `db sync`.
    if self.needs_database:
        conf_dir = os.path.join(self.test_dir, 'etc')
        utils.safe_mkdirs(conf_dir)
        conf_filepath = os.path.join(conf_dir, 'glance-manage.conf')

        with open(conf_filepath, 'w') as conf_file:
            conf_file.write('[DEFAULT]\n')
            conf_file.write('sql_connection = %s' % self.sql_connection)
            conf_file.flush()

        glance_db_env = 'GLANCE_DB_TEST_SQLITE_FILE'
        if glance_db_env in os.environ:
            # use the empty db created and cached as a tempfile
            # instead of spending the time creating a new one
            db_location = os.environ[glance_db_env]
            os.system('cp %s %s/tests.sqlite'
                      % (db_location, self.test_dir))
        else:
            cmd = ('%s -m glance.cmd.manage --config-file %s db sync' %
                   (sys.executable, conf_filepath))
            execute(cmd, no_venv=self.no_venv, exec_env=self.exec_env,
                    expect_exit=True)

            # copy the clean db to a temp location so that it
            # can be reused for future tests
            (osf, db_location) = tempfile.mkstemp()
            os.close(osf)
            os.system('cp %s/tests.sqlite %s'
                      % (self.test_dir, db_location))
            os.environ[glance_db_env] = db_location

            # cleanup the temp file when the test suite is
            # complete
            def _delete_cached_db():
                try:
                    os.remove(os.environ[glance_db_env])
                except Exception:
                    glance_tests.logger.exception(
                        "Error cleaning up the file %s" %
                        os.environ[glance_db_env])

            atexit.register(_delete_cached_db)
def schedule_delayed_delete_from_backend(uri, image_id, **kwargs):
    """Given a uri, schedule the deletion of an image."""
    datadir = CONF.scrubber_datadir
    queue_file = os.path.join(datadir, str(image_id))
    delete_time = time.time() + CONF.scrub_time
    utils.safe_mkdirs(datadir)

    if os.path.exists(queue_file):
        raise exception.Duplicate(
            _("Image id %(image_id)s already queued for delete") %
            {'image_id': image_id})

    # Encrypt the location before persisting it when a key is configured.
    key = CONF.metadata_encryption_key
    if key is not None:
        uri = crypt.urlsafe_encrypt(key, uri, 64)
    with open(queue_file, 'w') as f:
        f.write('\n'.join([uri, str(int(delete_time))]))
    os.chmod(queue_file, 0o600)
    os.utime(queue_file, (delete_time, delete_time))
def __init__(self, conf, **local_conf):
    """Register scrubber opts on ``conf``, then wire registry and stores."""
    self.conf = conf
    self.conf.register_opts(self.opts)

    self.datadir = store.get_scrubber_datadir(conf)
    self.cleanup = self.conf.cleanup_scrubber
    self.cleanup_time = self.conf.cleanup_scrubber_time
    host, port = registry.get_registry_addr(conf)

    logger.info(_("Initializing scrubber with conf: %s") %
                {'datadir': self.datadir,
                 'cleanup': self.cleanup,
                 'cleanup_time': self.cleanup_time,
                 'registry_host': host,
                 'registry_port': port})

    self.registry = client.RegistryClient(host, port)

    utils.safe_mkdirs(self.datadir)

    store.create_stores(conf)
def setUp(self):
    """Common unit-test fixture: config, mox stubs, temp dirs, policy."""
    super(BaseTestCase, self).setUp()
    self._config_fixture = self.useFixture(cfg_fixture.Config())
    # NOTE(bcwaldon): parse_args has to be called to register certain
    # command-line options - specifically we need config_dir for
    # the following policy tests
    config.parse_args(args=[])
    self.addCleanup(CONF.reset)
    mox_fixture = self.useFixture(moxstubout.MoxStubout())
    self.stubs = mox_fixture.stubs
    # Make exception format-string errors fatal so they surface as
    # test failures instead of being swallowed.
    self.stubs.Set(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True)
    self.test_dir = self.useFixture(fixtures.TempDir()).path
    self.conf_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(self.conf_dir)
    self.set_policy()

    # Limit the amount of DeprecationWarning messages in the unit test logs
    self.useFixture(glance_fixtures.WarningsFixture())
def set_paths(self):
    """
    Creates all necessary directories under the base cache directory
    """
    self.base_dir = CONF.image_cache_dir
    if self.base_dir is None:
        msg = _("Failed to read %s from config") % "image_cache_dir"
        LOG.error(msg)
        driver = self.__class__.__module__
        raise exception.BadDriverConfiguration(driver_name=driver,
                                               reason=msg)

    self.incomplete_dir = os.path.join(self.base_dir, "incomplete")
    self.invalid_dir = os.path.join(self.base_dir, "invalid")
    self.queue_dir = os.path.join(self.base_dir, "queue")

    for subdir in (self.incomplete_dir, self.invalid_dir, self.queue_dir):
        utils.safe_mkdirs(subdir)
def __init__(self, store_api):
    """Initialize the scrubber: log config, ensure datadir, set up queues."""
    conf_summary = {'scrubber_datadir': CONF.scrubber_datadir,
                    'cleanup': CONF.cleanup_scrubber,
                    'cleanup_time': CONF.cleanup_scrubber_time,
                    'registry_host': CONF.registry_host,
                    'registry_port': CONF.registry_port}
    LOG.info(_("Initializing scrubber with configuration: %s") %
             unicode(conf_summary))

    utils.safe_mkdirs(CONF.scrubber_datadir)

    self.store_api = store_api

    registry.configure_registry_client()
    registry.configure_registry_admin_creds()
    self.registry = registry.get_registry_client(context.RequestContext())

    (self.file_queue, self.db_queue) = get_scrub_queues()
def setUp(self):
    """Stage config/data files and build (unstarted) Glance servers."""
    super(FunctionalTest, self).setUp()
    self.test_dir = self.useFixture(fixtures.TempDir()).path

    self.api_protocol = 'http'
    self.api_port, api_sock = test_utils.get_unused_port_and_socket()
    self.registry_port, reg_sock = test_utils.get_unused_port_and_socket()

    # Scrubber runs by default; tests that don't need it flip this off.
    self.include_scrubber = True

    self.tracecmd = tracecmd_osmap.get(platform.system())

    etc_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(etc_dir)
    for data_file in ('schema-image.json', 'policy.json',
                      'property-protections.conf',
                      'property-protections-policies.conf'):
        self.copy_data_file(data_file, etc_dir)

    self.property_file_roles = os.path.join(etc_dir,
                                            'property-protections.conf')
    self.property_file_policies = os.path.join(
        etc_dir, 'property-protections-policies.conf')
    self.policy_file = os.path.join(etc_dir, 'policy.json')

    self.api_server = ApiServer(self.test_dir, self.api_port,
                                self.policy_file, sock=api_sock)
    self.registry_server = RegistryServer(self.test_dir, self.registry_port,
                                          self.policy_file, sock=reg_sock)
    self.scrubber_daemon = ScrubberDaemon(self.test_dir, self.policy_file)

    self.pid_files = [srv.pid_file for srv in (self.api_server,
                                               self.registry_server,
                                               self.scrubber_daemon)]
    self.files_to_destroy = []
    self.launched_servers = []
def setUp(self):
    """Configure file/http stores and build a fake 'import' task."""
    super(TestConvertImageTask, self).setUp()

    glance_store.register_opts(CONF)
    self.config(default_store='file',
                stores=['file', 'http'],
                filesystem_store_datadir=self.test_dir,
                group="glance_store")
    self.config(output_format='qcow2', group='image_conversion')
    glance_store.create_stores(CONF)

    self.work_dir = os.path.join(self.test_dir, 'work_dir')
    utils.safe_mkdirs(self.work_dir)
    self.config(work_dir=self.work_dir, group='task')

    # Context and repositories are mocked; factories are real.
    self.context = mock.MagicMock()
    self.img_repo = mock.MagicMock()
    self.task_repo = mock.MagicMock()
    self.image_id = UUID1

    self.gateway = gateway.Gateway()
    self.task_factory = domain.TaskFactory()
    self.img_factory = self.gateway.get_image_factory(self.context)
    self.image = self.img_factory.new_image(image_id=self.image_id,
                                            disk_format='raw',
                                            container_format='bare')

    task_input = {
        "import_from": "http://cloud.foo/image.raw",
        "import_from_format": "raw",
        "image_properties": {'disk_format': 'raw',
                             'container_format': 'bare'},
    }
    self.task_type = 'import'
    self.task = self.task_factory.new_task(
        self.task_type, TENANT1,
        task_time_to_live=CONF.task.task_time_to_live,
        task_input=task_input)
def set_paths(self):
    """
    Creates all necessary directories under the base cache directory
    """
    self.base_dir = self.conf.image_cache_dir
    if self.base_dir is None:
        msg = _('Failed to read %s from config') % 'image_cache_dir'
        logger.error(msg)
        driver = self.__class__.__module__
        raise exception.BadDriverConfiguration(driver_name=driver,
                                               reason=msg)

    self.incomplete_dir = os.path.join(self.base_dir, 'incomplete')
    self.invalid_dir = os.path.join(self.base_dir, 'invalid')
    self.queue_dir = os.path.join(self.base_dir, 'queue')

    for subdir in (self.incomplete_dir, self.invalid_dir, self.queue_dir):
        utils.safe_mkdirs(subdir)
def setUp(self):
    """Base unit-test fixture: opts, stubs, temp dirs, policy, logging."""
    super(BaseTestCase, self).setUp()
    self._config_fixture = self.useFixture(cfg_fixture.Config())
    # NOTE(bcwaldon): parse_args has to be called to register certain
    # command-line options - specifically we need config_dir for
    # the following policy tests
    config.parse_args(args=[])
    self.addCleanup(CONF.reset)
    # Make exception format-string errors fatal so they surface as
    # test failures instead of being swallowed.
    self.mock_object(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True)
    self.test_dir = self.useFixture(fixtures.TempDir()).path
    self.conf_dir = os.path.join(self.test_dir, 'etc')
    utils.safe_mkdirs(self.conf_dir)
    self.set_policy()

    # Limit the amount of DeprecationWarning messages in the unit test logs
    self.useFixture(glance_fixtures.WarningsFixture())

    # Make sure logging output is limited but still test debug formatting
    self.useFixture(log_fixture.get_logging_handle_error_fixture())
    self.useFixture(glance_fixtures.StandardLogging())
def schedule_delete_from_backend(uri, options, context, image_id, **kwargs):
    """
    Given a uri and a time, schedule the deletion of an image.

    If delayed delete is disabled, delete the image data immediately
    and mark the image 'deleted'.  Otherwise write a queue file named
    after the image id into the scrubber data directory and mark the
    image 'pending_delete'.

    :param uri: location of the image data in the backend store
    :param options: mapping of configuration options
    :param context: request context used for registry updates
    :param image_id: opaque image identifier
    :raises exception.Duplicate: if the image is already queued
    """
    use_delay = config.get_option(options, 'delayed_delete', type='bool',
                                  default=False)
    if not use_delay:
        registry.update_image_metadata(context, image_id,
                                       {'status': 'deleted'})
        try:
            return delete_from_backend(uri, **kwargs)
        except (UnsupportedBackend, exception.NotFound):
            msg = _("Failed to delete image from store (%(uri)s).") % locals()
            logger.error(msg)
            # The image was already marked 'deleted' above; do NOT fall
            # through to the delayed-deletion queueing below, which would
            # flip the status back to 'pending_delete'.
            return

    datadir = config.get_option(options, 'scrubber_datadir')
    scrub_time = config.get_option(options, 'scrub_time', type='int',
                                   default=0)
    delete_time = time.time() + scrub_time
    file_path = os.path.join(datadir, str(image_id))
    utils.safe_mkdirs(datadir)

    if os.path.exists(file_path):
        msg = _("Image id %(image_id)s already queued for delete") % {
            'image_id': image_id}
        raise exception.Duplicate(msg)

    with open(file_path, 'w') as f:
        f.write('\n'.join([uri, str(int(delete_time))]))
    # 0o600 (owner read/write only) -- the py2-only literal '0600' is a
    # SyntaxError on Python 3.
    os.chmod(file_path, 0o600)
    os.utime(file_path, (delete_time, delete_time))

    registry.update_image_metadata(context, image_id,
                                   {'status': 'pending_delete'})
def setUp(self):
    """
    Test to see if the pre-requisites for the image cache are working
    (python-xattr installed and xattr support on the filesystem)
    """
    super(TestImageCacheXattr, self).setUp()

    # NOTE(review): this reads 'disable' but the flags set below are
    # named 'disabled'/'disabled_message' -- confirm which attribute
    # callers actually set.
    if getattr(self, "disable", False):
        return

    self.cache_dir = os.path.join("/", "tmp",
                                  "test.cache.%d"
                                  % random.randint(0, 1000000))
    utils.safe_mkdirs(self.cache_dir)

    # One-time probe: verify python-xattr can be imported at all.
    if not getattr(self, "inited", False):
        try:
            import xattr
        except ImportError:
            self.inited = True
            self.disabled = True
            self.disabled_message = "python-xattr not installed."
            return

    self.inited = True
    self.disabled = False
    self.config(image_cache_dir=self.cache_dir,
                image_cache_driver="xattr",
                image_cache_max_size=1024 * 5,
                registry_host="127.0.0.1",
                registry_port=9191)
    self.cache = image_cache.ImageCache()

    # Second probe: the temp filesystem must actually support xattr
    # writes, not just have the library available.
    if not xattr_writes_supported(self.cache_dir):
        self.inited = True
        self.disabled = True
        self.disabled_message = "filesystem does not support xattr"
        return
def setUp(self):
    """
    Test to see if the pre-requisites for the image cache are working
    (python-xattr installed and xattr support on the filesystem)
    """
    # NOTE(review): this guard previously read getattr(self, 'disable', ...)
    # while every path below assigns ``self.disabled`` -- the early return
    # could never trigger.  Check the attribute that is actually set.
    if getattr(self, 'disabled', False):
        return

    self.cache_dir = os.path.join(
        "/", "tmp", "test.cache.%d" % random.randint(0, 1000000))
    utils.safe_mkdirs(self.cache_dir)

    if not getattr(self, 'inited', False):
        try:
            # Availability probe only; the module itself is not used here.
            import xattr  # noqa: F401
        except ImportError:
            self.inited = True
            self.disabled = True
            self.disabled_message = ("python-xattr not installed.")
            return

    self.inited = True
    self.disabled = False
    self.conf = test_utils.TestConfigOpts({
        'image_cache_dir': self.cache_dir,
        'image_cache_driver': 'xattr',
        'image_cache_max_size': 1024 * 5,
        'registry_host': '0.0.0.0',
        'registry_port': 9191,
    })
    self.cache = image_cache.ImageCache(self.conf)

    if not xattr_writes_supported(self.cache_dir):
        self.inited = True
        self.disabled = True
        self.disabled_message = ("filesystem does not support xattr")
        return
def schedule_delete_from_backend(uri, conf, context, image_id, **kwargs):
    """
    Given a uri and a time, schedule the deletion of an image.

    When ``conf.delayed_delete`` is false the image data is removed
    immediately and the image status set to 'deleted'.  Otherwise a marker
    file (URI on line 1, scheduled epoch time on line 2) is written under
    the scrubber data directory and the status set to 'pending_delete'.

    :param uri: location of the image data in the backing store
    :param conf: configuration object providing the delete options
    :param context: request context used for registry calls
    :param image_id: opaque image identifier
    :param kwargs: forwarded to delete_from_backend()
    :raises exception.Duplicate: if the image is already queued for delete
    """
    conf.register_opts(delete_opts)
    if not conf.delayed_delete:
        registry.update_image_metadata(context, image_id,
                                       {'status': 'deleted'})
        try:
            return delete_from_backend(uri, **kwargs)
        except (UnsupportedBackend,
                exception.StoreDeleteNotSupported,
                exception.NotFound) as e:
            msg = (_("Failed to delete image at %s from store (%s)") %
                   (uri, e.__class__.__name__))
            logger.error(msg)
        # NOTE: a plain return here (instead of the previous
        # ``finally: return``) still avoids falling through to the
        # delayed-deletion logic, but no longer silently swallows
        # exception types that the handler above does not catch.
        return

    datadir = get_scrubber_datadir(conf)
    delete_time = time.time() + conf.scrub_time
    file_path = os.path.join(datadir, str(image_id))
    utils.safe_mkdirs(datadir)

    if os.path.exists(file_path):
        msg = _("Image id %(image_id)s already queued for delete") % {
            'image_id': image_id}
        raise exception.Duplicate(msg)

    # Marker file layout: line 1 is the URI, line 2 the scheduled time.
    with open(file_path, 'w') as f:
        f.write('\n'.join([uri, str(int(delete_time))]))
    os.chmod(file_path, 0o600)
    # Record the scheduled time in the file's timestamps as well.
    os.utime(file_path, (delete_time, delete_time))

    registry.update_image_metadata(context, image_id,
                                   {'status': 'pending_delete'})
def setUp(self):
    """
    Test to see if the pre-requisites for the image cache are working
    (python-xattr installed and xattr support on the filesystem)
    """
    super(TestImageCacheXattr, self).setUp()

    # NOTE(review): this guard previously read getattr(self, 'disable', ...)
    # while every path below assigns ``self.disabled`` -- the early return
    # could never trigger.  Check the attribute that is actually set.
    if getattr(self, 'disabled', False):
        return

    self.cache_dir = os.path.join(
        "/", "tmp", "test.cache.%d" % random.randint(0, 1000000))
    utils.safe_mkdirs(self.cache_dir)

    if not getattr(self, 'inited', False):
        try:
            # Availability probe only; the module itself is not used here.
            import xattr  # noqa: F401
        except ImportError:
            self.inited = True
            self.disabled = True
            self.disabled_message = ("python-xattr not installed.")
            return

    self.inited = True
    self.disabled = False
    self.config(image_cache_dir=self.cache_dir,
                image_cache_driver='xattr',
                image_cache_max_size=1024 * 5,
                registry_host='127.0.0.1',
                registry_port=9191)
    self.cache = image_cache.ImageCache()

    if not xattr_writes_supported(self.cache_dir):
        self.inited = True
        self.disabled = True
        self.disabled_message = ("filesystem does not support xattr")
        return
def setUp(self):
    """Create a scratch directory and the three Glance test servers."""
    self.test_id = random.randint(0, 100000)
    self.test_dir = os.path.join("/", "tmp", "test.%d" % self.test_id)
    utils.safe_mkdirs(self.test_dir)

    self.api_protocol = 'http'
    self.api_port = get_unused_port()
    self.registry_port = get_unused_port()

    self.api_server = ApiServer(self.test_dir,
                                self.api_port,
                                self.registry_port)
    self.registry_server = RegistryServer(self.test_dir,
                                          self.registry_port)
    self.scrubber_daemon = ScrubberDaemon(self.test_dir,
                                          self.registry_port)

    # Track every server's pid file so teardown can reap them all.
    servers = (self.api_server, self.registry_server, self.scrubber_daemon)
    self.pid_files = [server.pid_file for server in servers]
    self.files_to_destroy = []
def set_paths(self):
    """
    Creates all necessary directories under the base cache directory
    """
    key = 'image_cache_dir'
    try:
        self.base_dir = self.options[key]
    except KeyError:
        msg = _('Failed to read %s from config') % key
        logger.error(msg)
        driver = self.__class__.__module__
        raise exception.BadDriverConfiguration(driver_name=driver,
                                               reason=msg)

    # One subdirectory per cache entry state.
    self.incomplete_dir = os.path.join(self.base_dir, 'incomplete')
    self.invalid_dir = os.path.join(self.base_dir, 'invalid')
    self.queue_dir = os.path.join(self.base_dir, 'queue')

    for subdir in (self.incomplete_dir, self.invalid_dir, self.queue_dir):
        utils.safe_mkdirs(subdir)
def write_conf(self, **kwargs):
    """
    Writes the configuration file for the server to its intended
    destination.  Returns the name of the configuration file.
    """
    # Reuse a previously written config file, if any.
    if self.conf_file_name:
        return self.conf_file_name
    if not self.conf_base:
        raise RuntimeError("Subclass did not populate config_base!")

    overrides = self.__dict__.copy()
    if kwargs:
        overrides.update(**kwargs)

    # A config file and paste.ini to use just for this test...we don't want
    # to trample on currently-running Glance servers, now do we?
    conf_dir = os.path.join(self.test_dir, 'etc')
    conf_filepath = os.path.join(conf_dir, "%s.conf" % self.server_name)
    paste_conf_filepath = conf_filepath.replace(".conf", "-paste.ini")
    utils.safe_mkdirs(conf_dir)

    def _render(filepath, template, values):
        # Interpolate the template and persist it at ``filepath``.
        with open(filepath, 'wb') as out:
            out.write(template % values)
            out.flush()
            return out.name

    self.conf_file_name = _render(conf_filepath, self.conf_base, overrides)

    if self.paste_conf_base:
        _render(paste_conf_filepath, self.paste_conf_base, overrides)

    return self.conf_file_name
def __init__(self):
    """Initialise the file-based scrub queue from global configuration."""
    super(ScrubFileQueue, self).__init__()
    # Ensure the queue directory exists before anything is written to it.
    datadir = CONF.scrubber_datadir
    self.scrubber_datadir = datadir
    utils.safe_mkdirs(datadir)
    # Cache the remaining config values this queue consults.
    self.scrub_time = CONF.scrub_time
    self.metadata_encryption_key = CONF.metadata_encryption_key
def __init__(self):
    """Initialise the file-based scrub queue from global configuration."""
    super(ScrubFileQueue, self).__init__()
    # Ensure the queue directory exists before anything is written to it.
    datadir = CONF.scrubber_datadir
    self.scrubber_datadir = datadir
    utils.safe_mkdirs(datadir)