def create_credentials(): """ Create PKI credentials for TLS access to libvirtd. Credentials are not signed by the host CA. This only allows unverified access but removes the need to transfer files between the host and the guest. """ path = FilePath(tempfile.mkdtemp()) try: ca = RootCredential.initialize(path, b"mycluster") NodeCredential.initialize(path, ca, uuid='client') ca_dir = FilePath('/etc/pki/CA') if not ca_dir.exists(): ca_dir.makedirs() path.child(AUTHORITY_CERTIFICATE_FILENAME).copyTo( FilePath('/etc/pki/CA/cacert.pem') ) client_key_dir = FilePath('/etc/pki/libvirt/private') if not client_key_dir.exists(): client_key_dir.makedirs() client_key_dir.chmod(0700) path.child('client.key').copyTo( client_key_dir.child('clientkey.pem') ) path.child('client.crt').copyTo( FilePath('/etc/pki/libvirt/clientcert.pem') ) finally: path.remove()
def parseArgs(self, deployment_config, application_config):
    """
    Load, parse and validate the deployment and application configuration
    files.

    :param deployment_config: Path to the YAML deployment configuration
        file.
    :param application_config: Path to the application configuration file,
        in either Fig or Flocker YAML format.
    :raises UsageError: If either file does not exist, cannot be parsed
        as YAML, or does not describe a valid configuration.
    """
    deployment_config = FilePath(deployment_config)
    application_config = FilePath(application_config)
    if not deployment_config.exists():
        raise UsageError('No file exists at {path}'
                         .format(path=deployment_config.path))
    if not application_config.exists():
        raise UsageError('No file exists at {path}'
                         .format(path=application_config.path))
    # Keep the raw file contents; application_config may be replaced
    # below with a converted Flocker-format document.
    self["deployment_config"] = deployment_config.getContent()
    self["application_config"] = application_config.getContent()
    try:
        deploy_config_obj = safe_load(self["deployment_config"])
    except YAMLError as e:
        raise UsageError(
            ("Deployment configuration at {path} could not be parsed as "
             "YAML:\n\n{error}").format(
                path=deployment_config.path,
                error=str(e)
            )
        )
    try:
        app_config_obj = safe_load(self["application_config"])
    except YAMLError as e:
        raise UsageError(
            ("Application configuration at {path} could not be parsed as "
             "YAML:\n\n{error}").format(
                path=application_config.path,
                error=str(e)
            )
        )
    try:
        # Try the Fig format first; fall back to native Flocker format.
        fig_configuration = FigConfiguration(app_config_obj)
        if fig_configuration.is_valid_format():
            applications = fig_configuration.applications()
            # Normalise to Flocker YAML so downstream consumers see a
            # single format.
            self['application_config'] = (
                applications_to_flocker_yaml(applications)
            )
        else:
            configuration = FlockerConfiguration(app_config_obj)
            if configuration.is_valid_format():
                applications = configuration.applications()
            else:
                raise ConfigurationError(
                    "Configuration is not a valid Fig or Flocker format."
                )
        self['deployment'] = model_from_configuration(
            applications=applications,
            deployment_configuration=deploy_config_obj)
    except ConfigurationError as e:
        raise UsageError(str(e))
def fromCommandLine(cls, reactor, argv):
    """
    Build an instance from command-line arguments.

    Reads the identity key, known-hosts file and SSH agent socket
    described by the parsed options and the environment.
    """
    config = EchoOptions()
    config.parseOptions(argv)
    ui = ConsoleUI(lambda: open("/dev/tty", "r+"))

    keys = []
    identity = config["identity"]
    if identity:
        identityPath = os.path.expanduser(identity)
        if os.path.exists(identityPath):
            keys.append(readKey(identityPath))

    hostsPath = FilePath(os.path.expanduser(config["knownhosts"]))
    knownHosts = (
        KnownHostsFile.fromPath(hostsPath) if hostsPath.exists() else None
    )

    # Use the SSH agent unless disabled or its socket is unavailable.
    agentEndpoint = None
    if not config["no-agent"] and "SSH_AUTH_SOCK" in os.environ:
        agentEndpoint = UNIXClientEndpoint(
            reactor, os.environ["SSH_AUTH_SOCK"])

    return cls(reactor, ui, config["host"], config["port"],
               config["username"], config["password"], keys,
               knownHosts, agentEndpoint)
def get_known_hosts():
    """
    Load the user's SSH known-hosts file.

    :return: A ``KnownHostsFile`` for ``~/.ssh/known_hosts``, or ``None``
        when that file does not exist.
    """
    path = FilePath(os.path.expanduser("~/.ssh/known_hosts"))
    if not path.exists():
        return None
    return KnownHostsFile.fromPath(path)
def activate(self):
    """
    Validate plugin configuration against the starbound.config file.

    :raises FatalPluginError: If the starbound path is unset, the config
        file is missing or unparseable, or its gamePort disagrees with
        the configured upstream_port.
    """
    super(StarboundConfigManager, self).activate()
    try:
        configuration_file = FilePath(
            self.config.starbound_path).child('starbound.config')
        if not configuration_file.exists():
            raise FatalPluginError(
                "Could not open starbound configuration file. Tried path: %s" % configuration_file)
    except AttributeError:
        # self.config has no starbound_path attribute at all.
        raise FatalPluginError(
            "The starbound path (starbound_path) is not set in the configuration.")
    try:
        with configuration_file.open() as f:
            starbound_config = json.load(f)
    except Exception as e:
        raise FatalPluginError(
            "Could not parse the starbound configuration file as JSON. Error given from JSON decoder: %s" % str(e))
    # The proxy's upstream port must match the game server's own port.
    if self.config.upstream_port != starbound_config['gamePort']:
        raise FatalPluginError(
            "The starbound gamePort option (%d) does not match the config.json upstream_port (%d)." % (starbound_config['gamePort'], self.config.upstream_port))
    # Default spawn coordinates.
    # NOTE(review): assumes defaultWorldCoordinate is colon-separated —
    # confirm against the starbound config format.
    self._spawn = starbound_config['defaultWorldCoordinate'].split(":")
def load_or_create_client_key(key_file): """Load the ACME account key from a file, creating it if it does not exist. Args: key_file (str): name of the file to use as the account key """ # this is based on txacme.endpoint.load_or_create_client_key, but doesn't # hardcode the 'client.key' filename acme_key_file = FilePath(key_file) if acme_key_file.exists(): logger.info("Loading ACME account key from '%s'", acme_key_file) key = serialization.load_pem_private_key( acme_key_file.getContent(), password=None, backend=default_backend() ) else: logger.info("Saving new ACME account key to '%s'", acme_key_file) key = generate_private_key("rsa") acme_key_file.setContent( key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ) ) return JWKRSA(key=key)
def fromConfig(cls, reactor):
    """
    Build an instance from the module-level ``_CONFIG`` mapping.

    Reads the identity key, known-hosts file, optional password and SSH
    agent socket described by the configuration and the environment.
    """
    keys = []
    if "identity" in _CONFIG:
        identityPath = os.path.expanduser(_CONFIG["identity"])
        if os.path.exists(identityPath):
            keys.append(readKey(identityPath))

    hostsPath = FilePath(os.path.expanduser(_CONFIG["knownhosts"]))
    knownHosts = (
        KnownHostsFile.fromPath(hostsPath) if hostsPath.exists() else None
    )

    # Use the SSH agent unless disabled or its socket is unavailable.
    agentEndpoint = None
    if "no-agent" not in _CONFIG and "SSH_AUTH_SOCK" in os.environ:
        agentEndpoint = UNIXClientEndpoint(
            reactor, os.environ["SSH_AUTH_SOCK"])

    password = _CONFIG["password"] if "password" in _CONFIG else None

    return cls(
        reactor, _CONFIG["host"], _CONFIG["port"], _CONFIG["username"],
        password, keys, knownHosts, agentEndpoint)
def _check_cert_directory(self):
    """
    Validate the ``cert-directory`` option and normalise it to a
    ``FilePath`` stored back into the options mapping.

    :raises UsageError: If the path does not exist or is not a directory.
    """
    directory = FilePath(self['cert-directory'])
    self['cert-directory'] = directory
    if not directory.exists():
        raise UsageError("{} does not exist".format(directory.path))
    if not directory.isdir():
        raise UsageError("{} is not a directory".format(directory.path))
def create_virtualenv(root): """ Create a virtualenv in ``root``. :param FilePath root: The directory in which to install a virtualenv. :returns: A ``VirtualEnv`` instance. """ # We call ``virtualenv`` as a subprocess rather than as a library, so that # we can turn off Python byte code compilation. run_command( ['virtualenv', '--python=/usr/bin/python2.7', '--quiet', root.path], added_env=dict(PYTHONDONTWRITEBYTECODE='1')) # XXX: Virtualenv doesn't link to pyc files when copying its bootstrap # modules. See https://github.com/pypa/virtualenv/issues/659 for module_name in virtualenv.REQUIRED_MODULES: py_base = root.descendant(['lib', 'python2.7', module_name]) py = py_base.siblingExtension('.py') if py.exists() and py.islink(): pyc = py_base.siblingExtension('.pyc') py_target = py.realpath() pyc_target = FilePath( py_target.splitext()[0]).siblingExtension('.pyc') if pyc.exists(): pyc.remove() if pyc_target.exists(): pyc_target.linkTo(pyc) return VirtualEnv(root=root)
def copyPackage(title):
    """
    Copy package directory to db path using a file lock to avoid potential
    concurrency race conditions.

    @param title: string to use in log entry
    @type title: C{str}
    """
    dbpath = FilePath(TimezoneCache.getDBPath())
    pkgpath = TimezoneCache.FilteredFilePath(TimezoneCache._getPackageDBPath())
    # Serialise concurrent copiers with an on-disk lock next to the target.
    lockfile = FilesystemLock(dbpath.path + ".lock")
    result = lockfile.lock()
    try:
        # Only the lock holder copies, and only when the target is absent.
        if result and not dbpath.exists():
            log.info(
                "{title} timezones from {pkg} to {to}",
                title=title, pkg=pkgpath.path, to=dbpath.path
            )
            # Copy over the entire package
            pkgpath.copyFilteredDirectoryTo(dbpath)
    finally:
        if result:
            lockfile.unlock()
def activate(self):
    """
    Validate plugin configuration against the starbound.config file.

    :raises FatalPluginError: If the starbound path is unset, the config
        file is missing or unparseable, or its gameServerPort disagrees
        with the configured upstream_port.
    """
    super(StarboundConfigManager, self).activate()
    try:
        configuration_file = FilePath(
            self.config.starbound_path).child('starbound.config')
        if not configuration_file.exists():
            raise FatalPluginError(
                'Could not open starbound configuration file. '
                'Tried path: {}'.format(configuration_file))
    except AttributeError:
        # self.config lacks a starbound_path attribute entirely.
        raise FatalPluginError('The starbound path (starbound_path)'
                               ' is not set in the configuration.')
    try:
        with configuration_file.open() as f:
            starbound_config = json.load(f)
    except Exception as e:
        raise FatalPluginError(
            'Could not parse the starbound configuration file as JSON.'
            'Error given from JSON decoder: {}'.format(e))
    # The proxy's upstream port must match the game server's own port.
    if self.config.upstream_port != starbound_config['gameServerPort']:
        raise FatalPluginError(
            'The starbound gameServerPort option ({}) does not match the '
            'config.json upstream_port ({}).'.format(
                starbound_config['gameServerPort'],
                self.config.upstream_port))
def get_current_version():
    """
    Read the resource version from ``manifest.json``.

    :return: The integer ``version`` field of the manifest, or ``0`` when
        no manifest file exists yet.
    """
    # Use a distinct name for the FilePath: the original rebound
    # ``manifest`` from a FilePath to a dict, which obscured the code.
    manifest_file = FilePath(config.resources_directory).child("manifest.json")
    if not manifest_file.exists():
        return 0
    with manifest_file.open("r") as f:
        manifest = json.load(f)
    return int(manifest["version"])
def setUp(self):
    """
    Add our example directory to the path and record which modules
    are currently loaded.
    """
    # Remember interpreter state so it can be restored after the test.
    self.originalPath = sys.path[:]
    self.originalModules = sys.modules.copy()
    # Capture anything the example writes to stderr/stdout.
    self.fakeErr = StringIO()
    self.patch(sys, 'stderr', self.fakeErr)
    self.fakeOut = StringIO()
    self.patch(sys, 'stdout', self.fakeOut)
    # Get branch root
    here = FilePath(__file__).parent().parent().parent().parent()
    # Find the example script within this branch
    for childName in self.exampleRelativePath.split('/'):
        here = here.child(childName)
        if not here.exists():
            raise SkipTest("Examples (%s) not found - cannot test" % (here.path, ))
    self.examplePath = here
    # Add the example parent folder to the Python path
    sys.path.append(self.examplePath.parent().path)
    # Import the example as a module
    moduleName = self.examplePath.basename().split('.')[0]
    self.example = __import__(moduleName)
def load_revokations(self):
    """
    Load PEM formatted certificates that are no longer trustworthy and
    store the subject and issuer of each.

    ``self.revoke_file`` is the path to a file that contains glob-like
    patterns naming PEM-formatted certificates.  The file is only
    re-parsed when its modification time has advanced since the last
    load; results are stored in ``self.revoke_state['revoked']``.
    """
    revoke_file = self.revoke_file
    revoke_state = self.revoke_state
    if revoke_file is not None:
        last_mod_time = revoke_state['last_mod_time']
        fp = FilePath(revoke_file)
        if not fp.exists():
            return
        mod_time = fp.getModificationTime()
        # Skip re-parsing when the file is unchanged since the last load.
        if last_mod_time is None or mod_time > last_mod_time:
            log.msg("[INFO] Loading revoked certificate files specified in '{0}'.".format(
                revoke_file))
            revoke_state['last_mod_time'] = mod_time
            revoked = set([])
            with open(revoke_file) as f:
                for line in f:
                    pattern = line.rstrip('\r\n')
                    # Blank lines and '#' lines are comments.
                    if pattern == '' or pattern.startswith('#'):
                        continue
                    for path in glob.glob(pattern):
                        certs = [pem_cert_to_x509(cert)
                                 for cert in pem.parse_file(path)]
                        for certificate in certs:
                            # Identify each revoked certificate by its
                            # (subject, issuer) component tuples.
                            revoked.add((
                                tuple(certificate.get_subject().get_components()),
                                tuple(certificate.get_issuer().get_components())))
            revoke_state['revoked'] = revoked
def get_client(options):
    """
    Build a Flocker REST client from command-line options.

    If ``cluster.yml`` exists it supplies the user and certificate
    directory; otherwise ``--certs-path``, ``--user`` and
    ``--control-service`` must all be provided.

    :param options: Mapping of parsed command-line options.
    :raises UsageError: If a required option is missing.
    :return: The client produced by ``txflocker_get_client``.
    """
    cluster = FilePath(options["cluster-yml"])
    if cluster.exists():
        # safe_load: never instantiate arbitrary Python objects from a
        # (potentially hand-edited) YAML file; yaml.load without a Loader
        # is unsafe.
        config = yaml.safe_load(cluster.open())
        certificates_path = cluster.parent()
        user = config["users"][0]
        control_service = None  # figure it out based on cluster.yml
    else:
        certificates_path = FilePath(options["certs-path"])
        if options["user"] is None:
            raise UsageError("must specify --user")
        user = options["user"]
        if options["control-service"] is None:
            raise UsageError("must specify --control-service")
        control_service = options["control-service"]
    user_certificate_filename = "%s.crt" % (user,)
    user_key_filename = "%s.key" % (user,)
    return txflocker_get_client(
        certificates_path=certificates_path,
        user_certificate_filename=user_certificate_filename,
        user_key_filename=user_key_filename,
        target_hostname=control_service,
    )
def inspect(doc):
    """
    Describe the filesystem object named by a JSON document.

    :param doc: A JSON string with a ``path`` key.
    :return: A dict with existence, type, size/sha1 (for regular files),
        ownership, permissions and timestamps.
    """
    data = json.loads(doc)
    path = FilePath(data['path'])
    ret = {'kind': 'file', 'path': path.path, 'exists': path.exists()}
    if not ret['exists']:
        return ret
    if path.isdir():
        ret['filetype'] = 'dir'
    elif path.isfile():
        ret['filetype'] = 'file'
        ret['size'] = path.statinfo.st_size
        h = sha1()
        # Hash in binary mode and close the handle when done; the
        # original leaked the file object and read in text mode.
        with open(path.path, 'rb') as fh:
            while True:
                chunk = fh.read(4096)
                if not chunk:
                    break
                h.update(chunk)
        ret['sha1'] = h.hexdigest()
    ret['owner'] = pwd.getpwuid(path.getUserID()).pw_name
    ret['group'] = grp.getgrgid(path.getGroupID()).gr_name
    ret['perms'] = permsString(path.getPermissions())
    ret['ctime'] = int(path.statinfo.st_ctime)
    ret['mtime'] = int(path.statinfo.st_mtime)
    ret['atime'] = int(path.statinfo.st_atime)
    return ret
def copyPackage(title):
    """
    Copy package directory to db path using a file lock to avoid potential
    concurrency race conditions.

    @param title: string to use in log entry
    @type title: C{str}
    """
    destination = FilePath(TimezoneCache.getDBPath())
    source = TimezoneCache.FilteredFilePath(
        TimezoneCache._getPackageDBPath())
    lock = FilesystemLock(destination.path + ".lock")
    acquired = lock.lock()
    try:
        # Copy only when we hold the lock and nobody copied before us.
        if acquired and not destination.exists():
            log.info("{title} timezones from {pkg} to {to}",
                     title=title, pkg=source.path, to=destination.path)
            # Copy over the entire package
            source.copyFilteredDirectoryTo(destination)
    finally:
        if acquired:
            lock.unlock()
def setUp(self):
    """
    Add our example directory to the path and record which modules
    are currently loaded.
    """
    # Remember interpreter state so it can be restored after the test.
    self.originalPath = sys.path[:]
    self.originalModules = sys.modules.copy()

    # Python usually expects native strs to be written to sys.stdout/stderr
    self.fakeErr = NativeStringIO()
    self.patch(sys, 'stderr', self.fakeErr)
    self.fakeOut = NativeStringIO()
    self.patch(sys, 'stdout', self.fakeOut)

    # Get documentation root
    try:
        here = FilePath(os.environ['TOX_INI_DIR']).child('docs')
    except KeyError:
        raise SkipTest(
            "Examples not found ($TOX_INI_DIR unset) - cannot test",
        )

    # Find the example script within this branch
    for childName in self.exampleRelativePath.split('/'):
        here = here.child(childName)
        if not here.exists():
            raise SkipTest("Examples (%s) not found - cannot test" % (here.path, ))
    self.examplePath = here

    # Add the example parent folder to the Python path
    sys.path.append(self.examplePath.parent().path)

    # Import the example as a module
    moduleName = self.examplePath.basename().split('.')[0]
    self.example = __import__(moduleName)
def test_nonexistentPaths(self):
    """
    Verify that L{modules.walkModules} ignores entries in sys.path which
    do not exist in the filesystem.
    """
    # A real package on disk...
    existentPath = FilePath(self.mktemp())
    os.makedirs(existentPath.child("test_package").path)
    existentPath.child("test_package").child("__init__.py").setContent("")
    # ...and a path that was never created.
    nonexistentPath = FilePath(self.mktemp())
    self.failIf(nonexistentPath.exists())
    originalSearchPaths = sys.path[:]
    sys.path[:] = [existentPath.path]
    try:
        expected = [modules.getModule("test_package")]
        beforeModules = list(modules.walkModules())
        # Appending a nonexistent entry must not change the walk results.
        sys.path.append(nonexistentPath.path)
        afterModules = list(modules.walkModules())
    finally:
        # Restore the interpreter's import path.
        sys.path[:] = originalSearchPaths
    self.assertEqual(beforeModules, expected)
    self.assertEqual(afterModules, expected)
def activate(self):
    """
    Validate plugin configuration against the starbound.config file.

    :raises FatalPluginError: If the starbound path is unset, the config
        file is missing or unparseable, or its gameServerPort disagrees
        with the configured upstream_port.
    """
    super(StarboundConfigManager, self).activate()
    try:
        configuration_file = FilePath(
            self.config.starbound_path
        ).child('starbound.config')
        if not configuration_file.exists():
            raise FatalPluginError(
                'Could not open starbound configuration file. '
                'Tried path: {}'.format(configuration_file)
            )
    except AttributeError:
        # self.config lacks a starbound_path attribute entirely.
        raise FatalPluginError(
            'The starbound path (starbound_path)'
            ' is not set in the configuration.'
        )
    try:
        with configuration_file.open() as f:
            starbound_config = json.load(f)
    except Exception as e:
        raise FatalPluginError(
            'Could not parse the starbound configuration file as JSON.'
            'Error given from JSON decoder: {}'.format(e)
        )
    # The proxy's upstream port must match the game server's own port.
    if self.config.upstream_port != starbound_config['gameServerPort']:
        raise FatalPluginError(
            'The starbound gameServerPort option ({}) does not match the '
            'config.json upstream_port ({}).'.format(
                starbound_config['gameServerPort'],
                self.config.upstream_port
            )
        )
def getTemplate(self, facetName):
    """
    Locate the ``.mak`` template for ``facetName`` next to the node's
    module file.

    :return: The template ``FilePath`` if it exists, otherwise ``None``
        (implicitly).
    """
    candidate = FilePath(self.node.__file__).sibling(facetName + ".mak")
    if candidate.exists():
        return candidate
class AddOptions(usage.Options):
    """
    Command-line options for creating a new magic-folder.
    """
    # The validated local directory, set by parseArgs().
    local_dir = None
    synopsis = "LOCAL_DIR"
    optParameters = [
        ("poll-interval", "p", "60", "How often to ask for updates"),
        ("name", "n", None, "The name of this magic-folder"),
        ("author", "A", None, "Our name for Snapshots authored here"),
    ]
    description = ("Create a new magic-folder.")

    def parseArgs(self, local_dir=None):
        """
        Validate the single LOCAL_DIR argument: it must name an existing
        directory.
        """
        if local_dir is None:
            raise usage.UsageError(
                "Must specify a single argument: the local directory")
        self.local_dir = FilePath(local_dir)
        if not self.local_dir.exists():
            raise usage.UsageError("'{}' doesn't exist".format(local_dir))
        if not self.local_dir.isdir():
            raise usage.UsageError("'{}' isn't a directory".format(local_dir))

    def postOptions(self):
        """
        Fill in the author from the environment and validate --name and
        --poll-interval.
        """
        super(AddOptions, self).postOptions()
        _fill_author_from_environment(self)
        if self["name"] is None:
            raise usage.UsageError("Must specify the --name option")
        try:
            # int() may itself raise ValueError, so malformed and
            # non-positive values share one error path.
            if int(self['poll-interval']) <= 0:
                raise ValueError("should be positive")
        except ValueError:
            raise usage.UsageError(
                "--poll-interval must be a positive integer")
def isDocker(self, _initCGroupLocation="/proc/1/cgroup"):
    """
    Check if the current platform is Linux in a Docker container.

    @param _initCGroupLocation: Path of init's cgroup file; overridable
        for testing.
    @return: C{True} if the current platform has been detected as Linux
        inside a Docker container.
    @rtype: C{bool}
    """
    if not self.isLinux():
        return False

    # Local import — presumably to avoid an import cycle; confirm.
    from twisted.python.filepath import FilePath

    # Ask for the cgroups of init (pid 1)
    initCGroups = FilePath(_initCGroupLocation)
    if initCGroups.exists():
        # The cgroups file looks like "2:cpu:/". The third element will
        # begin with /docker if it is inside a Docker container.
        controlGroups = [
            x.split(b":")
            for x in initCGroups.getContent().split(b"\n")
        ]
        for group in controlGroups:
            if len(group) == 3 and group[2].startswith(b"/docker/"):
                # If it starts with /docker/, we're in a docker container
                return True
    return False
def create_virtualenv(root): """ Create a virtualenv in ``root``. :param FilePath root: The directory in which to install a virtualenv. :returns: A ``VirtualEnv`` instance. """ # We call ``virtualenv`` as a subprocess rather than as a library, so that # we can turn off Python byte code compilation. run_command( ['virtualenv', '--python=/usr/bin/python2.7', '--quiet', root.path], added_env=dict(PYTHONDONTWRITEBYTECODE='1') ) # XXX: Virtualenv doesn't link to pyc files when copying its bootstrap # modules. See https://github.com/pypa/virtualenv/issues/659 for module_name in virtualenv.REQUIRED_MODULES: py_base = root.descendant( ['lib', 'python2.7', module_name]) py = py_base.siblingExtension('.py') if py.exists() and py.islink(): pyc = py_base.siblingExtension('.pyc') py_target = py.realpath() pyc_target = FilePath( py_target.splitext()[0]).siblingExtension('.pyc') if pyc.exists(): pyc.remove() if pyc_target.exists(): pyc_target.linkTo(pyc) return VirtualEnv(root=root)
def get_device_path(self, blockdevice_id):
    """
    Return the OS device path of an attached Cinder block device.

    Prefers the udev ``virtio-<id prefix>`` symlink; falls back to the
    device path reported by Cinder's attachment record.

    :param blockdevice_id: The Cinder volume ID.
    :raises UnknownVolume: If Cinder has no volume with this ID.
    :raises UnattachedVolume: If the volume has no attachment.
    :return: A ``FilePath`` naming the block device.
    """
    # libvirt does not return the correct device path when additional
    # disks have been attached using a client other than cinder. This is
    # expected behaviour within Cinder and libvirt
    # See https://bugs.launchpad.net/cinder/+bug/1387945 and
    # http://libvirt.org/formatdomain.html#elementsDisks (target section)
    # However, the correct device is named as a udev symlink which includes
    # the first 20 characters of the blockedevice_id.
    device_path = FilePath(
        "/dev/disk/by-id/virtio-{}".format(blockdevice_id[:20]))
    if not device_path.exists():
        # If the device path does not exist, either virtio driver is
        # not being used (e.g. Rackspace), or the user has modified
        # their udev rules. The following code relies on Cinder
        # returning the correct device path, which appears to work
        # for Rackspace and will work with virtio if no disks have
        # been attached outside Cinder.
        try:
            cinder_volume = self.cinder_volume_manager.get(blockdevice_id)
        except CinderNotFound:
            raise UnknownVolume(blockdevice_id)
        # As far as we know you can not have more than one attachment,
        # but, perhaps we're wrong and there should be a test for the
        # multiple attachment case. FLOC-1854.
        try:
            [attachment] = cinder_volume.attachments
        except ValueError:
            raise UnattachedVolume(blockdevice_id)
        device_path = FilePath(attachment['device'])
        # It could be attached somewhere else...
        # https://clusterhq.atlassian.net/browse/FLOC-1830
    return device_path
class JoinOptions(usage.Options):
    """
    Command-line options for joining an existing magic-folder using an
    invite code.
    """
    synopsis = "INVITE_CODE LOCAL_DIR"
    dmd_write_cap = ""
    magic_readonly_cap = ""
    optParameters = [
        ("poll-interval", "p", "60", "How often to ask for updates"),
        ("name", "n", None, "Name for the new magic-folder"),
        ("author", "A", None, "Author name for Snapshots in this magic-folder"),
    ]

    def parseArgs(self, invite_code, local_dir):
        """
        Validate INVITE_CODE and LOCAL_DIR; the latter must name an
        existing directory.
        """
        super(JoinOptions, self).parseArgs()
        try:
            # int() may itself raise ValueError, so malformed and
            # non-positive values share one error path.
            if int(self['poll-interval']) <= 0:
                raise ValueError("should be positive")
        except ValueError:
            raise usage.UsageError(
                "--poll-interval must be a positive integer")
        self.local_dir = FilePath(local_dir)
        if not self.local_dir.exists():
            raise usage.UsageError("'{}' doesn't exist".format(local_dir))
        if not self.local_dir.isdir():
            raise usage.UsageError("'{}' isn't a directory".format(local_dir))
        self.invite_code = to_bytes(argv_to_unicode(invite_code))

    def postOptions(self):
        """
        Fill in the author from the environment and require --name.
        """
        super(JoinOptions, self).postOptions()
        _fill_author_from_environment(self)
        if self["name"] is None:
            raise usage.UsageError("Must specify the --name option")
def setUp(self):
    """
    Add our example directory to the path and record which modules are
    currently loaded.
    """
    # Capture output; the saved originals are presumably restored in
    # tearDown — confirm against the test class.
    self.fakeErr = StringIO()
    self.originalErr, sys.stderr = sys.stderr, self.fakeErr
    self.fakeOut = StringIO()
    self.originalOut, sys.stdout = sys.stdout, self.fakeOut
    # Remember interpreter state so it can be restored after the test.
    self.originalPath = sys.path[:]
    self.originalModules = sys.modules.copy()
    # Get branch root
    here = FilePath(__file__).parent().parent().parent().parent()
    # Find the example script within this branch
    for childName in self.exampleRelativePath.split('/'):
        here = here.child(childName)
        if not here.exists():
            raise SkipTest(
                "Examples (%s) not found - cannot test" % (here.path,))
    self.examplePath = here
    # Add the example parent folder to the Python path
    sys.path.append(self.examplePath.parent().path)
    # Import the example as a module
    moduleName = self.examplePath.basename().split('.')[0]
    self.example = __import__(moduleName)
def fromCommandLine(cls, reactor, argv):
    """
    Build an instance from command-line arguments.

    Reads the identity key, known-hosts file and SSH agent socket
    described by the parsed options and the environment.
    """
    config = EchoOptions()
    config.parseOptions(argv)

    keys = []
    if config["identity"]:
        identityPath = os.path.expanduser(config["identity"])
        if os.path.exists(identityPath):
            keys.append(readKey(identityPath))

    hostsPath = FilePath(os.path.expanduser(config["knownhosts"]))
    knownHosts = (
        KnownHostsFile.fromPath(hostsPath) if hostsPath.exists() else None
    )

    # Use the SSH agent unless disabled or its socket is unavailable.
    agentEndpoint = None
    if not config["no-agent"] and "SSH_AUTH_SOCK" in os.environ:
        agentEndpoint = UNIXClientEndpoint(
            reactor, os.environ["SSH_AUTH_SOCK"])

    return cls(
        reactor, config["host"], config["port"], config["username"],
        config["password"], keys, knownHosts, agentEndpoint)
def checkTrunkCheckout(self, project):
    """
    Assert that a trunk checkout of the given project exists.
    """
    trunk = FilePath(self.paths).child(project).child('trunk')
    self.assertTrue(trunk.exists(), "%r did not exist." % (trunk.path, ))
def test_changeCurrentBranchDeletesUnknown(self):
    """
    If L{BranchManager.changeProjectBranch} creates a new working copy, it
    doesn't contain extra unversioned files from the I{trunk} working
    copy.
    """
    projectName = 'Quux'
    branchName = 'foo'
    self.createRepository(
        projectName, {'trunk': {}, 'branches': {branchName: {}}})
    # Get a trunk checkout
    self.manager.changeProjectBranch(
        projectName, 'trunk', self.uri(projectName, 'trunk'))
    # Here is some unversioned junk in the trunk working copy
    self.modifyTrunk(projectName, "junk", "garbage")
    # Switching to the branch must not drag the junk file along.
    self.manager.changeProjectBranch(projectName, branchName)
    junk = FilePath(self.paths).descendant([
        projectName, "branches", branchName, "junk"])
    self.assertFalse(junk.exists())
def copyPackage(title):
    """
    Copy package directory to db path using a temporary sibling to avoid
    potential concurrency race conditions.

    @param title: string to use in log entry
    @type title: C{str}
    """
    dbpath = FilePath(TimezoneCache.getDBPath())
    pkgpath = TimezoneCache.FilteredFilePath(TimezoneCache._getPackageDBPath())
    log.info(
        "{title} timezones from {pkg} to {to}",
        title=title, pkg=pkgpath.path, to=dbpath.path
    )
    # Use temp directory to copy to first
    temp = dbpath.temporarySibling()
    pkgpath.copyFilteredDirectoryTo(temp)
    # Move to actual path if it still does not exist; otherwise another
    # process won the race and our copy is discarded.
    if not dbpath.exists():
        temp.moveTo(dbpath)
    else:
        temp.remove()
def test_create(self):
    """
    You can create a directory from a template
    """
    # Build a template tree: dir1/foo and dir1/dir2/bar.
    t_root = FilePath(self.mktemp())
    t_root.makedirs()
    d1 = t_root.child('dir1')
    d1.makedirs()
    f1 = d1.child('foo')
    f1.setContent('foo content')
    d2 = d1.child('dir2')
    d2.makedirs()
    f2 = d2.child('bar')
    f2.setContent('bar content')

    dst = FilePath(self.mktemp())
    d = Directory(dst.path)

    # fake template root
    d.template_root = t_root

    d.create('dir1')
    # The destination must mirror the template's contents recursively.
    self.assertTrue(dst.exists())
    self.assertEqual(dst.child('foo').getContent(), 'foo content')
    self.assertTrue(dst.child('dir2').exists())
    self.assertEqual(dst.child('dir2').child('bar').getContent(), 'bar content')
def _get_device_path_virtio_blk(self, volume):
    """
    Return the real device path of an attached volume via the serial
    number which the virtio_blk driver exposes under /dev/disk/by-id.

    OpenStack sets a serial number containing the first 20 characters of
    the Cinder block device ID; see
    https://github.com/openstack/nova/commit/3a47c02c58cefed0e230190b4bcef14527c82709 # noqa
    and https://bugs.launchpad.net/nova/+bug/1004328.  The udev daemon
    reads that serial and creates a symlink to the canonical virtio_blk
    device path.

    The symlink is used because libvirt does not return the correct
    device path when additional disks were attached by a client other
    than cinder — expected behaviour within Cinder and libvirt; see
    https://bugs.launchpad.net/cinder/+bug/1387945 and
    http://libvirt.org/formatdomain.html#elementsDisks (target section).

    :param volume: The Cinder ``Volume`` which is attached.
    :raises UnattachedVolume: If the udev symlink does not exist.
    :returns: ``FilePath`` of the device created by the virtio_blk
        driver.
    """
    symlink = FilePath(
        "/dev/disk/by-id/virtio-{}".format(volume.id[:20])
    )
    if not symlink.exists():
        raise UnattachedVolume(volume.id)
    # Resolve the udev symlink to the canonical device node.
    return symlink.realpath()
def isDocker(self, _initCGroupLocation="/proc/1/cgroup"):
    """
    Check if the current platform is Linux in a Docker container.

    @param _initCGroupLocation: Path of init's cgroup file; overridable
        for testing.
    @return: C{True} if the current platform has been detected as Linux
        inside a Docker container.
    @rtype: C{bool}
    """
    if not self.isLinux():
        return False

    from twisted.python.filepath import FilePath

    # Inspect the cgroups of init (pid 1).
    cgroupFile = FilePath(_initCGroupLocation)
    if not cgroupFile.exists():
        return False

    # Each line looks like "2:cpu:/"; the third field begins with
    # /docker/ when running inside a Docker container.
    for line in cgroupFile.getContent().split(b"\n"):
        fields = line.split(b":")
        if len(fields) == 3 and fields[2].startswith(b"/docker/"):
            return True
    return False
def monitoring_check(checker, lasterrors_path, from_email, what, stdout, stderr):
    """
    Run ``checker`` and email a report when its error output differs from
    the previous run's.

    :param checker: Callable taking (stdout, error_stream) and returning
        a Deferred.
    :param lasterrors_path: Path of the file remembering the previous
        run's error output.
    :param from_email: Sender address for the failure report.
    :param what: Description of what is being monitored.
    :return: A Deferred firing once checking (and any reporting) is done.
    """
    error_stream = StringIO()
    lasterrors = None
    lasterrors_fp = FilePath(lasterrors_path)
    if lasterrors_fp.exists():
        lasterrors = lasterrors_fp.getContent()

    d = checker(stdout, error_stream)
    def cb(x):
        # Runs for both success and failure (attached with addBoth).
        if isinstance(x, Failure):
            print >>stderr, str(x)
            if hasattr(x.value, 'response'):
                print >>stderr, x.value.response

        errors = error_stream.getvalue()
        print >>stderr, errors
        # Only report when the errors changed since the last run, to
        # avoid sending identical emails repeatedly.
        if errors != lasterrors:
            d2 = send_monitoring_report(errors, from_email, what)
            def _sent(ign):
                # Remember what was reported; raising propagates a
                # failure so callers notice a report was sent.
                lasterrors_fp.setContent(errors)
                raise Exception("Sent failure report.")
            def _err(f):
                print >>stderr, str(f)
                return f
            d2.addCallbacks(_sent, _err)
            return d2

    d.addBoth(cb)
    return d
def test_no_config_directory(self):
    """The config file's parent directory is created if it doesn't exist."""
    config_path = FilePath(self.mktemp()).child(b"config.json")
    service = VolumeService(config_path, None, reactor=Clock())
    service.startService()
    self.assertTrue(config_path.exists())
def checkTrunkCheckout(self, project):
    """
    Assert that a trunk checkout of the given project exists.
    """
    workingCopy = FilePath(self.paths).child(project).child('trunk')
    message = "%r did not exist." % (workingCopy.path,)
    self.assertTrue(workingCopy.exists(), message)
def makeService(self, options):
    """
    Build the Bravo service from the configuration file named by the
    ``config`` option, searching ``self.locations`` when the default
    option value is in use.

    :raises RuntimeError: If no configuration file can be found.
    """
    # Grab our configuration file's path.
    conf = options["config"]
    # If config is default value, check locations for configuration file.
    if conf == options.optParameters[0][2]:
        for location in self.locations:
            path = FilePath(os.path.join(location, conf))
            if path.exists():
                break
    else:
        path = FilePath(conf)
    # NOTE(review): reconstructed structure — if no location matched,
    # ``path`` is the last candidate and the check below raises; confirm.
    if not path.exists():
        raise RuntimeError("Couldn't find config file %r" % conf)
    # Create our service and return it.
    from bravo.service import service
    return service(path)
def stop_pipeline(self):
    """
    Put the GStreamer pipeline back into READY state and delete the HLS
    playlist and segment files it produced.
    """
    self.pipeline.set_state(Gst.State.READY)
    playlist = FilePath("playlist.m3u8")
    # Remove every generated media segment.
    for segment in FilePath("./").globChildren("segment*"):
        segment.remove()
    if playlist.exists() and playlist.isfile():
        playlist.remove()
def main():
    """
    Start the dvol Docker plugin: ensure the plugin and volume
    directories exist, remove any stale Unix socket, and serve the
    adapter on it.
    """
    plugins_dir = FilePath("/run/docker/plugins/")
    if not plugins_dir.exists():
        plugins_dir.makedirs()

    volumes_dir = FilePath("/var/lib/dvol/volumes")
    if not volumes_dir.exists():
        volumes_dir.makedirs()
    voluminous = Voluminous(volumes_dir.path)

    # A previous run may have left a stale socket behind.
    socket_file = plugins_dir.child("%s.sock" % (VOLUME_DRIVER_NAME, ))
    if socket_file.exists():
        socket_file.remove()

    adapterServer = internet.UNIXServer(
        socket_file.path, getAdapter(voluminous))
    reactor.callWhenRunning(adapterServer.startService)
    reactor.run()
def test_happy(self):
    """
    on a normal exit atomic_makedirs creates the dir
    """
    target = FilePath(self.mktemp())
    with atomic_makedirs(target):
        pass
    self.assertThat(target.exists(), Equals(True))
def test_creates_directory_if_missing(self):
    """
    If the directory where the Docker plugin listens on Unix socket does
    not exist, the plugin will create it.
    """
    listening_path = FilePath(self.mktemp())
    DockerPluginScript()._create_listening_directory(listening_path)
    self.assertTrue(listening_path.exists())
def main(): terraform_templates = FilePath("terraform") if not terraform_templates.exists(): print "Please run uft-flocker-sample-files in the current directory first." os._exit(1) os.system("cd terraform && terraform destroy" + (" -force" if os.environ.get("FORCE_DESTROY") else "")) pass
def test_no_failure_if_directory_exists(self):
    """
    If the directory where the Docker plugin listens on Unix socket does
    exist, the plugin will not complain.
    """
    existing_path = FilePath(self.mktemp())
    existing_path.makedirs()
    DockerPluginScript()._create_listening_directory(existing_path)
    self.assertTrue(existing_path.exists())
def run(self):
    """
    Write a fresh default configuration, backing up any existing
    configuration file first.
    """
    # Locate the configuration file.
    configurator = config.configuratorFactory()
    configFile = FilePath(configurator.getConfigFile())
    # Preserve the current config, if any, before overwriting it.
    if configFile.exists():
        self.backupConfig(configFile)
    configurator.writeDefaults()
def main():
    """
    Start the dvol Docker plugin: ensure the plugin and volume
    directories exist, remove any stale Unix socket, and serve the
    adapter on it.
    """
    plugins_dir = FilePath("/run/docker/plugins/")
    if not plugins_dir.exists():
        plugins_dir.makedirs()
    dvol_path = FilePath("/var/lib/dvol/volumes")
    if not dvol_path.exists():
        dvol_path.makedirs()
    voluminous = Voluminous(dvol_path.path)
    # Docker discovers the plugin via this well-known socket name.
    sock = plugins_dir.child("%s.sock" % (VOLUME_DRIVER_NAME,))
    if sock.exists():
        # A previous run may have left a stale socket behind.
        sock.remove()
    adapterServer = internet.UNIXServer(
        sock.path, getAdapter(voluminous))
    reactor.callWhenRunning(adapterServer.startService)
    reactor.run()
def cbConnect(self, directoryService):
    """
    Callback from the directory service. From this point we're connected
    and authenticated.

    Sets up the chunk store under ``~/.distfs``, TCP and local Unix
    listeners, the Kademlia DHT node, periodic chunk publishing, and
    finally daemonizes unless ``--no-daemon`` was given.
    """
    basepath = FilePath(os.path.expanduser('~/.distfs'))
    if not basepath.exists():
        basepath.createDirectory()

    store = FileSystemStore(basepath.child('store').path)
    chunkFactory = Site(server.StoreResource(store))

    locname = self['alias'] or directoryService.service

    # Listen for remote connections. This is for the other nodes
    # to access our store.
    port = self['port'] and int(self['port']) or 0
    listeningPort = reactor.listenTCP(port, chunkFactory)

    keyStore = SQLiteDataStore(basepath.child('%s.db' % locname).path)
    # The DHT node advertises whatever port the TCP listener got.
    dhtNode = KademliaNode(listeningPort.getHost().port, keyStore,
                           reactor=reactor)

    # Listen locally so that applications can easily access the
    # store.
    reactor.listenUNIX(
        basepath.child('%s.http' % locname).path, chunkFactory)

    resolverPublisher = ResolverPublisher(dhtNode)

    controlFactory = control.ControlFactory(store, directoryService,
                                            dhtNode, resolverPublisher)
    reactor.listenUNIX(
        basepath.child('%s.ctrl' % locname).path, controlFactory)

    # Start a looping call that will publish chunks to the
    # overlay; do that every 6th hour. Delay the procedure a bit
    # so that the node has a chance to join the network.
    looping = task.LoopingCall(publishChunks, store, resolverPublisher)
    reactor.callLater(10, looping.start, 6 * 60 * 60, True)

    # Try joining the network.
    introducers = list()
    if self['introducer']:
        try:
            # "host:port", defaulting to port 8033 when absent.
            address, port = self['introducer'].split(':')
        except ValueError:
            address, port = self['introducer'], 8033
        introducers.append((address, int(port)))
    dhtNode.joinNetwork(introducers)

    # At this point everything that can go (majorly) wrong has
    # been initialized and we can daemonize.
    if not self['no-daemon']:
        daemonize()
def test_exception(self):
    """
    Upon error atomic_makedirs should erase the directory
    """
    target = FilePath(self.mktemp())
    with ExpectedException(RuntimeError, "just testing"):
        with atomic_makedirs(target):
            raise RuntimeError("just testing")
    # The half-created directory must have been cleaned up.
    self.assertThat(target.exists(), Equals(False))
def getCAPrivateCert():
    """
    Load the certificate authority's private certificate from
    ``ca-private-cert.pem``, generating and persisting a new self-signed
    one when the file is missing.
    """
    certPath = FilePath(b"ca-private-cert.pem")
    if certPath.exists():
        return PrivateCertificate.loadPEM(certPath.getContent())
    caKey = KeyPair.generate(size=4096)
    caCert = caKey.selfSignedCert(1, CN="the-authority")
    # Persist so subsequent runs reuse the same authority.
    certPath.setContent(caCert.dumpPEM())
    return caCert
def main(): terraform = FilePath("terraform") if not terraform.exists(): print "Please run uft-flocker-sample-files in the current directory first." os._exit(1) os.system("cd terraform && terraform apply") cluster_yml = terraform.child("cluster.yml") if cluster_yml.exists(): cluster_yml.moveTo(FilePath(".").child("cluster.yml"))
def push_pl(self, tsfilename):
    """
    Push the current HLS playlist to the connected peer.

    :param tsfilename: Unused here; presumably kept for call-site
        compatibility — confirm against callers.
    :return: ``True`` when the playlist file exists.
        NOTE(review): indentation reconstructed — the ``return`` may have
        been unconditional in the original; verify against callers.
    """
    plfile = FilePath("playlist.m3u8").asBytesMode()
    if plfile.exists():
        plcontent = plfile.getContent()
        # Don't push updates while paused.
        if not self.paused:
            self.proto.sendMessage("playlist.m3u8")
            self.proto.sendMessage(plcontent, isBinary=True)
        return True
def _getExperimentDir(self, id, startTime):
    """
    Locate the data directory for an experiment, laid out as
    ``<dataDir>/<year>/<month>/<day>/<id>`` based on its UTC start time.

    :return: The directory ``FilePath``, or ``None`` when it does not
        exist.
    """
    started = time.gmtime(startTime)
    directory = FilePath(Experiment.dataDir)
    for part in (started.tm_year, started.tm_mon, started.tm_mday, id):
        directory = directory.child(str(part))
    return directory if directory.exists() else None
def _verifyfp_and_write_pubkey(
    (fingerprint_from_keyscan, hashed_pubkey)
):
    """
    Check the fingerprint reported by ssh-keyscan against the one AWS
    reported, then append the hashed public key to the user's
    ``~/.ssh/known_hosts`` file.

    Python 2 tuple-parameter function; relies on
    ``fingerprint_from_AWS`` and ``stderr`` from the enclosing scope.

    :raises PublicKeyMismatch: If the fingerprints disagree.
    """
    if fingerprint_from_AWS != fingerprint_from_keyscan:
        raise PublicKeyMismatch()
    print >>stderr, "The ssh public key on the server has fingerprint: %s" % (fingerprint_from_keyscan,)
    known_hosts_filepath = FilePath(os.path.expanduser('~')).child('.ssh').child('known_hosts')
    if not known_hosts_filepath.exists():
        known_hosts_filepath.create()
    # Normalise the existing content to exactly one trailing newline
    # before appending the new entry.
    known_hosts = known_hosts_filepath.getContent().rstrip('\n') + '\n'
    new_known_hosts = known_hosts + hashed_pubkey
    known_hosts_filepath.setContent(new_known_hosts)
def mkdtemp(self):
    """
    Create a temporary directory.

    @rtype: L{FilePath}
    """
    directory = FilePath(self.mktemp())
    if not directory.exists():
        directory.makedirs()
    return directory