Ejemplo n.º 1
0
class FilesystemSynchronousBackend(object):
    """A synchronous filesystem backend.

    @see: L{IBackend}

    @param base_path: the base filesystem path for this backend, any attempts to
    read or write 'above' the specified path will be denied
    @type base_path: C{bytes} or L{FilePath<twisted.python.filepath.FilePath>}

    @param can_read: whether or not this backend should support reads
    @type can_read: C{bool}

    @param can_write: whether or not this backend should support writes
    @type can_write: C{bool}

    """

    def __init__(self, base_path, can_read=True, can_write=True):
        # Accept either a FilePath-like object (unwrap its .path) or a raw
        # filesystem path.
        self.base = FilePath(getattr(base_path, 'path', base_path))
        self.can_read = can_read
        self.can_write = can_write

    def _resolve(self, file_name):
        # Map a slash-separated name onto a path below the base directory;
        # anything that would escape the base is reported as a violation.
        try:
            return self.base.descendant(file_name.split(b"/"))
        except InsecurePath as e:
            raise AccessViolation("Insecure path: %s" % e)

    @deferred
    def get_reader(self, file_name):
        """
        @see: L{IBackend.get_reader}

        @rtype: L{Deferred}, yielding a L{FilesystemReader}

        """
        if not self.can_read:
            raise Unsupported("Reading not supported")
        return FilesystemReader(self._resolve(file_name))

    @deferred
    def get_writer(self, file_name):
        """
        @see: L{IBackend.get_writer}

        @rtype: L{Deferred}, yielding a L{FilesystemWriter}

        """
        if not self.can_write:
            raise Unsupported("Writing not supported")
        return FilesystemWriter(self._resolve(file_name))
Ejemplo n.º 2
0
class FilesystemSynchronousBackend(object):
    """A synchronous filesystem backend.

    @see: L{IBackend}

    @param base_path: the base filesystem path for this backend, any attempts to
    read or write 'above' the specified path will be denied
    @type base_path: C{bytes} or L{FilePath<twisted.python.filepath.FilePath>}

    @param can_read: whether or not this backend should support reads
    @type can_read: C{bool}

    @param can_write: whether or not this backend should support writes
    @type can_write: C{bool}

    """
    def __init__(self, base_path, can_read=True, can_write=True):
        # EAFP: first treat base_path as a FilePath (use its .path), and
        # fall back to treating it as a raw filesystem path.
        try:
            self.base = FilePath(base_path.path)
        except AttributeError:
            self.base = FilePath(base_path)
        self.can_read, self.can_write = can_read, can_write

    @deferred
    def get_reader(self, file_name):
        """
        @see: L{IBackend.get_reader}

        @rtype: L{Deferred}, yielding a L{FilesystemReader}

        @raise Unsupported: if this backend was created with
            C{can_read=False}
        @raise AccessViolation: if C{file_name} resolves outside the base
            directory
        """
        if not self.can_read:
            raise Unsupported("Reading not supported")
        try:
            # descendant() raises InsecurePath for names that would escape
            # the base directory (e.g. via "..").
            target_path = self.base.descendant(file_name.split(b"/"))
        except InsecurePath as e:
            raise AccessViolation("Insecure path: %s" % e)
        return FilesystemReader(target_path)

    @deferred
    def get_writer(self, file_name):
        """
        @see: L{IBackend.get_writer}

        @rtype: L{Deferred}, yielding a L{FilesystemWriter}

        @raise Unsupported: if this backend was created with
            C{can_write=False}
        @raise AccessViolation: if C{file_name} resolves outside the base
            directory
        """
        if not self.can_write:
            raise Unsupported("Writing not supported")
        try:
            # descendant() raises InsecurePath for names that would escape
            # the base directory (e.g. via "..").
            target_path = self.base.descendant(file_name.split(b"/"))
        except InsecurePath as e:
            raise AccessViolation("Insecure path: %s" % e)
        return FilesystemWriter(target_path)
Ejemplo n.º 3
0
    def test_steps(self):
        """
        ``build_in_docker`` returns a ``BuildSequence`` comprising
        ``DockerBuild`` and ``DockerRun`` instances.
        """
        distribution = 'Foo'
        tag = 'clusterhq/build-%s' % (distribution,)
        top_level = FilePath('/foo/bar')
        build_directory = top_level.descendant(
            ['admin', 'build_targets', distribution])
        destination_path = FilePath('/baz/qux')
        volumes = {
            FilePath('/output'): destination_path,
            FilePath('/flocker'): top_level,
        }
        package_uri = 'http://www.example.com/foo/bar/whl'

        # Expect one image build followed by one run of that image.
        expected = BuildSequence(steps=[
            DockerBuild(tag=tag, build_directory=build_directory),
            DockerRun(tag=tag, volumes=volumes, command=[package_uri]),
        ])
        actual = build_in_docker(
            destination_path=destination_path,
            distribution=distribution,
            top_level=top_level,
            package_uri=package_uri)
        assert_equal_steps(test_case=self, expected=expected, actual=actual)
Ejemplo n.º 4
0
    def test_bootstrap_pyc(self):
        """
        ``create_virtualenv`` creates links to the pyc files for all the
        modules required for the virtualenv bootstrap process.

        Any required module whose ``.py`` exists but whose ``.py``/``.pyc``
        pair is not fully symlinked is collected, and the test fails with a
        report listing the offenders.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)

        py_files = []
        for module_name in VIRTUALENV_REQUIRED_MODULES:
            py_base = target_path.descendant(['lib', 'python2.7', module_name])
            py = py_base.siblingExtension('.py')
            pyc = py_base.siblingExtension('.pyc')
            # Only check modules that are present; both the .py and the .pyc
            # must be symlinks.  (Replaces ``False in (a, b)`` with the
            # direct boolean expression.)
            if py.exists() and not (py.islink() and pyc.islink()):
                py_files.append('PY: {} > {}\nPYC: {} > {}\n'.format(
                    '/'.join(py.segmentsFrom(target_path)),
                    py.realpath().path,
                    '/'.join(pyc.segmentsFrom(target_path)),
                    # Conditional expression instead of the fragile
                    # ``x and a or b`` idiom, which would fall through to
                    # 'NOT A SYMLINK' if the path string were ever falsy.
                    pyc.realpath().path if pyc.islink() else 'NOT A SYMLINK'
                ))

        if py_files:
            self.fail(
                'Non-linked bootstrap pyc files in {}: \n{}'.format(
                    target_path, '\n'.join(py_files)
                )
            )
Ejemplo n.º 5
0
    def test_bootstrap_pyc(self):
        """
        ``create_virtualenv`` creates links to the pyc files for all the
        modules required for the virtualenv bootstrap process.
        """
        target_path = FilePath(self.mktemp())
        create_virtualenv(root=target_path)

        # Collect a human-readable report line for each module whose
        # .py/.pyc pair is not fully symlinked.
        py_files = []
        for module_name in VIRTUALENV_REQUIRED_MODULES:
            py_base = target_path.descendant(['lib', 'python2.7', module_name])
            py = py_base.siblingExtension('.py')
            pyc = py_base.siblingExtension('.pyc')
            # ``False in (a, b)`` is true when at least one of the two
            # islink() results is False, i.e. the pair is not fully linked.
            if py.exists() and False in (py.islink(), pyc.islink()):
                py_files.append('PY: {} > {}\nPYC: {} > {}\n'.format(
                    '/'.join(py.segmentsFrom(target_path)),
                    py.realpath().path,
                    '/'.join(pyc.segmentsFrom(target_path)),
                    # Old-style and/or conditional: realpath if the pyc is a
                    # symlink, otherwise the placeholder text.
                    pyc.islink() and pyc.realpath().path or 'NOT A SYMLINK'
                ))

        if py_files:
            # Fail once with the full list rather than on the first offender.
            self.fail(
                'Non-linked bootstrap pyc files in {}: \n{}'.format(
                    target_path, '\n'.join(py_files)
                )
            )
Ejemplo n.º 6
0
class BackendSelection(unittest.TestCase):
    """
    Tests for reader/writer selection by ``FilesystemSynchronousBackend``.
    """
    # Fixture content written into the backend's file tree.
    # NOTE(review): native str literal (cf. the bytes variant of this
    # suite) -- presumably Python 2; confirm before porting.
    test_data = """line1
line2
line3
"""


    def setUp(self):
        # Fresh temporary tree per test, containing dir/foo with test_data.
        self.temp_dir = FilePath(tempfile.mkdtemp())
        self.existing_file_name = self.temp_dir.descendant(("dir", "foo"))
        self.existing_file_name.parent().makedirs()
        self.existing_file_name.setContent(self.test_data)

    @inlineCallbacks
    def test_read_supported_by_default(self):
        # Reading is enabled when can_read is left at its default.
        b = FilesystemSynchronousBackend(self.temp_dir.path)
        reader = yield b.get_reader('dir/foo')
        self.assertTrue(IReader.providedBy(reader))

    @inlineCallbacks
    def test_write_supported_by_default(self):
        # Writing is enabled when can_write is left at its default.
        b = FilesystemSynchronousBackend(self.temp_dir.path)
        writer = yield b.get_writer('dir/bar')
        self.assertTrue(IWriter.providedBy(writer))

    def test_read_unsupported(self):
        # can_read=False makes get_reader fail with Unsupported.
        b = FilesystemSynchronousBackend(self.temp_dir.path, can_read=False)
        return self.assertFailure(b.get_reader('dir/foo'), Unsupported)

    def test_write_unsupported(self):
        # can_write=False makes get_writer fail with Unsupported.
        b = FilesystemSynchronousBackend(self.temp_dir.path, can_write=False)
        return self.assertFailure(b.get_writer('dir/bar'), Unsupported)

    def test_insecure_reader(self):
        # Paths that escape the base directory are rejected on read.
        b = FilesystemSynchronousBackend(self.temp_dir.path)
        return self.assertFailure(
            b.get_reader('../foo'), AccessViolation)

    def test_insecure_writer(self):
        # Paths that escape the base directory are rejected on write.
        b = FilesystemSynchronousBackend(self.temp_dir.path)
        return self.assertFailure(
            b.get_writer('../foo'), AccessViolation)

    @inlineCallbacks
    def test_read_ignores_leading_and_trailing_slashes(self):
        # '/dir/foo/' resolves to the same segments as 'dir/foo'.
        b = FilesystemSynchronousBackend(self.temp_dir.path)
        reader = yield b.get_reader('/dir/foo/')
        segments_from_root = reader.file_path.segmentsFrom(self.temp_dir)
        self.assertEqual(["dir", "foo"], segments_from_root)

    @inlineCallbacks
    def test_write_ignores_leading_and_trailing_slashes(self):
        # '/dir/bar/' resolves to the same segments as 'dir/bar'.
        b = FilesystemSynchronousBackend(self.temp_dir.path)
        writer = yield b.get_writer('/dir/bar/')
        segments_from_root = writer.file_path.segmentsFrom(self.temp_dir)
        self.assertEqual(["dir", "bar"], segments_from_root)

    def tearDown(self):
        # mkdtemp() directories are not cleaned up automatically.
        shutil.rmtree(self.temp_dir.path)
Ejemplo n.º 7
0
class BackendSelection(unittest.TestCase):
    """
    Tests for reader/writer selection by ``FilesystemSynchronousBackend``,
    exercising the bytes-path code paths.
    """

    test_data = b"""line1
line2
line3
"""

    def setUp(self):
        # Build a fresh temporary tree per test: <tmp>/dir/foo holds the
        # fixture bytes.
        self.temp_dir = FilePath(tempfile.mkdtemp()).asBytesMode()
        self.existing_file_name = self.temp_dir.descendant((b"dir", b"foo"))
        self.existing_file_name.parent().makedirs()
        self.existing_file_name.setContent(self.test_data)

    def tearDown(self):
        # Clean up the directory created by mkdtemp().
        shutil.rmtree(self.temp_dir.path)

    @inlineCallbacks
    def test_read_supported_by_default(self):
        """A backend created with default flags hands out readers."""
        backend = FilesystemSynchronousBackend(self.temp_dir.path)
        reader = yield backend.get_reader(b'dir/foo')
        self.assertTrue(IReader.providedBy(reader))

    @inlineCallbacks
    def test_write_supported_by_default(self):
        """A backend created with default flags hands out writers."""
        backend = FilesystemSynchronousBackend(self.temp_dir.path)
        writer = yield backend.get_writer(b'dir/bar')
        self.assertTrue(IWriter.providedBy(writer))

    def test_read_unsupported(self):
        """``can_read=False`` causes ``get_reader`` to fail."""
        backend = FilesystemSynchronousBackend(
            self.temp_dir.path, can_read=False)
        return self.assertFailure(backend.get_reader(b'dir/foo'), Unsupported)

    def test_write_unsupported(self):
        """``can_write=False`` causes ``get_writer`` to fail."""
        backend = FilesystemSynchronousBackend(
            self.temp_dir.path, can_write=False)
        return self.assertFailure(backend.get_writer(b'dir/bar'), Unsupported)

    def test_insecure_reader(self):
        """Reads outside the base directory are rejected."""
        backend = FilesystemSynchronousBackend(self.temp_dir.path)
        return self.assertFailure(backend.get_reader(b'../foo'),
                                  AccessViolation)

    def test_insecure_writer(self):
        """Writes outside the base directory are rejected."""
        backend = FilesystemSynchronousBackend(self.temp_dir.path)
        return self.assertFailure(backend.get_writer(b'../foo'),
                                  AccessViolation)

    @inlineCallbacks
    def test_read_ignores_leading_and_trailing_slashes(self):
        """Surrounding slashes in a read name do not change the target."""
        backend = FilesystemSynchronousBackend(self.temp_dir.path)
        reader = yield backend.get_reader(b'/dir/foo/')
        self.assertEqual(
            [b"dir", b"foo"],
            reader.file_path.segmentsFrom(self.temp_dir))

    @inlineCallbacks
    def test_write_ignores_leading_and_trailing_slashes(self):
        """Surrounding slashes in a write name do not change the target."""
        backend = FilesystemSynchronousBackend(self.temp_dir.path)
        writer = yield backend.get_writer(b'/dir/bar/')
        self.assertEqual(
            [b"dir", b"bar"],
            writer.file_path.segmentsFrom(self.temp_dir))
Ejemplo n.º 8
0
class FlockerDeployConfigureSSHTests(TestCase):
    """
    Tests for ``DeployScript._configure_ssh``.
    """

    @_require_installed
    def setUp(self):
        # A throw-away sshd with its own config directory, restored during
        # cleanup, plus empty flocker and per-user ssh config directories.
        self.sshd_config = FilePath(self.mktemp())
        self.server = create_ssh_server(self.sshd_config)
        self.addCleanup(self.server.restore)
        self.flocker_config = FilePath(self.mktemp())
        self.local_user_ssh = FilePath(self.mktemp())

        self.config = OpenSSHConfiguration(
            ssh_config_path=self.local_user_ssh,
            flocker_path=self.flocker_config)
        self.configure_ssh = self.config.configure_ssh

        # ``configure_ssh`` expects ``ssh`` to already be able to
        # authenticate against the server.  Set up an ssh-agent to
        # help it do that against our testing server.
        self.agent = create_ssh_agent(self.server.key_path, self)

    def test_installs_public_sshkeys(self):
        """
        ``DeployScript._configure_ssh`` installs the cluster wide public ssh
        keys on each node in the supplied ``Deployment``.
        """
        deployment = Deployment(
            nodes=frozenset([
                Node(
                    hostname=str(self.server.ip),
                    applications=None
                ),
                # Node(
                #     hostname='node2.example.com',
                #     applications=None
                # )
            ])
        )

        script = DeployScript(
            ssh_configuration=self.config, ssh_port=self.server.port)
        result = script._configure_ssh(deployment)

        # The public key written locally must end up in the test server's
        # authorized_keys once the deferred fires.
        local_key = self.local_user_ssh.child(b'id_rsa_flocker.pub')
        authorized_keys = self.sshd_config.descendant([
            b'home', b'.ssh', b'authorized_keys'])

        def check_authorized_keys(ignored):
            # rstrip() removes the trailing newline so the key compares
            # equal to a splitlines() entry.
            self.assertIn(local_key.getContent().rstrip(),
                          authorized_keys.getContent().splitlines())

        result.addCallback(check_authorized_keys)
        return result
Ejemplo n.º 9
0
    def test_relative_args_with_box(self):
        """
        When invoked as `build`, no box can be specified.
        """
        top_level = FilePath(self.mktemp())
        top_level.createDirectory()
        # Invocation path of the form .../<box-name>/build.
        invoked_from = top_level.descendant(['somewhere', 'box-name', 'build'])

        options = BuildOptions(base_path=invoked_from, top_level=top_level)

        # Passing --box in this mode must be rejected.
        self.assertRaises(UsageError, options.parseOptions, ['--box', 'box'])
Ejemplo n.º 10
0
    def test_relative_args_with_box(self):
        """
        When invoked as `build`, no box can be specified.

        ``parseOptions`` must raise ``UsageError`` when ``--box`` is passed
        in this invocation style.
        """
        path = FilePath(self.mktemp())
        path.createDirectory()
        # Base path of the form .../<box-name>/build: the relative
        # invocation style.
        base_path = path.descendant(['somewhere', 'box-name', 'build'])

        options = BuildOptions(base_path=base_path, top_level=path)

        self.assertRaises(UsageError, options.parseOptions, ['--box', 'box'])
Ejemplo n.º 11
0
    def test_absolute_args_no_box(self):
        """
        When invoked as `build-vagrant-box`, specifying a box is required.
        """
        top_level = FilePath(self.mktemp())
        top_level.createDirectory()
        # Invocation path of the form bin/build-vagrant-box.
        invoked_from = top_level.descendant(['bin', 'build-vagrant-box'])

        options = BuildOptions(base_path=invoked_from, top_level=top_level)

        # Parsing with no --box argument must fail.
        self.assertRaises(UsageError, options.parseOptions, [])
Ejemplo n.º 12
0
    def test_absolute_args_no_box(self):
        """
        When invoked as `build-vagrant-box`, specifying a box is required.
        """
        path = FilePath(self.mktemp())
        path.createDirectory()
        # Base path under bin/: the absolute invocation style, which has no
        # box name in the path to infer from.
        base_path = path.descendant(['bin', 'build-vagrant-box'])

        options = BuildOptions(base_path=base_path, top_level=path)

        # No --box argument: parsing must fail with UsageError.
        self.assertRaises(UsageError, options.parseOptions, [])
Ejemplo n.º 13
0
    def test_absolute_args(self):
        """
        When invoked as `build-vagrant-box`, :class:`BuildOption` takes the
        path relative to the top-level, and the box name from the passed
        argument.
        """
        top_level = FilePath(self.mktemp())
        top_level.createDirectory()
        invoked_from = top_level.descendant(['bin', 'build-vagrant-box'])

        options = BuildOptions(base_path=invoked_from, top_level=top_level)
        options.parseOptions(['--box', 'box-name'])

        # The box name comes from --box; the path is derived from it.
        expected = {
            'box': 'box-name',
            'path': top_level.descendant(['vagrant', 'box-name']),
            'build-server': 'http://build.clusterhq.com/',
            'branch': None,
            'flocker-version': flocker_version,
        }
        self.assertEqual(options, expected)
Ejemplo n.º 14
0
    def test_absolute_args(self):
        """
        When invoked as `build-vagrant-box`, :class:`BuildOption` takes the
        path relative to the top-level, and the box name from the passed
        argument.
        """
        path = FilePath(self.mktemp())
        path.createDirectory()
        # Base path under bin/: the absolute invocation style.
        base_path = path.descendant(['bin', 'build-vagrant-box'])

        options = BuildOptions(base_path=base_path, top_level=path)

        options.parseOptions(['--box', 'box-name'])

        # Box name comes from --box; path is derived from the top-level and
        # the box name; the remaining keys take their defaults.
        self.assertEqual(
            options, {
                'box': 'box-name',
                'path': path.descendant(['vagrant', 'box-name']),
                'build-server': 'http://build.clusterhq.com/',
                'branch': None,
                'flocker-version': flocker_version,
            })
Ejemplo n.º 15
0
    def test_relative_args(self):
        """
        When invoked as `build`, no box can be specified. BuildOption takes
        the path from the parent of :file:`build`, and the box name from the
        name of that directory.
        """
        top_level = FilePath(self.mktemp())
        top_level.createDirectory()
        invoked_from = top_level.descendant(['somewhere', 'box-name', 'build'])

        options = BuildOptions(base_path=invoked_from, top_level=top_level)
        options.parseOptions([])

        # Both the box name and the path are inferred from the directory
        # containing the `build` script.
        expected = {
            'box': 'box-name',
            'path': top_level.descendant(['somewhere', 'box-name']),
            'build-server': 'http://build.clusterhq.com/',
            'branch': None,
            'flocker-version': flocker_version,
        }
        self.assertEqual(options, expected)
Ejemplo n.º 16
0
    def test_relative_args(self):
        """
        When invoked as `build`, no box can be specified. BuildOption takes the
        path from the parent of :file:`build`, and the box name from the name
        of
        that directory.
        """
        path = FilePath(self.mktemp())
        path.createDirectory()
        # Base path of the form .../<box-name>/build: the relative
        # invocation style.
        base_path = path.descendant(['somewhere', 'box-name', 'build'])

        options = BuildOptions(base_path=base_path, top_level=path)

        options.parseOptions([])

        # Box name and path are inferred from the parent directory of
        # `build`; the remaining keys take their defaults.
        self.assertEqual(
            options, {
                'box': 'box-name',
                'path': path.descendant(['somewhere', 'box-name']),
                'build-server': 'http://build.clusterhq.com/',
                'branch': None,
                'flocker-version': flocker_version,
            })
Ejemplo n.º 17
0
 def test_pythonpath(self):
     """
     ``create_virtualenv`` installs a virtual python whose path does not
     include the system python libraries.
     """
     venv_root = FilePath(self.mktemp())
     create_virtualenv(root=venv_root)
     # Ask the virtualenv's own interpreter for its sys.path, one entry
     # per line.
     interpreter = venv_root.descendant(['bin', 'python']).path
     code = r'import sys; sys.stdout.write("\n".join(sys.path))'
     output = check_output([interpreter, '-c', code])
     # We should probably check for lib64 as well here.
     self.assertNotIn('/usr/lib/python2.7/site-packages',
                      output.splitlines())
Ejemplo n.º 18
0
 def test_pythonpath(self):
     """
     ``create_virtualenv`` installs a virtual python whose path does not
     include the system python libraries.
     """
     target_path = FilePath(self.mktemp())
     create_virtualenv(root=target_path)
     # Ask the virtualenv's own interpreter for its sys.path, one entry
     # per line.
     output = check_output([
         target_path.descendant(['bin', 'python']).path,
         '-c', r'import sys; sys.stdout.write("\n".join(sys.path))'
     ])
     # We should probably check for lib64 as well here.
     self.assertNotIn(
         '/usr/lib/python2.7/site-packages', output.splitlines())
Ejemplo n.º 19
0
 def test_install(self):
     """
     ``VirtualEnv.install`` accepts a ``PythonPackage`` instance and
     installs it.
     """
     virtualenv_dir = FilePath(self.mktemp())
     virtualenv = create_virtualenv(root=virtualenv_dir)
     package_dir = FilePath(self.mktemp())
     package = canned_package(package_dir)
     virtualenv.install(package_dir.path)
     # Installation is confirmed by the egg-info entry appearing in the
     # virtualenv's site-packages.
     site_packages = virtualenv_dir.descendant(
         ['lib', 'python2.7', 'site-packages'])
     installed_names = [f.basename() for f in site_packages.children()]
     expected_egg_info = '{}-{}-py2.7.egg-info'.format(
         package.name, package.version)
     self.assertIn(expected_egg_info, installed_names)
Ejemplo n.º 20
0
 def test_install(self):
     """
     ``VirtualEnv.install`` accepts a ``PythonPackage`` instance and
     installs it.
     """
     virtualenv_dir = FilePath(self.mktemp())
     virtualenv = create_virtualenv(root=virtualenv_dir)
     package_dir = FilePath(self.mktemp())
     package = canned_package(package_dir)
     virtualenv.install(package_dir.path)
     # Installation is confirmed by the egg-info entry appearing in the
     # virtualenv's site-packages directory.
     self.assertIn(
         '{}-{}-py2.7.egg-info'.format(package.name, package.version), [
             f.basename() for f in virtualenv_dir.descendant(
                 ['lib', 'python2.7', 'site-packages']).children()
         ])
Ejemplo n.º 21
0
    def mktemp(self):
        """
        Create a new path name which can be used for a new file or directory.

        The result is a path that is guaranteed to be unique within the
        current working directory.  The parent of the path will exist, but the
        path will not.

        :return str: The newly created path
        """
        # Derive a per-test directory under the working directory from the
        # dotted test id; self.id() is a native string, so split on a
        # native ".".
        base = FilePath(u".").descendant(self.id().split("."))
        base.makedirs(ignoreExistingDirectory=True)
        # Remove group and other write permission, in case it was somehow
        # granted, so that when we invent a temporary filename beneath this
        # directory we're not subject to a collision attack.
        base.chmod(0o755)
        return base.child(u"tmp").temporarySibling().asTextMode().path
Ejemplo n.º 22
0
    def test_run(self):
        """
        ``CreateLinks.run`` generates symlinks in ``destination_path`` for all
        the supplied ``links``.
        """
        root = FilePath(self.mktemp())
        bin_dir = root.descendant(['usr', 'bin'])
        bin_dir.makedirs()

        links = frozenset([
            (FilePath('/opt/flocker/bin/flocker-foo'), bin_dir),
            (FilePath('/opt/flocker/bin/flocker-bar'), bin_dir),
        ])
        CreateLinks(links=links).run()

        # Every child of bin_dir must resolve back to the corresponding
        # script under /opt/flocker/bin.
        expected_targets = set(
            FilePath('/opt/flocker/bin').child(script)
            for script in ('flocker-foo', 'flocker-bar'))
        actual_targets = set(
            child.realpath() for child in bin_dir.children())
        self.assertEqual(expected_targets, actual_targets)
Ejemplo n.º 23
0
    def test_run(self):
        """
        ``CreateLinks.run`` generates symlinks in ``destination_path`` for all
        the supplied ``links``.
        """
        root = FilePath(self.mktemp())
        bin_dir = root.descendant(['usr', 'bin'])
        bin_dir.makedirs()

        CreateLinks(
            links=frozenset([
                (FilePath('/opt/flocker/bin/flocker-foo'), bin_dir),
                (FilePath('/opt/flocker/bin/flocker-bar'), bin_dir),
            ])
        ).run()

        # Every child of bin_dir must resolve back to the corresponding
        # script under /opt/flocker/bin.
        self.assertEqual(
            set(FilePath('/opt/flocker/bin').child(script)
                for script in ('flocker-foo', 'flocker-bar')),
            set(child.realpath() for child in bin_dir.children())
        )
Ejemplo n.º 24
0
    def test_steps(self):
        """
        ``build_in_docker`` returns a ``BuildSequence`` comprising
        ``DockerBuild`` and ``DockerRun`` instances.
        """
        supplied_distribution = 'Foo'
        expected_tag = 'clusterhq/build-%s' % (supplied_distribution,)
        supplied_top_level = FilePath('/foo/bar')
        expected_build_directory = supplied_top_level.descendant(
            ['admin', 'build_targets', supplied_distribution])
        supplied_destination_path = FilePath('/baz/qux')
        expected_volumes = {
            FilePath('/output'): supplied_destination_path,
            FilePath('/flocker'): supplied_top_level,
        }
        expected_package_uri = 'http://www.example.com/foo/bar/whl'

        # Expect one image build followed by one run of that image with the
        # output and source volumes mounted.
        assert_equal_steps(
            test_case=self,
            expected=BuildSequence(
                steps=[
                    DockerBuild(
                        tag=expected_tag,
                        build_directory=expected_build_directory
                    ),
                    DockerRun(
                        tag=expected_tag,
                        volumes=expected_volumes,
                        command=[expected_package_uri]
                    ),
                ]
            ),
            actual=build_in_docker(
                destination_path=supplied_destination_path,
                distribution=supplied_distribution,
                top_level=supplied_top_level,
                package_uri=expected_package_uri
            )
        )
Ejemplo n.º 25
0
class FlockerDeployConfigureSSHTests(TestCase):
    """
    Tests for ``DeployScript._configure_ssh``.
    """
    @_require_installed
    def setUp(self):
        # A throw-away sshd with its own config directory, restored during
        # cleanup, plus empty flocker and per-user ssh config directories.
        self.sshd_config = FilePath(self.mktemp())
        self.server = create_ssh_server(self.sshd_config)
        self.addCleanup(self.server.restore)
        self.flocker_config = FilePath(self.mktemp())
        self.local_user_ssh = FilePath(self.mktemp())

        self.config = OpenSSHConfiguration(ssh_config_path=self.local_user_ssh,
                                           flocker_path=self.flocker_config)
        self.configure_ssh = self.config.configure_ssh

        # ``configure_ssh`` expects ``ssh`` to already be able to
        # authenticate against the server.  Set up an ssh-agent to
        # help it do that against our testing server.
        self.agent = create_ssh_agent(self.server.key_path, self)

    def test_installs_public_sshkeys(self):
        """
        ``DeployScript._configure_ssh`` installs the cluster wide public ssh
        keys on each node in the supplied ``Deployment``.
        """
        deployment = Deployment(nodes=[
            Node(hostname=unicode(self.server.ip), applications=[]),
        ])

        script = DeployScript(ssh_configuration=self.config,
                              ssh_port=self.server.port)
        result = script._configure_ssh(deployment)

        # The locally generated public key must end up in the test server's
        # authorized_keys once the deferred fires.
        local_key = self.local_user_ssh.child(b'id_rsa_flocker.pub')
        authorized_keys = self.sshd_config.descendant(
            [b'home', b'.ssh', b'authorized_keys'])

        def check_authorized_keys(ignored):
            # rstrip() drops the trailing newline so the key compares equal
            # to a splitlines() entry.
            self.assertIn(local_key.getContent().rstrip(),
                          authorized_keys.getContent().splitlines())

        result.addCallback(check_authorized_keys)
        return result

    def test_sshkey_installation_failure(self):
        """
        ``DeployScript._configure_ssh`` fires with an errback if one of the
        configuration attempts fails.
        """
        def fail(host, port):
            raise ZeroDivisionError()

        self.config.configure_ssh = fail

        deployment = Deployment(nodes=[
            Node(hostname=unicode(self.server.ip), applications=[]),
        ])

        script = DeployScript(ssh_configuration=self.config,
                              ssh_port=self.server.port)
        result = script._configure_ssh(deployment)
        # Unwrap the aggregated failure to the underlying error.
        result.addErrback(lambda f: f.value.subFailure)
        result = self.assertFailure(result, ZeroDivisionError)
        # Handle errors logged by gather_deferreds
        self.addCleanup(self.flushLoggedErrors, ZeroDivisionError)
        return result

    def test_sshkey_installation_ssh_process_failure(self):
        """
        ``DeployScript._configure_ssh`` fires with a ``SystemExit`` errback
        containing the SSH process output if one of the configuration
        attempts fails.
        """
        def fail(host, port):
            raise CalledProcessError(1, "ssh", output=b"onoes")

        self.config.configure_ssh = fail

        deployment = Deployment(nodes=[
            Node(hostname=unicode(self.server.ip), applications=[]),
        ])

        script = DeployScript(ssh_configuration=self.config,
                              ssh_port=self.server.port)
        result = script._configure_ssh(deployment)
        result = self.assertFailure(result, SystemExit)
        result.addCallback(lambda exc: self.assertEqual(
            exc.args, (b"Error connecting to cluster node: onoes", )))
        # Handle errors logged by gather_deferreds
        self.addCleanup(self.flushLoggedErrors, CalledProcessError)
        return result

    def test_sshkey_installation_failure_logging(self):
        """
        ``DeployScript._configure_ssh`` logs all failed configuration attempts.
        """
        expected_errors = [
            ZeroDivisionError("error1"),
            ZeroDivisionError("error2"),
            ZeroDivisionError("error3"),
        ]

        # iter()/next() instead of a generator expression plus the
        # Python-2-only ``.next()`` method; the ``next()`` builtin works on
        # Python 2.6+ and Python 3.
        error_iterator = iter(expected_errors)

        def fail(host, port):
            raise next(error_iterator)

        self.config.configure_ssh = fail

        deployment = Deployment(nodes=[
            Node(hostname=u'node1.example.com', applications=[]),
            Node(hostname=u'node2.example.com', applications=[]),
            Node(hostname=u'node3.example.com', applications=[]),
        ])

        script = DeployScript(ssh_configuration=self.config,
                              ssh_port=self.server.port)
        result = script._configure_ssh(deployment)

        def check_logs(ignored_first_error):
            failures = self.flushLoggedErrors(ZeroDivisionError)
            # SSH configuration is performed in parallel threads so the order
            # of logged errors depends on the thread scheduling. Sort the
            # results before comparing.
            self.assertEqual(sorted(expected_errors),
                             sorted(f.value for f in failures))

        result.addErrback(check_logs)
        return result
Ejemplo n.º 26
0
def create_proxy_to(logger, ip, port):
    """
    :see: ``HostNetwork.create_proxy_to``

    Install the iptables rules required to forward TCP traffic arriving on
    ``port`` of this host to ``ip``, enable the kernel forwarding switches,
    and return a ``Proxy`` describing the configuration.
    """
    action = CREATE_PROXY_TO(logger=logger, target_ip=ip, target_port=port)

    with action:
        ip_arg = unicode(ip).encode("ascii")
        port_arg = unicode(port).encode("ascii")

        # Step one: "Destination NAT" (DNAT).  Rewrite the destination
        # address of traffic arriving on the specified port so it looks
        # destined for ``ip`` instead of for this host - that is what gets
        # the packets delivered to the right place.
        iptables(logger, [
            # All NAT manipulation lives in the netfilter "nat" table.
            b"--table", b"nat",

            # DNAT must happen before routing so the machine's normal routing
            # rules see the rewritten destination; hence the PREROUTING chain.
            b"--append", b"PREROUTING",

            # Only touch TCP traffic aimed at the configured port.  TCP must
            # be named explicitly because ports are a TCP (or UDP) concept.
            b"--protocol", b"tcp", b"--destination-port", port_arg,

            # Only rewrite traffic addressed to this host.  Traffic
            # originating here but aimed at some other host that happens to
            # use the same port is left alone.
            b"--match", b"addrtype", b"--dst-type", b"LOCAL",

            # Tag the rule as flocker-created so it can be recognized later.
            b"--match", b"comment", b"--comment", FLOCKER_PROXY_COMMENT_MARKER,

            # Hand matching packets to the built-in DNAT chain, which does
            # the actual mangling; tell it the replacement destination IP.
            b"--jump", b"DNAT", b"--to-destination", ip_arg,
        ])

        # Step two: masquerade on the way out.  Having rewritten the
        # destination during prerouting, the packet now leaves this system
        # for the target node.  The downstream client would be thoroughly
        # broken if that node replied to it *directly*, so also rewrite the
        # source address (ip and port) to that of the interface the packet
        # exits on.  SNAT would be slightly more efficient (no per-packet
        # interface-address lookup) but requires knowing that address,
        # updating the rule whenever it changes, and possibly some port
        # allocation steps - so just masquerade for now.
        iptables(logger, [
            # All NAT manipulation lives in the netfilter "nat" table.
            b"--table", b"nat",

            # Source rewriting happens after the routing decision, on the
            # way out of the system: the POSTROUTING chain.
            b"--append", b"POSTROUTING",

            # Match the same traffic as the first rule.  (Factoring the
            # shared filter out might be worthwhile if it ever changes.)
            # The LOCAL addrtype check is omitted because at this point the
            # packet is definitely leaving this host.
            b"--protocol", b"tcp", b"--destination-port", port_arg,

            # Do the masquerading.
            b"--jump", b"MASQUERADE",
        ])

        # Step three: traffic originating *on* this host bypasses the
        # PREROUTING chain and passes through the OUTPUT chain instead.  For
        # localhost connections to the forwarded port to be affected, the
        # OUTPUT chain needs the same kind of DNAT rule.
        iptables(logger, [
            # All NAT manipulation lives in the netfilter "nat" table.
            b"--table", b"nat",

            # As mentioned, this rule is for the OUTPUT chain.
            b"--append", b"OUTPUT",

            # Exactly the same match as the PREROUTING rule.
            b"--protocol", b"tcp", b"--destination-port", port_arg,
            b"--match", b"addrtype", b"--dst-type", b"LOCAL",

            # And exactly the same DNAT as the PREROUTING rule.
            b"--jump", b"DNAT", b"--to-destination", ip_arg,
        ])

        # Let the rewritten traffic through the FORWARD filter chain.
        iptables(logger, [
            b"--table", b"filter",
            b"--insert", b"FORWARD",
            b"--destination", ip_arg,
            b"--protocol", b"tcp",
            b"--destination-port", port_arg,
            b"--jump", b"ACCEPT",
        ])

        # The network stack only considers forwarding traffic when certain
        # system configuration is in place.
        # https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
        # explains the meaning of these switches in (very slightly) more
        # detail.
        sysctl_conf = FilePath(b"/proc/sys/net/ipv4/conf")
        forwarding_path = sysctl_conf.descendant([b"default", b"forwarding"])
        with forwarding_path.open("wb") as forwarding:
            forwarding.write(b"1")

        # For the OUTPUT-chain DNAT rule to affect routing decisions, the
        # system must also make routing decisions about traffic from or to
        # localhost, on every interface.
        for interface_conf in sysctl_conf.children():
            with interface_conf.child(b"route_localnet").open("wb") as f:
                f.write(b"1")

        return Proxy(ip=ip, port=port)
Ejemplo n.º 27
0
    def test_client_cache(self):
        """
        Announcements received by an introducer client are written to that
        introducer client's cache file.

        Covers four phases: a first announcement is cached; a newer
        announcement for the same key replaces (not duplicates) it; an
        announcement under a different key is added alongside; and a fresh
        client loads the cached announcements back.
        """
        # NOTE(review): this method uses ``yield`` on Deferreds, so it is
        # presumably decorated with ``inlineCallbacks`` at its definition
        # site (not visible in this chunk) -- confirm.
        basedir = FilePath("introducer/ClientSeqnums/test_client_cache_1")
        private = basedir.child("private")
        private.makedirs()
        write_introducer(basedir, "default", "nope")
        # Path the introducer client is expected to write its cache to.
        cache_filepath = basedir.descendant([
            "private",
            "introducer_default_cache.yaml",
        ])

        # if storage is enabled, the Client will publish its storage server
        # during startup (although the announcement will wait in a queue
        # until the introducer connection is established). To avoid getting
        # confused by this, disable storage.
        with basedir.child("tahoe.cfg").open("w") as f:
            f.write(b"[storage]\n")
            f.write(b"enabled = false\n")

        c = yield create_client(basedir.path)
        ic = c.introducer_clients[0]
        # Sign the announcement with a fresh ed25519 keypair; the public key
        # (minus its "pub-" prefix) identifies the announcement in the cache.
        private_key, public_key = ed25519.create_signing_keypair()
        public_key_str = remove_prefix(
            ed25519.string_from_verifying_key(public_key), b"pub-")
        furl1 = b"pb://[email protected]:123/short"  # base32("short")
        ann_t = make_ann_t(ic, furl1, private_key, 1)

        ic.got_announcements([ann_t])
        yield flushEventualQueue()

        # check the cache for the announcement
        announcements = self._load_cache(cache_filepath)
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(ensure_binary(announcements[0]['key_s']),
                             public_key_str)
        ann = announcements[0]["ann"]
        self.failUnlessEqual(ensure_binary(ann["anonymous-storage-FURL"]),
                             furl1)
        self.failUnlessEqual(ann["seqnum"], 1)

        # a new announcement that replaces the first should replace the
        # cached entry, not duplicate it
        furl2 = furl1 + b"er"
        ann_t2 = make_ann_t(ic, furl2, private_key, 2)
        ic.got_announcements([ann_t2])
        yield flushEventualQueue()
        announcements = self._load_cache(cache_filepath)
        self.failUnlessEqual(len(announcements), 1)
        self.failUnlessEqual(ensure_binary(announcements[0]['key_s']),
                             public_key_str)
        ann = announcements[0]["ann"]
        self.failUnlessEqual(ensure_binary(ann["anonymous-storage-FURL"]),
                             furl2)
        self.failUnlessEqual(ann["seqnum"], 2)

        # but a third announcement with a different key should add to the
        # cache
        private_key2, public_key2 = ed25519.create_signing_keypair()
        public_key_str2 = remove_prefix(
            ed25519.string_from_verifying_key(public_key2), b"pub-")
        furl3 = b"pb://[email protected]:456/short"
        ann_t3 = make_ann_t(ic, furl3, private_key2, 1)
        ic.got_announcements([ann_t3])
        yield flushEventualQueue()

        # Both keys must now be present, each mapped to its own FURL.
        announcements = self._load_cache(cache_filepath)
        self.failUnlessEqual(len(announcements), 2)
        self.failUnlessEqual(
            set([public_key_str, public_key_str2]),
            set([ensure_binary(a["key_s"]) for a in announcements]))
        self.failUnlessEqual(
            set([furl2, furl3]),
            set([
                ensure_binary(a["ann"]["anonymous-storage-FURL"])
                for a in announcements
            ]))

        # test loading
        yield flushEventualQueue()
        # A brand-new client sharing the same cache file should be able to
        # reload the announcements without any introducer connection.
        ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname",
                               "my_version", "oldest_version", fakeseq,
                               ic._cache_filepath)
        announcements = {}

        def got(key_s, ann):
            # Record every announcement delivered to the subscriber.
            announcements[key_s] = ann

        ic2.subscribe_to("storage", got)
        ic2._load_announcements()  # normally happens when connection fails
        yield flushEventualQueue()

        self.failUnless(public_key_str in announcements)
        self.failUnlessEqual(
            ensure_binary(
                announcements[public_key_str]["anonymous-storage-FURL"]),
            furl2)
        self.failUnlessEqual(
            ensure_binary(
                announcements[public_key_str2]["anonymous-storage-FURL"]),
            furl3)

        # A full client built on the same basedir should populate its storage
        # broker from the cached announcements as well.
        c2 = yield create_client(basedir.path)
        c2.introducer_clients[0]._load_announcements()
        yield flushEventualQueue()
        self.assertEqual(c2.storage_broker.get_all_serverids(),
                         frozenset([public_key_str, public_key_str2]))
Ejemplo n.º 28
0
class GetExtensionsTest(TestCase):
    """
    Tests for L{dist.getExtensions}.
    """

    setupTemplate = (
        "from twisted.python.dist import ConditionalExtension\n"
        "extensions = [\n"
        "    ConditionalExtension(\n"
        "        '%s', ['twisted/some/thing.c'],\n"
        "        condition=lambda builder: True)\n"
        "    ]\n")

    def setUp(self):
        """
        Create a C{twisted} directory inside a fresh temporary directory and
        make that temporary directory the working directory for the duration
        of the test, restoring the previous one afterwards.
        """
        self.basedir = FilePath(self.mktemp()).child("twisted")
        self.basedir.makedirs()
        self.addCleanup(os.chdir, os.getcwd())
        os.chdir(self.basedir.parent().path)

    def writeSetup(self, name, *path):
        """
        Generate a C{setup.py} declaring an extension called C{name}, using
        L{self.setupTemplate}, at the location beneath L{self.basedir} given
        by C{path}.
        """
        target = self.basedir.descendant(path)
        target.makedirs()
        target.child("setup.py").setContent(self.setupTemplate % (name,))

    def writeEmptySetup(self, *path):
        """
        Generate an empty C{setup.py} at the location beneath L{self.basedir}
        given by C{path}.
        """
        target = self.basedir.descendant(path)
        target.makedirs()
        target.child("setup.py").setContent("")

    def assertExtensions(self, expected):
        """
        Assert that the sorted names of the extensions discovered by
        L{dist.getExtensions} equal C{expected}.
        """
        discovered = [extension.name for extension in dist.getExtensions()]
        self.assertEqual(sorted(discovered), expected)

    def test_getExtensions(self):
        """
        Files named I{setup.py} found in I{twisted/topfiles} and
        I{twisted/*/topfiles} are executed with L{execfile} to discover the
        extensions they declare.
        """
        self.writeSetup("twisted.transmutate", "topfiles")
        self.writeSetup("twisted.tele.port", "tele", "topfiles")
        self.assertExtensions(["twisted.tele.port", "twisted.transmutate"])

    def test_getExtensionsTooDeep(self):
        """
        A I{setup.py} inside a I{topfiles} directory is ignored when that
        directory is nested too deeply in the hierarchy.
        """
        self.writeSetup("twisted.trans.mog.rify", "trans", "mog", "topfiles")
        self.assertExtensions([])

    def test_getExtensionsNotTopfiles(self):
        """
        A I{setup.py} is only considered when its containing directory is
        named I{topfiles}; any other name is ignored.
        """
        self.writeSetup("twisted.metamorphosis", "notfiles")
        self.assertExtensions([])

    def test_getExtensionsNotSupportedOnJava(self):
        """
        No extensions are discovered on Java-based platforms, where they are
        unsupported.
        """
        self.addCleanup(setattr, sys, "platform", sys.platform)
        sys.platform = "java"
        self.writeSetup("twisted.sorcery", "topfiles")
        self.assertExtensions([])

    def test_getExtensionsExtensionsLocalIsOptional(self):
        """
        A I{setup.py} which never defines the C{extensions} local variable is
        acceptable and simply contributes no extensions.
        """
        self.writeEmptySetup("twisted.necromancy", "topfiles")
        self.assertExtensions([])
Ejemplo n.º 29
0
class FlockerDeployConfigureSSHTests(TestCase):
    """
    Tests for ``DeployScript._configure_ssh``.
    """

    @_require_installed
    def setUp(self):
        """
        Start a disposable SSH server with its own configuration directories
        and an ``ssh-agent`` that can authenticate against it; everything is
        cleaned up after the test.
        """
        self.sshd_config = FilePath(self.mktemp())
        self.server = create_ssh_server(self.sshd_config)
        self.addCleanup(self.server.restore)
        self.flocker_config = FilePath(self.mktemp())
        self.local_user_ssh = FilePath(self.mktemp())

        self.config = OpenSSHConfiguration(
            ssh_config_path=self.local_user_ssh,
            flocker_path=self.flocker_config)
        self.configure_ssh = self.config.configure_ssh

        # ``configure_ssh`` expects ``ssh`` to already be able to
        # authenticate against the server.  Set up an ssh-agent to
        # help it do that against our testing server.
        self.agent = create_ssh_agent(self.server.key_path, self)

    def test_installs_public_sshkeys(self):
        """
        ``DeployScript._configure_ssh`` installs the cluster wide public ssh
        keys on each node in the supplied ``Deployment``.
        """
        deployment = Deployment(
            nodes=frozenset([
                Node(
                    hostname=str(self.server.ip),
                    applications=None
                ),
                # Node(
                #     hostname='node2.example.com',
                #     applications=None
                # )
            ])
        )

        script = DeployScript(
            ssh_configuration=self.config, ssh_port=self.server.port)
        result = script._configure_ssh(deployment)

        local_key = self.local_user_ssh.child(b'id_rsa_flocker.pub')
        authorized_keys = self.sshd_config.descendant([
            b'home', b'.ssh', b'authorized_keys'])

        def check_authorized_keys(ignored):
            # The generated public key must appear among the server's
            # authorized keys once configuration has finished.
            self.assertIn(local_key.getContent().rstrip(),
                          authorized_keys.getContent().splitlines())

        result.addCallback(check_authorized_keys)
        return result

    def test_sshkey_installation_failure(self):
        """
        ``DeployScript._configure_ssh`` fires with an errback if one of the
        configuration attempts fails.
        """
        def fail(host, port):
            raise ZeroDivisionError()
        self.config.configure_ssh = fail

        deployment = Deployment(
            nodes=frozenset([
                Node(
                    hostname=str(self.server.ip),
                    applications=None
                ),
            ])
        )

        script = DeployScript(
            ssh_configuration=self.config, ssh_port=self.server.port)
        result = script._configure_ssh(deployment)
        # The aggregated failure wraps the real one; unwrap the sub-failure
        # so ``assertFailure`` can match the original exception type.
        result.addErrback(lambda f: f.value.subFailure)
        result = self.assertFailure(result, ZeroDivisionError)
        # Handle errors logged by gather_deferreds
        self.addCleanup(self.flushLoggedErrors, ZeroDivisionError)
        return result

    def test_sshkey_installation_ssh_process_failure(self):
        """
        ``DeployScript._configure_ssh`` fires with a ``SystemExit`` errback
        containing the SSH process output if one of the configuration
        attempts fails.
        """
        def fail(host, port):
            raise CalledProcessError(1, "ssh", output=b"onoes")
        self.config.configure_ssh = fail

        deployment = Deployment(
            nodes=frozenset([
                Node(
                    hostname=str(self.server.ip),
                    applications=None
                ),
            ])
        )

        script = DeployScript(
            ssh_configuration=self.config, ssh_port=self.server.port)
        result = script._configure_ssh(deployment)
        result = self.assertFailure(result, SystemExit)
        result.addCallback(lambda exc: self.assertEqual(
            exc.args, (b"Error connecting to cluster node: onoes",)))
        # Handle errors logged by gather_deferreds
        self.addCleanup(self.flushLoggedErrors, CalledProcessError)
        return result

    def test_sshkey_installation_failure_logging(self):
        """
        ``DeployScript._configure_ssh`` logs all failed configuration attempts.
        """
        expected_errors = [
            ZeroDivisionError("error1"),
            ZeroDivisionError("error2"),
            ZeroDivisionError("error3"),
        ]

        error_iterator = (e for e in expected_errors)

        def fail(host, port):
            # Use the builtin ``next`` rather than the Python-2-only
            # ``.next()`` generator method.
            raise next(error_iterator)

        self.config.configure_ssh = fail

        deployment = Deployment(
            nodes=frozenset([
                Node(
                    hostname=b'node1.example.com',
                    applications=None
                ),
                Node(
                    hostname=b'node2.example.com',
                    applications=None
                ),
                Node(
                    hostname=b'node3.example.com',
                    applications=None
                ),

            ])
        )

        script = DeployScript(
            ssh_configuration=self.config, ssh_port=self.server.port)
        result = script._configure_ssh(deployment)

        def check_logs(ignored_first_error):
            failures = self.flushLoggedErrors(ZeroDivisionError)
            # SSH configuration is performed in parallel threads so the order
            # of the logged errors depends on the thread scheduling.  Sort
            # both sides before comparing to keep the test deterministic.
            self.assertEqual(
                sorted(expected_errors),
                sorted(f.value for f in failures)
            )

        result.addErrback(check_logs)
        return result
Ejemplo n.º 30
0
class _WordsController(object):
    """
    Basic controller that manages registered zones and manages name assignment
    by filtering on the registered IP subnet.
    """

    def __init__(self, data_dir="data", word_count=3):
        """
        @param data_dir: directory containing the C{word_list} file and the
            per-zone name records.
        @param word_count: number of words composing each assigned name.
        """
        self.data_dir = FilePath(data_dir)
        self.word_count = word_count
        self.separator = b"-"
        # Mapping of zone (bytes) -> allowed ip_network for that zone.
        self.data = dict()
        # When True, trust X-Forwarded-For headers (enabled by deployment
        # configuration elsewhere).
        self.proxied = False

        self.app = Klein()

        # Routing for the HTTP API
        @self.app.route("/")
        def readme(request):
            """
            Public README page
            """
            request.setHeader('Content-Type', 'text/plain')
            return self.get_readme()

        @self.app.route("/register")
        def register(request):
            """
            A GET request from a registered zone's subnet is sufficient to
            trigger a name assignment.
            We support mainly the `domain` parameter so that it's easy to reach
            from yggdrasil and other networks.
            """
            if self.proxied:
                # Trust X-Forwarded-For headers if set up in the config
                from twisted.web.http import _XForwardedForRequest
                request = _XForwardedForRequest(request)

            hostname = request.args.get(b'domain', [False])[0]
            if not hostname:
                hostname = request.getRequestHostname()
            try:
                ip = request.getClientAddress().host.decode("utf-8")
            except Exception:
                # Narrowed from a bare ``except:``.  The client address may
                # not carry a decodable host (e.g. a UNIX socket, or ``host``
                # already being text); fall back to no IP and let
                # ``register_ip`` reject the request.
                ip = None
            request.setHeader('Content-Type', 'text/plain')
            return self.register_ip(hostname, ip)

    @lru_cache(10240)
    def get_readme(self):
        """
        Return the decoded contents of the C{README.md} file living next to
        the data directory.

        NOTE(review): ``lru_cache`` on an instance method keys the cache on
        ``self`` and keeps the instance alive for the cache's lifetime; with
        a single long-lived controller this is harmless, but a
        ``cached_property`` would avoid the pattern.
        """
        return (
            FilePath(self.data_dir.dirname())
            .child("README.md")
            .getContent()
            .decode("utf-8")
        )

    def register_zone(self, zone, subnet):
        """
        Register a zone with the controller along with its allowed subnet.

        @param zone: the DNS zone without any trailing or leading dots.
        @type  zone: C{bytes}

        @param subnet: the subnet that will be allowed to register names.
        @type  subnet: L{ipaddress.ip_network}
        """
        # The assertion message previously formatted the undefined name
        # ``ip_network``, which would have raised NameError exactly when the
        # assertion fired; format the offending value instead.
        assert isinstance(
            subnet, (IPv4Network, IPv6Network)
        ), "'{}' is not ipaddress.ip_network".format(subnet)
        log.debug("Registered Zone {zone} | {subnet}", zone=zone, subnet=subnet)
        self.data[zone] = subnet

    def register_ip(self, zone, ip):
        """
        Actually register a name for a given IP.

        @returns: A resource or a byte-string depending on the action being
          successful or not.
          Possible HTTP codes are:
            200 (OK)
            400 (Bad request --> no such zone)
            403 (Forbidden --> out of subnet)
            507 (Insufficient storage --> somehow the name space is kinda full)
        """
        if zone not in self.data:
            return ErrorPage(
                400, "Bad Request", "No such zone, consider hosting your own!"
            )
        try:
            return self.get_assign_name(zone, ip) + b"." + zone + b"\n"
        except ValueError:
            # Raised for IPs outside the zone's subnet (or unparseable IPs).
            return ForbiddenResource("Your IP is not allowed to use this resource.")
        except LookupError:
            return ErrorPage(
                507,
                "Insufficient Storage",
                "It looks like this zone is getting full. Consider hosting your own!",
            )
        except Exception as ex:
            log.error("Error registering {zone} | {ip}", zone=zone, ip=ip)
            # NOTE(review): twisted's ``Logger.failure`` expects a format
            # string (optionally with a Failure); passing the exception
            # object directly may not render as intended -- confirm.
            log.failure(ex)
            return ErrorPage(500, "Internal Error", "Something odd happened!")

    @lru_cache(maxsize=1024)
    def get_assign_name(self, zone, ip):
        """
        Find (or reuse) a word-based name for ``ip`` in ``zone`` and persist
        the record on disk.

        @raises LookupError: when the hash iterator is exhausted without
            finding a free or matching record (the zone is "full").
        """
        ipaddr = ip_address(ip)
        # collisions should be handled by iterator
        it = hash_parts_generator(ip, self.word_count, len(self.all_words))
        for h in it:
            words = self.separator.join([self.all_words[i] for i in h])
            record = self.name_to_record(zone, words)
            if record.exists():
                try:
                    record_addr = ip_address(record.getContent().decode("utf-8"))
                    if record_addr == ipaddr:
                        # Already registered
                        return words
                except Exception:
                    # Narrowed from a bare ``except:``.  If the record
                    # contains invalid data, reuse it.
                    break
            else:
                break
        else:
            raise LookupError("Can't assign name in '{}' for IP '{}'".format(zone, ip))

        record.parent().makedirs(ignoreExistingDirectory=True)
        record.setContent(ipaddr.compressed.encode("utf-8"))

        return words

    @property
    @lru_cache(maxsize=1024)
    def all_words(self):
        """
        Return the lines of the C{word_list} file as a list of byte-strings,
        excluding any line containing C{#}.
        """
        return [
            i
            for i in self.data_dir.child("word_list").getContent().split(b"\n")
            if b"#" not in i
        ]

    def name_to_record(self, zone, words):
        """
        Helper that returns a FilePath object to the file that should contain
        the resulting IP.
        """
        # Split on the configured separator rather than a hard-coded b"-" so
        # this stays consistent with ``get_assign_name``'s joining logic.
        parts = [zone] + words.split(self.separator)
        return self.data_dir.descendant(parts)

    @lru_cache(maxsize=1024)
    def words_to_IP(self, zone, words):
        """
        Get the IP associated with certain words in a zone if registered.
        """
        assert zone, "Empty zone passed"
        record = self.name_to_record(zone, words)
        if record.exists() and record.isfile():
            return ip_address(record.getContent().decode("utf-8"))
        raise ValueError("Name not registered '{}'".format(words))
Ejemplo n.º 31
0
class FlockerDeployConfigureSSHTests(TestCase):
    """
    Tests for ``DeployScript._configure_ssh``.
    """
    @_require_installed
    def setUp(self):
        """
        Start a disposable SSH server, create the configuration directories
        the deploy script reads and writes, and run an ``ssh-agent`` able to
        authenticate against the server.
        """
        self.sshd_config = FilePath(self.mktemp())
        self.server = create_ssh_server(self.sshd_config)
        self.addCleanup(self.server.restore)
        self.flocker_config = FilePath(self.mktemp())
        self.local_user_ssh = FilePath(self.mktemp())

        self.config = OpenSSHConfiguration(ssh_config_path=self.local_user_ssh,
                                           flocker_path=self.flocker_config)
        self.configure_ssh = self.config.configure_ssh

        # ``configure_ssh`` expects ``ssh`` to already be able to
        # authenticate against the server.  Set up an ssh-agent to
        # help it do that against our testing server.
        self.agent = create_ssh_agent(self.server.key_path, self)

    def test_installs_public_sshkeys(self):
        """
        ``DeployScript._configure_ssh`` installs the cluster wide public ssh
        keys on each node in the supplied ``Deployment``.
        """
        deployment = Deployment(nodes=frozenset([
            Node(hostname=str(self.server.ip), applications=None),
            # Node(
            #     hostname='node2.example.com',
            #     applications=None
            # )
        ]))

        script = DeployScript(ssh_configuration=self.config,
                              ssh_port=self.server.port)
        result = script._configure_ssh(deployment)

        # The public key the script generates locally...
        local_key = self.local_user_ssh.child(b'id_rsa_flocker.pub')
        # ...must end up in the test server's authorized_keys file.
        authorized_keys = self.sshd_config.descendant(
            [b'home', b'.ssh', b'authorized_keys'])

        def check_authorized_keys(ignored):
            self.assertIn(local_key.getContent().rstrip(),
                          authorized_keys.getContent().splitlines())

        result.addCallback(check_authorized_keys)
        return result

    def test_sshkey_installation_failure(self):
        """
        ``DeployScript._configure_ssh`` fires with an errback if one of the
        configuration attempts fails.
        """
        def fail(host, port):
            raise ZeroDivisionError()

        self.config.configure_ssh = fail

        deployment = Deployment(nodes=frozenset([
            Node(hostname=str(self.server.ip), applications=None),
        ]))

        script = DeployScript(ssh_configuration=self.config,
                              ssh_port=self.server.port)
        result = script._configure_ssh(deployment)
        # The aggregated failure wraps the injected one; unwrap the
        # sub-failure so ``assertFailure`` can match ZeroDivisionError.
        result.addErrback(lambda f: f.value.subFailure)
        result = self.assertFailure(result, ZeroDivisionError)
        # NOTE(review): unlike the sibling suite in this file, no
        # ``addCleanup(self.flushLoggedErrors, ...)`` is registered here, so
        # errors logged by gather_deferreds may be left unflushed -- confirm.
        return result

    def test_sshkey_installation_ssh_process_failure(self):
        """
        ``DeployScript._configure_ssh`` fires with a ``SystemExit`` errback
        containing the SSH process output if one of the configuration
        attempts fails.
        """
        def fail(host, port):
            raise CalledProcessError(1, "ssh", output=b"onoes")

        self.config.configure_ssh = fail

        deployment = Deployment(nodes=frozenset([
            Node(hostname=str(self.server.ip), applications=None),
        ]))

        script = DeployScript(ssh_configuration=self.config,
                              ssh_port=self.server.port)
        result = script._configure_ssh(deployment)
        result = self.assertFailure(result, SystemExit)
        # The SystemExit message must carry the captured SSH process output.
        result.addCallback(lambda exc: self.assertEqual(
            exc.args, (b"Error connecting to cluster node: onoes", )))
        return result
Ejemplo n.º 32
0
def create_proxy_to(logger, ip, port):
    """
    :see: ``HostNetwork.create_proxy_to``
    """
    action = CREATE_PROXY_TO(
        logger=logger, target_ip=ip, target_port=port)

    with action:
        ip_arg = unicode(ip).encode("ascii")
        port_arg = unicode(port).encode("ascii")

        # Step one: Destination NAT (DNAT).  Traffic arriving at this host on
        # the chosen port has its destination address rewritten to the target
        # ip, so packets that looked like they were for "us" get delivered to
        # the real destination instead.
        iptables(logger, [
            # NAT rules all live in the netfilter nat table.
            b"--table", b"nat",

            # DNAT must happen before the routing decision so that routing is
            # based on the rewritten destination; that means the PREROUTING
            # chain.
            b"--append", b"PREROUTING",

            # Only touch TCP traffic aimed at the configured port.  The
            # protocol must be named because ports are a TCP/UDP-layer
            # concept.
            b"--protocol", b"tcp", b"--destination-port", port_arg,

            # Only touch traffic addressed to this host; traffic merely
            # passing through or originating here for some other host on the
            # same port is left alone.
            b"--match", b"addrtype", b"--dst-type", b"LOCAL",

            # Tag the rule as flocker-created so it can be recognized later.
            b"--match", b"comment", b"--comment", FLOCKER_PROXY_COMMENT_MARKER,

            # Hand matching packets to the built-in DNAT target along with
            # the new destination address to write into them.
            b"--jump", b"DNAT", b"--to-destination", ip_arg,
        ])

        # Step two: masquerade on the way out.  The forwarded packets must
        # appear to come from this host - otherwise the real destination
        # would reply directly to the original client, thoroughly confusing
        # (i.e. breaking) it.  MASQUERADE rewrites the source ip and port to
        # those of the interface the packet leaves on.  SNAT would be
        # slightly cheaper (no per-packet interface-address lookup) but
        # requires knowing that address, updating the rule whenever it
        # changes, and possibly some port-allocation work, so masquerading
        # is used for now.
        iptables(logger, [
            # NAT rules all live in the netfilter nat table.
            b"--table", b"nat",

            # Source rewriting happens after routing, on the way out of the
            # system: the POSTROUTING chain.
            b"--append", b"POSTROUTING",

            # Match the same traffic as the PREROUTING rule (duplication we
            # may want to factor out if the filter specifics ever change),
            # minus the LOCAL addrtype check since at this point the packet
            # is definitely leaving this host.
            b"--protocol", b"tcp", b"--destination-port", port_arg,

            # Perform the source rewrite.
            b"--jump", b"MASQUERADE",
        ])

        # Step three: locally-originated traffic.  Packets generated *on*
        # this host bypass PREROUTING and pass through the OUTPUT chain
        # instead, so an equivalent DNAT rule is needed there for localhost
        # connections to the forwarded port.
        iptables(logger, [
            # NAT rules all live in the netfilter nat table.
            b"--table", b"nat",

            # This copy of the rule goes in the OUTPUT chain.
            b"--append", b"OUTPUT",

            # Exactly the same match as the PREROUTING rule.
            b"--protocol", b"tcp",
            b"--destination-port", port_arg,
            b"--match", b"addrtype", b"--dst-type", b"LOCAL",

            # And exactly the same DNAT as the PREROUTING rule.
            b"--jump", b"DNAT", b"--to-destination", ip_arg,
        ])

        # Let the rewritten traffic through the FORWARD filter chain.
        iptables(logger, [
            b"--table", b"filter",
            b"--insert", b"FORWARD",

            b"--destination", ip_arg,
            b"--protocol", b"tcp", b"--destination-port", port_arg,

            b"--jump", b"ACCEPT",
        ])

        # The network stack only considers forwarding traffic when the
        # matching sysctls are enabled.
        #
        # https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
        # explains these settings in (very slightly) more detail.
        ipv4_conf = FilePath(b"/proc/sys/net/ipv4/conf")
        forwarding_path = ipv4_conf.descendant([b"default", b"forwarding"])
        with forwarding_path.open("wb") as sysctl:
            sysctl.write(b"1")

        # Routing decisions must also be made for traffic from or to
        # localhost so that the OUTPUT-chain DNAT rule can take effect.
        for interface_conf in ipv4_conf.children():
            localnet = interface_conf.child(b"route_localnet")
            with localnet.open("wb") as sysctl:
                sysctl.write(b"1")

        return Proxy(ip=ip, port=port)
# --- Ejemplo n.º 33 ---
class GetExtensionsTests(TestCase):
    """
    Tests for L{dist.getExtensions}.
    """

    setupTemplate = ("from twisted.python.dist import ConditionalExtension\n"
                     "extensions = [\n"
                     "    ConditionalExtension(\n"
                     "        '%s', ['twisted/some/thing.c'],\n"
                     "        condition=lambda builder: True)\n"
                     "    ]\n")

    def setUp(self):
        """
        Build a fake ``twisted`` package directory under a fresh temporary
        path and chdir into its parent, restoring the original working
        directory when the test finishes.
        """
        sandbox = FilePath(self.mktemp())
        self.basedir = sandbox.child("twisted")
        self.basedir.makedirs()
        original_cwd = os.getcwd()
        self.addCleanup(os.chdir, original_cwd)
        os.chdir(sandbox.path)

    def writeSetup(self, name, *path):
        """
        Create the directories named by C{path} beneath L{self.basedir} and
        drop a C{setup.py} there, produced by interpolating C{name} into
        L{self.setupTemplate}.
        """
        target = self.basedir.descendant(path)
        target.makedirs()
        target.child("setup.py").setContent(self.setupTemplate % (name, ))

    def writeEmptySetup(self, *path):
        """
        Create the directories named by C{path} beneath L{self.basedir} and
        put a C{setup.py} with no contents there.
        """
        target = self.basedir.descendant(path)
        target.makedirs()
        empty = target.child("setup.py")
        empty.setContent("")

    def assertExtensions(self, expected):
        """
        Assert that the sorted names of the extensions discovered by
        L{dist.getExtensions} equal C{expected}.
        """
        discovered = sorted(
            extension.name for extension in dist.getExtensions())
        self.assertEqual(discovered, expected)

    def test_getExtensions(self):
        """
        Any I{setup.py} found in I{twisted/topfiles} or I{twisted/*/topfiles}
        is run with L{execfile} and the extensions it declares are collected.
        """
        self.writeSetup("twisted.tele.port", "tele", "topfiles")
        self.writeSetup("twisted.transmutate", "topfiles")
        self.assertExtensions(["twisted.tele.port", "twisted.transmutate"])

    def test_getExtensionsTooDeep(self):
        """
        A I{setup.py} inside a I{topfiles} directory nested too deeply in the
        directory hierarchy is ignored.
        """
        self.writeSetup("twisted.trans.mog.rify", "trans", "mog", "topfiles")
        self.assertExtensions([])

    def test_getExtensionsNotTopfiles(self):
        """
        A I{setup.py} is only honoured when its containing directory is named
        I{topfiles}; any other directory name causes it to be skipped.
        """
        self.writeSetup("twisted.metamorphosis", "notfiles")
        self.assertExtensions([])

    def test_getExtensionsNotSupportedOnJava(self):
        """
        No extensions are discovered when running on a Java-based platform,
        where they are unsupported.
        """
        realPlatform = sys.platform
        self.addCleanup(setattr, sys, "platform", realPlatform)
        sys.platform = "java"
        self.writeSetup("twisted.sorcery", "topfiles")
        self.assertExtensions([])

    def test_getExtensionsExtensionsLocalIsOptional(self):
        """
        A I{setup.py} that never binds an C{extensions} local variable is
        tolerated and simply contributes no extensions.
        """
        self.writeEmptySetup("twisted.necromancy", "topfiles")
        self.assertExtensions([])