Example #1
    def test_functional_ubuntu_1404(self):
        """
        The expected deb files are generated on Ubuntu 14.04.
        """
        output_dir = FilePath(self.mktemp())
        check_call([
            FLOCKER_PATH.descendant([b'admin', b'build-package']).path,
            '--destination-path', output_dir.path,
            '--distribution', 'ubuntu-14.04',
            FLOCKER_PATH.path
        ])
        python_version = __version__
        rpm_version = make_rpm_version(python_version)

        expected_basenames = (
            ('clusterhq-python-flocker', 'amd64'),
            ('clusterhq-flocker-cli', 'all'),
            ('clusterhq-flocker-node', 'all'),
        )
        expected_filenames = []
        for basename, arch in expected_basenames:
            f = '{}_{}-{}_{}.deb'.format(
                basename, rpm_version.version, rpm_version.release, arch)
            expected_filenames.append(f)

        self.assertEqual(
            set(expected_filenames),
            set(f.basename() for f in output_dir.children())
        )

        for f in output_dir.children():
            assert_deb_lint(self, f)
Example #2
    def test_functional_centos_7(self):
        """
        The expected RPM files are built for CentOS 7.
        """
        output_dir = FilePath(self.mktemp())
        check_call([
            FLOCKER_PATH.descendant([b'admin', b'build-package']).path,
            '--destination-path', output_dir.path,
            '--distribution', 'centos-7',
            FLOCKER_PATH.path
        ])
        python_version = __version__
        rpm_version = make_rpm_version(python_version)

        expected_basenames = (
            ('clusterhq-python-flocker', 'x86_64'),
            ('clusterhq-flocker-cli', 'noarch'),
            ('clusterhq-flocker-node', 'noarch'),
        )
        expected_filenames = []
        for basename, arch in expected_basenames:
            f = '{}-{}-{}.{}.rpm'.format(
                basename, rpm_version.version, rpm_version.release, arch)
            expected_filenames.append(f)

        self.assertEqual(
            set(expected_filenames),
            set(f.basename() for f in output_dir.children())
        )

        for f in output_dir.children():
            assert_rpm_lint(self, f)
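For reference, these two tests assert the standard package-naming schemes: Debian packages are named name_version-release_arch.deb, while RPMs are named name-version-release.arch.rpm. Below is a minimal sketch of that formatting logic using a hypothetical stand-in for the make_rpm_version() result (the real function comes from Flocker's admin tooling; the version and release values here are invented):

from collections import namedtuple

# Hypothetical stand-in for the object returned by make_rpm_version().
RPMVersion = namedtuple("RPMVersion", ["version", "release"])
rpm_version = RPMVersion(version="1.2.3", release="1")

deb = '{}_{}-{}_{}.deb'.format(
    'clusterhq-flocker-cli', rpm_version.version, rpm_version.release, 'all')
rpm = '{}-{}-{}.{}.rpm'.format(
    'clusterhq-flocker-cli', rpm_version.version, rpm_version.release,
    'noarch')
assert deb == 'clusterhq-flocker-cli_1.2.3-1_all.deb'
assert rpm == 'clusterhq-flocker-cli-1.2.3-1.noarch.rpm'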
Example #3
    def test_functional_ubuntu_1404(self):
        """
        The expected deb files are generated on Ubuntu 14.04.
        """
        output_dir = FilePath(self.mktemp())
        check_call([
            FLOCKER_PATH.descendant(['admin', 'build-package']).path,
            '--destination-path', output_dir.path, '--distribution',
            'ubuntu-14.04', FLOCKER_PATH.path
        ])
        python_version = __version__
        rpm_version = make_rpm_version(python_version)

        expected_basenames = (
            ('clusterhq-python-flocker', 'amd64'),
            ('clusterhq-flocker-cli', 'all'),
            ('clusterhq-flocker-node', 'all'),
        )
        expected_filenames = []
        for basename, arch in expected_basenames:
            f = '{}_{}-{}_{}.deb'.format(basename, rpm_version.version,
                                         rpm_version.release, arch)
            expected_filenames.append(f)

        self.assertEqual(set(expected_filenames),
                         set(f.basename() for f in output_dir.children()))

        for f in output_dir.children():
            assert_deb_lint(self, f)
Example #4
def main(argv):
    # input
    posts = FilePath(argv[1])

    # output
    blog = FilePath(argv[2])

    # Since Sphinx gets confused by image paths with "special" characters in
    # them, generate new names for all the image paths and a mapping from the
    # old names to the new names.
    images = FilePath(argv[3])

    imagepaths = []
    for post in images.children():
        if post.isdir():
            imagepaths.append(post)
            safe = post.sibling(fixpath(post.basename()))
            if post != safe and not safe.isdir():
                post.moveTo(safe)
                safe.linkTo(post)

    entries = []
    for post in posts.children():
        data = post.getContent().decode("utf-8")
        ignored, header, body = data.split("---", 2)
        meta = dict(
            (text.strip() for text in line.split(":", 1))
            for line in header.splitlines()
            if line.strip())
        date = datetime.strptime(meta["date"], "%Y/%m/%d %H:%M:%S")

        parent = blog.preauthChild(
            ("%d/%02d/%02d" % (date.year, date.month, date.day)).encode("utf-8"))
        title = fixpath(meta["title"].strip().lower().encode("utf-8")).decode("utf-8")
        entry = parent.child((title + ".rst").encode("utf-8"))

        header = HEADER_TEMPLATE % dict(
            author=meta["author"].strip(), categories="none",
            tags=meta["categories"].strip(), title=meta["title"].strip(),
            underbar="=" * len(meta["title"].strip()))

        for path in imagepaths:
            body = body.replace(
                u"/" + path.basename().decode("utf-8") + u"/",
                u"/" + fixpath(path.basename()).decode("utf-8") + u"/")

        if not parent.isdir():
            parent.makedirs()

        entry.setContent((header + html2rst(body)).encode("utf-8"))

        entries.append(entry)

    entries.sort()
    entries.reverse()

    sitemap = SITEMAP % dict(
        entries="".join([
                "\n   " + "/".join(entry.segmentsFrom(blog))
                for entry in entries]))
    blog.child(b"master.rst").setContent(sitemap.encode("utf-8"))

    FilePath(b"conf.py").copyTo(blog.child(b"conf.py"))
Example #5
    def test_functional_fedora_20(self):
        """
        The expected RPM files are built for Fedora 20.
        """
        output_dir = FilePath(self.mktemp())
        check_call([
            FLOCKER_PATH.descendant(['admin', 'build-package']).path,
            '--destination-path', output_dir.path, '--distribution',
            'fedora-20', FLOCKER_PATH.path
        ])
        python_version = __version__
        rpm_version = make_rpm_version(python_version)

        expected_basenames = (
            ('clusterhq-python-flocker', 'x86_64'),
            ('clusterhq-flocker-cli', 'noarch'),
            ('clusterhq-flocker-node', 'noarch'),
        )
        expected_filenames = []
        for basename, arch in expected_basenames:
            f = '{}-{}-{}.{}.rpm'.format(basename, rpm_version.version,
                                         rpm_version.release, arch)
            expected_filenames.append(f)

        self.assertEqual(set(expected_filenames),
                         set(f.basename() for f in output_dir.children()))

        for f in output_dir.children():
            assert_rpm_lint(self, f)
Example #6
    def _large_request_test(self, request_body_size):
        """
        Assert that when a request with a body of the given size is received,
        its content is written to the directory the ``TahoeLAFSSite`` is
        configured with.
        """
        tempdir = FilePath(self.mktemp())
        tempdir.makedirs()
        request = self._create_request(tempdir)

        # So.  Bad news.  The temporary file for the uploaded content is
        # unnamed (and this isn't even necessarily a bad thing since it is how
        # you get automatic on-process-exit cleanup behavior on POSIX).  It's
        # not visible by inspecting the filesystem.  It has no name we can
        # discover.  Then how do we verify it is written to the right place?
        # The question itself is meaningless if we try to be too precise.  It
        # *has* no filesystem location.  However, it is still stored *on* some
        # filesystem.  We still want to make sure it is on the filesystem we
        # specified because otherwise it might be on a filesystem that's too
        # small or undesirable in some other way.
        #
        # I don't know of any way to ask a file descriptor which filesystem
        # it's on, either, though.  It might be the case that the [f]statvfs()
        # result could be compared somehow to infer the filesystem but
        # ... it's not clear what the failure modes might be there, across
        # different filesystems and runtime environments.
        #
        # Another approach is to make the temp directory unwriteable and
        # observe the failure when an attempt is made to create a file there.
        # This is hardly a lovely solution but at least it's kind of simple.
        #
        # It would be nice if it worked consistently cross-platform but on
        # Windows os.chmod is more or less broken.
        if platform.isWindows():
            request.gotLength(request_body_size)
            self.assertThat(
                tempdir.children(),
                HasLength(1),
            )
        else:
            tempdir.chmod(0o550)
            with self.assertRaises(OSError) as ctx:
                request.gotLength(request_body_size)
                raise Exception(
                    "OSError not raised, instead tempdir.children() = {}".format(
                        tempdir.children(),
                    ),
                )

            self.assertThat(
                ctx.exception.errno,
                Equals(EACCES),
            )
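As a rough illustration of the [f]statvfs() idea floated in the comment above, here is a sketch under stated assumptions (a POSIX system, and Python 3.7+ for statvfs_result.f_fsid); it is suggestive, not a reliable check:

import os
import tempfile

# Compare the filesystem id of an open (possibly unnamed) file with that
# of the directory it should live on. Matching f_fsid values suggest,
# but do not prove, that both are on the same filesystem.
with tempfile.TemporaryFile(dir="/tmp") as f:
    assert os.fstatvfs(f.fileno()).f_fsid == os.statvfs("/tmp").f_fsid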
Example #7
 def test_add_node(self):
     """
     ``Certificates.add_node`` generates another node certificate.
     """
     output = FilePath(self.mktemp())
     output.makedirs()
     certificates = Certificates.generate(output, b"some-service", 2,
                                          b"test-cluster")
     certificates.add_node(3)
     self.assertEqual(
         {
             output.child(b"cluster.crt"),
             output.child(b"cluster.key"),
             output.child(b"control-some-service.crt"),
             output.child(b"control-some-service.key"),
             output.child(b"user.crt"),
             output.child(b"user.key"),
             output.child(b"node-0.crt"),
             output.child(b"node-0.key"),
             output.child(b"node-1.crt"),
             output.child(b"node-1.key"),
             output.child(b"node-3.crt"),
             output.child(b"node-3.key"),
         },
         set(output.children()),
     )
Example #8
 def test_catchAll(self):
     """
     Everything should match a catchall rule.
     """
     tmpdir = FilePath(self.mktemp())
     rules = [
         {
             'pattern': {
                 'foo': '*',
             },
             'actions': [
                 {'merge_yaml': '{foo}.yml'},
             ]
         },
         {
             'pattern': 'all',
             'actions': [
                 {'merge_yaml': 'extra.yml'},
             ]
         }
     ]
     dumper = RuleBasedFileDumper(tmpdir.path, rules)
     dumper.dumpObject({
         'foo': 'thefoo',
     })
     self.assertTrue(tmpdir.child('thefoo.yml').exists(),
         "Should have matched and acted on the first rule")
     dumper.dumpObject({
         'bar': 'hey',
     })
     self.assertTrue(tmpdir.child('extra.yml').exists(),
         "Should have matched and acted on the second rule")
     self.assertEqual(len(tmpdir.children()), 2, "Should only have made "
         "the 2 expected files")
Example #9
    def test_alwaysPreferPy(self):
        """
        Verify that .py files will always be preferred to .pyc files, regardless of
        directory listing order.
        """
        mypath = FilePath(self.mktemp())
        mypath.createDirectory()
        pp = modules.PythonPath(sysPath=[mypath.path])
        originalSmartPath = pp._smartPath

        def _evilSmartPath(pathName):
            o = originalSmartPath(pathName)
            originalChildren = o.children

            def evilChildren():
                # normally this order is random; let's make sure it always
                # comes up .pyc-first.
                x = list(originalChildren())
                x.sort()
                x.reverse()
                return x

            o.children = evilChildren
            return o

        mypath.child("abcd.py").setContent(b"\n")
        compileall.compile_dir(mypath.path, quiet=True)
        # sanity check
        self.assertEqual(len(list(mypath.children())), 2)
        pp._smartPath = _evilSmartPath
        self.assertEqual(pp["abcd"].filePath, mypath.child("abcd.py"))
Example #10
    def test_exportAllContacts(self):
        """
        Run the export with --all --contacts to get a directory of addressbooks from all
        addressbook homes in the database.
        """
        yield populateAddressBooksFrom(
            {
                "user01": {
                    "addressbook": {
                        "1.vcf": adbk1Root.child("1.vcf").getContent(),
                        "2.vcf": adbk1Root.child("2.vcf").getContent(),
                        "3.vcf": adbk1Root.child("3.vcf").getContent(),
                    }
                },
                "user02": {
                    "addressbook": {
                        "1.vcf": adbk1Root.child("1.vcf").getContent(),
                    },
                }
            }, self.store)

        outputDir = FilePath(self.mktemp())
        outputDir.makedirs()
        main([
            'calendarserver_export', '--directory', outputDir.path, '--all',
            '--contacts'
        ],
             reactor=self)
        yield self.waitToStop
        self.assertEquals(
            set(["user01_addressbook.vcf", "user02_addressbook.vcf"]),
            set([child.basename() for child in outputDir.children()]))
Example #11
    def test_alwaysPreferPy(self):
        """
        Verify that .py files will always be preferred to .pyc files, regardless of
        directory listing order.
        """
        mypath = FilePath(self.mktemp())
        mypath.createDirectory()
        pp = modules.PythonPath(sysPath=[mypath.path])
        originalSmartPath = pp._smartPath

        def _evilSmartPath(pathName):
            o = originalSmartPath(pathName)
            originalChildren = o.children

            def evilChildren():
                # normally this order is random; let's make sure it always
                # comes up .pyc-first.
                x = originalChildren()
                x.sort()
                x.reverse()
                return x

            o.children = evilChildren
            return o

        mypath.child("abcd.py").setContent("\n")
        compileall.compile_dir(mypath.path, quiet=True)
        # sanity check
        self.assertEquals(len(mypath.children()), 2)
        pp._smartPath = _evilSmartPath
        self.assertEquals(pp["abcd"].filePath, mypath.child("abcd.py"))
Example #12
    def test_exportOneAddressbook(self):
        """
        Run the export with a single uid and --contacts
        """
        yield populateAddressBooksFrom(
            {
                "user01": {
                    "addressbook": {
                        "1.vcf": adbk1Root.child("1.vcf").getContent(),
                        "2.vcf": adbk1Root.child("2.vcf").getContent(),
                        "3.vcf": adbk1Root.child("3.vcf").getContent(),
                    }
                },
                "user02": {
                    "addressbook": {
                        "1.vcf": adbk1Root.child("1.vcf").getContent(),
                    },
                }
            }, self.store)

        outputDir = FilePath(self.mktemp())
        outputDir.makedirs()
        main([
            'calendarserver_export', '--directory', outputDir.path, '--uid',
            'user01', '--contacts'
        ],
             reactor=self)
        yield self.waitToStop
        self.assertEquals(
            set(["user01_addressbook.vcf"]),
            set([child.basename() for child in outputDir.children()]))
Example #13
        def test_exportAll(self):
            """
            Run the export with --all to get a directory of calendars from all
            calendar homes in the database.
            """
            yield populateCalendarsFrom(
                {
                    "user01": {
                        "calendar1": {
                            "valentines-day.ics": (valentines, {}),
                            "new-years-day.ics": (newYears, {})
                        }
                    },
                    "user02": {
                        "calendar1": {
                            "valentines-day.ics": (valentines, {})
                        },
                        "calendar2": {
                            "new-years-day.ics": (newYears, {})
                        }
                    }
                }, self.store)

            outputDir = FilePath(self.mktemp())
            outputDir.makedirs()
            main([
                'calendarserver_export', '--directory', outputDir.path, '--all'
            ],
                 reactor=self)
            yield self.waitToStop
            self.assertEquals(
                set([
                    "user01_calendar1.ics", "user02_calendar1.ics",
                    "user02_calendar2.ics"
                ]), set([child.basename() for child in outputDir.children()]))
Example #14
    def test_generated(self):
        """
        ``Certificates.generate`` generates a certificate authority
        certificate, a control service certificate, a user certificate, and the
        given number of node certificates.
        """
        output = FilePath(self.mktemp())
        output.makedirs()
        Certificates.generate(output, b"some-service", 2, b"test-cluster")

        self.assertEqual(
            {
                output.child(b"cluster.crt"),
                output.child(b"cluster.key"),
                output.child(b"control-some-service.crt"),
                output.child(b"control-some-service.key"),
                output.child(b"user.crt"),
                output.child(b"user.key"),
                output.child(b"node-0.crt"),
                output.child(b"node-0.key"),
                output.child(b"node-1.crt"),
                output.child(b"node-1.key"),
            },
            set(output.children()),
        )
Example #15
class TestOpenSSHConfig(unittest.TestCase):
    def setUp(self):
        self.directory = FilePath(self.mktemp())
        self.directory.createDirectory()

    def test_files(self):
        openSSHConfig.setupConfig(self.directory.path, 2222)
        for file in self.directory.children():
            f = file.open()
            contents = f.read()
            f.close()
            self.assertTrue("%" not in contents)
        self.assertEquals(len(self.directory.children()), 5)

    def test_commandOptions(self):
        for option in openSSHConfig.setupConfig(self.directory.path, 2222):
            self.assertTrue("%" not in option)
Example #16
    def doIt(self, txn):


        if raw_input("Do you really want to remove all data [y/n]: ")[0].lower() != 'y':
            print "No data removed"
            returnValue(None)

        wipeout = (
            # These are ordered in such a way as to ensure key constraints are not 
            # violated as data is removed

            schema.RESOURCE_PROPERTY,

            schema.CALENDAR_OBJECT_REVISIONS,

            schema.CALENDAR,
            #schema.CALENDAR_BIND, - cascades
            #schema.CALENDAR_OBJECT, - cascades
            #schema.TIME_RANGE, - cascades
            #schema.TRANSPARENCY, - cascades


            schema.CALENDAR_HOME,
            #schema.CALENDAR_HOME_METADATA - cascades
            schema.ATTACHMENT,

            schema.ADDRESSBOOK_OBJECT_REVISIONS,

            schema.ADDRESSBOOK,
            #schema.ADDRESSBOOK_BIND, - cascades
            #schema.ADDRESSBOOK_OBJECT, - cascades

            schema.ADDRESSBOOK_HOME,
            #schema.ADDRESSBOOK_HOME_METADATA, - cascades

            schema.NOTIFICATION_HOME,
            schema.NOTIFICATION,
            #schema.NOTIFICATION_OBJECT_REVISIONS - cascades,
        )

        for tableschema in wipeout:
            yield self.removeTableData(txn, tableschema)
            print "Removed rows in table %s" % (tableschema,)

        if calendaruserproxy.ProxyDBService is not None:
            calendaruserproxy.ProxyDBService.clean() #@UndefinedVariable
            print "Removed all proxies"
        else:
            print "No proxy database to clean."

        fp = FilePath(config.AttachmentsRoot)
        if fp.exists():
            for child in fp.children():
                child.remove()
            print "Removed attachments."
        else:
            print "No attachments path to delete."
Example #18
 def test_unpartialize(self):
     xpg = self.xpg_with_backup_dir()
     folder = FilePath(xpg.archive_log_directory)
     folder.child("1").touch()
     folder.child("2").touch()
     folder.child("3").touch()
     folder.child("4.partial").touch()
     xpg.unpartialize()
     self.assertEquals(set([c.basename() for c in folder.children()]),
                       set(['1', '2', '3', '4']))
Example #20
    def doIt(self, txn):

        if raw_input("Do you really want to remove all data [y/n]: ")[0].lower() != 'y':
            print("No data removed")
            returnValue(None)

        wipeout = (
            # These are ordered in such a way as to ensure key constraints are not
            # violated as data is removed

            schema.RESOURCE_PROPERTY,

            schema.CALENDAR_OBJECT_REVISIONS,

            schema.CALENDAR,
            #schema.CALENDAR_BIND, - cascades
            #schema.CALENDAR_OBJECT, - cascades
            #schema.TIME_RANGE, - cascades
            #schema.TRANSPARENCY, - cascades


            schema.CALENDAR_HOME,
            #schema.CALENDAR_HOME_METADATA - cascades
            schema.ATTACHMENT,

            schema.ADDRESSBOOK_OBJECT_REVISIONS,

            schema.ADDRESSBOOK_HOME,
            #schema.ADDRESSBOOK_HOME_METADATA, - cascades
            #schema.ADDRESSBOOK_BIND, - cascades
            #schema.ADDRESSBOOK_OBJECT, - cascades

            schema.NOTIFICATION_HOME,
            schema.NOTIFICATION,
            #schema.NOTIFICATION_OBJECT_REVISIONS - cascades,
        )

        for tableschema in wipeout:
            yield self.removeTableData(txn, tableschema)
            print("Removed rows in table %s" % (tableschema,))

        if calendaruserproxy.ProxyDBService is not None:
            calendaruserproxy.ProxyDBService.clean() #@UndefinedVariable
            print("Removed all proxies")
        else:
            print("No proxy database to clean.")

        fp = FilePath(config.AttachmentsRoot)
        if fp.exists():
            for child in fp.children():
                child.remove()
            print("Removed attachments.")
        else:
            print("No attachments path to delete.")
Example #21
def loadServices(base):
    from braid import config

    services = {}
    servicesDir = FilePath(base).sibling('services')
    for serviceDir in servicesDir.children():
        serviceName = serviceDir.basename()
        fabfile = serviceDir.child('fabfile.py')
        if fabfile.exists():
            module = imp.load_source(serviceName, fabfile.path, fabfile.open())
            if module.config == config:
                del module.config
            services[serviceName] = module
    return services
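One caveat worth flagging: imp.load_source depends on the imp module, which was deprecated for years and removed in Python 3.12. A rough importlib-based equivalent (a sketch, not what braid itself ships):

import importlib.util

def load_module_from_path(name, path):
    # Load the file at `path` as a module named `name`, roughly what
    # imp.load_source(name, path) used to do.
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module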
Example #23
    def assertExtractedStructure(self, outputFile, dirDict):
        """
        Assert that a tarfile content is equivalent to one described by a dict.

        @param outputFile: The tar file built by L{DistributionBuilder}.
        @type outputFile: L{FilePath}.
        @param dirDict: The dict that should describe the contents of the
            directory. It should be the same structure as the C{dirDict}
            parameter to L{createStructure}.
        @type dirDict: C{dict}
        """
        tarFile = tarfile.TarFile.open(outputFile.path, "r:bz2")
        extracted = FilePath(self.mktemp())
        extracted.createDirectory()
        for info in tarFile:
            tarFile.extract(info, path=extracted.path)
        self.assertStructure(extracted.children()[0], dirDict)
Example #24
    def test_log_pruning(self):
        """
        L{XPostgres.prune_useless_archive_logs} will remove unhelpful log
        files.
        """
        xpg = self.xpg_with_backup_dir()
        folder = FilePath(xpg.archive_log_directory)
        folder.child("base_backup").createDirectory()
        folder.child("unknown").touch()
        folder.child("foo.bar.partial").touch()
        folder.child("foo.bar").touch()
        (folder.child("something").temporarySibling(
            ".in-progress").open().close())

        xpg.prune_useless_archive_logs()
        self.assertEquals(set([c.basename() for c in folder.children()]),
                          set(['base_backup', 'unknown', 'foo.bar']))
Example #27
    def test_generated(self):
        """
        ``Certificates.generate`` generates a certificate authority
        certificate, a control service certificate, a user certificate, and the
        given number of node certificates.
        """
        output = FilePath(self.mktemp())
        output.makedirs()
        Certificates.generate(output, b"some-service", 2, b"test-cluster")

        self.assertEqual(
            {
                output.child(b"cluster.crt"), output.child(b"cluster.key"),
                output.child(b"control-some-service.crt"),
                output.child(b"control-some-service.key"),
                output.child(b"user.crt"), output.child(b"user.key"),
                output.child(b"node-0.crt"), output.child(b"node-0.key"),
                output.child(b"node-1.crt"), output.child(b"node-1.key"),
            },
            set(output.children()),
        )
Example #28
 def test_add_node(self):
     """
     ``Certificates.add_node`` generates another node certificate.
     """
     output = FilePath(self.mktemp())
     output.makedirs()
     certificates = Certificates.generate(output, b"some-service", 2,
                                          b"test-cluster")
     certificates.add_node(3)
     self.assertEqual(
         {
             output.child(b"cluster.crt"), output.child(b"cluster.key"),
             output.child(b"control-some-service.crt"),
             output.child(b"control-some-service.key"),
             output.child(b"user.crt"), output.child(b"user.key"),
             output.child(b"node-0.crt"), output.child(b"node-0.key"),
             output.child(b"node-1.crt"), output.child(b"node-1.key"),
             output.child(b"node-3.crt"), output.child(b"node-3.key"),
         },
         set(output.children()),
     )
Example #29
class NewsBuilderTests(TestCase, StructureAssertingMixin):
    """
    Tests for L{NewsBuilder}.
    """
    skip = svnSkip

    def setUp(self):
        """
        Create a fake project and stuff some basic structure and content into
        it.
        """
        self.builder = NewsBuilder()
        self.project = FilePath(self.mktemp())
        self.project.createDirectory()

        self.existingText = 'Here is stuff which was present previously.\n'
        self.createStructure(
            self.project, {
                'NEWS': self.existingText,
                '5.feature': 'We now support the web.\n',
                '12.feature': 'The widget is more robust.\n',
                '15.feature': (
                    'A very long feature which takes many words to '
                    'describe with any accuracy was introduced so that '
                    'the line wrapping behavior of the news generating '
                    'code could be verified.\n'),
                '16.feature': (
                    'A simpler feature\ndescribed on multiple lines\n'
                    'was added.\n'),
                '23.bugfix': 'Broken stuff was fixed.\n',
                '25.removal': 'Stupid stuff was deprecated.\n',
                '30.misc': '',
                '35.misc': '',
                '40.doc': 'foo.bar.Baz.quux',
                '41.doc': 'writing Foo servers',
            })

    def svnCommit(self, project=None):
        """
        Make the C{project} directory a valid subversion directory with all
        files committed.
        """
        if project is None:
            project = self.project
        repositoryPath = self.mktemp()
        repository = FilePath(repositoryPath)

        runCommand(["svnadmin", "create", repository.path])
        runCommand(
            ["svn", "checkout", "file://" + repository.path, project.path])

        runCommand(["svn", "add"] + glob.glob(project.path + "/*"))
        runCommand(["svn", "commit", project.path, "-m", "yay"])

    def test_today(self):
        """
        L{NewsBuilder._today} returns today's date in YYYY-MM-DD form.
        """
        self.assertEqual(self.builder._today(),
                         date.today().strftime('%Y-%m-%d'))

    def test_findFeatures(self):
        """
        When called with L{NewsBuilder._FEATURE}, L{NewsBuilder._findChanges}
        returns a list of feature ticket numbers and descriptions as a list of
        two-tuples.
        """
        features = self.builder._findChanges(self.project,
                                             self.builder._FEATURE)
        self.assertEqual(features, [
            (5, "We now support the web."), (12, "The widget is more robust."),
            (15, "A very long feature which takes many words to describe with "
             "any accuracy was introduced so that the line wrapping behavior "
             "of the news generating code could be verified."),
            (16, "A simpler feature described on multiple lines was added.")
        ])

    def test_findBugfixes(self):
        """
        When called with L{NewsBuilder._BUGFIX}, L{NewsBuilder._findChanges}
        returns a list of bugfix ticket numbers and descriptions as a list of
        two-tuples.
        """
        bugfixes = self.builder._findChanges(self.project,
                                             self.builder._BUGFIX)
        self.assertEqual(bugfixes, [(23, 'Broken stuff was fixed.')])

    def test_findRemovals(self):
        """
        When called with L{NewsBuilder._REMOVAL}, L{NewsBuilder._findChanges}
        returns a list of removal/deprecation ticket numbers and descriptions
        as a list of two-tuples.
        """
        removals = self.builder._findChanges(self.project,
                                             self.builder._REMOVAL)
        self.assertEqual(removals, [(25, 'Stupid stuff was deprecated.')])

    def test_findDocumentation(self):
        """
        When called with L{NewsBuilder._DOC}, L{NewsBuilder._findChanges}
        returns a list of documentation ticket numbers and descriptions as a
        list of two-tuples.
        """
        doc = self.builder._findChanges(self.project, self.builder._DOC)
        self.assertEqual(doc, [(40, 'foo.bar.Baz.quux'),
                               (41, 'writing Foo servers')])

    def test_findMiscellaneous(self):
        """
        When called with L{NewsBuilder._MISC}, L{NewsBuilder._findChanges}
        returns a list of miscellaneous ticket numbers and descriptions
        as a list of two-tuples.
        """
        misc = self.builder._findChanges(self.project, self.builder._MISC)
        self.assertEqual(misc, [(30, ''), (35, '')])

    def test_writeHeader(self):
        """
        L{NewsBuilder._writeHeader} accepts a file-like object opened for
        writing and a header string and writes out a news file header to it.
        """
        output = StringIO()
        self.builder._writeHeader(output, "Super Awesometastic 32.16")
        self.assertEqual(
            output.getvalue(), "Super Awesometastic 32.16\n"
            "=========================\n"
            "\n")

    def test_writeSection(self):
        """
        L{NewsBuilder._writeSection} accepts a file-like object opened for
        writing, a section name, and a list of ticket information (as returned
        by L{NewsBuilder._findChanges}) and writes out a section header and all
        of the given ticket information.
        """
        output = StringIO()
        self.builder._writeSection(
            output, "Features",
            [(3, "Great stuff."),
             (17, "Very long line which goes on and on and on, seemingly "
              "without end until suddenly without warning it does end.")])
        self.assertEqual(
            output.getvalue(), "Features\n"
            "--------\n"
            " - Great stuff. (#3)\n"
            " - Very long line which goes on and on and on, seemingly "
            "without end\n"
            "   until suddenly without warning it does end. (#17)\n"
            "\n")

    def test_writeMisc(self):
        """
        L{NewsBuilder._writeMisc} accepts a file-like object opened for
        writing, a section name, and a list of ticket information (as returned
        by L{NewsBuilder._findChanges}) and writes out a section header and all
        of the ticket numbers, but excludes any descriptions.
        """
        output = StringIO()
        self.builder._writeMisc(output, "Other",
                                [(x, "") for x in range(2, 50, 3)])
        self.assertEqual(
            output.getvalue(), "Other\n"
            "-----\n"
            " - #2, #5, #8, #11, #14, #17, #20, #23, #26, #29, #32, #35, "
            "#38, #41,\n"
            "   #44, #47\n"
            "\n")

    def test_build(self):
        """
        L{NewsBuilder.build} updates a NEWS file with new features based on the
        I{<ticket>.feature} files found in the directory specified.
        """
        self.builder.build(self.project, self.project.child('NEWS'),
                           "Super Awesometastic 32.16")

        results = self.project.child('NEWS').getContent()
        self.assertEqual(
            results, 'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5)\n'
            ' - The widget is more robust. (#12)\n'
            ' - A very long feature which takes many words to describe '
            'with any\n'
            '   accuracy was introduced so that the line wrapping behavior '
            'of the\n'
            '   news generating code could be verified. (#15)\n'
            ' - A simpler feature described on multiple lines was '
            'added. (#16)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n' + self.existingText)

    def test_emptyProjectCalledOut(self):
        """
        If no changes exist for a project, I{NEWS} gains a new section for
        that project that includes some helpful text about how there were no
        interesting changes.
        """
        project = FilePath(self.mktemp()).child("twisted")
        project.makedirs()
        self.createStructure(project, {'NEWS': self.existingText})

        self.builder.build(project, project.child('NEWS'),
                           "Super Awesometastic 32.16")
        results = project.child('NEWS').getContent()
        self.assertEqual(
            results, 'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n' + self.builder._NO_CHANGES + '\n\n' + self.existingText)

    def test_preserveTicketHint(self):
        """
        If a I{NEWS} file begins with the two magic lines which point readers
        at the issue tracker, those lines are kept at the top of the new file.
        """
        news = self.project.child('NEWS')
        news.setContent(
            'Ticket numbers in this file can be looked up by visiting\n'
            'http://twistedmatrix.com/trac/ticket/<number>\n'
            '\n'
            'Blah blah other stuff.\n')

        self.builder.build(self.project, news, "Super Awesometastic 32.16")

        self.assertEqual(
            news.getContent(),
            'Ticket numbers in this file can be looked up by visiting\n'
            'http://twistedmatrix.com/trac/ticket/<number>\n'
            '\n'
            'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5)\n'
            ' - The widget is more robust. (#12)\n'
            ' - A very long feature which takes many words to describe '
            'with any\n'
            '   accuracy was introduced so that the line wrapping behavior '
            'of the\n'
            '   news generating code could be verified. (#15)\n'
            ' - A simpler feature described on multiple lines was '
            'added. (#16)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n'
            'Blah blah other stuff.\n')

    def test_emptySectionsOmitted(self):
        """
        If there are no changes of a particular type (feature, bugfix, etc), no
        section for that type is written by L{NewsBuilder.build}.
        """
        for ticket in self.project.children():
            if ticket.splitext()[1] in ('.feature', '.misc', '.doc'):
                ticket.remove()

        self.builder.build(self.project, self.project.child('NEWS'),
                           'Some Thing 1.2')

        self.assertEqual(
            self.project.child('NEWS').getContent(), 'Some Thing 1.2\n'
            '==============\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n\n'
            'Here is stuff which was present previously.\n')

    def test_duplicatesMerged(self):
        """
        If two change files have the same contents, they are merged in the
        generated news entry.
        """
        def feature(s):
            return self.project.child(s + '.feature')

        feature('5').copyTo(feature('15'))
        feature('5').copyTo(feature('16'))

        self.builder.build(self.project, self.project.child('NEWS'),
                           'Project Name 5.0')

        self.assertEqual(
            self.project.child('NEWS').getContent(), 'Project Name 5.0\n'
            '================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5, #15, #16)\n'
            ' - The widget is more robust. (#12)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n'
            'Here is stuff which was present previously.\n')

    def createFakeTwistedProject(self):
        """
        Create a fake-looking Twisted project to build from.
        """
        project = FilePath(self.mktemp()).child("twisted")
        project.makedirs()
        self.createStructure(
            project, {
                'NEWS': 'Old boring stuff from the past.\n',
                '_version.py': genVersion("twisted", 1, 2, 3),
                'topfiles': {
                    'NEWS': 'Old core news.\n',
                    '3.feature': 'Third feature addition.\n',
                    '5.misc': ''
                },
                'conch': {
                    '_version.py': genVersion("twisted.conch", 3, 4, 5),
                    'topfiles': {
                        'NEWS': 'Old conch news.\n',
                        '7.bugfix': 'Fixed that bug.\n'
                    }
                }
            })
        return project

    def test_buildAll(self):
        """
        L{NewsBuilder.buildAll} calls L{NewsBuilder.build} once for each
        subproject, passing that subproject's I{topfiles} directory as C{path},
        the I{NEWS} file in that directory as C{output}, and the subproject's
        name as C{header}, and then again for each subproject with the
        top-level I{NEWS} file for C{output}. Blacklisted subprojects are
        skipped.
        """
        builds = []
        builder = NewsBuilder()
        builder.build = lambda path, output, header: builds.append(
            (path, output, header))
        builder._today = lambda: '2009-12-01'

        project = self.createFakeTwistedProject()
        self.svnCommit(project)
        builder.buildAll(project)

        coreTopfiles = project.child("topfiles")
        coreNews = coreTopfiles.child("NEWS")
        coreHeader = "Twisted Core 1.2.3 (2009-12-01)"

        conchTopfiles = project.child("conch").child("topfiles")
        conchNews = conchTopfiles.child("NEWS")
        conchHeader = "Twisted Conch 3.4.5 (2009-12-01)"

        aggregateNews = project.child("NEWS")

        self.assertEqual(builds, [(conchTopfiles, conchNews, conchHeader),
                                  (conchTopfiles, aggregateNews, conchHeader),
                                  (coreTopfiles, coreNews, coreHeader),
                                  (coreTopfiles, aggregateNews, coreHeader)])

    def test_buildAllAggregate(self):
        """
        L{NewsBuilder.buildAll} aggregates I{NEWS} information into the top
        files, only deleting fragments once it's done.
        """
        builder = NewsBuilder()
        project = self.createFakeTwistedProject()
        self.svnCommit(project)
        builder.buildAll(project)

        aggregateNews = project.child("NEWS")

        aggregateContent = aggregateNews.getContent()
        self.assertIn("Third feature addition", aggregateContent)
        self.assertIn("Fixed that bug", aggregateContent)
        self.assertIn("Old boring stuff from the past", aggregateContent)

    def test_changeVersionInNews(self):
        """
        L{NewsBuilder._changeVersions} gets the release date for a given
        version of a project as a string.
        """
        builder = NewsBuilder()
        builder._today = lambda: '2009-12-01'
        project = self.createFakeTwistedProject()
        self.svnCommit(project)
        builder.buildAll(project)
        newVersion = Version('TEMPLATE', 7, 7, 14)
        coreNews = project.child('topfiles').child('NEWS')
        # twisted 1.2.3 is the old version.
        builder._changeNewsVersion(coreNews, "Core",
                                   Version("twisted", 1, 2, 3),
                                   newVersion, '2010-01-01')
        expectedCore = ('Twisted Core 7.7.14 (2010-01-01)\n'
                        '================================\n'
                        '\n'
                        'Features\n'
                        '--------\n'
                        ' - Third feature addition. (#3)\n'
                        '\n'
                        'Other\n'
                        '-----\n'
                        ' - #5\n\n\n')
        self.assertEqual(expectedCore + 'Old core news.\n',
                         coreNews.getContent())

    def test_removeNEWSfragments(self):
        """
        L{NewsBuilder.buildAll} removes all the NEWS fragments after the build
        process, using the C{svn} C{rm} command.
        """
        builder = NewsBuilder()
        project = self.createFakeTwistedProject()
        self.svnCommit(project)
        builder.buildAll(project)

        self.assertEqual(5, len(project.children()))
        output = runCommand(["svn", "status", project.path])
        removed = [
            line for line in output.splitlines() if line.startswith("D ")
        ]
        self.assertEqual(3, len(removed))

    def test_checkSVN(self):
        """
        L{NewsBuilder.buildAll} raises L{NotWorkingDirectory} when the given
        path is not a SVN checkout.
        """
        self.assertRaises(NotWorkingDirectory, self.builder.buildAll,
                          self.project)
Example #30
 def process_fds():
     path = FilePath(b"/proc/self/fd")
     return set([child.basename() for child in path.children()])
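Because it reads the live descriptor table from /proc, this helper can bracket an operation to detect file-descriptor leaks. A small self-contained sketch (Linux-only, since it depends on /proc/self/fd; the open() here is just a stand-in for whatever operation is under test):

from twisted.python.filepath import FilePath

def process_fds():
    path = FilePath(b"/proc/self/fd")
    return set([child.basename() for child in path.children()])

before = process_fds()
with open("/dev/null", "rb"):
    pass  # the descriptor opened here is closed when the block exits
assert process_fds() == before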
Example #31
class Voluminous(object):
    lockFactory = DockerLock

    def __init__(self, directory):
        self._directory = FilePath(directory)
        self._output = []
        self.lock = self.lockFactory()
        self.commitDatabase = JsonCommitDatabase(self._directory)

    def output(self, s):
        self._output.append(s)
        print s

    def getOutput(self):
        return self._output

    def allBranches(self, volume):
        volumePath = self._directory.child(volume)
        branches = volumePath.child("branches").children()
        return [b.basename() for b in branches if b.isdir()]

    def listBranches(self):
        volume = self.volume()
        branches = self.allBranches(volume)
        currentBranch = self.getActiveBranch(volume)
        self.output("\n".join(sorted(("*" if b == currentBranch else " ") + " " + b for b in branches)))

    def checkoutBranch(self, branch, create):
        """
        "Check out" a branch, restarting containers in process, creating it
        from current branch HEAD if requested.
        """
        volume = self.volume()
        volumePath = self._directory.child(volume)
        # this raises an exception if branch is not a valid path segment
        branchPath = volumePath.child("branches").child(branch)
        if create:
            if branchPath.exists():
                self.output("Cannot create existing branch %s" % (branch,))
                return
            else:
                try:
                    HEAD = self._resolveNamedCommitCurrentBranch("HEAD", volume)
                except IndexError:
                    self.output("You must commit ('dvol commit') before you can " "branch ('dvol checkout -b')")
                    return
                # Copy metadata
                meta = self.commitDatabase.read(volume, self.getActiveBranch(volume))
                self.commitDatabase.write(volume, branch, meta)
                # Then copy latest HEAD of branch into new branch data
                # directory
                copyTo(volumePath.child("commits").child(HEAD), branchPath)
        else:
            if not branchPath.exists():
                self.output("Cannot switch to non-existing branch %s" % (branch,))
                return
        # Got here, so switch to the (maybe new branch)
        self.setActiveBranch(volume, branch)

    def createBranch(self, volume, branch):
        branchDir = self._directory.child(volume).child("branches").child(branch)
        branchDir.makedirs()
        # This branchDir is the one which will be bind-mounted into running
        # containers, via a symlink, but with symlinks and docker bind-mounts
        # it seems that it's the permissions of the target which affects the
        # (e.g.) writeability of the resulting mount.
        # Because some containers have processes which run as non-root users,
        # make the volume world-writeable so that it can still be useful to
        # those processes. In the future, it might be better to have options
        # for which uid, gid and perms the volume should have. This is
        # effectively the same as `chmod a=rwx branchDir.path`.
        os.chmod(branchDir.path, 0o777)
        self.output("Created branch %s/%s" % (volume, branch))

    def createVolume(self, name):
        if self._directory.child(name).exists():
            self.output("Error: volume %s already exists" % (name,))
            raise VolumeAlreadyExists()
        self._directory.child(name).makedirs()
        self.setActiveVolume(name)
        self.output("Created volume %s" % (name,))
        self.createBranch(name, DEFAULT_BRANCH)

    def removeVolume(self, volume):
        if not self._directory.child(volume).exists():
            raise UsageError("Volume %r does not exist, cannot remove it" % (volume,))
        containers = self.lock.containers.get_related_containers(volume)
        if containers:
            raise UsageError(
                "Cannot remove %r while it is in use by '%s'" % (volume, (",".join(c["Name"] for c in containers)))
            )
        if self._userIsSure("This will remove all containers using the volume"):
            self.output("Deleting volume %r" % (volume,))
            # Remove related containers
            self.lock.containers.remove_related_containers(volume)
            self._directory.child(volume).remove()

        else:
            self.output("Aborting.")

    def deleteBranch(self, branch):
        volume = self.volume()
        if branch == self.getActiveBranch(volume):
            raise UsageError("Cannot delete active branch, use " "'dvol checkout' to switch branches first")
        if branch not in self.allBranches(volume):
            raise UsageError("Branch %r does not exist" % (branch,))
        if self._userIsSure():
            self.output("Deleting branch %r" % (branch,))
            volumePath = self._directory.child(volume)
            branchPath = volumePath.child("branches").child(branch)
            branchPath.remove()
        else:
            self.output("Aborting.")

    def _userIsSure(self, extraMessage=None):
        message = "Are you sure? "
        if extraMessage:
            message += extraMessage
        message += " (y/n): "
        sys.stdout.write(message)
        sys.stdout.flush()
        return raw_input().lower() in ("y", "yes")

    def setActiveVolume(self, volume):
        self._directory.child("current_volume.json").setContent(json.dumps(dict(current_volume=volume)))

    def volume(self):
        currentVolume = self._directory.child("current_volume.json")
        if currentVolume.exists():
            volume = json.loads(currentVolume.getContent())["current_volume"]
        else:
            raise UsageError("No active volume: use dvol switch to choose one")
        if not self._directory.child(volume).exists():
            raise UsageError("Active volume %s does not exist: " "use dvol switch to choose another" % (volume,))
        return volume

    def setActiveBranch(self, volume, branch):
        self._directory.child(volume).child("current_branch.json").setContent(json.dumps(dict(current_branch=branch)))
        self.lock.acquire(volume)
        try:
            self.updateRunningPoint(volume)
        finally:
            self.lock.release(volume)

    def getActiveBranch(self, volume):
        currentBranch = self._directory.child(self.volume()).child("current_branch.json")
        if currentBranch.exists():
            return json.loads(currentBranch.getContent())["current_branch"]
        else:
            return DEFAULT_BRANCH

    def updateRunningPoint(self, volume):
        """
        construct a stable (wrt switching branches) path with symlinks
        """
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        stablePath = volumePath.child("running_point")
        if stablePath.exists():
            stablePath.remove()
        branchPath.linkTo(stablePath)
        return stablePath.path

    def commitVolume(self, message):
        volume = self.volume()
        commitId = (str(uuid.uuid4()) + str(uuid.uuid4())).replace("-", "")[:40]
        self.output(commitId)
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        commitPath = volumePath.child("commits").child(commitId)
        if commitPath.exists():
            raise Exception("woah, random uuid collision. try again!")
        # Make the commits directory if necessary
        if not commitPath.parent().exists():
            commitPath.parent().makedirs()
        # acquire lock (read: stop containers) to ensure consistent snapshot
        # with file-copy based backend
        # XXX tests for acquire/release
        self.lock.acquire(volume)
        try:
            copyTo(branchPath, commitPath)
        finally:
            self.lock.release(volume)
        self._recordCommit(volume, branchName, commitId, message)

    def _recordCommit(self, volume, branch, commitId, message):
        commitData = self.commitDatabase.read(volume, branch)
        commitData.append(dict(id=commitId, message=message))
        self.commitDatabase.write(volume, branch, commitData)
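        # The entries appended above are plain dicts, so the stored JSON
        # for a branch looks roughly like (hypothetical ids):
        #   [{"id": "9f1c...", "message": "first"},
        #    {"id": "b22a...", "message": "second"}]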

    def exists(self, volume):
        volumePath = self._directory.child(volume)
        return volumePath.exists()

    def listVolumes(self):
        table = get_table()
        table.set_cols_align(["l", "l", "l"])
        dc = self.lock.containers  # XXX ugly
        volumes = [v for v in self._directory.children() if v.isdir()]
        activeVolume = None
        if volumes:
            try:
                activeVolume = self.volume()
            except UsageError:
                # don't refuse to list volumes just because none of them are active
                pass
        rows = (
            [["", "", ""]]
            + [["  VOLUME", "BRANCH", "CONTAINERS"]]
            + [
                [
                    ("*" if v.basename() == activeVolume else " ") + " " + v.basename(),
                    self.getActiveBranch(v.basename()),
                    ",".join(c["Name"] for c in dc.get_related_containers(v.basename())),
                ]
                for v in volumes
            ]
        )
        table.add_rows(rows)
        self.output(table.draw())

    def listCommits(self, branch=None):
        if branch is None:
            branch = self.getActiveBranch(self.volume())
        volume = self.volume()
        aggregate = []
        for commit in reversed(self.commitDatabase.read(volume, branch)):
            # TODO fill in author/date
            aggregate.append(
                "commit %(id)s\n"
                "Author: Who knows <mystery@person>\n"
                "Date: Whenever\n"
                "\n"
                "    %(message)s\n" % commit
            )
        self.output("\n".join(aggregate))

    def _resolveNamedCommitCurrentBranch(self, commit, volume):
        branch = self.getActiveBranch(volume)
        remainder = commit[len("HEAD") :]
        if remainder == "^" * len(remainder):
            offset = len(remainder)
        else:
            raise UsageError("Malformed commit identifier %r" % (commit,))
        commits = self.commitDatabase.read(volume, branch)
        # commits are appended to, so the last one is the latest
        return commits[-1 - offset]["id"]

    def _destroyNewerCommits(self, commit, volume):
        # TODO in the future, we'll care more about the following being an
        # atomic operation
        branch = self.getActiveBranch(volume)
        commits = self.commitDatabase.read(volume, branch)
        commitIndex = [c["id"] for c in commits].index(commit) + 1
        remainingCommits = commits[:commitIndex]
        destroyCommits = commits[commitIndex:]
        # look in all branches for commit references before removing them
        totalCommits = set()
        for otherBranch in self.allBranches(volume):
            if otherBranch == branch:
                # skip this branch, otherwise we'll never destroy any commits
                continue
            commits = self.commitDatabase.read(volume, otherBranch)
            totalCommits.update(commit["id"] for commit in commits)
        for commit in destroyCommits:
            commitId = commit["id"]
            if commitId in totalCommits:
                # skip destroying this commit; it is still actively referred to
                # in another branch
                continue
            volumePath = self._directory.child(volume)
            commitPath = volumePath.child("commits").child(commitId)
            commitPath.remove()
        self.commitDatabase.write(volume, branch, remainingCommits)

    def resetVolume(self, commit):
        """
        Forcefully roll back the current working copy to this commit,
        destroying any later commits.
        """
        volume = self.volume()
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        if commit.startswith("HEAD"):
            try:
                commit = self._resolveNamedCommitCurrentBranch(commit, volume)
            except IndexError:
                self.output("Referenced commit does not exist; check dvol log")
                return
        commitPath = volumePath.child("commits").child(commit)
        if not commitPath.exists():
            raise NoSuchCommit("commit '%s' does not exist" % (commit,))
        self.lock.acquire(volume)
        try:
            branchPath.remove()
            copyTo(commitPath, branchPath)
            self._destroyNewerCommits(commit, volume)
        finally:
            self.lock.release(volume)

    def seedVolumes(self, compose_file):
        # XXX: does not work with absolute paths, but should
        # safe_load: the compose file is untrusted input
        compose = yaml.safe_load(PWD_PATH.child(compose_file).open())
        valid_volumes = []
        # TODO: will need updating for new compose file format
        for service, config in compose.iteritems():
            if "volume_driver" in config and config["volume_driver"] == "dvol":
                for volume in config["volumes"]:
                    if ":" in volume and not volume.startswith("/"):
                        valid_volumes.append((volume, config))
        if not valid_volumes:
            print 'No volumes found with "volume_driver: dvol" and a named volume (like "volumename:/path_inside_container")! Please check your docker-compose.yml file.'
        else:
            print "Please seed your dvol volume(s) by running the following command(s):"
        for volume, config in valid_volumes:
            # TODO: need some validation before running commands with string interpolation here, docker-compose file could be malicious
            # TODO: would be better if we ran the command for the user, rather than making them copy and paste
            print "docker run --volume-driver=dvol -v %(volume)s:/_target -ti %(image)s sh -c 'cp -av %(source)s/* /_target/'" % dict(
                volume=volume.split(":")[0], source=volume.split(":")[1], image=config["image"]
            )
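Aside: the commit identifiers produced by commitVolume() above are simply
two random UUIDs joined together, stripped of dashes and truncated to 40
hex characters, so that they look like git-style SHA-1 hashes. A minimal
standalone sketch of that scheme:

import uuid

def make_commit_id():
    # two uuid4s yield 64 hex digits; keep the first 40 to mimic a git hash
    return (str(uuid.uuid4()) + str(uuid.uuid4())).replace("-", "")[:40]

print(make_commit_id())  # 40 lowercase hex characters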
Exemplo n.º 32
0
class Voluminous(object):
    def __init__(self, directory, lockFactory=DockerLock):
        self._directory = FilePath(directory)
        self._output = []
        self.lock = lockFactory()
        self.commitDatabase = JsonCommitDatabase(self._directory)

    def output(self, s):
        self._output.append(s)
        print s

    def getOutput(self):
        result = ["\n".join(self._output)]
        self._output = []
        return result

    def allBranches(self, volume):
        volumePath = self._directory.child(volume)
        branches = volumePath.child("branches").children()
        return [b.basename() for b in branches if b.isdir()]

    def listBranches(self):
        volume = self.volume()
        branches = self.allBranches(volume)
        currentBranch = self.getActiveBranch(volume)
        self.output("\n".join(
            sorted(("*" if b == currentBranch else " ") + " " + b
                   for b in branches)))

    def checkoutBranch(self, branch, create):
        """
        "Check out" a branch, restarting containers in process, creating it
        from current branch HEAD if requested.
        """
        volume = self.volume()
        volumePath = self._directory.child(volume)
        # this raises an exception if branch is not a valid path segment
        branchPath = volumePath.child("branches").child(branch)
        if create:
            if branchPath.exists():
                self.output("Cannot create existing branch %s" % (branch, ))
                return
            else:
                try:
                    HEAD = self._resolveNamedCommitCurrentBranch(
                        "HEAD", volume)
                except IndexError:
                    self.output(
                        "You must commit ('dvol commit') before you can "
                        "branch ('dvol checkout -b')")
                    return
                # Copy metadata
                meta = self.commitDatabase.read(volume,
                                                self.getActiveBranch(volume))
                self.commitDatabase.write(volume, branch, meta)
                # Then copy latest HEAD of branch into new branch data
                # directory
                copyTo(volumePath.child("commits").child(HEAD), branchPath)
        else:
            if not branchPath.exists():
                self.output("Cannot switch to non-existing branch %s" %
                            (branch, ))
                return
        # Got here, so switch to the (maybe new) branch
        self.setActiveBranch(volume, branch)

    def createBranch(self, volume, branch):
        branchDir = self._directory.child(volume).child("branches").child(
            branch)
        branchDir.makedirs()
        # This branchDir is the one which will be bind-mounted into running
        # containers, via a symlink, but with symlinks and docker bind-mounts
        # it seems that it's the permissions of the target which affects the
        # (e.g.) writeability of the resulting mount.
        # Because some containers have processes which run as non-root users,
        # make the volume world-writeable so that it can still be useful to
        # those processes. In the future, it might be better to have options
        # for which uid, gid and perms the volume should have. This is
        # effectively the same as `chmod a=rwx branchDir.path`.
        os.chmod(branchDir.path, 0777)
        self.output("Created branch %s/%s" % (volume, branch))

    def createVolume(self, name):
        try:
            # XXX: Behaviour around names with relative path identifiers
            # such as '..' and '.' is largely undefined, these should
            # probably be rejected outright.
            if self._directory.child(name).exists():
                self.output("Error: volume %s already exists" % (name, ))
                return
        except InsecurePath:
            self.output("Error: %s is not a valid name" % (name, ))
            return
        self._directory.child(name).makedirs()
        self.setActiveVolume(name)
        self.output("Created volume %s" % (name, ))
        self.createBranch(name, DEFAULT_BRANCH)

    def removeVolume(self, volume, force=False):
        try:
            if not self._directory.child(volume).exists():
                self.output("Volume %r does not exist, cannot remove it" %
                            (volume, ))
                return
        except InsecurePath:
            self.output("Error: %s is not a valid name" % (volume, ))
            return
        containers = self.lock.containers.get_related_containers(volume)
        if containers:
            raise UsageError("Cannot remove %r while it is in use by '%s'" %
                             (volume, (",".join(c['Name']
                                                for c in containers))))
        if force or self._userIsSure(
                "This will remove all containers using the volume"):
            self.output("Deleting volume %r" % (volume, ))
            # Remove related containers
            self.lock.containers.remove_related_containers(volume)
            self._directory.child(volume).remove()

        else:
            self.output("Aborting.")

    def deleteBranch(self, branch):
        volume = self.volume()
        if branch == self.getActiveBranch(volume):
            raise UsageError("Cannot delete active branch, use "
                             "'dvol checkout' to switch branches first")
        if branch not in self.allBranches(volume):
            raise UsageError("Branch %r does not exist" % (branch, ))
        if self._userIsSure():
            self.output("Deleting branch %r" % (branch, ))
            volumePath = self._directory.child(volume)
            branchPath = volumePath.child("branches").child(branch)
            branchPath.remove()
        else:
            self.output("Aborting.")

    def _userIsSure(self, extraMessage=None):
        message = "Are you sure? "
        if extraMessage:
            message += extraMessage
        message += " (y/n): "
        sys.stdout.write(message)
        sys.stdout.flush()
        return raw_input().lower() in ("y", "yes")

    def setActiveVolume(self, volume):
        self._directory.child("current_volume.json").setContent(
            json.dumps(dict(current_volume=volume)))

    def volume(self):
        currentVolume = self._directory.child("current_volume.json")
        if currentVolume.exists():
            volume = json.loads(currentVolume.getContent())["current_volume"]
        else:
            raise UsageError("No active volume: use dvol switch to choose one")
        if not self._directory.child(volume).exists():
            raise UsageError("Active volume %s does not exist: "
                             "use dvol switch to choose another" % (volume, ))
        return volume

    def setActiveBranch(self, volume, branch):
        self._directory.child(volume).child("current_branch.json").setContent(
            json.dumps(dict(current_branch=branch)))
        self.lock.acquire(volume)
        try:
            self.updateRunningPoint(volume)
        finally:
            self.lock.release(volume)

    def getActiveBranch(self, volume):
        currentBranch = self._directory.child(volume).child(
            "current_branch.json")
        if currentBranch.exists():
            return json.loads(currentBranch.getContent())["current_branch"]
        else:
            return DEFAULT_BRANCH

    def updateRunningPoint(self, volume):
        """
        Construct a path that stays stable across branch switches,
        backed by a symlink.
        """
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        stablePath = volumePath.child("running_point")
        if stablePath.exists():
            stablePath.remove()
        branchPath.linkTo(stablePath)
        return stablePath.path

    def commitVolume(self, message):
        volume = self.volume()
        commitId = (str(uuid.uuid4()) + str(uuid.uuid4())).replace("-",
                                                                   "")[:40]
        self.output(commitId)
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        commitPath = volumePath.child("commits").child(commitId)
        if commitPath.exists():
            raise Exception("woah, random uuid collision. try again!")
        # Make the commits directory if necessary
        if not commitPath.parent().exists():
            commitPath.parent().makedirs()
        # acquire lock (read: stop containers) to ensure consistent snapshot
        # with file-copy based backend
        # XXX tests for acquire/release
        self.lock.acquire(volume)
        try:
            copyTo(branchPath, commitPath)
        finally:
            self.lock.release(volume)
        self._recordCommit(volume, branchName, commitId, message)

    def _recordCommit(self, volume, branch, commitId, message):
        commitData = self.commitDatabase.read(volume, branch)
        commitData.append(dict(id=commitId, message=message))
        self.commitDatabase.write(volume, branch, commitData)

    def exists(self, volume):
        volumePath = self._directory.child(volume)
        return volumePath.exists()

    def listVolumes(self):
        table = get_table()
        table.set_cols_align(["l", "l", "l"])
        dc = self.lock.containers  # XXX ugly
        volumes = [v for v in self._directory.children() if v.isdir()]
        activeVolume = None
        if volumes:
            try:
                activeVolume = self.volume()
            except UsageError:
                # don't refuse to list volumes just because none of them are active
                pass
        rows = [["", "", ""]] + [["  VOLUME", "BRANCH", "CONTAINERS"]] + [[
            ("*" if v.basename() == activeVolume else " ") + " " +
            v.basename(),
            self.getActiveBranch(v.basename()), ",".join(
                c['Name'] for c in dc.get_related_containers(v.basename()))
        ] for v in sorted(volumes)]
        table.add_rows(rows)
        self.output(table.draw())

    def listCommits(self, branch=None):
        if branch is None:
            branch = self.getActiveBranch(self.volume())
        volume = self.volume()
        aggregate = []
        for commit in reversed(self.commitDatabase.read(volume, branch)):
            # TODO fill in author/date
            aggregate.append("commit %(id)s\n"
                             "Author: Who knows <mystery@person>\n"
                             "Date: Whenever\n"
                             "\n"
                             "    %(message)s\n" % commit)
        self.output("\n".join(aggregate))

    def _resolveNamedCommitCurrentBranch(self, commit, volume):
        branch = self.getActiveBranch(volume)
        remainder = commit[len("HEAD"):]
        if remainder == "^" * len(remainder):
            offset = len(remainder)
        else:
            raise UsageError("Malformed commit identifier %r" % (commit, ))
        commits = self.commitDatabase.read(volume, branch)
        # commits are appended to, so the last one is the latest
        return commits[-1 - offset]["id"]

    def _destroyNewerCommits(self, commit, volume):
        # TODO in the future, we'll care more about the following being an
        # atomic operation
        branch = self.getActiveBranch(volume)
        commits = self.commitDatabase.read(volume, branch)
        commitIndex = [c["id"] for c in commits].index(commit) + 1
        remainingCommits = commits[:commitIndex]
        destroyCommits = commits[commitIndex:]
        # look in all branches for commit references before removing them
        totalCommits = set()
        for otherBranch in self.allBranches(volume):
            if otherBranch == branch:
                # skip this branch, otherwise we'll never destroy any commits
                continue
            commits = self.commitDatabase.read(volume, otherBranch)
            totalCommits.update(commit["id"] for commit in commits)
        for commit in destroyCommits:
            commitId = commit["id"]
            if commitId in totalCommits:
                # skip destroying this commit; it is still actively referred to
                # in another branch
                continue
            volumePath = self._directory.child(volume)
            commitPath = volumePath.child("commits").child(commitId)
            commitPath.remove()
        self.commitDatabase.write(volume, branch, remainingCommits)

    def resetVolume(self, commit):
        """
        Forcefully roll back the current working copy to this commit,
        destroying any later commits.
        """
        volume = self.volume()
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        if commit.startswith("HEAD"):
            try:
                commit = self._resolveNamedCommitCurrentBranch(commit, volume)
            except IndexError:
                self.output("Referenced commit does not exist; check dvol log")
                return
        commitPath = volumePath.child("commits").child(commit)
        if not commitPath.exists():
            raise NoSuchCommit("commit '%s' does not exist" % (commit, ))
        self.lock.acquire(volume)
        try:
            branchPath.remove()
            copyTo(commitPath, branchPath)
            self._destroyNewerCommits(commit, volume)
        finally:
            self.lock.release(volume)

    def seedVolumes(self, compose_file):
        # XXX: does not work with absolute paths, but should
        # safe_load: the compose file is untrusted input
        compose = yaml.safe_load(PWD_PATH.child(compose_file).open())
        valid_volumes = []
        # TODO: will need updating for new compose file format
        for service, config in compose.iteritems():
            if "volume_driver" in config and config["volume_driver"] == "dvol":
                for volume in config["volumes"]:
                    if ":" in volume and not volume.startswith("/"):
                        valid_volumes.append((volume, config))
        if not valid_volumes:
            self.output(
                'No volumes found with "volume_driver: dvol" and a named volume (like "volumename:/path_inside_container")! Please check your docker-compose.yml file.'
            )
        else:
            self.output(
                "Please seed your dvol volume(s) by running the following command(s):"
            )
        for volume, config in valid_volumes:
            # TODO: need some validation before running commands with string interpolation here, docker-compose file could be malicious
            # TODO: would be better if we ran the command for the user, rather than making them copy and paste
            self.output(
                "docker run --volume-driver=dvol -v %(volume)s:/_target -ti %(image)s sh -c 'cp -av %(source)s/* /_target/'"
                % dict(
                    volume=volume.split(":")[0],
                    source=volume.split(":")[1],
                    image=config["image"],
                ))
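Aside: a minimal sketch, with a toy in-memory history, of the HEAD-suffix
parsing performed by _resolveNamedCommitCurrentBranch() above -- "HEAD"
names the newest commit and each trailing "^" steps one commit further
back (the caller guarantees the "HEAD" prefix):

def resolve_head(commit, commits):
    remainder = commit[len("HEAD"):]
    if remainder != "^" * len(remainder):
        raise ValueError("Malformed commit identifier %r" % (commit,))
    # commits are appended in order, so the last entry is the newest
    return commits[-1 - len(remainder)]["id"]

history = [dict(id="aaa"), dict(id="bbb"), dict(id="ccc")]
print(resolve_head("HEAD", history))    # 'ccc'
print(resolve_head("HEAD^^", history))  # 'aaa'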
Exemplo n.º 33
0
    verbose = False

    global SCHEMADIR, PGSOCKETDIR, PSQL

    for opt, arg in optargs:
        if opt in ("-h", "--help"):
            usage()
        elif opt in ("-d",):
            SCHEMADIR = arg
        elif opt in ("-k",):
            PGSOCKETDIR = arg
        elif opt in ("-p",):
            PSQL = arg
        elif opt in ("-x",):
            sktdir = FilePath("/var/run/caldavd")
            for skt in sktdir.children():
                if skt.basename().startswith("ccs_postgres_"):
                    PGSOCKETDIR = skt.path
            PSQL = "/Applications/Server.app/Contents/ServerRoot/usr/bin/psql"
            SCHEMADIR = "/Applications/Server.app/Contents/ServerRoot/usr/share/caldavd/lib/python/txdav/common/datastore/sql_schema/"
        elif opt in ("-v", "--verbose"):
            verbose = True
        else:
            raise NotImplementedError(opt)

    # Retrieve the db_version number of the installed schema
    try:
        db_version = getSchemaVersion(verbose=verbose)
    except CheckSchemaError as e:
        db_version = 0
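Aside: the fragment above iterates over pre-parsed (option, argument)
pairs; a plausible way to produce optargs -- not shown in this snippet --
is the standard getopt module, with the option letters inferred from the
branches above:

import getopt
import sys

try:
    optargs, remaining = getopt.getopt(
        sys.argv[1:], "hvxd:k:p:", ["help", "verbose"])
except getopt.GetoptError as e:
    print(e)
    sys.exit(2)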
Exemplo n.º 34
0
    def test_exportMixAndMatch(self):
        """
        Run the export with some calendars and some addressbooks
        """
        yield populateAddressBooksFrom(
            {
                "user01": {
                    "addressbook": {
                        "1.vcf": adbk1Root.child("1.vcf").getContent(),
                        "2.vcf": adbk1Root.child("2.vcf").getContent(),
                        "3.vcf": adbk1Root.child("3.vcf").getContent(),
                    }
                },
                "user02": {
                    "addressbook": {
                        "1.vcf": adbk1Root.child("1.vcf").getContent(),
                    },
                }
            }, self.store)
        yield populateCalendarsFrom(
            {
                "user01": {
                    "calendar1": {
                        "valentines-day.ics": (valentines, {}),
                        "new-years-day.ics": (newYears, {})
                    }
                },
                "user02": {
                    "calendar1": {
                        "valentines-day.ics": (valentines, {})
                    },
                    "calendar2": {
                        "new-years-day.ics": (newYears, {})
                    }
                }
            }, self.store)

        outputDir = FilePath(self.mktemp())
        outputDir.makedirs()
        main([
            'calendarserver_export',
            '--directory',
            outputDir.path,
            '--uid',
            'user01',
            '--contacts',
            '--uid',
            'user01',
            '--calendars',
            '--uid',
            'user02',
            '--collection=calendar1',
            '--uid',
            'user02',
            '--contacts',
        ],
             reactor=self)
        yield self.waitToStop
        self.assertEquals(
            set([
                "user01_addressbook.vcf", "user01_calendar1.ics",
                "user02_calendar1.ics", "user02_addressbook.vcf"
            ]), set([child.basename() for child in outputDir.children()]))
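Aside: the closing assertion compares a set of expected names against
FilePath.children(); that recurring pattern is easy to factor into a
helper, sketched here:

from twisted.python.filepath import FilePath

def basenames(path):
    # the set of entry names directly under 'path'
    return set(child.basename() for child in FilePath(path).children())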
Exemplo n.º 35
0
class FSItem(BackendItem):
    logCategory = 'fs_item'

    def __init__(
        self,
        object_id,
        parent,
        path,
        mimetype,
        urlbase,
        UPnPClass,
        update=False,
        store=None,
    ):
        BackendItem.__init__(self)
        self.id = object_id
        self.parent = parent
        if parent:
            parent.add_child(self, update=update)
        if mimetype == 'root':
            self.location = str(path)
        else:
            if mimetype == 'item' and path is None:
                path = os.path.join(parent.get_realpath(), str(self.id))
            # self.location = FilePath(unicode(path))
            self.location = FilePath(path)
        self.mimetype = mimetype
        if urlbase[-1] != '/':
            urlbase += '/'
        self.url = urlbase + str(self.id)

        self.store = store

        if parent is None:
            parent_id = -1
        else:
            parent_id = parent.get_id()

        self.item = UPnPClass(object_id, parent_id, self.get_name())
        if isinstance(self.item, Container):
            self.item.childCount = 0
        self.child_count = 0
        self.children = []
        self.sorted = False
        self.caption = None

        if mimetype in ['directory', 'root']:
            self.update_id = 0
            self.get_url = lambda: self.url
            # self.item.searchable = True
            # self.item.searchClass = 'object'
            if (isinstance(self.location, FilePath)
                    and self.location.isdir()):
                self.check_for_cover_art()
                if getattr(self, 'cover', None):
                    _, ext = os.path.splitext(self.cover)
                    ''' add the cover image extension to help clients
                        that do not act on the mimetype '''
                    self.item.albumArtURI = ''.join(
                        (urlbase, str(self.id), '?cover', str(ext)))
        else:
            self.get_url = lambda: self.url

            if self.mimetype.startswith('audio/'):
                if getattr(parent, 'cover', None):
                    _, ext = os.path.splitext(parent.cover)
                    ''' add the cover image extension to help clients
                        that do not act on the mimetype '''
                    self.item.albumArtURI = ''.join(
                        (urlbase, str(self.id), '?cover', ext))

            _, host_port, _, _, _ = urlsplit(urlbase)
            if host_port.find(':') != -1:
                host, port = tuple(host_port.split(':'))
            else:
                host = host_port

            try:
                size = self.location.getsize()
            except Exception:
                size = 0

            if (self.store.server and self.store.server.coherence.config.get(
                    'transcoding', 'no') == 'yes'):
                if self.mimetype in (
                        'application/ogg',
                        'audio/ogg',
                        'audio/x-wav',
                        'audio/x-m4a',
                        'application/x-flac',
                ):
                    new_res = Resource(
                        self.url + '/transcoded.mp3',
                        f'http-get:*:{"audio/mpeg"}:*',
                    )
                    new_res.size = None
                    # self.item.res.append(new_res)

            if mimetype != 'item':
                res = Resource(
                    'file://' + quote(self.get_path(), encoding='utf-8'),
                    f'internal:{host}:{self.mimetype}:*',
                )
                res.size = size
                self.item.res.append(res)

            if mimetype != 'item':
                res = Resource(self.url, f'http-get:*:{self.mimetype}:*')
            else:
                res = Resource(self.url, 'http-get:*:*:*')

            res.size = size
            self.item.res.append(res)
            ''' if this item is of type audio and we want to add a transcoding
                rule for it, this is the way to do it:

                create a new Resource object, at least a 'http-get'
                and maybe an 'internal' one too

                for transcoding to wav this looks like that

                res = Resource(
                    url_for_transcoded audio,
                    'http-get:*:audio/x-wav:%s'% ';'.join(
                        ['DLNA.ORG_PN=JPEG_TN']+simple_dlna_tags))
                res.size = None
                self.item.res.append(res)
            '''

            if (self.store.server and self.store.server.coherence.config.get(
                    'transcoding', 'no') == 'yes'):
                if self.mimetype in (
                        'audio/mpeg',
                        'application/ogg',
                        'audio/ogg',
                        'audio/x-wav',
                        'audio/x-m4a',
                        'audio/flac',
                        'application/x-flac',
                ):
                    dlna_pn = 'DLNA.ORG_PN=LPCM'
                    dlna_tags = simple_dlna_tags[:]
                    # dlna_tags[1] = 'DLNA.ORG_OP=00'
                    dlna_tags[2] = 'DLNA.ORG_CI=1'
                    new_res = Resource(
                        self.url + '?transcoded=lpcm',
                        f'http-get:*:{"audio/L16;rate=44100;channels=2"}:'
                        f'{";".join([dlna_pn] + dlna_tags)}',
                    )
                    new_res.size = None
                    # self.item.res.append(new_res)

                    if self.mimetype != 'audio/mpeg':
                        new_res = Resource(
                            self.url + '?transcoded=mp3',
                            f'http-get:*:{"audio/mpeg"}:*',
                        )
                        new_res.size = None
                        # self.item.res.append(new_res)
            ''' if this item is an image and we want to add a thumbnail for it
                we have to follow these rules:

                create a new Resource object, at least a 'http-get'
                and maybe an 'internal' one too

                for an JPG this looks like that

                res = Resource(url_for_thumbnail,
                        'http-get:*:image/jpg:%s'% ';'.join(
                        ['DLNA.ORG_PN=JPEG_TN']+simple_dlna_tags))
                res.size = size_of_thumbnail
                self.item.res.append(res)

                and for a PNG the Resource creation is like that

                res = Resource(url_for_thumbnail,
                        'http-get:*:image/png:%s'% ';'.join(
                        simple_dlna_tags+['DLNA.ORG_PN=PNG_TN']))

                if not hasattr(self.item, 'attachments'):
                    self.item.attachments = {}
                self.item.attachments[key] = utils.StaticFile(
                filename_of_thumbnail)
            '''

            if (self.mimetype in ('image/jpeg', 'image/png')
                    or self.mimetype.startswith('video/')):
                try:
                    filename, mimetype, dlna_pn = _find_thumbnail(
                        self.get_path())
                except NoThumbnailFound:
                    pass
                except Exception:
                    self.warning(traceback.format_exc())
                else:
                    dlna_tags = simple_dlna_tags[:]
                    dlna_tags[
                        3] = 'DLNA.ORG_FLAGS=00f00000000000000000000000000000'

                    hash_from_path = str(id(filename))
                    new_res = Resource(
                        self.url + '?attachment=' + hash_from_path,
                        f'http-get:*:{mimetype}:'
                        f'{";".join([dlna_pn] + dlna_tags)}',
                    )
                    new_res.size = os.path.getsize(filename)
                    self.item.res.append(new_res)
                    if not hasattr(self.item, 'attachments'):
                        self.item.attachments = {}
                    self.item.attachments[hash_from_path] = utils.StaticFile(
                        filename)

            if self.mimetype.startswith('video/'):
                # check for a subtitles file
                caption, _ = os.path.splitext(self.get_path())
                caption = caption + '.srt'
                if os.path.exists(caption):
                    hash_from_path = str(id(caption))
                    mimetype = 'smi/caption'
                    new_res = Resource(
                        self.url + '?attachment=' + hash_from_path,
                        f'http-get:*:{mimetype}:{"*"}',
                    )
                    new_res.size = os.path.getsize(caption)
                    self.caption = new_res.data
                    self.item.res.append(new_res)
                    if not hasattr(self.item, 'attachments'):
                        self.item.attachments = {}
                    self.item.attachments[hash_from_path] = utils.StaticFile(
                        caption,
                        defaultType=mimetype,
                    )

            try:
                # FIXME: getmtime is deprecated in Twisted 2.6
                self.item.date = datetime.fromtimestamp(
                    self.location.getmtime())
            except Exception:
                self.item.date = None

    def rebuild(self, urlbase):
        # print('rebuild', self.mimetype)
        if self.mimetype != 'item':
            return
        # print('rebuild for', self.get_path())
        mimetype, _ = mimetypes.guess_type(self.get_path(), strict=False)
        if mimetype is None:
            return
        self.mimetype = mimetype
        # print('rebuild', self.mimetype)
        UPnPClass = classChooser(self.mimetype)
        self.item = UPnPClass(self.id, self.parent.id, self.get_name())
        if getattr(self.parent, 'cover', None):
            _, ext = os.path.splitext(self.parent.cover)
            # add the cover image extension to help
            # clients not reacting on the mimetype
            self.item.albumArtURI = ''.join(
                (urlbase, str(self.id), '?cover', ext))

        _, host_port, _, _, _ = urlsplit(urlbase)
        if host_port.find(':') != -1:
            host, port = tuple(host_port.split(':'))
        else:
            host = host_port

        res = Resource(
            'file://' + quote(self.get_path()),
            f'internal:{host}:{self.mimetype}:*',
        )
        try:
            res.size = self.location.getsize()
        except Exception:
            res.size = 0
        self.item.res.append(res)
        res = Resource(self.url, f'http-get:*:{self.mimetype}:*')

        try:
            res.size = self.location.getsize()
        except Exception:
            res.size = 0
        self.item.res.append(res)

        try:
            # FIXME: getmtime is deprecated in Twisted 2.6
            self.item.date = datetime.fromtimestamp(self.location.getmtime())
        except Exception:
            self.item.date = None

        self.parent.update_id += 1

    def check_for_cover_art(self):
        ''' try to find a jpg file in the current directory (falling back
            to png if the jpg search fails) and take the first one found
        '''
        try:
            jpgs = [
                i.path for i in self.location.children()
                if i.splitext()[1] in ('.jpg', '.JPG')
            ]
            try:
                self.cover = jpgs[0]
            except IndexError:
                pngs = [
                    i.path for i in self.location.children()
                    if i.splitext()[1] in ('.png', '.PNG')
                ]
                try:
                    self.cover = pngs[0]
                except IndexError:
                    return
        except UnicodeDecodeError:
            self.warning(
                'UnicodeDecodeError - there is something wrong with a '
                f'file located in {self.location.path}')

    def remove(self):
        # print('FSItem remove', self.id, self.get_name(), self.parent)
        if self.parent:
            self.parent.remove_child(self)
        del self.item

    def add_child(self, child, update=False):
        self.children.append(child)
        self.child_count += 1
        if isinstance(self.item, Container):
            self.item.childCount += 1
        if update:
            self.update_id += 1
        self.sorted = False

    def remove_child(self, child):
        # print(f'remove_from {self.id:d} ({self.get_name()}) '
        #       f'child {child.id:d} ({child.get_name()})')
        if child in self.children:
            self.child_count -= 1
            if isinstance(self.item, Container):
                self.item.childCount -= 1
            self.children.remove(child)
            self.update_id += 1
        self.sorted = False

    def get_children(self, start=0, request_count=0):
        if not self.sorted:
            self.children.sort(key=_natural_key)
            self.sorted = True
        if request_count == 0:
            return self.children[start:]
        else:
            return self.children[start:request_count]

    def get_child_count(self):
        return self.child_count

    def get_id(self):
        return self.id

    def get_update_id(self):
        if hasattr(self, 'update_id'):
            return self.update_id
        else:
            return None

    def get_path(self):
        if self.mimetype in ['directory', 'root']:
            return None
        if isinstance(self.location, FilePath):
            return self.location.path
        else:
            return self.location

    def get_realpath(self):
        if isinstance(self.location, FilePath):
            return self.location.path
        else:
            return self.location

    def set_path(self, path=None, extension=None):
        if path is None:
            path = self.get_path()
        if extension is not None:
            path, old_ext = os.path.splitext(path)
            path = ''.join((path, extension))
        if isinstance(self.location, FilePath):
            self.location = FilePath(path)
        else:
            self.location = path

    def get_name(self):
        if isinstance(self.location, FilePath):
            name = self.location.basename()
        else:
            name = self.location
        return name

    def get_cover(self):
        if self.cover:
            return self.cover
        try:
            return self.parent.cover
        except AttributeError:
            return None

    def get_parent(self):
        return self.parent

    def get_item(self):
        return self.item

    def get_xml(self):
        return self.item.toString()

    def __repr__(self):
        return ('id: ' + str(self.id) + ' @ ' +
                self.get_name().encode('ascii',
                                       'xmlcharrefreplace').decode('ascii'))
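Aside: get_children() above sorts with a module-level _natural_key helper
that this snippet does not include; a common shape for such a natural-sort
key (digit runs compared numerically, text case-insensitively) would be:

import re

def _natural_key(item):
    # assumes items expose get_name(), as FSItem does
    name = item.get_name().lower()
    return [int(part) if part.isdigit() else part
            for part in re.split(r'(\d+)', name)]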
Exemplo n.º 36
0
class FSItem(BackendItem):
    logCategory = 'fs_item'

    def __init__(self, object_id, parent, path, mimetype, urlbase, UPnPClass,update=False):
        self.id = object_id
        self.parent = parent
        if parent:
            parent.add_child(self,update=update)
        if mimetype == 'root':
            self.location = unicode(path)
        else:
            if mimetype == 'item' and path is None:
                path = os.path.join(parent.get_path(),unicode(self.id))
            #self.location = FilePath(unicode(path))
            self.location = FilePath(path)
        self.mimetype = mimetype
        if urlbase[-1] != '/':
            urlbase += '/'
        self.url = urlbase + str(self.id)


        if parent is None:
            parent_id = -1
        else:
            parent_id = parent.get_id()

        self.item = UPnPClass(object_id, parent_id, self.get_name())
        if isinstance(self.item, Container):
            self.item.childCount = 0
        self.child_count = 0
        self.children = []


        if mimetype in ['directory','root']:
            self.update_id = 0
            self.get_url = lambda : self.url
            self.get_path = lambda : None
            #self.item.searchable = True
            #self.item.searchClass = 'object'
            if (isinstance(self.location, FilePath) and
                    self.location.isdir()):
                self.check_for_cover_art()
                if hasattr(self, 'cover'):
                    _,ext =  os.path.splitext(self.cover)
                    """ add the cover image extension to help clients not reacting on
                        the mimetype """
                    self.item.albumArtURI = ''.join((urlbase,str(self.id),'?cover',ext))
        else:
            self.get_url = lambda : self.url

            if self.mimetype.startswith('audio/'):
                if hasattr(parent, 'cover'):
                    _,ext =  os.path.splitext(parent.cover)
                    """ add the cover image extension to help clients not reacting on
                        the mimetype """
                    self.item.albumArtURI = ''.join((urlbase,str(self.id),'?cover',ext))

            _,host_port,_,_,_ = urlsplit(urlbase)
            if host_port.find(':') != -1:
                host,port = tuple(host_port.split(':'))
            else:
                host = host_port

            try:
                size = self.location.getsize()
            except Exception:
                size = 0

            if mimetype != 'item':
                res = Resource('file://'+ urllib.quote(self.get_path()), 'internal:%s:%s:*' % (host,self.mimetype))
                res.size = size
                self.item.res.append(res)

            if mimetype != 'item':
                res = Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
            else:
                res = Resource(self.url, 'http-get:*:*:*')

            res.size = size
            self.item.res.append(res)


            """ if this item is an image and we want to add a thumbnail for it
                we have to follow these rules:

                create a new Resource object, at least a 'http-get'
                and maybe an 'internal' one too

                for an JPG this looks like that

                res = Resource(url_for_thumbnail,
                        'http-get:*:image/jpg:%s'% ';'.join(simple_dlna_tags+('DLNA.ORG_PN=JPEG_TN',)))
                res.size = size_of_thumbnail
                self.item.res.append(res)

                and for a PNG the Resource creation is like that

                res = Resource(url_for_thumbnail,
                        'http-get:*:image/png:%s'% ';'.join(simple_dlna_tags+('DLNA.ORG_PN=PNG_TN',)))

                if not hasattr(self.item, 'attachments'):
                    self.item.attachments = {}
                self.item.attachments[key] = utils.StaticFile(filename_of_thumbnail)
            """

            if self.mimetype in ('image/jpeg', 'image/png'):
                path = self.get_path()
                thumbnail = os.path.join(os.path.dirname(path),'.thumbs',os.path.basename(path))
                if os.path.exists(thumbnail):
                    mimetype,_ = mimetypes.guess_type(thumbnail, strict=False)
                    if mimetype in ('image/jpeg','image/png'):
                        if mimetype == 'image/jpeg':
                            dlna_pn = 'DLNA.ORG_PN=JPEG_TN'
                        else:
                            dlna_pn = 'DLNA.ORG_PN=PNG_TN'

                        hash_from_path = str(id(thumbnail))
                        new_res = Resource(self.url+'?attachment='+hash_from_path,
                            'http-get:*:%s:%s' % (mimetype, ';'.join(simple_dlna_tags+(dlna_pn,))))
                        new_res.size = os.path.getsize(thumbnail)
                        self.item.res.append(new_res)
                        if not hasattr(self.item, 'attachments'):
                            self.item.attachments = {}
                        self.item.attachments[hash_from_path] = utils.StaticFile(urllib.quote(thumbnail))


            try:
                # FIXME: getmtime is deprecated in Twisted 2.6
                self.item.date = datetime.fromtimestamp(self.location.getmtime())
            except Exception:
                self.item.date = None

    def rebuild(self, urlbase):
        #print "rebuild", self.mimetype
        if self.mimetype != 'item':
            return
        #print "rebuild for", self.get_path()
        mimetype,_ = mimetypes.guess_type(self.get_path(),strict=False)
        if mimetype is None:
            return
        self.mimetype = mimetype
        #print "rebuild", self.mimetype
        UPnPClass = classChooser(self.mimetype)
        self.item = UPnPClass(self.id, self.parent.id, self.get_name())
        if hasattr(self.parent, 'cover'):
            _,ext =  os.path.splitext(self.parent.cover)
            """ add the cover image extension to help clients not reacting on
                the mimetype """
            self.item.albumArtURI = ''.join((urlbase,str(self.id),'?cover',ext))

        _,host_port,_,_,_ = urlsplit(urlbase)
        if host_port.find(':') != -1:
            host,port = tuple(host_port.split(':'))
        else:
            host = host_port

        res = Resource('file://'+urllib.quote(self.get_path()), 'internal:%s:%s:*' % (host,self.mimetype))
        try:
            res.size = self.location.getsize()
        except Exception:
            res.size = 0
        self.item.res.append(res)
        res = Resource(self.url, 'http-get:*:%s:*' % self.mimetype)

        try:
            res.size = self.location.getsize()
        except Exception:
            res.size = 0
        self.item.res.append(res)

        try:
            # FIXME: getmtime is deprecated in Twisted 2.6
            self.item.date = datetime.fromtimestamp(self.location.getmtime())
        except Exception:
            self.item.date = None

        self.parent.update_id += 1

    def check_for_cover_art(self):
        """ let's try to find in the current directory some jpg file,
            or png if the jpg search fails, and take the first one
            that comes around
        """
        try:
            jpgs = [i.path for i in self.location.children() if i.splitext()[1] in ('.jpg', '.JPG')]
            try:
                self.cover = jpgs[0]
            except IndexError:
                pngs = [i.path for i in self.location.children() if i.splitext()[1] in ('.png', '.PNG')]
                try:
                    self.cover = pngs[0]
                except IndexError:
                    return
        except UnicodeDecodeError:
            self.warning("UnicodeDecodeError - there is something wrong with a file located in %r", self.location.path)

    def remove(self):
        #print "FSItem remove", self.id, self.get_name(), self.parent
        if self.parent:
            self.parent.remove_child(self)
        del self.item

    def add_child(self, child, update=False):
        self.children.append(child)
        self.child_count += 1
        if isinstance(self.item, Container):
            self.item.childCount += 1
        if update:
            self.update_id += 1

    def remove_child(self, child):
        #print "remove_from %d (%s) child %d (%s)" % (self.id, self.get_name(), child.id, child.get_name())
        if child in self.children:
            self.child_count -= 1
            if isinstance(self.item, Container):
                self.item.childCount -= 1
            self.children.remove(child)
            self.update_id += 1

    def get_children(self,start=0,request_count=0):
        if request_count == 0:
            return self.children[start:]
        else:
            return self.children[start:request_count]

    def get_child_count(self):
        return self.child_count

    def get_id(self):
        return self.id

    def get_update_id(self):
        if hasattr(self, 'update_id'):
            return self.update_id
        else:
            return None

    def get_path(self):
        if isinstance( self.location,FilePath):
            return self.location.path
        else:
            return self.location

    def set_path(self,path=None,extension=None):
        if path is None:
            path = self.get_path()
        if extension is not None:
            path,old_ext = os.path.splitext(path)
            path = ''.join((path,extension))
        if isinstance( self.location,FilePath):
            self.location = FilePath(path)
        else:
            self.location = path

    def get_name(self):
        if isinstance( self.location,FilePath):
            name = self.location.basename().decode("utf-8", "replace")
        else:
            name = self.location.decode("utf-8", "replace")
        return name

    def get_cover(self):
        try:
            return self.cover
        except AttributeError:
            try:
                return self.parent.cover
            except AttributeError:
                return ''

    def get_parent(self):
        return self.parent

    def get_item(self):
        return self.item

    def get_xml(self):
        return self.item.toString()

    def __repr__(self):
        return 'id: ' + str(self.id) + ' @ ' + self.get_name().encode('ascii','xmlcharrefreplace')
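Aside: the thumbnail lookup in this older variant follows a simple
sibling-directory convention -- the thumbnail for /media/pic.jpg is
expected at /media/.thumbs/pic.jpg. A minimal sketch:

import os

def thumbnail_path(path):
    # mirror the file's name inside a '.thumbs' sibling directory
    return os.path.join(os.path.dirname(path), '.thumbs',
                        os.path.basename(path))

print(thumbnail_path('/media/pic.jpg'))  # /media/.thumbs/pic.jpg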
Exemplo n.º 37
0
class NewsBuilderTests(TestCase, StructureAssertingMixin):
    """
    Tests for L{NewsBuilder}.
    """
    skip = svnSkip

    def setUp(self):
        """
        Create a fake project and stuff some basic structure and content into
        it.
        """
        self.builder = NewsBuilder()
        self.project = FilePath(self.mktemp())
        self.project.createDirectory()

        self.existingText = 'Here is stuff which was present previously.\n'
        self.createStructure(
            self.project, {
                'NEWS': self.existingText,
                '5.feature': 'We now support the web.\n',
                '12.feature': 'The widget is more robust.\n',
                '15.feature': (
                    'A very long feature which takes many words to '
                    'describe with any accuracy was introduced so that '
                    'the line wrapping behavior of the news generating '
                    'code could be verified.\n'),
                '16.feature': (
                    'A simpler feature\ndescribed on multiple lines\n'
                    'was added.\n'),
                '23.bugfix': 'Broken stuff was fixed.\n',
                '25.removal': 'Stupid stuff was deprecated.\n',
                '30.misc': '',
                '35.misc': '',
                '40.doc': 'foo.bar.Baz.quux',
                '41.doc': 'writing Foo servers'})


    def svnCommit(self, project=None):
        """
        Make the C{project} directory a valid subversion directory with all
        files committed.
        """
        if project is None:
            project = self.project
        repositoryPath = self.mktemp()
        repository = FilePath(repositoryPath)

        runCommand(["svnadmin", "create", repository.path])
        runCommand(["svn", "checkout", "file://" + repository.path,
                    project.path])

        runCommand(["svn", "add"] + glob.glob(project.path + "/*"))
        runCommand(["svn", "commit", project.path, "-m", "yay"])


    def test_today(self):
        """
        L{NewsBuilder._today} returns today's date in YYYY-MM-DD form.
        """
        self.assertEqual(
            self.builder._today(), date.today().strftime('%Y-%m-%d'))


    def test_findFeatures(self):
        """
        When called with L{NewsBuilder._FEATURE}, L{NewsBuilder._findChanges}
        returns a list of feature ticket numbers and descriptions as a list of
        two-tuples.
        """
        features = self.builder._findChanges(
            self.project, self.builder._FEATURE)
        self.assertEqual(
            features,
            [(5, "We now support the web."),
             (12, "The widget is more robust."),
             (15,
              "A very long feature which takes many words to describe with "
              "any accuracy was introduced so that the line wrapping behavior "
              "of the news generating code could be verified."),
             (16, "A simpler feature described on multiple lines was added.")])


    def test_findBugfixes(self):
        """
        When called with L{NewsBuilder._BUGFIX}, L{NewsBuilder._findChanges}
        returns a list of bugfix ticket numbers and descriptions as a list of
        two-tuples.
        """
        bugfixes = self.builder._findChanges(
            self.project, self.builder._BUGFIX)
        self.assertEqual(
            bugfixes,
            [(23, 'Broken stuff was fixed.')])


    def test_findRemovals(self):
        """
        When called with L{NewsBuilder._REMOVAL}, L{NewsBuilder._findChanges}
        returns a list of removal/deprecation ticket numbers and descriptions
        as a list of two-tuples.
        """
        removals = self.builder._findChanges(
            self.project, self.builder._REMOVAL)
        self.assertEqual(
            removals,
            [(25, 'Stupid stuff was deprecated.')])


    def test_findDocumentation(self):
        """
        When called with L{NewsBuilder._DOC}, L{NewsBuilder._findChanges}
        returns a list of documentation ticket numbers and descriptions as a
        list of two-tuples.
        """
        doc = self.builder._findChanges(
            self.project, self.builder._DOC)
        self.assertEqual(
            doc,
            [(40, 'foo.bar.Baz.quux'),
             (41, 'writing Foo servers')])


    def test_findMiscellaneous(self):
        """
        When called with L{NewsBuilder._MISC}, L{NewsBuilder._findChanges}
        returns a list of miscellaneous ticket numbers and descriptions as a
        list of two-tuples.
        """
        misc = self.builder._findChanges(
            self.project, self.builder._MISC)
        self.assertEqual(
            misc,
            [(30, ''),
             (35, '')])


    def test_writeHeader(self):
        """
        L{NewsBuilder._writeHeader} accepts a file-like object opened for
        writing and a header string and writes out a news file header to it.
        """
        output = StringIO()
        self.builder._writeHeader(output, "Super Awesometastic 32.16")
        self.assertEqual(
            output.getvalue(),
            "Super Awesometastic 32.16\n"
            "=========================\n"
            "\n")


    def test_writeSection(self):
        """
        L{NewsBuilder._writeSection} accepts a file-like object opened for
        writing, a section name, and a list of ticket information (as returned
        by L{NewsBuilder._findChanges}) and writes out a section header and all
        of the given ticket information.
        """
        output = StringIO()
        self.builder._writeSection(
            output, "Features",
            [(3, "Great stuff."),
             (17, "Very long line which goes on and on and on, seemingly "
              "without end until suddenly without warning it does end.")])
        self.assertEqual(
            output.getvalue(),
            "Features\n"
            "--------\n"
            " - Great stuff. (#3)\n"
            " - Very long line which goes on and on and on, seemingly "
            "without end\n"
            "   until suddenly without warning it does end. (#17)\n"
            "\n")


    def test_writeMisc(self):
        """
        L{NewsBuilder._writeMisc} accepts a file-like object opened for
        writing, a section name, and a list of ticket information (as returned
        by L{NewsBuilder._findChanges}) and writes out a section header and all
        of the ticket numbers, but excludes any descriptions.
        """
        output = StringIO()
        self.builder._writeMisc(
            output, "Other",
            [(x, "") for x in range(2, 50, 3)])
        self.assertEqual(
            output.getvalue(),
            "Other\n"
            "-----\n"
            " - #2, #5, #8, #11, #14, #17, #20, #23, #26, #29, #32, #35, "
            "#38, #41,\n"
            "   #44, #47\n"
            "\n")


    def test_build(self):
        """
        L{NewsBuilder.build} updates a NEWS file with new changes based on
        the I{<ticket>.<change type>} files found in the directory specified.
        """
        self.builder.build(
            self.project, self.project.child('NEWS'),
            "Super Awesometastic 32.16")

        results = self.project.child('NEWS').getContent()
        self.assertEqual(
            results,
            'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5)\n'
            ' - The widget is more robust. (#12)\n'
            ' - A very long feature which takes many words to describe '
            'with any\n'
            '   accuracy was introduced so that the line wrapping behavior '
            'of the\n'
            '   news generating code could be verified. (#15)\n'
            ' - A simpler feature described on multiple lines was '
            'added. (#16)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n' + self.existingText)


    def test_emptyProjectCalledOut(self):
        """
        If no changes exist for a project, I{NEWS} gains a new section for
        that project that includes some helpful text about how there were no
        interesting changes.
        """
        project = FilePath(self.mktemp()).child("twisted")
        project.makedirs()
        self.createStructure(project, {'NEWS': self.existingText})

        self.builder.build(
            project, project.child('NEWS'),
            "Super Awesometastic 32.16")
        results = project.child('NEWS').getContent()
        self.assertEqual(
            results,
            'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n' +
            self.builder._NO_CHANGES +
            '\n\n' + self.existingText)


    def test_preserveTicketHint(self):
        """
        If a I{NEWS} file begins with the two magic lines which point readers
        at the issue tracker, those lines are kept at the top of the new file.
        """
        news = self.project.child('NEWS')
        news.setContent(
            'Ticket numbers in this file can be looked up by visiting\n'
            'http://twistedmatrix.com/trac/ticket/<number>\n'
            '\n'
            'Blah blah other stuff.\n')

        self.builder.build(self.project, news, "Super Awesometastic 32.16")

        self.assertEqual(
            news.getContent(),
            'Ticket numbers in this file can be looked up by visiting\n'
            'http://twistedmatrix.com/trac/ticket/<number>\n'
            '\n'
            'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5)\n'
            ' - The widget is more robust. (#12)\n'
            ' - A very long feature which takes many words to describe '
            'with any\n'
            '   accuracy was introduced so that the line wrapping behavior '
            'of the\n'
            '   news generating code could be verified. (#15)\n'
            ' - A simpler feature described on multiple lines was '
            'added. (#16)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n'
            'Blah blah other stuff.\n')


    def test_emptySectionsOmitted(self):
        """
        If there are no changes of a particular type (feature, bugfix, etc), no
        section for that type is written by L{NewsBuilder.build}.
        """
        for ticket in self.project.children():
            if ticket.splitext()[1] in ('.feature', '.misc', '.doc'):
                ticket.remove()

        self.builder.build(
            self.project, self.project.child('NEWS'),
            'Some Thing 1.2')

        self.assertEqual(
            self.project.child('NEWS').getContent(),
            'Some Thing 1.2\n'
            '==============\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n\n'
            'Here is stuff which was present previously.\n')


    def test_duplicatesMerged(self):
        """
        If two change files have the same contents, they are merged in the
        generated news entry.
        """
        def feature(s):
            return self.project.child(s + '.feature')
        feature('5').copyTo(feature('15'))
        feature('5').copyTo(feature('16'))

        self.builder.build(
            self.project, self.project.child('NEWS'),
            'Project Name 5.0')

        self.assertEqual(
            self.project.child('NEWS').getContent(),
            'Project Name 5.0\n'
            '================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5, #15, #16)\n'
            ' - The widget is more robust. (#12)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n'
            'Here is stuff which was present previously.\n')


    def createFakeTwistedProject(self):
        """
        Create a fake-looking Twisted project to build from.
        """
        project = FilePath(self.mktemp()).child("twisted")
        project.makedirs()
        self.createStructure(
            project, {
                'NEWS': 'Old boring stuff from the past.\n',
                '_version.py': genVersion("twisted", 1, 2, 3),
                'topfiles': {
                    'NEWS': 'Old core news.\n',
                    '3.feature': 'Third feature addition.\n',
                    '5.misc': ''},
                'conch': {
                    '_version.py': genVersion("twisted.conch", 3, 4, 5),
                    'topfiles': {
                        'NEWS': 'Old conch news.\n',
                        '7.bugfix': 'Fixed that bug.\n'}}})
        return project


    def test_buildAll(self):
        """
        L{NewsBuilder.buildAll} calls L{NewsBuilder.build} once for each
        subproject, passing that subproject's I{topfiles} directory as C{path},
        the I{NEWS} file in that directory as C{output}, and the subproject's
        name as C{header}, and then again for each subproject with the
        top-level I{NEWS} file for C{output}. Blacklisted subprojects are
        skipped.
        """
        builds = []
        builder = NewsBuilder()
        builder.build = lambda path, output, header: builds.append((
            path, output, header))
        builder._today = lambda: '2009-12-01'

        project = self.createFakeTwistedProject()
        self.svnCommit(project)
        builder.buildAll(project)

        coreTopfiles = project.child("topfiles")
        coreNews = coreTopfiles.child("NEWS")
        coreHeader = "Twisted Core 1.2.3 (2009-12-01)"

        conchTopfiles = project.child("conch").child("topfiles")
        conchNews = conchTopfiles.child("NEWS")
        conchHeader = "Twisted Conch 3.4.5 (2009-12-01)"

        aggregateNews = project.child("NEWS")

        self.assertEqual(
            builds,
            [(conchTopfiles, conchNews, conchHeader),
             (conchTopfiles, aggregateNews, conchHeader),
             (coreTopfiles, coreNews, coreHeader),
             (coreTopfiles, aggregateNews, coreHeader)])


    def test_buildAllAggregate(self):
        """
        L{NewsBuilder.buildAll} aggregates I{NEWS} information into the
        top-level I{NEWS} file, only deleting fragments once it's done.
        """
        builder = NewsBuilder()
        project = self.createFakeTwistedProject()
        self.svnCommit(project)
        builder.buildAll(project)

        aggregateNews = project.child("NEWS")

        aggregateContent = aggregateNews.getContent()
        self.assertIn("Third feature addition", aggregateContent)
        self.assertIn("Fixed that bug", aggregateContent)
        self.assertIn("Old boring stuff from the past", aggregateContent)


    def test_changeVersionInNews(self):
        """
        L{NewsBuilder._changeNewsVersion} replaces the version and release
        date in the header of an existing I{NEWS} file.
        """
        builder = NewsBuilder()
        builder._today = lambda: '2009-12-01'
        project = self.createFakeTwistedProject()
        self.svnCommit(project)
        builder.buildAll(project)
        newVersion = Version('TEMPLATE', 7, 7, 14)
        coreNews = project.child('topfiles').child('NEWS')
        # twisted 1.2.3 is the old version.
        builder._changeNewsVersion(
            coreNews, "Core", Version("twisted", 1, 2, 3),
            newVersion, '2010-01-01')
        expectedCore = (
            'Twisted Core 7.7.14 (2010-01-01)\n'
            '================================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - Third feature addition. (#3)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #5\n\n\n')
        self.assertEqual(
            expectedCore + 'Old core news.\n', coreNews.getContent())


    def test_removeNEWSfragments(self):
        """
        L{NewsBuilder.buildAll} removes all the NEWS fragments after the build
        process, using the C{svn} C{rm} command.
        """
        builder = NewsBuilder()
        project = self.createFakeTwistedProject()
        self.svnCommit(project)
        builder.buildAll(project)

        self.assertEqual(5, len(project.children()))
        output = runCommand(["svn", "status", project.path])
        removed = [line for line in output.splitlines()
                   if line.startswith("D ")]
        self.assertEqual(3, len(removed))


    def test_checkSVN(self):
        """
        L{NewsBuilder.buildAll} raises L{NotWorkingDirectory} when the given
        path is not an SVN checkout.
        """
        self.assertRaises(
            NotWorkingDirectory, self.builder.buildAll, self.project)
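
# A minimal sketch (not Twisted's actual implementation) of the wrapping
# behavior test_writeSection and test_writeMisc above expect: ticket
# numbers are joined with commas and wrapped with a three-space hanging
# indent. The helper name and the 70-column width are assumptions chosen
# to match the expected output.
import textwrap

def write_misc_sketch(output, header, tickets):
    # Underline the section header with dashes, as in the expected output.
    output.write(header + '\n' + '-' * len(header) + '\n')
    # Commas stay attached to the preceding ticket number, so textwrap
    # breaks lines only between tickets.
    text = ', '.join('#%d' % (ticket,) for ticket, _ in tickets)
    output.write(textwrap.fill(
        text, width=70, initial_indent=' - ', subsequent_indent='   '))
    output.write('\n\n')

# Fed the same ticket list as test_writeMisc, this should reproduce the
# two wrapped lines asserted there.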
Example No. 38
class FileUploadResource(Resource):
    """
    Twisted Web resource that handles file uploads over `HTTP/POST` requests.
    """

    log = make_logger()

    def __init__(self,
                 upload_directory,
                 temp_directory,
                 form_fields,
                 upload_session,
                 options=None):
        """

        :param upload_directory: The target directory where uploaded files will be stored.
        :type upload_directory: str
        :param temp_directory: A temporary directory where chunks of a file being uploaded are stored.
        :type temp_directory: str
        :param form_fields: Names of HTML form fields used for uploading.
        :type form_fields: dict
        :param upload_session: An instance of `ApplicationSession` used for publishing progress events.
        :type upload_session: obj
        :param options: Options for file upload.
        :type options: dict or None
        """

        Resource.__init__(self)
        self._uploadRoot = FilePath(upload_directory)
        self._tempDirRoot = FilePath(temp_directory)
        self._form_fields = form_fields
        self._fileupload_session = upload_session
        self._options = options or {}
        self._max_file_size = self._options.get('max_file_size',
                                                10 * 1024 * 1024)
        self._fileTypes = self._options.get('file_types', None)
        self._file_permissions = self._options.get('file_permissions', None)

        # track uploaded files / chunks
        self._uploads = {}

        self.log.info('Upload Resource started.')

        # Scan the temp dir for uploaded chunks and fill the _uploads dict
        # with them so existing uploads can be resumed; all other remnants
        # will be purged.
        for fileTempDir in self._tempDirRoot.children():
            fileTempName = fileTempDir.basename()
            if fileTempDir.isdir():
                self._uploads[fileTempName] = {
                    'chunk_list': [],
                    'origin': 'startup'
                }
                for chunk in fileTempDir.listdir():
                    if chunk[:6] == 'chunk_':
                        self._uploads[fileTempName]['chunk_list'].append(
                            int(chunk[6:]))
                    else:
                        fileTempDir.child(chunk).remove()
                # if no chunks detected then remove remains completely
                if len(self._uploads[fileTempName]['chunk_list']) == 0:
                    fileTempDir.remove()
                    self._uploads.pop(fileTempName, None)
            else:  # fileTempDir is a file remaining from a single chunk upload
                fileTempDir.remove()

        self.log.debug("Scanned pending uploads: {uploads}",
                       uploads=self._uploads)

    def render_POST(self, request):
        headers = {
            x.decode('iso-8859-1'): y.decode('iso-8859-1')
            for x, y in request.getAllHeaders().items()
        }

        origin = headers['host']

        postFields = cgi.FieldStorage(fp=request.content,
                                      headers=headers,
                                      environ={"REQUEST_METHOD": "POST"})

        f = self._form_fields

        filename = postFields[f['file_name']].value
        totalSize = int(postFields[f['total_size']].value)
        totalChunks = int(postFields[f['total_chunks']].value)
        chunkSize = int(postFields[f['chunk_size']].value)
        chunkNumber = int(postFields[f['chunk_number']].value)
        fileContent = postFields[f['content']].value

        if 'chunk_extra' in f and f['chunk_extra'] in postFields:
            chunk_extra = json.loads(postFields[f['chunk_extra']].value)
        else:
            chunk_extra = {}

        if 'finish_extra' in f and f['finish_extra'] in postFields:
            finish_extra = json.loads(postFields[f['finish_extra']].value)
        else:
            finish_extra = {}

        fileId = filename

        # # prepare user specific upload areas
        # # NOT YET IMPLEMENTED
        # #
        # if 'auth_id' in f and f['auth_id'] in postFields:
        #     auth_id = postFields[f['auth_id']].value
        #     mydir = os.path.join(self._uploadRoot, auth_id)
        #     my_temp_dir = os.path.join(self._tempDirRoot, auth_id)
        #
        #     # check if auth_id is a valid directory_name
        #     #
        #     if auth_id != auth_id.encode('ascii', 'ignore'):
        #         msg = "The requestor auth_id must be an ascii string."
        #         if self._debug:
        #             log.msg(msg)
        #         # 415 Unsupported Media Type
        #         request.setResponseCode(415, msg)
        #         return msg
        # else:
        #     auth_id = 'anonymous'

        # create user specific folder

        # mydir = self._uploadRoot
        # my_temp_dir = self._tempDirRoot

        # if not os.path.exists(mydir):
        #     os.makedirs(mydir)
        # if not os.path.exists(my_temp_dir):
        #     os.makedirs(my_temp_dir)

        # prepare the on_progress publisher
        if 'on_progress' in f and f['on_progress'] in postFields and self._fileupload_session != {}:
            topic = postFields[f['on_progress']].value

            if 'session' in f and f['session'] in postFields:
                session = int(postFields[f['session']].value)
                publish_options = PublishOptions(eligible=[session])
            else:
                publish_options = None

            def fileupload_publish(payload):
                self._fileupload_session.publish(topic,
                                                 payload,
                                                 options=publish_options)
        else:

            def fileupload_publish(payload):
                pass

        # Register upload right at the start to avoid overlapping upload conflicts
        #
        if fileId not in self._uploads:
            self._uploads[fileId] = {'chunk_list': [], 'origin': origin}
            chunk_is_first = True
            self.log.debug(
                'Started upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
                file_name=fileId,
                total_size=totalSize,
                total_chunks=totalChunks,
                chunk_size=chunkSize,
                chunk_number=chunkNumber)
        else:
            chunk_is_first = False
            # If the chunks were read at crossbar startup, any client may
            # claim and resume the pending upload!
            #
            upl = self._uploads[fileId]
            if upl['origin'] == 'startup':
                self.log.debug(
                    'Will try to resume upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
                    file_name=fileId,
                    total_size=totalSize,
                    total_chunks=totalChunks,
                    chunk_size=chunkSize,
                    chunk_number=chunkNumber)
                upl['origin'] = origin
            else:
                # check if another session is uploading this file already
                #
                if upl['origin'] != origin:
                    msg = "File being uploaded is already uploaded in a different session."
                    self.log.debug(msg)
                    # 409 Conflict
                    request.setResponseCode(409, msg.encode('utf8'))
                    return msg.encode('utf8')
                else:
                    # check if the chunk is being uploaded in this very
                    # session already - this should never happen!
                    if chunkNumber in upl['chunk_list']:
                        msg = "Chunk being uploaded is already uploading."
                        self.log.debug(msg)
                        # Don't throw a conflict. This may be a wanted behaviour.
                        # Even if an upload would be resumable, you don't have to resume.
                        # 409 Conflict
                        # request.setResponseCode(409, msg.encode('utf8'))
                        # return msg.encode('utf8')

        # check file size
        #
        if totalSize > self._max_file_size:
            msg = "Size {} of file to be uploaded exceeds maximum {}".format(
                totalSize, self._max_file_size)
            self.log.debug(msg)
            # 413 Request Entity Too Large
            request.setResponseCode(413, msg.encode('utf8'))
            return msg.encode('utf8')

        # check file extensions
        #
        extension = os.path.splitext(filename)[1]
        if self._fileTypes and extension not in self._fileTypes:
            msg = "Type '{}' of file to be uploaded is in allowed types {}".format(
                extension, self._fileTypes)
            self.log.debug(msg)
            # 415 Unsupported Media Type
            request.setResponseCode(415, msg.encode('utf8'))
            return msg.encode('utf8')

        # TODO: check mime type
        #
        fileTempDir = self._tempDirRoot.child(fileId)
        chunkName = fileTempDir.child('chunk_' + str(chunkNumber))
        _chunkName = fileTempDir.child('#kfhf3kz412uru578e38viokbjhfvz4w__' +
                                       'chunk_' + str(chunkNumber))

        def mergeFile():
            # every chunk has to check if it is the last chunk written, except in a single chunk scenario
            if totalChunks > 1 and len(
                    self._uploads[fileId]['chunk_list']) >= totalChunks:
                # last chunk
                self.log.debug(
                    'Finished file upload after chunk {chunk_number} with chunk_list {chunk_list}',
                    chunk_number=chunkNumber,
                    chunk_list=self._uploads[fileId]['chunk_list'])

                # Merge all files into one file and remove the temp files
                # TODO: How to avoid the extra file IO ?
                finalFileName = self._uploadRoot.child(fileId)
                _finalFileName = fileTempDir.child(
                    '#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)
                with open(_finalFileName.path, 'wb') as _finalFile:
                    for cn in range(1, totalChunks + 1):
                        with open(
                                fileTempDir.child('chunk_' + str(cn)).path,
                                'rb') as ff:
                            _finalFile.write(ff.read())

                _finalFileName.moveTo(finalFileName)

                if self._file_permissions:
                    perm = int(self._file_permissions, 8)
                    try:
                        finalFileName.chmod(perm)
                    except Exception as e:
                        msg = "file upload resource - could not change file permissions of uploaded file"
                        self.log.debug(msg)
                        self.log.debug(e)
                        self._uploads.pop(fileId, None)
                        request.setResponseCode(500, msg.encode('utf8'))
                        return msg.encode('utf8')
                    else:
                        self.log.debug(
                            "Changed permissions on {file_name} to {permissions}",
                            file_name=finalFileName,
                            permissions=self._file_permissions)

                # remove the file temp folder
                fileTempDir.remove()

                self._uploads.pop(fileId, None)

                # publish file upload progress to file_progress_URI
                fileupload_publish({
                    "id": fileId,
                    "chunk": chunkNumber,
                    "name": filename,
                    "total": totalSize,
                    "remaining": 0,
                    "status": "finished",
                    "progress": 1.,
                    "finish_extra": finish_extra,
                    "chunk_extra": chunk_extra
                })

        if chunk_is_first:
            # first chunk of file

            # publish file upload start
            #
            fileupload_publish({
                "id": fileId,
                "chunk": chunkNumber,
                "name": filename,
                "total": totalSize,
                "remaining": totalSize,
                "status": "started",
                "progress": 0.,
                "chunk_extra": chunk_extra
            })

            if totalChunks == 1:
                # only one chunk overall -> write file directly
                finalFileName = self._uploadRoot.child(fileId)
                _finalFileName = self._tempDirRoot.child(
                    '#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)

                with open(_finalFileName.path, 'wb') as _finalFile:
                    _finalFile.write(fileContent)

                if self._file_permissions:
                    perm = int(self._file_permissions, 8)
                    try:
                        _finalFileName.chmod(perm)
                    except Exception as e:
                        # finalFileName.remove()
                        msg = "Could not change file permissions of uploaded file"
                        self.log.debug(msg)
                        self.log.debug(e)
                        request.setResponseCode(500, msg.encode('utf8'))
                        return msg.encode('utf8')
                    else:
                        self.log.debug(
                            "Changed permissions on {file_name} to {permissions}",
                            file_name=finalFileName,
                            permissions=self._file_permissions)

                _finalFileName.moveTo(finalFileName)
                if chunkNumber not in self._uploads[fileId]['chunk_list']:
                    self._uploads[fileId]['chunk_list'].append(chunkNumber)

                self._uploads.pop(fileId, None)

                # publish file upload progress to file_progress_URI
                fileupload_publish({
                    "id": fileId,
                    "chunk": chunkNumber,
                    "name": filename,
                    "total": totalSize,
                    "remaining": 0,
                    "status": "finished",
                    "progress": 1.,
                    "finish_extra": finish_extra,
                    "chunk_extra": chunk_extra
                })

            else:
                # first of more chunks
                # fileTempDir.remove()  # any potential conflict should have been resolved above. This should not be necessary!
                if not os.path.isdir(fileTempDir.path):
                    fileTempDir.makedirs()

                with open(_chunkName.path, 'wb') as chunk:
                    chunk.write(fileContent)
                _chunkName.moveTo(chunkName)  # atomic file system operation
                self.log.debug('chunk_' + str(chunkNumber) +
                               ' written and moved to ' + chunkName.path)
                # publish file upload progress
                #
                fileupload_publish({
                    "id": fileId,
                    "chunk": chunkNumber,
                    "name": filename,
                    "total": totalSize,
                    "remaining": totalSize - chunkSize,
                    "status": "progress",
                    "progress": round(float(chunkSize) / float(totalSize), 3),
                    "chunk_extra": chunk_extra
                })
                if chunkNumber not in self._uploads[fileId]['chunk_list']:
                    self._uploads[fileId]['chunk_list'].append(chunkNumber)
                mergeFile()
            # clean the temp dir once per file upload
            self._remove_stale_uploads()

        else:
            # intermediate chunk
            if not os.path.isdir(fileTempDir.path):
                fileTempDir.makedirs()

            with open(_chunkName.path, 'wb') as chunk:
                chunk.write(fileContent)
            _chunkName.moveTo(chunkName)
            self.log.debug('chunk_' + str(chunkNumber) +
                           ' written and moved to ' + chunkName.path)

            if chunkNumber not in self._uploads[fileId]['chunk_list']:
                self._uploads[fileId]['chunk_list'].append(chunkNumber)

            received = sum(
                fileTempDir.child(f).getsize() for f in fileTempDir.listdir())

            fileupload_publish({
                "id": fileId,
                "chunk": chunkNumber,
                "name": filename,
                "total": totalSize,
                "remaining": totalSize - received,
                "status": "progress",
                "progress": round(float(received) / float(totalSize), 3),
                "chunk_extra": chunk_extra
            })
            mergeFile()
        # no errors encountered -> respond success
        request.setResponseCode(200)
        return b''

    def _remove_stale_uploads(self):
        """
        Remove temp directories for uploads that are no longer tracked. This
        only works if there is a temp folder exclusive to crossbar file
        uploads; if the system temp folder is used, crossbar creates a
        "crossbar-uploads" directory there and uses that as the upload temp
        folder. Without regular cleanup, an attacker could fill up the OS
        file system.
        """
        for fileTempDir in self._tempDirRoot.children():
            self.log.debug('REMOVE STALE UPLOADS ' +
                           str(fileTempDir.basename()))
            if fileTempDir.isdir() and (
                    fileTempDir.basename()) not in self._uploads:
                fileTempDir.remove()

    def render_GET(self, request):
        """
        This method can be used to check whether a chunk has been uploaded already.
        It returns with HTTP status code `200` if yes and `404` if not.
        The request needs to contain the file identifier and the chunk number to check for.
        """
        for param in ['file_name', 'chunk_number']:
            if self._form_fields[param].encode(
                    'iso-8859-1') not in request.args:
                msg = "file upload resource - missing request query parameter '{}', configured from '{}'".format(
                    self._form_fields[param], param)
                self.log.debug(msg)
                # 400 Bad Request
                request.setResponseCode(400, msg.encode('utf8'))
                return msg.encode('utf8')

        file_name = request.args[self._form_fields['file_name'].encode(
            'iso-8859-1')][0].decode('utf8')
        chunk_number = int(
            request.args[self._form_fields['chunk_number'].encode(
                'iso-8859-1')][0].decode('utf8'))

        # a complete upload will be repeated; an incomplete upload will be resumed
        if file_name in self._uploads and chunk_number in self._uploads[
                file_name]['chunk_list']:
            self.log.debug(
                "Skipping chunk upload {file_name} of chunk {chunk_number}",
                file_name=file_name,
                chunk_number=chunk_number)
            msg = b"chunk of file already uploaded"
            request.setResponseCode(200, msg)
            return msg
        else:
            msg = b"chunk of file not yet uploaded"
            request.setResponseCode(404, msg)
            return msg
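
# A minimal, hypothetical way to mount this resource in a Twisted web
# server. The form-field names below are invented - they merely name the
# POST fields render_POST reads - the directory paths are placeholders
# that must already exist (the constructor scans the temp directory), and
# passing {} as upload_session disables progress publication, as the check
# in render_POST implies.
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site

form_fields = {
    'file_name': 'resumableFilename',
    'total_size': 'resumableTotalSize',
    'total_chunks': 'resumableTotalChunks',
    'chunk_size': 'resumableChunkSize',
    'chunk_number': 'resumableChunkNumber',
    'content': 'file',
}

root = Resource()
root.putChild(b'upload', FileUploadResource(
    '/srv/uploads',        # upload_directory (placeholder)
    '/srv/uploads-tmp',    # temp_directory (placeholder, must exist)
    form_fields,
    upload_session={},     # no WAMP session: progress events are dropped
    options={'max_file_size': 200 * 1024 * 1024}))

reactor.listenTCP(8080, Site(root))
reactor.run()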
Example No. 39
    def process_fds():
        path = FilePath(b"/dev/fd")
        if not path.exists():
            raise SkipTest("/dev/fd is not available.")

        return set([child.basename() for child in path.children()])
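
# One plausible use of such a helper in a test (hypothetical;
# do_something_with_files is a stand-in for the code under test): detect
# descriptor leaks by comparing snapshots taken before and after.
before = process_fds()
do_something_with_files()
leaked = process_fds() - before
assert not leaked, "leaked file descriptors: {}".format(sorted(leaked))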
Example No. 40
def create_proxy_to(logger, ip, port):
    """
    :see: ``HostNetwork.create_proxy_to``
    """
    action = CREATE_PROXY_TO(logger=logger, target_ip=ip, target_port=port)

    with action:
        encoded_ip = unicode(ip).encode("ascii")
        encoded_port = unicode(port).encode("ascii")

        # The first goal is to configure "Destination NAT" (DNAT).  We're just
        # going to rewrite the destination address of traffic arriving on the
        # specified port so it looks like it is destined for the specified ip
        # instead of destined for "us".  This gets the packets delivered to the
        # right destination.
        iptables(
            logger,
            [
                # All NAT stuff happens in the netfilter NAT table.
                b"--table",
                b"nat",

                # Destination NAT has to happen "pre"-routing so that the normal
                # routing rules on the machine will use the re-written destination
                # address and get the packet to that new destination.  Accomplish
                # this by appending the rule to the PREROUTING chain.
                b"--append",
                b"PREROUTING",

                # Only re-route traffic with a destination port matching the one we
                # were told to manipulate.  It is also necessary to specify TCP (or
                # UDP) here since that is the layer of the network stack that
                # defines ports.
                b"--protocol",
                b"tcp",
                b"--destination-port",
                encoded_port,

                # And only re-route traffic directed at this host.  Traffic
                # originating on this host directed at some random other host that
                # happens to be on the same port should be left alone.
                b"--match",
                b"addrtype",
                b"--dst-type",
                b"LOCAL",

                # Tag it as a flocker-created rule so we can recognize it later.
                b"--match",
                b"comment",
                b"--comment",
                FLOCKER_PROXY_COMMENT_MARKER,

                # If the filter matched, jump to the DNAT chain to handle doing the
                # actual packet mangling.  DNAT is a built-in chain that already
                # knows how to do this.  Pass an argument to the DNAT chain so it
                # knows how to mangle the packet - rewrite the destination IP of
                # the address to the target we were told to use.
                b"--jump",
                b"DNAT",
                b"--to-destination",
                encoded_ip,
            ])

        # Bonus round!  Having performed DNAT (changing the destination) during
        # prerouting we are now prepared to send the packet on somewhere else.
        # On its way out of this system it is also necessary to further
        # modify and then track that packet.  We want it to look like it
        # comes from us (the downstream client will be *very* confused if
        # the node we're passing the packet on to replies *directly* to them;
        # and by confused I mean it will be totally broken, of course) so we
        # also need to "masquerade" in the postrouting chain.  This changes
        # the source address (ip and port) of the packet to the address of
        # the external interface the packet is exiting upon. Doing SNAT here
        # would be a little bit more efficient because the kernel could avoid
        # looking up the external interface's address for every single packet.
        # But it requires this code to know that address and it requires that
        # if it ever changes the rule gets updated and it may require some
        # steps to do port allocation (not sure what they are yet).  So we'll
        # just masquerade for now.
        iptables(
            logger,
            [
                # All NAT stuff happens in the netfilter NAT table.
                b"--table",
                b"nat",

                # As described above, this transformation happens after routing
                # decisions have been made and the packet is on its way out of the
                # system.  Therefore, append the rule to the POSTROUTING chain.
                b"--append",
                b"POSTROUTING",

                # We'll stick to matching the same kinds of packets we matched in
                # the earlier stage.  We might want to change the factoring of this
                # code to avoid the duplication - particularly in case we want to
                # change the specifics of the filter.
                #
                # This omits the LOCAL addrtype check, though, because at this
                # point the packet is definitely leaving this host.
                b"--protocol",
                b"tcp",
                b"--destination-port",
                encoded_port,

                # Do the masquerading.
                b"--jump",
                b"MASQUERADE",
            ])

        # Secret level!!  Traffic that originates *on* the host bypasses the
        # PREROUTING chain.  Instead, it passes through the OUTPUT chain.  If
        # we want connections from localhost to the forwarded port to be
        # affected then we need a rule in the OUTPUT chain to do the same kind
        # of DNAT that we did in the PREROUTING chain.
        iptables(
            logger,
            [
                # All NAT stuff happens in the netfilter NAT table.
                b"--table",
                b"nat",

                # As mentioned, this rule is for the OUTPUT chain.
                b"--append",
                b"OUTPUT",

                # Matching the exact same kinds of packets as the PREROUTING rule
                # matches.
                b"--protocol",
                b"tcp",
                b"--destination-port",
                encoded_port,
                b"--match",
                b"addrtype",
                b"--dst-type",
                b"LOCAL",

                # Do the same DNAT as we did in the rule for the PREROUTING chain.
                b"--jump",
                b"DNAT",
                b"--to-destination",
                encoded_ip,
            ])

        iptables(logger, [
            b"--table",
            b"filter",
            b"--insert",
            b"FORWARD",
            b"--destination",
            encoded_ip,
            b"--protocol",
            b"tcp",
            b"--destination-port",
            encoded_port,
            b"--jump",
            b"ACCEPT",
        ])

        # The network stack only considers forwarding traffic when certain
        # system configuration is in place.
        #
        # https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
        # will explain the meaning of these in (very slightly) more detail.
        conf = FilePath(b"/proc/sys/net/ipv4/conf")
        descendant = conf.descendant([b"default", b"forwarding"])
        with descendant.open("wb") as forwarding:
            forwarding.write(b"1")

        # In order to have the OUTPUT chain DNAT rule affect routing decisions,
        # we also need to tell the system to make routing decisions about
        # traffic from or to localhost.
        for path in conf.children():
            with path.child(b"route_localnet").open("wb") as route_localnet:
                route_localnet.write(b"1")

        return Proxy(ip=ip, port=port)
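
# A minimal usage sketch, assuming root privileges, the iptables(),
# CREATE_PROXY_TO and Proxy names imported by the original module, and
# that CREATE_PROXY_TO is an Eliot action type (hence the Eliot logger).
# Running this actually installs iptables rules.
from eliot import Logger

logger = Logger()
proxy = create_proxy_to(logger, u"10.0.0.2", 4242)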
Example No. 41
class MapUpdater(object):
    def __init__(self, mapsPath, fetchURL, deleteIfNotPresent, tfLevelSounds):
        assert isinstance(mapsPath, str) and len(mapsPath)
        assert isinstance(fetchURL, str) and len(fetchURL)
        self.mapsPath = FilePath(mapsPath)
        self.downloadTempPath = self.mapsPath.child('mapupdater')
        self.fetchURL = URLPath.fromString(fetchURL)
        self.deleteIfNotPresent = deleteIfNotPresent
        self.tfLevelSounds = tfLevelSounds
        self.semaphore = DeferredSemaphore(1)
        self.downloadSemaphore = DeferredSemaphore(4)
        for fp in self.downloadTempPath.globChildren('*.bsp.bz2'):
            fp.remove()


    def checkMaps(self, *a, **kw):
        """
        Wrap self._checkMaps to prevent running multiple checks at once.
        """
        return self.semaphore.run(self._checkMaps, *a, **kw)


    def _checkMaps(self, forceDownloadMaps=None):
        def _cb(remoteMaps):
            if forceDownloadMaps:
                remoteMaps = list(set(remoteMaps + forceDownloadMaps))
            remoteMapsLower = [f.lower() for f in remoteMaps]
            ourMaps = filter(lambda p: not p.isdir() and p.path.endswith('.bsp'),
                             self.mapsPath.children())
            ourMapFilenames = [p.basename().lower() + '.bz2' for p in ourMaps]

            missing = []
            for f in remoteMaps:
                if f.lower() not in ourMapFilenames:
                    missing.append(f)

            delete = []
            for p in ourMaps:
                filename = p.basename().lower() + '.bz2'
                if filename not in remoteMapsLower:
                    delete.append(p)

            if self.deleteIfNotPresent and delete:
                for fp in delete:
                    fp.remove()

                print 'Deleted {} map(s) not present at remote server:'.format(len(delete))
                print ', '.join([x.basename() for x in delete])

            if missing:
                print 'Fetching {} map(s)'.format(len(missing))

                def _allFinished(ignored):
                    self.mapsPath.child('tempus_map_updater_run_once').touch()
                    if self.tfLevelSounds:
                        self.addLevelSounds(ourMaps)
                    print 'Now up-to-date.'

                ds = []
                for filename in missing:
                    ds.append(self.fetchMap(filename))
                return gatherResults(ds).addCallback(_allFinished)
            elif self.tfLevelSounds:
                self.addLevelSounds(ourMaps)


        return self.getMapList().addCallback(_cb)


    def fetchMap(self, *a, **kw):
        return self.downloadSemaphore.run(self._fetchMap, *a, **kw)


    def _fetchMap(self, filename):
        downloadTempPath = self.downloadTempPath
        if not downloadTempPath.exists():
            downloadTempPath.makedirs()

        def _cb(response, fn):
            tp = downloadTempPath.child(fn)
            fd = tp.open('wb')

            def _extracted(ignored):
                extractedPath = tp.sibling(tp.basename().replace('.bz2', ''))
                extractedPath.moveTo(
                    self.mapsPath.child(tp.basename().replace('.bz2', '')))
                try:
                    tp.remove()
                # File already gone
                except OSError:
                    pass
                print 'Finished downloading {}'.format(fn)

            def _finished(ignored):
                fd.close()
                d = getProcessOutputAndValue(
                    'aunpack', (tp.path, '-X', downloadTempPath.path))
                d.addErrback(log.err)
                d.addCallback(_extracted)
                return d

            def _eb(failure):
                print 'Error downloading {}:'.format(fn)
                print failure.getTraceback()
                fd.close()
                try:
                    tp.remove()
                # File already gone
                except OSError:
                    pass

            d = treq.collect(response, fd.write)
            d.addCallback(_finished)
            d.addErrback(_eb)
            return d

        d = treq.get(str(self.fetchURL.child(filename)))
        return d.addCallback(_cb, filename)


    def getMapList(self, forceDownloadMaps=None):
        raise NotImplementedError('Subclasses must override this method.')


    def addLevelSounds(self, mapPaths):
        content = FilePath(mapupdater.__file__).sibling(
            'tf_level_sounds.txt').getContent()
        added = []
        for p in mapPaths:
            mapName = p.basename()[:-4]
            p2 = p.sibling('{}_level_sounds.txt'.format(mapName))
            if p2.exists() and p2.getContent() == content:
                continue
            added.append(mapName)
            p2.setContent(content)
        if added:
            print 'Added level sounds for:'
            print ', '.join(added)
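
# getMapList is left abstract above, so a concrete subclass must supply
# it. A hypothetical sketch of the contract _checkMaps relies on: return
# a Deferred that fires with the list of remote '<map>.bsp.bz2' filenames.
# The 'index.txt' name and its one-filename-per-line format are invented;
# treq is already imported by the original module.
class FastDLMapUpdater(MapUpdater):
    def getMapList(self, forceDownloadMaps=None):
        def _parse(body):
            return [line.strip() for line in body.splitlines()
                    if line.strip().endswith('.bsp.bz2')]
        d = treq.get(str(self.fetchURL.child('index.txt')))
        d.addCallback(treq.content)  # collect the full response body
        return d.addCallback(_parse)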
Example No. 42
def create_proxy_to(logger, ip, port):
    """
    :see: ``HostNetwork.create_proxy_to``
    """
    action = CREATE_PROXY_TO(
        logger=logger, target_ip=ip, target_port=port)

    with action:
        encoded_ip = unicode(ip).encode("ascii")
        encoded_port = unicode(port).encode("ascii")

        # The first goal is to configure "Destination NAT" (DNAT).  We're just
        # going to rewrite the destination address of traffic arriving on the
        # specified port so it looks like it is destined for the specified ip
        # instead of destined for "us".  This gets the packets delivered to the
        # right destination.
        iptables(logger, [
            # All NAT stuff happens in the netfilter NAT table.
            b"--table", b"nat",

            # Destination NAT has to happen "pre"-routing so that the normal
            # routing rules on the machine will use the re-written destination
            # address and get the packet to that new destination.  Accomplish
            # this by appending the rule to the PREROUTING chain.
            b"--append", b"PREROUTING",

            # Only re-route traffic with a destination port matching the one we
            # were told to manipulate.  It is also necessary to specify TCP (or
            # UDP) here since that is the layer of the network stack that
            # defines ports.
            b"--protocol", b"tcp", b"--destination-port", encoded_port,

            # And only re-route traffic directed at this host.  Traffic
            # originating on this host directed at some random other host that
            # happens to be on the same port should be left alone.
            b"--match", b"addrtype", b"--dst-type", b"LOCAL",

            # Tag it as a flocker-created rule so we can recognize it later.
            b"--match", b"comment", b"--comment", FLOCKER_PROXY_COMMENT_MARKER,

            # If the filter matched, jump to the DNAT chain to handle doing the
            # actual packet mangling.  DNAT is a built-in chain that already
            # knows how to do this.  Pass an argument to the DNAT chain so it
            # knows how to mangle the packet - rewrite the destination IP of
            # the address to the target we were told to use.
            b"--jump", b"DNAT", b"--to-destination", encoded_ip,
        ])

        # Bonus round!  Having performed DNAT (changing the destination) during
        # prerouting we are now prepared to send the packet on somewhere else.
        # On its way out of this system it is also necessary to further
        # modify and then track that packet.  We want it to look like it
        # comes from us (the downstream client will be *very* confused if
        # the node we're passing the packet on to replies *directly* to them;
        # and by confused I mean it will be totally broken, of course) so we
        # also need to "masquerade" in the postrouting chain.  This changes
        # the source address (ip and port) of the packet to the address of
        # the external interface the packet is exiting upon. Doing SNAT here
        # would be a little bit more efficient because the kernel could avoid
        # looking up the external interface's address for every single packet.
        # But it requires this code to know that address and it requires that
        # if it ever changes the rule gets updated and it may require some
        # steps to do port allocation (not sure what they are yet).  So we'll
        # just masquerade for now.
        iptables(logger, [
            # All NAT stuff happens in the netfilter NAT table.
            b"--table", b"nat",

            # As described above, this transformation happens after routing
            # decisions have been made and the packet is on its way out of the
            # system.  Therefore, append the rule to the POSTROUTING chain.
            b"--append", b"POSTROUTING",

            # We'll stick to matching the same kinds of packets we matched in
            # the earlier stage.  We might want to change the factoring of this
            # code to avoid the duplication - particularly in case we want to
            # change the specifics of the filter.
            #
            # This omits the LOCAL addrtype check, though, because at this
            # point the packet is definitely leaving this host.
            b"--protocol", b"tcp", b"--destination-port", encoded_port,

            # Do the masquerading.
            b"--jump", b"MASQUERADE",
        ])

        # Secret level!!  Traffic that originates *on* the host bypasses the
        # PREROUTING chain.  Instead, it passes through the OUTPUT chain.  If
        # we want connections from localhost to the forwarded port to be
        # affected then we need a rule in the OUTPUT chain to do the same kind
        # of DNAT that we did in the PREROUTING chain.
        iptables(logger, [
            # All NAT stuff happens in the netfilter NAT table.
            b"--table", b"nat",

            # As mentioned, this rule is for the OUTPUT chain.
            b"--append", b"OUTPUT",

            # Matching the exact same kinds of packets as the PREROUTING rule
            # matches.
            b"--protocol", b"tcp",
            b"--destination-port", encoded_port,
            b"--match", b"addrtype", b"--dst-type", b"LOCAL",

            # Do the same DNAT as we did in the rule for the PREROUTING chain.
            b"--jump", b"DNAT", b"--to-destination", encoded_ip,
        ])

        iptables(logger, [
            b"--table", b"filter",
            b"--insert", b"FORWARD",

            b"--destination", encoded_ip,
            b"--protocol", b"tcp", b"--destination-port", encoded_port,

            b"--jump", b"ACCEPT",
        ])

        # The network stack only considers forwarding traffic when certain
        # system configuration is in place.
        #
        # https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
        # will explain the meaning of these in (very slightly) more detail.
        conf = FilePath(b"/proc/sys/net/ipv4/conf")
        descendant = conf.descendant([b"default", b"forwarding"])
        with descendant.open("wb") as forwarding:
            forwarding.write(b"1")

        # In order to have the OUTPUT chain DNAT rule affect routing decisions,
        # we also need to tell the system to make routing decisions about
        # traffic from or to localhost.
        for path in conf.children():
            with path.child(b"route_localnet").open("wb") as route_localnet:
                route_localnet.write(b"1")

        return Proxy(ip=ip, port=port)
Example No. 43
    def process_fds():
        path = FilePath(b"/proc/self/fd")
        return set([child.basename() for child in path.children()])
Example No. 44
class Voluminous(object):
    lockFactory = DockerLock

    def __init__(self, directory):
        self._directory = FilePath(directory)
        self._output = []
        self.lock = self.lockFactory()
        self.commitDatabase = JsonCommitDatabase(self._directory)

    def output(self, s):
        self._output.append(s)
        print s

    def getOutput(self):
        return self._output

    def allBranches(self, volume):
        volumePath = self._directory.child(volume)
        branches = volumePath.child("branches").children()
        return [b.basename() for b in branches if b.isdir()]

    def listBranches(self):
        volume = self.volume()
        branches = self.allBranches(volume)
        currentBranch = self.getActiveBranch(volume)
        self.output("\n".join(
            sorted(("*" if b == currentBranch else " ") + " " + b
                   for b in branches)))

    def checkoutBranch(self, branch, create):
        """
        "Check out" a branch, restarting containers in process, creating it
        from current branch HEAD if requested.
        """
        volume = self.volume()
        volumePath = self._directory.child(volume)
        branchPath = volumePath.child("branches").child(branch)
        if create:
            if branchPath.exists():
                self.output("Cannot create existing branch %s" % (branch, ))
                return
            else:
                try:
                    HEAD = self._resolveNamedCommitCurrentBranch(
                        "HEAD", volume)
                except IndexError:
                    self.output(
                        "You must commit ('dvol commit') before you can "
                        "branch ('dvol checkout -b')")
                    return
                # Copy metadata
                meta = self.commitDatabase.read(volume,
                                                self.getActiveBranch(volume))
                self.commitDatabase.write(volume, branch, meta)
                # Then copy latest HEAD of branch into new branch data
                # directory
                volumePath.child("commits").child(HEAD).copyTo(branchPath)
        else:
            if not branchPath.exists():
                self.output("Cannot switch to non-existing branch %s" %
                            (branch, ))
                return
        # Got here, so switch to the (maybe new) branch
        self.setActiveBranch(volume, branch)

    def createBranch(self, volume, branch):
        branchDir = self._directory.child(volume).child("branches").child(
            branch)
        branchDir.makedirs()
        self.output("Created branch %s/%s" % (volume, branch))

    def createVolume(self, name):
        if self._directory.child(name).exists():
            self.output("Error: volume %s already exists" % (name, ))
            raise VolumeAlreadyExists()
        self._directory.child(name).makedirs()
        self.setActiveVolume(name)
        self.output("Created volume %s" % (name, ))
        self.createBranch(name, DEFAULT_BRANCH)

    def removeVolume(self, volume):
        if not self._directory.child(volume).exists():
            raise UsageError("Volume %r does not exist, cannot remove it" %
                             (volume, ))
        containers = self.lock.containers.get_related_containers(volume)
        if containers:
            raise UsageError("Cannot remove %r while it is in use by '%s'" %
                             (volume, (",".join(c['Name']
                                                for c in containers))))
        if self._userIsSure(
                "This will remove all containers using the volume"):
            self.output("Deleting volume %r" % (volume, ))
            # Remove related containers
            self.lock.containers.remove_related_containers(volume)
            self._directory.child(volume).remove()

        else:
            self.output("Aborting.")

    def deleteBranch(self, branch):
        volume = self.volume()
        if branch == self.getActiveBranch(volume):
            raise UsageError("Cannot delete active branch, use "
                             "'dvol checkout' to switch branches first")
        if branch not in self.allBranches(volume):
            raise UsageError("Branch %r does not exist" % (branch, ))
        if self._userIsSure():
            self.output("Deleting branch %r" % (branch, ))
            volumePath = self._directory.child(volume)
            branchPath = volumePath.child("branches").child(branch)
            branchPath.remove()
        else:
            self.output("Aborting.")

    def _userIsSure(self, extraMessage=None):
        message = "Are you sure? "
        if extraMessage:
            message += extraMessage
        message += " (y/n): "
        sys.stdout.write(message)
        sys.stdout.flush()
        return raw_input().lower() in ("y", "yes")

    def setActiveVolume(self, volume):
        self._directory.child("current_volume.json").setContent(
            json.dumps(dict(current_volume=volume)))

    def volume(self):
        currentVolume = self._directory.child("current_volume.json")
        if currentVolume.exists():
            volume = json.loads(currentVolume.getContent())["current_volume"]
        else:
            raise UsageError("No active volume: use dvol switch to choose one")
        if not self._directory.child(volume).exists():
            raise UsageError("Active volume %s does not exist: "
                             "use dvol switch to choose another" % (volume, ))
        return volume

    def setActiveBranch(self, volume, branch):
        self._directory.child(volume).child("current_branch.json").setContent(
            json.dumps(dict(current_branch=branch)))
        self.lock.acquire(volume)
        try:
            self.updateRunningPoint(volume)
        finally:
            self.lock.release(volume)

    def getActiveBranch(self, volume):
        currentBranch = self._directory.child(volume).child(
            "current_branch.json")
        if currentBranch.exists():
            return json.loads(currentBranch.getContent())["current_branch"]
        else:
            return DEFAULT_BRANCH

    def updateRunningPoint(self, volume):
        """
        Construct a path that stays stable across branch switches by
        symlinking it to the active branch.
        """
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        stablePath = volumePath.child("running_point")
        if stablePath.exists():
            stablePath.remove()
        branchPath.linkTo(stablePath)
        return stablePath.path

    def commitVolume(self, message):
        volume = self.volume()
        commitId = (str(uuid.uuid4()) + str(uuid.uuid4())).replace("-",
                                                                   "")[:40]
        self.output(commitId)
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        commitPath = volumePath.child("commits").child(commitId)
        if commitPath.exists():
            raise Exception("woah, random uuid collision. try again!")
        commitPath.makedirs()
        # acquire lock (read: stop containers) to ensure consistent snapshot
        # with file-copy based backend
        # XXX tests for acquire/release
        self.lock.acquire(volume)
        try:
            branchPath.copyTo(commitPath)
        finally:
            self.lock.release(volume)
        self._recordCommit(volume, branchName, commitId, message)

    def _recordCommit(self, volume, branch, commitId, message):
        commitData = self.commitDatabase.read(volume, branch)
        commitData.append(dict(id=commitId, message=message))
        self.commitDatabase.write(volume, branch, commitData)

    def exists(self, volume):
        volumePath = self._directory.child(volume)
        return volumePath.exists()

    def listVolumes(self):
        table = get_table()
        table.set_cols_align(["l", "l", "l"])
        dc = self.lock.containers  # XXX ugly
        volumes = [v for v in self._directory.children() if v.isdir()]
        activeVolume = None
        if volumes:
            try:
                activeVolume = self.volume()
            except UsageError:
                # don't refuse to list volumes just because none of them are active
                pass
        rows = [["", "", ""]] + [["  VOLUME", "BRANCH", "CONTAINERS"]] + [[
            ("*" if v.basename() == activeVolume else " ") + " " +
            v.basename(),
            self.getActiveBranch(v.basename()), ",".join(
                c['Name'] for c in dc.get_related_containers(v.basename()))
        ] for v in volumes]
        table.add_rows(rows)
        self.output(table.draw())

    def listCommits(self, branch=None):
        if branch is None:
            branch = self.getActiveBranch(self.volume())
        volume = self.volume()
        aggregate = []
        for commit in reversed(self.commitDatabase.read(volume, branch)):
            # TODO fill in author/date
            aggregate.append("commit %(id)s\n"
                             "Author: Who knows <mystery@person>\n"
                             "Date: Whenever\n"
                             "\n"
                             "    %(message)s\n" % commit)
        self.output("\n".join(aggregate))

    def _resolveNamedCommitCurrentBranch(self, commit, volume):
        branch = self.getActiveBranch(volume)
        remainder = commit[len("HEAD"):]
        if remainder == "^" * len(remainder):
            offset = len(remainder)
        else:
            raise UsageError("Malformed commit identifier %r" % (commit, ))
        commits = self.commitDatabase.read(volume, branch)
        # commits are appended to, so the last one is the latest
        return commits[-1 - offset]["id"]

    def _destroyNewerCommits(self, commit, volume):
        # TODO in the future, we'll care more about the following being an
        # atomic operation
        branch = self.getActiveBranch(volume)
        commits = self.commitDatabase.read(volume, branch)
        commitIndex = [c["id"] for c in commits].index(commit) + 1
        remainingCommits = commits[:commitIndex]
        destroyCommits = commits[commitIndex:]
        # look in all branches for commit references before removing them
        totalCommits = set()
        for otherBranch in self.allBranches(volume):
            if otherBranch == branch:
                # skip this branch, otherwise we'll never destroy any commits
                continue
            commits = self.commitDatabase.read(volume, otherBranch)
            totalCommits.update(commit["id"] for commit in commits)
        for commit in destroyCommits:
            commitId = commit["id"]
            if commitId in totalCommits:
                # skip destroying this commit; it is still actively referred to
                # in another branch
                continue
            volumePath = self._directory.child(volume)
            commitPath = volumePath.child("commits").child(commitId)
            commitPath.remove()
        self.commitDatabase.write(volume, branch, remainingCommits)

    def resetVolume(self, commit):
        """
        Forcefully roll back the current working copy to this commit,
        destroying any later commits.
        """
        volume = self.volume()
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        if commit.startswith("HEAD"):
            try:
                commit = self._resolveNamedCommitCurrentBranch(commit, volume)
            except IndexError:
                self.output("Referenced commit does not exist; check dvol log")
                return
        commitPath = volumePath.child("commits").child(commit)
        if not commitPath.exists():
            raise NoSuchCommit("commit '%s' does not exist" % (commit, ))
        self.lock.acquire(volume)
        try:
            branchPath.remove()
            commitPath.copyTo(branchPath)
            self._destroyNewerCommits(commit, volume)
        finally:
            self.lock.release(volume)
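
The HEAD resolution above accepts only "HEAD" followed by zero or more "^"
suffixes, and turns the suffix count into an offset from the newest commit. A
minimal standalone sketch of the same rule (function and data names here are
illustrative, not part of dvol):

def resolve_head_offset(identifier):
    # "HEAD" -> 0, "HEAD^" -> 1, "HEAD^^" -> 2; anything else is rejected
    remainder = identifier[len("HEAD"):]
    if remainder != "^" * len(remainder):
        raise ValueError("Malformed commit identifier %r" % (identifier,))
    return len(remainder)

commits = [{"id": "aaa"}, {"id": "bbb"}, {"id": "ccc"}]  # oldest first
# commits are appended to, so the last one is the latest:
assert commits[-1 - resolve_head_offset("HEAD")]["id"] == "ccc"
assert commits[-1 - resolve_head_offset("HEAD^")]["id"] == "bbb"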
Exemplo n.º 45
    def process_fds():
        """
        Return the set of file descriptor numbers currently open in this
        process, read from /proc/self/fd (Linux only).
        """
        path = FilePath(b"/proc/self/fd")
        if not path.exists():
            raise SkipTest("/proc is not available.")

        return set([child.basename() for child in path.children()])
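
Since it reads /proc/self/fd, process_fds only works on Linux. A typical use
is to snapshot the descriptor set around an operation and assert that nothing
leaked; a minimal usage sketch (the opened file is just a stand-in):

before = process_fds()
f = open("/dev/null")  # stand-in for any operation suspected of leaking fds
f.close()
after = process_fds()
assert after == before, "leaked fds: %r" % (sorted(after - before),)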
Exemplo n.º 46
class NewsBuilderTests(TestCase, StructureAssertingMixin):
    """
    Tests for L{NewsBuilder}.
    """
    skip = svnSkip

    def setUp(self):
        """
        Create a fake project and stuff some basic structure and content into
        it.
        """
        self.builder = NewsBuilder()
        self.project = FilePath(self.mktemp())
        self.project.createDirectory()

        self.existingText = 'Here is stuff which was present previously.\n'
        createStructure(
            self.project, {
                'NEWS': self.existingText,
                '5.feature': 'We now support the web.\n',
                '12.feature': 'The widget is more robust.\n',
                '15.feature': (
                    'A very long feature which takes many words to '
                    'describe with any accuracy was introduced so that '
                    'the line wrapping behavior of the news generating '
                    'code could be verified.\n'),
                '16.feature': (
                    'A simpler feature\ndescribed on multiple lines\n'
                    'was added.\n'),
                '23.bugfix': 'Broken stuff was fixed.\n',
                '25.removal': 'Stupid stuff was deprecated.\n',
                '30.misc': '',
                '35.misc': '',
                '40.doc': 'foo.bar.Baz.quux',
                '41.doc': 'writing Foo servers'})


    def test_today(self):
        """
        L{NewsBuilder._today} returns today's date in YYYY-MM-DD form.
        """
        self.assertEqual(
            self.builder._today(), date.today().strftime('%Y-%m-%d'))


    def test_findFeatures(self):
        """
        When called with L{NewsBuilder._FEATURE}, L{NewsBuilder._findChanges}
        returns a list of feature ticket numbers and descriptions as a list of
        two-tuples.
        """
        features = self.builder._findChanges(
            self.project, self.builder._FEATURE)
        self.assertEqual(
            features,
            [(5, "We now support the web."),
             (12, "The widget is more robust."),
             (15,
              "A very long feature which takes many words to describe with "
              "any accuracy was introduced so that the line wrapping behavior "
              "of the news generating code could be verified."),
             (16, "A simpler feature described on multiple lines was added.")])


    def test_findBugfixes(self):
        """
        When called with L{NewsBuilder._BUGFIX}, L{NewsBuilder._findChanges}
        returns a list of bugfix ticket numbers and descriptions as a list of
        two-tuples.
        """
        bugfixes = self.builder._findChanges(
            self.project, self.builder._BUGFIX)
        self.assertEqual(
            bugfixes,
            [(23, 'Broken stuff was fixed.')])


    def test_findRemovals(self):
        """
        When called with L{NewsBuilder._REMOVAL}, L{NewsBuilder._findChanges}
        returns a list of removal/deprecation ticket numbers and descriptions
        as a list of two-tuples.
        """
        removals = self.builder._findChanges(
            self.project, self.builder._REMOVAL)
        self.assertEqual(
            removals,
            [(25, 'Stupid stuff was deprecated.')])


    def test_findDocumentation(self):
        """
        When called with L{NewsBuilder._DOC}, L{NewsBuilder._findChanges}
        returns a list of documentation ticket numbers and descriptions as a
        list of two-tuples.
        """
        doc = self.builder._findChanges(
            self.project, self.builder._DOC)
        self.assertEqual(
            doc,
            [(40, 'foo.bar.Baz.quux'),
             (41, 'writing Foo servers')])


    def test_findMiscellaneous(self):
        """
        When called with L{NewsBuilder._MISC}, L{NewsBuilder._findChanges}
        returns a list of miscellaneous ticket numbers and descriptions
        as a list of two-tuples.
        """
        misc = self.builder._findChanges(
            self.project, self.builder._MISC)
        self.assertEqual(
            misc,
            [(30, ''),
             (35, '')])


    def test_writeHeader(self):
        """
        L{NewsBuilder._writeHeader} accepts a file-like object opened for
        writing and a header string and writes out a news file header to it.
        """
        output = StringIO()
        self.builder._writeHeader(output, "Super Awesometastic 32.16")
        self.assertEqual(
            output.getvalue(),
            "Super Awesometastic 32.16\n"
            "=========================\n"
            "\n")


    def test_writeSection(self):
        """
        L{NewsBuilder._writeSection} accepts a file-like object opened for
        writing, a section name, and a list of ticket information (as returned
        by L{NewsBuilder._findChanges}) and writes out a section header and all
        of the given ticket information.
        """
        output = StringIO()
        self.builder._writeSection(
            output, "Features",
            [(3, "Great stuff."),
             (17, "Very long line which goes on and on and on, seemingly "
              "without end until suddenly without warning it does end.")])
        self.assertEqual(
            output.getvalue(),
            "Features\n"
            "--------\n"
            " - Great stuff. (#3)\n"
            " - Very long line which goes on and on and on, seemingly "
            "without end\n"
            "   until suddenly without warning it does end. (#17)\n"
            "\n")


    def test_writeMisc(self):
        """
        L{NewsBuilder._writeMisc} accepts a file-like object opened for
        writing, a section name, and a list of ticket information (as returned
        by L{NewsBuilder._findChanges}) and writes out a section header and all
        of the ticket numbers, but excludes any descriptions.
        """
        output = StringIO()
        self.builder._writeMisc(
            output, "Other",
            [(x, "") for x in range(2, 50, 3)])
        self.assertEqual(
            output.getvalue(),
            "Other\n"
            "-----\n"
            " - #2, #5, #8, #11, #14, #17, #20, #23, #26, #29, #32, #35, "
            "#38, #41,\n"
            "   #44, #47\n"
            "\n")


    def test_build(self):
        """
        L{NewsBuilder.build} updates a NEWS file with new features based on the
        I{<ticket>.feature} files found in the directory specified.
        """
        self.builder.build(
            self.project, self.project.child('NEWS'),
            "Super Awesometastic 32.16")

        results = self.project.child('NEWS').getContent()
        self.assertEqual(
            results,
            'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5)\n'
            ' - The widget is more robust. (#12)\n'
            ' - A very long feature which takes many words to describe '
            'with any\n'
            '   accuracy was introduced so that the line wrapping behavior '
            'of the\n'
            '   news generating code could be verified. (#15)\n'
            ' - A simpler feature described on multiple lines was '
            'added. (#16)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n' + self.existingText)


    def test_emptyProjectCalledOut(self):
        """
        If no changes exist for a project, I{NEWS} gains a new section for
        that project that includes some helpful text about how there were no
        interesting changes.
        """
        project = FilePath(self.mktemp()).child("twisted")
        project.makedirs()
        createStructure(project, {'NEWS': self.existingText})

        self.builder.build(
            project, project.child('NEWS'),
            "Super Awesometastic 32.16")
        results = project.child('NEWS').getContent()
        self.assertEqual(
            results,
            'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n' +
            self.builder._NO_CHANGES +
            '\n\n' + self.existingText)


    def test_preserveTicketHint(self):
        """
        If a I{NEWS} file begins with the two magic lines which point readers
        at the issue tracker, those lines are kept at the top of the new file.
        """
        news = self.project.child('NEWS')
        news.setContent(
            'Ticket numbers in this file can be looked up by visiting\n'
            'http://twistedmatrix.com/trac/ticket/<number>\n'
            '\n'
            'Blah blah other stuff.\n')

        self.builder.build(self.project, news, "Super Awesometastic 32.16")

        self.assertEqual(
            news.getContent(),
            'Ticket numbers in this file can be looked up by visiting\n'
            'http://twistedmatrix.com/trac/ticket/<number>\n'
            '\n'
            'Super Awesometastic 32.16\n'
            '=========================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5)\n'
            ' - The widget is more robust. (#12)\n'
            ' - A very long feature which takes many words to describe '
            'with any\n'
            '   accuracy was introduced so that the line wrapping behavior '
            'of the\n'
            '   news generating code could be verified. (#15)\n'
            ' - A simpler feature described on multiple lines was '
            'added. (#16)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n'
            'Blah blah other stuff.\n')


    def test_emptySectionsOmitted(self):
        """
        If there are no changes of a particular type (feature, bugfix, etc), no
        section for that type is written by L{NewsBuilder.build}.
        """
        for ticket in self.project.children():
            if ticket.splitext()[1] in ('.feature', '.misc', '.doc'):
                ticket.remove()

        self.builder.build(
            self.project, self.project.child('NEWS'),
            'Some Thing 1.2')

        self.assertEqual(
            self.project.child('NEWS').getContent(),
            'Some Thing 1.2\n'
            '==============\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n\n'
            'Here is stuff which was present previously.\n')


    def test_duplicatesMerged(self):
        """
        If two change files have the same contents, they are merged in the
        generated news entry.
        """
        def feature(s):
            return self.project.child(s + '.feature')
        feature('5').copyTo(feature('15'))
        feature('5').copyTo(feature('16'))

        self.builder.build(
            self.project, self.project.child('NEWS'),
            'Project Name 5.0')

        self.assertEqual(
            self.project.child('NEWS').getContent(),
            'Project Name 5.0\n'
            '================\n'
            '\n'
            'Features\n'
            '--------\n'
            ' - We now support the web. (#5, #15, #16)\n'
            ' - The widget is more robust. (#12)\n'
            '\n'
            'Bugfixes\n'
            '--------\n'
            ' - Broken stuff was fixed. (#23)\n'
            '\n'
            'Improved Documentation\n'
            '----------------------\n'
            ' - foo.bar.Baz.quux (#40)\n'
            ' - writing Foo servers (#41)\n'
            '\n'
            'Deprecations and Removals\n'
            '-------------------------\n'
            ' - Stupid stuff was deprecated. (#25)\n'
            '\n'
            'Other\n'
            '-----\n'
            ' - #30, #35\n'
            '\n\n'
            'Here is stuff which was present previously.\n')
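
test_duplicatesMerged relies on change files with identical contents being
collapsed into a single entry listing every ticket number. A rough sketch of
that merge rule (not NewsBuilder's actual implementation; ordering is ignored
here):

def merge_duplicates(changes):
    # map each description to the list of tickets that share it
    merged = {}
    for ticket, description in changes:
        merged.setdefault(description, []).append(ticket)
    return merged

print merge_duplicates([
    (5, 'We now support the web.'),
    (15, 'We now support the web.'),
    (16, 'We now support the web.'),
    (12, 'The widget is more robust.'),
])
# {'We now support the web.': [5, 15, 16],
#  'The widget is more robust.': [12]}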
Exemplo n.º 47
class FileUploadResource(Resource):

    """
    Twisted Web resource that handles file uploads via HTTP POST requests.
    """

    log = make_logger()

    def __init__(self,
                 upload_directory,
                 temp_directory,
                 form_fields,
                 upload_session,
                 options=None):
        """

        :param upload_directory: The target directory where uploaded files will be stored.
        :type upload_directory: str
        :param temp_directory: A temporary directory where chunks of a file being uploaded are stored.
        :type temp_directory: str
        :param form_fields: Names of HTML form fields used for uploading.
        :type form_fields: dict
        :param upload_session: An instance of `ApplicationSession` used for publishing progress events.
        :type upload_session: obj
        :param options: Options for file upload.
        :type options: dict or None
        """

        Resource.__init__(self)
        self._uploadRoot = FilePath(upload_directory)
        self._tempDirRoot = FilePath(temp_directory)
        self._form_fields = form_fields
        self._fileupload_session = upload_session
        self._options = options or {}
        self._max_file_size = self._options.get('max_file_size', 10 * 1024 * 1024)
        self._fileTypes = self._options.get('file_types', None)
        self._file_permissions = self._options.get('file_permissions', None)

        # track uploaded files / chunks
        self._uploads = {}

        self.log.info('Upload Resource started.')

        # scan the temp dir for uploaded chunks and fill the _uploads dict with it
        # so existing uploads can be resumed
        # all other remains will be purged
        for fileTempDir in self._tempDirRoot.children():
            fileTempName = fileTempDir.basename()
            if fileTempDir.isdir():
                self._uploads[fileTempName] = {'chunk_list': [], 'origin': 'startup'}
                for chunk in fileTempDir.listdir():
                    if chunk[:6] == 'chunk_':
                        self._uploads[fileTempName]['chunk_list'].append(int(chunk[6:]))
                    else:
                        fileTempDir.child(chunk).remove()
                # if no chunks detected then remove remains completely
                if len(self._uploads[fileTempName]['chunk_list']) == 0:
                    fileTempDir.remove()
                    self._uploads.pop(fileTempName, None)
            else:  # fileTempDir is a file remaining from a single chunk upload
                fileTempDir.remove()

        self.log.debug("Scanned pending uploads: {uploads}", uploads=self._uploads)

    def render_POST(self, request):
        headers = {x.decode('iso-8859-1'): y.decode('iso-8859-1')
                   for x, y in request.getAllHeaders().items()}

        origin = headers['host']

        postFields = cgi.FieldStorage(
            fp=request.content,
            headers=headers,
            environ={"REQUEST_METHOD": "POST"})

        f = self._form_fields

        filename = postFields[f['file_name']].value
        totalSize = int(postFields[f['total_size']].value)
        totalChunks = int(postFields[f['total_chunks']].value)
        chunkSize = int(postFields[f['chunk_size']].value)
        chunkNumber = int(postFields[f['chunk_number']].value)
        fileContent = postFields[f['content']].value

        if 'chunk_extra' in f and f['chunk_extra'] in postFields:
            chunk_extra = json.loads(postFields[f['chunk_extra']].value)
        else:
            chunk_extra = {}

        if 'finish_extra' in f and f['finish_extra'] in postFields:
            finish_extra = json.loads(postFields[f['finish_extra']].value)
        else:
            finish_extra = {}

        fileId = filename

        # # prepare user specific upload areas
        # # NOT YET IMPLEMENTED
        # #
        # if 'auth_id' in f and f['auth_id'] in postFields:
        #     auth_id = postFields[f['auth_id']].value
        #     mydir = os.path.join(self._uploadRoot, auth_id)
        #     my_temp_dir = os.path.join(self._tempDirRoot, auth_id)
        #
        #     # check if auth_id is a valid directory_name
        #     #
        #     if auth_id != auth_id.encode('ascii', 'ignore'):
        #         msg = "The requestor auth_id must be an ascii string."
        #         if self._debug:
        #             log.msg(msg)
        #         # 415 Unsupported Media Type
        #         request.setResponseCode(415, msg)
        #         return msg
        # else:
        #     auth_id = 'anonymous'

        # create user specific folder

        # mydir = self._uploadRoot
        # my_temp_dir = self._tempDirRoot

        # if not os.path.exists(mydir):
        #     os.makedirs(mydir)
        # if not os.path.exists(my_temp_dir):
        #     os.makedirs(my_temp_dir)

        # prepare the on_progress publisher
        if 'on_progress' in f and f['on_progress'] in postFields and self._fileupload_session != {}:
            topic = postFields[f['on_progress']].value

            if 'session' in f and f['session'] in postFields:
                session = int(postFields[f['session']].value)
                publish_options = PublishOptions(eligible=[session])
            else:
                publish_options = None

            def fileupload_publish(payload):
                self._fileupload_session.publish(topic, payload, options=publish_options)
        else:
            def fileupload_publish(payload):
                pass

        # Register upload right at the start to avoid overlapping upload conflicts
        #
        if fileId not in self._uploads:
            self._uploads[fileId] = {'chunk_list': [], 'origin': origin}
            chunk_is_first = True
            self.log.debug('Started upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
                           file_name=fileId, total_size=totalSize, total_chunks=totalChunks, chunk_size=chunkSize, chunk_number=chunkNumber)
        else:
            chunk_is_first = False
            # If the chunks were read at crossbar startup, any client may
            # claim and resume the pending upload!
            #
            upl = self._uploads[fileId]
            if upl['origin'] == 'startup':
                self.log.debug('Will try to resume upload of file: file_name={file_name}, total_size={total_size}, total_chunks={total_chunks}, chunk_size={chunk_size}, chunk_number={chunk_number}',
                               file_name=fileId, total_size=totalSize, total_chunks=totalChunks, chunk_size=chunkSize, chunk_number=chunkNumber)
                upl['origin'] = origin
            else:
                # check if another session is uploading this file already
                #
                if upl['origin'] != origin:
                    msg = "File being uploaded is already uploaded in a different session."
                    self.log.debug(msg)
                    # 409 Conflict
                    request.setResponseCode(409, msg.encode('utf8'))
                    return msg.encode('utf8')
                else:
                    # check if the chunk is already being uploaded in this
                    # very session; this should never happen!
                    if chunkNumber in upl['chunk_list']:
                        msg = "This chunk is already being uploaded."
                        self.log.debug(msg)
                        # Don't respond with a conflict: this may be wanted
                        # behaviour. Even if an upload is resumable, the
                        # client doesn't have to resume it.
                        # 409 Conflict
                        # request.setResponseCode(409, msg.encode('utf8'))
                        # return msg.encode('utf8')

        # check file size
        #
        if totalSize > self._max_file_size:
            msg = "Size {} of file to be uploaded exceeds maximum {}".format(totalSize, self._max_file_size)
            self.log.debug(msg)
            # 413 Request Entity Too Large
            request.setResponseCode(413, msg.encode('utf8'))
            return msg.encode('utf8')

        # check file extensions
        #
        extension = os.path.splitext(filename)[1]
        if self._fileTypes and extension not in self._fileTypes:
            msg = "Type '{}' of file to be uploaded is in allowed types {}".format(extension, self._fileTypes)
            self.log.debug(msg)
            # 415 Unsupported Media Type
            request.setResponseCode(415, msg.encode('utf8'))
            return msg.encode('utf8')

        # TODO: check mime type
        #
        fileTempDir = self._tempDirRoot.child(fileId)
        chunkName = fileTempDir.child('chunk_' + str(chunkNumber))
        _chunkName = fileTempDir.child('#kfhf3kz412uru578e38viokbjhfvz4w__' + 'chunk_' + str(chunkNumber))

        def mergeFile():
            # every chunk has to check if it is the last chunk written, except in a single chunk scenario
            if totalChunks > 1 and len(self._uploads[fileId]['chunk_list']) >= totalChunks:
                # last chunk
                self.log.debug('Finished file upload after chunk {chunk_number} with chunk_list {chunk_list}', chunk_number=chunkNumber, chunk_list=self._uploads)

                # Merge all files into one file and remove the temp files
                # TODO: How to avoid the extra file IO ?
                finalFileName = self._uploadRoot.child(fileId)
                _finalFileName = fileTempDir.child('#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)
                with open(_finalFileName.path, 'wb') as _finalFile:
                    for cn in range(1, totalChunks + 1):
                        with open(fileTempDir.child('chunk_' + str(cn)).path, 'rb') as ff:
                            _finalFile.write(ff.read())

                _finalFileName.moveTo(finalFileName)

                if self._file_permissions:
                    perm = int(self._file_permissions, 8)
                    try:
                        finalFileName.chmod(perm)
                    except Exception as e:
                        msg = "file upload resource - could not change file permissions of uploaded file"
                        self.log.debug(msg)
                        self.log.debug(e)
                        self._uploads.pop(fileId, None)
                        request.setResponseCode(500, msg.encode('utf8'))
                        return msg.encode('utf8')
                    else:
                        self.log.debug("Changed permissions on {file_name} to {permissions}", file_name=finalFileName, permissions=self._file_permissions)

                # remove the file temp folder
                fileTempDir.remove()

                self._uploads.pop(fileId, None)

                # publish file upload progress to file_progress_URI
                fileupload_publish({
                                   "id": fileId,
                                   "chunk": chunkNumber,
                                   "name": filename,
                                   "total": totalSize,
                                   "remaining": 0,
                                   "status": "finished",
                                   "progress": 1.,
                                   "finish_extra": finish_extra,
                                   "chunk_extra": chunk_extra
                                   })

        if chunk_is_first:
            # first chunk of file

            # publish file upload start
            #
            fileupload_publish({
                               "id": fileId,
                               "chunk": chunkNumber,
                               "name": filename,
                               "total": totalSize,
                               "remaining": totalSize,
                               "status": "started",
                               "progress": 0.,
                               "chunk_extra": chunk_extra
                               })

            if totalChunks == 1:
                # only one chunk overall -> write file directly
                finalFileName = self._uploadRoot.child(fileId)
                _finalFileName = self._tempDirRoot.child('#kfhf3kz412uru578e38viokbjhfvz4w__' + fileId)

                with open(_finalFileName.path, 'wb') as _finalFile:
                    _finalFile.write(fileContent)

                if self._file_permissions:
                    perm = int(self._file_permissions, 8)
                    try:
                        _finalFileName.chmod(perm)
                    except Exception as e:
                        # finalFileName.remove()
                        msg = "Could not change file permissions of uploaded file"
                        self.log.debug(msg)
                        self.log.debug(e)
                        request.setResponseCode(500, msg.encode('utf8'))
                        return msg.encode('utf8')
                    else:
                        self.log.debug("Changed permissions on {file_name} to {permissions}", file_name=finalFileName, permissions=self._file_permissions)

                _finalFileName.moveTo(finalFileName)
                if chunkNumber not in self._uploads[fileId]['chunk_list']:
                    self._uploads[fileId]['chunk_list'].append(chunkNumber)

                self._uploads.pop(fileId, None)

                # publish file upload progress to file_progress_URI
                fileupload_publish({
                                   "id": fileId,
                                   "chunk": chunkNumber,
                                   "name": filename,
                                   "total": totalSize,
                                   "remaining": 0,
                                   "status": "finished",
                                   "progress": 1.,
                                   "finish_extra": finish_extra,
                                   "chunk_extra": chunk_extra
                                   })

            else:
                # first of more chunks
                # fileTempDir.remove()  # any potential conflict should have been resolved above. This should not be necessary!
                if not os.path.isdir(fileTempDir.path):
                    fileTempDir.makedirs()

                with open(_chunkName.path, 'wb') as chunk:
                    chunk.write(fileContent)
                _chunkName.moveTo(chunkName)  # atomic file system operation
                self.log.debug('chunk_' + str(chunkNumber) + ' written and moved to ' + chunkName.path)
                # publish file upload progress
                #
                fileupload_publish({
                                   "id": fileId,
                                   "chunk": chunkNumber,
                                   "name": filename,
                                   "total": totalSize,
                                   "remaining": totalSize - chunkSize,
                                   "status": "progress",
                                   "progress": round(float(chunkSize) / float(totalSize), 3),
                                   "chunk_extra": chunk_extra
                                   })
                if chunkNumber not in self._uploads[fileId]['chunk_list']:
                    self._uploads[fileId]['chunk_list'].append(chunkNumber)
                mergeFile()
            # clean the temp dir once per file upload
            self._remove_stale_uploads()

        else:
            # intermediate chunk
            if not os.path.isdir(fileTempDir.path):
                fileTempDir.makedirs()

            with open(_chunkName.path, 'wb') as chunk:
                chunk.write(fileContent)
            _chunkName.moveTo(chunkName)
            self.log.debug('chunk_' + str(chunkNumber) + ' written and moved to ' + chunkName.path)

            if chunkNumber not in self._uploads[fileId]['chunk_list']:
                self._uploads[fileId]['chunk_list'].append(chunkNumber)

            received = sum(fileTempDir.child(f).getsize() for f in fileTempDir.listdir())

            fileupload_publish({
                               "id": fileId,
                               "chunk": chunkNumber,
                               "name": filename,
                               "total": totalSize,
                               "remaining": totalSize - received,
                               "status": "progress",
                               "progress": round(float(received) / float(totalSize), 3),
                               "chunk_extra": chunk_extra
                               })
            mergeFile()
        # no errors encountered -> respond success
        request.setResponseCode(200)
        return b''

    def _remove_stale_uploads(self):
        """
        This only works if there is a temp folder exclusive to crossbar file
        uploads. If the system temp folder is used, crossbar creates a
        "crossbar-uploads" directory there and uses that as the temp folder
        for uploads. Without regular cleanup, an attacker could fill up the
        OS file system.
        """
        for fileTempDir in self._tempDirRoot.children():
            self.log.debug('REMOVE STALE UPLOADS ' + str(fileTempDir.basename()))
            if fileTempDir.isdir() and (fileTempDir.basename()) not in self._uploads:
                fileTempDir.remove()

    def render_GET(self, request):
        """
        This method can be used to check whether a chunk has been uploaded already.
        It returns with HTTP status code `200` if yes and `404` if not.
        The request needs to contain the file identifier and the chunk number to check for.
        """
        for param in ['file_name', 'chunk_number']:
            if self._form_fields[param].encode('iso-8859-1') not in request.args:
                msg = "file upload resource - missing request query parameter '{}', configured from '{}'".format(self._form_fields[param], param)
                self.log.debug(msg)
                # 400 Bad Request
                request.setResponseCode(400, msg.encode('utf8'))
                return msg.encode('utf8')

        file_name = request.args[self._form_fields['file_name'].encode('iso-8859-1')][0].decode('utf8')
        chunk_number = int(request.args[self._form_fields['chunk_number'].encode('iso-8859-1')][0].decode('utf8'))

        # a complete upload will be repeated; an incomplete upload will be resumed
        if file_name in self._uploads and chunk_number in self._uploads[file_name]['chunk_list']:
            self.log.debug("Skipping chunk upload {file_name} of chunk {chunk_number}", file_name=file_name, chunk_number=chunk_number)
            msg = b"chunk of file already uploaded"
            request.setResponseCode(200, msg)
            return msg
        else:
            msg = b"chunk of file not yet uploaded"
            request.setResponseCode(404, msg)
            return msg
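
A client has to send the form fields this resource reads: file name, total
size, chunk count, chunk size, 1-based chunk number, and the chunk content. A
hedged client-side sketch using the requests library (the URL and the
form_fields mapping come from your configuration; nothing here is part of the
resource itself):

import os
import requests  # any HTTP client would do; requests is assumed here

def upload_in_chunks(url, path, fields, chunk_size=1024 * 1024):
    # 'fields' maps logical names ('file_name', 'total_size', ...) to the
    # HTML form field names the FileUploadResource was configured with
    total_size = os.path.getsize(path)
    total_chunks = max(1, (total_size + chunk_size - 1) // chunk_size)
    with open(path, 'rb') as f:
        # chunk numbers start at 1, matching mergeFile's range(1, total + 1)
        for chunk_number in range(1, total_chunks + 1):
            response = requests.post(url, data={
                fields['file_name']: os.path.basename(path),
                fields['total_size']: str(total_size),
                fields['total_chunks']: str(total_chunks),
                fields['chunk_size']: str(chunk_size),
                fields['chunk_number']: str(chunk_number),
                fields['content']: f.read(chunk_size),
            })
            response.raise_for_status()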
Exemplo n.º 48
class Voluminous(object):
    lockFactory = DockerLock

    def __init__(self, directory):
        self._directory = FilePath(directory)
        self._output = []
        self.lock = self.lockFactory()
        self.commitDatabase = JsonCommitDatabase(self._directory)

    def output(self, s):
        self._output.append(s)
        print s

    def getOutput(self):
        return self._output

    def allBranches(self, volume):
        volumePath = self._directory.child(volume)
        branches = volumePath.child("branches").children()
        return [b.basename() for b in branches if b.isdir()]

    def listBranches(self):
        volume = self.volume()
        branches = self.allBranches(volume)
        currentBranch = self.getActiveBranch(volume)
        self.output("\n".join(sorted(
            ("*" if b == currentBranch else " ")
            + " " + b for b in branches)))

    def checkoutBranch(self, branch, create):
        """
        "Check out" a branch, restarting containers in process, creating it
        from current branch HEAD if requested.
        """
        volume = self.volume()
        volumePath = self._directory.child(volume)
        branchPath = volumePath.child("branches").child(branch)
        if create:
            if branchPath.exists():
                self.output("Cannot create existing branch %s" % (branch,))
                return
            else:
                try:
                    HEAD = self._resolveNamedCommitCurrentBranch("HEAD", volume)
                except IndexError:
                    self.output("You must commit ('dvol commit') before you can "
                                "branch ('dvol checkout -b')")
                    return
                # Copy metadata
                meta = self.commitDatabase.read(volume,
                        self.getActiveBranch(volume))
                self.commitDatabase.write(volume, branch, meta)
                # Then copy latest HEAD of branch into new branch data
                # directory
                volumePath.child("commits").child(HEAD).copyTo(branchPath)
        else:
            if not branchPath.exists():
                self.output("Cannot switch to non-existing branch %s" % (branch,))
                return
        # Got here, so switch to the (maybe new) branch
        self.setActiveBranch(volume, branch)

    def createBranch(self, volume, branch):
        branchDir = self._directory.child(volume).child("branches").child(branch)
        branchDir.makedirs()
        self.output("Created branch %s/%s" % (volume, branch))

    def createVolume(self, name):
        if self._directory.child(name).exists():
            self.output("Error: volume %s already exists" % (name,))
            raise VolumeAlreadyExists()
        self._directory.child(name).makedirs()
        self.setActiveVolume(name)
        self.output("Created volume %s" % (name,))
        self.createBranch(name, DEFAULT_BRANCH)

    def removeVolume(self, volume):
        if not self._directory.child(volume).exists():
            raise UsageError("Volume %r does not exist, cannot remove it" %
                    (volume,))
        containers = self.lock.containers.get_related_containers(volume)
        if containers:
            raise UsageError("Cannot remove %r while it is in use by '%s'" %
                    (volume, (",".join(c['Name'] for c in containers))))
        if self._userIsSure():
            self.output("Deleting volume %r" % (volume,))
            self._directory.child(volume).remove()
        else:
            self.output("Aborting.")

    def deleteBranch(self, branch):
        volume = self.volume()
        if branch == self.getActiveBranch(volume):
            raise UsageError("Cannot delete active branch, use "
                             "'dvol checkout' to switch branches first")
        if branch not in self.allBranches(volume):
            raise UsageError("Branch %r does not exist" % (branch,))
        if self._userIsSure():
            self.output("Deleting branch %r" % (branch,))
            volumePath = self._directory.child(volume)
            branchPath = volumePath.child("branches").child(branch)
            branchPath.remove()
        else:
            self.output("Aborting.")

    def _userIsSure(self):
        sys.stdout.write("Are you sure (y/n)? ")
        sys.stdout.flush()
        return raw_input().lower() in ("y", "yes")

    def setActiveVolume(self, volume):
        self._directory.child("current_volume.json").setContent(
            json.dumps(dict(current_volume=volume)))

    def volume(self):
        currentVolume = self._directory.child("current_volume.json")
        if currentVolume.exists():
            volume = json.loads(currentVolume.getContent())["current_volume"]
        else:
            raise UsageError("No active volume: use dvol switch to choose one")
        if not self._directory.child(volume).exists():
            raise UsageError("Active volume %s does not exist: "
                             "use dvol switch to choose another" % (volume,))
        return volume

    def setActiveBranch(self, volume, branch):
        self._directory.child(volume).child(
            "current_branch.json").setContent(
                json.dumps(dict(current_branch=branch)))
        self.lock.acquire(volume)
        try:
            self.updateRunningPoint(volume)
        finally:
            self.lock.release(volume)

    def getActiveBranch(self, volume):
        currentBranch = self._directory.child(volume).child(
            "current_branch.json")
        if currentBranch.exists():
            return json.loads(currentBranch.getContent())["current_branch"]
        else:
            return DEFAULT_BRANCH

    def updateRunningPoint(self, volume):
        """
        Construct a path that stays stable across branch switches by
        symlinking it to the active branch.
        """
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        stablePath = volumePath.child("running_point")
        if stablePath.exists():
            stablePath.remove()
        branchPath.linkTo(stablePath)
        return stablePath.path

    def commitVolume(self, message):
        volume = self.volume()
        commitId = (str(uuid.uuid4()) + str(uuid.uuid4())).replace("-", "")[:40]
        self.output(commitId)
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        commitPath = volumePath.child("commits").child(commitId)
        if commitPath.exists():
            raise Exception("woah, random uuid collision. try again!")
        commitPath.makedirs()
        # acquire lock (read: stop containers) to ensure consistent snapshot
        # with file-copy based backend
        # XXX tests for acquire/release
        self.lock.acquire(volume)
        try:
            branchPath.copyTo(commitPath)
        finally:
            self.lock.release(volume)
        self._recordCommit(volume, branchName, commitId, message)

    def _recordCommit(self, volume, branch, commitId, message):
        commitData = self.commitDatabase.read(volume, branch)
        commitData.append(dict(id=commitId, message=message))
        self.commitDatabase.write(volume, branch, commitData)

    def exists(self, volume):
        volumePath = self._directory.child(volume)
        return volumePath.exists()

    def listVolumes(self):
        table = get_table()
        table.set_cols_align(["l", "l", "l"])
        dc = self.lock.containers # XXX ugly
        volumes = [v for v in self._directory.children() if v.isdir()]
        activeVolume = None
        if volumes:
            try:
                activeVolume = self.volume()
            except UsageError:
                # don't refuse to list volumes just because none of them are active
                pass
        rows = [["", "", ""]] + [
                ["  VOLUME", "BRANCH", "CONTAINERS"]] + [
                [("*" if v.basename() == activeVolume else " ") + " " + v.basename(),
                    self.getActiveBranch(v.basename()),
                    ",".join(c['Name'] for c in dc.get_related_containers(v.basename()))]
                    for v in volumes]
        table.add_rows(rows)
        self.output(table.draw())

    def listCommits(self, branch=None):
        if branch is None:
            branch = self.getActiveBranch(self.volume())
        volume = self.volume()
        aggregate = []
        for commit in reversed(self.commitDatabase.read(volume, branch)):
            # TODO fill in author/date
            aggregate.append(
                "commit %(id)s\n"
                "Author: Who knows <mystery@person>\n"
                "Date: Whenever\n"
                "\n"
                "    %(message)s\n" % commit)
        self.output("\n".join(aggregate))

    def _resolveNamedCommitCurrentBranch(self, commit, volume):
        branch = self.getActiveBranch(volume)
        remainder = commit[len("HEAD"):]
        if remainder == "^" * len(remainder):
            offset = len(remainder)
        else:
            raise UsageError("Malformed commit identifier %r" % (commit,))
        commits = self.commitDatabase.read(volume, branch)
        # commits are appended to, so the last one is the latest
        return commits[-1 - offset]["id"]

    def _destroyNewerCommits(self, commit, volume):
        # TODO in the future, we'll care more about the following being an
        # atomic operation
        branch = self.getActiveBranch(volume)
        commits = self.commitDatabase.read(volume, branch)
        commitIndex = [c["id"] for c in commits].index(commit) + 1
        remainingCommits = commits[:commitIndex]
        destroyCommits = commits[commitIndex:]
        # look in all branches for commit references before removing them
        totalCommits = set()
        for otherBranch in self.allBranches(volume):
            if otherBranch == branch:
                # skip this branch, otherwise we'll never destroy any commits
                continue
            commits = self.commitDatabase.read(volume, otherBranch)
            totalCommits.update(commit["id"] for commit in commits)
        for commit in destroyCommits:
            commitId = commit["id"]
            if commitId in totalCommits:
                # skip destroying this commit; it is still actively referred to
                # in another branch
                continue
            volumePath = self._directory.child(volume)
            commitPath = volumePath.child("commits").child(commitId)
            commitPath.remove()
        self.commitDatabase.write(volume, branch, remainingCommits)

    def resetVolume(self, commit):
        """
        Forcefully roll back the current working copy to this commit,
        destroying any later commits.
        """
        volume = self.volume()
        volumePath = self._directory.child(volume)
        branchName = self.getActiveBranch(volume)
        branchPath = volumePath.child("branches").child(branchName)
        if commit.startswith("HEAD"):
            try:
                commit = self._resolveNamedCommitCurrentBranch(commit, volume)
            except IndexError:
                self.output("Referenced commit does not exist; check dvol log")
                return
        commitPath = volumePath.child("commits").child(commit)
        if not commitPath.exists():
            raise NoSuchCommit("commit '%s' does not exist" % (commit,))
        self.lock.acquire(volume)
        try:
            branchPath.remove()
            commitPath.copyTo(branchPath)
            self._destroyNewerCommits(commit, volume)
        finally:
            self.lock.release(volume)
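
Taken together, a dvol session against this class might look as follows (the
state directory is an assumption, and the real DockerLock needs a reachable
Docker daemon):

dvol = Voluminous('/var/lib/dvol')
dvol.createVolume('mydata')            # activates volume, creates default branch
dvol.commitVolume('initial import')    # prints the new 40-character commit id
dvol.checkoutBranch('experiment', create=True)
dvol.listCommits()                     # git-log-style listing of this branch
dvol.resetVolume('HEAD')               # roll the working copy back to the tip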
Exemplo n.º 49
    verbose = False

    global SCHEMADIR, PGSOCKETDIR, PSQL

    for opt, arg in optargs:
        if opt in ("-h", "--help"):
            usage()
        elif opt in ("-d",):
            SCHEMADIR = arg
        elif opt in ("-k",):
            PGSOCKETDIR = arg
        elif opt in ("-p",):
            PSQL = arg
        elif opt in ("-x",):
            sktdir = FilePath("/var/run/caldavd")
            for skt in sktdir.children():
                if skt.basename().startswith("ccs_postgres_"):
                    PGSOCKETDIR = skt.path
            PSQL = "/Applications/Server.app/Contents/ServerRoot/usr/bin/psql"
            SCHEMADIR = "/Applications/Server.app/Contents/ServerRoot/Library/CalendarServer/lib/python2.7/site-packages/txdav/common/datastore/sql_schema/"
        elif opt in ("-v", "--verbose"):
            verbose = True
        else:
            raise NotImplementedError(opt)

    # Retrieve the db_version number of the installed schema
    try:
        db_version = getSchemaVersion(verbose=verbose)
    except CheckSchemaError, e:
        db_version = 0
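
The optargs pairs iterated above are in getopt's (option, argument) format; a
sketch of the option parsing that presumably sits in front of this fragment
(the option string is inferred from the branches handled):

import getopt
import sys

# -d/-k/-p take arguments; -h/-x/-v do not; --help/--verbose are long forms
optargs, args = getopt.getopt(sys.argv[1:], "hd:k:p:xv", ["help", "verbose"])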
Exemplo n.º 50
class MapUpdater(object):
    def __init__(self, mapsPath, fetchURL, deleteIfNotPresent, tfLevelSounds):
        assert isinstance(mapsPath, str) and len(mapsPath)
        assert isinstance(fetchURL, str) and len(fetchURL)
        self.mapsPath = FilePath(mapsPath)
        self.downloadTempPath = self.mapsPath.child('mapupdater')
        self.fetchURL = URLPath.fromString(fetchURL)
        self.deleteIfNotPresent = deleteIfNotPresent
        self.tfLevelSounds = tfLevelSounds
        self.semaphore = DeferredSemaphore(1)
        self.downloadSemaphore = DeferredSemaphore(4)
        for fp in self.downloadTempPath.globChildren('*.bsp.bz2'):
            fp.remove()

    def checkMaps(self, *a, **kw):
        """
        Wrap self._checkMaps to prevent running multiple checks at once.
        """
        return self.semaphore.run(self._checkMaps, *a, **kw)

    def _checkMaps(self, forceDownloadMaps=None):
        def _cb(remoteMaps):
            if forceDownloadMaps:
                remoteMaps = list(set(remoteMaps + forceDownloadMaps))
            remoteMapsLower = [f.lower() for f in remoteMaps]
            ourMaps = filter(
                lambda p: not p.isdir() and p.path.endswith('.bsp'),
                self.mapsPath.children())
            ourMapFilenames = [p.basename().lower() + '.bz2' for p in ourMaps]

            missing = []
            for f in remoteMaps:
                if f.lower() not in ourMapFilenames:
                    missing.append(f)

            delete = []
            for p in ourMaps:
                filename = p.basename().lower() + '.bz2'
                if filename not in remoteMapsLower:
                    delete.append(p)

            if self.deleteIfNotPresent and delete:
                for fp in delete:
                    fp.remove()

                print 'Deleted {} map(s) not present at remote server:'.format(
                    len(delete))
                print ', '.join([x.basename() for x in delete])

            if missing:
                print 'Fetching {} map(s)'.format(len(missing))

                def _allFinished(ignored):
                    self.mapsPath.child('tempus_map_updater_run_once').touch()
                    if self.tfLevelSounds:
                        self.addLevelSounds(ourMaps)
                    print 'Now up-to-date.'

                ds = []
                for filename in missing:
                    ds.append(self.fetchMap(filename))
                return gatherResults(ds).addCallback(_allFinished)
            elif self.tfLevelSounds:
                self.addLevelSounds(ourMaps)

        return self.getMapList().addCallback(_cb)

    def fetchMap(self, *a, **kw):
        return self.downloadSemaphore.run(self._fetchMap, *a, **kw)

    def _fetchMap(self, filename):
        downloadTempPath = self.downloadTempPath
        if not downloadTempPath.exists():
            downloadTempPath.makedirs()

        def _cb(response, fn):
            tp = downloadTempPath.child(fn)
            fd = tp.open('wb')

            def _extracted(ignored):
                extractedPath = tp.sibling(tp.basename().replace('.bz2', ''))
                extractedPath.moveTo(
                    self.mapsPath.child(tp.basename().replace('.bz2', '')))
                try:
                    tp.remove()
                except OSError:
                    # File already gone
                    pass
                print 'Finished downloading {}'.format(fn)

            def _finished(ignored):
                fd.close()
                d = getProcessOutputAndValue(
                    'aunpack', (tp.path, '-X', downloadTempPath.path))
                d.addErrback(log.err)
                d.addCallback(_extracted)
                return d

            def _eb(failure):
                print 'Error downloading {}:'.format(fn)
                print failure.getTraceback()
                fd.close()
                try:
                    tp.remove()
                except OSError:
                    # File already gone
                    pass

            d = treq.collect(response, fd.write)
            d.addCallback(_finished)
            d.addErrback(_eb)
            return d

        d = treq.get(str(self.fetchURL.child(filename)))
        return d.addCallback(_cb, filename)

    def getMapList(self, forceDownloadMaps=None):
        raise NotImplementedError('Subclasses must override this method.')

    def addLevelSounds(self, mapPaths):
        content = FilePath(
            mapupdater.__file__).sibling('tf_level_sounds.txt').getContent()
        added = []
        for p in mapPaths:
            mapName = p.basename()[:-4]
            p2 = p.sibling('{}_level_sounds.txt'.format(mapName))
            if p2.exists() and p2.getContent() == content:
                continue
            added.append(mapName)
            p2.setContent(content)
        if added:
            print 'Added level sounds for:'
            print ', '.join(added)
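
MapUpdater leaves getMapList abstract; it must return a Deferred that fires
with the list of remote *.bsp.bz2 filenames. A hedged sketch of one possible
subclass that scrapes a plain HTTP index page with treq (the URL layout and
the regex are assumptions):

import re
import treq

class HTTPIndexMapUpdater(MapUpdater):
    """Fetch the remote map list from a plain HTTP index page."""

    def getMapList(self, forceDownloadMaps=None):
        d = treq.get(str(self.fetchURL))
        d.addCallback(treq.content)
        # keep only compressed map files, e.g. "cp_dustbowl.bsp.bz2"
        d.addCallback(lambda body: re.findall(r'[\w\-]+\.bsp\.bz2', body))
        return d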