Example #1
    def buildStore(self, testCase, notifierFactory):
        """
        Do the necessary work to build a store for a particular test case.

        @return: a L{Deferred} which fires with an L{IDataStore}.
        """
        disableMemcacheForTest(testCase)
        dbRoot = CachingFilePath(self.SHARED_DB_PATH)
        attachmentRoot = dbRoot.child("attachments")
        if self.sharedService is None:
            ready = Deferred()
            def getReady(connectionFactory):
                self.makeAndCleanStore(
                    testCase, notifierFactory, attachmentRoot
                ).chainDeferred(ready)
                return Service()
            self.sharedService = self.createService(getReady)
            self.sharedService.startService()
            def startStopping():
                log.msg("Starting stopping.")
                self.sharedService.unpauseMonitor()
                return self.sharedService.stopService()
            reactor.addSystemEventTrigger(
                "before", "shutdown", startStopping)
            result = ready
        else:
            result = self.makeAndCleanStore(
                testCase, notifierFactory, attachmentRoot
            )
        def cleanUp():
            def stopit():
                self.sharedService.pauseMonitor()
            return deferLater(reactor, 0.1, stopit)
        testCase.addCleanup(cleanUp)
        return result
Example #2
    def configure(self):
        """
        Adjust the global configuration for this test.
        """
        self.serverRoot = self.mktemp()
        os.mkdir(self.serverRoot)

        config.reset()

        config.ServerRoot = os.path.abspath(self.serverRoot)
        config.ConfigRoot = "config"
        config.LogRoot = "logs"
        config.RunRoot = "logs"

        if not os.path.exists(config.DataRoot):
            os.makedirs(config.DataRoot)
        if not os.path.exists(config.DocumentRoot):
            os.makedirs(config.DocumentRoot)
        if not os.path.exists(config.ConfigRoot):
            os.makedirs(config.ConfigRoot)
        if not os.path.exists(config.LogRoot):
            os.makedirs(config.LogRoot)

        config.Memcached.Pools.Default.ClientEnabled = False
        config.Memcached.Pools.Default.ServerEnabled = False
        ClientFactory.allowTestCache = True
        memcacher.Memcacher.allowTestCache = True
        memcacher.Memcacher.memoryCacheInstance = None
        config.DirectoryAddressBook.Enabled = False
        config.UsePackageTimezones = True

        accounts = FilePath(config.DataRoot).child("accounts.xml")
        accounts.setContent(xmlFile.getContent())
Example #3
    def test_fileStoreFromPath(self):
        """
        Verify that fileStoreFromPath() will return a CommonDataStore if
        the given path contains either "calendars" or "addressbooks"
        sub-directories.  Otherwise it returns None.
        """

        # No child directories
        docRootPath = CachingFilePath(self.mktemp())
        docRootPath.createDirectory()
        step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
        self.assertEquals(step, None)

        # "calendars" child directory exists
        childPath = docRootPath.child("calendars")
        childPath.createDirectory()
        step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
        self.assertTrue(isinstance(step, CommonDataStore))
        childPath.remove()

        # "addressbooks" child directory exists
        childPath = docRootPath.child("addressbooks")
        childPath.createDirectory()
        step = UpgradeToDatabaseStep.fileStoreFromPath(docRootPath)
        self.assertTrue(isinstance(step, CommonDataStore))
        childPath.remove()
Example #4
    def test_collection_in_calendar(self):
        """
        Verify that making a regular (non-calendar) collection inside a
        calendar collection is forbidden.
        """
        calendar_path, calendar_uri = self.mkdtemp("collection_in_calendar")
        calPath = FilePath(calendar_path)
        calPath.remove()

        def mkcalendar_cb(response):
            response = IResponse(response)

            if response.code != responsecode.CREATED:
                self.fail("MKCALENDAR failed: %s" % (response.code,))

            def mkcol_cb(response):
                response = IResponse(response)

                if response.code != responsecode.FORBIDDEN:
                    self.fail("Incorrect response to nested MKCOL: %s" % (response.code,))

            nested_uri = "/".join([calendar_uri, "nested"])

            request = SimpleRequest(self.site, "MKCOL", nested_uri)
            self.send(request, mkcol_cb)

        request = SimpleRequest(self.site, "MKCALENDAR", calendar_uri)
        return self.send(request, mkcalendar_cb)
Example #5
    def _connectorFor_pg8000(dbmodule, **kwargs):
        """
        Turn properties into pg8000 kwargs
        """
        params = DBAPIParameters(**kwargs)
        dbkwargs = {
            "user": params.user,
            "password": params.password,
            "database": params.database,
        }
        if params.unixsocket:
            dbkwargs["unix_sock"] = params.unixsocket

            # We're using a socket file
            socketFP = CachingFilePath(dbkwargs["unix_sock"])

            if socketFP.isdir():
                # We have been given the directory, not the actual socket file
                socketFP = socketFP.child(".s.PGSQL.{}".format(params.port if params.port else "5432"))
                dbkwargs["unix_sock"] = socketFP.path

            if not socketFP.isSocket():
                raise InternalDataStoreError(
                    "No such socket file: {}".format(socketFP.path)
                )
        else:
            dbkwargs["host"] = params.host
            if params.port:
                dbkwargs["port"] = int(params.port)
        return DBAPIConnector(dbmodule, postgresPreflight, **dbkwargs)
Example #6
    def setUp(self):
        """
        Replace self.site.resource with an appropriately provisioned
        AddressBookHomeFile, and replace self.docroot with a path pointing at that
        file.
        """
        super(AddressBookHomeTestCase, self).setUp()

        fp = FilePath(self.mktemp())
        fp.createDirectory()

        self.createStockDirectoryService()

        # Need a data store
        _newStore = CommonDataStore(fp, None, True, False)

        self.homeProvisioner = DirectoryAddressBookHomeProvisioningResource(
            self.directoryService, "/addressbooks/",
            _newStore
        )
        
        def _defer(user):
            # Commit the transaction
            self.site.resource._associatedTransaction.commit()
            self.docroot = user._newStoreHome._path.path
            
        return self._refreshRoot().addCallback(_defer)
Example #7
    def _connectorFor_pg8000(dbmodule, **kwargs):
        """
        Turn properties into pg8000 kwargs
        """
        params = DBAPIParameters(**kwargs)
        dbkwargs = {
            "user": params.user,
            "password": params.password,
            "database": params.database,
        }
        if params.ssl:
            dbkwargs["ssl"] = params.ssl
        if params.unixsocket:
            dbkwargs["unix_sock"] = params.unixsocket

            # We're using a socket file
            socketFP = CachingFilePath(dbkwargs["unix_sock"])

            if socketFP.isdir():
                # We have been given the directory, not the actual socket file
                socketFP = socketFP.child(".s.PGSQL.{}".format(params.port if params.port else "5432"))
                dbkwargs["unix_sock"] = socketFP.path

            if not socketFP.isSocket():
                raise InternalDataStoreError(
                    "No such socket file: {}".format(socketFP.path)
                )
        else:
            dbkwargs["host"] = params.host
            if params.port:
                dbkwargs["port"] = int(params.port)
        if "txnTimeoutSeconds" in kwargs:
            dbkwargs["txnTimeoutSeconds"] = kwargs["txnTimeoutSeconds"]
        return DBAPIConnector(dbmodule, pg8000Preflight, **dbkwargs)
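The subtle part of both connector variants above is the socket handling: pg8000's "unix_sock" argument must name the socket file itself, not the directory containing it. A standalone sketch of the same resolution using only the standard library (the function name is ours, not part of the examples):

    import os
    import stat

    def resolvePostgresSocket(unixsocket, port=None):
        # If given the socket directory, append the conventional
        # PostgreSQL socket file name for the port (default 5432).
        path = unixsocket
        if os.path.isdir(path):
            path = os.path.join(path, ".s.PGSQL.{}".format(port if port else "5432"))
        # Mirror the isSocket() check above: missing or non-socket paths fail.
        if not os.path.exists(path) or not stat.S_ISSOCK(os.stat(path).st_mode):
            raise ValueError("No such socket file: {}".format(path))
        return path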
Example #8
 def setUp(self):
     tempDir = FilePath(self.mktemp())
     tempDir.makedirs()
     tempFile = tempDir.child("test")
     tempFile.touch()
     self.propertyStore = self.propertyStore1 = PropertyStore("user01", "user01", lambda : tempFile)
     self.propertyStore2 = PropertyStore("user02", "user01", lambda : tempFile)
Example #9
 def setUp(self):
     """
     Create a L{CachingFilePath} for the test to use.
     """
     self.cfp = CachingFilePath(self.mktemp())
     self.clock = Clock()
     self.cfp._sleep = self.clock.advance
Example #10
 def setUp(self):
     """
     Set up the test case to set the base attributes to point at
     L{AbstractFilePathTestCase}.
     """
     FilePathTestCase.setUp(self)
     self.root = CachingFilePath(self.root.path)
     self.path = CachingFilePath(self.path.path)
Example #11
    def __init__(self, params, alwaysStat=False):

        defaults = {
            'xmlFile' : None,
            'directoryBackedAddressBook': None,
            'recordTypes' : (
                self.recordType_users,
                self.recordType_groups,
                self.recordType_locations,
                self.recordType_resources,
            ),
            'cacheTimeout' : 30,
            'realmName' : '/Search',
        }
        ignored = None
        params = self.getParams(params, defaults, ignored)

        self._recordTypes = params['recordTypes']
        self.realmName = params['realmName']

        super(XMLDirectoryService, self).__init__(params['cacheTimeout'])

        xmlFile = fullServerPath(config.DataRoot, params.get("xmlFile"))
        if type(xmlFile) is str:
            xmlFile = FilePath(xmlFile)

        if not xmlFile.exists():
            xmlFile.setContent("""<?xml version="1.0" encoding="utf-8"?>

<accounts realm="%s">
</accounts>
""" % (self.realmName,))

        uid = -1
        if config.UserName:
            try:
                uid = pwd.getpwnam(config.UserName).pw_uid
            except KeyError:
                self.log_error("User not found: %s" % (config.UserName,))

        gid = -1
        if config.GroupName:
            try:
                gid = grp.getgrnam(config.GroupName).gr_gid
            except KeyError:
                self.log_error("Group not found: %s" % (config.GroupName,))

        if uid != -1 and gid != -1:
            os.chown(xmlFile.path, uid, gid)


        self.xmlFile = xmlFile
        self._fileInfo = None
        self._lastCheck = 0
        self._alwaysStat = alwaysStat
        self.directoryBackedAddressBook = params.get('directoryBackedAddressBook')

        self._accounts()
Example #12
    def test_copy(self):

        tempDir = FilePath(self.mktemp())
        tempDir.makedirs()
        tempFile1 = tempDir.child("test1")
        tempFile1.touch()
        tempFile2 = tempDir.child("test2")
        tempFile2.touch()

        # Existing store
        store1_user1 = PropertyStore("user01", lambda: tempFile1)
        store1_user2 = PropertyStore("user01", lambda: tempFile1)
        store1_user2._setPerUserUID("user02")

        # New store
        store2_user1 = PropertyStore("user01", lambda: tempFile2)
        store2_user2 = PropertyStore("user01", lambda: tempFile2)
        store2_user2._setPerUserUID("user02")

        # Populate current store with data
        class DummyProperty1(WebDAVTextElement):
            namespace = "http://calendarserver.org/ns/"
            name = "dummy1"

        class DummyProperty2(WebDAVTextElement):
            namespace = "http://calendarserver.org/ns/"
            name = "dummy2"

        class DummyProperty3(WebDAVTextElement):
            namespace = "http://calendarserver.org/ns/"
            name = "dummy3"

        props_user1 = (
            DummyProperty1.fromString("value1-user1"),
            DummyProperty2.fromString("value2-user1"),
        )
        props_user2 = (
            DummyProperty1.fromString("value1-user2"),
            DummyProperty3.fromString("value3-user2"),
        )

        for prop in props_user1:
            store1_user1[PropertyName.fromElement(prop)] = prop
        for prop in props_user2:
            store1_user2[PropertyName.fromElement(prop)] = prop
        store1_user1.flush()
        store1_user2.flush()

        # Do copy and check results
        store2_user1.copyAllProperties(store1_user1)
        store2_user1.flush()

        self.assertEqual(store1_user1.attrs.items(),
                         store2_user1.attrs.items())
        self.assertEqual(store1_user2.attrs.items(),
                         store2_user2.attrs.items())
Example #13
 def test_shouldReparse(self):
     """
     Verify that a change to the file will get noticed
     """
     newxmlfile = FilePath(self.mktemp())
     FilePath(xmlFile).copyTo(newxmlfile)
     db = AugmentXMLDB((newxmlfile.path,))
     self.assertFalse(db._shouldReparse([newxmlfile.path]))  # No need to parse
     newxmlfile.setContent("")  # Change the file
     self.assertTrue(db._shouldReparse([newxmlfile.path]))  # Need to parse
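The test above exercises AugmentXMLDB's change detection: the database only reparses its source files when one of them changes on disk. A minimal standalone sketch of that pattern, assuming changes are detected by comparing modification times (the class and method names here are illustrative, not the real API):

    import os

    class ReparseTracker(object):
        # Remember each file's mtime; report True once any tracked
        # file has changed since the last check.
        def __init__(self, paths):
            self._mtimes = dict((p, os.path.getmtime(p)) for p in paths)

        def shouldReparse(self, paths):
            changed = False
            for p in paths:
                current = os.path.getmtime(p)
                if current != self._mtimes.get(p):
                    self._mtimes[p] = current
                    changed = True
            return changed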
Example #16
 def test_parseNonASCIIConfig(self):
     """
     Non-ASCII <string>s found as part of a configuration file will be
     retrieved as UTF-8 encoded 'str' objects, as parsed by
     L{NoUnicodePlistParser}.
     """
     cfg = Config(PListConfigProvider({"DataRoot": ""}))
     tempfile = FilePath(self.mktemp())
     tempfile.setContent(nonASCIIConfigPList)
     cfg.load(tempfile.path)
     self.assertEquals(cfg.DataRoot, nonASCIIValue)
Example #18
    def setUp(self):
        yield super(GroupShareeTestBase, self).setUp()

        accountsFilePath = FilePath(
            os.path.join(os.path.dirname(__file__), "accounts")
        )
        yield self.buildStoreAndDirectory(
            accounts=accountsFilePath.child("groupAccounts.xml"),
        )
        yield self.populate()

        self.paths = {}
Example #20
    def test_startService_withDumpFile(self):
        """
        Assuming a properly configured environment ($PATH points at an 'initdb'
        and 'postgres', $PYTHONPATH includes pgdb), starting a
        L{PostgresService} will start the service passed to it, after importing
        an existing dump file.
        """

        test = self
        class SimpleService1(Service):

            instances = []
            ready = Deferred()

            def __init__(self, connectionFactory, storageService):
                self.connection = connectionFactory()
                test.addCleanup(self.connection.close)
                self.instances.append(self)


            def startService(self):
                cursor = self.connection.cursor()
                try:
                    cursor.execute(
                        "insert into import_test_table values ('value2')"
                    )
                except:
                    self.ready.errback()
                else:
                    self.ready.callback(None)
                finally:
                    cursor.close()

        # The SQL in importFile.sql will get executed, including the insertion of "value1"
        importFileName = CachingFilePath(__file__).parent().child("importFile.sql").path
        svc = PostgresService(
            CachingFilePath("postgres_3.pgdb"),
            SimpleService1,
            "",
            databaseName="dummy_db",
            testMode=True,
            importFileName=importFileName
        )
        svc.startService()
        self.addCleanup(svc.stopService)
        yield SimpleService1.ready
        connection = SimpleService1.instances[0].connection
        cursor = connection.cursor()
        cursor.execute("select * from import_test_table")
        values = cursor.fetchall()
        self.assertEquals(values, [["value1"], ["value2"]])
Example #21
    def buildStore(
        self,
        testCase,
        notifierFactory,
        directoryService=None,
        homes=None,
        enableJobProcessing=True,
    ):
        """
        Do the necessary work to build a store for a particular test case.

        @return: a L{Deferred} which fires with an L{IDataStore}.
        """
        disableMemcacheForTest(testCase)
        dbRoot = FilePath(self.sharedDBPath)
        attachmentRoot = dbRoot.child("attachments")
        # The directory will be given to us later via setDirectoryService
        if self.sharedService is None:
            ready = Deferred()

            def getReady(connectionFactory, storageService):
                self.makeAndCleanStore(
                    testCase, notifierFactory, directoryService,
                    attachmentRoot, enableJobProcessing).chainDeferred(ready)
                return Service()

            self.sharedService = self.createService(getReady)
            self.sharedService.startService()

            def startStopping():
                log.info("Starting stopping.")
                self.sharedService.unpauseMonitor()
                return self.sharedService.stopService()

            reactor.addSystemEventTrigger("before", "shutdown", startStopping)
            result = ready
        else:
            result = self.makeAndCleanStore(testCase, notifierFactory,
                                            directoryService, attachmentRoot,
                                            enableJobProcessing)

        def cleanUp():
            def stopit():
                self.sharedService.pauseMonitor()

            return deferLater(reactor, 0.1, stopit)

        testCase.addCleanup(cleanUp)
        return result
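Note the shutdown choreography in buildStore: each test's cleanup only pauses the shared service's monitor, so the expensive-to-start service can be reused by later tests, and the real stopService happens exactly once, in the "before shutdown" reactor trigger. A condensed restatement of just that cleanup pattern, reusing names from the example above (the helper function itself is ours):

    from twisted.internet import reactor
    from twisted.internet.task import deferLater

    def shareServiceAcrossTests(testCase, sharedService):
        # Pause rather than stop: a later test unpauses and reuses the
        # service; only the reactor shutdown trigger stops it for good.
        def cleanUp():
            def stopit():
                sharedService.pauseMonitor()
            return deferLater(reactor, 0.1, stopit)
        testCase.addCleanup(cleanUp)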
Example #23
 def setUp(self):
     tempDir = FilePath(self.mktemp())
     tempDir.makedirs()
     tempFile = tempDir.child("test")
     tempFile.touch()
     self.propertyStore = PropertyStore("user01", lambda: tempFile)
     self.propertyStore1 = self.propertyStore
     self.propertyStore2 = PropertyStore("user01", lambda: tempFile)
     self.propertyStore2._setPerUserUID("user02")
     self.propertyStore2._setProxyUID("user02")
     self.propertyStore3 = PropertyStore("user01", lambda: tempFile)
     self.propertyStore3._setProxyUID("user03")
     self.propertyStore4 = PropertyStore("user01", lambda: tempFile)
     self.propertyStore4._setPerUserUID("user02")
     self.propertyStore4._setProxyUID("user04")
Example #24
 def test_quitAfterUpgradeStep(self):
     triggerFileName = "stop_after_upgrade"
     triggerFile = FilePath(triggerFileName)
     self.pps.addStep(StepOne(self._record, False)).addStep(
         StepTwo(self._record, False)).addStep(
             QuitAfterUpgradeStep(triggerFile.path,
                                  reactor=self.clock)).addStep(
                                      StepFour(self._record, True))
     triggerFile.setContent("")
     self.pps.startService()
     self.assertEquals(self.history, [
         'one success', 'two success', 'four failure',
         ('serviceCreator', None, 'storageService')
     ])
     self.assertFalse(triggerFile.exists())
Example #25
    def ready(self, createDatabaseConn, createDatabaseCursor):
        """
        Subprocess is ready.  Time to initialize the subservice.
        If the database has not been created and there is a dump file,
        then the dump file is imported.
        """

        if self.resetSchema:
            try:
                createDatabaseCursor.execute(
                    "drop database {}".format(self.databaseName)
                )
            except pgdb.DatabaseError:
                pass

        try:
            createDatabaseCursor.execute(
                "create database {} with encoding 'UTF8'"
                .format(self.databaseName)
            )
        except:
            # database already exists
            executeSQL = False
        else:
            # database does not yet exist; if dump file exists, execute it,
            # otherwise execute schema
            executeSQL = True
            sqlToExecute = self.schema
            if self.importFileName:
                importFilePath = CachingFilePath(self.importFileName)
                if importFilePath.exists():
                    sqlToExecute = importFilePath.getContent()

        createDatabaseCursor.close()
        createDatabaseConn.close()

        if executeSQL:
            connection = self.produceConnection()
            cursor = connection.cursor()
            cursor.execute(sqlToExecute)
            connection.commit()
            connection.close()

        if self.shutdownDeferred is None:
            # Only continue startup if we've not begun shutdown
            self.subServiceFactory(
                self.produceConnection, self
            ).setServiceParent(self)
Example #29
    def __init__(self,
                 path,
                 defaultType="text/plain",
                 ignoredExts=(),
                 processors=None,
                 indexNames=None):
        """Create a file with the given path.
        """
        super(File, self).__init__()

        self.putChildren = {}
        if isinstance(path, FilePath):
            self.fp = path
        else:
            assert isinstance(path, str), "This should be a string."
            self.fp = FilePath(path)
        # Remove the dots from the path to split
        self.defaultType = defaultType
        self.ignoredExts = list(ignoredExts)
        if processors is not None:
            self.processors = dict([(key.lower(), value)
                                    for key, value in processors.items()])

        if indexNames is not None:
            self.indexNames = indexNames
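A hedged usage sketch of the constructor above: both a plain string and a FilePath are accepted for path, and processors keys are lower-cased on the way in. processScript here is a hypothetical processor callable, not something the example defines:

    from twisted.python.filepath import FilePath

    def processScript(request, path):
        # Hypothetical stand-in for a real processor.
        raise NotImplementedError()

    siteRoot = File(
        "/var/www/htdocs",
        defaultType="application/octet-stream",
        ignoredExts=(".bak",),
        processors={".RPY": processScript},  # stored under ".rpy"
    )
    mirror = File(FilePath("/var/www/htdocs"))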
Example #30
    def setUp(self):
        """
        Set up two stores to migrate between.
        """
        # Add some files to the file store.

        self.filesPath = CachingFilePath(self.mktemp())
        self.filesPath.createDirectory()
        fileStore = self.fileStore = CommonDataStore(
            self.filesPath, {"push": StubNotifierFactory()}, TestStoreDirectoryService(), True, True
        )
        self.sqlStore = yield theStoreBuilder.buildStore(
            self, StubNotifierFactory()
        )
        self.upgrader = UpgradeToDatabaseStep(self.fileStore, self.sqlStore)

        requirements = CommonTests.requirements
        extras = deriveValue(self, "extraRequirements", lambda t: {})
        requirements = self.mergeRequirements(requirements, extras)

        yield populateCalendarsFrom(requirements, fileStore)
        md5s = CommonTests.md5s
        yield resetCalendarMD5s(md5s, fileStore)
        self.filesPath.child("calendars").child(
            "__uids__").child("ho").child("me").child("home1").child(
            ".some-extra-data").setContent("some extra data")

        requirements = ABCommonTests.requirements
        yield populateAddressBooksFrom(requirements, fileStore)
        md5s = ABCommonTests.md5s
        yield resetAddressBookMD5s(md5s, fileStore)
        self.filesPath.child("addressbooks").child(
            "__uids__").child("ho").child("me").child("home1").child(
            ".some-extra-data").setContent("some extra data")

        # Add some properties we want to check get migrated over
        txn = self.fileStore.newTransaction()
        home = yield txn.calendarHomeWithUID("home_defaults")

        cal = yield home.calendarWithName("calendar_1")
        props = cal.properties()
        props[PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet)] = caldavxml.SupportedCalendarComponentSet(
            caldavxml.CalendarComponent(name="VEVENT"),
            caldavxml.CalendarComponent(name="VTODO"),
        )
        props[PropertyName.fromElement(element.ResourceType)] = element.ResourceType(
            element.Collection(),
            caldavxml.Calendar(),
        )
        props[PropertyName.fromElement(customxml.GETCTag)] = customxml.GETCTag.fromString("foobar")

        inbox = yield home.calendarWithName("inbox")
        props = inbox.properties()
        props[PropertyName.fromElement(customxml.CalendarAvailability)] = customxml.CalendarAvailability.fromString(str(self.av1))
        props[PropertyName.fromElement(caldavxml.ScheduleDefaultCalendarURL)] = caldavxml.ScheduleDefaultCalendarURL(
            element.HRef.fromString("/calendars/__uids__/home_defaults/calendar_1"),
        )

        yield txn.commit()
Example #31
    def setUp(self):
        """
        Set up two stores to migrate between.
        """
        # Add some files to the file store.

        self.filesPath = CachingFilePath(self.mktemp())
        self.filesPath.createDirectory()
        fileStore = self.fileStore = CommonDataStore(
            self.filesPath, StubNotifierFactory(), True, True
        )
        self.sqlStore = yield theStoreBuilder.buildStore(
            self, StubNotifierFactory()
        )
        subStarted = self.subStarted = Deferred()
        class StubService(Service, object):
            def startService(self):
                super(StubService, self).startService()
                if not subStarted.called:
                    subStarted.callback(None)
        from twisted.python import log
        def justOnce(evt):
            if evt.get('isError') and not hasattr(subStarted, 'result'):
                subStarted.errback(
                    evt.get('failure',
                            RuntimeError("error starting up (see log)"))
                )
        log.addObserver(justOnce)
        def cleanObserver():
            try:
                log.removeObserver(justOnce)
            except ValueError:
                pass # x not in list, I don't care.
        self.addCleanup(cleanObserver)
        self.stubService = StubService()
        self.topService = MultiService()
        self.upgrader = self.createUpgradeService()
        self.upgrader.setServiceParent(self.topService)

        requirements = CommonTests.requirements
        extras = deriveValue(self, "extraRequirements", lambda t: {})
        requirements = self.mergeRequirements(requirements, extras)

        yield populateCalendarsFrom(requirements, fileStore)
        md5s = CommonTests.md5s
        yield resetCalendarMD5s(md5s, fileStore)
        self.filesPath.child("calendars").child(
            "__uids__").child("ho").child("me").child("home1").child(
            ".some-extra-data").setContent("some extra data")

        requirements = ABCommonTests.requirements
        yield populateAddressBooksFrom(requirements, fileStore)
        md5s = ABCommonTests.md5s
        yield resetAddressBookMD5s(md5s, fileStore)
        self.filesPath.child("addressbooks").child(
            "__uids__").child("ho").child("me").child("home1").child(
            ".some-extra-data").setContent("some extra data")
Example #33
 def setUp(self):
     """
     Create a resource and a xattr property store for it.
     """
     self.resourcePath = FilePath(self.mktemp())
     self.resourcePath.setContent("")
     self.attrs = xattr(self.resourcePath.path)
     self.resource = DAVFile(self.resourcePath.path)
     self.propertyStore = xattrPropertyStore(self.resource)
Example #37
    def test_retryLoop(self):
        """
        L{CachingFilePath} should catch C{EINVAL} and respond by retrying the
        C{listdir} operation until it succeeds.
        """
        calls = []

        def raiseEINVAL(dirname):
            calls.append(dirname)
            if len(calls) < 5:
                raise OSError(EINVAL, "This should be caught by the test.")
            return ['a', 'b', 'c']
        self.cfp._listdir = raiseEINVAL
        self.assertEquals(self.cfp.listdir(), ['a', 'b', 'c'])
        self.assertEquals(self.cfp.children(), [
            CachingFilePath(pathjoin(self.cfp.path, 'a')),
            CachingFilePath(pathjoin(self.cfp.path, 'b')),
            CachingFilePath(pathjoin(self.cfp.path, 'c')),
        ])
Example #38
    def _test_file_in_calendar(self, what, *work):
        """
        Create a calendar collection, then PUT a resource into it from each
        given stream, verifying that each PUT's response code matches the
        expected one.
        """
        calendar_path, calendar_uri = self.mkdtemp("calendar")
        calPath = FilePath(calendar_path)
        calPath.remove()

        def mkcalendar_cb(response):
            response = IResponse(response)

            if response.code != responsecode.CREATED:
                self.fail("MKCALENDAR failed: %s" % (response.code,))

            if not calPath.isdir():
                self.fail("MKCALENDAR did not create a collection")

            ds = []
            c = 0

            for stream, response_code in work:
                def put_cb(response, stream=stream, response_code=response_code):
                    response = IResponse(response)

                    if response.code != response_code:
                        self.fail("Incorrect response to %s: %s (!= %s)" % (what, response.code, response_code))

                dst_uri = "/".join([calendar_uri, "dst%d.ics" % (c,)])
                request = SimpleRequest(self.site, "PUT", dst_uri)
                request.headers.setHeader("if-none-match", "*")
                request.headers.setHeader("content-type", MimeType("text", "calendar"))
                request.stream = stream
                ds.append(self.send(request, put_cb))

                c += 1

            return DeferredList(ds)

        request = SimpleRequest(self.site, "MKCALENDAR", calendar_uri)
        return self.send(request, mkcalendar_cb)
Example #40
 def createStockDirectoryService(self):
     """
     Create a stock C{directoryService} attribute and assign it.
     """
     self.xmlFile = FilePath(config.DataRoot).child("accounts.xml")
     self.xmlFile.setContent(xmlFile.getContent())
     self.directoryFixture.addDirectoryService(XMLDirectoryService({
         "xmlFile": "accounts.xml",
         "augmentService":
             augment.AugmentXMLDB(xmlFiles=(augmentsFile.path,)),
     }))
Example #41
 def test_relativeDefaultPaths(self):
     """
     The paths specified in the default configuration should be interpreted
     as relative to the paths specified in the configuration file.
     """
     cfg = Config(PListConfigProvider(
         {"AccountingLogRoot": "some-path",
          "LogRoot": "should-be-ignored"}))
     cfg.addPostUpdateHooks([_updateDataStore])
     tempfile = FilePath(self.mktemp())
     tempfile.setContent("<plist version='1.0'><dict>"
                         "<key>LogRoot</key><string>/some/root</string>"
                         "</dict></plist>")
     cfg.load(tempfile.path)
     self.assertEquals(cfg.AccountingLogRoot, "/some/root/some-path")
     tempfile.setContent("<plist version='1.0'><dict>"
                         "<key>LogRoot</key><string>/other/root</string>"
                         "</dict></plist>")
     cfg.load(tempfile.path)
     self.assertEquals(cfg.AccountingLogRoot, "/other/root/some-path")
Example #42
 def doDirectoryTest(self, addedNames, modify=lambda x: None,
                     expectedNames=None):
     """
     Do a test of a L{DAVFile} pointed at a directory, verifying that files
     existing with the given names will be faithfully 'played back' via HTML
     rendering.
     """
     if expectedNames is None:
         expectedNames = addedNames
     fp = FilePath(self.mktemp())
     fp.createDirectory()
     for sampleName in expectedNames:
         fp.child(sampleName).touch()
     df = DAVFile(fp)
     modify(df)
     responseText = (yield df.render(SimpleFakeRequest('/'))).stream.read()
     responseXML = browserHTML2ETree(responseText)
     names = set([element.text.encode("utf-8")
                  for element in responseXML.findall(".//a")])
     self.assertEquals(set(expectedNames), names)
Example #46
    def setUp(self):
        super(ModificationTestCase, self).setUp()

        testRoot = os.path.join(os.path.dirname(__file__), "modify")
        #configFileName = os.path.join(testRoot, "caldavd.plist")
        #config.load(configFileName)

        usersFile = os.path.join(testRoot, "users-groups.xml")
        config.DirectoryService.params.xmlFile = usersFile

        # Copy xml file containing locations/resources to a temp file because
        # we're going to be modifying it during testing

        origResourcesFile = FilePath(os.path.join(os.path.dirname(__file__),
            "modify", "resources-locations.xml"))
        copyResourcesFile = FilePath(self.mktemp())
        origResourcesFile.copyTo(copyResourcesFile)
        config.ResourceService.params.xmlFile = copyResourcesFile
        config.ResourceService.Enabled = True

        augmentsFile = os.path.join(testRoot, "augments.xml")
        config.AugmentService.params.xmlFiles = (augmentsFile,)
Example #48
 def configure(self, filename, appropriateStoreClass, merge):
     subsvc = None
     from txdav.common.datastore.file import CommonDataStore as FileStore
     self.upgrader = UpgradeToDatabaseStep(FileStore(
         CachingFilePath(filename),
         None,
         None,
         True,
         True,
         propertyStoreClass=namedAny(appropriateStoreClass)),
                                           self.store,
                                           subsvc,
                                           merge=merge)
     return {}
Example #49
    def test_startService_Socket(self):
        """
        Assuming a properly configured environment ($PATH points at an 'initdb'
        and 'postgres', $PYTHONPATH includes pgdb), starting a
        L{PostgresService} will start the service passed to it, after executing
        the schema.
        """

        test = self

        class SimpleService2(Service):

            instances = []
            ready = Deferred()

            def __init__(self, connectionFactory, storageService):
                self.connection = connectionFactory()
                test.addCleanup(self.connection.close)
                self.instances.append(self)

            def startService(self):
                cursor = self.connection.cursor()
                try:
                    cursor.execute(
                        "insert into test_dummy_table values ('dummy')"
                    )
                except:
                    self.ready.errback()
                else:
                    self.ready.callback(None)
                finally:
                    cursor.close()

        svc = PostgresService(
            CachingFilePath("postgres_2.pgdb"),
            SimpleService2,
            "create table TEST_DUMMY_TABLE (stub varchar)",
            databaseName="dummy_db",
            listenAddresses=['127.0.0.1', ],
            testMode=True
        )
        svc.startService()
        self.addCleanup(svc.stopService)
        yield SimpleService2.ready
        connection = SimpleService2.instances[0].connection
        cursor = connection.cursor()
        cursor.execute("select * from test_dummy_table")
        values = cursor.fetchall()
        self.assertEquals(map(list, values), [["dummy"]])
Example #50
    def test_fail_dot_file_put_in_calendar(self):
        """
        Verify that a PUT of a dot-file resource into a calendar collection
        is forbidden.
        """
        calendar_path, calendar_uri = self.mkdtemp("dot_file_in_calendar")
        calPath = FilePath(calendar_path)
        calPath.remove()

        def mkcalendar_cb(response):
            response = IResponse(response)

            if response.code != responsecode.CREATED:
                self.fail("MKCALENDAR failed: %s" % (response.code,))

            def put_cb(response):
                response = IResponse(response)

                if response.code != responsecode.FORBIDDEN:
                    self.fail("Incorrect response to dot file PUT: %s" % (response.code,))

            stream = self.dataPath.child(
                "Holidays").child(
                "C318AA54-1ED0-11D9-A5E0-000A958A3252.ics"
            ).open()
            try:
                calendar = str(Component.fromStream(stream))
            finally:
                stream.close()

            event_uri = "/".join([calendar_uri, ".event.ics"])

            request = SimpleRequest(self.site, "PUT", event_uri)
            request.headers.setHeader("content-type", MimeType("text", "calendar"))
            request.stream = MemoryStream(calendar)
            self.send(request, put_cb)

        request = SimpleRequest(self.site, "MKCALENDAR", calendar_uri)
        return self.send(request, mkcalendar_cb)
Example #51
    def test_includes(self):

        plist1 = """
<plist version="1.0">
  <dict>
    <key>ServerRoot</key>
    <string>/root</string>
    <key>DocumentRoot</key>
    <string>defaultdoc</string>
    <key>DataRoot</key>
    <string>defaultdata</string>
    <key>ConfigRoot</key>
    <string>defaultconfig</string>
    <key>LogRoot</key>
    <string>defaultlog</string>
    <key>RunRoot</key>
    <string>defaultrun</string>
    <key>Includes</key>
    <array>
        <string>%s</string>
    </array>
  </dict>
</plist>
"""

        plist2 = """
<plist version="1.0">
  <dict>
    <key>DataRoot</key>
    <string>overridedata</string>
  </dict>
</plist>
"""

        tempfile2 = FilePath(self.mktemp())
        tempfile2.setContent(plist2)

        tempfile1 = FilePath(self.mktemp())
        tempfile1.setContent(plist1 % (tempfile2.path, ))

        cfg = Config(
            PListConfigProvider({
                "ServerRoot": "",
                "DocumentRoot": "",
                "DataRoot": "",
                "ConfigRoot": "",
                "LogRoot": "",
                "RunRoot": "",
                "Includes": [],
            }))
        cfg.addPostUpdateHooks([_updateDataStore])
        cfg.load(tempfile1.path)
        self.assertEquals(cfg.DocumentRoot, "/root/overridedata/defaultdoc")
        self.assertEquals(cfg.DataRoot, "/root/overridedata")
Example #53
    def __init__(self, dataStoreDirectory, subServiceFactory,
                 schema, databaseName='subpostgres', resetSchema=False,
                 logFile="postgres.log", testMode=False,
                 uid=None, gid=None):
        """
        Initialize a L{PostgresService} pointed at a data store directory.

        @param dataStoreDirectory: the directory in which to store the
            database files.
        @type dataStoreDirectory: L{twext.python.filepath.CachingFilePath}

        @param subServiceFactory: a 1-arg callable that will be called with a
            1-arg callable which returns a DB-API cursor.
        @type subServiceFactory: C{callable}
        """
        MultiService.__init__(self)
        self.subServiceFactory = subServiceFactory
        self.dataStoreDirectory = dataStoreDirectory
        self.resetSchema = resetSchema

        if os.getuid() == 0:
            socketRoot = "/var/run"
        else:
            socketRoot = "/tmp"
        self.socketDir = CachingFilePath("%s/ccs_postgres_%s/" %
            (socketRoot, md5(dataStoreDirectory.path).hexdigest()))
        self.databaseName = databaseName
        self.logFile = logFile
        self.uid = uid
        self.gid = gid
        self.schema = schema
        self.monitor = None
        self.openConnections = []

        # FIXME: By default there is very little (4MB) shared memory available,
        # so at the moment I am lowering these postgres config options to allow
        # multiple servers to run.  We might want to look into raising
        # kern.sysv.shmmax.
        # See: http://www.postgresql.org/docs/8.4/static/kernel-resources.html
        if testMode:
            self.sharedBuffers = 16
            self.maxConnections = 2
        else:
            self.sharedBuffers = 30
            self.maxConnections = 20
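The socket directory above is derived deterministically from the data store path, so two L{PostgresService} instances pointed at different directories never collide on a socket, while repeated runs against the same directory reuse the same location. Just that derivation, restated with the standard library (the function name is ours):

    import os
    from hashlib import md5

    def postgresSocketDir(dataStorePath):
        # Per-user socket root plus an md5 of the data store path,
        # exactly as in __init__ above (Python 2 str paths assumed).
        socketRoot = "/var/run" if os.getuid() == 0 else "/tmp"
        return "%s/ccs_postgres_%s/" % (
            socketRoot, md5(dataStorePath).hexdigest())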
Example #54
def pgServiceFromConfig(config, subServiceFactory, uid=None, gid=None):
    """
    Construct a L{PostgresService} from a given configuration and subservice.

    @param config: the configuration to derive postgres configuration
        parameters from.

    @param subServiceFactory: A factory for the service to start once the
        L{PostgresService} has been initialized.

    @param uid: The user-ID to run the PostgreSQL server as.

    @param gid: The group-ID to run the PostgreSQL server as.

    @return: a service which can start postgres.

    @rtype: L{PostgresService}
    """
    dbRoot = CachingFilePath(config.DatabaseRoot)
    # Construct a PostgresService exactly as the parent would, so that we
    # can establish connection information.
    return PostgresService(
        dbRoot, subServiceFactory, current_sql_schema,
        databaseName=config.Postgres.DatabaseName,
        clusterName=config.Postgres.ClusterName,
        logFile=config.Postgres.LogFile,
        logDirectory=config.LogRoot if config.Postgres.LogRotation else "",
        socketDir=config.Postgres.SocketDirectory,
        listenAddresses=config.Postgres.ListenAddresses,
        sharedBuffers=config.Postgres.SharedBuffers,
        maxConnections=config.Postgres.MaxConnections,
        options=config.Postgres.Options,
        uid=uid, gid=gid,
        spawnedDBUser=config.SpawnedDBUser,
        importFileName=config.DBImportFile,
        pgCtl=config.Postgres.Ctl,
        initDB=config.Postgres.Init,
    )
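
A minimal sketch of wiring this helper up, assuming a loaded config object; note that the factory is ultimately invoked with a connection factory and the PostgresService itself (see ready() in Example no. 58):

from twisted.application.service import MultiService

def subServiceFactory(produceConnection, storageService):
    # produceConnection(label) yields a DB-API connection to the new cluster;
    # a real implementation would return the storage service built on it.
    return MultiService()

pgService = pgServiceFromConfig(config, subServiceFactory)
pgService.startService()  # runs initdb/pg_ctl start as needed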
Example no. 55
class AttachmentStorageTransport(StorageTransportBase):

    _TEMPORARY_UPLOADS_DIRECTORY = "Temporary"

    def __init__(self,
                 attachment,
                 contentType,
                 dispositionName,
                 creating=False,
                 migrating=False):
        super(AttachmentStorageTransport,
              self).__init__(attachment, contentType, dispositionName)

        fileDescriptor, fileName = self._temporaryFile()
        # Wrap the file descriptor in a file object we can write to; open in
        # binary mode so attachment bytes (and the MD5 hash computed from
        # them) are unaffected by newline translation.
        self._file = os.fdopen(fileDescriptor, "wb")
        self._path = CachingFilePath(fileName)
        self._hash = hashlib.md5()
        self._creating = creating
        self._migrating = migrating

        self._txn.postAbort(self.aborted)

    def _temporaryFile(self):
        """
        Returns a (file descriptor, absolute path) tuple for a temporary file within
        the Attachments/Temporary directory (creating the Temporary subdirectory
        if it doesn't exist).  It is the caller's responsibility to remove the
        file.
        """
        attachmentRoot = self._txn._store.attachmentsPath
        tempUploadsPath = attachmentRoot.child(
            self._TEMPORARY_UPLOADS_DIRECTORY)
        if not tempUploadsPath.exists():
            tempUploadsPath.createDirectory()
        return tempfile.mkstemp(dir=tempUploadsPath.path)

    @property
    def _txn(self):
        return self._attachment._txn

    def aborted(self):
        """
        Transaction aborted - clean up temp files.
        """
        if self._path.exists():
            self._path.remove()

    def write(self, data):
        if isinstance(data, buffer):
            data = str(data)
        self._file.write(data)
        self._hash.update(data)

    @inlineCallbacks
    def loseConnection(self):
        """
        Note that when self._migrating is set we only care about the data and don't need to
        do any quota checks/adjustments.
        """

        # FIXME: this should be synchronously accessible; IAttachment should
        # have a method for getting its parent just as CalendarObject/Calendar
        # do.

        # FIXME: If this method isn't called, the transaction should be
        # prevented from committing successfully.  It's not valid to have an
        # attachment that doesn't point to a real file.

        home = (yield self._txn.calendarHomeWithResourceID(
            self._attachment._ownerHomeID))

        oldSize = self._attachment.size()
        newSize = self._file.tell()
        self._file.close()

        # Check max size for attachment
        if not self._migrating and newSize > config.MaximumAttachmentSize:
            self._path.remove()
            if self._creating:
                yield self._attachment._internalRemove()
            raise AttachmentSizeTooLarge()

        # Check overall user quota
        if not self._migrating:
            allowed = home.quotaAllowedBytes()
            if allowed is not None and allowed < (
                (yield home.quotaUsedBytes()) + (newSize - oldSize)):
                self._path.remove()
                if self._creating:
                    yield self._attachment._internalRemove()
                raise QuotaExceeded()

        self._path.moveTo(self._attachment._path)

        yield self._attachment.changed(self._contentType,
                                       self._dispositionName,
                                       self._hash.hexdigest(), newSize)

        if not self._migrating and home:
            # Adjust quota
            yield home.adjustQuotaUsedBytes(self._attachment.size() - oldSize)

            # Send change notification to home
            yield home.notifyChanged()
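
A minimal driver sketch for the transport above; the attachment, contentType, and dispositionName values are assumed to come from the calendar store API:

from twisted.internet.defer import inlineCallbacks

@inlineCallbacks
def storeAttachmentData(attachment, contentType, dispositionName, data):
    transport = AttachmentStorageTransport(
        attachment, contentType, dispositionName, creating=True)
    transport.write(data)  # may be called repeatedly to stream chunks
    # loseConnection() enforces the size and quota limits, moves the temp
    # file into place, and records the MD5 and size on the attachment.
    yield transport.loseConnection()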
Example no. 56
def buildTestDirectory(store,
                       dataRoot,
                       accounts=None,
                       resources=None,
                       augments=None,
                       proxies=None,
                       serversDB=None):
    """
    @param store: the store for the directory to use

    @param dataRoot: the directory to copy xml files to

    @param accounts: path to the accounts.xml file
    @type accounts: L{FilePath}

    @param resources: path to the resources.xml file
    @type resources: L{FilePath}

    @param augments: path to the augments.xml file
    @type augments: L{FilePath}

    @param proxies: path to the proxies.xml file
    @type proxies: L{FilePath}

    @return: the directory service
    @rtype: L{IDirectoryService}
    """

    defaultDirectory = FilePath(__file__).sibling("accounts")
    if accounts is None:
        accounts = defaultDirectory.child("accounts.xml")
    if resources is None:
        resources = defaultDirectory.child("resources.xml")
    if augments is None:
        augments = defaultDirectory.child("augments.xml")
    if proxies is None:
        proxies = defaultDirectory.child("proxies.xml")

    if not os.path.exists(dataRoot):
        os.makedirs(dataRoot)

    accountsCopy = FilePath(dataRoot).child("accounts.xml")
    accountsCopy.setContent(accounts.getContent())

    resourcesCopy = FilePath(dataRoot).child("resources.xml")
    resourcesCopy.setContent(resources.getContent())

    augmentsCopy = FilePath(dataRoot).child("augments.xml")
    augmentsCopy.setContent(augments.getContent())

    proxiesCopy = FilePath(dataRoot).child("proxies.xml")
    proxiesCopy.setContent(proxies.getContent())

    servicesInfo = (
        ConfigDict({
            "Enabled": True,
            "type": "xml",
            "params": {
                "xmlFile": "accounts.xml",
                "recordTypes": ("users", "groups"),
            },
        }),
        ConfigDict({
            "Enabled": True,
            "type": "xml",
            "params": {
                "xmlFile": "resources.xml",
                "recordTypes": ("locations", "resources", "addresses"),
            },
        }),
    )
    augmentServiceInfo = ConfigDict({
        "type": "twistedcaldav.directory.augment.AugmentXMLDB",
        "params": {
            "xmlFiles": [
                "augments.xml",
            ],
            "statSeconds": 15,
        },
    })
    wikiServiceInfo = ConfigDict({
        "Enabled": True,
        "CollabHost": "localhost",
        "CollabPort": 4444,
    })
    directory = buildDirectory(store, dataRoot, servicesInfo,
                               augmentServiceInfo, wikiServiceInfo, serversDB)

    store.setDirectoryService(directory)

    return directory
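
A sketch of typical test usage, relying on the packaged XML fixtures; the u"user01" uid is an assumption about the contents of accounts.xml:

from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def lookUpTestUser(store, dataRoot):
    directory = buildTestDirectory(store, dataRoot)  # default fixtures
    record = yield directory.recordWithUID(u"user01")
    returnValue(record)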
Example no. 58
class PostgresService(MultiService):
    def __init__(
        self,
        dataStoreDirectory,
        subServiceFactory,
        schema,
        resetSchema=False,
        databaseName="subpostgres",
        clusterName="cluster",
        logFile="postgres.log",
        logDirectory="",
        socketDir="",
        socketName="",
        listenAddresses=[],
        txnTimeoutSeconds=30,
        sharedBuffers=30,
        maxConnections=20,
        options=[],
        testMode=False,
        uid=None,
        gid=None,
        spawnedDBUser="******",
        pgCtl="pg_ctl",
        initDB="initdb",
        reactor=None,
    ):
        """
        Initialize a L{PostgresService} pointed at a data store directory.

        @param dataStoreDirectory: the directory in which to store the
            postgres database
        @type dataStoreDirectory: L{twext.python.filepath.CachingFilePath}

        @param subServiceFactory: a callable invoked, once the database is
            ready, with a connection factory (which returns DB-API
            connections) and this service; see C{ready()} below.
        @type subServiceFactory: C{callable}

        @param spawnedDBUser: the postgres role
        @type spawnedDBUser: C{str}
        """

        # FIXME: By default there is very little (4MB) shared memory available,
        # so at the moment I am lowering these postgres config options to allow
        # multiple servers to run.  We might want to look into raising
        # kern.sysv.shmmax.
        # See: http://www.postgresql.org/docs/8.4/static/kernel-resources.html

        MultiService.__init__(self)
        self.subServiceFactory = subServiceFactory
        self.dataStoreDirectory = dataStoreDirectory
        self.workingDir = self.dataStoreDirectory.child("working")
        self.resetSchema = resetSchema

        # In order to delay a shutdown until database initialization has
        # completed, our stopService( ) examines the delayedShutdown flag.
        # If True, we wait on the shutdownDeferred to fire before proceeding.
        # The deferred gets fired once database init is complete.
        self.delayedShutdown = False  # set to True when in critical code
        self.shutdownDeferred = None  # the actual deferred

        # Options from config
        self.databaseName = databaseName
        self.clusterName = clusterName
        # Make logFile absolute in case the working directory of postgres is
        # elsewhere:
        self.logFile = os.path.abspath(logFile)
        if logDirectory:
            self.logDirectory = os.path.abspath(logDirectory)
        else:
            self.logDirectory = ""

        # Always use our own configured socket dir in case the built-in
        # postgres tries to use a directory we don't have permissions for
        if not socketDir:
            # Socket directory was not specified, so come up with one
            # in /tmp and based on a hash of the data store directory
            digest = md5(dataStoreDirectory.path).hexdigest()
            socketDir = "/tmp/ccs_postgres_" + digest
        self.socketDir = CachingFilePath(socketDir)
        self.socketName = socketName

        if listenAddresses:
            if ":" in listenAddresses[0]:
                self.host, self.port = listenAddresses[0].split(":")
            else:
                self.host, self.port = (listenAddresses[0], None)

            self.listenAddresses = [
                addr.split(":")[0] for addr in listenAddresses
            ]
        else:
            self.host = self.socketDir.path
            self.port = None
            self.listenAddresses = []

        self.txnTimeoutSeconds = txnTimeoutSeconds

        self.testMode = testMode
        self.sharedBuffers = max(sharedBuffers if not testMode else 16, 16)
        self.maxConnections = maxConnections if not testMode else 8
        self.options = options

        self.uid = uid
        self.gid = gid
        self.spawnedDBUser = spawnedDBUser
        self.schema = schema
        self.monitor = None
        self.openConnections = []

        def locateCommand(name, cmd):
            for found in which(cmd):
                return found

            raise InternalDataStoreError(
                "Unable to locate {} command: {}".format(name, cmd))

        self._pgCtl = locateCommand("pg_ctl", pgCtl)

        # Make note of the inode for the pg_ctl script; if it changes or is
        # missing when it comes time to stop postgres, instead send SIGTERM
        # to stop our postgres (since we can't do a graceful shutdown)
        try:
            self._pgCtlInode = os.stat(self._pgCtl).st_ino
        except OSError:
            self._pgCtlInode = 0

        self._initdb = locateCommand("initdb", initDB)
        self._reactor = reactor
        self._postgresPid = None

    @property
    def reactor(self):
        if self._reactor is None:
            from twisted.internet import reactor
            self._reactor = reactor
        return self._reactor

    def activateDelayedShutdown(self):
        """
        Call this when starting database initialization code to
        protect against shutdown.

        Sets the delayedShutdown flag to True so that if reactor shutdown
        commences, the shutdown will be delayed until deactivateDelayedShutdown
        is called.
        """
        self.delayedShutdown = True

    def deactivateDelayedShutdown(self):
        """
        Call this when database initialization code has completed so that the
        reactor can shutdown.
        """
        self.delayedShutdown = False
        if self.shutdownDeferred:
            self.shutdownDeferred.callback(None)

    def _connectorFor(self, databaseName=None):
        if databaseName is None:
            databaseName = self.databaseName

        kwargs = {
            "database": databaseName,
        }

        if self.host.startswith("/"):
            kwargs["endpoint"] = "unix:{}".format(self.host)
        else:
            kwargs["endpoint"] = "tcp:{}".format(self.host)
            if self.port:
                kwargs["endpoint"] = "{}:{}".format(kwargs["endpoint"],
                                                    self.port)
        if self.spawnedDBUser:
            kwargs["user"] = self.spawnedDBUser
        elif self.uid is not None:
            kwargs["user"] = pwd.getpwuid(self.uid).pw_name
        kwargs["txnTimeoutSeconds"] = self.txnTimeoutSeconds

        return DBAPIConnector.connectorFor("postgres", **kwargs)

    def produceConnection(self, label="<unlabeled>", databaseName=None):
        """
        Produce a DB-API 2.0 connection pointed at this database.
        """
        return self._connectorFor(databaseName).connect(label)

    def ready(self, createDatabaseConn, createDatabaseCursor):
        """
        Subprocess is ready.  Time to initialize the subservice.
        If the database has not been created and there is a dump file,
        then the dump file is imported.
        """
        if self.resetSchema:
            try:
                createDatabaseCursor.execute("drop database {}".format(
                    self.databaseName))
            except postgres.DatabaseError:
                pass

        try:
            createDatabaseCursor.execute(
                "create database {} with encoding 'UTF8'".format(
                    self.databaseName))
        except postgres.DatabaseError:
            # database already exists
            sqlToExecute = None
        else:
            # database does not yet exist; if dump file exists, execute it,
            # otherwise execute schema
            sqlToExecute = self.schema

        createDatabaseCursor.close()
        createDatabaseConn.close()

        if sqlToExecute is not None:
            connection = self.produceConnection()
            cursor = connection.cursor()
            for statement in splitSQLString(sqlToExecute):
                cursor.execute(statement)
            connection.commit()
            connection.close()

        if self.shutdownDeferred is None:
            # Only continue startup if we've not begun shutdown
            self.subServiceFactory(self.produceConnection,
                                   self).setServiceParent(self)

    def pauseMonitor(self):
        """
        Pause monitoring.  This is a testing hook for when (if) we are
        continuously monitoring output from the 'postgres' process.
        """
        #        for pipe in self.monitor.transport.pipes.values():
        #            pipe.stopReading()
        #            pipe.stopWriting()
        pass

    def unpauseMonitor(self):
        """
        Unpause monitoring.

        @see: L{pauseMonitor}
        """
        #        for pipe in self.monitor.transport.pipes.values():
        #            pipe.startReading()
        #            pipe.startWriting()
        pass

    def startDatabase(self):
        """
        Start the database and initialize the subservice.
        """
        def createConnection():
            try:
                createDatabaseConn = self.produceConnection(
                    "schema creation", "postgres")
            except postgres.DatabaseError as e:
                log.error(
                    "Unable to connect to database for schema creation:"
                    " {error}",
                    error=e)
                raise

            createDatabaseCursor = createDatabaseConn.cursor()

            if postgres.__name__ == "pg8000":
                createDatabaseConn.realConnection.autocommit = True
            elif postgres.__name__ == "pgdb":
                createDatabaseCursor.execute("commit")
            else:
                raise InternalDataStoreError(
                    "Unknown Postgres DBM module: {}".format(postgres))

            return createDatabaseConn, createDatabaseCursor

        monitor = PostgresMonitor(self)
        # check consistency of initdb and postgres?

        options = []
        options.append("-c listen_addresses={}".format(
            shell_quote(",".join(self.listenAddresses))))
        if self.socketDir:
            options.append("-c unix_socket_directories={}".format(
                shell_quote(self.socketDir.path)))
        if self.port:
            options.append("-c port={}".format(shell_quote(self.port)))
        options.append("-c shared_buffers={:d}".format(
            self.sharedBuffers)  # int: don't quote
                       )
        options.append("-c max_connections={:d}".format(
            self.maxConnections)  # int: don't quote
                       )
        options.append("-c standard_conforming_strings=on")
        options.append("-c unix_socket_permissions=0770")
        options.extend(self.options)
        if self.logDirectory:  # tell postgres to rotate logs
            options.append("-c log_directory={}".format(
                shell_quote(self.logDirectory)))
            options.append("-c log_truncate_on_rotation=on")
            options.append("-c log_filename=postgresql_%w.log")
            options.append("-c log_rotation_age=1440")
            options.append("-c logging_collector=on")

        options.append("-c log_line_prefix=%t")
        if self.testMode:
            options.append("-c log_statement=all")

        args = [
            self._pgCtl,
            "start",
            "--log={}".format(self.logFile),
            "--timeout=86400",  # Plenty of time for a long cluster upgrade
            "-w",  # Wait for startup to complete
            "-o",
            " ".join(options),  # Options passed to postgres
        ]

        log.info("Requesting postgres start via: {args}", args=args)
        self.reactor.spawnProcess(
            monitor,
            self._pgCtl,
            args,
            env=self.env,
            path=self.workingDir.path,
            uid=self.uid,
            gid=self.gid,
        )
        self.monitor = monitor

        def gotStatus(result):
            """
            Grab the postgres pid from the pgCtl status call in case we need
            to kill it directly later on in hardStop().  Useful in conjunction
            with the DataStoreMonitor so we can shut down if DataRoot has been
            removed/renamed/unmounted.
            """
            reResult = re.search(r"PID: (\d+)\D", result)
            if reResult is not None:
                self._postgresPid = int(reResult.group(1))
            self.ready(*createConnection())
            self.deactivateDelayedShutdown()

        def gotReady(result):
            """
            We started postgres; we're responsible for stopping it later.
            Call pgCtl status to get the pid.
            """
            log.info("{cmd} exited", cmd=self._pgCtl)
            self.shouldStopDatabase = True
            d = Deferred()
            statusMonitor = CapturingProcessProtocol(d, None)
            self.reactor.spawnProcess(
                statusMonitor,
                self._pgCtl,
                [self._pgCtl, "status"],
                env=self.env,
                path=self.workingDir.path,
                uid=self.uid,
                gid=self.gid,
            )
            return d.addCallback(gotStatus)

        def couldNotStart(f):
            """
            There was an error trying to start postgres.  Try to connect
            because it might already be running.  In this case, we won't
            be the one to stop it.
            """
            d = Deferred()
            statusMonitor = CapturingProcessProtocol(d, None)
            self.reactor.spawnProcess(
                statusMonitor,
                self._pgCtl,
                [self._pgCtl, "status"],
                env=self.env,
                path=self.workingDir.path,
                uid=self.uid,
                gid=self.gid,
            )
            return d.addCallback(gotStatus).addErrback(giveUp)

        def giveUp(f):
            """
            We can't start postgres or connect to a running instance.  Shut
            down.
            """
            log.critical("Can't start or connect to postgres: {failure.value}",
                         failure=f)
            self.deactivateDelayedShutdown()
            self.reactor.stop()

        self.monitor.completionDeferred.addCallback(gotReady).addErrback(
            couldNotStart)

    shouldStopDatabase = False

    def startService(self):
        MultiService.startService(self)
        self.activateDelayedShutdown()
        clusterDir = self.dataStoreDirectory.child(self.clusterName)
        env = self.env = os.environ.copy()
        env.update(PGDATA=clusterDir.path,
                   PGHOST=self.host,
                   PGUSER=self.spawnedDBUser)

        if self.socketDir:
            if not self.socketDir.isdir():
                log.info("Creating {dir}",
                         dir=self.socketDir.path.decode("utf-8"))
                self.socketDir.createDirectory()

            if self.uid and self.gid:
                os.chown(self.socketDir.path, self.uid, self.gid)

            os.chmod(self.socketDir.path, 0o770)

        if not self.dataStoreDirectory.isdir():
            log.info("Creating {dir}",
                     dir=self.dataStoreDirectory.path.decode("utf-8"))
            self.dataStoreDirectory.createDirectory()

        if not self.workingDir.isdir():
            log.info("Creating {dir}",
                     dir=self.workingDir.path.decode("utf-8"))
            self.workingDir.createDirectory()

        if self.uid and self.gid:
            os.chown(self.dataStoreDirectory.path, self.uid, self.gid)
            os.chown(self.workingDir.path, self.uid, self.gid)

        if not clusterDir.isdir():
            # No cluster directory, run initdb
            log.info("Running initdb for {dir}",
                     dir=clusterDir.path.decode("utf-8"))
            dbInited = Deferred()
            self.reactor.spawnProcess(
                CapturingProcessProtocol(dbInited, None),
                self._initdb,
                [self._initdb, "-E", "UTF8", "-U", self.spawnedDBUser],
                env=env,
                path=self.workingDir.path,
                uid=self.uid,
                gid=self.gid,
            )

            def doCreate(result):
                if result.find("FATAL:") != -1:
                    log.error(result)
                    raise InternalDataStoreError(
                        "Unable to initialize postgres database: {}".format(
                            result))
                self.startDatabase()

            dbInited.addCallback(doCreate)

        else:
            log.info("Cluster already exists at {dir}",
                     dir=clusterDir.path.decode("utf-8"))
            self.startDatabase()

    def stopService(self):
        """
        Stop all child services, then stop the subprocess, if it's running.
        """

        if self.delayedShutdown:
            # We're still in the process of initializing the database, so
            # delay shutdown until the shutdownDeferred fires.
            d = self.shutdownDeferred = Deferred()
            d.addCallback(lambda ignored: MultiService.stopService(self))
        else:
            d = MultiService.stopService(self)

        def superStopped(result):
            # If pg_ctl's startup wasn't successful, don't bother to stop the
            # database.  (This also happens in command-line tools.)
            if self.shouldStopDatabase:

                # Compare pg_ctl inode with one we saw at the start; if different
                # (or missing), fall back to SIGTERM
                try:
                    newInode = os.stat(self._pgCtl).st_ino
                except OSError:
                    # Missing
                    newInode = -1

                if self._pgCtlInode != newInode:
                    # send SIGTERM to postgres
                    log.info("Postgres control script mismatch")
                    if self._postgresPid:
                        log.info("Sending SIGTERM to Postgres")
                        try:
                            os.kill(self._postgresPid, signal.SIGTERM)
                        except OSError:
                            pass
                    return succeed(None)
                else:
                    # use pg_ctl stop
                    monitor = PostgresMonitor()
                    args = [
                        self._pgCtl,
                        "stop",
                        "--log={}".format(self.logFile),
                    ]
                    log.info("Requesting postgres stop via: {args}", args=args)
                    self.reactor.spawnProcess(
                        monitor,
                        self._pgCtl,
                        args,
                        env=self.env,
                        path=self.workingDir.path,
                        uid=self.uid,
                        gid=self.gid,
                    )
                    return monitor.completionDeferred

        return d.addCallback(superStopped)

    def hardStop(self):
        """
        Stop postgres quickly by sending it SIGQUIT
        """
        if self._postgresPid is not None:
            try:
                os.kill(self._postgresPid, signal.SIGQUIT)
            except OSError:
                pass
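
Putting the pieces together, a minimal standalone sketch; the path and schema SQL are stand-ins:

from twisted.application.service import MultiService
from twext.python.filepath import CachingFilePath

def subServiceFactory(produceConnection, storageService):
    # Invoked by ready() once the database exists and the schema is applied.
    return MultiService()

service = PostgresService(
    CachingFilePath("/tmp/ccs_test_db"),   # hypothetical data directory
    subServiceFactory,
    "create table example (id integer);",  # stand-in schema SQL
    testMode=True,  # clamps shared_buffers / max_connections for tests
)
service.startService()  # initdb if needed, then "pg_ctl start"
# later: service.stopService() honors delayedShutdown during initialization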
Example no. 59
    @inlineCallbacks
    def setUp(self):

        self.serverRoot = self.mktemp()
        os.mkdir(self.serverRoot)
        self.absoluteServerRoot = os.path.abspath(self.serverRoot)

        configRoot = os.path.join(self.absoluteServerRoot, "Config")
        if not os.path.exists(configRoot):
            os.makedirs(configRoot)

        dataRoot = os.path.join(self.absoluteServerRoot, "Data")
        if not os.path.exists(dataRoot):
            os.makedirs(dataRoot)

        documentRoot = os.path.join(self.absoluteServerRoot, "Documents")
        if not os.path.exists(documentRoot):
            os.makedirs(documentRoot)

        logRoot = os.path.join(self.absoluteServerRoot, "Logs")
        if not os.path.exists(logRoot):
            os.makedirs(logRoot)

        runRoot = os.path.join(self.absoluteServerRoot, "Run")
        if not os.path.exists(runRoot):
            os.makedirs(runRoot)

        config.reset()

        testRoot = os.path.join(os.path.dirname(__file__), "gateway")
        templateName = os.path.join(testRoot, "caldavd.plist")
        with open(templateName) as templateFile:
            template = templateFile.read()

        databaseRoot = os.path.abspath("_spawned_scripts_db" + str(os.getpid()))
        newConfig = template % {
            "ServerRoot": self.absoluteServerRoot,
            "DataRoot": dataRoot,
            "DatabaseRoot": databaseRoot,
            "DocumentRoot": documentRoot,
            "ConfigRoot": configRoot,
            "LogRoot": logRoot,
            "RunRoot": runRoot,
            "WritablePlist": os.path.join(
                os.path.abspath(configRoot), "caldavd-writable.plist"
            ),
        }
        configFilePath = FilePath(
            os.path.join(configRoot, "caldavd.plist")
        )

        configFilePath.setContent(newConfig)

        self.configFileName = configFilePath.path
        config.load(self.configFileName)

        config.Memcached.Pools.Default.ClientEnabled = False
        config.Memcached.Pools.Default.ServerEnabled = False
        ClientFactory.allowTestCache = True
        memcacher.Memcacher.allowTestCache = True
        memcacher.Memcacher.reset()
        config.DirectoryAddressBook.Enabled = False
        config.UsePackageTimezones = True

        origUsersFile = FilePath(
            os.path.join(
                os.path.dirname(__file__),
                "gateway",
                "users-groups.xml"
            )
        )
        copyUsersFile = FilePath(
            os.path.join(config.DataRoot, "accounts.xml")
        )
        origUsersFile.copyTo(copyUsersFile)

        origResourcesFile = FilePath(
            os.path.join(
                os.path.dirname(__file__),
                "gateway",
                "resources-locations.xml"
            )
        )
        copyResourcesFile = FilePath(
            os.path.join(config.DataRoot, "resources.xml")
        )
        origResourcesFile.copyTo(copyResourcesFile)

        origAugmentFile = FilePath(
            os.path.join(
                os.path.dirname(__file__),
                "gateway",
                "augments.xml"
            )
        )
        copyAugmentFile = FilePath(os.path.join(config.DataRoot, "augments.xml"))
        origAugmentFile.copyTo(copyAugmentFile)

        self.notifierFactory = StubNotifierFactory()
        self.store = yield theStoreBuilder.buildStore(self, self.notifierFactory)
        self.directory = directoryFromConfig(config, self.store)
Example no. 60
    @inlineCallbacks
    def setUp(self):
        """
        Set up two stores to migrate between.
        """

        yield super(HomeMigrationTests, self).setUp()
        yield self.buildStoreAndDirectory(extraUids=(
            u"home1",
            u"home2",
            u"home3",
            u"home_defaults",
            u"home_no_splits",
            u"home_splits",
            u"home_splits_shared",
        ))
        self.sqlStore = self.store

        # Add some files to the file store.

        self.filesPath = CachingFilePath(self.mktemp())
        self.filesPath.createDirectory()
        fileStore = self.fileStore = CommonDataStore(
            self.filesPath, {"push": StubNotifierFactory()}, self.directory,
            True, True)
        self.upgrader = UpgradeToDatabaseStep(self.fileStore, self.sqlStore)

        requirements = CommonTests.requirements
        extras = deriveValue(self, "extraRequirements", lambda t: {})
        requirements = self.mergeRequirements(requirements, extras)

        yield populateCalendarsFrom(requirements, fileStore)
        md5s = CommonTests.md5s
        yield resetCalendarMD5s(md5s, fileStore)
        self.filesPath.child("calendars").child("__uids__").child("ho").child(
            "me").child("home1").child(".some-extra-data").setContent(
                "some extra data")

        requirements = ABCommonTests.requirements
        yield populateAddressBooksFrom(requirements, fileStore)
        md5s = ABCommonTests.md5s
        yield resetAddressBookMD5s(md5s, fileStore)
        self.filesPath.child("addressbooks").child("__uids__").child(
            "ho").child("me").child("home1").child(
                ".some-extra-data").setContent("some extra data")

        # Add some properties we want to check get migrated over
        txn = self.fileStore.newTransaction()
        home = yield txn.calendarHomeWithUID("home_defaults")

        cal = yield home.calendarWithName("calendar_1")
        props = cal.properties()
        props[PropertyName.fromElement(
            caldavxml.SupportedCalendarComponentSet
        )] = caldavxml.SupportedCalendarComponentSet(
            caldavxml.CalendarComponent(name="VEVENT"),
            caldavxml.CalendarComponent(name="VTODO"),
        )
        props[PropertyName.fromElement(
            element.ResourceType)] = element.ResourceType(
                element.Collection(),
                caldavxml.Calendar(),
            )
        props[PropertyName.fromElement(
            customxml.GETCTag)] = customxml.GETCTag.fromString("foobar")

        inbox = yield home.calendarWithName("inbox")
        props = inbox.properties()
        props[PropertyName.fromElement(
            customxml.CalendarAvailability
        )] = customxml.CalendarAvailability.fromString(str(self.av1))
        props[PropertyName.fromElement(
            caldavxml.ScheduleDefaultCalendarURL
        )] = caldavxml.ScheduleDefaultCalendarURL(
            element.HRef.fromString(
                "/calendars/__uids__/home_defaults/calendar_1"), )

        yield txn.commit()
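
With the fixture above in place, a test would run the upgrader and verify the data arrived in SQL. A sketch, assuming the step exposes the stepWithResult() entry point used elsewhere in this upgrade framework:

    @inlineCallbacks
    def test_migrateHome1(self):
        # Hypothetical check built on the setUp() fixture above.
        yield self.upgrader.stepWithResult(None)
        txn = self.sqlStore.newTransaction()
        home = yield txn.calendarHomeWithUID(u"home1")
        self.assertNotEqual(home, None)
        yield txn.commit()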