def test_type_serialization(self):
        """Round-trip the generated lines through serializeAsVector /
        deserializeAsType for two independent serializer paths."""
        lines = self.genLines()
        serializers = Storage.OpenJsonSerializers()

        for path in ('pathA', 'pathB'):
            blob = serializers.serializeAsVector(path, list(lines))
            ok, recovered = Storage.deserializeAsType(blob)
            self.assertTrue(ok)
            self.assertEqual(tuple(recovered), lines)
Example #2
0
    def corrupted_read(self, writerFactory, toWrite, lastData, corruptFun, successExpected):
        """Write records, corrupt the last one on disk, and check the reader's verdict.

        writerFactory   -- callable(path) -> checksummed writer
        toWrite         -- strings written intact before the corrupted record
        lastData        -- string whose on-disk record gets corrupted
        corruptFun      -- callable(crc, size, message) -> replacement record bytes
        successExpected -- expected success flag from Storage.readToVector
        """
        path = self.nextTempFilePath()
        writer = writerFactory(path)
        for string in toWrite:
            writer.writeString(string)
        writer.flush()

        # Everything written so far should already be on disk.
        start = writer.fileSize()
        self.assertEqual(os.stat(path).st_size, start)

        writer.writeString(lastData)
        writer.flush()

        # 'a+' keeps writes append-only, so the corrupted record lands right
        # back at `start` after the truncate below.
        with open(path, 'a+') as f:
            f.seek(start)
            blob = f.read()
            f.truncate(start)

            crcLen = struct.calcsize(crcType)
            sizeLen = struct.calcsize(sizeType)
            crc = struct.unpack(crcType, blob[:crcLen])[0]
            # Fixed: the size field occupies blob[crcLen:crcLen + sizeLen] and
            # is encoded with sizeType; the original sliced [crcLen:sizeLen]
            # and unpacked it with the crc format.
            size = struct.unpack(sizeType, blob[crcLen:crcLen + sizeLen])[0]
            message = blob[crcLen + sizeLen:]

            f.write(corruptFun(crc, size, message))

        success, vec = Storage.readToVector(path)
        self.assertEqual(success, successExpected)
        os.unlink(path)
Example #3
0
    def corrupted_read(self, writerFactory, toWrite, lastData, corruptFun,
                       successExpected):
        """Write records, corrupt the last one on disk, and check the reader's verdict.

        writerFactory   -- callable(path) -> checksummed writer
        toWrite         -- strings written intact before the corrupted record
        lastData        -- string whose on-disk record gets corrupted
        corruptFun      -- callable(crc, size, message) -> replacement record bytes
        successExpected -- expected success flag from Storage.readToVector
        """
        path = self.nextTempFilePath()
        writer = writerFactory(path)
        for string in toWrite:
            writer.writeString(string)
        writer.flush()

        # Everything written so far should already be on disk.
        start = writer.fileSize()
        self.assertEqual(os.stat(path).st_size, start)

        writer.writeString(lastData)
        writer.flush()

        # 'a+' keeps writes append-only, so the corrupted record lands right
        # back at `start` after the truncate below.
        with open(path, 'a+') as f:
            f.seek(start)
            blob = f.read()
            f.truncate(start)

            crcLen = struct.calcsize(crcType)
            sizeLen = struct.calcsize(sizeType)
            crc = struct.unpack(crcType, blob[:crcLen])[0]
            # Fixed: the size field occupies blob[crcLen:crcLen + sizeLen] and
            # is encoded with sizeType; the original sliced [crcLen:sizeLen]
            # and unpacked it with the crc format.
            size = struct.unpack(sizeType, blob[crcLen:crcLen + sizeLen])[0]
            message = blob[crcLen + sizeLen:]

            f.write(corruptFun(crc, size, message))

        success, vec = Storage.readToVector(path)
        self.assertEqual(success, successExpected)
        os.unlink(path)
    def test_type_serialization(self):
        """serializeAsVector output for each path must deserializeAsType back
        into the original lines."""
        lines = self.genLines()
        serializers = Storage.OpenJsonSerializers()

        for path in ("pathA", "pathB"):
            serialized = serializers.serializeAsVector(path, list(lines))
            ok, contents = Storage.deserializeAsType(serialized)
            self.assertTrue(ok)
            self.assertEqual(tuple(contents), lines)
    def test_serialization(self):
        """Serialize lines one at a time per path, then round-trip each path's
        batch through deserializeAsVector."""
        lines = self.genLines()
        serializers = Storage.OpenJsonSerializers()
        batches = {"pathA": [], "pathB": []}

        # Same call order as before: each line goes to pathA then pathB.
        for line in lines:
            for path in ("pathA", "pathB"):
                batches[path].append(serializers.serialize(path, line))

        for path in ("pathA", "pathB"):
            ok, contents = Storage.deserializeAsVector(batches[path])
            self.assertTrue(ok)
            self.assertEqual(tuple(contents), lines)
Example #6
0
 def createLogFileDir(self):
     """Set up a temp directory plus the keyspace/keyrange pair and the
     LogFileDirectory the tests operate on."""
     self.tempdir = tempfile.mkdtemp()
     space = SharedState.Keyspace("TakeHighestIdKeyType",
                                  NativeJson.Json("test-space"), 1)
     self.keyspace = space
     self.keyrange = SharedState.KeyRange(space, 0, None, None, True, False)
     self.logFileDir = StorageNative.LogFileDirectory(self.tempdir, space,
                                                      self.keyrange)
     # Directory component of the active log file's path.
     self.baseDir = os.path.split(self.logFileDir.getCurrentLogPath())[0]
    def test_serialization(self):
        """Per-line serialization for two paths must round-trip through
        deserializeAsVector."""
        lines = self.genLines()
        serializers = Storage.OpenJsonSerializers()
        serializedA = []
        serializedB = []

        # Preserve original interleaving: A then B for every line.
        for line in lines:
            serializedA.append(serializers.serialize('pathA', line))
            serializedB.append(serializers.serialize('pathB', line))

        for batch in (serializedA, serializedB):
            ok, contents = Storage.deserializeAsVector(batch)
            self.assertTrue(ok)
            self.assertEqual(tuple(contents), lines)
Example #8
0
 def test_basic(self):
     """Write a few strings through ChecksummedWriter, check the reported
     sizes against the on-disk file, and read them back."""
     path = self.nextTempFilePath()
     writer = Storage.ChecksummedWriter(path)
     payloads = ['asdfasdfasdf', 'asdfasdfasdfasdfasdf', 'asdfsdfikjlkhjgkl']
     for payload in payloads:
         writer.writeString(payload)
     writer.flush()

     # Every record carries a crc + size header in addition to its payload.
     overhead = struct.calcsize(crcType) + struct.calcsize(sizeType)
     expected = sum(len(p) for p in payloads) + overhead * len(payloads)
     self.assertEqual(writer.written(), writer.fileSize())
     self.assertEqual(writer.written(), expected)
     self.assertEqual(writer.path(), path)
     self.assertEqual(os.stat(path).st_size, writer.written())

     ok, contents = Storage.readToVector(path)
     self.assertTrue(ok)
     self.assertEqual(tuple(payloads), tuple(contents))
     os.unlink(path)
    def test_one(self):
        logEntries = {}
        def append(path, contents):
            path = os.path.split(path)[1]
            if path not in logEntries:
                logEntries[path] = []
            logEntries[path].append(contents)

        def written(path, contents):
            print path, contents

        def flush(path):
            pass

        def read(path):
            return True, []

        pyOpenFiles = Storage.PythonOpenFiles(
                append,
                written,
                flush,
                read
                )

        fileStore = Storage.FileKeyspaceStorage(
                'test-cache-dir',
                self.keyspace,
                self.keyrange,
                pyOpenFiles.asInterface(),
                .01)

        entries = [Storage.createLogEntryEvent(self.getKey('key-%x' % ix), json('value-%s' % ix), ix)
                for ix in range(100)]

        for e in entries:
            fileStore.writeLogEntry(e)

        success, readEntries = Storage.deserializeAllLogEntries(logEntries.values()[0])
        self.assertEqual(tuple(readEntries), tuple(entries))
    def test_one(self):
        logEntries = {}

        def append(path, contents):
            path = os.path.split(path)[1]
            if path not in logEntries:
                logEntries[path] = []
            logEntries[path].append(contents)

        def written(path, contents):
            print path, contents

        def flush(path):
            pass

        def read(path):
            return True, []

        pyOpenFiles = Storage.PythonOpenFiles(append, written, flush, read)

        fileStore = Storage.FileKeyspaceStorage('test-cache-dir',
                                                self.keyspace, self.keyrange,
                                                pyOpenFiles.asInterface(), .01)

        entries = [
            Storage.createLogEntryEvent(self.getKey('key-%x' % ix),
                                        json('value-%s' % ix), ix)
            for ix in range(100)
        ]

        for e in entries:
            fileStore.writeLogEntry(e)

        success, readEntries = Storage.deserializeAllLogEntries(
            logEntries.values()[0])
        self.assertEqual(tuple(readEntries), tuple(entries))
Example #11
0
    def perform_startup(self, logFiles, stateFiles, targetLog, targetState):
        """Seed the log directory with the given LOG/STATE files, reopen it,
        and verify which log and state paths it selects."""
        self.createLogFileDir()

        for name in logFiles:
            self.touchFile(self.pathFor('LOG', name))
        for name in stateFiles:
            self.touchFile(self.pathFor('STATE', name))

        # Re-open so the directory rescans the files created above.
        self.logFileDir = StorageNative.LogFileDirectory(
            self.tempdir, self.keyspace, self.keyrange)

        self.assertEqual(self.logFileDir.getCurrentLogPath(),
                         self.pathFor('LOG', targetLog))
        self.assertEqual(self.logFileDir.getNextStatePath(),
                         self.pathFor('STATE', targetState))
        shutil.rmtree(self.tempdir, True)
Example #12
0
 def test_basic(self):
     """ChecksummedWriter size accounting and round-trip sanity check."""
     path = self.nextTempFilePath()
     writer = Storage.ChecksummedWriter(path)
     toWrite = ['asdfasdfasdf', 'asdfasdfasdfasdfasdf', 'asdfsdfikjlkhjgkl']
     for item in toWrite:
         writer.writeString(item)
     writer.flush()

     self.assertEqual(writer.written(), writer.fileSize())
     # Each record is payload plus a crc header plus a size header.
     perRecord = [
         len(item) + struct.calcsize(crcType) + struct.calcsize(sizeType)
         for item in toWrite
     ]
     self.assertEqual(writer.written(), sum(perRecord))
     self.assertEqual(writer.path(), path)
     self.assertEqual(os.stat(path).st_size, writer.written())

     ok, contents = Storage.readToVector(path)
     self.assertTrue(ok)
     self.assertEqual(tuple(toWrite), tuple(contents))
     os.unlink(path)
Example #13
0
 def executeTest(ramdiskdir):
     """Fill a 1MB tmpfs through OpenFiles until the disk runs out of space.

     Expects an OSError with errno ENOSPC once the ramdisk fills; always
     shuts the OpenFiles instance down and re-raises the error.
     """
     openFiles = Storage.OpenFiles(10)
     # NOTE(review): mounting requires privileges and the result of
     # os.system is not checked — presumably the caller guarantees this.
     os.system('mount -t tmpfs -o size=1M tmpfs %s' % ramdiskdir)
     try:
         paths = [
             os.path.join(ramdiskdir, 'test-%s' % ix)
             for ix in range(10)
         ]
         toWrite = [
             chr((ix + c) % 255) for c in range(1024)
             for ix in range(32)
         ] * 32
         for string in toWrite:
             openFiles.append(random.choice(paths), string)
     except OSError as e:
         openFiles.shutdown()
         # Fixed: the original called self.assertEqual here, but `self` is
         # undefined in this free function; verify the errno directly.
         if e.errno != errno.ENOSPC:
             raise AssertionError(
                 'expected ENOSPC, got errno %s' % e.errno)
         raise
Example #14
0
    def test_open_files_multithreaded(self):
        # test multiple threads in C++ to test OpenFiles for race conditions

        self.openFiles = Storage.OpenFiles(10)
        numThreads = 10
        numValues = 20
        valueSize = 128
        iters = 20
        base = tempfile.mkdtemp()
        numFilesPerThread = 100
        numFiles = 1000

        def genValues(num, size):
            return [
                ''.join(
                    chr(random.randint(ord('A'), ord('z')))
                    for y in range(size)) for x in range(num)
            ]

        files = [
            os.path.join(base, 'test-path-%s' % ix) for ix in range(numFiles)
        ]

        def writer(files, strings, iters, ix):
            Storage.writeToOpenFiles(self.openFiles, files, strings, iters)

        threads = [
            threading.Thread(target=writer,
                             args=(random.sample(files, numFilesPerThread),
                                   genValues(numValues, valueSize), iters, ix))
            for ix in range(numThreads)
        ]

        for t in threads:
            t.start()

        for t in threads:
            t.join()

        print len(os.listdir(base))
        shutil.rmtree(base)
Example #15
0
 def createNullUpdate(self, key):
     """Build a partial event that sets `key` to None, consuming one eventId."""
     event = StorageNative.createPartialEvent(key, None, self.eventId,
                                              self.clientId)
     self.eventId += 1
     return event
Example #16
0
 def test_open_files(self):
     """Run the corruption readers through an OpenFiles wrapper and check
     that exactly one extra fd remains open afterwards."""
     before = len(getOwnOpenFds())
     self.openFiles = Storage.OpenFiles(1)
     self.corrupted_readers(
         lambda path: OpenFilesWrapper(path, self.openFiles))
     self.assertEqual(before + 1, len(getOwnOpenFds()))
Example #17
0
 def writer(files, strings, iters, ix):
     # Thread target: push the whole batch through the shared OpenFiles.
     # `ix` names the thread; the write call itself does not use it.
     Storage.writeToOpenFiles(self.openFiles, files, strings, iters)
Example #18
0
 def writer(files, strings, iters, ix):
     # Thread target: forward the batch to the shared OpenFiles instance.
     # NOTE(review): `ix` (thread index) is accepted but unused here;
     # `self` is resolved from the enclosing scope.
     Storage.writeToOpenFiles(self.openFiles, files, strings, iters)
Example #19
0
 def createKeyUpdate(self, key, value):
     """Build a partial event setting `key` to `value`, consuming one eventId."""
     event = StorageNative.createPartialEvent(key, value, self.eventId,
                                              self.clientId)
     self.eventId += 1
     return event
 def createKeyUpdate(self, key, value):
     # Create a partial event for (key, value) stamped with this client's
     # next event id, then advance the counter.
     tr = StorageNative.createPartialEvent(key, value, self.eventId, self.clientId)
     self.eventId += 1
     return tr
 def createNullUpdate(self, key):
     # Create a partial event that maps `key` to None (a deletion-style
     # update, presumably — confirm against StorageNative semantics),
     # stamped with the next event id.
     tr = StorageNative.createPartialEvent(key, None, self.eventId, self.clientId)
     self.eventId += 1
     return tr