Example #1
def mount_cachefs(self):
    cmdline_options = [
        os.path.join(config.getProjectRoot(), 'scripts', 'wrapper.sh'),
        os.path.join(config.getProjectRoot(), 'cachefs.py'),
        self.cfg.cache_fs.cache_fs_mountpoint,
        '--source-dir={source}'.format(
            source=self.cfg.cache_manager.source_dir),
        '--cache-dir={cache}'.format(
            cache=self.cfg.cache_manager.cache_root_dir),
        '--log={log_path}'.format(log_path=os.path.join(
            self.cfg.ut_tests_root,
            "LOG_" + self.__class__.__name__ + str(self.tag))),
        '--disk-cache-lifetime={disk_cache_lifetime}'.format(
            disk_cache_lifetime=self.cfg.cache_manager.disk_cache_lifetime),
        '--memory-cache-lifetime={memory_cache_lifetime}'.format(
            memory_cache_lifetime=self.cfg.cache_manager.memory_cache_lifetime),
        '--debug',
        '-f'  # foreground
    ]
    self.cachefs_mounter = mounter.FuseFsMounter(
        cmdline_options, self.cfg.cache_fs.cache_fs_mountpoint)
    try:
        self.cachefs_mounter.mount()
    except Exception:
        # Mounting failed; tear down whatever was created so later tests
        # do not inherit a half-mounted workspace.
        print("************************************************")
        print("************************************************")
        print("CANNOT MOUNT CACHEFS, TRYING TO CLEANUP THE MESS")
        print("************************************************")
        print("************************************************")
        self.cleanupWorkspace()
Example #2
def mount_cachefs(self):
    self.timeModule = mocks.time_mock.ModuleInterface()
    self.timeController = self.timeModule.getController()

    # Stub out the time source so the test controls the clock cachefs sees.
    self.moxConfig.StubOutWithMock(self.timeController, "_timeImpl")
    self.timeController._timeImpl().MultipleTimes().AndReturn(
        self.initialTimeValue)
    self.moxConfig.ReplayAll()

    # Expose the mocks package at the project root, then mount through the
    # base class implementation.
    os.symlink(os.path.join(config.getProjectRoot(), 'tests', 'mocks'),
               os.path.join(config.getProjectRoot(), 'mocks'))
    CachefsSystemTest.mount_cachefs(self)

    TestHelper.fetch_all(self.source_memfs_inport)
    TestHelper.fetch_all(self.cache_memfs_inport)
Example #3
def tearDownImpl(self):
    CachefsSystemTest.tearDownImpl(self)
    # Remove the 'mocks' symlink created in mount_cachefs and shut down
    # the mocked time module.
    os.remove(os.path.join(config.getProjectRoot(), 'mocks'))
    self.timeController.finalize()
    self.timeController.dispose()
    self.timeModule.server.join()
    self.moxConfig.UnsetStubs()
Example #4
def _buildCmdLineOptions(self, mountpoint, logpath, unixAddr):
    cmdline_options = [
        os.path.join(config.getProjectRoot(), 'tests', 'mocks', 'memfs.py'),
        mountpoint,
        '--log={log_path}'.format(log_path=logpath),
        '--commport={commport}'.format(commport=unixAddr),
        '-f'  # foreground
    ]
    return cmdline_options
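
The option list built here is presumably consumed the same way as in Example #1, i.e. handed to mounter.FuseFsMounter together with the mountpoint. A minimal in-class usage sketch under that assumption; the mountpoint, log path, and unix socket address below are illustrative placeholders, not values from the original project:

# Sketch only: paths and the socket address are placeholders, and
# FuseFsMounter is assumed to take (cmdline_options, mountpoint) as in Example #1.
options = self._buildCmdLineOptions('/tmp/memfs_mount',
                                    '/tmp/memfs.log',
                                    '/tmp/memfs.sock')
self.memfs_mounter = mounter.FuseFsMounter(options, '/tmp/memfs_mount')
self.memfs_mounter.mount()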
Example #5
def read_csv(batch_size, file_name, record_defaults):
    filename_queue = tf.train.string_input_producer(
        [config.getProjectRoot() + "/Iris/" + file_name])

    reader = tf.TextLineReader()
    key, value = reader.read(queue=filename_queue)

    # decode_csv converts a string Tensor (the text line) into a tuple of
    # tensor columns with the specified defaults, which also set the data
    # type for each column.
    decoded = tf.decode_csv(value, record_defaults=record_defaults)

    return tf.train.shuffle_batch(decoded,
                                  batch_size=batch_size,
                                  capacity=batch_size * 50,
                                  min_after_dequeue=batch_size)
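
Because read_csv relies on tf.train.string_input_producer and tf.train.shuffle_batch, the returned tensors only yield data once the queue runners are started. A minimal usage sketch, assuming TensorFlow 1.x and an Iris-style CSV with four float features and an integer label; the file name and record defaults are illustrative:

import tensorflow as tf

# Illustrative schema: four float features plus an integer label.
record_defaults = [[0.0], [0.0], [0.0], [0.0], [0]]
batch = read_csv(batch_size=32,
                 file_name="iris_training.csv",
                 record_defaults=record_defaults)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    # string_input_producer and shuffle_batch are queue-based; without
    # starting the queue runners, sess.run(batch) would block forever.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    values = sess.run(batch)
    coord.request_stop()
    coord.join(threads)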