def setUp(self):
    """Set up for testing osd.object.server.ObjectController"""
    utils.HASH_PATH_SUFFIX = 'endcap'
    utils.HASH_PATH_PREFIX = 'startcap'
    self.testdir = os.path.join(mkdtemp(),
                                'tmp_test_object_server_ObjectController')
    self.filesystems = os.path.join(self.testdir, "export")
    self.filesystem = "fs1"
    self.dir = "d1"
    # Pre-create the data and meta directories for account "a",
    # container "c" under the test filesystem.
    object_base = os.path.join(self.filesystems, self.filesystem,
                               "a", "c", self.dir)
    for leaf in ("data", "meta"):
        mkdirs(os.path.join(object_base, leaf))
    conf = {'filesystems': self.filesystems,
            'mount_check': 'false',
            'llport': 61014}
    self.object_controller = self.create_instance(conf)
    # Force a sync on every byte so tests exercise the sync path.
    self.object_controller.bytes_per_sync = 1
    # Make tpool.execute synchronous for deterministic tests;
    # the original is restored from _orig_tpool_exc in tearDown.
    self._orig_tpool_exc = tpool.execute
    tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
    self.df_mgr = diskfile.DiskFileManager(conf,
                                           self.object_controller.logger)
def _create_ondisk_file(self, df, data, timestamp, metadata=None):
    """Create the data and metadata files for *df* directly on disk.

    Missing standard metadata keys (X-Timestamp, ETag, name,
    Content-Length) are filled in from *data*/*timestamp*.

    :param df: DiskFile whose _datadir/_metadir/_name_hash give the paths
    :param data: raw object body written to the .data file
    :param timestamp: object timestamp; current time is used if None
    :param metadata: optional metadata dict to pickle into the .meta file
    """
    if timestamp is None:
        timestamp = time()
    timestamp = normalize_timestamp(timestamp)
    if not metadata:
        metadata = {}
    if 'X-Timestamp' not in metadata:
        metadata['X-Timestamp'] = normalize_timestamp(timestamp)
    if 'ETag' not in metadata:
        etag = md5()
        etag.update(data)
        metadata['ETag'] = etag.hexdigest()
    if 'name' not in metadata:
        metadata['name'] = '/a/c/o'
    if 'Content-Length' not in metadata:
        metadata['Content-Length'] = str(len(data))
    # NOTE: removed unused local `hash_name = df._name_hash`.
    mkdirs(df._datadir)
    mkdirs(df._metadir)
    data_file = os.path.join(df._datadir, df._name_hash + ".data")
    meta_file = os.path.join(df._metadir, df._name_hash + ".meta")
    with open(data_file, 'wb') as f:
        f.write(data)
    with open(meta_file, 'wb') as f:
        f.write(pickle.dumps(metadata, diskfile.PICKLE_PROTOCOL))
        # Trailing sentinel so readers can detect a truncated meta file.
        f.write("EOF")
def setUp(self):
    """Set up for testing osd.obj.diskfile.DiskFileManager."""
    self.tmpdir = mkdtemp()
    self.testdir = os.path.join(
        self.tmpdir, 'test_object_server_disk_file_mgr')
    # NOTE: the original duplicated this mkdirs call verbatim; the
    # second call was a no-op and has been removed.
    mkdirs(os.path.join(self.testdir, "export", "fs1"))
    self.filesystems = os.path.join(self.testdir, "export")
    # Make tpool.execute synchronous for deterministic tests;
    # restored from _orig_tpool_exc in tearDown.
    self._orig_tpool_exc = tpool.execute
    tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
    self.conf = dict(filesystems=self.filesystems,
                     mount_check='false',
                     keep_cache_size=2 * 1024)
    self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
def setUp(self):
    """Set up for testing osd.object.server.ObjectController"""
    utils.HASH_PATH_SUFFIX = 'endcap'
    utils.HASH_PATH_PREFIX = 'startcap'
    # Uses the module-level test_dir rather than a fresh mkdtemp()
    # (commented-out mkdtemp variant removed as dead code).
    self.testdir = test_dir
    self.filesystems = os.path.join(self.testdir, "export")
    self.filesystem = "fs1"
    self.dir = "o1"
    conf = {'filesystems': self.filesystems, 'mount_check': 'false'}
    self.object_controller = self.create_instance(conf)
    mkdirs(os.path.join(self.filesystems, self.filesystem))
    # Make tpool.execute synchronous for deterministic tests;
    # restored from _orig_tpool_exc in tearDown.
    self._orig_tpool_exc = tpool.execute
    tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
    self.df_mgr = diskfile.DiskFileManager(conf,
                                           self.object_controller.logger)
    self.lock_mgr_obj = LockManager(FakeLogger(''))
def _create_tmp_file(tmpdir, tmp_data_file, tmp_meta_file):
    """Create a temporary data file and meta file inside *tmpdir*.

    The directory is created first if it does not yet exist.

    :returns: tuple of (data fd, data path, meta fd, meta path)
    """
    if not exists(tmpdir):
        mkdirs(tmpdir)
    data_fd, data_path = create_tmp_file(tmpdir, tmp_data_file)
    meta_fd, meta_path = create_tmp_file(tmpdir, tmp_meta_file)
    return (data_fd, data_path, meta_fd, meta_path)
def recover_complete_file(self, tmp_dir, file_name, only_meta=False):
    '''
    Writes file to actual location and call container update request
    method and then finally release lock request.
    :param tmp_dir: tmp directory
    :param file_name: file name to be recovered
    :param only_meta: flag to identify only meta file case
    '''
    update_status, release_status = None, None
    metadata = None
    duplicate = False
    try:
        # Load the pickled metadata saved alongside the tmp data file and
        # keep only the system metadata keys plus name and X-Timestamp.
        meta_file = file_name + '.meta'
        with open(os.path.join(tmp_dir, meta_file), 'rb') as meta_fd:
            orig_metadata = pickle.loads(meta_fd.read())
            metadata = dict(
                [(key, val) for key, val in orig_metadata.iteritems()
                 if key.lower() in DATAFILE_SYSTEM_META])
            metadata.update({'name': orig_metadata['name']})
            metadata.update({'X-Timestamp': orig_metadata['X-Timestamp']})
    except (Exception, IOError) as err:
        # Without the metadata we cannot determine the target paths, so
        # log with a stack trace and propagate.
        self._logger.error(__(
            'ERROR While reading %(meta)s file'
            ' close failure: %(exc)s : %(stack)s'),
            {'exc': err, 'stack': ''.join(traceback.format_stack()),
             'meta': os.path.join(tmp_dir, file_name + '.meta')})
        raise
    # file_name appears to encode the object hash with '_' in place of
    # '/' -- presumably; verify against the caller that builds file_name.
    obj_hash = file_name.replace('_', '/')
    data_target_path, meta_target_path = \
        self.__get_target_path(metadata, file_name)
    data_file = os.path.join(tmp_dir, file_name) + '.data'
    meta_file = os.path.join(tmp_dir, file_name) + '.meta'
    # Both targets already on disk means this recovery is a duplicate;
    # the flag is forwarded to the container update below.
    if os.path.exists(data_target_path) and \
            os.path.exists(meta_target_path):
        duplicate = True
    try:
        # Move the tmp files into their final locations; the data file is
        # skipped when only the metadata is being recovered.
        if not only_meta:
            mkdirs(os.path.dirname(data_target_path))
            os.rename(data_file, data_target_path)
        mkdirs(os.path.dirname(meta_target_path))
        os.rename(meta_file, meta_target_path)
    except OSError:
        self._logger.error("Failure during file renaming")
        raise
    self._logger.info("Sending request to container service "
                      "for file: %s" % data_file)
    update_status = self.__communicator_obj. \
        container_update(metadata, duplicate)
    if not update_status:
        # Lock is NOT released when the container update fails.
        self._logger.error("Could not update container")
    else:
        self._logger.info(__("Container update successful for "
                             "file: %s" % data_file))
        self._logger.info(__("Sending request for releasing "
                             "lock: %s" % data_file))
        release_status = self.__communicator_obj. \
            release_lock(obj_hash, metadata)
        if release_status:
            self._logger.info(__("Release lock successful for "
                                 "file: %s" % metadata['name']))
        else:
            self._logger.error("Could not release lock for "
                               "file: %s" % metadata['name'])