def prepare2get(self, err_entry=None, found_checkpoint=False):
    """Issue Prepare2Get commands for the files in the archive.

    This method is only executed for GET operations and its purpose is to
    issue the Prepare2Get commands for the files in the archive which will
    later on be copied back to EOS. Requests are batched to at most
    ``limit`` paths per prepare call.

    Args:
        err_entry (list): Entry record from the archive file corresponding
            to the first file/dir that was corrupted.
        found_checkpoint (bool): If True it means the checkpoint was already
            found and we don't need to search for it.

    Raises:
        IOError: The Prepare2Get request failed.
    """
    # PUT (disk-to-tape) operations have nothing to stage back
    if self.archive.d2t:
        return

    count = 0
    limit = 50  # max files per prepare request
    oper = 'prepare'
    self.set_status("prepare2get")
    t0 = time.time()
    lpaths = []
    status = True
    metahandler = MetaHandler()

    for fentry in self.archive.files():
        # Find error checkpoint if not already found
        if err_entry and not found_checkpoint:
            if fentry != err_entry:
                continue
            else:
                found_checkpoint = True

        count += 1
        surl, __ = self.archive.get_endpoints(fentry[1])
        # Strip the protocol/host part, keep only the path portion
        lpaths.append(surl[surl.rfind('//') + 1:])

        if len(lpaths) == limit:
            xrd_st = self.archive.fs_dst.prepare(
                lpaths, PrepareFlags.STAGE,
                callback=metahandler.register(oper, surl))

            if not xrd_st.ok:
                # Drain any responses already registered before bailing out
                __ = metahandler.wait(oper)
                err_msg = "Failed prepare2get for path={0}".format(surl)
                self.logger.error(err_msg)
                raise IOError(err_msg)

            # Wait for batch to be executed
            del lpaths[:]
            status = status and metahandler.wait(oper)
            self.logger.debug(
                ("Prepare2get done count={0}/{1}"
                 "").format(count, self.archive.header['num_files']))

            if not status:
                break

    # Send the remaining requests
    if lpaths and status:
        xrd_st = self.archive.fs_dst.prepare(
            lpaths, PrepareFlags.STAGE,
            callback=metahandler.register(oper, surl))

        if not xrd_st.ok:
            __ = metahandler.wait(oper)
            err_msg = "Failed prepare2get"
            self.logger.error(err_msg)
            raise IOError(err_msg)

        # Wait for batch to be executed
        del lpaths[:]
        status = status and metahandler.wait(oper)

    if status:
        t1 = time.time()
        self.logger.info("TIMING_prepare2get={0} sec".format(t1 - t0))
    else:
        err_msg = "Failed prepare2get"
        self.logger.error(err_msg)
        raise IOError(err_msg)
def prepare2get(self, err_entry=None, found_checkpoint=False):
    """Trigger staging (Prepare2Get) of the archived files.

    Executed only for GET operations: the staged replicas are copied back
    to EOS in a later step. Staging requests are submitted in batches.

    Args:
        err_entry (list): Archive record of the first corrupted file/dir,
            used as a recovery checkpoint.
        found_checkpoint (bool): When True the checkpoint was already
            located and no search for it is performed.

    Raises:
        IOError: A Prepare2Get request failed.
    """
    if self.archive.d2t:
        return

    batch_size = 50  # max files per prepare request
    oper = 'prepare'
    self.set_status("prepare2get")
    start = time.time()
    batch = []
    ok = True
    handler = MetaHandler()
    done = 0

    for fentry in self.archive.files():
        # Skip everything before the recovery checkpoint, if one is set
        if err_entry and not found_checkpoint:
            if fentry != err_entry:
                continue
            found_checkpoint = True

        done += 1
        surl, __ = self.archive.get_endpoints(fentry[1])
        # Keep only the path component of the source URL
        batch.append(surl[surl.rfind('//') + 1:])

        if len(batch) == batch_size:
            st = self.archive.fs_dst.prepare(
                batch, PrepareFlags.STAGE,
                callback=handler.register(oper, surl))

            if not st.ok:
                handler.wait(oper)
                err_msg = "Failed prepare2get for path={0}".format(surl)
                self.logger.error(err_msg)
                raise IOError(err_msg)

            # Drain this batch and fold in its asynchronous status
            del batch[:]
            ok = ok and handler.wait(oper)
            self.logger.debug(
                ("Prepare2get done count={0}/{1}"
                 "").format(done, self.archive.header['num_files']))
            if not ok:
                break

    # Flush the partial batch left over after the loop
    if batch and ok:
        st = self.archive.fs_dst.prepare(
            batch, PrepareFlags.STAGE,
            callback=handler.register(oper, surl))

        if not st.ok:
            handler.wait(oper)
            err_msg = "Failed prepare2get"
            self.logger.error(err_msg)
            raise IOError(err_msg)

        del batch[:]
        ok = ok and handler.wait(oper)

    if ok:
        self.logger.info(
            "TIMING_prepare2get={0} sec".format(time.time() - start))
    else:
        err_msg = "Failed prepare2get"
        self.logger.error(err_msg)
        raise IOError(err_msg)
def update_file_access(self, err_entry=None, found_checkpoint=False):
    """Restore ownership, permissions and timestamps on files copied to EOS.

    Runs only for GET operations i.e. self.archive.d2t == False. For every
    file entry an asynchronous chown, chmod and utimes MGM query is sent.

    Args:
        err_entry (list): Archive record of the first corrupted file/dir,
            used as a recovery checkpoint.
        found_checkpoint (boolean): When True the checkpoint was already
            located (i.e. the corrupted entry is a directory) and no search
            is performed.

    Raises:
        IOError: A chown, chmod or utimes query failed.
    """
    if self.archive.d2t:
        return

    self.set_status("updating file access")
    start = time.time()
    oper = 'query'
    handler = MetaHandler()
    mgm = self.archive.fs_src
    hdr = self.archive.header

    def _send_query(opaque, surl, what):
        # Submit one async MGM query; abort immediately on submission failure.
        st = mgm.query(QueryCode.OPAQUEFILE, opaque.encode("utf-8"),
                       callback=handler.register(oper, surl))
        if not st.ok:
            handler.wait(oper)
            msg = "Failed query {0} for path={1}".format(what, surl)
            self.logger.error(msg)
            raise IOError(msg)

    for fentry in self.archive.files():
        # Backup operation with a time window: only touch matching entries
        if self.oper == self.config.BACKUP_OP:
            if hdr['twindow_type'] and hdr['twindow_val']:
                meta = dict(zip(hdr['file_meta'], fentry[2:]))
                twindow_sec = int(hdr['twindow_val'])
                tentry_sec = int(float(meta[hdr['twindow_type']]))
                if tentry_sec < twindow_sec:
                    continue

        # Skip everything before the recovery checkpoint, if one is set
        if err_entry and not found_checkpoint:
            if fentry != err_entry:
                continue
            found_checkpoint = True

        __, surl = self.archive.get_endpoints(fentry[1])
        url = client.URL(surl.encode("utf-8"))
        meta = dict(zip(hdr['file_meta'], fentry[2:]))

        # chown back to the archived uid/gid
        _send_query(url.path + "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=" +
                    meta['uid'] + "&gid=" + meta['gid'], surl, "chown")

        # chmod back to the archived mode (stored as octal text)
        _send_query(url.path + "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chmod&mode=" +
                    str(int(meta['mode'], 8)), surl, "chmod")

        # utimes: tv1 restores ctime, tv2 restores mtime
        mtime_sec, mtime_nsec = meta['mtime'].split('.', 1)
        ctime_sec, ctime_nsec = meta['ctime'].split('.', 1)
        _send_query(url.path + "?eos.ruid=0&eos.rgid=0&mgm.pcmd=utimes" +
                    "&tv1_sec=" + ctime_sec + "&tv1_nsec=" + ctime_nsec +
                    "&tv2_sec=" + mtime_sec + "&tv2_nsec=" + mtime_nsec,
                    surl, "utimes")

    if handler.wait(oper):
        self.logger.info(
            "TIMING_update_file_access={0} sec".format(time.time() - start))
    else:
        err_msg = "Failed update file access"
        self.logger.error(err_msg)
        raise IOError(err_msg)
def update_file_access(self, err_entry=None, found_checkpoint=False):
    """Set the ownership and the permissions for the files copied to EOS.

    This is done only for GET operation i.e. self.archive.d2t == False.
    For each file an asynchronous chown, chmod and utimes MGM query is
    issued; the collected statuses are checked at the end.

    Args:
        err_entry (list): Entry record from the archive file corresponding
            to the first file/dir that was corrupted.
        found_checkpoint (boolean): If True, it means the checkpoint was
            already found and we don't need to search for it i.e. the
            corrupted entry is a directory.

    Raises:
        IOError: chown, chmod or utimes operations failed.
    """
    if self.archive.d2t:
        return

    self.set_status("updating file access")
    t0 = time.time()
    oper = 'query'
    metahandler = MetaHandler()
    fs = self.archive.fs_src

    for fentry in self.archive.files():
        # If backup operation and time window specified then update only
        # matching ones
        if self.oper == self.config.BACKUP_OP:
            if self.archive.header['twindow_type'] and self.archive.header['twindow_val']:
                dfile = dict(zip(self.archive.header['file_meta'], fentry[2:]))
                twindow_sec = int(self.archive.header['twindow_val'])
                tentry_sec = int(float(dfile[self.archive.header['twindow_type']]))

                if tentry_sec < twindow_sec:
                    continue

        # Search for the recovery checkpoint
        if err_entry and not found_checkpoint:
            if fentry != err_entry:
                continue
            else:
                found_checkpoint = True

        __, surl = self.archive.get_endpoints(fentry[1])
        url = client.URL(surl.encode("utf-8"))
        dict_meta = dict(zip(self.archive.header['file_meta'], fentry[2:]))

        # Send the chown async request
        arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=",
                       dict_meta['uid'], "&gid=", dict_meta['gid']])
        xrd_st = fs.query(QueryCode.OPAQUEFILE, arg.encode("utf-8"),
                          callback=metahandler.register(oper, surl))

        if not xrd_st.ok:
            __ = metahandler.wait(oper)
            err_msg = "Failed query chown for path={0}".format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)

        # Send the chmod async request
        mode = int(dict_meta['mode'], 8)  # mode is saved in octal format
        arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chmod&mode=",
                       str(mode)])
        xrd_st = fs.query(QueryCode.OPAQUEFILE, arg.encode("utf-8"),
                          callback=metahandler.register(oper, surl))

        if not xrd_st.ok:
            __ = metahandler.wait(oper)
            err_msg = "Failed query chmod for path={0}".format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)

        # Send the utime async request to set the mtime
        mtime = dict_meta['mtime']
        mtime_sec, mtime_nsec = mtime.split('.', 1)
        arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=utimes",
                       "&tv1_sec=0&tv1_nsec=0&tv2_sec=", mtime_sec,
                       "&tv2_nsec=", mtime_nsec])
        xrd_st = fs.query(QueryCode.OPAQUEFILE, arg.encode("utf-8"),
                          callback=metahandler.register(oper, surl))

        if not xrd_st.ok:
            __ = metahandler.wait(oper)
            # Fixed: this branch reports the utimes request, not chmod
            err_msg = "Failed query utimes for path={0}".format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)

    status = metahandler.wait(oper)

    if status:
        t1 = time.time()
        self.logger.info("TIMING_update_file_access={0} sec".format(t1 - t0))
    else:
        err_msg = "Failed update file access"
        self.logger.error(err_msg)
        raise IOError(err_msg)