# Test excerpt: fs.mv() should raise FSTimeOut when the destination already
# exists and the client keeps retrying past time_out (both time_out and
# sleep_inter are in milliseconds). Assumed imports, based on the code below:
#   from paddle.distributed.fleet.utils.fs import HDFSClient, FSTimeOut
#   import paddle.fluid as fluid
def test_timeout(self):
    fs = HDFSClient(
        "/usr/local/hadoop-2.7.7/",
        None,
        time_out=6 * 1000,
        sleep_inter=100)
    src = "hdfs_test_timeout"
    dst = "new_hdfs_test_timeout"
    fs.delete(dst)
    fs.mkdirs(src)
    fs.mkdirs(dst)
    # dst/src already exists, so moving src into dst cannot succeed
    fs.mkdirs(dst + "/" + src)

    output = ""
    cmd = "{} -mv {} {}".format(fs._base_cmd, src, dst)
    try:
        fs.mv(src, dst, test_exists=False)
        self.fail("can't execute cmd:{} output:{}".format(cmd, output))
    except FSTimeOut as e:
        print("execute mv {} to {} timeout".format(src, dst))

    # running the raw shell command again must also fail
    ret, output = fluid.core.shell_execute_cmd(cmd, 6 * 1000, 2 * 1000)
    self.assertNotEqual(ret, 0)
    print("second mv ret:{} output:{}".format(ret, output))
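# --- Hedged sketch (not part of the original tests): the retry contract that
# test_timeout exercises, reduced to a generic helper. The helper name and the
# "action" callable are illustrative; only the millisecond units and the
# FSTimeOut failure mode come from the test above. Assumes `import time`.
def _retry_until_timeout(action, time_out_ms, sleep_inter_ms):
    deadline = time.time() + time_out_ms / 1000.0
    while not action():  # action() returns True once the command succeeds
        if time.time() >= deadline:
            raise FSTimeOut("timed out after {} ms".format(time_out_ms))
        time.sleep(sleep_inter_ms / 1000.0)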
# Test excerpt for CheckpointSaver; assumed import, based on the code below:
#   from paddle.fluid.incubate.checkpoint.checkpoint_saver import CheckpointSaver
def test(self):
    fs = HDFSClient("/usr/local/hadoop-2.7.7", None)
    dir_path = "./checkpointsaver_test"
    fs.delete(dir_path)

    s = CheckpointSaver(fs)

    # none of these names matches the checkpoint naming pattern
    fs.mkdirs("{}/exe.exe".format(dir_path))
    fs.mkdirs("{}/exe.1".format(dir_path))
    fs.mkdirs("{}/exe".format(dir_path))

    a = s.get_checkpoint_no(dir_path)
    self.assertEqual(len(a), 0)

    # only "__paddle_checkpoint__.0" carries a valid integer suffix
    fs.mkdirs("{}/__paddle_checkpoint__.0".format(dir_path))
    fs.mkdirs("{}/__paddle_checkpoint__.exe".format(dir_path))
    a = s.get_checkpoint_no(dir_path)
    self.assertEqual(len(a), 1)

    # cleaning twice must be safe (idempotent)
    s.clean_redundant_checkpoints(dir_path)
    s.clean_redundant_checkpoints(dir_path)

    fs.delete(dir_path)
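# --- Hedged sketch (not part of the original file): the filter that the test
# above implies for CheckpointSaver.get_checkpoint_no() — keep only directory
# names of the form "__paddle_checkpoint__.<integer>". The helper below works
# on an in-memory list; the real implementation may differ.
def _get_checkpoint_no_sketch(dir_names):
    prefix = "__paddle_checkpoint__."
    nos = []
    for name in dir_names:
        if name.startswith(prefix):
            try:
                # "__paddle_checkpoint__.exe" is rejected here
                nos.append(int(name[len(prefix):]))
            except ValueError:
                pass
    return sorted(nos)

# e.g. _get_checkpoint_no_sketch(
#     ["exe", "__paddle_checkpoint__.0", "__paddle_checkpoint__.exe"]) == [0]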
class TrainEpochRange(SerializableBase):
    def __init__(self,
                 max_epoch_num,
                 name,
                 checkpoint_inter=None,
                 restored=True):
        self._max_epoch_num = max_epoch_num
        self._epoch_no = -1  # current epoch_no
        self._name = name
        self._restored_from = None
        self._exe_status = {}
        self._flag_generated = False

        self._checker = g_checker
        if checkpoint_inter is not None:
            self._save_checkpoint_inter = checkpoint_inter
        else:
            self._save_checkpoint_inter = self._checker.save_checkpoint_inter
        assert self._save_checkpoint_inter >= 0, "checkpoint_inter:{} must >= 0".format(
            self._save_checkpoint_inter)
        self._last_checkpoint_time = time.time()

        self._load_cp_nos = None
        self._checkpoint_epoch_no = None

        if not self._checker.valid():
            return

        self._file_name = "range_train_status"

        if not restored:
            return

        self._checkpoint_path = self._checker.get_range_checkpoint_path(name)

        config = {
            "fs.default.name": self._checker.hdfs_name,
            "hadoop.job.ugi": self._checker.hdfs_ugi
        }

        if self._checker.ce_test:
            config = None

        from paddle.distributed.fleet.utils.fs import HDFSClient
        self._hdfs = HDFSClient(self._checker.hdfs_home, config)

        self._cper = CheckpointSaver(self._hdfs)

        _thread_checker()

        self._get_last_valid_checkpoint()

    def _look_for_valid(self, cp_nos):
        cps = []
        epoch_no = -1
        # walk checkpoint numbers from newest to oldest
        for i in cp_nos[::-1]:
            t = TrainEpochRange(self._max_epoch_num, self.name, restored=False)
            self._cper.load_checkpoint(
                self._checkpoint_path, [t],
                self._checker.trainer_id,
                checkpoint_no=i,
                local_cache_path=self._checker._fs_cache)
            cps.append(t)
            logger.debug("look for valid:{} t:{}".format(i, t._serialize()))
            if epoch_no < 0:
                epoch_no = t._epoch_no
            else:
                if epoch_no - t._epoch_no >= 1:
                    return t, i
        return None, None

    def _get_last_valid_checkpoint(self):
        self._load_cp_nos = self._cper.get_checkpoint_no(self._checkpoint_path)
        logger.info("find checkpoint nos:{}".format(self._load_cp_nos))

        if len(self._load_cp_nos) < 1:
            self._restored_from = CONST_MEMORYINIT
            return

        if g_acp_type == CONST_ACP_TYPE:
            # get the last one
            self._cper.load_checkpoint(
                self._checkpoint_path, [self],
                self._checker.trainer_id,
                local_cache_path=self._checker._fs_cache)
            self._restored_from = CONST_CHECKPOINT
            self._checkpoint_epoch_no = self._epoch_no

            logger.info("load train_epoch_range checkpoint:{}".format(
                self._serialize()))
        elif g_acp_type == CONST_DACP_TYPE:
            t, i = self._look_for_valid(self._load_cp_nos)
            if t is None:
                self._restored_from = CONST_MEMORYINIT
                return

            self._cper.load_checkpoint(
                self._checkpoint_path, [self],
                self._checker.trainer_id,
                checkpoint_no=i,
                local_cache_path=self._checker._fs_cache)

            self._restored_from = CONST_CHECKPOINT
            self._checkpoint_epoch_no = self._epoch_no

            logger.info("load train_epoch_range checkpoint:{}".format(
                self._serialize()))
        else:
            assert False, "not supported acp_type:{}".format(g_acp_type)

    def _to_dict(self):
        d = {
            "max_epoch_num": self._max_epoch_num,
            "epoch_no": self._epoch_no,
            "name": self._name,
            "checkpoint_path": self._checkpoint_path,
            "restored_from": self._restored_from,
            "checkpoint_epoch_no": self._checkpoint_epoch_no
        }
        return d

    def __str__(self):
        return self._serialize([])

    @property
    def name(self):
        return self._name

    def serialize(self, path):
        file_name = "{}/{}".format(path, self._file_name)
        with open(file_name, 'w') as f:
            s = self._serialize()
            f.write(s)

    def _serialize(self, pop_keys=["restored_from", "checkpoint_epoch_no"]):
        # self
        d = self._to_dict()
        for k in pop_keys:
            d.pop(k, None)

        # registered exes
        d["exe_status"] = {}
        e = d["exe_status"]
        for k, t in six.iteritems(self._exe_status):
            e[t._key] = t._serialize()
        return json.dumps(d)

    @property
    def restored_from(self):
        return self._restored_from

    def deserialize(self, path):
        d = None
        file_name = "{}/{}".format(path, self._file_name)
        with open(file_name, 'r') as f:
            d = json.load(f)

        # self
        self._max_epoch_num = d["max_epoch_num"]
        self._epoch_no = d["epoch_no"]
        self._name = d["name"]
        self._checkpoint_path = d["checkpoint_path"]

        # exes status
        e = d["exe_status"]
        for k, v in six.iteritems(e):
            t = ExeTrainStatus()
            t._deserialize(v)
            self._exe_status[k] = t

    def next(self):
        _thread_checker()

        if self._max_epoch_num < 0:
            self._max_epoch_num = sys.maxsize

        assert self._epoch_no >= -1, "self._epoch_no:{} must >= -1".format(
            self._epoch_no)

        self._last_checkpoint_time = time.time()
        start = self._epoch_no + 1
        logger.info("started epoch_no:{} max_epoch_num:{}".format(
            start, self._max_epoch_num))

        for i in range(start, self._max_epoch_num):
            self._epoch_no = i
            yield i

            self.save_checkpoint()

    def get(self):
        return self._epoch_no

    def save_checkpoint(self):
        # not save last one because exe and program can't be restored.
        if self._checker.trainer_id == 0:
            if time.time() - self._last_checkpoint_time >= \
                    self._save_checkpoint_inter:
                if g_acp_type == CONST_ACP_TYPE:
                    # not save the last one
                    if self._max_epoch_num > 0 and self._epoch_no != self._max_epoch_num - 1:
                        self._save_checkpoint()
                elif g_acp_type == CONST_DACP_TYPE:
                    self._save_checkpoint()
                else:
                    assert False, "not supported acp_type:{}".format(
                        g_acp_type)
                self._last_checkpoint_time = time.time()

    def _save_checkpoint(self):
        """
        status => /jobid/xxx_range_xx/range/
        model => /exe/
        """
        if not self._checker.valid():
            return

        e = self._exe_status
        for k, t in six.iteritems(self._exe_status):
            m = PaddleModel(t._exe, t._program)
            p = self._checker.get_exe_checkpoint_path(t._hash_key)
            t._epoch_no = self.get()
            path, checkpoint_no = self._cper.save_checkpoint(
                p, [m],
                self._checker.trainer_id,
                local_cache_path=self._checker._fs_cache)
            # index info
            t._checkpoint_path = path
            t._checkpoint_no = checkpoint_no

            e[t._key] = t

            logger.debug("save executor checkpoint:{}".format(t._serialize()))

        if len(self._exe_status) > 0:
            self._cper.save_checkpoint(
                self._checkpoint_path, [self],
                local_cache_path=self._checker._fs_cache)
            logger.info("save train_epoch_range checkpoint:{}".format(
                self._serialize()))

            self._generate_flag()

    def _generate_flag(self):
        if self._flag_generated:
            return

        name = "can_be_auto_checkpoint.flag"
        path = self._checker.get_job_path() + "/" + name
        logger.info("this job can_be_auto_checkpoint")
        self._hdfs.mkdirs(self._checker.get_job_path())
        self._hdfs.touch(path, exist_ok=True)

        self._flag_generated = True
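# --- Hedged usage sketch (not part of the original file): how a training loop
# might drive TrainEpochRange. "train_one_epoch" and the surrounding setup are
# hypothetical; only TrainEpochRange and its next()/save_checkpoint() contract
# come from the class above.
def _train_with_auto_checkpoint(max_epoch_num):
    tr = TrainEpochRange(
        max_epoch_num, name="sample_range", checkpoint_inter=900)
    for epoch_no in tr.next():  # generator: yields the epoch, then checkpoints
        train_one_epoch(epoch_no)  # hypothetical user-defined step
    # save_checkpoint() runs inside next() after each yielded epoch, throttled
    # by checkpoint_inter seconds, and only writes from trainer_id == 0.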