from argparse import ArgumentParser
import pickle
import sys
from Util import betterRepr  # betterRepr comes from Util, as in the other snippets here


def main():
    argparser = ArgumentParser()
    argparser.add_argument("file")
    args = argparser.parse_args()
    try:
        o = pickle.load(open(args.file, "rb"))
        print(betterRepr(o))
    except BrokenPipeError:
        # E.g. when the output is piped into `head` and the pipe is closed early.
        print("BrokenPipeError", file=sys.stderr)
        sys.exit(1)
def save(self):
    if not self.filename:
        return
    # First write to a temp file, to be sure that the write happens without errors.
    # Otherwise, it could happen that we delete the old existing file, then
    # some error happens (e.g. disk quota), and we lose the newbob data.
    # Losing that data is very bad because it basically means that we have to redo all the training.
    tmp_filename = self.filename + ".new_tmp"
    f = open(tmp_filename, "w")
    f.write(betterRepr(self.epochData))
    f.write("\n")
    f.close()
    os.rename(tmp_filename, self.filename)
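The comment above describes a write-to-temp-then-rename pattern: if the write fails halfway (e.g. disk quota), the previous file is never touched. A minimal standalone sketch of the same idea, using the hypothetical helper name atomic_save_repr (not part of the original code) and assuming a POSIX-style os.rename that atomically replaces an existing destination on the same filesystem:

import os

def atomic_save_repr(filename, obj, serialize=repr):
    """Write serialize(obj) to filename, keeping the old file intact if anything fails."""
    tmp_filename = filename + ".new_tmp"
    with open(tmp_filename, "w") as f:
        f.write(serialize(obj))
        f.write("\n")
    # On POSIX, os.rename atomically replaces an existing destination file;
    # on Windows, os.replace would be needed instead.
    os.rename(tmp_filename, filename)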
def calc_fullsum_scores(meta):
    from Util import betterRepr
    fn = Globals.get_fullsum_scores_filename(**meta)
    if os.path.exists(fn):
        print("Existing fullsum scores filename:", fn)
        print("content:\n%s\n" % open(fn).read())
        return fn
    # We assume that we have updated/extended the network topology.
    assert "output_fullsum" in Globals.engine.network.layers
    # Run it, and collect stats.
    analyzer = Globals.engine.analyze(data=Globals.dataset, statistics=None)
    print("fullsum score:", analyzer.score["cost:output_fullsum"])
    print("Write all to:", fn)
    with open(fn, "w") as f:
        f.write(betterRepr({
            "scores": analyzer.score,
            "errors": analyzer.error,
            "stats": analyzer.stats,
            "num_frames": analyzer.num_frames_accumulated}))
    return fn
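Since the scores are written as a betterRepr string of a plain dict, the file can presumably be read back by evaluating its contents. A minimal sketch under that assumption (the helper name load_fullsum_scores is hypothetical and not part of the original code):

def load_fullsum_scores(fn):
    # Assumes betterRepr emits a valid Python expression for the saved dict.
    with open(fn) as f:
        return eval(f.read())  # dict with "scores", "errors", "stats", "num_frames"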
def _save_info(self):
    filename = self._info_filename
    from Util import betterRepr
    with open(filename, "w") as f:
        f.write("%s\n" % betterRepr(self._info_dict))