def main(cmd):
    """Restore a training checkpoint and re-save it at a chosen global step.

    Loads the YAML training config from ``cmd.cfg`` (an open file-like
    object), ensures the run's checkpoint directory exists, restores the
    latest TFProcess state, then overwrites the global step with
    ``cmd.start`` and writes a checkpoint numbered accordingly.

    Args:
        cmd: parsed CLI arguments; must expose ``cfg`` (readable file
            handle for the YAML config) and ``start`` (int step number).
    """
    cfg = yaml.safe_load(cmd.cfg.read())
    # Echo the effective config so the run is reproducible from logs.
    print(yaml.dump(cfg, default_flow_style=False))

    root_dir = os.path.join(cfg['training']['path'], cfg['name'])
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(root_dir, exist_ok=True)

    tfprocess = TFProcess(cfg)
    tfprocess.init_net_v2()
    tfprocess.restore_v2()

    # Force the restored network's step counter to the requested value
    # and persist a checkpoint under that number.
    start_from = cmd.start
    tfprocess.global_step.assign(start_from)
    tfprocess.manager.save(checkpoint_number=start_from)
# Ad-hoc script: load a training config, replace the weights of a
# TFProcess network from a protobuf file, then point the dataset globs
# at local data and list the matching directories.
#
# NOTE(review): `yaml`, `TFProcess`, `os`, and `args` are assumed to be
# imported/defined earlier in the file — confirm before running standalone.
from net import Net
from chunkparser import ChunkParser
import glob
import multiprocessing as mp

cfg_path = "128x10-t60-2.yaml"
net_path = "128x10-t60-2-5300.pb.gz"
ignore_errors = False  # abort on weight-shape mismatches when False

with open(cfg_path, 'r') as f:
    cfg = yaml.safe_load(f.read())
# Echo the effective config for reproducibility.
print(yaml.dump(cfg, default_flow_style=False))

# START_FROM = args.start

tfp = TFProcess(cfg)
tfp.init_net_v2()
tfp.replace_weights_v2(net_path, ignore_errors)
# tfp.global_step.assign(START_FROM)

# root_dir = os.path.join(cfg['training']['path'], cfg['name'])
# if not os.path.exists(root_dir):
#     os.makedirs(root_dir)
#
# tfp.manager.save(checkpoint_number=START_FROM)
# print("Wrote model to {}".format(tfp.manager.latest_checkpoint))

# Redirect both splits at the local data directories (trailing slash in
# the pattern matches directories only).
cfg['dataset']['input_train'] = "tf/data/*/"
cfg['dataset']['input_test'] = "tf/data/*/"

# Sanity check: show which data directories the glob actually resolves to.
for d in glob.glob(cfg['dataset']['input_train']):
    print(d)