def main(args):
    # Collective (multi-trainer) setup: every trainer runs this same script.
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)

    # Each trainer pins the GPU assigned to it by the distributed launcher.
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id)

    # startup_prog holds parameter-init ops; valid_prog holds the eval network.
    startup_prog = fluid.Program()
    valid_prog = fluid.Program()
    valid_dataloader, valid_fetchs = program.build(
        config, valid_prog, startup_prog, is_train=False)
    # Prune content that is irrelevant for inference from the eval graph.
    valid_prog = valid_prog.clone(for_test=True)

    exe = fluid.Executor(place)
    # Run parameter initialization exactly once.
    exe.run(startup_prog)

    # Load weights from a checkpoint or pretrained model.
    init_model(config, valid_prog, exe)

    valid_reader = Reader(config, 'valid')()
    valid_dataloader.set_sample_list_generator(valid_reader, place)

    compiled_valid_prog = program.compile(config, valid_prog)
    # epoch_id -1 marks a standalone evaluation pass.
    program.run(valid_dataloader, exe, compiled_valid_prog, valid_fetchs, -1,
                'eval', config)
def main(args):
    """Evaluate a model once on the validation set (single-machine)."""
    config = get_config(args.config, overrides=args.override, show=True)

    # Pick all visible devices; the executor itself runs on the first one.
    if config.get("use_gpu", True):
        places = fluid.cuda_places()
    else:
        places = fluid.cpu_places()

    # startup program for parameter init, a separate program for the network.
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    eval_loader, eval_fetchs = program.build(
        config,
        eval_prog,
        startup_prog,
        is_train=False,
        is_distributed=False)
    # Drop everything irrelevant to inference from the graph.
    eval_prog = eval_prog.clone(for_test=True)

    executor = fluid.Executor(places[0])
    executor.run(startup_prog)

    # Load weights from a checkpoint or pretrained model.
    init_model(config, eval_prog, executor)

    eval_loader.set_sample_list_generator(Reader(config, 'valid')(), places)

    compiled_prog = program.compile(config, eval_prog)
    # epoch_id -1 marks a standalone evaluation pass.
    program.run(eval_loader, executor, compiled_prog, eval_fetchs, -1, 'eval')
def main(args):
    # Collective distributed training: one process per device.
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)

    # assign the place
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id)

    # startup_prog is used to do some parameter init work,
    # and train prog is used to hold the network
    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    train_dataloader, train_fetchs = program.build(config,
                                                   train_prog,
                                                   startup_prog,
                                                   is_train=True)
    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(config,
                                                       valid_prog,
                                                       startup_prog,
                                                       is_train=False)
        # clone to prune some content which is irrelevant in valid_prog
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = fluid.Executor(place=place)
    # only run startup_prog once to init
    exe.run(startup_prog)

    # load model from checkpoint or pretrained model
    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, place)

    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, place)
        compiled_valid_prog = program.compile(config, valid_prog)

    # fleet rewrote train_prog; execute its distributed main program.
    compiled_train_prog = fleet.main_program
    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train')
        # 2. validate with validate dataset
        if config.validate and epoch_id % config.valid_interval == 0:
            program.run(valid_dataloader, exe, compiled_valid_prog,
                        valid_fetchs, epoch_id, 'valid')
        # 3. save the persistable model
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.ARCHITECTURE["name"])
            save_model(train_prog, model_path, epoch_id)
def main(args, return_dict=None):
    """Run one validation pass in dygraph mode and return the top-1 accuracy.

    Args:
        args: parsed CLI namespace with ``config`` and ``override`` attributes.
        return_dict: optional dict (e.g. a multiprocessing.Manager dict) that
            receives the result under key "top1_acc". Previously this was a
            mutable default argument (``return_dict={}``), which is shared
            across calls — a classic Python pitfall; a ``None`` sentinel is
            backward-compatible for all callers.

    Returns:
        The top-1 accuracy reported by ``program.run``.
    """
    if return_dict is None:
        return_dict = {}

    config = get_config(args.config, overrides=args.override, show=True)
    config.mode = "valid"

    # assign place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    # Run data-parallel only when launched with more than one trainer.
    trainer_num = paddle.distributed.get_world_size()
    config["use_data_parallel"] = trainer_num != 1
    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    if config["use_data_parallel"]:
        net = paddle.DataParallel(net)

    # Load weights from a checkpoint or pretrained model (no optimizer state).
    init_model(config, net, optimizer=None)
    valid_dataloader = Reader(config, 'valid', places=place)()

    net.eval()
    with paddle.no_grad():
        top1_acc = program.run(valid_dataloader, config, net, None, None, 0,
                               'valid')
    return_dict["top1_acc"] = top1_acc
    return top1_acc
def teleportrecieve():
    """Receive the sender's classical bits and finish the teleport circuit."""
    payload = request.json

    # Receiver half of the protocol; corrections on qubit 3 are driven by
    # the CLASSICAL instructions below.
    base = """QUBITS 4
MEASURE 0
MEASURE 1
H 2
CNOT 2 3
CLASSICAL 1 1 1
X 3
CLASSICAL 0 1 1
Z 3"""
    lines = base.splitlines()

    # Prepare qubits 0/1 to reproduce the sender's measured classical bits;
    # insertion order matches the original (q0's gate first, both at index 1).
    if payload["q0"] == 1:
        lines.insert(1, "X 0")
    if payload["q1"] == 1:
        lines.insert(1, "X 1")

    source = "\n".join(lines)
    wvf, msg = program.run(source)
    msg = isolate_qubit(wvf, 3)
    return jsonify({"program": source, 'wvf': msg})
def teleportsend():
    """Run the sender half of the teleportation protocol.

    Measures the sender's two qubits, POSTs the classical bits to the
    /api/teleportrecieve endpoint, and renders the receiver's reply as HTML.
    """
    sendit = request.form.get('sendit')
    # Idiom fix: `x is not None` instead of `not x is None`.
    if sendit is not None:
        url = request.url_root + 'api/teleportrecieve'
        p = """QUBITS 3
H 1
CNOT 1 2
CNOT 0 1
H 0
MEASURE 0
MEASURE 1"""
        wvf, msg = program.run(p)
        # Very hacky way of doing this... @TODO make this better
        # Parse the simulator log for both measurement outcomes; they can
        # appear in either qubit order. Raw string fixes the invalid `\d`
        # escape sequence (SyntaxWarning on Python 3.12+).
        m = re.findall(r'====== MEASURE qubit (\d) : (\d)', msg)
        if m[0][0] == '0':
            q0 = int(m[0][1])
            q1 = int(m[1][1])
        else:
            q1 = int(m[0][1])
            q0 = int(m[1][1])
        print(url)
        res = requests.post(url, json={"q0": q0, "q1": q1})
        if res.ok:
            j = res.json()
            # Re-render the receiver's program text as an HTML ordered list.
            p = j['program']
            p_split = p.splitlines()
            p_str = "<ol>"
            for i in p_split:
                p_str += '<li><samp class="code-block">' + i + '</li>'
            p_str += '</ol>'
            j['program'] = p_str
            j['a'] = f"Qubit 0 : {q0}<br />Qubit 1 : {q1}"
            return (jsonify(j))
def main(args):
    # Collective distributed training: one process per device.
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)
    # env.place() resolves the device this trainer should use.
    place = env.place()

    # startup_prog holds parameter-init ops; train_prog holds the network.
    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    train_dataloader, train_fetchs = program.build(
        config, train_prog, startup_prog, is_train=True)
    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(
            config, valid_prog, startup_prog, is_train=False)
        # Prune train-only content from the evaluation graph.
        valid_prog = valid_prog.clone(for_test=True)

    exe = fluid.Executor(place)
    # Run parameter initialization exactly once.
    exe.run(startup_prog)
    # Load weights from a checkpoint or pretrained model.
    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, place)
    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, place)
        compiled_valid_prog = program.compile(config, valid_prog)

    # fleet rewrote train_prog; execute its distributed main program.
    compiled_train_prog = fleet.main_program
    for epoch_id in range(config.epochs):
        # 1. train for one epoch
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train')
        # 2. periodically evaluate on the validation set
        if config.validate and epoch_id % config.valid_interval == 0:
            program.run(valid_dataloader, exe, compiled_valid_prog,
                        valid_fetchs, epoch_id, 'valid')
        # 3. periodically persist the parameters
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.architecture)
            save_model(train_prog, model_path, epoch_id)
def main():
    """Entry point: assemble settings from config/env/args, then run."""
    global settings

    def _say(message):
        # Progress chatter, printed only when verbose output is enabled.
        if VERBOSITY_ENABLED:
            print (message)

    if CONFIG_ENABLED:
        _say("Configuring options ...")
        settings = config.load_xml_config(settings)
        if VERBOSITY_ENABLED:
            print ("Configuration complete.")
            pprint(settings)
    else:
        _say("Configuration disabled. Using defaults.")
        settings = {"logfile": "{}.log", "tempdir": "tmp"}

    if ENV_VARS_ENABLED:
        _say("Checking environment ...")
        settings = envvars(settings)
        _say("Environment variables loaded.")

    if ARGS_ENABLED:
        _say("Parsing command line arguments ...")
        settings = getargs(settings)
        _say("Command line arguments parsed.")

    if LOGGING_ENABLED:
        _say("Setting up log file ...")
        # A "{}" placeholder in the logfile name is filled with the program name.
        if re.search("{}", settings["logfile"]):
            settings["logfile"] = settings["logfile"].format(PROGRAM_NAME)
        LOG = setuplog(settings)
        LOG.info("Logging set up.")
        _say("Logging set up.")
        # Flush and close log handlers when the interpreter exits.
        atexit.register(logging.shutdown)

    program.run(settings)
    sys.exit(0)
def main():
    """Parse CLI options for the stellar-structure integration and run it.

    Converts the user-facing units (solar masses, nuclear densities) to the
    internal units before handing the namespace to ``program.run``.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-i", "--max_iterations", default=10000, type=int,
        help="maximum number of iterations to reach solar mass. If more iterations are needed, the integration is stopped and rho is updated.")
    arg_parser.add_argument(
        "-m", "--goal_mass", default=1.5, type=float,
        help="Goal mass in solar masses")
    arg_parser.add_argument(
        "-r", "--rho_0", default=1., type=float,
        help="Density at r=0 in nuclear densities")
    arg_parser.add_argument(
        "-K", default=1.98183e-15, type=float,
        help="Adiabatic coefficient in km/m^2")
    arg_parser.add_argument(
        "-y", "--gamma", default=2.75, type=float,
        help="polytropic index")
    arg_parser.add_argument(
        "-t", "--tolerance", default=0.0001, type=float,
        help="Allowed deviation from goal mass")

    parsed = arg_parser.parse_args()
    # Convert from user-facing units to internal ones.
    parsed.goal_mass = parsed.goal_mass * c.SOLAR_MASS
    parsed.rho_0 = parsed.rho_0 * c.NUCLEAR_DENSITY

    program.run(parsed)
    return 0
def main(args, return_dict=None):
    """Validate a model in dygraph mode; supports single- and multi-label.

    Args:
        args: parsed CLI namespace with ``config`` and ``override`` attributes.
        return_dict: optional dict (e.g. a multiprocessing.Manager dict) that
            receives "top1_acc" or "mean average precision". Previously this
            was a mutable default argument (``return_dict={}``), shared across
            calls; the ``None`` sentinel is backward-compatible.

    Returns:
        Top-1 accuracy (single-label) or mAP (multi-label).
    """
    if return_dict is None:
        return_dict = {}

    config = get_config(args.config, overrides=args.override, show=True)
    config.mode = "valid"

    # assign place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    multilabel = config.get("multilabel", False)

    # Run data-parallel only when launched with more than one trainer.
    trainer_num = paddle.distributed.get_world_size()
    config["use_data_parallel"] = trainer_num != 1
    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    init_model(config, net, optimizer=None)
    valid_dataloader = Reader(config, 'valid', places=place)()

    net.eval()
    with paddle.no_grad():
        if not multilabel:
            top1_acc = program.run(valid_dataloader, config, net, None, None,
                                   0, 'valid')
            return_dict["top1_acc"] = top1_acc
            return top1_acc

        # Multi-label path: collect sigmoid outputs over the whole set and
        # compute mean average precision against the targets.
        all_outs = []
        targets = []
        # The loop index from enumerate() was unused — iterate directly.
        for batch in valid_dataloader():
            feeds = program.create_feeds(batch, False, config.classes_num,
                                         multilabel)
            out = net(feeds["image"])
            out = F.sigmoid(out)
            use_distillation = config.get("use_distillation", False)
            if use_distillation:
                # NOTE(review): with distillation only the second output is
                # scored — confirm it is the student head.
                out = out[1]
            all_outs.extend(list(out.numpy()))
            targets.extend(list(feeds["label"].numpy()))
        all_outs = np.array(all_outs)
        targets = np.array(targets)
        mAP = mean_average_precision(all_outs, targets)
        return_dict["mean average precision"] = mAP
        return mAP
def add_message(uuid):
    """Compile the posted program text and run it, returning simulator output.

    NOTE(review): the request body is written to disk and handed to shell
    commands without sanitization — this endpoint trusts its callers.
    """
    content = request.json
    print(content['mytext'])
    p = content['mytext']
    # Context managers ensure the file handles are closed even on error
    # (the original opened/closed manually and left dead os.chmod lines).
    with open("compiler/program.txt", "w") as text_file:
        text_file.write(p)
    # NOTE(review): these commands reference program.txt / a.ir relative to
    # the working directory while the files above live under compiler/ —
    # confirm the expected cwd.
    os.system("bash -c './compiler/compile.lisp program.txt'")
    os.system("bash -c 'python preprocessor.py a.ir'")
    with open('compiler/a.eg', 'r') as myfile:
        p = myfile.read()
    wvf, msg = program.run(p)
    return jsonify({"results": msg})
def run(path):
    """Execute the quantum program in *path* and forward its measured bits.

    Relies on a module-level ``url`` naming the receiver endpoint —
    presumably defined elsewhere in this file (verify).
    """
    with open(path, 'r') as myfile:
        data = myfile.read()
    wvf, msg = program.run(data)
    print(msg)
    # Very hacky way of doing this... @TODO make this better
    # Raw string fixes the invalid `\d` escape sequence (SyntaxWarning on
    # Python 3.12+). The two measurements may appear in either qubit order.
    m = re.findall(r'====== MEASURE qubit (\d) : (\d)', msg)
    if m[0][0] == '0':
        q0 = int(m[0][1])
        q1 = int(m[1][1])
    else:
        q1 = int(m[0][1])
        q0 = int(m[1][1])
    res = requests.post(url, json={"q0": q0, "q1": q1})
    if res.ok:
        j = res.json()
        print(j["results"])
def test_bell_states():
    """Each of the four Bell states should come out of the EPR circuit."""
    #@TODO do this with matrices values instead of strings
    state_prep = {
        "B00": "",
        "B01": "X 1\n",
        "B10": "X 0\n",
        "B11": "X 0\nX 1\n",
    }
    bell_states = {
        "B00": "0.71|00> + 0.71|11>",
        "B01": "0.71|01> + 0.71|10>",
        "B10": "0.71|00> + -0.71|11>",
        "B11": "0.71|01> + -0.71|10>",
    }
    epr_program = "H 0\nCNOT 0 1"

    # Prepend each preparation to the EPR circuit and check the wavefunction.
    for name, prep in state_prep.items():
        wvf = run("QUBITS 2\n" + prep + epr_program)
        assert wavefunction(wvf) == bell_states[name]
def main(args):
    # Fix the random seed for reproducibility.
    paddle.seed(12345)
    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    # Use data parallelism only when launched with more than one trainer.
    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    optimizer, lr_scheduler = program.create_optimizer(
        config, parameter_list=net.parameters())

    # Keep a handle to the raw net for eval/saving; train through dp_net.
    dp_net = net
    if config["use_data_parallel"]:
        find_unused_parameters = config.get("find_unused_parameters", False)
        dp_net = paddle.DataParallel(
            net, find_unused_parameters=find_unused_parameters)

    # load model from checkpoint or pretrained model
    init_model(config, net, optimizer)

    train_dataloader = Reader(config, 'train', places=place)()
    if config.validate:
        valid_dataloader = Reader(config, 'valid', places=place)()

    last_epoch_id = config.get("last_epoch", -1)
    best_top1_acc = 0.0  # best top1 acc record
    best_top1_epoch = last_epoch_id

    vdl_writer_path = config.get("vdl_dir", None)
    vdl_writer = None
    if vdl_writer_path:
        from visualdl import LogWriter
        vdl_writer = LogWriter(vdl_writer_path)

    # Ensure that the vdl log file can be closed normally
    try:
        for epoch_id in range(last_epoch_id + 1, config.epochs):
            net.train()
            # 1. train with train dataset
            program.run(train_dataloader, config, dp_net, optimizer,
                        lr_scheduler, epoch_id, 'train', vdl_writer)

            # 2. validate with validate dataset
            if config.validate and epoch_id % config.valid_interval == 0:
                net.eval()
                with paddle.no_grad():
                    top1_acc = program.run(valid_dataloader, config, net,
                                           None, None, epoch_id, 'valid',
                                           vdl_writer)
                if top1_acc > best_top1_acc:
                    best_top1_acc = top1_acc
                    best_top1_epoch = epoch_id
                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(net, optimizer, model_path, "best_model")
                message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                    best_top1_acc, best_top1_epoch)
                logger.info(message)

            # 3. save the persistable model
            if epoch_id % config.save_interval == 0:
                model_path = os.path.join(config.model_save_dir,
                                          config.ARCHITECTURE["name"])
                save_model(net, optimizer, model_path, epoch_id)
    except Exception as e:
        # NOTE(review): the exception is logged and swallowed so the writer
        # can be closed below — training errors will not propagate; confirm
        # this is intended.
        logger.error(e)
    finally:
        vdl_writer.close() if vdl_writer else None
def main(args):
    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    places = fluid.cuda_places() if use_gpu else fluid.cpu_places()

    # startup_prog is used to do some parameter init work,
    # and train prog is used to hold the network
    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    best_top1_acc = 0.0  # best top1 acc record

    # When EMA is enabled, program.build additionally returns the EMA object.
    if not config.get('use_ema'):
        train_dataloader, train_fetchs = program.build(config,
                                                       train_prog,
                                                       startup_prog,
                                                       is_train=True,
                                                       is_distributed=False)
    else:
        train_dataloader, train_fetchs, ema = program.build(
            config,
            train_prog,
            startup_prog,
            is_train=True,
            is_distributed=False)

    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(config,
                                                       valid_prog,
                                                       startup_prog,
                                                       is_train=False,
                                                       is_distributed=False)
        # clone to prune some content which is irrelevant in valid_prog
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = fluid.Executor(places[0])
    # Parameter initialization
    exe.run(startup_prog)

    # load model from 1. checkpoint to resume training, 2. pretrained model to finetune
    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, places)

    compiled_train_prog = program.compile(config, train_prog,
                                          train_fetchs['loss'][0].name)

    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, places)
        # Share compilation state with the train program.
        compiled_valid_prog = program.compile(config,
                                              valid_prog,
                                              share_prog=compiled_train_prog)

    if args.vdl_dir:
        from visualdl import LogWriter
        vdl_writer = LogWriter(args.vdl_dir)
    else:
        vdl_writer = None

    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train', vdl_writer)
        # 2. validate with validate dataset
        if config.validate and epoch_id % config.valid_interval == 0:
            if config.get('use_ema'):
                logger.info(logger.coloring("EMA validate start..."))
                with ema.apply(exe):
                    top1_acc = program.run(valid_dataloader, exe,
                                           compiled_valid_prog, valid_fetchs,
                                           epoch_id, 'valid')
                logger.info(logger.coloring("EMA validate over!"))
            # NOTE(review): this run re-validates with the raw (non-EMA)
            # weights and overwrites the EMA accuracy above — confirm intended.
            top1_acc = program.run(valid_dataloader, exe, compiled_valid_prog,
                                   valid_fetchs, epoch_id, 'valid')
            if top1_acc > best_top1_acc:
                best_top1_acc = top1_acc
                message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                    best_top1_acc, epoch_id)
                logger.info("{:s}".format(logger.coloring(message, "RED")))
                if epoch_id % config.save_interval == 0:
                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(train_prog, model_path,
                               "best_model_in_epoch_" + str(epoch_id))
        # 3. save the persistable model
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.ARCHITECTURE["name"])
            save_model(train_prog, model_path, epoch_id)
def main(args):
    # Fix the random seed for reproducibility.
    paddle.seed(12345)
    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    use_gpu = config.get("use_gpu", True)
    place = paddle.set_device('gpu' if use_gpu else 'cpu')

    # Use data parallelism only when launched with more than one trainer.
    trainer_num = paddle.distributed.get_world_size()
    use_data_parallel = trainer_num != 1
    config["use_data_parallel"] = use_data_parallel

    if config["use_data_parallel"]:
        paddle.distributed.init_parallel_env()

    net = program.create_model(config.ARCHITECTURE, config.classes_num)
    optimizer, lr_scheduler = program.create_optimizer(
        config, parameter_list=net.parameters())

    if config["use_data_parallel"]:
        net = paddle.DataParallel(net)

    # load model from checkpoint or pretrained model
    init_model(config, net, optimizer)

    train_dataloader = Reader(config, 'train', places=place)()
    if config.validate:
        valid_dataloader = Reader(config, 'valid', places=place)()

    last_epoch_id = config.get("last_epoch", -1)
    best_top1_acc = 0.0  # best top1 acc record
    best_top1_epoch = last_epoch_id
    for epoch_id in range(last_epoch_id + 1, config.epochs):
        net.train()
        # 1. train with train dataset
        program.run(train_dataloader, config, net, optimizer, lr_scheduler,
                    epoch_id, 'train')

        # 2. validate with validate dataset
        if config.validate and epoch_id % config.valid_interval == 0:
            net.eval()
            with paddle.no_grad():
                top1_acc = program.run(valid_dataloader, config, net, None,
                                       None, epoch_id, 'valid')
            if top1_acc > best_top1_acc:
                best_top1_acc = top1_acc
                best_top1_epoch = epoch_id
                if epoch_id % config.save_interval == 0:
                    model_path = os.path.join(config.model_save_dir,
                                              config.ARCHITECTURE["name"])
                    save_model(net, optimizer, model_path, "best_model")
            message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                best_top1_acc, best_top1_epoch)
            logger.info("{:s}".format(logger.coloring(message, "RED")))

        # 3. save the persistable model
        if epoch_id % config.save_interval == 0:
            model_path = os.path.join(config.model_save_dir,
                                      config.ARCHITECTURE["name"])
            save_model(net, optimizer, model_path, epoch_id)
def test_returns_hello(self):
    """run() must return the greeting string "hello"."""
    outcome = run()
    self.assertEqual(outcome, "hello")
def main(args):
    """Distributed static-graph training loop with optional EMA validation."""
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)
    # assign the place
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id)

    # startup_prog is used to do some parameter init work,
    # and train prog is used to hold the network
    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    best_top1_acc = 0.0  # best top1 acc record

    # When EMA is enabled, program.build additionally returns the EMA object.
    if not config.get('use_ema'):
        train_dataloader, train_fetchs = program.build(config,
                                                       train_prog,
                                                       startup_prog,
                                                       is_train=True)
    else:
        train_dataloader, train_fetchs, ema = program.build(config,
                                                            train_prog,
                                                            startup_prog,
                                                            is_train=True)

    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(config,
                                                       valid_prog,
                                                       startup_prog,
                                                       is_train=False)
        # clone to prune some content which is irrelevant in valid_prog
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = fluid.Executor(place)
    # Parameter initialization
    exe.run(startup_prog)

    # load model from 1. checkpoint to resume training, 2. pretrained model
    init_model(config, train_prog, exe)

    train_reader = Reader(config, 'train')()
    train_dataloader.set_sample_list_generator(train_reader, place)
    if config.validate:
        valid_reader = Reader(config, 'valid')()
        valid_dataloader.set_sample_list_generator(valid_reader, place)
        compiled_valid_prog = program.compile(config, valid_prog)

    # fleet rewrote train_prog; execute its distributed main program.
    compiled_train_prog = fleet.main_program
    vdl_writer = LogWriter(args.vdl_dir) if args.vdl_dir else None

    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train', vdl_writer)
        # Only trainer 0 validates and saves checkpoints.
        if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
            # 2. validate with validate dataset
            if config.validate and epoch_id % config.valid_interval == 0:
                if config.get('use_ema'):
                    logger.info(logger.coloring("EMA validate start..."))
                    # BUGFIX: the original called train_fetchs('ema'), but
                    # train_fetchs is the fetch mapping and is not callable;
                    # the EMA object returned by program.build provides the
                    # apply() context manager (as in this file's sibling
                    # training loops).
                    with ema.apply(exe):
                        top1_acc = program.run(valid_dataloader, exe,
                                               compiled_valid_prog,
                                               valid_fetchs, epoch_id,
                                               'valid')
                    logger.info(logger.coloring("EMA validate over!"))
                # Also validate with the raw (non-EMA) weights; this result
                # drives the "best" bookkeeping below.
                top1_acc = program.run(valid_dataloader, exe,
                                       compiled_valid_prog, valid_fetchs,
                                       epoch_id, 'valid')
                if top1_acc > best_top1_acc:
                    best_top1_acc = top1_acc
                    message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                        best_top1_acc, epoch_id)
                    logger.info("{:s}".format(logger.coloring(message, "RED")))
                    if epoch_id % config.save_interval == 0:
                        model_path = os.path.join(config.model_save_dir,
                                                  config.ARCHITECTURE["name"])
                        save_model(train_prog, model_path,
                                   "best_model_in_epoch_" + str(epoch_id))
            # 3. save the persistable model
            if epoch_id % config.save_interval == 0:
                model_path = os.path.join(config.model_save_dir,
                                          config.ARCHITECTURE["name"])
                save_model(train_prog, model_path, epoch_id)
def logout():
    """Delegate the logout flow to the program module."""
    program.run()
import program

# Run the application only when executed as a script, not when imported.
if __name__ == "__main__":
    program.run()
from program import run

# Run the application only when executed as a script, not when imported.
if __name__ == "__main__":
    run()
def main(args):
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    config = get_config(args.config, overrides=args.override, show=True)

    # cuDNN/AMP flags used for mixed-precision training.
    use_fp16 = config.get('use_fp16', False)
    if use_fp16:
        AMP_RELATED_FLAGS_SETTING = {
            'FLAGS_cudnn_exhaustive_search': 1,
            'FLAGS_conv_workspace_size_limit': 4000,
            'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
            'FLAGS_max_inplace_grad_add': 8,
        }
        os.environ['FLAGS_cudnn_batchnorm_spatial_persistent'] = '1'
        paddle.fluid.set_flags(AMP_RELATED_FLAGS_SETTING)

    # assign the place
    gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
    place = fluid.CUDAPlace(gpu_id)

    # startup_prog is used to do some parameter init work,
    # and train prog is used to hold the network
    startup_prog = fluid.Program()
    train_prog = fluid.Program()

    best_top1_acc = 0.0  # best top1 acc record

    # When EMA is enabled, program.build additionally returns the EMA object.
    if not config.get('use_ema'):
        train_dataloader, train_fetchs = program.build(
            config, train_prog, startup_prog, is_train=True)
    else:
        train_dataloader, train_fetchs, ema = program.build(
            config, train_prog, startup_prog, is_train=True)

    if config.validate:
        valid_prog = fluid.Program()
        valid_dataloader, valid_fetchs = program.build(
            config, valid_prog, startup_prog, is_train=False)
        # clone to prune some content which is irrelevant in valid_prog
        valid_prog = valid_prog.clone(for_test=True)

    # create the "Executor" with the statement of which place
    exe = fluid.Executor(place)
    # Parameter initialization
    exe.run(startup_prog)

    # load model from 1. checkpoint to resume training, 2. pretrained model to finetune
    init_model(config, train_prog, exe)

    if not config.get('use_dali', False):
        train_reader = Reader(config, 'train')()
        train_dataloader.set_sample_list_generator(train_reader, place)
        if config.validate:
            valid_reader = Reader(config, 'valid')()
            valid_dataloader.set_sample_list_generator(valid_reader, place)
            compiled_valid_prog = program.compile(config, valid_prog)
    else:
        import dali
        train_dataloader = dali.train(config)
        # NOTE(review): the outer guard is truthy only for non-zero trainer
        # ids while the inner check requires id == 0, so this branch looks
        # unreachable — confirm the intended trainer-0 condition.
        if config.validate and int(os.getenv("PADDLE_TRAINER_ID", 0)):
            if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
                valid_dataloader = dali.val(config)
                compiled_valid_prog = program.compile(config, valid_prog)

    # fleet rewrote train_prog; execute its distributed main program.
    compiled_train_prog = fleet.main_program

    vdl_writer = None
    if args.vdl_dir:
        # visualdl requires python3; it is silently disabled otherwise.
        if version_info.major == 2:
            logger.info(
                "visualdl is just supported for python3, so it is disabled in python2..."
            )
        else:
            from visualdl import LogWriter
            vdl_writer = LogWriter(args.vdl_dir)

    for epoch_id in range(config.epochs):
        # 1. train with train dataset
        program.run(train_dataloader, exe, compiled_train_prog, train_fetchs,
                    epoch_id, 'train', config, vdl_writer)
        # Only trainer 0 validates and saves checkpoints.
        if int(os.getenv("PADDLE_TRAINER_ID", 0)) == 0:
            # 2. validate with validate dataset
            if config.validate and epoch_id % config.valid_interval == 0:
                if config.get('use_ema'):
                    logger.info(logger.coloring("EMA validate start..."))
                    with ema.apply(exe):
                        top1_acc = program.run(
                            valid_dataloader, exe, compiled_valid_prog,
                            valid_fetchs, epoch_id, 'valid', config)
                    logger.info(logger.coloring("EMA validate over!"))
                # NOTE(review): this run re-validates with the raw (non-EMA)
                # weights and overwrites the EMA accuracy — confirm intended.
                top1_acc = program.run(valid_dataloader, exe,
                                       compiled_valid_prog, valid_fetchs,
                                       epoch_id, 'valid', config)
                if top1_acc > best_top1_acc:
                    best_top1_acc = top1_acc
                    message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
                        best_top1_acc, epoch_id)
                    logger.info("{:s}".format(logger.coloring(message, "RED")))
                    if epoch_id % config.save_interval == 0:
                        model_path = os.path.join(config.model_save_dir,
                                                  config.ARCHITECTURE["name"])
                        save_model(train_prog, model_path, "best_model")
            # 3. save the persistable model
            if epoch_id % config.save_interval == 0:
                model_path = os.path.join(config.model_save_dir,
                                          config.ARCHITECTURE["name"])
                save_model(train_prog, model_path, epoch_id)