def main(params):
    """Evaluate a pre-trained model.

    Loads the experiment config saved under results/<dataset>/<model_type>/<exp>,
    builds the test loader and the network from the requested checkpoint epoch,
    then runs inference over the test split.

    Args:
        params: dict with keys "dataset", "model_type", "exp", "epoch",
            "loader_config_path" and "num_workers".
    """
    # Resolve experiment directory and load its saved configuration,
    # overriding it with any command-line params.
    exp_path = os.path.join(
        "results", params["dataset"], params["model_type"], params["exp"])
    config_path = os.path.join(exp_path, "config.yml")
    config = io_utils.load_yaml(config_path)
    params["config_path"] = config_path
    config = M.override_config_from_params(config, params)
    config["exp_path"] = exp_path
    cmf.create_save_dirs(config["misc"])

    # Logger writing into the experiment directory.
    logger = io_utils.get_logger(
        "Evaluate",
        log_file_path=os.path.join(config["exp_path"], "evaluation.log"))

    # Data loader for the test split; loader may further override the config.
    loader_config = io_utils.load_yaml(params["loader_config_path"])
    dset, L = cmf.get_loader(
        dataset, ["test"], [loader_config], num_workers=params["num_workers"])
    config = M.override_config_from_loader(config, dset["test"])

    # Restore the network from the checkpoint of the requested epoch.
    ckpt_path = os.path.join(
        exp_path, "checkpoints",
        "checkpoint_epoch_{:03d}.pkl".format(params["epoch"]))
    net = cmf.factory_model(config, M, dset["test"], ckpt_path)

    # Run evaluation.
    cmf.test_inference(config, L["test"], net)
def train(config):
    """Skeleton training loop: iterate epochs and periodically validate.

    The per-batch and validation bodies are not implemented yet (``pass``);
    this function currently only wires up the loaders and the loop structure.

    Args:
        config: dict-like experiment configuration with "train_loader",
            "test_loader", "misc", "evaluation" and "optimize" sections.
    """
    """ Prepare data loader and model"""
    dsets, L = cmf.get_loader(
        dataset, split=["train", "test"],
        loader_configs=[config["train_loader"], config["test_loader"]],
        num_workers=config["misc"]["num_workers"])
    # Prepare tensorboard

    """ Run training network """
    # load config values
    eval_every = config["evaluation"].get("every_eval", 1)   # epoch
    # FIX: `eval_after` was used in the validation condition below but never
    # defined (NameError on first epoch that reached the check). Read it from
    # config, consistent with the sibling train() implementation.
    eval_after = config["evaluation"].get("after_eval", 0)   # epoch
    print_every = config["misc"].get("print_every", 1)       # iteration (unused in skeleton)
    num_step = config["optimize"].get("num_step", 30)        # epoch

    # We evaluate initialized model
    print("=====> # of iteration per one epoch: {}".format(len(L["train"])))
    for epoch in range(num_step):
        # training loop (body not implemented yet)
        for batch in L["train"]:
            pass
        # validate current model
        if (epoch > eval_after) and (epoch % eval_every == 0):
            pass
def main(params):
    """Evaluate a range of checkpoints (start_epoch..end_epoch) on the test set.

    Args:
        params: dict with experiment location ("dataset", "model_type",
            "experiment"), evaluation switches ("evaluate_on_gt",
            "evaluate_on_top1000", "proposal", "test_on_server"), the epoch
            range ("start_epoch", "end_epoch", "epoch_stride"),
            "loader_config_path" and "num_workers".
    """
    # Resolve the experiment directory and remember its config path.
    exp_path = os.path.join(
        "results", params["dataset"], params["model_type"],
        params["experiment"])
    params["config_path"] = os.path.join(exp_path, "config.yml")

    # prepare model and dataset
    M, dataset, config = cmf.prepare_experiment(params)

    # Evaluation switches: ground-truth proposals, or top-1000 proposals
    # (which disables both GT and NMS).
    config["evaluation"]["use_gt"] = params["evaluate_on_gt"]
    if params["evaluate_on_top1000"]:
        config["evaluation"]["use_gt"] = False
        config["evaluation"]["apply_nms"] = False
    if len(params["proposal"]) > 0:
        config["evaluation"]["precomputed_proposal_sequence"] = params["proposal"]

    # create logger
    epoch_logger = cmf.create_logger(config, "EPOCH", "test.log")

    """ Build data loader """
    loader_config = io_utils.load_yaml(params["loader_config_path"])
    if params["test_on_server"]:
        loader_config = loader_config["test_loader"]
        test_on = "Test_Server"
    else:
        loader_config = loader_config["val_loader"]
        test_on = "Test"
    dsets, L = cmf.get_loader(
        dataset, split=["test"], loader_configs=[loader_config],
        num_workers=params["num_workers"])
    config = M.override_config_from_dataset(config, dsets["test"], mode="Test")

    # Checkpoints are loaded per epoch below; disable the training
    # tensorboard dir and keep its path for the test summary writer.
    config["model"]["resume"] = True
    tensorboard_path = config["misc"]["tensorboard_dir"]
    config["misc"]["tensorboard_dir"] = ""
    # config["misc"]["debug"] = params["debug_mode"]

    """ Evaluating networks """
    start_epoch = params["start_epoch"]
    end_epoch = params["end_epoch"]
    stride = params["epoch_stride"]
    # One summary dir for the whole sweep (same string in both calls).
    summary_dir = tensorboard_path + "_test_s{}_e{}".format(start_epoch, end_epoch)
    io_utils.check_and_create_dir(summary_dir)
    summary = PytorchSummary(summary_dir)

    for epoch in range(start_epoch, end_epoch + 1, stride):
        """ Build network """
        config["model"]["checkpoint_path"] = os.path.join(
            exp_path, "checkpoints", "epoch_{:03d}.pkl".format(epoch))
        net, _ = cmf.factory_model(config, M, dsets["test"], None)
        net.set_tensorboard_summary(summary)
        cmf.test(config, L["test"], net, epoch, None, epoch_logger, on=test_on)
def main(params):
    """Run a single-checkpoint evaluation on the test split.

    Args:
        params: dict with keys "config", "dataset", "method", "checkpoint"
            and "num_workers".
    """
    config = io_utils.load_yaml(params["config"])

    # prepare dataset
    D = cmf.get_dataset(params["dataset"])
    dsets, L = cmf.get_loader(
        D, split=["test"], loader_configs=[config["test_loader"]],
        num_workers=params["num_workers"])

    # Build network and restore weights from the given checkpoint.
    M = cmf.get_method(params["method"])
    net = M(config, logger=None)
    net.load_checkpoint(params["checkpoint"], True)
    if config["model"]["use_gpu"]:
        net.gpu_mode()

    # Evaluating networks (epoch index -1 marks a standalone run).
    cmf.test(config, L["test"], net, -1, None, mode="Test")
def train(config):
    """Train a network, printing iteration status and validating per epoch.

    Args:
        config: dict-like experiment configuration with "train_loader",
            "test_loader", "misc", "evaluation", "optimize" and "model"
            sections.
    """
    # create loggers
    it_logger = cmf.create_logger(config, "ITER", "train.log")
    eval_logger = cmf.create_logger(config, "EPOCH", "scores.log")

    """ Prepare data loader and model"""
    dsets, L = cmf.get_loader(
        dataset, split=["train", "test"],
        loader_configs=[config["train_loader"], config["test_loader"]],
        num_workers=config["misc"]["num_workers"])
    net, init_step = cmf.factory_model(config, M, dsets["train"], it_logger)

    # Prepare tensorboard
    net.create_tensorboard_summary(config["misc"]["tensorboard_dir"])

    """ Run training network """
    # load config values
    eval_every = config["evaluation"].get("every_eval", 1)    # epoch
    eval_after = config["evaluation"].get("after_eval", 0)    # epoch
    print_every = config["misc"].get("print_every", 1)        # iteration
    num_step = config["optimize"].get("num_step", 30)         # epoch
    apply_cl_after = config["model"].get("curriculum_learning_at", -1)
    vis_every = config["misc"].get("vis_every", -1)           # epoch

    # Visualization of pre-selected samples (currently disabled):
    # if vis_every > 0:
    #     nsamples = config["misc"].get("vis_nsamples", 12)
    #     vis_data = dsets["train"].get_samples(int(nsamples/2))
    #     vis_data.extend(dsets["test"].get_samples(int(nsamples/2)))
    #     vis_data = dsets["train"].collate_fn(vis_data)
    #     vis_inp, vis_gt = net.prepare_batch(vis_data)
    #     net.visualize(vis_inp, vis_gt, "epoch{:03d}".format(0))

    # We evaluate initialized model
    #cmf.test(config, L["test"], net, 0, eval_logger, mode="Valid")

    iter_count = 1
    net.train_mode()          # set network as train mode
    net.reset_status()        # initialize status
    batch_timer = timer.Timer()

    print("=====> # of iteration per one epoch: {}".format(len(L["train"])))
    for epoch in range(init_step, init_step + num_step):
        # curriculum learning
        if (apply_cl_after > 0) and (epoch == apply_cl_after):
            net.apply_curriculum_learning()

        # training loop
        for batch in L["train"]:
            # Forward and update the network
            data_load_duration = batch_timer.get_duration()
            batch_timer.reset()
            net_inps, gts = net.prepare_batch(batch)
            outputs = net.forward_update(net_inps, gts)
            run_duration = batch_timer.get_duration()

            # Compute status for current batch: loss, evaluation scores, etc
            net.compute_status(outputs["net_output"], gts)

            # print learning status
            if (print_every > 0) and (iter_count % print_every == 0):
                net.print_status()
                lr = net.get_lr()
                msg = "fetching for {:.3f}s, optimizing for {:.3f}s, lr = {:.5f}"
                it_logger.info(msg.format(data_load_duration, run_duration, lr))

            # for debugging: run validation once and cut the epoch short
            if config["misc"]["debug"] and (iter_count > 2):
                cmf.test(config, L["test"], net, 0, eval_logger, mode="Valid")
                break

            batch_timer.reset()
            iter_count = iter_count + 1
        # iteration done

        # visualize network learning status (currently disabled)
        #if (vis_every > 0) and (epoch % vis_every == 0):
        #    net.visualize(vis_inp, vis_gt, "epoch{:03d}".format(epoch))

        # validate current model
        if (epoch > eval_after) and (epoch % eval_every == 0):
            # print training losses
            net.save_results("epoch{:03d}".format(epoch), mode="Train")
            net.print_counters_info(eval_logger, epoch, mode="Train")
            cmf.test(config, L["test"], net, epoch, eval_logger, mode="Valid")
            net.train_mode()      # set network as train mode
            net.reset_status()    # initialize status
def train(config):
    """Train a network epoch by epoch, checkpointing and validating each epoch.

    Args:
        config: dict-like experiment configuration with "train_loader",
            "test_loader", "misc", "evaluation" and "optimize" sections.
    """
    # create loggers
    it_logger = cmf.create_logger(config, "ITER", "train.log")
    epoch_logger = cmf.create_logger(config, "EPOCH", "scores.log")

    """ Prepare data loader and model"""
    dsets, L = cmf.get_loader(
        dataset, split=["train", "test"],
        loader_configs=[config["train_loader"], config["test_loader"]],
        num_workers=config["misc"]["num_workers"])
    sample_data = dsets["train"].get_samples(1)
    net, start_epoch = cmf.factory_model(config, M, dsets["train"], it_logger)

    # Prepare tensorboard
    net.create_tensorboard_summary("./tensorboard")

    """ Run training network """
    iter_count = 1
    batch_timer, epoch_timer = timer.Timer(), timer.Timer()
    eval_after = config["evaluation"].get("evaluate_after", 1)   # epoch
    eval_every = config["evaluation"].get("every_eval", 1)       # epoch
    vis_every = config["misc"].get("vis_every", 1000)            # iteration
    print_every = config["misc"].get("print_every", 1)           # iteration

    # We evaluate initialized model
    #cmf.test(config, L["test"], net, 0, epoch_logger, mode="Valid")

    for epoch in range(start_epoch, config["optimize"]["num_epoch"] + 1):
        net.train_mode()      # set network as train mode
        net.reset_status()    # initialize status

        for batch in L["train"]:
            # Forward and update the network
            data_load_duration = batch_timer.get_duration()
            batch_timer.reset()
            net_inps, gts = net.prepare_batch(batch)
            outputs = net.forward_update(net_inps, gts)
            run_duration = batch_timer.get_duration()

            # Compute status for current batch: loss, evaluation scores, etc
            net.compute_status(outputs["net_output"][0], gts)

            # print learning status
            if iter_count % print_every == 0:
                net.print_status(epoch)
                lr = net_utils.adjust_lr(
                    net.it, net.it_per_epoch, net.config["optimize"])
                msg = "fetching for {:.3f}s, optimizing for {:.3f}s, lr = {:.5f}"
                it_logger.info(msg.format(data_load_duration, run_duration, lr))

            # check results for pre-selected samples over training
            if vis_every > 0 and (iter_count % vis_every == 0):
                net.save_results(
                    sample_data, "iteration_{}".format(iter_count), mode="Train")

            iter_count += 1
            batch_timer.reset()
            if config["misc"]["debug"] and (iter_count > 2):
                break
        # iteration done

        # print training time for 1 epoch
        msg = "[Epoch {}] total time of training 1 epoch: {:.3f}s"
        it_logger.info(msg.format(epoch, epoch_timer.get_duration()))

        # save network every epoch
        ckpt_path = os.path.join(
            config["misc"]["result_dir"], "checkpoints",
            "epoch_{:03d}.pkl".format(epoch))
        net.save_checkpoint(ckpt_path)

        # save results (predictions, visualizations)
        # Note: save_results() should be called before print_counters_info()
        net.save_results(sample_data, "epoch_{:03d}".format(epoch), mode="Train")
        # print status (metric) accumulated over each epoch
        net.print_counters_info(epoch, epoch_logger, mode="Train")

        # validate network
        if (epoch >= eval_after) and (epoch % eval_every == 0):
            cmf.test(config, L["test"], net, epoch, epoch_logger, mode="Valid")

        # check curriculum learning
        net.check_apply_curriculum(epoch)

        # reset reference time to compute duration of loading data
        batch_timer.reset()
        epoch_timer.reset()