# Example #1
def main():
    """Run the NAS experiment ``args.run`` times, aggregate the per-run
    results, optionally persist them (args + CSV + progress figure), and
    optionally evaluate the discovered architectures.

    Relies on module-level globals parsed/imported elsewhere in the file:
    ``args`` (CLI namespace), ``utils``, ``nas``, ``get_logger``,
    ``test_result`` and ``config`` — TODO confirm their exact contracts
    against the rest of the module.
    """
    utils.display_args(args)
    time.sleep(5)  # pause so the printed args can be read before training starts
    # Reproducibility: fix the RNG seed and force deterministic cuDNN kernels.
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Working directory is timestamped, so repeated runs never collide.
    exp_tag = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    work_dir = os.path.join(args.work_dir, args.dataset, exp_tag)
    os.makedirs("log", exist_ok=True)
    log_path = os.path.join("log", exp_tag + ".log")
    logger = get_logger(log_path)
    res_list = []
    config.DROPOUT = args.dropout
    start = time.time()
    for i in range(args.run):
        logger.info("=" * 50 + f"Run {i+1}" + "=" * 50)
        res_list.append(nas(logger))
    running_time = time.time() - start  # fixed typo: was ``runnint_time``
    # Stack the per-run frames under a two-level index ("Run i", original index).
    result = pd.concat(res_list,
                       axis=0,
                       keys=[f"Run {i}" for i in range(len(res_list))])

    if args.save:  # idiomatic truthiness instead of ``is True``
        os.makedirs(work_dir, exist_ok=False)  # exp_tag timestamp makes this unique
        utils.save_args(args, os.path.join(work_dir, 'args.txt'))
        file_name = os.path.join(work_dir, "result.csv")

        result.to_csv(file_name, index=True)
        logger.info(f"saving result to {file_name}")
        fig_name = os.path.join(work_dir, "progress.png")
        plt.figure()
        # Mean reward per step index across all runs (level 1 of the
        # MultiIndex created by ``pd.concat(..., keys=...)``).
        avg_reward = result.groupby(
            result.index.get_level_values(1))["reward"].mean()
        plt.plot(avg_reward)
        plt.title("Best reward in {} runs is {:.4f}".format(
            args.run, result["reward"].max()))
        logger.info(f"saving figure to {fig_name}")
        plt.savefig(fig_name)
        plt.close()  # release the figure instead of leaking it

    logger.info("Best reward in {} runs is {:.4f}".format(
        args.run, result["reward"].max()))

    # Prefer the requested GPU when CUDA is available, otherwise fall back to CPU.
    device = 'cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu'
    if args.evaluate:  # idiomatic truthiness instead of ``is True``
        test_result(args.dataset, res_list, device, logger, args.layers)
    logger.info(f"Total running time {running_time}")
    logger.info("*" * 50 + "End" + "*" * 50)
import numpy as np
from tqdm import tqdm

from utils import display_args, print_time

# Command-line interface: two required positional paths
# (input HDF5 with labels, output .npy for class occurrences).
parser = argparse.ArgumentParser("Compute class weight in target labels.")

for _arg_name, _arg_help in (
        ("input_h5", "Path to .h5 file containing target labels."),
        ("output_npy", "Path to output class occurrence .npy file."),
):
    parser.add_argument(_arg_name, type=str, help=_arg_help)

args = parser.parse_args()
display_args(args, __file__)

############## MAIN ##############
# Constant
OUTPUT_SIZE = 2  # number of label classes counted (binary: conserved / not)
OUTPUT_KEY = "output_conserved"  # HDF5 group holding the target labels

start_time = time.time()  # wall-clock start, presumably reported via print_time later
count = np.zeros(OUTPUT_SIZE)  # per-class occurrence accumulator
with h5py.File(args.input_h5, "r") as label_dset:
    label_group = label_dset[OUTPUT_KEY]
    for group_name in tqdm(label_group.keys()):
        target = label_group[group_name][:, :-3]
        unique = np.unique(target, return_counts=True)
        if len(unique[0]) != OUTPUT_SIZE:
            raise RuntimeError(
# Example #3
            model_desc = "{type}".format(
                    type=str(args.model_type))
            with open("%sdnn_%s_pass_%05d.tar" %
                          (args.model_output_prefix, model_desc,
                           event.pass_id), "w") as f:
                parameters.to_tar(f)

    # training
    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(reader.train(data_path,
                                            feature_dim+1,
                                            args.model_type.is_classification()),
                    buf_size=batch_size*10),
            batch_size=batch_size),
        feeding=feeding,
        event_handler=event_handler,
        num_passes=num_passes)


if __name__ == '__main__':
    # Echo the parsed CLI options, then launch training with them.
    display_args(args)
    # NOTE(review): ``train``, ``args`` and ``ModelType`` are defined elsewhere
    # in this module — keyword names below must match ``train``'s signature.
    train(
        data_path=args.data_path,
        model_type=ModelType(args.model_type),  # wrap the raw CLI value in the enum-like type
        batch_size=args.batch_size,
        num_passes=args.num_passes,
        class_num=args.class_num,
        num_workers=args.num_workers,
        use_gpu=args.use_gpu)
# Example #4
                logger.info("save model: %sdssm_%s_pass_%05d.tar" %
                            (config.config['model_output_prefix'], model_desc, event.pass_id))

        # if isinstance(event, paddle.event.EndPass):
        #     result = trainer.test(reader=test_reader, feeding=feeding)
        #     logger.info("Test with pass %d, %s" % (event.pass_id, result.metrics))
        #     with open("./data/output/endpass/dssm_params_pass" + str(event.pass_id) + ".tar", "w") as f:
        #         parameters.to_tar(f)

    trainer.train(reader=train_reader,
                  event_handler=_event_handler,
                  feeding=feeding,
                  num_passes=num_passes)
    logger.info("training finish.")


if __name__ == '__main__':
    # Echo the full configuration dict, then launch training from it.
    display_args(config.config)
    # NOTE(review): all settings come from the module-level ``config.config``
    # mapping; keyword names below must match ``train``'s signature.
    train(train_data_paths=config.config["train_data_paths"],
          test_data_paths=config.config["test_data_paths"],
          source_dic_path=config.config["source_dic_path"],
          target_dic_path=config.config["target_dic_path"],
          model_arch=ModelArch(config.config["model_arch"]),  # wrap raw value in the arch type
          batch_size=config.config["batch_size"],
          num_passes=config.config["num_passes"],
          share_semantic_generator=config.config["share_network_between_source_target"],
          share_embed=config.config["share_embed"],
          class_num=config.config["class_num"],
          num_workers=config.config["num_workers"],
          use_gpu=config.config["use_gpu"])