示例#1
0
import os
import xnas.core.benchmark as benchmark
import xnas.core.config as config
import xnas.core.logging as logging
import xnas.datasets.loader as loader
import xnas.core.distributed as dist
from xnas.core.config import cfg


# Module-level logger for this file, created via the project's logging wrapper.
logger = logging.get_logger(__name__)


def test_full_time():
    """Benchmark the data loader by timing full passes over the test split.

    Dumps and logs the active config, builds the train/test loaders from
    the ``cfg.SEARCH.*`` settings, then logs the average per-epoch time of
    iterating the test loader as reported by
    ``benchmark.compute_full_loader``.
    """
    config.dump_cfg()
    logging.setup_logging()
    logger.info("Config:\n{}".format(cfg))
    logger.info(logging.dump_log_data(cfg, "cfg"))

    # Only the test loader is timed; discard the train loader.
    _, test_loader = loader.construct_loader(
        cfg.SEARCH.DATASET, cfg.SEARCH.SPLIT, cfg.SEARCH.BATCH_SIZE)

    # One average time per epoch, over 3 full passes of the loader.
    avg_time = benchmark.compute_full_loader(test_loader, epoch=3)

    for i, _time in enumerate(avg_time):
        logger.info("The {}'s epoch average time is: {}".format(i, _time))

if __name__ == "__main__":
    # Parse CLI overrides into the global cfg, make sure the output directory
    # exists, then run the timing test through the distributed runner with a
    # single process.
    config.load_cfg_fom_args("Compute model and loader timings.")
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    dist.multi_proc_run(num_proc=1, fun=test_full_time)
示例#2
0
import xnas.core.checkpoint as checkpoint
import time
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import random
import os
import json
import gc
import sys
sys.path.append(".")

# config load and assert
# NOTE(review): `config`, `cfg`, and `logging` are referenced below but not
# imported in this snippet — presumably imported elsewhere in the original
# file; verify before running this chunk standalone.
config.load_cfg_fom_args()
config.assert_and_infer_cfg()
cfg.freeze()
# tensorboard
# TensorBoard writer, logging under <OUT_DIR>/tb.
writer = SummaryWriter(log_dir=os.path.join(cfg.OUT_DIR, "tb"))

# Module-level logger for this file, created via the project's logging wrapper.
logger = logging.get_logger(__name__)


def random_sampling(search_space,
                    distribution_optimizer,
                    epoch=-1000,
                    _random=False):
    """Sample operations for the architecture search.

    NOTE(review): this definition is truncated in the visible chunk; only the
    start of the uniform-random branch is shown. Confirm the full body before
    relying on this documentation.

    Args:
        search_space: object exposing ``num_ops`` and ``all_edges``
            (counts read below) — other requirements not visible here.
        distribution_optimizer: presumably used when ``_random`` is False;
            TODO confirm in the full source.
        epoch: current epoch; the -1000 default presumably acts as an
            "unset" sentinel — TODO confirm.
        _random: if True, take the uniform-random sampling path.
    """

    if _random:
        # Number of candidate ops and number of edges in the search space.
        num_ops, total_edges = search_space.num_ops, search_space.all_edges
示例#3
0
def main():
    """Entry point: load config from CLI args, create the output directory,
    and run the loader timing test in a single distributed process.

    NOTE(review): this def ends at the last visible line of the chunk and may
    continue beyond it — confirm against the full file.
    """
    config.load_cfg_fom_args("Compute model and loader timings.")
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    dist.multi_proc_run(num_proc=1, fun=test_full_time)