Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--config",
                        type=str,
                        required=True,
                        help="yaml file for config.")
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        default=None,
        help="Name of the model. Used for both logging and saving chkpt.",
    )
    args = parser.parse_args()
    hp = load_hparam(args.config)

    if args.name is not None:
        hp.log.name = args.name

    # random seed
    if hp.train.random_seed is None:
        hp.train.random_seed = random.randint(1, 10000)
    set_random_seed(hp.train.random_seed)

    # use all visible GPUs when gpus is set to a negative value
    if hp.train.dist.gpus < 0:
        hp.train.dist.gpus = torch.cuda.device_count()
    # run a single CPU process when no GPU is requested, otherwise launch distributed training
    if hp.model.device.lower() == "cpu" or hp.train.dist.gpus == 0:
        train_loop(0, hp)
    else:
        distributed_run(train_loop, hp.to_dict(), hp.train.dist.gpus)
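The distributed_run helper is defined elsewhere in the project. As a rough sketch of what it might do, assuming it simply fans train_loop out over one worker process per GPU via torch.multiprocessing.spawn (the wrapper below, including passing the config as a plain dict, is an assumption, not the project's actual implementation):

# Hypothetical sketch only: mp.spawn calls fn(rank, *args) once per process,
# mirroring the single-process call train_loop(0, hp) above.
import torch.multiprocessing as mp


def distributed_run(fn, hp_dict, world_size):
    mp.spawn(fn, args=(hp_dict,), nprocs=world_size, join=True)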
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--config",
                        type=str,
                        required=True,
                        help="yaml file for config.")
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        default=None,
        help="Name of the model. Used for both logging and saving chkpt.",
    )
    args = parser.parse_args()
    hp = load_hparam(args.config)

    if args.name is not None:
        hp.log.name = args.name

    # random seed
    if hp.train.random_seed is None:
        hp.train.random_seed = random.randint(1, 10000)
    set_random_seed(hp.train.random_seed)

    # set log/checkpoint dir
    hp.log.chkpt_dir = os.path.join(hp.log.chkpt_dir, hp.log.name)
    hp.log.log_dir = os.path.join(hp.log.log_dir, hp.log.name)
    os.makedirs(hp.log.chkpt_dir, exist_ok=True)
    os.makedirs(hp.log.log_dir, exist_ok=True)

    # set logger
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=[
            logging.FileHandler(
                os.path.join(hp.log.log_dir,
                             "%s-%d.log" % (hp.log.name, time.time()))),
            logging.StreamHandler(),
        ],
    )
    logger = logging.getLogger()

    # set writer (tensorboard / wandb)
    writer = Writer(hp, hp.log.log_dir)

    hp_str = yaml.dump(hp.to_dict())
    logger.info("Config:")
    logger.info(hp_str)

    # fail fast if the data directories are not configured
    if hp.data.train_dir == "" or hp.data.test_dir == "":
        logger.error("train or test data directory cannot be empty.")
        raise Exception("Please specify directories of data in %s" %
                        args.config)

    train_loop(hp, logger, writer)
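The Writer class is part of the project and is not shown here. A minimal sketch of what it could look like, assuming it wraps torch.utils.tensorboard.SummaryWriter and optionally wandb behind the hp.log.use_tensorboard / hp.log.use_wandb flags (the method name below is an assumption):

# Hypothetical sketch, not the project's actual Writer.
from torch.utils.tensorboard import SummaryWriter


class Writer:
    def __init__(self, hp, log_dir):
        self.use_tensorboard = hp.log.use_tensorboard
        self.use_wandb = hp.log.use_wandb
        if self.use_tensorboard:
            self.tb = SummaryWriter(log_dir=log_dir)
        if self.use_wandb:
            import wandb
            wandb.init(name=hp.log.name, dir=log_dir)
            self.wandb = wandb

    def add_scalar(self, tag, value, step):
        # mirror every logged scalar to whichever backends are enabled
        if self.use_tensorboard:
            self.tb.add_scalar(tag, value, step)
        if self.use_wandb:
            self.wandb.log({tag: value}, step=step)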
Example #3
def test_net_arch():
    hp = load_hparam("config/default.yaml")
    net = Net_arch(hp)

    # TODO: This is example code. Change this part as needed; it can also be written as a single forward() call.
    x = torch.rand(64, 10)  # x: (B,10)
    x = net.fc1(x)  # x: (B,10)
    assert x.shape == (64, 10)
    x = net.fc2(x)  # x: (B,1)
    assert x.shape == (64, 1)
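Net_arch comes from the project's model code. A hypothetical definition consistent with the shapes asserted above, assuming plain nn.Linear layers (only the input/output sizes can be read off the test):

# Hypothetical sketch: (B,10) -> fc1 -> (B,10) -> fc2 -> (B,1).
import torch.nn as nn


class Net_arch(nn.Module):
    def __init__(self, hp):
        super().__init__()
        self.hp = hp
        self.fc1 = nn.Linear(10, 10)
        self.fc2 = nn.Linear(10, 1)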
Example #4
def test_net_arch():
    hp = load_hparam("config/default.yaml")
    net = Net_arch(hp)

    # TODO: This is example code. Change this part as needed; it can also be written as a single forward() call.
    x = torch.rand(8, 1, 28, 28)
    x = net.conv1(x)  # x: (B,4,14,14)
    assert x.shape == (8, 4, 14, 14)
    x = net.conv2(x)  # x: (B,4,7,7)
    assert x.shape == (8, 4, 7, 7)
    x = torch.flatten(x, 1)  # x: (B,4*7*7)
    assert x.shape == (8, 4 * 7 * 7)
    x = net.fc(x)  # x: (B,10)
    assert x.shape == (8, 10)
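Likewise, a hypothetical convolutional Net_arch that reproduces the shapes asserted in this variant of the test; kernel sizes, strides, and padding are assumptions, only the input/output shapes come from the asserts:

# Hypothetical sketch: (B,1,28,28) -> conv1 -> (B,4,14,14) -> conv2 -> (B,4,7,7) -> flatten -> fc -> (B,10).
import torch
import torch.nn as nn


class Net_arch(nn.Module):
    def __init__(self, hp):
        super().__init__()
        self.hp = hp
        self.conv1 = nn.Conv2d(1, 4, kernel_size=3, stride=2, padding=1)  # 28 -> 14
        self.conv2 = nn.Conv2d(4, 4, kernel_size=3, stride=2, padding=1)  # 14 -> 7
        self.fc = nn.Linear(4 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv2(self.conv1(x))
        x = torch.flatten(x, 1)
        return self.fc(x)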
Example #5
    def setup_method(self):
        # set log/checkpoint dir
        self.TEST_DIR = pathlib.Path(TEST_DIR)
        self.log_dir = (self.TEST_DIR / "logs").resolve()
        self.chkpt_dir = (self.TEST_DIR / "chkpt").resolve()
        os.makedirs(self.TEST_DIR, exist_ok=True)
        os.makedirs(self.log_dir, exist_ok=True)
        os.makedirs(self.chkpt_dir, exist_ok=True)

        # set hp
        self.hp = load_hparam("config/default.yaml")
        self.hp.model.device = "cpu"
        self.hp.log.log_dir = self.log_dir
        self.hp.log.chkpt_dir = self.chkpt_dir
        self.hp.log.use_wandb = False
        self.hp.log.use_tensorboard = False

        # set logger
        self.logger = make_logger(self.hp)
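This setup_method belongs to a pytest test class. A possible surrounding skeleton, assuming a module-level TEST_DIR constant and a matching teardown_method that removes the temporary tree (both are assumptions, not part of the original snippet):

# Hypothetical test-class skeleton around the setup_method above.
import shutil

TEST_DIR = "test_tmp"  # assumed value; the original constant is defined elsewhere


class TestModel:
    # setup_method as shown above ...

    def teardown_method(self):
        # remove the temporary log/checkpoint tree created in setup_method
        shutil.rmtree(self.TEST_DIR, ignore_errors=True)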