Example #1
def main(config: ConfigParser) -> None:
    """
    Main training function.

    Parameters
    ----------
    config : parse_config.ConfigParser
        Parsed configuration JSON file.
    """
    logger: Logger = config.get_logger("train")

    # Setup data_loader instances.
    data_loader: DataLoader = config.initialize("data_loader", module_data)
    valid_data_loader: Optional[DataLoader] = data_loader.split_validation()

    # Build model architecture, then print to console.
    model: Module = config.initialize("arch", module_arch)
    logger.info(model)

    # Get function handles of loss and metrics as well as args.
    loss_fn: Callable = getattr(module_loss, config["loss"]["type"])
    loss_args: Dict[str, Any] = config["loss"]["args"]
    metric_fns: List[Callable] = [
        getattr(module_metric, met) for met in config["metrics"]
    ]
    metric_args: List[Dict[str, Any]] = [
        config["metrics"][met] for met in config["metrics"]
    ]

    # Build optimizer, learning rate scheduler.
    # Delete every line containing lr_scheduler to disable scheduler.
    trainable_params: Iterable[Tensor] = filter(lambda p: p.requires_grad,
                                                model.parameters())
    optimizer: Optimizer = config.initialize("optimizer", torch.optim,
                                             trainable_params)

    lr_scheduler: Optional[Any] = config.initialize("lr_scheduler",
                                                    torch.optim.lr_scheduler,
                                                    optimizer)

    trainer: Trainer = Trainer(
        model,
        loss_fn,
        loss_args,
        metric_fns,
        metric_args,
        optimizer,
        config=config,
        data_loader=data_loader,
        valid_data_loader=valid_data_loader,
        lr_scheduler=lr_scheduler,
    )

    trainer.train()
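
Note: `config.initialize(name, module, *args)` is the template's object factory. Its behavior can be inferred from the expanded `getattr` calls in the later examples: it looks up `config[name]['type']` in `module` and instantiates it with the given positional args plus `config[name]['args']` as keyword args. A minimal sketch under that assumption (not the template's exact code):

from typing import Any


def initialize(config: dict, name: str, module: Any, *args: Any) -> Any:
    # Sketch: resolve the class named in config[name]['type'] inside `module`
    # and construct it with *args plus config[name]['args'] as kwargs.
    cls = getattr(module, config[name]["type"])
    kwargs = dict(config[name].get("args", {}))
    return cls(*args, **kwargs)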
Example #2
def main(config: ConfigParser):

    logger = config.get_logger('train')

    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=config['data_loader']['args']['batch_size'],
        shuffle=config['data_loader']['args']['shuffle'],
        validation_split=config['data_loader']['args']['validation_split'],
        num_batches=config['data_loader']['args']['num_batches'],
        training=True,
        num_workers=config['data_loader']['args']['num_workers'],
        pin_memory=config['data_loader']['args']['pin_memory'])

    # valid_data_loader = data_loader.split_validation()
    valid_data_loader = None
    # test_data_loader = None

    test_data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=128,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2)  #.split_validation()

    # build model architecture, then print to console
    model = config.initialize('arch', module_arch)

    # get function handles of loss and metrics
    logger.info(config.config)
    if hasattr(data_loader.dataset, 'num_raw_example'):
        num_examp = data_loader.dataset.num_raw_example
    else:
        num_examp = len(data_loader.dataset)

    train_loss = getattr(module_loss, config['train_loss']['type'])(
        num_examp=num_examp,
        num_classes=config['num_classes'],
        beta=config['train_loss']['args']['beta'])

    val_loss = getattr(module_loss, config['val_loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer and learning rate scheduler. delete every line containing lr_scheduler to disable the scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())

    optimizer = config.initialize('optimizer', torch.optim,
                                  [{
                                      'params': trainable_params
                                  }])

    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler,
                                     optimizer)

    trainer = Trainer(model,
                      train_loss,
                      metrics,
                      optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      test_data_loader=test_data_loader,
                      lr_scheduler=lr_scheduler,
                      val_criterion=val_loss)

    trainer.train()
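
For reference, Example #2 reads `config['num_classes']`, `config['train_loss']['type']` and `['args']['beta']`, `config['val_loss']` (a plain function name), and `config['metrics']`. A hypothetical fragment of the parsed config that satisfies those accesses (key names taken from the code, all values illustrative):

config_fragment = {
    'num_classes': 10,
    'train_loss': {'type': 'elr_loss', 'args': {'beta': 0.7}},
    'val_loss': 'cross_entropy',
    'metrics': ['my_metric'],
}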
Example #3
def main(config: ConfigParser):

    logger = config.get_logger('train')
    logger.info(config.config)

    # setup data_loader instances
    data_loader1 = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=config['data_loader']['args']['batch_size'],
        shuffle=config['data_loader']['args']['shuffle'],
        validation_split=config['data_loader']['args']['validation_split'],
        num_batches=config['data_loader']['args']['num_batches'],
        training=True,
        num_workers=config['data_loader']['args']['num_workers'],
        pin_memory=config['data_loader']['args']['pin_memory'])

    data_loader2 = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=config['data_loader']['args']['batch_size2'],
        shuffle=config['data_loader']['args']['shuffle'],
        validation_split=config['data_loader']['args']['validation_split'],
        num_batches=config['data_loader']['args']['num_batches'],
        training=True,
        num_workers=config['data_loader']['args']['num_workers'],
        pin_memory=config['data_loader']['args']['pin_memory'])

    valid_data_loader = data_loader1.split_validation()

    test_data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=128,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2).split_validation()

    # build model architecture
    model1 = config.initialize('arch1', module_arch)
    model_ema1 = config.initialize('arch1', module_arch)
    model_ema1_copy = config.initialize('arch1', module_arch)
    model2 = config.initialize('arch2', module_arch)
    model_ema2 = config.initialize('arch2', module_arch)
    model_ema2_copy = config.initialize('arch2', module_arch)

    # prepare GPU device ids, then get function handles of loss and metrics
    device_id = list(range(min(torch.cuda.device_count(), config['n_gpu'])))

    if hasattr(data_loader1.dataset, 'num_raw_example') and hasattr(
            data_loader2.dataset, 'num_raw_example'):
        num_examp1 = data_loader1.dataset.num_raw_example
        num_examp2 = data_loader2.dataset.num_raw_example
    else:
        num_examp1 = len(data_loader1.dataset)
        num_examp2 = len(data_loader2.dataset)

    train_loss1 = getattr(module_loss, config['train_loss']['type'])(
        num_examp=num_examp1,
        num_classes=config['num_classes'],
        device='cuda:' + str(device_id[0]),
        config=config.config,
        beta=config['train_loss']['args']['beta'])
    train_loss2 = getattr(module_loss, config['train_loss']['type'])(
        num_examp=num_examp2,
        num_classes=config['num_classes'],
        device='cuda:' + str(device_id[-1]),
        config=config.config,
        beta=config['train_loss']['args']['beta'])

    val_loss = getattr(module_loss, config['val_loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer and learning rate scheduler. delete every line containing lr_scheduler to disable the scheduler
    trainable_params1 = filter(lambda p: p.requires_grad, model1.parameters())
    trainable_params2 = filter(lambda p: p.requires_grad, model2.parameters())

    optimizer1 = config.initialize('optimizer1', torch.optim,
                                   [{
                                       'params': trainable_params1
                                   }])
    optimizer2 = config.initialize('optimizer2', torch.optim,
                                   [{
                                       'params': trainable_params2
                                   }])

    lr_scheduler1 = config.initialize('lr_scheduler', torch.optim.lr_scheduler,
                                      optimizer1)
    lr_scheduler2 = config.initialize('lr_scheduler', torch.optim.lr_scheduler,
                                      optimizer2)

    trainer = Trainer(model1,
                      model2,
                      model_ema1,
                      model_ema2,
                      train_loss1,
                      train_loss2,
                      metrics,
                      optimizer1,
                      optimizer2,
                      config=config,
                      data_loader1=data_loader1,
                      data_loader2=data_loader2,
                      valid_data_loader=valid_data_loader,
                      test_data_loader=test_data_loader,
                      lr_scheduler1=lr_scheduler1,
                      lr_scheduler2=lr_scheduler2,
                      val_criterion=val_loss,
                      model_ema1_copy=model_ema1_copy,
                      model_ema2_copy=model_ema2_copy)

    trainer.train()
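
Example #3 also constructs `model_ema1`/`model_ema2` (plus copies) from the same arch entries; the trainer's update rule is not shown, but an EMA shadow model is conventionally maintained like this (a generic sketch, not this repository's code):

import torch


@torch.no_grad()
def ema_update(ema_model: torch.nn.Module, model: torch.nn.Module,
               decay: float = 0.999) -> None:
    # Move each shadow parameter a small step toward the live parameter.
    for ema_p, p in zip(ema_model.parameters(), model.parameters()):
        ema_p.mul_(decay).add_(p, alpha=1.0 - decay)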
Example #4
def handle(data):  # reconstructed header: the original snippet begins mid-function
    try:
        _, label = predict(
            data['query'],
            data['target'])  # read the query/target fields from the JSON data and predict
        response = {'label': list(map(int, label))}
    except (KeyError, TypeError, ValueError):  # catch malformed input
        raise JsonError(description='Invalid value.')  # report the error back to the caller
    return response  # normal return; this response is serialized to JSON


if __name__ == '__main__':
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-r', '--resume', default=None, type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d', '--device', default=None, type=str,
                      help='indices of GPUs to enable (default: all)')

    config = ConfigParser(args)
    logger = config.get_logger('test')
    # setup data_loader instances
    processor = config.initialize(
        'processor', module_processor, logger, config)
    # build model architecture, then print to console
    model = config.initialize(
        'arch',
        module_arch,
        vocab_size=processor.vocab_size, num_labels=processor.nums_label())
    # logger.info(model)
    agent = Agent(model, config=config)

    app.run(host='0.0.0.0', port=5000, debug=True)
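
The server listens on 0.0.0.0:5000. Assuming the reconstructed handler above is bound to a route such as '/predict' (the route is not shown in the snippet), a client call would look like:

import requests

# Hypothetical client; only host and port come from app.run() above,
# the '/predict' path is an assumption.
resp = requests.post('http://localhost:5000/predict',
                     json={'query': 'some text', 'target': 'other text'})
print(resp.json())  # e.g. {'label': [0, 1]}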
Example #5
def main(config: ConfigParser):

    logger = config.get_logger('train')

    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=config['data_loader']['args']['batch_size'],
        shuffle=config['data_loader']['args']['shuffle'],
        validation_split=config['data_loader']['args']['validation_split'],
        num_batches=config['data_loader']['args']['num_batches'],
        training=True,
        num_workers=config['data_loader']['args']['num_workers'],
        pin_memory=config['data_loader']['args']['pin_memory'])

    valid_data_loader = data_loader.split_validation()

    test_data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=128,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2).split_validation()

    # build model architecture, then print to console
    model = config.initialize('arch', module_arch)

    train_loss = getattr(module_loss, config['train_loss'])
    val_loss = getattr(module_loss, config['val_loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    logger.info(str(model).split('\n')[-1])

    # build optimizer and learning rate scheduler. delete every line containing lr_scheduler to disable the scheduler

    trainable_params = [{
        'params': [p for p in model.parameters()
                   if not getattr(p, 'bin_gate', False)
                   and not getattr(p, 'bin_theta', False)
                   and not getattr(p, 'srelu_bias', False)
                   and getattr(p, 'requires_grad', False)]
    }, {
        'params': [p for p in model.parameters()
                   if getattr(p, 'bin_gate', False)
                   and getattr(p, 'requires_grad', False)],
        'lr': config['optimizer']['args']['lr'] * 10,
        'weight_decay': 0
    }, {
        'params': [p for p in model.parameters()
                   if getattr(p, 'srelu_bias', False)
                   and getattr(p, 'requires_grad', False)],
        'weight_decay': 0
    }, {
        'params': [p for p in model.parameters()
                   if getattr(p, 'bin_theta', False)
                   and getattr(p, 'requires_grad', False)],
        'lr': config['optimizer']['args']['lr'],
        'weight_decay': 0
    }]

    optimizer = config.initialize('optimizer', torch.optim, trainable_params)

    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler,
                                     optimizer)

    trainer = Trainer(model,
                      train_loss,
                      metrics,
                      optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      test_data_loader=test_data_loader,
                      lr_scheduler=lr_scheduler,
                      val_criterion=val_loss)

    trainer.train()
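
The four parameter groups above use `torch.optim`'s standard per-group options: any key other than 'params' overrides the optimizer-wide default for that group. A standalone illustration of the mechanism:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(
    [{'params': [model.weight]},                                  # inherits defaults below
     {'params': [model.bias], 'lr': 1e-1, 'weight_decay': 0.0}],  # per-group overrides
    lr=1e-2, weight_decay=1e-4)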
Example #6
    args = argparse.ArgumentParser(description='PyTorch Template')
    args.add_argument('-r',
                      '--resume',
                      default=None,
                      type=str,
                      help='path to latest checkpoint (default: None)')
    args.add_argument('-d',
                      '--device',
                      default=None,
                      type=str,
                      help='indices of GPUs to enable (default: all)')

    config = ConfigParser(args)
    logger = config.get_logger('test')
    # setup data_loader instances
    processor = config.initialize('processor', module_processor, logger,
                                  config)
    # build model architecture, then print to console
    if config.bert_config_path:
        bert_config = BertConfig(config.bert_config_path)
        model = config.initialize('arch',
                                  module_arch,
                                  config=bert_config,
                                  num_labels=processor.nums_label())
    else:
        model = config.initialize_bert_model('arch',
                                             module_arch,
                                             num_labels=processor.nums_label())
    # logger.info(model)
    agent = Agent(model, config=config)
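
Assuming `BertConfig` here comes from pytorch-pretrained-bert or transformers (the import is not shown), a config object can equivalently be built with the explicit classmethod:

# Assumption: transformers' BertConfig; pytorch-pretrained-bert's behaves the same way.
from transformers import BertConfig

bert_config = BertConfig.from_json_file('path/to/bert_config.json')
print(bert_config.hidden_size, bert_config.num_hidden_layers)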
Example #7
def main(config: ConfigParser) -> None:
    """
    Main testing function.

    Parameters
    ----------
    config : parse_config.ConfigParser
        Parsed configuration JSON file.
    """
    logger: Logger = config.get_logger("test")

    # Setup data_loader instance.
    data_loader: DataLoader = getattr(module_data, config["data_loader"]["type"])(
        config["data_loader"]["args"]["data_dir"],
        batch_size=512,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2,
    )

    # Build model architecture.
    model: Module = config.initialize("arch", module_arch)
    logger.info(model)

    # Get function handles of loss and metrics as well as args.
    loss_fn: Callable = getattr(module_loss, config["loss"]["type"])
    loss_args: Dict[str, Any] = config["loss"]["args"]
    metric_fns: List[Callable] = [getattr(module_metric, met) for met in config["metrics"]]
    metric_args: List[Dict[str, Any]] = [config["metrics"][met] for met in config["metrics"]]

    logger.info("Loading checkpoint: {} ...".format(config.resume))
    checkpoint: dict = torch.load(config.resume)
    state_dict: dict = checkpoint["state_dict"]
    if config["n_gpu"] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)

    # Prepare model for testing.
    device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()

    total_loss: float = 0.0
    total_metrics: Tensor = torch.zeros(len(metric_fns))

    with torch.no_grad():
        i: int
        data: Tensor
        target: Tensor
        for i, (data, target) in enumerate(tqdm(data_loader)):
            data, target = data.to(device), target.to(device)
            output: Tensor = model(data)

            #
            # Save sample images, or do something with output here.
            #

            # Compute loss and metrics on the test set.
            loss: Tensor = loss_fn(output, target, **loss_args)
            batch_size: int = data.shape[0]
            total_loss += loss.item() * batch_size

            j: int
            metric: Callable
            for j, metric in enumerate(metric_fns):
                total_metrics[j] += metric(output, target, **metric_args[j]) * batch_size

    n_samples: int = len(data_loader.sampler)
    log: Dict[str, Any] = {"loss": total_loss / n_samples}

    met: Callable
    log.update(
        {met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)}
    )

    logger.info(log)
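
Each metric is called as `metric(output, target, **metric_args[j])` and logged under its `__name__`. A hypothetical metric compatible with that convention:

import torch


def top_k_acc(output: torch.Tensor, target: torch.Tensor, k: int = 3) -> float:
    # Fraction of samples whose true class is among the k highest logits.
    pred = output.topk(k, dim=1).indices
    return pred.eq(target.view(-1, 1)).any(dim=1).float().mean().item()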
Example #8
def main(config: ConfigParser):

    access_token = ''
    with open('./pytorch_line_token') as f:
        access_token = str(f.readline())
    bot = LINENotifyBot(access_token=access_token)

    logger = config.get_logger('train')

    # setup data_loader instances
    data_loader = config.initialize('data_loader', module_data)
    valid_data_loader = data_loader.split_validation()

    # build model architecture, then print to console
    model = config.initialize('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    loss = getattr(module_loss, config['loss'])
    metrics = [getattr(module_metric, met) for met in config['metrics']]

    # build optimizer and learning rate scheduler. delete every line containing lr_scheduler to disable the scheduler
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = config.initialize('optimizer', torch.optim, trainable_params)

    lr_scheduler = config.initialize('lr_scheduler', torch.optim.lr_scheduler,
                                     optimizer)

    trainer = Trainer(model,
                      loss,
                      metrics,
                      optimizer,
                      config=config,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      lr_scheduler=lr_scheduler)

    trainer.train()
    logger = config.get_logger('trainer', config['trainer']['verbosity'])
    cfg_trainer = config['trainer']

    # Need to make __enter__() runnable via mlflow.start_run(). Shelved for now.
    # mlflow = MLFlow(config.log_dir, logger, cfg_trainer['mlflow'])

    with mlflow.start_run() as run:
        # Log args into mlflow
        log_params(config.config)

        # Log results into mlflow
        for loss in trainer.train_loss_list:
            mlflow.log_metric('train_loss', loss)
        for loss in trainer.val_loss_list:
            mlflow.log_metric('val_loss', loss)

        # Log other info
        # mlflow.log_param('loss_type', 'CrossEntropy')

        # Log model
        mlflow.pytorch.log_model(model, 'model')

    bot.send(message=f'Training of {config["name"]} finished. @{socket.gethostname()}')
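
`log_params(config.config)` is a project helper whose definition is not shown; a plausible implementation flattens the nested config into dotted keys and logs each leaf (a sketch under that assumption):

import mlflow


def log_params(config: dict, prefix: str = '') -> None:
    # Hypothetical helper: recurse through nested dicts and log each leaf.
    for key, value in config.items():
        name = f'{prefix}.{key}' if prefix else key
        if isinstance(value, dict):
            log_params(value, prefix=name)
        else:
            mlflow.log_param(name, value)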
Example #9
# setup data_loader instances
data_loader = getattr(module_data, config["test_data_loader"]["type"])(
    config["test_data_loader"]["args"]["data_dir"],
    batch_size=32,
    seq_length=128,
    shuffle=False,
    validation_split=0.0,
    training=False,
    num_workers=2,
)

# build model architecture
try:
    config["embedding"]["args"].update({"vocab": data_loader.dataset.vocab})
    embedding = config.initialize("embedding", module_embedding)
except Exception:  # fall back when the config has no usable embedding section
    embedding = None
config["arch"]["args"].update({"vocab": data_loader.dataset.vocab})
config["arch"]["args"].update({"embedding": embedding})
model = config.initialize("arch", module_arch)

checkpoint = torch.load(args.resume)
state_dict = checkpoint["state_dict"]
if config["n_gpu"] > 1:
    model = torch.nn.DataParallel(model)
model.load_state_dict(state_dict)

# prepare model for testing
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
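
The snippet ends after moving the model to the device; mirroring Example #7, the usual next steps are switching to eval mode and iterating without gradients (a sketch; the exact batch structure depends on this dataset):

model.eval()
outputs = []
with torch.no_grad():
    for data, target in data_loader:  # assuming (data, target) batches
        output = model(data.to(device))
        outputs.append(output.cpu())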