Example #1
0
def train(hidden_features: int = 100, dropout: float = 0.05,
          bias: bool = True, negative_slope: float = 0.05,
          seed: int = 42, device: str = 'cpu', batch_size: int = 5, num_epochs: int = 50,
          out_dir: Path = Path('../out_dir'), monitor: str = 'tensorboard'):
    """ train iris classifier

    Args:
        hidden_features: the size of hidden layers
        dropout: the dropout ratio
        bias:  whether or not use the bias in hidden layers
        negative_slope: the ratio of negative part
        seed: the random seed number
        device: device id
        batch_size: the size of each batch
        num_epochs: the total numbers of epochs
        out_dir: the root path of output
        monitor: the type of monitor, one of 'filesystem' or 'tensorboard'
    """
    # Capture the call arguments *before* binding any other local, so the
    # snapshot contains exactly the hyper-parameters of this run.
    options = locals()
    experiment_dir = out_dir / experiment_hash(**options)
    ensure_output_dir(experiment_dir)
    options_dump(experiment_dir, **options)
    log_system.notice(f'experiment_dir => {experiment_dir}')

    manual_seed(seed)
    log_system.notice(f'seed => {seed}')

    # Renamed from `train, test` to avoid shadowing this function's own name.
    train_loader, test_loader = prepare_iris_dataset(batch_size)

    # Iris: 4 input features, 3 target classes.
    estimator = IrisEstimator(
        in_features=4, dropout=dropout, num_classes=3, hidden_features=hidden_features,
        negative_slope=negative_slope, bias=bias
    )
    optimizer = optim.Adam(estimator.parameters())
    # Resolve the monitor *class* from its name, then instantiate it; a
    # distinct local keeps the `monitor` argument string intact.
    monitor_instance = get_monitor(monitor)(log_dir=experiment_dir)

    to_device(device, estimator)

    schedule = EpochalSchedule(estimator, optimizer, monitor_instance)
    # Report running means of loss/accuracy every 5 training iterations.
    schedule.register_extension(Periodic(Moment.AFTER_ITERATION, iteration=5))(CommitScalarByMean(
        'criterion', 'acc', chapter='train',
    ))
    # Clip gradients after every backward pass to stabilise optimisation.
    schedule.register_extension(Periodic(Moment.AFTER_BACKWARD, iteration=1))(ClipGradNorm(max_norm=4.))
    # Evaluate on the held-out split at the end of every epoch.
    schedule.register_extension(Periodic(Moment.AFTER_EPOCH, epoch=1))(Pipeline(
        Evaluation(data_loader=test_loader, chapter='test'),
        CommitScalarByMean('criterion', 'acc', chapter='test'),
    ))

    return schedule.run(train_loader, num_epochs)
Example #2
0
def glove_(tensor: Tensor, file: Path, vocab: Vocab, encoding: str = 'utf-8') -> None:
    """Fill `tensor` rows in place with vectors streamed from a GloVe-style file.

    Each `(token, vectors)` pair yielded by `token_vectors_stream` is written
    into the row `tensor[vocab(token)]`.

    Args:
        tensor: the embedding weight tensor to update in place
        file: the path of the vector file
        vocab: the vocabulary mapping tokens to row indices
        encoding: the text encoding of the vector file
    """
    # Fix: `count` was previously only bound inside the loop, so an empty
    # stream raised NameError at the final log line.
    count = 0
    with torch.no_grad():  # in-place writes must not be tracked by autograd
        for count, (token, vectors) in enumerate(
                token_vectors_stream(file, vocab, encoding), start=1):
            tensor[vocab(token)] = f32_tensor(vectors)
    log_system.notice(f'loaded {count} tokens from {file}')
Example #3
0
 def setattr_with_notice(self, name: str, value) -> None:
     """Assign `value` to attribute `name` on `self`, logging the binding.

     The log line reports the owning class, the attribute name, and the
     class name of the assigned value.
     """
     setattr(self, name, value)
     message = f'{self.__class__.__name__}.{name} := {value.__class__.__name__}'
     return log_system.notice(message)
Example #4
0
 def __call__(self, schedule: 'Schedule') -> None:
     """Log how long the current phase took, then remove its start-time mark.

     Reads the start timestamp previously stored on `schedule` under
     `self.attr`, logs the elapsed time, and deletes that attribute.
     """
     label = getattr(schedule, self.key)
     elapsed = datetime.now() - getattr(schedule, self.attr)
     log_system.notice(
         f'[{self.key} {label}] finished, time elapsed => {elapsed}')
     return delattr(schedule, self.attr)
Example #5
0
 def __call__(self, schedule: 'Schedule') -> None:
     """Mark the start of a phase: log the moment and stash it on `schedule`.

     Stores the current timestamp under `self.attr` so a matching
     finish-handler can later compute the elapsed time.
     """
     label = getattr(schedule, self.key)
     started = datetime.now()
     log_system.notice(f'[{self.key} {label}] start => {started}')
     return setattr(schedule, self.attr, started)