Example #1
def main(args: argparse.Namespace):
    """
    The main experiment function.

    :param args: Command-line arguments
    """
    torch.manual_seed(args.seed)

    checkpoint_path = (Path(args.checkpoint_path)
                       if args.checkpoint_path else None)
    if checkpoint_path and not checkpoint_path.is_dir():
        raise OSError(
            f"Checkpoint directory does not exist: '{checkpoint_path}'")

    data_path = Path(args.data_path).resolve()
    train_data = MNIST(data_path, train=True)
    test_data = MNIST(data_path, train=False)

    if args.dataset_fraction < 1:
        train_data = shrink_dataset(train_data, args.dataset_fraction)
        test_data = shrink_dataset(test_data, args.dataset_fraction)

    train_loader = torch.utils.data.DataLoader(dataset=train_data,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_data,
                                              batch_size=args.test_batch_size)

    # Initialize the hardware / mock
    init(args.calibration_path, args.mock, args.mock_noise_std, args.mock_gain)

    model = Model(mock=args.mock)
    log.info(f"Used model:\n{model}")
    if args.resume_from:
        model.load_state_dict(torch.load(args.resume_from))

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=1,
                                                gamma=args.gamma)

    # Evaluate the untrained model once as a baseline
    accuracy = test(model, test_loader)
    for epoch in range(1, args.epochs + 1):
        log.info(f"Train epoch {epoch}")
        train(model, train_loader, optimizer)
        accuracy = test(model, test_loader)
        scheduler.step()

        if checkpoint_path:
            save_path = checkpoint_path.joinpath(f"state_{epoch}.pth")
            log.info(f"Save model state to '{save_path}'")
            torch.save(model.state_dict(), save_path)

    hxtorch.release_hardware()
    return accuracy
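
For reference, a minimal way to drive `main` could look like the sketch below. The flag names mirror the `args.<name>` attributes the function reads above, but the parser itself, its defaults, and the description are assumptions; the original module ships its own argument handling.

import argparse

# Hypothetical parser: every flag corresponds to an attribute accessed in
# `main` above, yet all defaults are placeholders, not the project's values.
parser = argparse.ArgumentParser(description="hxtorch MNIST experiment")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--batch-size", type=int, default=64)
parser.add_argument("--test-batch-size", type=int, default=1000)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--gamma", type=float, default=0.7)
parser.add_argument("--dataset-fraction", type=float, default=1.0)
parser.add_argument("--data-path", default="./data")
parser.add_argument("--checkpoint-path", default=None)
parser.add_argument("--resume-from", default=None)
parser.add_argument("--calibration-path", default=None)
parser.add_argument("--mock", action="store_true")
parser.add_argument("--mock-noise-std", type=float, default=0.)
parser.add_argument("--mock-gain", type=float, default=1.)

main(parser.parse_args())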
Example #2
def main():
    # initialize the connection, uses default nightly calib for the setup
    hxtorch.init_hardware()

    x = torch.full((128, ), 10.)  # input range: 0...31
    w = torch.full((128, 256), 20.)  # weight range: -63...63
    # this weight uses the whole upper half of the chip

    out = hxtorch.matmul(
        x, w,  # same call semantics as `torch.matmul`
        # number of subsequent sends of the input in the same integration step:
        num_sends=1,
        # wait between sending the individual vector entries (in FPGA cycles):
        wait_between_events=5)
    # output range: -128...127

    log = hxtorch.logger.get("hxtorch.examples.minimal")
    log.info(f"Input (mean): {x.mean()}, "
             f"weight (mean): {w.mean()}, "
             f"output (mean): {out.mean()}")
    hxtorch.release_hardware()
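
Because the analog operation scales and saturates the ideal result (note the output range of -128...127 against an ideal value of 25600 per entry), a quick plausibility check is to compare the measured mean against the ideal multiply-accumulate. This is a sketch, not part of the example: `estimated_gain` is a hypothetical helper operating on the `out` tensor from above.

import torch

def estimated_gain(out: torch.Tensor) -> float:
    # Ideal result of the same operation: each entry is 128 * 10 * 20 = 25600,
    # so the ratio of means approximates the effective gain of the hardware.
    ideal = torch.matmul(torch.full((128,), 10.), torch.full((128, 256), 20.))
    return float(out.float().mean() / ideal.mean())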
Example #3
@classmethod
def tearDownClass(cls):
    hxtorch.release_hardware()
Example #4
def test_measure(self):
    hxtorch.init_hardware()
    mock_parameter = hxtorch.measure_mock_parameter()
    hxtorch.release_hardware()
    # The measured gain of the analog multiply-accumulate must lie in (0, 1]
    self.assertGreater(mock_parameter.gain, 0)
    self.assertLessEqual(mock_parameter.gain, 1)
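
Given the `gain` attribute checked here and the `mock_noise_std` / `mock_gain` arguments in Example #1, the mock can be pictured as a scaled ideal matmul with additive noise. The following is a minimal sketch under exactly that assumption, not hxtorch's actual mock implementation:

import torch

def simulate_mock(x: torch.Tensor, w: torch.Tensor,
                  gain: float, noise_std: float = 0.) -> torch.Tensor:
    # Hypothetical software stand-in: scale the ideal result, add Gaussian
    # read-out noise, and clamp to the output range seen in Example #2.
    out = gain * torch.matmul(x, w)
    out = out + noise_std * torch.randn_like(out)
    return torch.clamp(out, -128., 127.)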
Example #5
@classmethod
def tearDownClass(cls) -> None:
    hxtorch.release_hardware()  # also disconnects executor
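
The `tearDownClass` snippets above belong to the standard unittest pattern of connecting to the hardware once per test class and releasing it when all tests have run. A self-contained sketch of that pattern follows; the class name and test body are illustrative, reusing the measurement from Example #4:

import unittest
import hxtorch

class HardwareTestCase(unittest.TestCase):
    """Connect once for all tests in the class, release afterwards."""

    @classmethod
    def setUpClass(cls) -> None:
        hxtorch.init_hardware()

    @classmethod
    def tearDownClass(cls) -> None:
        hxtorch.release_hardware()  # also disconnects executor

    def test_mock_gain(self):
        mock_parameter = hxtorch.measure_mock_parameter()
        self.assertGreater(mock_parameter.gain, 0)

if __name__ == "__main__":
    unittest.main()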