Example #1
        'horizontal_parallel_size': 1,
        'pipeline_parallel': {
            'pipeline_parallel_size': int(num_pipeline_stages),
            'num_pipeline_micro_batches': num_pipeline_steps,
            'sliced_schema': pipeline_schema,
            'sliced_axes': sliced_axes,
            'sliced_tensor_names': ['x', 'target', 'output'],
            # Define pipeline stage partition by specifying cut points.
            # 2-stage cut. It's a cut on tensor "12".
            'pipeline_cut_info_string': '12'
        },
        'allreduce_post_accumulation': True
    }
})

trainer = ORTTrainer(model, schema, adam_config, apply_loss, trainer_config)

loss_history = []
for i in range(5):
    l, p = trainer.train_step(x.to(cuda_device), y.to(cuda_device))
    loss_history.append(l)

# Valid ranks are [0, 1, 2, 3].
# [0, 2] forms the 2-stage pipeline in the 1st data parallel group.
# [1, 3] forms the 2-stage pipeline in the 2nd data parallel group.
last_pipeline_stage_ranks = [2, 3]

# The loss values computed at the last pipeline stages. Note that intermediate
# stages may not have valid loss values, so we don't check them.
expected_loss_history = [0.8660, 1.1219, 1.6610, 1.2641, 1.0162]
if rank in last_pipeline_stage_ranks:
    # Only the last pipeline stage produces a meaningful loss, so compare its
    # recorded values against the expected history with a loose tolerance.
    for actual, expected in zip(loss_history, expected_loss_history):
        assert abs(actual.item() - expected) < 1e-3
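The rank layout described in the comments above (2 pipeline stages x 2 data-parallel groups over 4 ranks) can be made explicit with a small helper. This is a sketch for illustration only; the helper name and the stage-major ordering are assumptions, not part of the example.

# Sketch only: map a rank to its pipeline stage and data-parallel group under
# the 4-rank layout above. The function name and the stage-major convention
# are assumptions made for this illustration.
def rank_to_stage_and_group(rank, data_parallel_size=2):
    stage = rank // data_parallel_size   # ranks [0, 1] -> stage 0, [2, 3] -> stage 1
    group = rank % data_parallel_size    # ranks [0, 2] -> group 0, [1, 3] -> group 1
    return stage, group

# rank 0 -> (0, 0), rank 2 -> (1, 0): the 2-stage pipeline of the 1st data parallel group
# rank 1 -> (0, 1), rank 3 -> (1, 1): the 2-stage pipeline of the 2nd data parallel group
# last_pipeline_stage_ranks == [2, 3] because both ranks map to stage 1.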
Example #2
best_val_loss = float("inf")
epochs = 3  # The number of epochs
best_model = None

model_description = {
    'inputs': [('src', ['bptt', 'batch_size']),
               ('label', ['bptt_x_batch_size'])],
    'outputs': [('loss', [], True), ('output', ['bptt', 'batch_size',
                                                ntokens])]
}

optimizer_config = optim.AdamConfig(lr=learning_rate)

trainer = ORTTrainer(
    model,  # model
    model_description,  # model description
    optimizer_config,  # optimizer configuration
    loss_with_flat_output)  # loss function

for epoch in range(1, epochs + 1):
    epoch_start_time = time.time()
    train()
    val_loss = evaluate(model, val_data)
    print('-' * 89)
    print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
          'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                     val_loss, math.exp(val_loss)))
    print('-' * 89)

    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = model
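The trainer above is constructed with a loss_with_flat_output callable that the excerpt does not show. A minimal sketch of what such a loss could look like, assuming the 'label' input arrives flattened to bptt * batch_size (as the 'bptt_x_batch_size' axis in the model description suggests); the original helper may differ:

import torch.nn.functional as F

# Sketch only: flatten the [bptt, batch_size, ntokens] output so it lines up
# with the already-flattened label before applying cross-entropy.
def loss_with_flat_output(output, label):
    return F.cross_entropy(output.view(-1, output.size(-1)), label)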
Example #3
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='ONNX Runtime MNIST Example')
    parser.add_argument('--train-steps', type=int, default=-1, metavar='N',
                        help='number of steps to train. Set -1 to run through whole dataset (default: -1)')
    parser.add_argument('--batch-size', type=int, default=20, metavar='N',
                        help='input batch size for training (default: 20)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=1, metavar='N',
                        help='number of epochs to train (default: 1)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-path', type=str, default='',
                        help='Path for Saving the current Model state')

    # Basic setup
    args = parser.parse_args()
    if not args.no_cuda and torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    torch.manual_seed(args.seed)
    onnxruntime.set_seed(args.seed)

    # Data loader
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True)

    if args.test_batch_size > 0:
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('./data', train=False, transform=transforms.Compose([
                transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
            batch_size=args.test_batch_size, shuffle=True)

    # Modeling
    model = NeuralNet(784, 500, 10)
    model_desc = mnist_model_description()
    optim_config = optim.SGDConfig(lr=args.lr)
    opts = {'device': {'id': device}}
    opts = ORTTrainerOptions(opts)

    trainer = ORTTrainer(model,
                         model_desc,
                         optim_config,
                         loss_fn=my_loss,
                         options=opts)

    # Train loop
    for epoch in range(1, args.epochs + 1):
        train(args.log_interval, trainer, device, train_loader, epoch, args.train_steps)
        if args.test_batch_size > 0:
            test(trainer, device, test_loader)

    # Save model
    if args.save_path:
        torch.save(model.state_dict(), os.path.join(args.save_path, "mnist_cnn.pt"))
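mnist_model_description() is referenced above but not defined in the excerpt. A plausible sketch, assuming NeuralNet(784, 500, 10) consumes a flattened image 'x' and an integer class 'target', and that my_loss yields a scalar 'loss'; the names and the 'batch' dynamic axis are assumptions:

# Sketch only: one possible ORTTrainer model description for the MNIST model above.
def mnist_model_description():
    return {
        'inputs':  [('x', ['batch', 784]),
                    ('target', ['batch'])],
        'outputs': [('loss', [], True),             # is_loss=True
                    ('probability', ['batch', 10])]
    }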
Example #4

# Require the training graph output (the loss) to be a scalar, i.e. shape [] or [1].
if len(model_output.type.tensor_type.shape.dim) > 1 or \
        (len(model_output.type.tensor_type.shape.dim) == 1 and
         model_output.type.tensor_type.shape.dim[0].dim_value != 1):
    print(
        "Training graph output must be a scalar with shape either [] or [1]; shape was: \n"
        + str(model_output.type.tensor_type.shape))
    exit(1)

model_desc = {
    "inputs": inputs_description,  # (name, shape)
    "outputs": [(model_output.name, [], True)],  # (name, shape, is_loss)
}

options = {'device': {'id': 'cpu'}}

trainer = ORTTrainer(model,
                     model_desc,
                     optim_config=SGDConfig(lr=0.01),
                     loss_fn=None,
                     options=ORTTrainerOptions(options))

start = time.time()
loss = trainer.train_step(inputs_values)
end = time.time()

updated_initializers = trainer._training_session.get_state()

# Flatten each updated initializer tensor into a plain Python list.
for name in updated_initializers:
    updated_initializers[name] = updated_initializers[name].flatten().tolist()

training_output = {
    'loss': loss.numpy().flatten().tolist()[0],  # get loss as a number
    'updated_initializers': updated_initializers
}
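Flattening the loss and the updated initializers into plain Python lists makes the result straightforward to serialize; for example (the json usage below is an assumption, not taken from the original):

import json

# Sketch only: persist the training step's result as JSON.
with open('training_output.json', 'w') as f:
    json.dump(training_output, f)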