Example #1
# NOTE: imports assumed from the surrounding project; data, helper, and
# TrtEngine are project-local modules.
import time

from keras import backend as K
from keras.models import load_model
def main():

  # load pre-trained model
  K.set_image_data_format('channels_first')
  model = load_model("nchw_model.h5")
  model.summary()
  
  # load mnist dataset
  x_test, y_test = data.get_test_dataset()
  img_h = x_test.shape[2]
  img_w = x_test.shape[3]
  helper.print_ascii(x_test[0], img_h, img_w)

  # use Keras to infer
  t0 = time.time()
  y_keras = model.predict(x_test)
  t1 = time.time()
  print('Keras time', t1 - t0)
  data.verify(y_keras, y_test)

  # use TensorRT to infer (TrtEngine is a project-local wrapper; the second
  # argument is presumably the maximum batch size)
  engine = TrtEngine(model, 1000)

  t0 = time.time()
  y_trt = engine.infer(x_test)
  t1 = time.time()
  print('TensorRT time', t1 - t0)
  data.verify(y_trt, y_test)

  engine.save('nchw_engine.bin')
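Note: Example #1 keeps the model in NCHW (channels_first) layout, which is TensorRT's native data format; Example #2 below runs the NHWC variant of the same model through TensorFlow and TF-TRT instead.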
Example #2
# NOTE: imports assumed from the surrounding project; data, helper, verify,
# FrozenGraph, TfEngine, and TftrtEngine are project-local.
import time

from keras.models import load_model
def main():
    # load pre-trained model
    model = load_model("nhwc_model.h5")
    model.summary()

    # load mnist dataset
    x_test, y_test = data.get_test_dataset()
    batch_size = 1000
    img_h = x_test.shape[1]
    img_w = x_test.shape[2]
    helper.print_ascii(x_test[0], img_h, img_w)

    # use Keras to infer
    t0 = time.time()
    y_keras = model.predict(x_test)
    t1 = time.time()
    print('Keras time', t1 - t0)
    data.verify(y_keras, y_test)

    frozen_graph = FrozenGraph(model, (img_h, img_w, 1))

    tf_engine = TfEngine(frozen_graph)
    t0 = time.time()
    y_tf = tf_engine.infer(x_test)
    t1 = time.time()
    print('Tensorflow time', t1 - t0)
    verify(y_tf, y_keras)

    tftrt_engine = TftrtEngine(frozen_graph, batch_size, 'FP32')
    t0 = time.time()
    y_tftrt = tftrt_engine.infer(x_test)
    t1 = time.time()
    print('TFTRT time', t1 - t0)
    verify(y_tftrt, y_keras)

    tftrt_engine = TftrtEngine(frozen_graph, batch_size, 'FP16')
    t0 = time.time()
    y_tftrt = tftrt_engine.infer(x_test)
    t1 = time.time()
    print('TFTRT_FP16 time', t1 - t0)
    verify(y_tftrt, y_keras)
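The verify helpers above are project-local and not shown. A minimal sketch of what they presumably do (the body below is an assumption, not the project's code): reduce both outputs to class indices and report the match rate.

import numpy as np

def verify(y_pred, y_ref):
    # hypothetical stand-in for the snippets' verify() helper
    pred = np.argmax(y_pred, axis=1)
    ref = np.argmax(y_ref, axis=1) if y_ref.ndim > 1 else y_ref
    print('match rate: {:.4f}'.format(float(np.mean(pred == ref))))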
Example #3
# NOTE: imports assumed from the surrounding project; resnet18, get_train_dataset,
# get_test_dataset, and validate are project-local; amp comes from NVIDIA apex.
import time

import torch
import torch.nn as nn
import torch.optim as optim
from apex import amp

def main_worker(gpus, args):
    # define the model, the loss function, and the optimizer
    model = resnet18()
    torch.cuda.set_device('cuda:{}'.format(gpus[0]))
    model.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)

    # apex initialization
    model, optimizer = amp.initialize(model, optimizer)

    # if more than one GPU is used, wrap the model with nn.DataParallel
    if len(gpus) > 1:
        model = nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])

    # Define Training Schedule and Dataloader
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[60, 120, 160],
                                                     gamma=0.2)
    train_dataset = get_train_dataset()
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               num_workers=4,
                                               pin_memory=True)
    test_dataset = get_test_dataset()
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              num_workers=4,
                                              pin_memory=True)

    # Training
    for epoch in range(args.epochs):
        start = time.time()
        model.train()

        # step train_scheduler to adjust the learning rate
        # (passing epoch explicitly is the older scheduler API)
        train_scheduler.step(epoch)

        for step, (images, labels) in enumerate(train_loader):
            # move this batch onto the GPU
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            outputs = model(images)
            loss = criterion(outputs, labels)

            # update the model weights; with apex amp the loss is scaled
            # before backward instead of calling loss.backward() directly
            optimizer.zero_grad()
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()

            print(
                'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'
                .format(loss.item(),
                        optimizer.param_groups[0]['lr'],
                        epoch=epoch + 1,
                        trained_samples=step * args.batch_size + len(images),
                        total_samples=len(train_loader.dataset)))

        finish = time.time()
        print('epoch {} training time consumed: {:.2f}s'.format(
            epoch, finish - start))

        # validate after every epoch
        validate(test_loader, model, criterion)
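Example #3 runs in a single process and parallelizes only through nn.DataParallel, so it needs no launcher. A minimal hypothetical entry point (parse_args and the GPU list are assumptions):

if __name__ == '__main__':
    args = parse_args()  # hypothetical argparse wrapper providing lr, epochs, batch_size
    main_worker(gpus=[0, 1], args=args)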
Example #4
# NOTE: imports assumed from the surrounding project; resnet18, init_seeds,
# get_train_dataset, get_test_dataset, reduce_mean, and validate are
# project-local; apex is NVIDIA's mixed-precision library.
import time

import apex
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
from apex import amp
from apex.parallel import DistributedDataParallel as DDP

def main_worker(local_rank, nprocs, args):
    args.local_rank = local_rank
    init_seeds(local_rank + 1)
    # build the init_method URL (rendezvous IP and port)
    init_method = 'tcp://' + args.ip + ':' + args.port

    # 1. distributed initialization; every process must initialize, so it is done in main_worker
    cudnn.benchmark = True
    dist.init_process_group(backend='nccl', init_method=init_method, world_size=args.nprocs,
                            rank=local_rank)

    # 2. basic definitions: model, loss function, optimizer
    model = resnet18()
    torch.cuda.set_device(local_rank)
    model.cuda(local_rank)
    criterion = nn.CrossEntropyLoss().cuda(local_rank)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=0.9, weight_decay=1e-4)
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.2)

    # apex initialization
    model = apex.parallel.convert_syncbn_model(model).to(local_rank)  # use apex's SyncBatchNorm
    model, optimizer = amp.initialize(model, optimizer)
    model = DDP(model)

    # 3. load the data
    batch_size = int(args.batch_size / nprocs)

    train_dataset = get_train_dataset()
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=4, pin_memory=True, sampler=train_sampler)

    test_dataset = get_test_dataset()
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, num_workers=4, pin_memory=True, sampler=test_sampler)

    for epoch in range(args.epochs):
        start = time.time()
        model.train()
        train_sampler.set_epoch(epoch)
        train_scheduler.step(epoch)

        for step, (images, labels) in enumerate(train_loader):
            # move this process's batch onto its GPU
            images = images.cuda(local_rank, non_blocking=True)
            labels = labels.cuda(local_rank, non_blocking=True)

            outputs = model(images)
            loss = criterion(outputs, labels)

            torch.distributed.barrier()
            reduced_loss = reduce_mean(loss, args.nprocs)

            # update the model weights, wrapping the loss with scale_loss
            optimizer.zero_grad()
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()

            if args.local_rank == 0:
                print(
                    'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'.format(
                        reduced_loss.item(),
                        optimizer.param_groups[0]['lr'],
                        epoch=epoch + 1,
                        trained_samples=step * args.batch_size + len(images),
                        total_samples=len(train_loader.dataset)
                    ))

        finish = time.time()
        if args.local_rank == 0:
            print('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))

        # validate after every epoch
        validate(test_loader, model, criterion, local_rank, args)
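reduce_mean is referenced but never defined in these snippets. A minimal sketch of the usual implementation, assuming the loss is a scalar tensor: all-reduce it across processes, then divide by the process count.

def reduce_mean(tensor, nprocs):
    # average a tensor across all distributed processes
    rt = tensor.clone()
    torch.distributed.all_reduce(rt, op=torch.distributed.ReduceOp.SUM)
    rt /= nprocs
    return rt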
Example #5
# NOTE: imports assumed from the surrounding project; resnet18, init_seeds,
# get_train_dataset, get_test_dataset, reduce_mean, and validate are project-local.
import time

import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
def main_worker(local_rank, nprocs, args):
    args.local_rank = local_rank
    init_seeds(local_rank + 1)  # set different seed for each worker
    # build the init_method URL (rendezvous IP and port)
    init_method = 'tcp://' + args.ip + ':' + args.port

    # 1. distributed initialization; every process must initialize, so it is done in main_worker
    cudnn.benchmark = True
    dist.init_process_group(backend='nccl',
                            init_method=init_method,
                            world_size=args.nprocs,
                            rank=local_rank)

    # 2. basic definitions: model, loss function, optimizer
    # define the model and pin this process to its GPU via
    # .set_device(local_rank) / .cuda(local_rank)
    model = resnet18()

    # the parts below need local_rank: the model
    # ================================
    torch.cuda.set_device(local_rank)  # use set_device and .cuda to pick the GPU
    model.cuda(local_rank)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[local_rank])  # wrap the model in DistributedDataParallel
    # =================================
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[60, 120, 160],
                                                     gamma=0.2)

    # 3. load the data
    batch_size = int(args.batch_size /
                     nprocs)  # split the global batch_size into per-GPU mini-batches

    train_dataset = get_train_dataset()
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               num_workers=4,
                                               pin_memory=True,
                                               sampler=train_sampler)

    test_dataset = get_test_dataset()
    test_sampler = torch.utils.data.distributed.DistributedSampler(
        test_dataset)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=batch_size,
                                              num_workers=4,
                                              pin_memory=True,
                                              sampler=test_sampler)

    for epoch in range(args.epochs):
        start = time.time()
        model.train()
        # set the sampler's epoch each epoch so the DataLoader reshuffles correctly
        train_sampler.set_epoch(epoch)

        # step train_scheduler to adjust the learning rate
        train_scheduler.step(epoch)

        for step, (images, labels) in enumerate(train_loader):
            # move this process's batch onto the GPU
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            outputs = model(images)
            loss = criterion(outputs, labels)

            # torch.distributed.barrier() blocks each process until all of them
            # reach this line, so averaging the loss (and accuracy) is not
            # skewed by processes running at different speeds
            torch.distributed.barrier()
            reduced_loss = reduce_mean(loss, args.nprocs)

            # update the model weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if args.local_rank == 0:
                print(
                    'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'
                    .format(reduced_loss.item(),
                            optimizer.param_groups[0]['lr'],
                            epoch=epoch + 1,
                            trained_samples=step * args.batch_size +
                            len(images),
                            total_samples=len(train_loader.dataset)))

        finish = time.time()
        if args.local_rank == 0:
            print('epoch {} training time consumed: {:.2f}s'.format(
                epoch, finish - start))

        # validate after every epoch
        validate(test_loader, model, criterion, local_rank, args)
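Examples #4 and #5 expect one process per GPU, each running main_worker with its own rank. A minimal hypothetical launcher using torch.multiprocessing.spawn (parse_args and the ip/port arguments are assumptions):

import torch.multiprocessing as mp

if __name__ == '__main__':
    args = parse_args()                      # hypothetical argparse wrapper (ip, port, lr, ...)
    args.nprocs = torch.cuda.device_count()  # one worker process per GPU
    # spawn passes the process index as the first argument (local_rank)
    mp.spawn(main_worker, args=(args.nprocs, args), nprocs=args.nprocs)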