def main_worker(gpus, args):
    # Define the model, loss function, and optimizer
    model = resnet18()
    torch.cuda.set_device('cuda:{}'.format(gpus[0]))
    model.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)

    # apex initialization
    model, optimizer = amp.initialize(model, optimizer)

    # If more than one GPU is used, wrap the model with nn.DataParallel
    if len(gpus) > 1:
        model = nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])

    # Define Training Schedule and Dataloader
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[60, 120, 160],
                                                     gamma=0.2)
    train_dataset = get_train_dataset()
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               num_workers=4,
                                               pin_memory=True)
    test_dataset = get_test_dataset()
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              num_workers=4,
                                              pin_memory=True)

    # Training
    for epoch in range(args.epochs):
        start = time.time()
        model.train()

        for step, (images, labels) in enumerate(train_loader):
            # Move the batch to the GPU
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            outputs = model(images)
            loss = criterion(outputs, labels)

            # Update the model weights; with apex amp, the plain
            # loss.backward() is replaced by backward on the scaled loss
            optimizer.zero_grad()
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()

            print(
                'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'
                .format(loss.item(),
                        optimizer.param_groups[0]['lr'],
                        epoch=epoch + 1,
                        trained_samples=step * args.batch_size + len(images),
                        total_samples=len(train_loader.dataset)))

        # Step the LR scheduler once per epoch; passing the epoch index
        # to scheduler.step() is deprecated in recent PyTorch
        train_scheduler.step()

        finish = time.time()
        print('epoch {} training time consumed: {:.2f}s'.format(
            epoch, finish - start))

        # validate after every epoch
        validate(test_loader, model, criterion)
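
The validate helper called after each epoch is not shown in this snippet. A minimal sketch, assuming it only reports average loss and top-1 accuracy on the test set (and the same torch imports as above), could look like this:

@torch.no_grad()
def validate(test_loader, model, criterion):
    # Hypothetical sketch, not the original implementation: report the
    # average loss and top-1 accuracy over the test set.
    model.eval()
    total_loss, correct, total = 0.0, 0, 0
    for images, labels in test_loader:
        images = images.cuda(non_blocking=True)
        labels = labels.cuda(non_blocking=True)
        outputs = model(images)
        total_loss += criterion(outputs, labels).item() * labels.size(0)
        correct += (outputs.argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
    print('Test Loss: {:0.4f}\tAccuracy: {:0.4f}'.format(
        total_loss / total, correct / total))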
Example 2

parser.add_argument('--outf', default='.')
parser.add_argument('--rotation', default=None)

args = parser.parse_args()
my_makedir(args.outf)
import torch.backends.cudnn as cudnn
cudnn.benchmark = True


def gn_helper(planes):
    return nn.GroupNorm(args.group_norm, planes)


norm_layer = gn_helper

net = resnet18(num_classes=10, norm_layer=norm_layer)
net.to(device)
net = torch.nn.DataParallel(net)

_, teloader = prepare_test_data(args)
_, trloader = prepare_train_data(args)

parameters = list(net.parameters())
optimizer = torch.optim.SGD(parameters,
                            lr=args.lr,
                            momentum=0.9,
                            weight_decay=1e-4)
criterion = nn.CrossEntropyLoss().to(device)


def train(trloader, epoch):
    ...  # truncated in the source
Example 3

def main_worker(local_rank, nprocs, args):
    args.local_rank = local_rank
    init_seeds(local_rank+1)
    # Build the init_method address from the communication IP and port
    init_method = 'tcp://' + args.ip + ':' + args.port

    # 1. Distributed initialization; every process must run this, so it is done inside main_worker
    cudnn.benchmark = True
    dist.init_process_group(backend='nccl', init_method=init_method, world_size=args.nprocs,
                            rank=local_rank)

    # 2. Basic definitions: model, loss function, optimizer
    model = resnet18()
    torch.cuda.set_device(local_rank)
    model.cuda(local_rank)
    criterion = nn.CrossEntropyLoss().cuda(local_rank)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=0.9, weight_decay=1e-4)
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.2)

    # apex initialization
    model = apex.parallel.convert_syncbn_model(model).to(local_rank)  # use apex's SyncBatchNorm
    model, optimizer = amp.initialize(model, optimizer)
    model = DDP(model)

    # 3. Load the data; split the global batch size evenly across processes
    batch_size = int(args.batch_size / nprocs)

    train_dataset = get_train_dataset()
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=4, pin_memory=True, sampler=train_sampler)

    test_dataset = get_test_dataset()
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, num_workers=4, pin_memory=True, sampler=test_sampler)

    for epoch in range(args.epochs):
        start = time.time()
        model.train()
        train_sampler.set_epoch(epoch)

        for step, (images, labels) in enumerate(train_loader):
            # Move this process's batch to its GPU
            images = images.cuda(local_rank, non_blocking=True)
            labels = labels.cuda(local_rank, non_blocking=True)

            outputs = model(images)
            loss = criterion(outputs, labels)

            torch.distributed.barrier()
            reduced_loss = reduce_mean(loss, args.nprocs)

            # Update the model weights; wrap the loss with amp.scale_loss
            optimizer.zero_grad()
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()

            if args.local_rank == 0:
                print(
                    'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'.format(
                        reduced_loss.item(),
                        optimizer.param_groups[0]['lr'],
                        epoch=epoch+1,
                        trained_samples=step * args.batch_size + len(images),
                        total_samples=len(train_loader.dataset)
                    ))

        # Step the LR scheduler once per epoch; passing the epoch index
        # to scheduler.step() is deprecated in recent PyTorch
        train_scheduler.step()

        finish = time.time()
        if args.local_rank == 0:
            print('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))

        # validate after every epoch
        validate(test_loader, model, criterion, local_rank, args)
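
The reduce_mean helper used above (and again in Example 5) is not defined anywhere in these snippets. A plausible sketch, inferred from its call site reduce_mean(loss, args.nprocs), sums the tensor across all processes and divides by the process count:

def reduce_mean(tensor, nprocs):
    # Hypothetical sketch inferred from usage: average a tensor across
    # all distributed processes via all_reduce.
    rt = tensor.clone()
    torch.distributed.all_reduce(rt, op=torch.distributed.ReduceOp.SUM)
    rt /= nprocs
    return rt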
Example 4

########################################################################
parser.add_argument('--resume', default=None)
parser.add_argument('--outf', default='.')
parser.add_argument('--epochs', default=10, type=int)

args = parser.parse_args()
args.threshold += 0.001		# to correct for numeric errors
my_makedir(args.outf)
import torch.backends.cudnn as cudnn
cudnn.benchmark = True

def gn_helper(planes):
    return nn.GroupNorm(args.group_norm, planes)
norm_layer = gn_helper

net = resnet18(num_classes = 10, norm_layer=norm_layer).to(device)
net = torch.nn.DataParallel(net)

print('Resuming from %s...' %(args.resume))
ckpt = torch.load('%s/best.pth' %(args.resume))
net.load_state_dict(ckpt['net'])

criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr)

_, teloader = prepare_test_data(args)

print("Random Attack...")
for i in range(args.epochs):
    r = np.random.normal(0.4914, 0.2023, (32, 32))
    g = np.random.normal(0.4822, 0.1994, (32, 32))
Example 5

def main_worker(local_rank, nprocs, args):
    args.local_rank = local_rank
    init_seeds(local_rank + 1)  # set different seed for each worker
    # Build the init_method address from the communication IP and port
    init_method = 'tcp://' + args.ip + ':' + args.port

    # 1. Distributed initialization; every process must run this, so it is done inside main_worker
    cudnn.benchmark = True
    dist.init_process_group(backend='nccl',
                            init_method=init_method,
                            world_size=args.nprocs,
                            rank=local_rank)

    # 2. Basic definitions: model, loss function, optimizer
    # Define the model, then pin it to this process's GPU via
    # torch.cuda.set_device(local_rank) / .cuda(local_rank)
    model = resnet18()

    # The parts below are where local_rank is needed: the model
    # ================================
    torch.cuda.set_device(local_rank)  # use set_device and .cuda() to pick this process's GPU
    model.cuda(local_rank)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[local_rank])  # wrap the model with DistributedDataParallel
    # ================================
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=0.9,
                                weight_decay=1e-4)
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[60, 120, 160],
                                                     gamma=0.2)

    # 3. Load the data
    # The global batch_size must be split manually into per-GPU mini-batch sizes
    batch_size = int(args.batch_size / nprocs)

    train_dataset = get_train_dataset()
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               num_workers=4,
                                               pin_memory=True,
                                               sampler=train_sampler)

    test_dataset = get_test_dataset()
    test_sampler = torch.utils.data.distributed.DistributedSampler(
        test_dataset)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=batch_size,
                                              num_workers=4,
                                              pin_memory=True,
                                              sampler=test_sampler)

    for epoch in range(args.epochs):
        start = time.time()
        model.train()
        # Set the sampler's epoch to the current epoch so the DataLoader's shuffle stays effective
        train_sampler.set_epoch(epoch)

        for step, (images, labels) in enumerate(train_loader):
            # Move this process's batch to the GPU
            images = images.cuda(non_blocking=True)
            labels = labels.cuda(non_blocking=True)

            outputs = model(images)
            loss = criterion(outputs, labels)

            # torch.distributed.barrier() blocks each process until all processes
            # have executed the code before this line; without it, averaging the
            # loss/accuracy could go wrong because processes run at different speeds
            torch.distributed.barrier()
            reduced_loss = reduce_mean(loss, args.nprocs)

            # Update the model weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if args.local_rank == 0:
                print(
                    'Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'
                    .format(reduced_loss.item(),
                            optimizer.param_groups[0]['lr'],
                            epoch=epoch + 1,
                            trained_samples=step * args.batch_size +
                            len(images),
                            total_samples=len(train_loader.dataset)))

        # Step the LR scheduler once per epoch; passing the epoch index
        # to scheduler.step() is deprecated in recent PyTorch
        train_scheduler.step()

        finish = time.time()
        if args.local_rank == 0:
            print('epoch {} training time consumed: {:.2f}s'.format(
                epoch, finish - start))

        # validate after every epoch
        validate(test_loader, model, criterion, local_rank, args)
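
None of these snippets show how main_worker gets launched. A minimal launcher sketch, assuming one process per GPU via torch.multiprocessing.spawn, the argparse parser from the snippets above, and a hypothetical init_seeds helper like the one called inside main_worker, could look like this:

import random

import numpy as np
import torch
import torch.multiprocessing as mp


def init_seeds(seed):
    # Hypothetical sketch inferred from usage: give each worker process
    # its own deterministic seed.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


if __name__ == '__main__':
    args = parser.parse_args()
    args.nprocs = torch.cuda.device_count()
    # Spawn one worker per GPU; each process receives its local_rank as
    # the first positional argument, matching main_worker(local_rank, nprocs, args)
    mp.spawn(main_worker, nprocs=args.nprocs, args=(args.nprocs, args))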