Code Example #1
import os
import random
import warnings

import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing as mp

# get_args() and main_worker() are defined elsewhere in the source file.
def main():

    args = get_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    # world_size is the number of nodes taking part in the job; usually one.
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()

    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes:
        # one main_worker process per GPU on this node.
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
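
For reference, torch.multiprocessing.spawn calls its target with the process index as the first argument, so the main_worker used above receives the local GPU index first. A minimal sketch of its distributed setup, modeled on the standard PyTorch ImageNet example (the args.rank and args.dist_backend fields are assumptions taken from that example):

import torch.distributed as dist

def main_worker(gpu, ngpus_per_node, args):
    # gpu is the process index supplied by mp.spawn, i.e. the local GPU id.
    args.gpu = gpu
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # Global rank = node rank * GPUs per node + local GPU index.
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # ... build the model and data loaders and run the training loop here.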
Code Example #2
File: main.py  Project: ml-edu/SEC_pytorch
def main():
    args = get_args()
    log_folder = os.path.join('train_log', args.name)
    writer = SummaryWriter(log_folder)

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # number of classes for each dataset.
    if args.dataset == 'PascalVOC':
        num_classes = 21
    elif args.dataset == 'COCO':
        num_classes = 81
    else:
        raise Exception("No dataset named {}.".format(args.dataset))

    # Select Model & Method
    model = models.__dict__[args.arch](num_classes=num_classes)

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)

    # Optimizer
    optimizer = torch.optim.SGD(
        [{
            'params': get_parameters(model, bias=False, final=False),
            'lr': args.lr,
            'weight_decay': args.wd
        }, {
            'params': get_parameters(model, bias=True, final=False),
            'lr': args.lr * 2,
            'weight_decay': 0
        }, {
            'params': get_parameters(model, bias=False, final=True),
            'lr': args.lr * 10,
            'weight_decay': args.wd
        }, {
            'params': get_parameters(model, bias=True, final=True),
            'lr': args.lr * 20,
            'weight_decay': 0
        }],
        momentum=args.momentum)

    if args.resume:
        model = load_model(model, args.resume)

    train_loader = data_loader(args)
    data_iter = iter(train_loader)
    train_t = tqdm(range(args.max_iter))
    model.train()
    for global_iter in train_t:
        try:
            images, target, gt_map = next(data_iter)
        except StopIteration:
            # The dataset is exhausted; restart the iterator.
            data_iter = iter(train_loader)
            images, target, gt_map = next(data_iter)

        if args.gpu is not None:
            images = images.cuda(args.gpu)
            gt_map = gt_map.cuda(args.gpu)
            target = target.cuda(args.gpu)

        output = model(images)

        fc8_SEC_softmax = softmax_layer(output)
        loss_s = seed_loss_layer(fc8_SEC_softmax, gt_map)
        loss_e = expand_loss_layer(fc8_SEC_softmax, target, num_classes - 1)
        fc8_SEC_CRF_log = crf_layer(output, images, iternum=10)
        loss_c = constrain_loss_layer(fc8_SEC_softmax, fc8_SEC_CRF_log)

        loss = loss_s + loss_e + loss_c

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # writer add_scalars
        writer.add_scalar('loss', loss, global_iter)
        writer.add_scalars('losses', {
            'loss_s': loss_s,
            'loss_e': loss_e,
            'loss_c': loss_c
        }, global_iter)

        with torch.no_grad():
            if global_iter % 10 == 0:
                # writer add_images (origin, output, gt)
                # Add the per-channel mean back to undo input normalization.
                origin = images.clone().detach() + torch.tensor(
                    [123., 117., 107.]).reshape(1, 3, 1, 1).cuda(args.gpu)

                size = (100, 100)
                origin = F.interpolate(origin, size=size)
                origins = vutils.make_grid(origin,
                                           nrow=15,
                                           padding=2,
                                           normalize=True,
                                           scale_each=True)

                outputs = F.interpolate(output, size=size)
                _, outputs = torch.max(outputs, dim=1)
                outputs = outputs.unsqueeze(1)
                outputs = vutils.make_grid(outputs,
                                           nrow=15,
                                           padding=2,
                                           normalize=True,
                                           scale_each=True).float()

                gt_maps = F.interpolate(gt_map, size=size)
                _, gt_maps = torch.max(gt_maps, dim=1)
                gt_maps = gt_maps.unsqueeze(1)
                gt_maps = vutils.make_grid(gt_maps,
                                           nrow=15,
                                           padding=2,
                                           normalize=True,
                                           scale_each=True).float()

                # gt_maps = F.interpolate(gt_map.unsqueeze(1).float(), size=size)
                # gt_maps = vutils.make_grid(gt_maps, nrow=15, padding=2, normalize=True, scale_each=True).float()

                grid_image = torch.cat((origins, outputs, gt_maps), dim=1)
                writer.add_image(args.name, grid_image, global_iter)


        description = '[{0:4d}/{1:4d}] loss: {2:.3f} s: {3:.3f} e: {4:.3f} c: {5:.3f}'.\
            format(global_iter+1, args.max_iter, loss, loss_s, loss_e, loss_c)
        train_t.set_description(desc=description)

        # save snapshot
        if global_iter % args.snapshot == 0:
            save_checkpoint(model.state_dict(), log_folder,
                            'checkpoint_%d.pth.tar' % global_iter)

        # lr decay (skip iteration 0 so the initial rate is not reduced immediately)
        if global_iter > 0 and global_iter % args.lr_decay == 0:
            args.lr = args.lr * 0.1
            optimizer = adjust_learning_rate(optimizer, args.lr)

    print("Training is over...")
    save_checkpoint(model.state_dict(), log_folder, 'last_checkpoint.pth.tar')
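
This example and Code Example #4 both rely on a get_parameters helper to build four optimizer groups: backbone weights and biases at 1x/2x the base learning rate, and final-layer weights and biases at 10x/20x, following the usual DeepLab/SEC training recipe. A possible sketch, assuming the final scoring layer can be recognized by name (the 'fc8' substring is an assumption):

def get_parameters(model, bias=False, final=False):
    # Yield either bias or weight parameters, from either the final
    # scoring layer or all other layers, matching the four groups above.
    for name, param in model.named_parameters():
        is_final = 'fc8' in name  # assumed name of the final layer
        is_bias = name.endswith('.bias')
        if is_final == final and is_bias == bias:
            yield param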
Code Example #3
File: main.py  Project: bityangke/SW_Project
def main():
    args = get_args()

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # number of classes for each dataset.
    if args.dataset == 'PascalVOC':
        num_classes = 20
    else:
        raise Exception("No dataset named {}.".format(args.dataset))

    # Select Model & Method
    model = models.__dict__[args.arch](pretrained=args.pretrained,
                                       num_classes=num_classes)

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)

    # define loss function (criterion) and optimizer
    criterion = nn.MultiLabelSoftMarginLoss().cuda(args.gpu)
    # criterion = nn.BCEWithLogitsLoss().cuda(args.gpu)

    # Take apart parameters to give different Learning Rate
    param_features = []
    param_classifiers = []

    if args.arch.startswith('vgg'):
        for name, parameter in model.named_parameters():
            if 'features.' in name:
                param_features.append(parameter)
            else:
                param_classifiers.append(parameter)
    elif args.arch.startswith('resnet'):
        for name, parameter in model.named_parameters():
            if 'layer4.' in name or 'fc.' in name:
                param_classifiers.append(parameter)
            else:
                param_features.append(parameter)
    else:
        raise Exception("Fail to recognize the architecture")

    # Optimizer
    optimizer = torch.optim.SGD([
        {'params': param_features, 'lr': args.lr},
        {'params': param_classifiers, 'lr': args.lr * args.lr_ratio}],
        momentum=args.momentum,
        weight_decay=args.weight_decay,
        nesterov=args.nest)

    # optionally resume from a checkpoint
    if args.resume:
        model, optimizer = load_model(model, optimizer, args)
    train_loader, val_loader, test_loader = data_loader(args)

    saving_dir = os.path.join(args.log_folder, args.name)

    if args.evaluate:
        # test_ap, test_loss = evaluate_cam(val_loader, model, criterion, args)
        # test_ap, test_loss = evaluate_cam2(val_loader, model, criterion, args)
        test_ap, test_loss = evaluate_cam3(val_loader, model, criterion, args)
        print_progress(test_ap, test_loss, 0, 0, prefix='test')
        return

    # Training Phase
    best_m_ap = 0
    for epoch in range(args.start_epoch, args.epochs):

        adjust_learning_rate(optimizer, epoch, args)

        # Train for one epoch
        train_ap, train_loss = \
            train(train_loader, model, criterion, optimizer, epoch, args)
        print_progress(train_ap, train_loss, epoch+1, args.epochs)

        # Evaluate classification
        val_ap, val_loss = validate(val_loader, model, criterion, epoch, args)
        print_progress(val_ap, val_loss, epoch+1, args.epochs, prefix='validation')

        # Save checkpoint at best performance:
        is_best = val_ap.mean() > best_m_ap
        if is_best:
            best_m_ap = val_ap.mean()

        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_m_ap': best_m_ap,
            'optimizer': optimizer.state_dict(),
        }, is_best, saving_dir)

        save_progress(saving_dir, train_ap, train_loss, val_ap, val_loss, args)
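
The save_checkpoint call above takes an is_best flag, which suggests the common pattern of always writing the latest checkpoint and copying the best one aside. A minimal sketch under that assumption (the file names are assumptions, and note that the save_checkpoint in Examples #2 and #4 has a different signature):

import os
import shutil
import torch

def save_checkpoint(state, is_best, saving_dir, filename='checkpoint.pth.tar'):
    # Always save the latest state; keep a separate copy of the best one.
    path = os.path.join(saving_dir, filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, os.path.join(saving_dir, 'model_best.pth.tar'))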
Code Example #4
File: main.py  Project: terenceylchow124/DSRG_PyTorch
def main():
    args = get_args()
    log_folder = os.path.join('train_log', args.name)
    writer = SummaryWriter(log_folder)
    write_para_report(args)
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    # number of classes for each dataset.
    if args.dataset == 'PascalVOC':
        num_classes = 21
    elif args.dataset == 'COCO':
        num_classes = 81
    else:
        raise Exception("No dataset named {}.".format(args.dataset))

    # Select Model & Method
    print(args.arch)
    model = models.__dict__[args.arch](num_classes=num_classes)
    if args.resume:
        model = load_model(model, args.resume)

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)

    optimizer = torch.optim.SGD(
        [{
            'params': get_parameters(model, bias=False, final=False),
            'lr': args.lr,
            'weight_decay': args.wd
        }, {
            'params': get_parameters(model, bias=True, final=False),
            'lr': args.lr * 2,
            'weight_decay': 0
        }, {
            'params': get_parameters(model, bias=False, final=True),
            'lr': args.lr * 10,
            'weight_decay': args.wd
        }, {
            'params': get_parameters(model, bias=True, final=True),
            'lr': args.lr * 20,
            'weight_decay': 0
        }],
        momentum=args.momentum)

    train_loader = data_loader(args)
    data_iter = iter(train_loader)
    train_t = range(args.max_iter)
    model.train()

    val_loader = data_loader(args, debugflag=True)

    for global_iter in train_t:
        try:
            images, targets, gt_maps, true_gt_imgs = next(data_iter)
        except StopIteration:
            # The dataset is exhausted; restart the iterator.
            data_iter = iter(train_loader)
            images, targets, gt_maps, true_gt_imgs = next(data_iter)

        images = gpu_allocation(images, args.gpu)
        targets = gpu_allocation(targets, args.gpu)
        gt_maps = gpu_allocation(gt_maps, args.gpu)
        true_gt_imgs = gpu_allocation(true_gt_imgs, args.gpu)

        outputs = model(images)

        # boundary loss
        fc8_softmax = softmax_layer(outputs)  # class probabilities, B x num_classes x 41 x 41
        fc8_CRF_log = crf_layer(outputs, images, iternum=10)
        loss_c = constrain_loss_layer(fc8_softmax, fc8_CRF_log)

        # seeding loss
        gt_map_new = dsrg_layer(targets, gt_maps, fc8_softmax, num_classes,
                                args.thre_fg, args.thre_bg, args.workers)
        gt_map_new = gpu_allocation(gt_map_new, args.gpu)
        loss_dsrg, loss_b, loss_bc, loss_f, loss_fc = dsrg_seed_loss_layer(
            fc8_softmax, gt_map_new)

        # total loss
        loss = loss_dsrg + loss_c

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # writer add_scalars
        writer.add_scalar('loss', loss, global_iter)
        writer.add_scalars('losses', {
            'loss_dsrg': loss_dsrg,
            'loss_c': loss_c
        }, global_iter)

        with torch.no_grad():
            if global_iter % 20 == 0:
                val_data_iter = iter(val_loader)
                val_images, val_targets, val_gt_maps, val_true_gt_imgs = next(
                    val_data_iter)

                val_images, val_true_gt_imgs, val_outputs, val_gt_maps, val_gt_map_new\
                    = validation(val_images, val_targets, val_gt_maps, val_true_gt_imgs, model, args, num_classes)

                images_row, outputs_row, gt_maps_row, gt_maps_new_row, true_gt_imgs_row\
                    = grid_prepare(val_images, val_outputs, val_gt_maps, val_gt_map_new, val_true_gt_imgs, 8)

                grid_image = torch.cat(
                    (images_row, true_gt_imgs_row, outputs_row, gt_maps_row,
                     gt_maps_new_row),
                    dim=1)
                writer.add_image(args.name, grid_image, global_iter)
                writer.flush()

        description = "[{0:4d}/{1:4d}] loss: {2:.3f} dsrg: {3:.3f} bg: {5:.3f} fg: {6:.3f} c: {4:.3f}".format(global_iter+1, \
                        args.max_iter, loss, loss_dsrg, loss_c, loss_b, loss_f)

        print(description)

        # save snapshot
        if global_iter % args.snapshot == 0:
            save_checkpoint(model.state_dict(), log_folder,
                            'checkpoint_%d.pth.tar' % global_iter)

        # lr decay (skip iteration 0 so the initial rate is not reduced immediately)
        if global_iter > 0 and global_iter % args.lr_decay == 0:
            args.lr = args.lr * 0.1
            optimizer = adjust_learning_rate(optimizer, args.lr)

    print("Training is over...")
    save_checkpoint(model.state_dict(), log_folder, 'last_checkpoint.pth.tar')
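
Code Examples #2 and #4 both restart their DataLoader iterator by hand when it is exhausted. The same pattern can be factored into a small helper so the training loop needs no try/except; a sketch:

def infinite_loader(loader):
    # Cycle over a DataLoader forever; each pass re-iterates (and, with
    # shuffle=True, re-shuffles) the underlying dataset.
    while True:
        for batch in loader:
            yield batch

With this helper, data_iter = infinite_loader(train_loader) followed by images, target, gt_map = next(data_iter) replaces the try/except blocks in the loops above.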