Example #1
def main():
    """Create the model and start the training."""

    print("=====> Set GPU for training")
    if args.cuda:
        print("====> use gpu id: '{}'".format(args.gpus))
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
        if not torch.cuda.is_available():
            raise Exception(
                "No GPU found or wrong GPU id; please run without --cuda")

    args.seed = random.randint(1, 10000)
    print("Random Seed: ", args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    # Create network.

    print("=====> Building network")
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)
    # if they are present in the var_list of the optimizer definition.
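
    # A minimal sketch (not part of this script) of how the BN statistics
    # could be frozen explicitly, assuming the model uses standard
    # nn.BatchNorm2d modules. Defined here for reference only; the code
    # below does not call it.
    def freeze_bn_stats(module):
        for m in module.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()  # use stored running mean/var, stop updating them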

    print("=====> Loading pretrained weights")
    saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        if args.num_classes != 21 or i_parts[1] != 'layer5':
            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model.load_state_dict(new_params)

    model.train()
    model.cuda()

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    print('=====> Computing network parameters')
    total_parameters = netParams(model)
    print('Total network parameters: ' + str(total_parameters))
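    # (netParams is not shown here; it presumably reduces to
    # sum(p.numel() for p in model.parameters()).)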

    print("=====> Preparing training data")
    trainloader = data.DataLoader(VOCDataSet(args.data_dir,
                                             args.data_list,
                                             max_iters=args.num_steps *
                                             args.batch_size,
                                             crop_size=input_size,
                                             scale=args.random_scale,
                                             mirror=args.random_mirror,
                                             mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=2,
                                  pin_memory=True)

    optimizer = optim.SGD([{
        'params': get_1x_lr_params_NOscale(model),
        'lr': args.learning_rate
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear')

    logFileLoc = os.path.join(args.snapshot_dir, args.logFile)
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
    else:
        logger = open(logFileLoc, 'w')
        logger.write("Parameters: %s" % (str(total_paramters)))
        logger.write("\n%s\t\t%s" % ('iter', 'Loss(train)'))
    logger.flush()

    print("=====> Begin to train")
    for i_iter, batch in enumerate(trainloader):
        images, labels, _, _ = batch
        images = Variable(images).cuda()

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)
        pred = interp(model(images))
        loss = loss_calc(pred, labels)
        loss.backward()
        optimizer.step()

        print('iter = ', i_iter, 'of', args.num_steps, 'completed, loss = ',
              loss.data.cpu().numpy())
        logger.write("\n%d\t\t%.5f" % (i_iter, loss.data.cpu().numpy()))
        logger.flush()
        if i_iter >= args.num_steps - 1:
            print('save model ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(i_iter) + '.pth'))

    logger.close()
    end = timeit.default_timer()
    print(end - start, 'seconds')
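
The helper adjust_learning_rate is not shown above. Below is a minimal sketch of the poly-decay schedule commonly paired with these DeepLab training scripts; it assumes an args.power argument exists alongside the arguments used above, and that the optimizer has the two parameter groups created earlier (the second running at 10x the base rate).

def lr_poly(base_lr, i_iter, max_iter, power):
    # Polynomial decay: base_lr * (1 - iter / max_iter) ** power
    return base_lr * ((1 - float(i_iter) / max_iter) ** power)


def adjust_learning_rate(optimizer, i_iter):
    # Sketch only: first group at the decayed base LR, second group at 10x.
    lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    optimizer.param_groups[1]['lr'] = lr * 10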
Example #2
def main():
    """Create the model and start the training."""

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True
    torch.manual_seed(args.random_seed)

    model = Res_Deeplab(num_classes=args.num_classes)
    model = torch.nn.DataParallel(model)

    optimizer = optim.SGD([{
        'params': get_1x_lr_params_NOscale(model.module),
        'lr': args.learning_rate
    }, {
        'params': get_10x_lr_params(model.module),
        'lr': 10 * args.learning_rate
    }],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    if args.fine_tune:
        # fine-tune from COCO-pretrained weights
        saved_state_dict = torch.load(args.restore_from)
        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Scale.layer5.conv2d_list.3.weight
            i_parts = i.split('.')
            if i_parts[1] != 'layer5':
                new_params[i.replace('Scale', 'module')] = saved_state_dict[i]
        model.load_state_dict(new_params)
    elif args.restore_from:
        if os.path.isfile(args.restore_from):
            print("=> loading checkpoint '{}'".format(args.restore_from))
            checkpoint = torch.load(args.restore_from)
            try:
                if args.set_start:
                    args.start_step = int(
                        math.ceil(checkpoint['example'] / args.batch_size))
                model.load_state_dict(checkpoint['state_dict'])
                print("=> loaded checkpoint '{}' (step {})".format(
                    args.restore_from, args.start_step))
            except KeyError:
                model.load_state_dict(checkpoint)
                print("=> loaded checkpoint '{}'".format(args.restore_from))
        else:
            print("=> no checkpoint found at '{}'".format(args.restore_from))

    if not args.is_training:
        # Frozen BN:
        # during training the model will use the running means and
        # running variances of the pretrained model.
        # Note that eval() does not turn off gradient tracking.
        print(
            "Freezing BN layers, i.e. treating BN as a fixed linear transform")
        model.eval()
    else:
        model.train()
    model.cuda()

    cudnn.benchmark = True

    trainloader = data.DataLoader(XiangyaTrain(args.data_list,
                                               crop_size=input_size,
                                               scale=args.random_scale,
                                               mirror=args.random_mirror,
                                               color_jitter=args.random_jitter,
                                               rotate=args.random_rotate,
                                               mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    pixel_losses = AverageMeter()
    patch_losses = AverageMeter()
    accuracy = AverageMeter()
    writer = SummaryWriter(args.snapshot_dir)

    cnt = 0
    actual_step = args.start_step
    while actual_step < args.final_step:
        iter_end = timeit.default_timer()
        for i_iter, (images, labels, patch_name) in enumerate(trainloader):
            actual_step = int(args.start_step + cnt)

            data_time.update(timeit.default_timer() - iter_end)

            images = Variable(images).cuda()

            optimizer.zero_grad()
            adjust_learning_rate(optimizer, actual_step)

            # pred = interp(model(images))
            pred = model(images)
            image = images.data.cpu().numpy()[0]
            del images
            # 0 Normal 1 DG 2 JR
            labels = resize_target(labels, pred.size(2))

            pixel_loss, patch_loss = loss_calc(pred, labels)
            loss = pixel_loss.double() + args.loss_coeff * patch_loss
            losses.update(loss.item(), pred.size(0))
            pixel_losses.update(pixel_loss.item(), pred.size(0))
            patch_losses.update(patch_loss.item(), pred.size(0))

            acc = _pixel_accuracy(pred.data.cpu().numpy(),
                                  labels.data.cpu().numpy())
            accuracy.update(acc, pred.size(0))
            loss.backward()
            optimizer.step()

            batch_time.update(timeit.default_timer() - iter_end)
            iter_end = timeit.default_timer()

            if actual_step % args.print_freq == 0:
                print(
                    'iter: [{0}]{1}/{2}\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                    'Pixel Loss {pixel_loss.val:.4f} ({pixel_loss.avg:.4f})\t'
                    'Patch_Loss {patch_loss.val:.4f} ({patch_loss.avg:.4f})\t'
                    'Pixel Accuracy {accuracy.val:.3f} ({accuracy.avg:.3f})'.
                    format(cnt,
                           actual_step,
                           args.final_step,
                           batch_time=batch_time,
                           data_time=data_time,
                           loss=losses,
                           pixel_loss=pixel_losses,
                           patch_loss=patch_losses,
                           accuracy=accuracy))
                writer.add_scalar("train_loss", losses.avg, actual_step)
                writer.add_scalar("pixel_loss", pixel_losses.avg, actual_step)
                writer.add_scalar("patch_loss", patch_losses.avg, actual_step)
                writer.add_scalar("pixel_accuracy", accuracy.avg, actual_step)
                writer.add_scalar("lr", optimizer.param_groups[0]['lr'],
                                  actual_step)

            # TODO complete this part using writer
            if actual_step % args.save_img_freq == 0:
                msk_size = pred.size(2)
                image = image.transpose(1, 2, 0)
                image = cv2.resize(image, (msk_size, msk_size),
                                   interpolation=cv2.INTER_NEAREST)
                image += IMG_MEAN
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                label = labels.data.cpu().numpy()[0]
                label = np.repeat(254, msk_size) - label * 127
                single_pred = pred.data.cpu().numpy()[0].argmax(axis=0)
                single_pred = single_pred * 127
                new_im = Image.new('RGB', (msk_size * 3, msk_size))
                new_im.paste(Image.fromarray(image.astype('uint8'), 'RGB'),
                             (0, 0))
                new_im.paste(Image.fromarray(single_pred.astype('uint8'), 'L'),
                             (msk_size, 0))
                new_im.paste(Image.fromarray(label.astype('uint8'), 'L'),
                             (msk_size * 2, 0))
                new_im_name = 'B' + str(args.batch_size) + '_S' + str(
                    actual_step) + '_' + patch_name[0]
                new_im_file = os.path.join(args.img_dir, new_im_name)
                new_im.save(new_im_file)

            if actual_step % args.save_pred_every == 0 and cnt != 0:
                print('taking snapshot ...')
                torch.save(
                    {
                        'example': actual_step * args.batch_size,
                        'state_dict': model.state_dict()
                    },
                    osp.join(
                        args.snapshot_dir,
                        'Xiangya_Deeplab_B' + str(args.batch_size) + '_S' +
                        str(actual_step) + '.pth'))
            cnt += 1
            if actual_step >= args.final_step:
                break

    print('save the final model ...')
    torch.save(
        {
            'example': actual_step * args.batch_size,
            'state_dict': model.state_dict()
        },
        osp.join(
            args.snapshot_dir, 'Xiangya_Deeplab_B' + str(args.batch_size) +
            '_S' + str(actual_step) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
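
AverageMeter is used but not defined in this example. A typical implementation, in the style of the PyTorch ImageNet reference script, that matches the .update()/.val/.avg usage above (a sketch, not this example's own code):

class AverageMeter(object):
    """Track the latest value and the running average."""

    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count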
Example #3
def main():
    """Create the model and start the training."""
    
    os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep 
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model. 
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)
    # if they are present in the var_list of the optimizer definition.

    # Load weights pretrained on the COCO dataset
    saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()

    # for param_tensor in model.state_dict():
    #     print(model.state_dict()[param_tensor].size())


    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        # layer5 is the final (classification) layer of the network
        if args.num_classes != 3 or i_parts[1] != 'layer5':
            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model.load_state_dict(new_params)

    model.cuda() # Moves all parameters into GPU
    
    cudnn.benchmark = True

    # Creating folder for saved parameters of model
    if not os.path.exists(args.snapshot_dir):
        print("Creating Checkpoint Folder")
        os.makedirs(args.snapshot_dir)


    berkeleyDataset = BerkeleyDataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size, 
        scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN)
    trainloader = data.DataLoader(berkeleyDataset, batch_size=args.batch_size, shuffle=True, num_workers=5, pin_memory=True)
    

    optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate }, 
                {'params': get_10x_lr_params(model), 'lr': 10*args.learning_rate}], 
                lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)

    optimizer.zero_grad()
    
    interp = nn.Upsample(size=input_size, mode='bilinear')

    model.train()
    train_loss = 0
    for i_iter, batch in enumerate(trainloader):
        images, labels, _, _ = batch
        images = Variable(images).cuda()
        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)
        pred = interp(model(images))
        loss = loss_calc(pred, labels)
        
        loss.backward()
        optimizer.step()
        
        print('iter = ', i_iter, 'of', args.num_steps,'completed, loss = ', loss.item()*images.size(0))

        if i_iter >= args.num_steps-1:
            print('save model ...')
            torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'BDD_'+str(args.num_steps)+'.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter!=0:
            print('taking snapshot ...')
            torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'BDD_'+str(i_iter)+'.pth'))  

    end = timeit.default_timer()
    print(end-start,'seconds')
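
loss_calc is not shown in these examples. For VOC-style segmentation it is commonly a 2D cross-entropy over the upsampled logits with the void label 255 ignored. A hedged sketch of the two-argument variant used in Examples #1 and #3, assuming (N, C, H, W) logits and (N, H, W) integer labels:

def loss_calc(pred, label):
    # Sketch only: standard per-pixel cross-entropy, ignoring void pixels.
    label = Variable(label.long()).cuda()
    criterion = nn.CrossEntropyLoss(ignore_index=255)
    return criterion(pred, label)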
Example #4
def main():
    """Create the model and start the training."""

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True
    gpu = args.gpu

    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)
    # if they are present in the var_list of the optimizer definition.

    saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        if args.num_classes != 21 or i_parts[1] != 'layer5':
            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model.load_state_dict(new_params)
    #model.float()
    #model.eval() # use_global_stats = True
    model.train()
    model.cuda(args.gpu)

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    trainloader = data.DataLoader(VOCDataSet(args.data_dir,
                                             args.data_list,
                                             max_iters=args.num_steps *
                                             args.batch_size,
                                             crop_size=input_size,
                                             scale=args.random_scale,
                                             mirror=args.random_mirror,
                                             mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=5,
                                  pin_memory=True)

    optimizer = optim.SGD([{
        'params': get_1x_lr_params_NOscale(model),
        'lr': args.learning_rate
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear')

    for i_iter, batch in enumerate(trainloader):
        images, labels, _, _ = batch
        images = Variable(images).cuda(args.gpu)

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)
        pred = interp(model(images))
        loss = loss_calc(pred, labels, args.gpu)
        loss.backward()
        optimizer.step()

        print('iter = ', i_iter, 'of', args.num_steps, 'completed, loss = ',
              loss.data.cpu().numpy())

        if i_iter >= args.num_steps - 1:
            print('save model ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
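
get_1x_lr_params_NOscale and get_10x_lr_params split the model so the pretrained backbone trains at the base learning rate while the newly initialised classifier trains at 10x. A sketch of one plausible split, assuming the classifier lives under the 'layer5' prefix as in the weight-loading code above; the examples' own definitions may differ:

def get_1x_lr_params_NOscale(model):
    # Backbone parameters (everything outside the classifier) at 1x LR.
    for name, param in model.named_parameters():
        if not name.startswith('layer5') and param.requires_grad:
            yield param


def get_10x_lr_params(model):
    # Classifier parameters at 10x LR.
    for name, param in model.named_parameters():
        if name.startswith('layer5') and param.requires_grad:
            yield param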
Example #5
def main():
    """Create the model and start the training."""

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True
    gpu = args.gpu

    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)
    # if they are present in the var_list of the optimizer definition.

    saved_state_dict = torch.load(args.restore_from)
    new_params = model.state_dict().copy()
    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
        i_parts = i.split('.')
        # print i_parts
        if args.num_classes != 21 or i_parts[1] != 'layer5':
            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
    model.load_state_dict(new_params)
    #model.float()
    #model.eval() # use_global_stats = True
    model.train()
    model.cuda(args.gpu)

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)

    trainloader = data.DataLoader(VOCDataSet(args.data_dir,
                                             args.data_list,
                                             max_iters=args.num_steps *
                                             args.iter_size,
                                             crop_size=input_size,
                                             scale=args.random_scale,
                                             mirror=args.random_mirror,
                                             mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=1,
                                  pin_memory=True)

    optimizer = optim.SGD([{
        'params': get_1x_lr_params_NOscale(model),
        'lr': args.learning_rate
    }, {
        'params': get_10x_lr_params(model),
        'lr': 10 * args.learning_rate
    }],
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    b_loss = 0
    for i_iter, batch in enumerate(trainloader):

        images, labels, _, _ = batch
        images, labels = Variable(images), labels.numpy()
        h, w = images.size()[2:]
        images075 = nn.Upsample(size=(int(h * 0.75), int(w * 0.75)),
                                mode='bilinear')(images)
        images05 = nn.Upsample(size=(int(h * 0.5), int(w * 0.5)),
                               mode='bilinear')(images)

        out = model(images.cuda(args.gpu))
        out075 = model(images075.cuda(args.gpu))
        out05 = model(images05.cuda(args.gpu))
        o_h, o_w = out.size()[2:]
        interpo1 = nn.Upsample(size=(o_h, o_w), mode='bilinear')
        interpo2 = nn.Upsample(size=(h, w), mode='bilinear')
        out_max = interpo2(
            torch.max(torch.stack([out, interpo1(out075),
                                   interpo1(out05)]),
                      dim=0)[0])

        loss = loss_calc(out_max, labels, args.gpu)
        d1, d2 = float(labels.shape[1]), float(labels.shape[2])
        loss100 = loss_calc(
            out,
            nd.zoom(labels, (1.0, out.size()[2] / d1, out.size()[3] / d2),
                    order=0), args.gpu)
        loss075 = loss_calc(
            out075,
            nd.zoom(labels,
                    (1.0, out075.size()[2] / d1, out075.size()[3] / d2),
                    order=0), args.gpu)
        loss05 = loss_calc(
            out05,
            nd.zoom(labels, (1.0, out05.size()[2] / d1, out05.size()[3] / d2),
                    order=0), args.gpu)
        loss_all = (loss + loss100 + loss075 + loss05) / args.iter_size
        loss_all.backward()
        b_loss += loss_all.data.cpu().numpy()

        b_iter = i_iter // args.iter_size  # integer step index

        if b_iter >= args.num_steps - 1:
            print('save model ...')
            optimizer.step()
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(args.num_steps) + '.pth'))
            break

        if i_iter % args.iter_size == 0 and i_iter != 0:
            print('iter = ', b_iter, 'of', args.num_steps,
                  'completed, loss = ', b_loss)
            optimizer.step()
            adjust_learning_rate(optimizer, b_iter)
            optimizer.zero_grad()
            b_loss = 0

        if i_iter % (args.save_pred_every *
                     args.iter_size) == 0 and b_iter != 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'VOC12_scenes_' + str(b_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
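
Example #5 accumulates gradients over args.iter_size minibatches before each optimizer step, emulating a batch that is iter_size times larger. The same pattern in isolation, as a self-contained sketch with hypothetical arguments:

def train_accumulated(model, loader, criterion, optimizer, iter_size):
    # Each minibatch contributes loss / iter_size to the gradients;
    # the weights are updated once every iter_size minibatches.
    optimizer.zero_grad()
    for i, (images, labels) in enumerate(loader):
        loss = criterion(model(images), labels) / iter_size
        loss.backward()  # gradients accumulate across backward() calls
        if (i + 1) % iter_size == 0:
            optimizer.step()
            optimizer.zero_grad()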
Example #6
def main():
    """Create the model and start the training."""

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    print(input_size)

    cudnn.enabled = True
    gpu = args.gpu

    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates BN parameters gamma (scale) and beta (offset)
    # if they are present in the var_list of the optimizer definition.
    '''
#    saved_state_dict = torch.load(args.restore_from)
#    new_params = model.state_dict().copy()
#    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
#        i_parts = i.split('.')
        # print i_parts
#        if not args.num_classes == 21 or not i_parts[1]=='layer5':
#            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
#    model.load_state_dict(new_params)
    '''
    #model.float()
    #model.eval() # use_global_stats = True
    #    model = nn.DataParallel(model)
    model.train()
    model.cuda()
    #    model.cuda(args.gpu)

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)


#    trainloader = data.DataLoader(VOCDataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size,
#                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
#                    batch_size=args.batch_size, shuffle=True, num_workers=5, pin_memory=True)

    dataset = GenericDataset(DATA_DIRECTORY, 'train', train_transform,
                             mask_transform)
    trainloader = data.DataLoader(dataset,
                                  batch_size=BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=True)

    #    optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate },
    #                 {'params': get_10x_lr_params(model), 'lr': 10*args.learning_rate}],
    #                lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)
    optimizer = optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()), 1e-4)
    #    optimizer = optim.Adam([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate }], 1e-4)
    #    optimizer = optim.Adam(get_1x_lr_params_NOscale(model), 1e-4)
    #    optimizer = optim.Adam(get_10x_lr_params(model), lr=1e-5)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear')

    for e in range(epochs):
        for i_iter, batch in enumerate(trainloader):
            images, labels, _, _ = batch
            #        images = Variable(images).cuda(args.gpu)
            images = Variable(images).cuda()

            optimizer.zero_grad()
            adjust_learning_rate(optimizer, i_iter)
            pred = interp(model(images))

            # Visualize predictions every 50 iterations; the training step
            # itself runs on every iteration.
            if i_iter % 50 == 0:
                vis.show(
                    F.sigmoid(pred)[:, 1].cpu().data.round().numpy()[0:],
                    labels.numpy())

            loss = loss_calc(pred, labels.squeeze())
            #        loss = loss_calc(pred, labels.squeeze(), args.gpu)
            loss.backward()
            optimizer.step()

            print('iter = ', i_iter, 'of', args.num_steps,
                  'completed, loss = ',
                  loss.data.cpu().numpy())

            #       if i_iter >= args.num_steps-1:
            #           print('save model ...')
            #           torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'VOC12_scenes_'+str(args.num_steps)+'.pth'))
            #            break

            if i_iter % 200 == 0 and i_iter != 0:
                print('taking snapshot ...')
                torch.save(
                    model.state_dict(),
                    osp.join(args.snapshot_dir,
                             'VOC12_scenes_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')