Example #1
import torch

# Res_Deeplab is assumed to be provided by the surrounding project
# (a DeepLab-style ResNet); it is not defined in this snippet.


class SegFeatureExtraction(torch.nn.Module):
    def __init__(self, num_classes, train_sfe=True):
        super(SegFeatureExtraction, self).__init__()
        self.model = Res_Deeplab(num_classes=num_classes).layer4
        if not train_sfe:
            # freeze parameters
            for param in self.model.parameters():
                param.requires_grad = False

    def forward(self, image_batch):
        features = self.model(image_batch)
        return features
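Note that, despite the image_batch argument name, the module wraps only layer4 of the backbone, so it expects intermediate feature maps rather than raw images. A minimal usage sketch (assuming a ResNet-101 backbone, where layer3 outputs 1024 channels; the shapes are illustrative, not from the original):

extractor = SegFeatureExtraction(num_classes=21, train_sfe=False)
layer3_features = torch.randn(1, 1024, 41, 41)  # output of the shared trunk
seg_features = extractor(layer3_features)
print(seg_features.shape)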
Example #2
import torch
import torch.nn.functional as F


class MatFeatureExtraction(torch.nn.Module):
    def __init__(self, num_classes, train_nfe=True, normalization=True):
        super(MatFeatureExtraction, self).__init__()
        self.normalization = normalization
        self.model = Res_Deeplab(num_classes=num_classes).layer4
        if not train_nfe:
            # freeze parameters
            for param in self.model.parameters():
                param.requires_grad = False

    def forward(self, image_batch):
        features = self.model(image_batch)
        if self.normalization:
            features = F.normalize(features, p=2, dim=1)
        return features
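The only difference from Example #1 is the optional L2 normalization, which rescales the feature vector at every spatial location to unit length along the channel axis. A standalone illustration of what F.normalize does here (the values are hypothetical):

x = torch.randn(2, 8, 4, 4)         # (batch, channels, height, width)
y = F.normalize(x, p=2, dim=1)      # unit L2 norm per spatial location
print(y.pow(2).sum(dim=1))          # ~1.0 everywhere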
Example #3
import torch
import torch.nn as nn


class CommonFeatureExtraction(torch.nn.Module):
    def __init__(self, num_classes, train_cfe=True):
        super(CommonFeatureExtraction, self).__init__()
        self.model = Res_Deeplab(num_classes=num_classes)
        self.resnet_feature_layers = [
            'conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3'
        ]
        resnet_module_list = [
            self.model.conv1, self.model.bn1, self.model.relu,
            self.model.maxpool, self.model.layer1, self.model.layer2,
            self.model.layer3
        ]
        self.model = nn.Sequential(*resnet_module_list)
        if not train_cfe:
            # freeze parameters
            for param in self.model.parameters():
                param.requires_grad = False

    def forward(self, image_batch):
        features = self.model(image_batch)
        return features
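This module keeps the backbone up to layer3, producing the shared feature maps that the two heads above consume. A sketch of how the three extractors might be composed (the class names come from the examples above; the shapes and num_classes=21 are assumptions):

common = CommonFeatureExtraction(num_classes=21, train_cfe=False)
seg_head = SegFeatureExtraction(num_classes=21)
mat_head = MatFeatureExtraction(num_classes=21)

images = torch.randn(1, 3, 321, 321)  # a typical DeepLab crop size
shared = common(images)               # layer3 feature maps
seg_features = seg_head(shared)
mat_features = mat_head(shared)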
Example #4
import os
import os.path as osp
import timeit

import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.utils import data

# args, epochs, start, DATA_DIRECTORY, BATCH_SIZE, train_transform,
# mask_transform, vis, Res_Deeplab, GenericDataset, loss_calc and
# adjust_learning_rate are assumed to be defined elsewhere in the
# original script.


def main():
    """Create the model and start the training."""

    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    print(input_size)

    cudnn.enabled = True
    gpu = args.gpu

    # Create network.
    model = Res_Deeplab(num_classes=args.num_classes)
    # For a small batch size, it is better to keep
    # the statistics of the BN layers (running means and variances)
    # frozen, and to not update the values provided by the pre-trained model.
    # If is_training=True, the statistics will be updated during the training.
    # Note that is_training=False still updates the BN parameters gamma (scale)
    # and beta (offset) if they are present in the var_list of the optimiser.
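    # A sketch (not from the original): one common way to keep the BN
    # running statistics frozen while training the rest of the network is
    # to put only the BN modules into eval mode, e.g.
    #
    #     for m in model.modules():
    #         if isinstance(m, nn.BatchNorm2d):
    #             m.eval()  # stops updating running_mean / running_var
    #
    # This would be called after model.train() below.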
    '''
#    saved_state_dict = torch.load(args.restore_from)
#    new_params = model.state_dict().copy()
#    for i in saved_state_dict:
        #Scale.layer5.conv2d_list.3.weight
#        i_parts = i.split('.')
        # print i_parts
#        if not args.num_classes == 21 or not i_parts[1]=='layer5':
#            new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
#    model.load_state_dict(new_params)
    '''
    #model.float()
    #model.eval() # use_global_stats = True
    #    model = nn.DataParallel(model)
    model.train()
    model.cuda()
    #    model.cuda(args.gpu)

    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)


#    trainloader = data.DataLoader(VOCDataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size,
#                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN),
#                    batch_size=args.batch_size, shuffle=True, num_workers=5, pin_memory=True)

    dataset = GenericDataset(DATA_DIRECTORY, 'train', train_transform,
                             mask_transform)
    trainloader = data.DataLoader(dataset,
                                  batch_size=BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=True)

    #    optimizer = optim.SGD([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate },
    #                 {'params': get_10x_lr_params(model), 'lr': 10*args.learning_rate}],
    #                lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)
    # Only optimise parameters that still require gradients, so frozen
    # sub-modules are skipped automatically.
    optimizer = optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()), 1e-4)
    #    optimizer = optim.Adam([{'params': get_1x_lr_params_NOscale(model), 'lr': args.learning_rate }], 1e-4)
    #    optimizer = optim.Adam(get_1x_lr_params_NOscale(model), 1e-4)
    #    optimizer = optim.Adam(get_10x_lr_params(model), lr=1e-5)
    optimizer.zero_grad()

    # align_corners=True reproduces the behaviour of the old PyTorch default
    # this code was written against; newer releases default to False.
    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)

    for e in range(epochs):
        for i_iter, batch in enumerate(trainloader):
            images, labels, _, _ = batch
            #        images = Variable(images).cuda(args.gpu)
            images = Variable(images).cuda()

            optimizer.zero_grad()
            adjust_learning_rate(optimizer, i_iter)
            pred = interp(model(images))

            # Compute the loss and take an optimiser step on every iteration;
            # only the visualisation and the snapshots are periodic.
            loss = loss_calc(pred, labels.squeeze())
            #        loss = loss_calc(pred, labels.squeeze(), args.gpu)
            loss.backward()
            optimizer.step()

            if i_iter % 50 == 0:
                vis.show(
                    torch.sigmoid(pred)[:, 1].cpu().data.round().numpy()[0:],
                    labels.numpy())
                print('iter = ', i_iter, 'of', args.num_steps,
                      'completed, loss = ',
                      loss.data.cpu().numpy())

            #       if i_iter >= args.num_steps-1:
            #           print('save model ...')
            #           torch.save(model.state_dict(),osp.join(args.snapshot_dir, 'VOC12_scenes_'+str(args.num_steps)+'.pth'))
            #            break

            if i_iter % 200 == 0 and i_iter != 0:
                print('taking snapshot ...')
                torch.save(
                    model.state_dict(),
                    osp.join(args.snapshot_dir,
                             'VOC12_scenes_' + str(i_iter) + '.pth'))

    end = timeit.default_timer()
    print(end - start, 'seconds')
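loss_calc and adjust_learning_rate are helpers from the surrounding script and are not shown above. DeepLab training scripts typically decay the learning rate with a "poly" schedule; a minimal sketch of what adjust_learning_rate might look like under that assumption (not the original implementation):

def lr_poly(base_lr, i_iter, max_iter, power=0.9):
    # Polynomial decay, as used in the DeepLab papers.
    return base_lr * ((1 - float(i_iter) / max_iter) ** power)


def adjust_learning_rate(optimizer, i_iter):
    # Assumes args.learning_rate and args.num_steps as in the script above.
    lr = lr_poly(args.learning_rate, i_iter, args.num_steps, power=0.9)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr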