Example #1
 def __init__(self, num_layers, num_input_features, bn_size=4, growth_rate=32, drop_rate=0):
     util.super2(DenseBlock, self).__init__()
     self.num_layers = num_layers
     for i in range(num_layers):
         layer = DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)
         self.add_module('denselayer%d' % (i + 1), layer)
     self.n_feat_out = layer.n_feat_out
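
For reference, the n_feat_out bookkeeping above follows directly from the concatenation pattern: each DenseLayer appends growth_rate channels to its input, so the block emits num_input_features + num_layers * growth_rate channels. A minimal arithmetic sketch with hypothetical sizes:

# Channel growth of a DenseBlock (hypothetical sizes, plain arithmetic)
num_input_features, num_layers, growth_rate = 64, 4, 32
n_feat = num_input_features
for _ in range(num_layers):
    n_feat += growth_rate  # each DenseLayer concatenates growth_rate channels
assert n_feat == num_input_features + num_layers * growth_rate == 192
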
Example #2
 def __init__(self, num_input_features, num_output_features):
     util.super2(Transition, self).__init__()
     self.add_module('norm', nn.BatchNorm2d(num_input_features))
     self.add_module('noli', default_nonlinearity())
     self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
                                       kernel_size=1, stride=1, bias=False))
     self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
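
Transition appears to subclass nn.Sequential (bare add_module calls with an inherited forward), so its behavior can be sketched with plain modules; nn.ReLU stands in for default_nonlinearity() and the sizes are hypothetical:

import torch
from torch import nn

# Hypothetical stand-in for Transition(128, 64): compress channels with a
# 1x1 conv, then halve the spatial dimensions with average pooling.
trans = nn.Sequential(
    nn.BatchNorm2d(128),
    nn.ReLU(inplace=True),
    nn.Conv2d(128, 64, kernel_size=1, stride=1, bias=False),
    nn.AvgPool2d(kernel_size=2, stride=2),
)
y = trans(torch.randn(2, 128, 32, 32))
assert y.shape == (2, 64, 16, 16)  # channels compressed, spatial dims halved
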
Example #3
    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate=0):
        util.super2(DenseLayer, self).__init__()
        self.bn_size = bn_size
        self.growth_rate = growth_rate
        self.n_feat_internal = bn_size * growth_rate
        self.n_feat_out = num_input_features + growth_rate

        # NOTE: PyTorch does not allow '.' in module names, so these
        # submodules use 'norm1' / 'conv1' style names
        self.add_module('norm1', nn.BatchNorm2d(num_input_features))
        self.add_module('noli1', default_nonlinearity())
        self.add_module(
            'conv1',
            nn.Conv2d(num_input_features,
                      bn_size * growth_rate,
                      kernel_size=1,
                      stride=1,
                      bias=False))
        self.add_module('norm2', nn.BatchNorm2d(self.n_feat_internal))
        self.add_module('noli2', default_nonlinearity())
        self.add_module(
            'conv2',
            nn.Conv2d(self.n_feat_internal,
                      growth_rate,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias=False))
        self.drop_rate = drop_rate
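
The layer squeezes its input through a 1x1 bottleneck to bn_size * growth_rate channels, then a 3x3 conv reduces that to growth_rate channels; combined with the concatenation in Example #7, this gives n_feat_out = num_input_features + growth_rate. A standalone sketch of the same data flow (nn.ReLU assumed for default_nonlinearity()):

import torch
from torch import nn

num_input_features, bn_size, growth_rate = 64, 4, 32
body = nn.Sequential(
    nn.BatchNorm2d(num_input_features), nn.ReLU(inplace=True),
    nn.Conv2d(num_input_features, bn_size * growth_rate, 1, bias=False),
    nn.BatchNorm2d(bn_size * growth_rate), nn.ReLU(inplace=True),
    nn.Conv2d(bn_size * growth_rate, growth_rate, 3, padding=1, bias=False),
)
x = torch.randn(2, num_input_features, 16, 16)
out = torch.cat([x, body(x)], 1)  # the concat performed in Example #7
assert out.shape[1] == num_input_features + growth_rate  # n_feat_out == 96
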
Example #4
 def __init__(self,
              in_size1,
              in_size2,
              compress=.5,
              num_layers=2,
              growth_rate=4,
              is_deconv=True):
     util.super2(DenseUNetUp, self).__init__()
     if is_deconv:
         out_size2 = int(compress * in_size2)
         self.up = nn.ConvTranspose2d(in_size2,
                                      out_size2,
                                      kernel_size=2,
                                      stride=2,
                                      bias=False)
     else:
         out_size2 = in_size2
         self.up = nn.Upsample(scale_factor=2, mode='bilinear')
     in_size = in_size1 + out_size2
     bneck_size = int(in_size1 * compress)
     self.pad = PadToAgree()
     self.conv = DenseBlock(num_layers,
                            in_size,
                            growth_rate=growth_rate,
                            bn_size=4)
     self.bottleneck = nn.Conv2d(self.conv.n_feat_out,
                                 bneck_size,
                                 kernel_size=1,
                                 stride=1,
                                 bias=False)
     self.n_feat_out = bneck_size
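
The channel arithmetic of the deconv branch, traced with hypothetical sizes:

# DenseUNetUp channel bookkeeping when is_deconv=True (hypothetical sizes)
in_size1, in_size2, compress = 96, 192, .5
out_size2 = int(compress * in_size2)   # channels after ConvTranspose2d
in_size = in_size1 + out_size2         # skip features + upsampled features
bneck_size = int(in_size1 * compress)  # channels after the 1x1 bottleneck
assert (out_size2, in_size, bneck_size) == (96, 192, 48)
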
Example #5
    def __init__(self, n_input, n_output, n_mixtures=2):
        util.super2(MixtureOfLogSoftmax, self).__init__()
        self.n_mixtures = n_mixtures
        self.n_input = n_input
        self.n_output = n_output
        self.mixer = torch.nn.Linear(n_input, n_mixtures * n_input)
        self.project = torch.nn.Linear(n_input, n_output)
        self.noli = default_nonlinearity()

        # For the different components, we're assuming equal mixing
        # self.log_prior = torch.log(torch.autograd.Variable(torch.ones(1, n_mixtures, 1) * (1 / n_mixtures)))
        self.log_prior = (1 / float(n_mixtures))
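
Note that despite its name, log_prior stores the prior probability 1 / n_mixtures, not its logarithm. The forward pass is not part of this example; a plausible, purely hypothetical completion of the standard mixture-of-softmaxes computation under equal mixing would be:

import torch
import torch.nn.functional as F

def forward(self, x):
    # Hypothetical forward (not from the source): mix, project, then combine
    # the component log-softmaxes under equal priors via logsumexp.
    B = x.shape[0]
    mixed = self.noli(self.mixer(x)).view(B, self.n_mixtures, self.n_input)
    log_probs = F.log_softmax(self.project(mixed), dim=-1)
    log_prior = torch.log(torch.tensor(self.log_prior))  # log(1 / n_mixtures)
    return torch.logsumexp(log_probs + log_prior, dim=1)
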
Example #6
 def __init__(self, channels, eps=1e-5, rmax=3, dmax=5, lr=0.001):
     util.super2(BatchRenorm2d, self).__init__()
     self.training = True
     self.is_unlock = False
     self.eps = eps
     self.eps_sqrt = np.sqrt(self.eps)
     self.channels = channels
     self.rmax = rmax
     self.dmax = dmax
     self.lr = lr
     self.use_cuda = True
     self.sigma = torch.ones((1, channels)).float()
     self.mean = torch.zeros((1, channels)).float()
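
The constructor only initializes the running statistics and the clip limits rmax / dmax; the forward is not shown here. For reference, the training-time correction from the Batch Renormalization paper (Ioffe, 2017) looks like the following sketch, which is not this repo's implementation:

import torch

def batch_renorm(x, mean, sigma, rmax, dmax, eps=1e-5):
    # mean / sigma are the running statistics of shape (1, C);
    # x has shape (N, C, H, W).
    b_mean = x.mean(dim=(0, 2, 3))
    b_sigma = torch.sqrt(x.var(dim=(0, 2, 3), unbiased=False) + eps)
    r = (b_sigma / sigma.view(-1)).clamp(1.0 / rmax, rmax).detach()
    d = ((b_mean - mean.view(-1)) / sigma.view(-1)).clamp(-dmax, dmax).detach()
    xhat = (x - b_mean[None, :, None, None]) / b_sigma[None, :, None, None]
    return xhat * r[None, :, None, None] + d[None, :, None, None]
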
Example #7
 def forward(self, x):
     new_features = util.super2(DenseLayer, self).forward(x)
     if self.drop_rate > 0:
         new_features = F.dropout(new_features,
                                  p=self.drop_rate,
                                  training=self.training)
     return torch.cat([x, new_features], 1)
Example #8
 def __init__(self, n_input, n_output):
     util.super2(LinearLogSoftmax, self).__init__()
     self.n_input = n_input
     self.n_output = n_output
     self.project = torch.nn.Linear(n_input, n_output)
     self.noli = default_nonlinearity()
Example #9
    def __init__(self, n_classes=21, n_alt_classes=3, in_channels=3,
                 bn_size=2, growth_rate=16, is_deconv=True):
        util.super2(DenseUNet2, self).__init__()
        self.in_channels = in_channels

        n_feat0 = 36
        from torch import nn
        features = nn.Sequential(ub.odict([
            ('conv0', nn.Conv2d(in_channels, n_feat0, kernel_size=7, stride=1,
                                padding=3,
                                bias=False)),
            ('norm0', nn.BatchNorm2d(n_feat0)),
            ('noli0', default_nonlinearity()),
            # ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
        ]))

        block_config = [2, 3, 3, 3, 3]
        compress = .4

        n_feat = n_feat0

        # downsampling
        down = []
        densekw = dict(bn_size=bn_size, growth_rate=growth_rate)

        for i, num_layers in enumerate(block_config[0:-1]):
            down.append(('denseblock%d' % (i + 1), DenseBlock(num_layers=num_layers, num_input_features=n_feat, **densekw)))
            n_feat = n_feat + num_layers * growth_rate
            n_feat_compress = int(n_feat * compress)
            down.append(('transition%d' % (i + 1), Transition(num_input_features=n_feat, num_output_features=n_feat_compress)))
            n_feat = n_feat_compress

        # for key, value in down:
        #     self.add_module(key, value)
        self.denseblock1 = down[0][1]
        self.transition1 = down[1][1]
        self.denseblock2 = down[2][1]
        self.transition2 = down[3][1]
        self.denseblock3 = down[4][1]
        self.transition3 = down[5][1]
        self.denseblock4 = down[6][1]
        self.transition4 = down[7][1]

        num_layers = block_config[-1]
        center5 = DenseBlock(num_layers=num_layers, num_input_features=n_feat, **densekw)
        n_feat = n_feat + num_layers * growth_rate

        up_concat4 = DenseUNetUp(self.denseblock4.n_feat_out, center5.n_feat_out, is_deconv=is_deconv)
        up_concat3 = DenseUNetUp(self.denseblock3.n_feat_out, up_concat4.n_feat_out, is_deconv=is_deconv)
        up_concat2 = DenseUNetUp(self.denseblock2.n_feat_out, up_concat3.n_feat_out, is_deconv=is_deconv)
        up_concat1 = DenseUNetUp(self.denseblock1.n_feat_out, up_concat2.n_feat_out, is_deconv=is_deconv)

        self.features = features
        self.center5 = center5

        self.up_concat4 = up_concat4
        self.up_concat3 = up_concat3
        self.up_concat2 = up_concat2
        self.up_concat1 = up_concat1

        # final conv (without any concat)
        self.final1 = nn.Conv2d(up_concat1.n_feat_out, n_classes, 1)
        self.final2 = nn.Conv2d(up_concat1.n_feat_out, n_alt_classes, 1)
        self._cache = {}

        self.connections = {
            'path': [
                # Main network forward path
                'features',
                'denseblock1',
                'transition1',
                'denseblock2',
                'transition2',
                'denseblock3',
                'transition3',
                'denseblock4',
                'transition4',
                'center5',
                'up_concat4',
                'up_concat3',
                'up_concat2',
                'up_concat1',
            ],
            'edges': [
                # When a node accepts multiple inputs, we need to specify which
                # order they appear in the signature
                ('denseblock4', 'up_concat4', {'argx': 0}),
                ('denseblock3', 'up_concat3', {'argx': 0}),
                ('denseblock2', 'up_concat2', {'argx': 0}),
                ('denseblock1', 'up_concat1', {'argx': 0}),
                ('up_concat1', 'final1', {'argx': 0}),
                ('up_concat1', 'final2', {'argx': 0}),
            ]
        }
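
Under the constructor defaults (n_feat0=36, growth_rate=16, compress=.4), the channel schedule produced by the downsampling loop can be traced with plain arithmetic:

# Downsampling channel schedule of DenseUNet2 (defaults assumed)
n_feat, growth_rate, compress = 36, 16, .4
for num_layers in [2, 3, 3, 3]:         # block_config[0:-1]
    n_feat += num_layers * growth_rate  # DenseBlock output
    n_feat = int(n_feat * compress)     # Transition compression
n_feat += 3 * growth_rate               # center5 adds block_config[-1] layers
assert n_feat == 79                     # center5.n_feat_out
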
Example #10
 def __init__(self):
     util.super2(PadToAgree, self).__init__()
Example #11
    def __init__(task, repo=None, workdir=None, clean=2):
        # TODO: generalize
        if repo is None:
            task.repo = expanduser('~/data/v1-annotations')
        else:
            task.repo = expanduser(repo)
        # TODO: generate this automatically
        classnames = [
            'Sky', 'Building', 'Street', 'Parking_Lot', 'Trees', 'Crosswalk',
            'Grass', 'Ground', 'Intersection', 'Shadows', 'Sidewalk', 'Stairs',
            'Other', 'Background', 'Unannotated'
        ]
        null_classname = 'Unannotated'

        if clean <= 0:
            alias = {}
        elif clean == 1:
            alias = {
                # 'Sidewalk': 'Unannotated',
                # 'Grass': 'Ground',
                'Shadows': 'Unannotated',
                'Stairs': 'Unannotated',
                'Background': 'Unannotated',
                'Other': 'Unannotated',
            }
        else:
            # These roughly correspond to Shruti's classes
            alias = {
                # Remove Shadows / Background / Other
                'Shadows': 'Unannotated',
                'Background': 'Unannotated',
                'Other': 'Unannotated',

                # Tenuous: Remove Sky, Ground, Stairs
                'Sky': 'Unannotated',
                'Ground': 'Unannotated',
                'Stairs': 'Unannotated',

                # VERY Tenuous: Remove Sidewalk
                'Sidewalk': 'Unannotated',

                # Intersection and crosswalk become street
                'Intersection': 'Street',
                'Crosswalk': 'Street',
            }
        task.clean = clean

        util.super2(DivaV1, task).__init__(classnames, null_classname, alias)
        task._data_id = None

        # Special colors
        task.customize_colors()

        # Work directory
        if workdir is None:
            task.workdir = expanduser('~/data/work/diva/')
        else:
            task.workdir = expanduser(workdir)

        # Define data preprocessing
        task.target_shape = (360, 480)
        task.input_shape = (360, 480)
        task.part_overlap = .5
        task.part_keepbound = True
        # task.part_overlap = 0
        # task.part_keepbound = False

        # task.enable_augment = False
        task.enable_augment = True
        # task.aug_params = {
        #     'axis_flips': [0, 1],
        #     'gammas':  [2.5],
        #     'blur':  [10],
        #     'n_rand_zoom':  1,
        #     'n_rand_occl': 1,
        # }
        task.aug_params = {
            'axis_flips': [0, 1],
            'gammas': [.5, 2.5],
            'blur': [5, 20],
            'n_rand_zoom': 2,
            'n_rand_occl': 2,
        }

        task.base_modes = ['lowres', 'part-scale1', 'part-scale2']
        # task.base_modes = ['lowres']

        if task.enable_augment:
            task.modes = task.base_modes + [
                m + '-aug' for m in task.base_modes
            ]
        else:
            task.modes = task.base_modes[:]
        task.scene_base = join(task.repo, 'active')
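
When augmentation is enabled, the mode list built at the end expands each base mode with an '-aug' variant:

# Mode expansion when task.enable_augment is True
base_modes = ['lowres', 'part-scale1', 'part-scale2']
modes = base_modes + [m + '-aug' for m in base_modes]
assert modes == ['lowres', 'part-scale1', 'part-scale2',
                 'lowres-aug', 'part-scale1-aug', 'part-scale2-aug']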