Example #1
def _build_net(self):
    # Concat(0) joins the two 5-dim branch outputs into the 10 features
    # expected by the final Linear layer.
    return (nn.Sequential()
            .add(nn.Concat(0)
                 .add(nn.Linear(2, 5))
                 .add(nn.Linear(2, 5)))
            .add(nn.ReLU())
            .add(nn.Linear(10, 20)))
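A minimal usage sketch for the container above, assuming a PyTorch version that still ships torch.legacy.nn (whose Container.add returns the container, which is what makes the chained style work). The input size follows from the two Linear(2, 5) branches being concatenated along dimension 0:

import torch
import torch.legacy.nn as nn  # assumption: the legacy module family implied by the chained .add calls

net = (nn.Sequential()
       .add(nn.Concat(0)
            .add(nn.Linear(2, 5))
            .add(nn.Linear(2, 5)))
       .add(nn.ReLU())
       .add(nn.Linear(10, 20)))

x = torch.randn(2)    # one 2-element input vector
y = net.forward(x)    # Concat(0) joins the two 5-dim branch outputs into 10 features
print(y.size())       # torch.Size([20])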
Example #2
def test_ReLU_reference(self):
    input = torch.randn(10, 20)
    module = nn.ReLU()
    output = module.forward(input)
    # ReLU must pass non-negative inputs through unchanged and zero out the rest;
    # the same mask is used on both sides so the compared slices line up.
    self.assertTrue(output[input.ge(0)].eq(input[input.ge(0)]).all())
    self.assertTrue(output[input.lt(0)].eq(0).all())
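The two assertions above check the non-negative and negative branches separately; the same property can be stated as a single comparison against a clamped copy of the input. A minimal sketch, written here against torch.nn (the legacy module in the test above behaves the same way for this check):

import torch
import torch.nn as nn

def check_relu_against_clamp():
    # ReLU(x) equals x clamped below at zero, so one element-wise comparison
    # covers both branches of the reference test above.
    input = torch.randn(10, 20)
    output = nn.ReLU()(input)
    assert output.eq(input.clamp(min=0)).all()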
Example #3
def build_model(opt):
    # Legacy-style container: .add appends modules in order, matching the
    # legacy Spatial* layers used below. The arch string is a comma-separated
    # list of layer specs with single-character fields, e.g. 'c3s1-8' =
    # 3x3 conv, stride 1, 8 output channels.
    model = nn.Sequential()
    prev_dim = 3
    arch = opt.arch.split(',')

    for i, v in enumerate(arch):
        first_char = v[0]
        needs_relu = True
        needs_bn = True
        next_dim = None
        layer = None
        if first_char == 'c':
            f = int(v[1])
            p = (f - 1) // 2
            s = int(v[3])
            next_dim = int(v[5])
            if opt.padding_type == 'reflect':
                model.add(nn.SpatialReflectionPadding(p, p, p, p))
                p = 0
            elif opt.padding_type == 'replicate':
                model.add(nn.SpatialReplicationPadding(p, p, p, p))
                p = 0
            elif opt.padding_type == 'none':
                p = 0
            layer = nn.SpatialConvolution(prev_dim, next_dim, f, f, s, s, p, p)
        elif first_char == 'f':
            f = int(v[1])
            p = (f - 1) // 2
            s = int(v[3])
            a = s - 1
            next_dim = int(v[5])
            layer = nn.SpatialFullConvolution(prev_dim, next_dim, f, f, s, s, p, p, a, a)
        elif first_char == 'd':
            next_dim = int(v[1])
            layer = nn.SpatialConvolution(prev_dim, next_dim, 3, 3, 2, 2, 1, 1)
        elif first_char == 'U':
            next_dim = prev_dim
            scale = int(v[1])  # parsed but unused: this port always upsamples by a factor of 2
            layer = nn.SpatialFullConvolution(prev_dim, next_dim, 3, 3, 2, 2, 1, 1, 1, 1)
            needs_bn = False
            needs_relu = True

        model.add(layer)
        if i == len(arch) - 1:
            needs_bn = False
            needs_relu = False

        if needs_bn:
            if opt.use_instance_norm == 1:
                model.add(nn.InstanceNormalization(next_dim))
            else:
                model.add(nn.SpatialBatchNormalization(next_dim))

        if needs_relu:
            model.add(nn.ReLU(True))

        prev_dim = next_dim

    model.add(nn.Tanh())
    model.add(nn.MulConstant(opt.tanh_constant))
    model.add(nn.TotalVariation(opt.tv_strength))
    return model
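A minimal sketch of calling build_model, assuming an argparse-style options object with exactly the fields the function reads. The values are illustrative only, and each field in the arch entries is a single character because the parser indexes fixed positions (v[1], v[3], v[5]):

from argparse import Namespace

# Hypothetical options object; the field names mirror what build_model reads.
opt = Namespace(
    arch='c3s1-8,d4,U2,c3s1-3',   # single-digit kernel / stride / channel fields
    padding_type='reflect',
    use_instance_norm=1,
    tanh_constant=150,
    tv_strength=1e-6,
)
model = build_model(opt)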
Example #4
def test_relu(self):
    self._test_single_layer(nn.ReLU())
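The helper _test_single_layer is not shown in this excerpt; the sketch below is a hypothetical stand-in (not the project's actual helper) that only makes the call above concrete, assuming torch.nn modules that accept plain tensors:

import unittest

import torch
import torch.nn as nn

class SingleLayerTest(unittest.TestCase):
    def _test_single_layer(self, layer):
        # Hypothetical helper: push a random batch through the layer and check
        # that a shape-preserving activation such as ReLU keeps the input shape.
        x = torch.randn(4, 10)
        y = layer(x)
        self.assertEqual(y.size(), x.size())

    def test_relu(self):
        self._test_single_layer(nn.ReLU())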
Example #5
from functools import reduce

import torch
import torch.nn as nn
import torch.legacy.nn
from torch.autograd import Variable


class LambdaBase(nn.Sequential):
    def __init__(self, fn, *args):
        super(LambdaBase, self).__init__(*args)
        self.lambda_func = fn

    def forward_prepare(self, input):
        # Run every registered submodule on the input; with no submodules,
        # pass the input through unchanged.
        output = []
        for module in self._modules.values():
            output.append(module(input))
        return output if output else input


class Lambda(LambdaBase):
    def forward(self, input):
        return self.lambda_func(self.forward_prepare(input))


class LambdaMap(LambdaBase):
    def forward(self, input):
        return list(map(self.lambda_func, self.forward_prepare(input)))


class LambdaReduce(LambdaBase):
    def forward(self, input):
        return reduce(self.lambda_func, self.forward_prepare(input))


googlenet = nn.Sequential(  # Sequential,
    nn.Conv2d(3, 64, (7, 7), (2, 2), (3, 3)),
    nn.ReLU(),
    nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True),
    Lambda(lambda x, lrn=torch.legacy.nn.SpatialCrossMapLRN(*(
        5, 0.0001, 0.75, 1)): Variable(lrn.forward(x.data))),
    nn.Conv2d(64, 64, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(64, 192, (3, 3), (1, 1), (1, 1)),
    nn.ReLU(),
    Lambda(lambda x, lrn=torch.legacy.nn.SpatialCrossMapLRN(*(
        5, 0.0001, 0.75, 1)): Variable(lrn.forward(x.data))),
    nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True),
    nn.Conv2d(192, 64, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(192, 96, (1, 1)),
    nn.ReLU(),
    nn.Conv2d(96, 128, (3, 3), (1, 1), (1, 1)),