Example #1
import torch
import torch.nn.functional as F


def iter_desparsify(m):
    # Recursively replace every SparseConv/SparseFc wrapper (project-specific
    # modules) with its plain conv/fc layer, hard-shrinking the weights.
    for name, child in m.named_children():
        iter_desparsify(child)
        if isinstance(child, SparseConv):
            conv = child.conv
            w = conv.weight.data

            mean = 0.0  # w.mean()
            s = (w - mean).std()
            # Shrink threshold: learned scale r (clamped to [0, 5]) times
            # the weight standard deviation.
            r = (s * torch.clamp(child.r, 0, 5)).item()
            w = F.hardshrink(w - mean, r + 1e-6)
            conv.weight.data = w

            setattr(m, name, conv)

        if isinstance(child, SparseFc):
            fc = child.fc
            w = fc.weight.data

            mean = 0.0  # w.mean()
            s = (w - mean).std()
            r = (s * torch.clamp(child.r, 0, 5)).item()
            w = F.hardshrink(w - mean, r + 1e-6)
            fc.weight.data = w

            setattr(m, name, fc)
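
A minimal usage sketch for Example #1, assuming a hypothetical SparseConv
wrapper that holds a plain nn.Conv2d in `.conv` plus a learned scale `.r`
(stand-ins, not the original project's classes):

import torch
import torch.nn as nn


class SparseConv(nn.Module):  # hypothetical stand-in
    def __init__(self, in_ch, out_ch, k):
        super().__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, k)
        self.r = nn.Parameter(torch.tensor(1.0))  # learned threshold scale

    def forward(self, x):
        return self.conv(x)


class SparseFc(nn.Module):  # stub so the name resolves inside iter_desparsify
    pass


model = nn.Sequential(SparseConv(3, 8, 3), nn.ReLU())
iter_desparsify(model)
print(type(model[0]))  # now a plain nn.Conv2d with hard-shrunk weights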
Example #2
import torch
import torch.nn.functional as F


def sparsity(model, print_per_layer=False):
    # Count how many weights fall below each layer's learned shrink
    # threshold, i.e. would be zeroed by F.hardshrink.
    zeros_cnt = 0
    cnt = 0
    for name, layer in model.named_modules():
        if 'Sparse' in layer.__class__.__name__:
            if 'Conv' in layer.__class__.__name__:
                w = layer.conv.weight
            elif 'Fc' in layer.__class__.__name__:
                w = layer.fc.weight
            else:
                print("Unrecognized Sparse module: {}".format(name))
                continue  # skip: `w` is undefined for unknown layer types

            m = 0.0  # w.mean()
            a = torch.clamp(layer.r, 0, 5)
            lam = a * (w - m).std()
            # Normalize by the threshold, then shrink at 1: the entries that
            # land exactly on zero are the ones hardshrink would prune.
            nw = F.hardshrink((w - m) / (lam + 1e-6), 1)
            tsparsity = (nw == 0).float().sum().item()
            tnum = nw.numel()
            zeros_cnt += tsparsity
            cnt += tnum

            if print_per_layer:
                print("{} sparsity: {}%".format(
                    name, round(100.0 * tsparsity / tnum, 2)))

    return 100 * float(zeros_cnt) / float(cnt), zeros_cnt
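
The normalization in Example #2 (divide by the threshold, shrink at 1)
flags exactly the entries that hardshrink at the raw threshold would zero;
a quick self-contained check with arbitrary values:

import torch
import torch.nn.functional as F

w = torch.randn(1000)
lam = 0.5
pruned_normalized = (F.hardshrink(w / lam, 1) == 0).sum()
pruned_direct = (F.hardshrink(w, lam) == 0).sum()
assert pruned_normalized == pruned_direct  # both count entries with |w| <= lam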
Example #3
import copy

import torch
import torch.nn.functional as F


def test_activate():
    # `engine.activate` is project-specific: it dispatches a string spec
    # to the matching functional activation.
    a = torch.randn(3, 4)
    b = copy.deepcopy(a)
    a = engine.activate(a, 'hardshrink')
    b = F.hardshrink(b)
    assert torch.equal(a, b), 'activate with str spec did not work correctly.'
    a = engine.activate(a, 'relu')
    b = F.relu(b)
    assert torch.equal(a, b), 'activate with str spec did not work correctly.'
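
`engine.activate` is project-specific; a minimal sketch of what such a
string-spec dispatcher might look like (hypothetical, not the tested
project's implementation):

import torch.nn.functional as F

_ACTIVATIONS = {'hardshrink': F.hardshrink, 'relu': F.relu}


def activate(x, spec):
    # Map a string spec to the matching functional activation.
    return _ACTIVATIONS[spec](x)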
Example #4
import torch
import torch.nn.functional as F


def sparsify(param, sparsity):
    # Zero every entry of `param` whose magnitude is at or below `sparsity`.
    return F.hardshrink(param, sparsity).data


# In the original snippet the loop below sat, unreachable, after the
# `return` above; it is split out here (the wrapper name is assumed).
def test_epoch(model, model2, testloader, epoch):
    total = 0
    correct = 0
    with torch.no_grad():  # replaces the deprecated volatile Variables
        for batch_idx, (inputs, targets) in enumerate(testloader):
            outputs = model2(inputs)
            _, predicted = torch.max(outputs, 1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    print('Epoch : %d Test Acc : %.3f' % (epoch, 100. * correct / total))
    print('--------------------------------------------------------------')
    model.train()
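
`sparsify` is a thin wrapper over F.hardshrink, which zeroes every entry
whose magnitude is at or below the threshold and passes the rest through
unchanged; a tiny demonstration with made-up values:

import torch
import torch.nn.functional as F

p = torch.tensor([-0.9, -0.3, 0.0, 0.2, 0.7])
print(F.hardshrink(p, 0.5))  # -0.3, 0.0 and 0.2 are zeroed; -0.9 and 0.7 survive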
Example #5
 def test_hardshrink(self):
     # Smoke test: hardshrink should run on a CUDA tensor of the
     # test's dtype without error.
     inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
     output = F.hardshrink(inp, lambd=0.5)
Example #6
 def out(self, x, pow=torch.ones(1)):
     # Shrink threshold is self.thres raised to a (tensor-valued) power.
     # At eval time, negatives are clipped with ReLU before shrinking.
     if self.training:
         return F.hardshrink(x, lambd=self.thres**pow.item())
     else:
         return F.hardshrink(F.relu(x), lambd=self.thres**pow.item())
Example #7
import torch.nn.functional as F


def hardfix(inp, tresh, fix_val=None):
    # Hardshrink zeroes everything with |x| <= tresh; the hardtanh then
    # clamps the survivors to roughly +/-tresh, giving a ternary output.
    out = F.hardshrink(inp, lambd=tresh)
    out = F.hardtanh(out, min_val=-tresh - 1e-6, max_val=tresh + 1e-6)
    if fix_val is not None:  # `if fix_val:` would wrongly skip fix_val == 0
        out *= fix_val / tresh
    return out
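
The net effect of hardfix is a ternary quantizer: the shrink zeroes small
entries, the clamp collapses every survivor to roughly +/-tresh, and the
optional rescale maps those to +/-fix_val. A quick check with made-up values:

import torch

x = torch.tensor([-2.0, -0.4, 0.0, 0.3, 1.5])
print(hardfix(x, tresh=0.5, fix_val=1.0))
# small entries go to 0; the rest collapse to approximately +/-1.0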
Example #8
pic(x, y, (2,4,2), 'elu')
y = F.softplus(x)
pic(x, y, (2,4,2), 'softplus')

''' tanh/ hardtanh/ softsign '''
x = torch.arange(-5,5,0.1).view(-1,1)
y = torch.tanh(x)
pic(x, y, (2,4,3), 'tanh')
y = F.hardtanh(x)
pic(x, y, (2,4,3), 'hardtanh')
y = F.softsign(x)
pic(x, y, (2,4,3), 'softsign')

''' hardshrink/ tanhshrink/ softshrink '''
x = torch.arange(-3,3,0.1).view(-1,1)
y = F.hardshrink(x)
pic(x, y, (2,4,4), 'hardshrink')
y = F.tanhshrink(x)
pic(x, y, (2,4,4), 'tanhshrink')
y = F.softshrink(x)
pic(x, y, (2,4,4), 'softshrink')

''' sigmoid '''
x = torch.arange(-5,5,0.1).view(-1,1)
y = torch.sigmoid(x)
pic(x, y, (2,4,5), 'sigmoid')

''' relu6 '''
x = torch.arange(-5,10,0.1).view(-1,1)
y = F.relu6(x)
pic(x, y, (2,4,6), 'relu6')
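
The `pic` helper is defined earlier in this (truncated) script; a plausible
matplotlib stand-in, assuming `pos` is a (rows, cols, index) subplot spec:

import matplotlib.pyplot as plt


def pic(x, y, pos, title):
    # Plot the activation curve y(x) on the given subplot.
    plt.subplot(*pos)
    plt.plot(x.numpy(), y.numpy())
    plt.title(title)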
Example #9
def hardshrink(input, *args, **kwargs):
    # Apply hardshrink to a sparse tensor's feature matrix (`input.F`)
    # and re-wrap the result; `_wrap_tensor` is project-specific.
    return _wrap_tensor(input, F.hardshrink(input.F, *args, **kwargs))
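
A self-contained sketch of the wrapping pattern in Example #9, assuming the
sparse tensor keeps its dense feature matrix in `.F` (as MinkowskiEngine
does); `ToySparse` and this `_wrap_tensor` are stand-ins:

import torch
import torch.nn.functional as F


class ToySparse:
    def __init__(self, coords, feats):
        self.C, self.F = coords, feats  # coordinates and features


def _wrap_tensor(proto, new_feats):
    # Reuse the coordinates, swap in the transformed features.
    return ToySparse(proto.C, new_feats)


st = ToySparse(torch.zeros(4, 3), torch.randn(4, 8))
out = hardshrink(st, 0.5)
assert ((out.F == 0) | (out.F.abs() > 0.5)).all()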
Example #10
 def forward(self, input):
     out = F.linear(input, self.weight, self.bias)
     # Shrink with the learned per-layer threshold, then keep the
     # positive part only.
     return F.relu(F.hardshrink(out, self.lam_lin.data[0]))
Example #11
 def forward(ctx, w):
     # `ctx` marks this as a torch.autograd.Function forward pass:
     # weights with |w| <= 1 are zeroed.
     w = F.hardshrink(w, 1)
     return w
Example #12
 def forward(self, x, y, z, w):
     x = F.hardshrink(x)       # default threshold lambd=0.5
     y = F.hardshrink(y, 0.1)
     z = F.hardshrink(z, 0.22)
     w = F.hardshrink(w, 0)    # lambd=0 leaves every value unchanged
     return x, y, z, w
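
These calls exercise the lambd parameter, which defaults to 0.5; a boundary
check (values made up) showing that entries exactly at +/-lambd are zeroed
and that lambd=0 leaves nonzero inputs untouched:

import torch
import torch.nn.functional as F

t = torch.tensor([-0.6, -0.5, 0.5, 0.6])
print(F.hardshrink(t))       # default lambd=0.5: the +/-0.5 entries are zeroed
print(F.hardshrink(t, 0.0))  # lambd=0: every value passes through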