Example #1
    def stepper(self, data, s, target=None, beta=0, return_derivatives=False):
        dsdt = []
        # Derivative of the output layer s[0]: leak term plus input from s[1]
        # through w[1] and a direct connection from the data through w[2],
        # gated by rhop (the derivative of the activation).
        dsdt.append(-s[0] +
                    torch.mul(rhop(s[0]), self.w[1](rho(s[1])) +
                              self.w[2](rho(data))))

        # Nudged phase: pull the output toward the target with strength beta.
        if beta > 0:
            dsdt[0] = dsdt[0] + beta * (target - s[0])

        # Derivative of the hidden layer s[1]: input from the data through
        # w[4] and feedback from s[0] through the transpose of w[1].
        dsdt.append(-s[1] + torch.mul(
            rhop(s[1]), self.w[4](rho(data)) +
            torch.mm(rho(s[0]), self.w[1].weight)))

        # One Euler step of the dynamics.
        for i in range(self.ns):
            s[i] = s[i] + self.dt * dsdt[i]

        if not self.no_clamp:
            # Keep the states in [0, 1] and zero the derivative wherever the
            # clamp is active.
            for i in range(self.ns):
                s[i] = s[i].clamp(min=0).clamp(max=1)
                dsdt[i] = torch.where((s[i] == 0) | (s[i] == 1),
                                      torch.zeros_like(dsdt[i],
                                                       device=self.device),
                                      dsdt[i])

        if return_derivatives:
            return s, dsdt
        else:
            return s
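A minimal self-contained sketch of the same relaxation dynamics, for readers who want to run something outside the surrounding class. The hard-sigmoid rho/rhop, the layer sizes, the random weights and the step size below are placeholder assumptions, not values taken from the example above, and the direct data-to-output connection (w[2]) is omitted for brevity.

import torch

def rho(x):
    return x.clamp(0, 1)  # hard-sigmoid activation (an assumed choice)

def rhop(x):
    return ((x >= 0) & (x <= 1)).float()  # its derivative: 1 in the linear region

batch, d_in, d_hid, d_out = 4, 10, 8, 3
data = torch.rand(batch, d_in)
w_hid = 0.1 * torch.randn(d_hid, d_in)    # data -> hidden
w_out = 0.1 * torch.randn(d_out, d_hid)   # hidden -> output
s = [torch.zeros(batch, d_out), torch.zeros(batch, d_hid)]
dt = 0.5

# Free-phase relaxation (beta = 0): repeat the Euler step until the state settles.
for _ in range(30):
    dsdt = [
        -s[0] + rhop(s[0]) * (rho(s[1]) @ w_out.t()),
        -s[1] + rhop(s[1]) * (rho(data) @ w_hid.t() + rho(s[0]) @ w_out),
    ]
    s = [(si + dt * d).clamp(0, 1) for si, d in zip(s, dsdt)]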
Example #2
    def stepper(self, data, s, target=None, beta=0, return_derivatives=False):
        dsdt = []
        # Derivative of the output layer s[0], driven by s[1] through w[0].
        dsdt.append(-s[0] + rho(self.w[0](s[1])))
        # Nudged phase: pull the output toward the target with strength beta.
        if beta > 0:
            dsdt[0] = dsdt[0] + beta * (target - s[0])

        # Hidden layers: feedforward input from s[i + 1] through w[2 * i] plus
        # feedback from s[i - 1] through the transposed weights.
        for i in range(1, self.ns - 1):
            dsdt.append(-s[i] +
                        rho(self.w[2 * i](s[i + 1]) +
                            torch.mm(s[i - 1], self.w[2 * (i - 1)].weight)))

        # Last layer: driven by the data plus feedback from s[-2].
        dsdt.append(-s[-1] +
                    rho(self.w[-1](data) + torch.mm(s[-2], self.w[-3].weight)))

        # One Euler step of the dynamics.
        for i in range(self.ns):
            s[i] = s[i] + self.dt * dsdt[i]

        if return_derivatives:
            return s, dsdt
        else:
            return s
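Steppers like the two above are typically driven in two phases in Equilibrium Propagation: a long free phase with beta = 0, then a shorter nudged phase with beta > 0 starting from the free fixed point. A sketch of that driving loop, assuming only the stepper signature shown above; the phase lengths T and K and the nudging strength beta are placeholders.

def run_two_phases(net, data, s, target, T=100, K=20, beta=0.5):
    # Free phase: relax toward a fixed point with no nudging.
    for _ in range(T):
        s = net.stepper(data, s)

    # Remember the free fixed point, then run the nudged phase from it.
    seq = [si.clone() for si in s]
    for _ in range(K):
        s = net.stepper(data, s, target=target, beta=beta)

    # (s, seq) is the pair that computeGradients-style methods below consume.
    return s, seq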
Example #3
from chi_diff import applyrho
from kernel import getBitposAfterOneRhoPi as lol
from kernel import getOneBitPos
import main

# line = input().split(" ")
# w = 64
# print(lol(int(line[0]), int(line[1]), w))

# Cross-check: for every single-bit state of a 5x5 array of w-bit lanes,
# compare the closed-form position computed by applyrho with the position
# obtained by running the reference main.rho and locating the moved bit.
w = 64
for x in range(5):
    for y in range(5):
        for z in range(w):
            o1 = applyrho(x, y, z)

            # Build a state with a single bit set at (x, y, z).
            A = [[[0 for k in range(w)] for j in range(5)] for i in range(5)]
            A[x][y][z] = 1

            # Apply the reference rho step and locate where the bit ended up.
            A = main.rho(A, w)
            o2 = getOneBitPos(A, w)[0]
            if o1 != o2:
                # Report any disagreement and pause until Enter is pressed.
                print(x, y, z)
                input()
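If applyrho and main.rho implement the rho step of Keccak-f (which the 5x5 state of 64-bit lanes suggests), the closed form being cross-checked is just a fixed per-lane rotation. For reference, a standalone sketch of the standard offset table and the single-bit mapping it induces; the exact return encoding of applyrho is not assumed here.

def rho_offsets(w=64):
    # Rotation offsets of the Keccak rho step (FIPS 202): starting from
    # (x, y) = (1, 0), walk (x, y) -> (y, 2x + 3y) for t = 0..23 and assign
    # offset (t + 1)(t + 2) / 2 mod w; lane (0, 0) keeps offset 0.
    r = [[0] * 5 for _ in range(5)]
    x, y = 1, 0
    for t in range(24):
        r[x][y] = ((t + 1) * (t + 2) // 2) % w
        x, y = y, (2 * x + 3 * y) % 5
    return r

def rho_single_bit(x, y, z, w=64):
    # rho only rotates within a lane: a bit at (x, y, z) moves to
    # (x, y, (z + r[x][y]) mod w).
    r = rho_offsets(w)
    return x, y, (z + r[x][y]) % w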
Example #4
    def computeGradients(self, data, s, i):
        gradwpf = []
        gradwpf_bias = []
        gradwpb = []
        gradwip = []
        gradwpi = []

        # Forward weights wpf: reduce the mismatch between each layer's rate
        # rho(s[k]) and the rate predicted from its attenuated basal voltage.
        for k in range(self.ns):
            if k == 0:
                vb = self.wpf[k](rho(data))
                vbhat = self.gb / (self.gb + self.glk + self.ga) * vb
                gradwpf.append((1 / self.batch_size) * torch.mm(
                    torch.transpose(rho(s[k]) - rho(vbhat), 0, 1), rho(data)))
            else:
                vb = self.wpf[k](rho(s[k - 1]))
                vbhat = self.gb / (self.gb + self.glk + self.ga) * vb
                gradwpf.append((1 / self.batch_size) * torch.mm(
                    torch.transpose(rho(s[k]) - rho(vbhat), 0, 1),
                    rho(s[k - 1])))

            gradwpf_bias.append((1 / self.batch_size) *
                                (rho(s[k]) - rho(vbhat)).sum(0))

        del vb, vbhat

        for k in range(self.ns - 1):
            # Pyramidal-to-interneuron weights wip: drive the dendritic
            # prediction vihat toward the interneuron rate rho(i[k + 1]).
            vi = self.wip[k](rho(s[k]))
            vihat = self.gd / (self.gd + self.glk) * vi
            gradwip.append((1 / self.batch_size) * torch.mm(
                torch.transpose(rho(i[k + 1]) - rho(vihat), 0, 1), rho(s[k])))

            # Interneuron-to-pyramidal weights wpi: push the apical voltage va
            # toward zero, i.e. learn to cancel the top-down input.
            va = self.wpi[k](rho(i[k + 1])) + self.wpb[k](rho(s[k + 1]))
            gradwpi.append((1 / self.batch_size) * torch.mm(
                torch.transpose(-va, 0, 1), rho(i[k + 1])))

            # Top-down weights wpb: make the top-down prediction match the
            # lower layer's rate rho(s[k]).
            vtdhat = self.wpb[k](rho(s[k + 1]))
            gradwpb.append((1 / self.batch_size) * torch.mm(
                torch.transpose(rho(s[k]) - rho(vtdhat), 0, 1), rho(s[k + 1])))

            del vi, vihat, va, vtdhat

        return gradwpf, gradwpf_bias, gradwpb, gradwpi, gradwip
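The method above only returns the gradient estimates. A plausible way to consume them is a plain per-family update; the sketch below assumes the wpf/wip/wpi/wpb entries are nn.Linear modules with a bias on the forward layers (as the calls and gradwpf_bias above suggest), uses a single placeholder learning rate, and takes the additive sign convention as an assumption.

import torch

def apply_updates(net, gradwpf, gradwpf_bias, gradwpb, gradwpi, gradwip, lr=0.01):
    # In-place update of each weight family with its plasticity estimate.
    with torch.no_grad():
        for k in range(len(gradwpf)):
            net.wpf[k].weight += lr * gradwpf[k]
            net.wpf[k].bias += lr * gradwpf_bias[k]
        for k in range(len(gradwip)):
            net.wip[k].weight += lr * gradwip[k]
            net.wpi[k].weight += lr * gradwpi[k]
            net.wpb[k].weight += lr * gradwpb[k]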
Example #5
    def stepper(self, data, s, i, track_va=False, **kwargs):

        dsdt = []
        #*****dynamics of the interneurons*****#
        didt = []
        #**************************************#

        #***track apical voltage***#
        if track_va:
            va_topdown = []
            va_cancellation = []
        #**************************#


        #Compute derivative of the somatic membrane potential
        for k in range(len(s)):

            #Compute basal voltage
            if k == 0:  #visible layer
                vb = self.wpf[k](rho(data))
            else:
                vb = self.wpf[k](rho(s[k - 1]))

            
            #a)for hidden neurons
            if k < len(s) - 1:

                #Compute and optionally store apical voltage
                va = self.wpi[k](rho(i[k + 1])) + self.wpb[k](rho(s[k + 1]))
                if track_va:
                    va_topdown.append(self.wpb[k](rho(s[k + 1])))
                    va_cancellation.append(self.wpi[k](rho(i[k + 1])))

                #Compute total derivative (Eq. 1)
                dsdt.append(-self.glk * s[k] + self.gb * (vb - s[k]) +
                            self.ga * (va - s[k]) +
                            self.noise * torch.randn_like(s[k]))

                del va

            #b) for output neurons
            else:

                #Compute total derivative (Eq. 1) *with ga = 0*:
                dsdt.append(-self.glk * s[k] + self.gb * (vb - s[k]) +
                            self.noise * torch.randn_like(s[k]))

                #Nudging
                if 'target' in kwargs:
                    dsdt[k] = dsdt[k] + self.gsom * (kwargs['target'] - s[k])

        
            del vb


        #Compute derivative of the interneuron membrane potential
        for k in range(len(i)):
            if i[k] is not None:
                #Compute basal interneuron voltage
                vi = self.wip[k - 1](rho(s[k - 1]))
                
                #Compute total derivative (Eq. 2)
                didt.append(-self.glk * i[k] + self.gd * (vi - i[k]) +
                            self.gsom * (s[k] - i[k]) +
                            self.noise * torch.randn_like(i[k]))
            else:
                didt.append(None)

        #Update the values of the neurons
        for k in range(len(s)):
            s[k] = s[k] + self.dt * dsdt[k]

        for k in range(len(i)):
            if i[k] is not None:
                i[k] = i[k] + self.dt * didt[k]

        if not track_va:
            return s, i

        else:
            return s, i, [va_topdown, va_cancellation]
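A sketch of how such a stepper is usually driven: iterate it for many time steps and, optionally, record the apical-voltage decomposition it returns when track_va is on. It assumes only a net object exposing stepper exactly as above; the number of steps is a placeholder.

def simulate(net, data, s, i, n_steps=100, target=None, track_va=False):
    # Integrate the somatic and interneuron dynamics for n_steps Euler steps,
    # optionally nudging the output layer and recording apical voltages.
    kwargs = {} if target is None else {'target': target}
    va_trace = []
    for _ in range(n_steps):
        if track_va:
            s, i, va = net.stepper(data, s, i, track_va=True, **kwargs)
            va_trace.append(va)  # [top-down, cancellation] contributions per layer
        else:
            s, i = net.stepper(data, s, i, **kwargs)
    return (s, i, va_trace) if track_va else (s, i)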
Example #6
    def stepper(self,
                data,
                s,
                inds,
                target=None,
                beta=0,
                return_derivatives=False,
                inplace=False):

        dsdt = []

        #CLASSIFIER PART

        #last classifier layer
        dsdt.append(-s[0] + rho(self.fc[0](s[1].view(s[1].size(0), -1))))
        if beta > 0:
            dsdt[0] = dsdt[0] + beta * (target - s[0])

        #middle classifier layer
        for i in range(1, len(self.size_classifier_tab) - 1):
            dsdt.append(-s[i] +
                        rho(self.fc[i](s[i + 1].view(s[i + 1].size(0), -1)) +
                            torch.mm(s[i - 1], self.fc[i - 1].weight)))

        #CONVOLUTIONAL PART

        #last conv layer
        s_pool, ind = self.pool(self.conv[0](s[self.nc + 1]))
        inds[self.nc] = ind
        dsdt.append(-s[self.nc] + rho(s_pool + torch.mm(
            s[self.nc - 1], self.fc[-1].weight).view(s[self.nc].size())))

        del s_pool, ind

        #middle layers
        for i in range(1, self.nconv - 1):
            s_pool, ind = self.pool(self.conv[i](s[self.nc + i + 1]))
            inds[self.nc + i] = ind

            if inds[self.nc + i - 1] is not None:

                output_size = [
                    s[self.nc + i - 1].size(0), s[self.nc + i - 1].size(1),
                    self.size_conv_tab[i - 1], self.size_conv_tab[i - 1]
                ]
                s_unpool = F.conv_transpose2d(self.unpool(
                    s[self.nc + i - 1],
                    inds[self.nc + i - 1],
                    output_size=output_size),
                                              weight=self.conv[i - 1].weight,
                                              padding=self.P)

            dsdt.append(-s[self.nc + i] + rho(s_pool + s_unpool))
            del s_pool, s_unpool, ind, output_size

        #first conv layer
        s_pool, ind = self.pool(self.conv[-1](data))
        inds[-1] = ind
        if inds[-2] is not None:
            output_size = [
                s[-2].size(0), s[-2].size(1), self.size_conv_tab[-3],
                self.size_conv_tab[-3]
            ]
            s_unpool = F.conv_transpose2d(self.unpool(s[-2],
                                                      inds[-2],
                                                      output_size=output_size),
                                          weight=self.conv[-2].weight,
                                          padding=self.P)
        dsdt.append(-s[-1] + rho(s_pool + s_unpool))
        del s_pool, s_unpool, ind, output_size

        if not inplace:
            for i in range(len(s)):
                s[i] = s[i] + dsdt[i]
        else:
            for i in range(len(s)):
                s[i] += dsdt[i]

        if return_derivatives:
            return s, inds, dsdt
        else:
            del dsdt
            return s, inds
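The inds list exists because the feedback path has to invert the forward pooling: max-unpooling needs the argmax indices that max-pooling produced. A tiny standalone illustration of that round trip (the shapes are arbitrary).

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8)
# Forward: max-pool and keep the argmax indices.
pooled, idx = F.max_pool2d(x, kernel_size=2, return_indices=True)
# Feedback: scatter the pooled values back to the positions they came from,
# as the stepper above does before applying conv_transpose2d.
unpooled = F.max_unpool2d(pooled, idx, kernel_size=2, output_size=x.size())
print(pooled.shape, unpooled.shape)  # (2, 3, 4, 4) and (2, 3, 8, 8)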
Example #7
    def computeGradients(self, beta, data, s, seq):
        gradw = []
        gradw_bias = []  # bias gradients are not computed in this variant
        batch_size = s[0].size(0)

        # Equilibrium Propagation estimate: for each connection, the gradient
        # is the difference of pre/post rate correlations between the nudged
        # state (s) and the free state (seq), scaled by 1 / (beta * batch_size).
        gradw.append(
            (1 / (beta * batch_size)) *
            (torch.mm(torch.transpose(rho(s[0]), 0, 1), rho(s[0])) -
             torch.mm(torch.transpose(rho(seq[0]), 0, 1), rho(seq[0]))))
        gradw.append(
            (1 / (beta * batch_size)) *
            (torch.mm(torch.transpose(rho(s[0]), 0, 1), rho(s[1])) -
             torch.mm(torch.transpose(rho(seq[0]), 0, 1), rho(seq[1]))))
        gradw.append((1 / (beta * batch_size)) *
                     (torch.mm(torch.transpose(rho(s[0]), 0, 1), rho(data)) -
                      torch.mm(torch.transpose(rho(seq[0]), 0, 1), rho(data))))
        gradw.append(
            (1 / (beta * batch_size)) *
            (torch.mm(torch.transpose(rho(s[1]), 0, 1), rho(s[1])) -
             torch.mm(torch.transpose(rho(seq[1]), 0, 1), rho(seq[1]))))
        gradw.append((1 / (beta * batch_size)) *
                     (torch.mm(torch.transpose(rho(s[1]), 0, 1), rho(data)) -
                      torch.mm(torch.transpose(rho(seq[1]), 0, 1), rho(data))))

        return gradw, gradw_bias
Example #8
    def computeGradients(self, beta, data, s, seq):
        gradw = []
        gradw_bias = []
        batch_size = s[0].size(0)

        # Equilibrium Propagation estimate: difference of rate correlations
        # between the nudged state (s) and the free state (seq), scaled by
        # 1 / (beta * batch_size); a None entry is interleaved after each
        # gradient.
        for i in range(self.ns - 1):
            gradw.append((1 / (beta * batch_size)) * (
                torch.mm(torch.transpose(rho(s[i]), 0, 1), rho(s[i + 1])) -
                torch.mm(torch.transpose(rho(seq[i]), 0, 1), rho(seq[i + 1]))))
            gradw.append(None)
            gradw_bias.append(
                (1 / (beta * batch_size)) * (rho(s[i]) - rho(seq[i])).sum(0))
            gradw_bias.append(None)

        # The last layer is driven by the data, which is identical in both
        # phases, so the difference of correlations factors into one product.
        gradw.append((1 / (beta * batch_size)) * torch.mm(
            torch.transpose(rho(s[-1]) - rho(seq[-1]), 0, 1), rho(data)))
        gradw_bias.append(
            (1 / (beta * batch_size)) * (rho(s[-1]) - rho(seq[-1])).sum(0))

        return gradw, gradw_bias
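A sketch of the matching parameter update, assuming gradw and gradw_bias are index-aligned with a flat list of nn.Linear layers (which the interleaved None placeholders suggest); the learning rate and the additive sign convention are assumptions.

import torch

def apply_ep_update(layers, gradw, gradw_bias, lr=0.05):
    # One update per layer; the None placeholders in gradw / gradw_bias are
    # skipped so the lists can stay index-aligned with the layer list.
    with torch.no_grad():
        for layer, g, gb in zip(layers, gradw, gradw_bias):
            if g is not None:
                layer.weight += lr * g
            if gb is not None and layer.bias is not None:
                layer.bias += lr * gb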