Example #1
    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()

        # Shape of the tensors that we expect to receive as input
        self.input_shape = input_shape

        # Here we define the layers of our network

        # Third convolutional layer
        self.conv3 = nn.Conv2d(
            128, 192,
            3)  # 128 input channels, 192 output channels, 3x3 convolutions
        self.bn3 = nn.BatchNorm2d(192)  # Batch Norm layer
        # Fourth convolutional layer
        self.conv4 = nn.Conv2d(
            192, 256,
            3)  # 192 input channels, 256 output channels, 3x3 convolutions
        self.bn4 = nn.BatchNorm2d(256)  # Batch Norm layer

        self.conv_output_size = utils.shape2size(
            utils.get_conv_output_shape(self))

        # FC Layers
        self.fc5 = nn.Linear(
            self.conv_output_size,
            300)  # conv_output_size-dimensional input, 300-dimensional output
        self.bn5 = nn.BatchNorm1d(300)  # Batch Norm layer
        self.fc6 = nn.Linear(
            300, P.NUM_CLASSES
        )  # 300-dimensional input, 10-dimensional output (one per class)
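
These examples rely on two helpers from utils that are not shown. The sketch below is one plausible reading, inferred purely from how the helpers are used here (the bodies are assumptions, not the project's actual code): shape2size flattens a shape tuple into an element count, and get_conv_output_shape probes the convolutional stack with a dummy input.

import operator
from functools import reduce

import torch
import torch.nn as nn


def shape2size(shape):
    # Number of elements described by a shape tuple, e.g. (3, 32, 32) -> 3072
    return reduce(operator.mul, shape, 1)


def get_conv_output_shape(net):
    # Feed a dummy zero tensor of net.input_shape through the conv / batch-norm
    # layers registered so far, to discover the feature shape seen by the first
    # FC layer. (Pooling or activations applied only in forward() would not be
    # captured by this sketch.)
    with torch.no_grad():
        x = torch.zeros(1, *net.input_shape)
        for m in net.children():
            if isinstance(m, (nn.Conv2d, nn.BatchNorm2d)):
                x = m(x)
    return tuple(x.size())[1:]
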
Example #2
    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()

        # Shape of the tensors that we expect to receive as input
        self.input_shape = input_shape

        # Here we define the layers of our network

        # First convolutional layer
        self.conv1 = nn.Conv2d(
            3, 96, 5)  # 3 input channels, 96 output channels, 5x5 convolutions
        self.bn1 = nn.BatchNorm2d(96)  # Batch Norm layer
        # Second convolutional layer
        self.conv2 = nn.Conv2d(
            96, 128,
            3)  # 96 input channels, 128 output channels, 3x3 convolutions
        self.bn2 = nn.BatchNorm2d(128)  # Batch Norm layer
        # Third convolutional layer
        self.conv3 = nn.Conv2d(
            128, 192,
            3)  # 128 input channels, 192 output channels, 3x3 convolutions
        self.bn3 = nn.BatchNorm2d(192)  # Batch Norm layer

        self.conv_output_size = utils.shape2size(
            utils.get_conv_output_shape(self))

        # FC Layers
        self.fc4 = nn.Linear(
            self.conv_output_size, P.NUM_CLASSES
        )  # conv_output_size-dimensional input, 10-dimensional output (one per class)
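
A minimal instantiation sketch for a network like the one above, assuming P.INPUT_SHAPE is a (channels, height, width) tuple such as (3, 32, 32) and that the class also defines a forward() method that is simply not part of the snippet (both are assumptions):

import torch

net = Net(input_shape=(3, 32, 32))    # hypothetical CIFAR-like input shape
x = torch.randn(8, *net.input_shape)  # batch of 8 dummy images
logits = net(x)                       # relies on the forward() omitted from the example
print(logits.shape)                   # expected: torch.Size([8, P.NUM_CLASSES])
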
Example #3
def gauss(x, w, sigma=None):
    d = torch.norm(kernel_sum2d(x, -w), p=2, dim=4)
    if sigma is None:
        return torch.exp(-d.pow(2) / (2 * utils.shape2size(tuple(w[0].size())))
                         )  # heuristic: use the kernel dimensionality (number of elements) as the variance
    #if sigma is None: return torch.exp(-d.pow(2) / (2 * torch.norm(w.view(w.size(0), 1, -1) - w.view(1, w.size(0), -1), p=2, dim=2).max().pow(2)/w.size(0))) # heuristic: normalization condition
    #if sigma is None: return torch.exp(-d.pow(2) / (2 * d.mean().pow(2)))
    return torch.exp(-d.pow(2) / (2 * (sigma.view(1, -1, 1, 1).pow(2))))
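
gauss computes a Gaussian radial basis value exp(-d^2 / (2 * sigma^2)) from the distance d between the input and each kernel; when sigma is None, the variance falls back to the element count of a single kernel (under the same shape2size-as-product assumption as above). A small numeric sketch of that fallback, with the distance tensor mocked since kernel_sum2d is not shown:

import torch

w = torch.randn(10, 3, 5, 5)                 # 10 hypothetical kernels of shape (3, 5, 5)
variance = 3 * 5 * 5                         # shape2size(tuple(w[0].size())) == 75 under that assumption
d = torch.rand(2, 10, 8, 8)                  # mocked distance tensor standing in for the kernel_sum2d norm
out = torch.exp(-d.pow(2) / (2 * variance))  # values in (0, 1], larger for smaller distances
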
Example #4
    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()

        # Shape of the tensors that we expect to receive as input
        self.input_shape = input_shape
        self.input_size = utils.shape2size(self.input_shape)

        # FC Layers
        self.fc = nn.Linear(
            self.input_size, P.NUM_CLASSES
        )  # input_size-dimensional input, 10-dimensional output (one per class)
Example #5
    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()

        # Shape of the tensors that we expect to receive as input
        self.input_shape = input_shape
        self.input_size = utils.shape2size(self.input_shape)

        # Here we define the layers of our network

        # FC Layers
        self.fc5 = nn.Linear(
            self.input_size,
            300)  # input_size-dimensional input, 300-dimensional output
        self.bn5 = nn.BatchNorm1d(300)  # Batch Norm layer
        self.fc6 = nn.Linear(
            300, P.NUM_CLASSES
        )  # 300-dimensional input, 10-dimensional output (one per class)
Example #6
    def __init__(self, input_shape=P.INPUT_SHAPE):
        super(Net, self).__init__()

        # Shape of the tensors that we expect to receive as input
        self.input_shape = input_shape

        # Here we define the layers of our network

        # First convolutional layer
        self.conv1 = nn.Conv2d(
            3, 96, 5, bias=False
        )  # 3 input channels, 96 output channels, 5x5 convolutions
        self.bn1 = nn.BatchNorm2d(96, affine=False)  # Batch Norm layer

        self.conv_output_size = utils.shape2size(
            utils.get_conv_output_shape(self))

        # FC Layers
        self.fc2 = nn.Linear(
            self.conv_output_size, P.NUM_CLASSES, bias=False
        )  # conv_output_size-dimensional input, 10-dimensional output (one per class)
Example #7
    def __init__(self,
                 inpt_shape=(1, 28, 28),
                 neuron_shape=(10, 10),
                 vrest=0.5,
                 vreset=0.5,
                 vth=1.,
                 lbound=0.,
                 theta_w=1e-3,
                 sigma=1.,
                 conn_strength=1.,
                 sigma_lateral_exc=1.,
                 exc_strength=1.,
                 sigma_lateral_inh=1.,
                 inh_strength=1.,
                 refrac=5,
                 tc_decay=50.,
                 tc_trace=20.,
                 dt=1.0,
                 nu=(1e-4, 1e-2),
                 reduction=None):
        super().__init__(dt=dt)

        self.inpt_shape = inpt_shape
        self.n_inpt = utils.shape2size(self.inpt_shape)
        self.neuron_shape = neuron_shape
        self.n_neurons = utils.shape2size(self.neuron_shape)
        self.dt = dt

        # Layers
        input = Input(n=self.n_inpt,
                      shape=self.inpt_shape,
                      traces=True,
                      tc_trace=tc_trace)
        population = LIFNodes(shape=self.neuron_shape,
                              traces=True,
                              lbound=lbound,
                              rest=vrest,
                              reset=vreset,
                              thresh=vth,
                              refrac=refrac,
                              tc_decay=tc_decay,
                              tc_trace=tc_trace)
        inh = IFNodes(shape=self.neuron_shape,
                      traces=True,
                      lbound=0.,
                      rest=0.,
                      reset=0.,
                      thresh=0.99,
                      refrac=0,
                      tc_trace=tc_trace)

        # Coordinates
        self.coord_x = torch.rand(
            neuron_shape) * self.neuron_shape[1] / self.neuron_shape[0]
        self.coord_y = torch.rand(neuron_shape)
        self.coord_x_disc = (
            self.coord_x * self.inpt_shape[2] /
            (self.neuron_shape[1] / self.neuron_shape[0])).long()
        self.coord_y_disc = (self.coord_y * self.inpt_shape[1]).long()
        grid_x = (torch.arange(self.inpt_shape[2]).unsqueeze(0).float() +
                  0.5) * (self.neuron_shape[1] /
                          self.neuron_shape[0]) / self.inpt_shape[2]
        grid_y = (torch.arange(self.inpt_shape[1]).unsqueeze(1).float() +
                  0.5) / self.inpt_shape[1]

        # Input-Neurons connections
        w = torch.abs(
            torch.randn(self.inpt_shape[1], self.inpt_shape[2],
                        *self.neuron_shape))
        for k in range(neuron_shape[0]):
            for l in range(neuron_shape[1]):
                sq_dist = (grid_x - self.coord_x[k, l])**2 + (
                    grid_y - self.coord_y[k, l])**2
                w[:, :, k, l] *= torch.exp(-sq_dist / (2 * sigma**2))
        w = w.view(self.n_inpt, self.n_neurons)
        input_mask = w < theta_w
        w[input_mask] = 0.  # Drop connections smaller than threshold
        input_conn = Connection(source=input,
                                target=population,
                                w=w,
                                update_rule=PostPre,
                                nu=nu,
                                reduction=reduction,
                                wmin=0,
                                norm=conn_strength)
        input_conn.normalize()

        # Excitatory self-connections
        w = torch.abs(torch.randn(*self.neuron_shape, *self.neuron_shape))
        for k in range(neuron_shape[0]):
            for l in range(neuron_shape[1]):
                sq_dist = (self.coord_x - self.coord_x[k, l])**2 + (
                    self.coord_y - self.coord_y[k, l])**2
                w[:, :, k,
                  l] *= torch.exp(-sq_dist / (2 * sigma_lateral_exc**2))
                w[k, l, k,
                  l] = 0.  # set connection from neuron to itself to zero
        w = w.view(self.n_neurons, self.n_neurons)
        exc_mask = w < theta_w
        w[exc_mask] = 0.  # Drop connections smaller than threshold
        self_conn_exc = Connection(source=population,
                                   target=population,
                                   w=w,
                                   update_rule=PostPre,
                                   nu=nu,
                                   reduction=reduction,
                                   wmin=0,
                                   norm=exc_strength)
        self_conn_exc.normalize()

        # Inhibitory self-connections (routed through the inhibitory layer)
        w = torch.eye(self.n_neurons)
        exc_inh = Connection(source=population, target=inh, w=w)
        w = -torch.abs(torch.randn(*self.neuron_shape, *self.neuron_shape))
        for k in range(neuron_shape[0]):
            for l in range(neuron_shape[1]):
                sq_dist = (self.coord_x - self.coord_x[k, l])**2 + (
                    self.coord_y - self.coord_y[k, l])**2
                w[:, :, k,
                  l] *= torch.exp(-sq_dist / (2 * sigma_lateral_inh**2))
                w[k, l, k,
                  l] = 0.  # set connection from neuron to itself to zero
        w = w.view(self.n_neurons, self.n_neurons)
        inh_mask = w > -theta_w
        w[inh_mask] = 0.  # Drop connections whose magnitude is below the threshold
        self_conn_inh = Connection(source=inh,
                                   target=population,
                                   w=w,
                                   update_rule=PostPre,
                                   nu=tuple(-a for a in nu),
                                   reduction=reduction,
                                   wmax=0,
                                   norm=inh_strength)
        self_conn_inh.normalize()

        # Add layers to network
        self.add_layer(input, name="X")
        self.add_layer(population, name="Y")
        self.add_layer(inh, name="Z")

        # Add connections
        self.add_connection(input_conn, source="X", target="Y")
        self.add_connection(self_conn_exc, source="Y", target="Y")
        self.add_connection(exc_inh, source="Y", target="Z")
        self.add_connection(self_conn_inh, source="Z", target="Y")

        # Add weight masks to network
        self.masks = {}
        self.add_weight_mask(mask=input_mask, connection_id=("X", "Y"))
        self.add_weight_mask(mask=exc_mask, connection_id=("Y", "Y"))
        self.add_weight_mask(mask=inh_mask, connection_id=("Z", "Y"))

        # Add monitors to record neuron spikes
        self.spike_monitor = Monitor(self.layers["Y"], ["s"])
        self.add_monitor(self.spike_monitor, name="Spikes")
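
A minimal usage sketch for this spiking network, assuming the class is a BindsNET Network subclass named Net and that the usual Network.run() / Monitor.get() interface applies (the class name, spike encoding, input tensor layout, and simulation length are all assumptions):

import torch

net = Net(inpt_shape=(1, 28, 28), neuron_shape=(10, 10))
time = 250                                               # hypothetical number of simulation time steps
spikes = (torch.rand(time, 1, 1, 28, 28) < 0.1).float()  # random Bernoulli input spike train
net.run(inputs={"X": spikes}, time=time)                 # drive the "X" input layer
y_spikes = net.spike_monitor.get("s")                    # spikes recorded from the "Y" population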