def forward(self, screen, variables):
        action_prob, input = super(AdvantageActorCriticDepth, self).forward(screen, variables)
        if not self.training:
            _, action = action_prob.max(1, keepdim=True)
            return action, None

        # epsilon-greedy action selection (explore with probability 0.1)
        if random.random() < 0.1:
            action = torch.LongTensor(action_prob.size(0), 1).random_(0, action_prob.size(1))
            action = Variable(action)
            if USE_CUDA:
                action = action.cuda()
        else:
            _, action = action_prob.max(1, keepdim=True)

        # value prediction - critic
        value = F.selu(self.value1(input))
        value = torch.cat([value, variables], 1)
        value = self.batch_norm_value(value)
        value = self.value2(value)

        # save outputs for backprop
        action_prob = F.log_softmax(action_prob, dim=1)
        self.outputs.append(ModelOutput(action_prob.gather(-1, action), value))
        return action, value
Example #2
def readout(h, h2):
  # h and h2 map the same vertex keys to hidden states; R is a shared readout layer defined elsewhere
  catted_reads = map(lambda x: torch.cat([h[x[0]], h2[x[1]]], 1), zip(h2.keys(), h.keys()))
  activated_reads = map(lambda x: F.selu(R(x)), catted_reads)
  readout = Variable(torch.zeros(1, 128))
  for read in activated_reads:
    readout = readout + read
  return F.tanh( readout )
    def forward(self, screen, variables):
        # cnn
        screen_features = F.max_pool2d(screen, kernel_size=(20, 20), stride=(20, 20))
        screen_features = F.selu(self.conv1(screen_features))
        screen_features = F.selu(self.conv2(screen_features))
        screen_features = F.selu(self.conv3(screen_features))
        screen_features = screen_features.view(screen_features.size(0), -1)

        # features
        input = self.screen_features1(screen_features)
        input = self.batch_norm(input)
        input = F.selu(input)

        # action
        action = F.selu(self.action1(input))
        action = torch.cat([action, variables], 1)
        action = self.batch_norm_action(action)
        action = self.action2(action)

        return action, input
Example #4
def message_pass(g, h, k):
  for v in g.keys():
    neighbors = g[v]
    for neighbor in neighbors:
      e_vw = neighbor[0]  # edge feature between v and w
      w = neighbor[1]     # neighbouring vertex id

      m_w = V[k](h[w])    # message from the neighbour's hidden state (V[k] defined elsewhere)
      m_e_vw = E(e_vw)    # message from the edge feature (E defined elsewhere)
      reshaped = torch.cat((h[v], m_w, m_e_vw), 1)
      h[v] = F.selu(U[k](reshaped))  # update vertex state (U[k] defined elsewhere)
Example #5
    def forward(self, data, discretize=DISCRETE_CODES):
        code = self.enc_linear_1(data.to(device))
        code = F.sigmoid(self.enc_linear_2(code)).to(device)
        code = F.selu(self.enc_linear_3(code)).to(device)
        code = F.sigmoid(self.enc_linear_4(code)).to(device)
        y = torch.ones(self.code_size).to(device)
        x = torch.zeros(self.code_size).to(device)

        if (discretize):
            code = code.where(code < 0.5, y)
            code = code.where(code >= 0.5, x)

        return code
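The two chained where calls above binarize the code at a 0.5 threshold; a small standalone illustration of that pattern (the tensor values are made up):

import torch

code = torch.tensor([0.2, 0.7, 0.5])
ones, zeros = torch.ones_like(code), torch.zeros_like(code)
code = code.where(code < 0.5, ones)    # entries >= 0.5 become 1
code = code.where(code >= 0.5, zeros)  # remaining entries (< 0.5) become 0
print(code)  # tensor([0., 1., 1.])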
Example #6
    def forward(self, x):
        # average-pool first
        x = x.unsqueeze(1)  # (batch, seq_len) -> (batch, channels=1, seq_len)
        x = F.avg_pool1d(x, kernel_size=2, stride=2)
        # first convolutional layer
        x = F.avg_pool1d(self.bn1(F.selu(self.conv1(x))),
                         kernel_size=25,
                         stride=25)
        # second convolutional
        x = F.avg_pool1d(self.bn2(F.selu(self.conv2(x))),
                         kernel_size=4,
                         stride=4)
        x = x.view(x.size()[0], -1)  ## Flattening

        # Fully Connected layer
        x = F.selu(self.fc1(x))
        x = F.selu(self.fc2(x))
        x = F.selu(self.fc3(x))
        x = F.softmax(x, dim=1)
        return x
Example #7
 def enc_act(self, inputs):
     if self.enc_act == 'tanh':
         return F.tanh(inputs)
     elif self.enc_act == 'elu':
         return F.elu(inputs)
     elif self.enc_act == 'relu':
         return F.relu(inputs)
     elif self.enc_act == 'selu':
         return F.selu(inputs)
     elif self.enc_act == 'sigmoid':
         return F.sigmoid(inputs)
     else:
         return F.tanh(inputs)
    def forward(self, x):
        t, n = x.size(0), x.size(1)
        new_tuple = (t * n, ) + x.size()[2:]

        x = x.contiguous().view(new_tuple)

        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 1620)
        x = F.selu(self.fc1(x))
        new_tuple = (t, n) + x.size()[1:]
        x = x.contiguous().view(new_tuple)
        return x
    def forward(self, screen, variables):
        # cnn
        screen_features = F.max_pool2d(screen,
                                       kernel_size=(20, 20),
                                       stride=(20, 20))
        screen_features = F.selu(self.conv1(screen_features))
        screen_features = F.selu(self.conv2(screen_features))
        screen_features = F.selu(self.conv3(screen_features))
        screen_features = screen_features.view(screen_features.size(0), -1)

        # features
        input = self.screen_features1(screen_features)
        input = self.batch_norm(input)
        input = F.selu(input)

        # action
        action = F.selu(self.action1(input))
        action = torch.cat([action, variables], 1)
        action = self.batch_norm_action(action)
        action = self.action2(action)

        return action, input
    def forward(self, data):

        x, edge_index, batch_vec = data.x, data.edge_index, data.batch

        c1 = self.conv1(x, edge_index)
        c1 = F.selu(c1)
        c2 = self.conv2(c1, edge_index)
        c2 = F.selu(c2)
        c3 = self.conv3(c2, edge_index)
        c3 = F.selu(c3)
        c4 = self.conv4(c3, edge_index)
        c4 = F.selu(c4)
        sum_vector = global_add_pool(c4, batch=batch_vec)
        x = self.softmax(sum_vector)
        x = self.lin1(x)
        x = F.relu(x)
        x = self.dropout(x, training=self.training)
        x = self.lin2(x)
        x = F.relu(x)
        x = self.dropout(x, training=self.training)

        return x
    def forward(self, x):
        batchsize = x.size()[0]
        x = F.selu(self.conv1(x))
        x = F.selu(self.conv2(x))
        x = F.selu(self.conv3(x))
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)

        x = F.selu(self.fc1(x))
        x = F.selu(self.fc2(x))
        x = self.fc3(x)

        iden = Variable(
            torch.from_numpy(
                np.array([1, 0, 0, 0, 1, 0, 0, 0, 1
                          ]).astype(np.float32))).view(1,
                                                       9).repeat(batchsize, 1)
        if x.is_cuda:
            iden = iden.cuda()
        x = x + iden
        x = x.view(-1, 3, 3)
        return x
Example #12
    def decode(self, code):
        """
        Decider in the CAE.

        :param code: image in the latent space
        :return: image reconstructed.
        """
        reconstruction = F.selu(self.decode_lin_1(code))
        reconstruction = torch.sigmoid(self.decode_lin_2(reconstruction))
        reconstruction = reconstruction.view(
            (code.size(0), INPUT_CHANNELS, IMAGE_H, IMAGE_W))

        return reconstruction
Example #13
    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)

        #print(out.shape)
        out = out.view(out.size(0), -1)
        out = self.flat(out)
        out = F.dropout(out, p=0.3)
        out = F.selu(self.final(out))
        return out
 def encode(self, x):
     h = F.selu(self.conv1(x))
     h = F.selu(self.conv2(h))
     h = F.selu(self.conv3(h))
     h = F.selu(self.conv4(h))
     self.h_shape = h.shape
     h = h.view(-1, h.shape[1] * h.shape[2] * h.shape[3])
     h = F.selu(self.fc1(h))
     h = F.selu(self.fc2(h))
     h = F.selu(self.fc3(h))
     return h
Example #15
 def _get_features(self, x, mask=None):
     x = self.embedding(x)  # [L, B, E]
     if mask is None:
         x, _ = self.lstm(x)  # [L, B, 2H]
     else:
         lengths = [
             torch.nonzero(mask[:, i]).size(0) for i in range(mask.size(1))
         ]
         packed = pack_padded_sequence(x, lengths)
         x, _ = self.lstm(packed)
         x, _ = pad_packed_sequence(x)
     x = F.selu(self.fc(x))  # [L, B, T]
     return x
Example #16
    def forward(self, state, action, last_action, hidden_in):
        state = state.permute(1, 0, 2)
        #action = action.permute(1,0,2)
        last_action = last_action.permute(1, 0, 2)

        fc_branch1 = torch.cat([state, action], -1)
        fc_branch1 = F.selu(self.f1(fc_branch1)).permute(1, 0, 2)

        lstm_branch1 = torch.cat([state, last_action], -1)
        lstm_branch1 = F.selu(self.f2(lstm_branch1))
        lstm_branch1 = lstm_branch1.permute(1, 0, 2)
        self.lstm1.flatten_parameters()

        lstm_branch1, lstm_hidden1 = self.lstm1(lstm_branch1, hidden_in)

        merged_branch = torch.cat([fc_branch1, lstm_branch1], -1)

        Q1 = F.selu(self.f3(merged_branch))
        Q1 = self.f4(Q1)
        Q1 = Q1.permute(1, 0, 2)

        fc_branch2 = torch.cat([state, action], -1)
        fc_branch2 = F.relu(self.f1(fc_branch2)).permute(1, 0, 2)

        lstm_branch2 = torch.cat([state, last_action], -1)
        lstm_branch2 = F.relu(self.f2(lstm_branch2))
        lstm_branch2 = lstm_branch2.permute(1, 0, 2)
        self.lstm2.flatten_parameters()

        lstm_branch2, lstm_hidden2 = self.lstm2(lstm_branch2, hidden_in)

        merged_branch2 = torch.cat([fc_branch2, lstm_branch2], -1)

        Q2 = F.relu(self.f3(merged_branch2))
        Q2 = self.f4(Q2)
        Q2 = Q2.permute(1, 0, 2)

        return Q1, Q2, lstm_hidden1, lstm_hidden2
 def forward(self, x):
     x = F.selu(self.conv1(x))
     x = F.selu(self.conv2(x))
     x = F.max_pool2d(x, 2)
     x = F.selu(self.conv3(x))
     x = F.selu(self.conv4(x))
     x = F.max_pool2d(x, 2)
     x = F.selu(self.conv5(x))
     # x = F.selu(self.conv6(x))
     # x = F.selu(self.conv7(x))
     # x = F.max_pool2d(x, 2)
     # x = F.selu(self.conv8(x))
     # x = F.selu(self.conv9(x))
     # x = F.selu(self.conv10(x))
     # print(x.size())
     x = x.view(x.size(0), -1)
     # print(x.size())
     x = self.fc1(x)
     x = self.fc2(x)
     x = self.fc3(x)
     # print(x.size())
     return F.log_softmax(x, dim=1)
Example #18
    def forward(self, z, logp, h = None):
        self.masks[0] = Variable(torch.arange(0, self.z_dim).type(torch.LongTensor))
        if self.h_dim > 0:
            self.masks[0] = torch.cat([self.masks[0], Variable(torch.zeros(self.h_dim).type(torch.LongTensor))])

        for i in range(1, len(self.masks)):
            st = np.asscalar(np.min(self.masks[i - 1].data.numpy()))
            nd = self.z_dim - 1
            weights = torch.ones(nd - st) / (nd - st)
            self.masks[i] = Variable(torch.multinomial(weights, len(self.masks[i]), replacement = True)) + st
        
        if h is None: cur = z
        else: cur = torch.cat([z, h], dim=1)
        init_cur = cur # will be used for direct connection

        M_tot = Variable(torch.eye(self.z_dim))

        for i in range(len(self.w_list)):
            w = self.w_list[i]
            b = self.b_list[i]
            dim1, dim2 = w.data.shape
            mask1 = self.masks[i + 1].unsqueeze(1).expand(dim1, dim2)
            mask2 = self.masks[i].unsqueeze(0).expand(dim1, dim2)
            M = torch.ge(mask1, mask2).type(torch.FloatTensor)
            M_tot = M @ M_tot
            
            cur = F.selu(b + cur.mm((M * w).t()))
        
        dim1, dim2 = self.w_m.data.shape
        mask1 = Variable(torch.arange(0, self.z_dim).type(torch.LongTensor)).unsqueeze(1).expand(dim1, dim2)
        mask2 = self.masks[-1].unsqueeze(0).expand(dim1, dim2)
        M = torch.gt(mask1, mask2).type(torch.FloatTensor)
        
        dim1, dim2 = self.d_m.data.shape
        mask1 = Variable(torch.arange(0, self.z_dim).type(torch.LongTensor)).unsqueeze(1).expand(dim1, dim2)
        mask2 = self.masks[0].unsqueeze(0).expand(dim1, dim2)
        M_d = torch.gt(mask1, mask2).type(torch.FloatTensor)
        
        m = self.b_m + cur.mm((M * self.w_m).t()) + init_cur.mm((M_d * self.d_m).t())
        s = self.b_s + cur.mm((M * self.w_s).t()) + init_cur.mm((M_d * self.d_s).t())
        s = torch.sigmoid(s)

        M_tot = M @ M_tot
        
        z = s * z + (1 - s) * m
        # reversing order of z, like paper does
        idx = torch.LongTensor(range(self.z_dim - 1, -1, -1))
        z = z[:, idx]
        logp = logp - torch.sum(torch.log(s), 1)
        
        return z, logp
Example #19
    def forward(self, inputx, inputlabel, hebb):
        if self.params['activation'] == 'selu':
            activation = F.selu(self.cv1(inputx))
            activation = F.selu(self.cv2(activation))
            activation = F.selu(self.cv3(activation))
            activation = F.selu(self.cv4(activation))
        elif self.params['activation'] == 'relu':
            activation = F.relu(self.cv1(inputx))
            activation = F.relu(self.cv2(activation))
            activation = F.relu(self.cv3(activation))
            activation = F.relu(self.cv4(activation))
        elif self.params['activation'] == 'tanh':
            activation = F.tanh(self.cv1(inputx))
            activation = F.tanh(self.cv2(activation))
            activation = F.tanh(self.cv3(activation))
            activation = F.tanh(self.cv4(activation))
        else:
            raise ValueError("Parameter 'activation' is incorrect (must be tanh, relu or selu)")
        
        #activation = F.relu(self.cv1(inputx))
        #activation = F.relu(self.cv2(activation))
        #activation = F.relu(self.cv3(activation))
        #activation = F.relu(self.cv4(activation))
        
        activationin = activation.view(-1, self.params['nbfeatures'])
        #activationin = activation.view(-1, self.params['nbclasses'])
        


        #activation = activationin.mm(self.w + torch.mul(self.alpha, hebb)) + 10.0 * inputlabel # The expectation is that a nonzero inputlabel will overwhelm the inputs and clamp the outputs
        #activation = activationin.mm( torch.mul(self.alpha, hebb)) + 10.0 * inputlabel # The expectation is that a nonzero inputlabel will overwhelm the inputs and clamp the outputs
        activation = activationin.mm( self.alpha * hebb) + 1000.0 * inputlabel # The expectation is that a nonzero inputlabel will overwhelm the inputs and clamp the outputs
        activationout = F.softmax(activation, dim=1)
        
        #activationout = F.softmax( activationin )
        
        hebb = (1 - self.eta) * hebb + self.eta * torch.bmm(activationin.unsqueeze(2), activationout.unsqueeze(1))[0] # bmm used to implement outer product; remember activations have a leading singleton dimension
        return activationout, hebb
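The bmm call in the Hebbian update above is an outer product of the pre- and post-synaptic activations (both carry a leading singleton batch dimension); a quick self-contained check with illustrative shapes (torch.outer needs a reasonably recent PyTorch):

import torch

yin = torch.rand(1, 5)    # stand-in for activationin
yout = torch.rand(1, 3)   # stand-in for activationout
outer = torch.bmm(yin.unsqueeze(2), yout.unsqueeze(1))[0]   # shape (5, 3)
assert torch.allclose(outer, torch.outer(yin[0], yout[0]))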
 def forward(self, x):
     """  """
     # dimension Batch x Channel x Width x Height
     # down
     r1 = self.RBD1.forward(x)
     x = F.max_pool2d(r1, kernel_size=2, stride=2)
     r2 = self.RBD2.forward(x)
     x = F.max_pool2d(r2, kernel_size=2, stride=2)
     r3 = self.RBD3.forward(x)
     x = F.max_pool2d(r3, kernel_size=2, stride=2)
     x = self.RBD4.forward(x)
     # up
     #x = F.interpolate(x, scale_factor=2, mode='bilinear')
     x = F.selu(self.convT2(x))
     x = self.RBU2.forward(torch.cat((r3, x), dim=1))
     #x = F.interpolate(x, scale_factor=2, mode='bilinear')
     x = F.selu(self.convT3(x))
     x = self.RBU3.forward(torch.cat((r2, x), dim=1))
     #x = F.interpolate(x, scale_factor=2, mode='bilinear')
     x = F.selu(self.convT4(x))
     x = self.RBU4.forward(torch.cat((r1, x), dim=1))
     x = F.selu(self.convFinal(x))
     return x
Example #21
    def forward(self, input):

        batch_size = input.size(0)

        should_view = False
        if len(input.size()) == 2:
            input = input.view(batch_size, self.size, -1)
            should_view = True

        for layer in self.conv:
            input = layer(input) + input
            input = F.selu(input)

        return input.view(batch_size, -1) if should_view else input
Example #22
    def forward(self, x_in, A):

        x = x_in
        output = []
        for i in range(self.num_stats_out):
            out = torch.spmm(A, x) + x
            for j in range(self.num_gfc_layers):
                out = self.stat_layers[i][j](out)
                out = F.selu(out)
            output.append(out)
            x = torch.mul(x, x_in)

        output = torch.cat(output, 1)
        return output
    def forward(self, x):
        x = x.permute(1, 0, 2, 3)
        collect = torch.Tensor().type(torch.FloatTensor).to(self.device)
        for idx in x:
            layer_1, _ = self.time_slot_GRU_1(idx)
            layer_1 = self.time_slot_Linear_1(
                self.drop(layer_1.contiguous().view(-1, layer_1.size(2))))
            layer_1 = F.selu(self.norm_1(layer_1.view(-1, idx.size(1), 256)))

            layer_2 = self.time_slot_Linear_3(self.drop(layer_1))
            layer_2 = self.time_slot_MaxPool(layer_2).squeeze(2)
            collect = torch.cat((collect, layer_2.unsqueeze(0)), dim=0)

        last_two = collect[collect.size(0) - 2:]

        collect = collect.permute(1, 0, 2)

        last_two, _ = self.last_two_GRU_1(last_two)
        last_two = self.last_two_Linear_1(last_two.contiguous().view(
            -1, last_two.size(2)))
        last_two = last_two.view(-1, 2, 256)

        layer_3, _ = self.week_GRU_1(collect)
        layer_3 = self.week_linear_1(layer_3.contiguous().view(
            -1, layer_3.size(2)))
        layer_3 = F.selu(self.norm_3(layer_3.view(-1, collect.size(1), 256)))

        layer_4 = (torch.cat((layer_3, last_two), dim=1))
        layer_4 = self.MaxPool(layer_4).squeeze(2)

        x = self.norm_4(layer_4)
        x = self.drop(x)
        x = self.highway_1(x)
        x = self.drop(x)
        x = self.linear_5(x)

        return x
Example #24
 def forward(self, input):
     # separate images from input
     img1 = input.narrow(1, 0, 1)
     img2 = input.narrow(1, 1, 1)
     # architecture of the neural network with activation function SELU
     x = F.selu(
         F.max_pool2d(self.bn1(self.conv1(img1)), kernel_size=2, stride=2))
     x = F.selu(
         F.max_pool2d(self.bn2(self.conv2(x)), kernel_size=2, stride=2))
     x = F.selu(self.fc1(x.view(-1, 256)))
     x = self.fc2(x)
     # weight sharing
     if self.weight_sharing:
         y = F.selu(
             F.max_pool2d(self.bn1(self.conv1(img2)),
                          kernel_size=2,
                          stride=2))
         y = F.selu(
             F.max_pool2d(self.bn2(self.conv2(y)), kernel_size=2, stride=2))
         y = F.selu(self.fc1(y.view(-1, 256)))
         y = self.fc2(y)
     else:
         y = F.selu(
             F.max_pool2d(self.bn1_(self.conv1_(img2)),
                          kernel_size=2,
                          stride=2))
         y = F.selu(
             F.max_pool2d(self.bn2_(self.conv2_(y)),
                          kernel_size=2,
                          stride=2))
         y = F.selu(self.fc1_(y.view(-1, 256)))
         y = self.fc2_(y)
     #concatenation
     xy = torch.cat((x, y), 1)
     z = F.selu(self.fc_comp1(xy))
     z = self.fc_comp2(z)
     return x, y, z
Example #25
    def forward(self, x):
        original = F.selu(self.original_conv1(x))
        original = F.selu(self.original_conv2(original))
        # Encoder
        layer1 = self.layer1(x)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        layer5 = self.layer5(layer4)
        layer5 = self.layer5_up(layer5)
        # Decoder L5
        x = self.upsample(layer5)
        layer4 = F.selu(self.layer4_up(layer4))

        x = torch.cat([x, layer4], dim=1)
        x = F.selu(self.conv5_up(x))
        # L4
        x = self.upsample(x)
        layer3 = F.selu(self.layer3_up(layer3))
        x = torch.cat([x, layer3], dim=1)
        x = F.selu(self.conv4_up(x))
        # L3
        x = self.upsample(x)
        layer2 = F.selu(self.layer2_up(layer2))
        x = torch.cat([x, layer2], dim=1)
        x = F.selu(self.conv3_up(x))
        # L2
        x = self.upsample(x)
        layer1 = F.selu(self.layer1_up(layer1))
        x = torch.cat([x, layer1], dim=1)
        x = F.selu(self.conv2_up(x))
        # L1
        x = self.upsample(x)
        x = torch.cat([x, original], dim=1)
        x = self.conv1_up(x)

        return x
Example #26
    def forward(self, img, text):
        text_result = self.text_model(text)

        print(img.shape)

        batch_size, frames, c, h, w = img.shape

        inter_conv = []
        for i in range(frames):
            inter_input = torch.reshape(img[:, i, :, :, :], (batch_size, c, h, w))
            out = self.mm.max_pool(F.selu(self.mm.conv1(inter_input)))
            out = F.selu(self.mm.conv2(out))
            inter_conv.append(out)

        inter_conv = torch.cat(inter_conv, axis=1)
        conv = self.mm.max_pool(F.selu(self.mm.batch1(self.mm.conv3(inter_conv))))
        conv = self.mm.max_pool(F.selu(self.mm.batch2(self.mm.conv4(conv))))
        g_pool = conv.mean(axis=(2,3))
        lin = F.selu(self.mm.lin1(g_pool))
        print('img', lin.shape)
        lin = lin * text_result
        #lin = F.selu(self.lin1(g_pool))
        out = self.mm.output(lin)
        return out
Example #27
    def forward(self, X, A_hat):
        """
        :param X: Input data of shape (batch_size, num_nodes, num_timesteps)
        :param A_hat: The normalized adjacency matrix (num_nodes, num_nodes)
        :return: Output data of shape (batch_size, num_nodes, num_features)
        """
        features = torch.einsum("kk,bkj->bkj", [A_hat, X])
        t2 = torch.einsum("bkj,jh->bkh", [features, self.Theta1])
        t2 += self.bias
        if self.activation == 'relu':
            t2 = F.relu(t2)
        if self.activation == 'selu':
            t2 = F.selu(t2)

        return t2
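Note that the einsum string "kk,bkj->bkj" uses only the diagonal of A_hat: each node's features are scaled by A_hat[k, k], with no mixing across nodes. A small self-contained check of what it computes (shapes here are illustrative):

import torch

A_hat = torch.rand(3, 3)
X = torch.rand(2, 3, 4)  # (batch_size, num_nodes, num_timesteps)
out = torch.einsum("kk,bkj->bkj", [A_hat, X])
assert torch.allclose(out, torch.diag(A_hat).view(1, 3, 1) * X)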
    def forward(self, inputx, inputlabel, hebb):
        if self.params['activ'] == 'selu':
            activ = F.selu(self.cv1(inputx))
            activ = F.selu(self.cv2(activ))
            activ = F.selu(self.cv3(activ))
            activ = F.selu(self.cv4(activ))
        elif self.params['activ'] == 'relu':
            activ = F.relu(self.cv1(inputx))
            activ = F.relu(self.cv2(activ))
            activ = F.relu(self.cv3(activ))
            activ = F.relu(self.cv4(activ))
        elif self.params['activ'] == 'tanh':
            activ = F.tanh(self.cv1(inputx))
            activ = F.tanh(self.cv2(activ))
            activ = F.tanh(self.cv3(activ))
            activ = F.tanh(self.cv4(activ))
        else:
            raise ValueError("Parameter 'activ' is incorrect (must be tanh, relu or selu)")
        #activ = F.tanh(self.conv2plast(activ.view(1, self.params['nbfeatures'])))
        #activin = activ.view(-1, self.params['plastsize'])
        activin = activ.view(-1, self.params['nbfeatures'])
        
        if self.params['alpha'] == 'free':
            activ = activin.mm( torch.mul(self.alpha, hebb)) + 1000.0 * inputlabel # The expectation is that a nonzero inputlabel will overwhelm the inputs and clamp the outputs
        elif self.params['alpha'] == 'yoked':
            activ = activin.mm( self.alpha * hebb) + 1000.0 * inputlabel # The expectation is that a nonzero inputlabel will overwhelm the inputs and clamp the outputs
        activout = F.softmax(activ, dim=1)
        
        if self.rule == 'hebb':
            hebb = (1 - self.eta) * hebb + self.eta * torch.bmm(activin.unsqueeze(2), activout.unsqueeze(1))[0] # bmm used to implement outer product; remember activs have a leading singleton dimension
        elif self.rule == 'oja':
            hebb = hebb + self.eta * torch.mul((activin[0].unsqueeze(1) - torch.mul(hebb , activout[0].unsqueeze(0))) , activout[0].unsqueeze(0))  # Oja's rule. Remember that yin, yout are row vectors (dim (1,N)). Also, broadcasting!
        else:
            raise ValueError("Must select one learning rule ('hebb' or 'oja')")

        return activout, hebb
Example #29
    def forward(self, x):
        x = F.selu(self.fc1(x))
        # x = F.selu(self.fc2(x))
        # x = F.selu(self.fc3(x))

        # t, n = out.size(0), out.size(1)
        # new_tuple = (t * n,) + out.size()[2:]
        # out = out.view(new_tuple)

        # out = out.permute(0, 2, 1).contiguous()
        # out = self.bnd(out)
        # out = out.permute(0, 2, 1).contiguous()
        # out = F.selu(out)
        # out = out.view(t, n, 50).contiguous()
        return x
Example #30
    def forward(self, x):
        x = F.relu(self.conv_1(x))
        x = self.max_pool_1(x)

        x = F.relu(self.conv_2(x))
        x = self.max_pool_2(x)

        x = x.view(-1, 16*4*4)

        x = F.relu(self.fc1(x))
        x = F.selu(self.fc2(x))
        
        out = F.log_softmax(self.fc3(x), dim=1)

        return out
Example #31
 def dec_act(self, inputs):
     if self.args.dec_act == 'tanh':
         return F.tanh(inputs)
     elif self.args.dec_act == 'elu':
         return F.elu(inputs)
     elif self.args.dec_act == 'relu':
         return F.relu(inputs)
     elif self.args.dec_act == 'selu':
         return F.selu(inputs)
     elif self.args.dec_act == 'sigmoid':
         return F.sigmoid(inputs)
     elif self.args.dec_act == 'linear':
         return inputs
     else:
         return inputs
Example #32
def test_selu_activation(N=None):
    from activations import SELU

    N = np.inf if N is None else N

    mine = SELU()
    gold = lambda z: F.selu(torch.FloatTensor(z)).numpy()

    i = 0
    while i < N:
        n_dims = np.random.randint(1, 100)
        z = random_stochastic_matrix(1, n_dims)
        assert_almost_equal(mine.fn(z), gold(z))
        print("PASSED")
        i += 1
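For reference, F.selu computes scale * (max(0, z) + min(0, alpha * (exp(z) - 1))) with alpha ≈ 1.6733 and scale ≈ 1.0507. A minimal NumPy cross-check against PyTorch, independent of the SELU class being tested above (the helper name selu_ref is illustrative):

import numpy as np
import torch
import torch.nn.functional as F

def selu_ref(z, alpha=1.6732632423543772, scale=1.0507009873554805):
    # scale * (z if z > 0 else alpha * (exp(z) - 1))
    return scale * np.where(z > 0, z, alpha * (np.exp(z) - 1.0))

z = np.random.randn(4, 8).astype(np.float32)
np.testing.assert_allclose(selu_ref(z), F.selu(torch.from_numpy(z)).numpy(), rtol=1e-5)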
Example #33
    def forward(self, x, y=None):
        batchsize = x.shape[0]
        size_x, size_y = x.shape[1], x.shape[2]

        length = len(self.ws)
        x = self.fc0(x)
        x = x.permute(0, 3, 1, 2)

        for i, (speconv, w) in enumerate(zip(self.sp_convs, self.ws)):
            if i != length - 1:
                x1 = speconv(x)
                x2 = w(x.view(batchsize, self.layers[i], -1))\
                    .view(batchsize, self.layers[i+1], size_x, size_y)
                x = x1 + x2
                x = F.selu(x)
            else:
                x1 = speconv(x, y).reshape(batchsize, self.layers[-1], -1)
                x2 = w(x, y).reshape(batchsize, self.layers[-1], -1)
                x = x1 + x2
        x = x.permute(0, 2, 1)
        x = self.fc1(x)
        x = F.selu(x)
        x = self.fc2(x)
        return x
    def act_fun(self, x):
        if self.opts['activation_fun'] == 'relu':
            activate_f = F.relu(x)
        elif self.opts['activation_fun'] == 'tanh':
            activate_f = F.tanh(x)
        elif self.opts['activation_fun'] == 'elu':
            activate_f = F.elu(x)
        elif self.opts['activation_fun'] == 'selu':
            activate_f = F.selu(x)
        elif self.opts['activation_fun'] == 'leaky_relu':
            activate_f = F.leaky_relu(x)
        elif self.opts['activation_fun'] == 'sigmoid':
            activate_f = F.sigmoid(x)
        else:
            raise ValueError("Unknown 'activation_fun': {}".format(self.opts['activation_fun']))

        return activate_f