Example No. 1
    def get_hidden_values(self, input):
        """ Computes the values of the hidden layer.

            Arguments:
                input: the data to feed through the input layer to the
                    hidden layer.

            Returns:
                The encoded (lower-dimensional) feature vector for a given
                ASIN, which is supplied as a one-hot encoding vector.
        """

        # Accept either a dense tensor (converted to sparse CSC) or an
        # already-sparse variable; isinstance also matches subclasses
        # such as shared variables.
        if isinstance(input, T.TensorVariable):
            sparse_input = basic.csc_from_dense(input)
        else:
            sparse_input = input
        
        # Structured dot of the sparse input with the weight matrix.
        lin_output = basic.dot(sparse_input, self.W)
        if self.activation is None and self.b is None:
            return lin_output
        elif self.activation is None:
            return lin_output + self.b
        elif self.b is None:
            return self.activation(lin_output)
        else:
            return self.activation(lin_output + self.b)
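A minimal NumPy/SciPy sketch of what this encode step computes for a one-hot input, assuming a sigmoid activation; d_in, d_hidden, W_vals, and b_vals are illustrative stand-ins for the layer's parameters:

import numpy as np
import scipy.sparse as sp

d_in, d_hidden = 10, 4
rng = np.random.default_rng(0)
W_vals = rng.standard_normal((d_in, d_hidden))  # stand-in for self.W
b_vals = np.zeros(d_hidden)                     # stand-in for self.b

x = sp.csc_matrix(np.eye(d_in)[[3]])            # one-hot row for item #3
z = np.asarray(x @ W_vals) + b_vals             # x.W + b
hidden = 1.0 / (1.0 + np.exp(-z))               # sigmoid activation
print(hidden.shape)                             # (1, 4)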
Example No. 2
    def output(self, seq_output=True):
        x = self.layer_below.output(seq_output=seq_output)
        # (An input-dropout step was stubbed out here but never finished;
        # it is omitted.)

        # Convolve: transform the features with the weights, then
        # propagate along the adjacency matrix. The original loop over
        # multiple adjacency supports is reduced to a single support.
        if not self.featureless:
            if self.sparse_inputs:
                pre_sup = sp.dot(x, self.W)
            else:
                pre_sup = T.dot(x, self.W)
        else:
            # Featureless case: X is implicitly the identity, so X.W == W.
            pre_sup = self.W
        support = T.dot(self.adjacency, pre_sup)  # e.g. (5x5).((5x10).(10x5)) -> 5x5
        output = support

        if self.bias:
            output += self.b
        return self.activation(output)
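The convolve step above is the standard graph-convolution propagation activation(A.(X.W) + b). A minimal NumPy sketch with the shapes hinted at in the original comment (A: 5x5, X: 5x10, W: 10x5); all values are illustrative:

import numpy as np

rng = np.random.default_rng(0)
A = rng.random((5, 5))            # (normalized) adjacency matrix, 5x5
X = rng.standard_normal((5, 10))  # node features, 5x10
W = rng.standard_normal((10, 5))  # layer weights, 10x5
b = np.zeros(5)

pre_sup = X @ W                   # transform features: 5x5
support = A @ pre_sup             # propagate along edges: 5x5
out = np.maximum(support + b, 0.0)  # e.g. ReLU activation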
Example No. 3
    def get_hidden_values(self, input):
        # Accept either a dense tensor (converted to sparse CSC) or an
        # already-sparse variable.
        if isinstance(input, T.TensorVariable):
            sparse_input = basic.csc_from_dense(input)
        else:
            sparse_input = input

        lin_output = basic.dot(sparse_input, self.W)
        if self.activation is None and self.b is None:
            return lin_output
        elif self.activation is None:
            return lin_output + self.b
        elif self.b is None:
            return self.activation(lin_output)
        else:
            return self.activation(lin_output + self.b)
Example No. 4
def SpDotTransferFunction(x, W, b):
    # Sparse dot transfer: x.W, plus the bias when one is given.
    if b is not None:
        return ST.dot(x, W) + b
    else:
        return ST.dot(x, W)
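Assuming ST is theano.sparse, a hedged usage sketch: build a symbolic sparse input, feed it through the transfer function, and compile. The variable names and shapes are illustrative:

import numpy as np
import scipy.sparse as ssp
import theano
import theano.sparse as ST

x = ST.csr_matrix('x')                        # symbolic sparse input
W = theano.shared(np.ones((10, 4)), name='W')
b = theano.shared(np.zeros(4), name='b')

y = SpDotTransferFunction(x, W, b)            # x.W + b
f = theano.function([x], y)
print(f(ssp.csr_matrix(np.eye(10)[:2])))      # two one-hot rows -> (2, 4)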
Example No. 5
    def perform(self, node, inputs, outputs):
        # Python-3 style: the tuple parameters of the original
        # (x, y, p) / (out,) signature are unpacked in the body.
        (x, y, p) = inputs
        (out,) = outputs
        if _is_sparse_variable(x):
            raise TypeError(x)

        if _is_sparse_variable(y):
            raise TypeError(y)

        if not _is_sparse(p):
            raise TypeError(p)

        # Sample the dense product x.y^T at the sparsity pattern of p.
        rval = p.__class__(p.multiply(numpy.dot(x, y.T)))

        out[0] = rval

    def grad(self, inputs, grads):
        (x, y, p) = inputs
        (gz,) = grads
        # z = p * (x.y^T): mask the upstream gradient by p, matching the
        # corrected revisions in the later examples.
        rval = [
            dot(p * gz, y),
            dot(p.T * gz.T, x),
            None
        ]

        return rval
sampling_dot = SamplingDot()


class SamplingDotCsr(gof.Op):
    """
    Optimized SamplingDot when the pattern P is a CSR matrix.

    If the inputs have mixed dtypes, we insert elemwise casts in the graph
    so that the BLAS functions can be called, since they do not accept
    mixed dtypes.
    """
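In plain SciPy terms, perform() evaluates the dense product x.y^T only at the sparsity pattern of p. A minimal sketch mirroring the line rval = p.__class__(p.multiply(numpy.dot(x, y.T))); all values are illustrative:

import numpy as np
import scipy.sparse as sp

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 3))
y = rng.standard_normal((5, 3))
p = sp.random(4, 5, density=0.3, format='csr', random_state=0)

z = p.__class__(p.multiply(np.dot(x, y.T)))   # sample x.y^T at p's pattern
assert sp.issparse(z) and z.shape == (4, 5)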
Example No. 6
def SpDotTransferFunction(x, W, b):
    if b is not None:
        return ST.dot(x, W) + b
    else:
        return ST.dot(x, W)
Example No. 7
    def perform(self, node, inputs, outputs):
        (x, y, p) = inputs
        (out,) = outputs
        if _is_sparse_variable(x):
            raise TypeError(x)

        if _is_sparse_variable(y):
            raise TypeError(y)

        if not _is_sparse(p):
            raise TypeError(p)

        rval = p.__class__(p.multiply(numpy.dot(x, y.T)))

        out[0] = rval

    def grad(self, inputs, grads):
        (x, y, p) = inputs
        (gz,) = grads
        rval = [
            dot(p * gz, y),
            dot(p.T * gz.T, x),
            None
        ]

        return rval
sampling_dot = SamplingDot()


class SamplingDotCsr(gof.Op):
    """
    Optimized SamplingDot when the pattern P is a CSR matrix.

    If the inputs have mixed dtypes, we insert elemwise casts in the graph
    so that the BLAS functions can be called, since they do not accept
    mixed dtypes.
    """
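Why the gradient carries the extra factor of p: since z = p * (x.y^T) elementwise, only the sampled entries contribute, so the upstream gradient gz is masked by p before being propagated. A quick numerical check of the formula used in grad() above; all values are illustrative:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 3))
y = rng.standard_normal((5, 3))
p = (rng.random((4, 5)) < 0.5).astype(float)  # 0/1 sampling pattern
gz = rng.standard_normal((4, 5))              # upstream gradient

gx = (p * gz) @ y                             # dot(p * gz, y)

# Finite-difference check of one entry of dL/dx with L = sum(gz * z).
eps = 1e-6
loss = lambda x_: np.sum(gz * (p * (x_ @ y.T)))
x2 = x.copy()
x2[1, 2] += eps
assert np.isclose((loss(x2) - loss(x)) / eps, gx[1, 2], atol=1e-4)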
Example No. 8
    def perform(self, node, inputs, outputs):
        (x, y, p) = inputs
        (out,) = outputs
        if _is_sparse_variable(x):
            raise TypeError(x)

        if _is_sparse_variable(y):
            raise TypeError(y)

        if not _is_sparse(p):
            raise TypeError(p)

        rval = p.__class__(p.multiply(numpy.dot(x, y.T)))

        out[0] = rval

    def grad(self, inputs, grads):
        (x, y, p) = inputs
        (gz,) = grads
        rval = [dot(p * gz, y), dot(p.T * gz.T, x), None]

        return rval


sampling_dot = SamplingDot()


class SamplingDotCsr(gof.Op):
    """
    Optimized SamplingDot when the pattern P is a CSR matrix.

    If the inputs have mixed dtypes, we insert elemwise casts in the graph
    so that the BLAS functions can be called, since they do not accept
    mixed dtypes.

    """