Code Example #1
    def __call__(self, x):
        size = int(np.sqrt(x.shape[1].value))
        assert (size * size == x.shape[1].value)
        x = K.reshape(x, (-1, size, size))
        xxt = K.batch_dot(x, x, axes=(2, 2))
        regularization = 0.0
        if self.l1:
            regularization += K.sum(self.l1 * K.abs(xxt - K.eye(size)))
        if self.l2:
            regularization += K.sum(self.l2 * K.square(xxt - K.eye(size)))

        return regularization
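This regularizer reshapes each row of the weight into a square matrix X and penalizes how far X·Xᵀ drifts from the identity. For reference, a self-contained sketch of the same idea; the function name and default coefficient are illustrative (not from the original project), and tf.eye stands in for K.eye so no backend variable is created inside the loss:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

def square_ortho_penalty(w, l2=1e-3):
    # Treat each row of `w` as a flattened square matrix; like the
    # assert above, this assumes the row length is a perfect square.
    size = int(np.sqrt(int(w.shape[1])))
    w = K.reshape(w, (-1, size, size))
    wwt = K.batch_dot(w, w, axes=(2, 2))
    return K.sum(l2 * K.square(wwt - tf.eye(size)))

# Usage sketch: any callable is accepted as a Keras regularizer.
layer = tf.keras.layers.Dense(16, kernel_regularizer=square_ortho_penalty)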
Code Example #2
    def ortho_reg(weight_matrix):
        # orthogonal regularization for aspect embedding matrix
        w_n = weight_matrix / K.cast(K.epsilon() + K.sqrt(K.sum(K.square(weight_matrix), axis=-1, keepdims=True)),
                                     K.floatx())
        reg = K.sum(K.square(K.dot(w_n, K.transpose(w_n)) - K.eye(w_n.shape[0].value)))

        return args.ortho_reg * reg
Code Example #3
File: glam.py Project: psorus/grapa
    def call(self, x):
        mat = x[0]  #Matrix A
        val = x[1]

        eye = K.eye(self.gs)

        p1 = kron(eye, self.selfintact)
        p2 = kron_b1fx1(mat, self.neigintact)

        p = p1 + p2

        v = K.reshape(val, (-1, self.gs * self.param))
        p = K.reshape(p, (-1, self.gs * self.param, self.gs * self.param))

        #print("p",p.shape,"v",v.shape)
        #exit()
        #does not yet work, how to define products with 2 batch dimensions
        #a bit inelegant if you ask me

        for i in range(self.iterations):
            v = K.batch_dot(p, v)
            if self.activate:
                v = self.advrelu(v, self.activation)
        #print("v",v.shape,"a",self.a,"gs",self.gs,"param",self.param)
        #exit()

        ret = K.reshape(
            v,
            (-1, self.a, self.gs,
             self.param))  # no idea if the orientation is right, but it seems to fit

        return ret
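The kron and kron_b1fx1 helpers are project-specific to grapa and not shown here. As a point of reference only, a Kronecker product whose left factor carries a batch dimension can be written with plain broadcasting; this generic sketch assumes static shapes and is not the project's implementation:

import tensorflow as tf

def kron_batched(a, b):
    # Kronecker product of a batched matrix a (B, m, n) with a fixed
    # matrix b (p, q), giving (B, m*p, n*q); assumes m, n, p, q static.
    m, n = a.shape[1], a.shape[2]
    p, q = b.shape[0], b.shape[1]
    out = a[:, :, None, :, None] * b[None, None, :, None, :]
    return tf.reshape(out, (-1, m * p, n * q))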
Code Example #4
File: trainmodel.py Project: jgrant79/ai-challenge
def convertTraining(training, numVertices, numInputs):
    """
    Convert training data to categorical forms
    """
    converted = []
    for entry in training:
        inputs, labels = np.hsplit(entry, (numInputs, ))
        labels = np.matrix(labels)
        vertexTypes, denseGraph = np.hsplit(inputs, (numVertices, ))
        #Convert node types into categories
        vertexCategories = np.zeros((numVertices, len(mg.NODE_TYPES)))
        for i, t in enumerate(vertexTypes):
            vertexCategories[i, int(t)] = 1

        #Separate labels into circuit types and node inclusion labels
        classLabels, inclusionLabels = np.hsplit(labels, (1, ))
        inclusionLabels = inclusionLabels.reshape(inclusionLabels.shape[1],
                                                  inclusionLabels.shape[0])

        #Convert dense graph into adjacency matrix
        graph = buildGraph(denseGraph, denseGraph.shape[0])

        #Add identity matrix to graph
        I = K.eye(graph.shape[0])
        features = K.concatenate((I, vertexCategories))
        spectral = SpectralLayer(graph,
                                 features.shape[1],
                                 activation=tf.nn.relu,
                                 input_shape=features.shape,
                                 useWeights=False)
        converted.append((graph, spectral, features, classLabels))
    return converted
Code Example #5
    def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x

        outp_a = K.l2_normalize(inp_a, -1)
        outp_b = K.l2_normalize(inp_b, -1)
        alpha = K.batch_dot(outp_b, outp_a, axes=[2, 2])
        alpha = K.l2_normalize(alpha, 1)
        alpha = K.one_hot(K.argmax(alpha, 1), K.int_shape(inp_a)[1])
        hmax = K.batch_dot(alpha, outp_b, axes=[1, 1])
        kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')

        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_hmax = hmax * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_hmax = K.l2_normalize(outp_hmax, -1)
            outp = K.batch_dot(outp_hmax, outp_a, axes=[2, 2])
            outp = K.sum(outp * kcon, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp]
Code Example #6
File: flow_layers.py Project: mthaha123/nice_glow
 def build(self, input_shape):
     if self.kernel is None:
         (p, l, u, u_diag_sign, u_diag_abs_log,
             l_mask, u_mask) = self.initializer(input_shape)
         self.kernel_p = self.add_weight(name='kernel_p',
                                         shape=p.shape,
                                         initializer=lambda _: p,
                                         trainable=False)
         self.kernel_l = self.add_weight(name='kernel_l',
                                         shape=l.shape,
                                         initializer=lambda _: l,
                                         trainable=True)
         self.kernel_u = self.add_weight(name='kernel_u',
                                         shape=u.shape,
                                         initializer=lambda _: u,
                                         trainable=True)
         self.kernel_u_diag_sign = self.add_weight(name='kernel_u_diag_sign',
                                                      shape=u_diag_sign.shape,
                                                      initializer=lambda _: u_diag_sign,
                                                      trainable=False)
         self.kernel_u_diag_abs_log = self.add_weight(name='kernel_u_diag_abs_log',
                                                      shape=u_diag_abs_log.shape,
                                                      initializer=lambda _: u_diag_abs_log,
                                                      trainable=True)
         self.kernel_l = self.kernel_l * l_mask + K.eye(input_shape[-1])
         self.kernel_u = self.kernel_u * u_mask + K.tf.diag(
             self.kernel_u_diag_sign * K.exp(self.kernel_u_diag_abs_log))
         self.kernel = K.dot(K.dot(self.kernel_p, self.kernel_l),
                             self.kernel_u)
     super(InvDense, self).build(input_shape)
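One payoff of this Glow-style P·L·U parameterization is that the flow's log-determinant term reduces to a sum: P is a permutation and L has a unit diagonal, so log|det W| = sum(u_diag_abs_log). A small numpy check of that identity (standalone, with made-up values; the snippet's own log-det bookkeeping is not shown):

import numpy as np

rng = np.random.default_rng(0)
u_diag_abs_log = rng.normal(size=3)
signs = np.array([1.0, -1.0, 1.0])                      # arbitrary signs
U = np.triu(rng.normal(size=(3, 3)), k=1) + np.diag(signs * np.exp(u_diag_abs_log))
L = np.tril(rng.normal(size=(3, 3)), k=-1) + np.eye(3)  # unit diagonal
P = np.eye(3)[[2, 0, 1]]                                # a permutation
W = P @ L @ U
print(np.log(abs(np.linalg.det(W))), u_diag_abs_log.sum())  # both equal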
Code Example #7
File: glom.py Project: psorus/grapa
    def call(self, x):
        mat = x[0]  #Matrix A
        val = x[1]

        N = self.neigintact
        S = self.selfintact

        print("N", N.shape)
        print("S", S.shape)

        t1 = self.tp(S, K.eye(self.gs))

        print("t1", t1.shape)

        exit()

        for i in range(self.iterations):
            weignei = K.batch_dot(mat, val)  #Neighbours of the current nodes

            parta = K.dot(weignei, self.neigintact)  #Neighbour part
            partb = K.dot(val, self.selfintact)  #Self Interaction Part
            val = parta + partb

            if self.activate:
                val = self.advrelu(val, self.activation)

        return val
Code Example #8
    def build(self, input_shape):
        import tensorflow_probability as tfp
        dimensionality = input_shape[1]

        self.mvn = tfp.distributions.MultivariateNormalFullCovariance(
            loc=K.zeros(dimensionality),
            covariance_matrix=K.eye(dimensionality))
        super().build(input_shape)
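A hedged usage sketch of what build() sets up here: the layer's call() (not shown) can score or sample activations under a standard multivariate normal. Note that MultivariateNormalFullCovariance is deprecated in newer TFP releases in favor of MultivariateNormalTriL, so treat this as illustrative:

import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import backend as K

dim = 4
mvn = tfp.distributions.MultivariateNormalFullCovariance(
    loc=K.zeros(dim), covariance_matrix=K.eye(dim))

x = tf.random.normal((8, dim))
print(mvn.log_prob(x).shape)  # (8,) - one log-density per row
print(mvn.sample(3).shape)    # (3, 4)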
Code Example #9
File: rmn.py Project: Reese565/speech_polarization
    def __call__(self, R):
        """Returns a component dependence penalty for matrix R
        """
        RRT = K.dot(R, K.transpose(R))
        I = K.eye(RRT.shape.as_list()[0])
        penalty = self.lamb * K.sqrt(K.sum(K.square(RRT - I)))

        return penalty
Code Example #10
 def uncorrelated_feature(self, x):
     if (self.encoding_dim <= 1):
         return 0.0
     else:
         output = K.sum(
             K.square(self.covariance - tf.math.multiply(
                 self.covariance, K.eye(self.encoding_dim))))
         return output
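Since covariance * K.eye(d) keeps only the diagonal, the subtraction leaves exactly the off-diagonal entries, so the penalty is the sum of squared cross-covariances between encoded features. A tiny numeric check with numpy stand-ins:

import numpy as np

cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
off_diag = cov - cov * np.eye(2)  # zero out the diagonal
print(np.sum(off_diag ** 2))      # 0.18 == 2 * 0.3**2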
Code Example #11
def test_make_soft(_log, train_with_soft_target_stdev, _config):
    if train_with_soft_target_stdev is None:
        _config['train_with_soft_target_stdev'] = 1
    y_true = K.reshape(K.eye(512)[:2 * 129, :256], (2, 129, 256))  # 258 rows, so the reshape is valid
    y_soft = make_soft(y_true)
    f = K.function([], y_soft)
    _log.info('Output of soft:')
    f1 = f([])

    _log.info(f1[0, 0])
    _log.info(f1[-1, -1])
Code Example #12
File: operations.py Project: bartekwojcik/BigGAN
    def regularization(w):
        #_, _, _, c = w.get_shape().as_list()
        _, _, _, c = K.int_shape(w)

        w = K.reshape(w, [-1, c])

        w_t = K.transpose(w)
        identity = K.eye(c)

        w_t_w = K.dot(w_t, w)
        subtraction = w_t_w - identity

        loss = tf.nn.l2_loss(subtraction)

        return scale * loss
Code Example #13
File: lmu.py Project: ino09/lmu
    def build(self, input_shape):
        """
        Initializes various network parameters.
        """

        input_dim = input_shape[-1]

        # TODO: add regularizers

        self.encoders = self.add_weight(
            name="encoders",
            shape=(input_dim, self.units),
            initializer=self.encoder_initializer,
            trainable=self.trainable_encoders,
        )

        self.dt = self.add_weight(
            name="dt",
            shape=(1, ),
            initializer=self.dt_initializer,
            trainable=self.trainable_dt,
        )

        self.decoders = self.add_weight(
            name="decoders",
            shape=(self.units * self.order, self.output_size),
            initializer=self.decoder_initializer,
            trainable=self.trainable_decoders,
        )

        self.AT = self.add_weight(
            name="AT",
            shape=(self.order, self.order),
            initializer=Constant(self._A.T),  # note: transposed
            trainable=self.trainable_A,
        )

        self.B = self.add_weight(
            name="B",
            shape=(1, 1, self.order),  # system is SISO
            initializer=Constant(self._B[None, None, :]),
            trainable=self.trainable_B,
        )

        self.I = K.eye(self.order)  # noqa: E741
        self.zero_padding = K.zeros((1, self.order + 1))

        self.built = True
Code Example #14
 def build(self, input_shape):
     if type(input_shape) == list:
         self.input_spec = list(
             map(lambda x: keras.engine.InputSpec(shape=x), input_shape))
     else:
         self.input_spec = keras.engine.InputSpec(shape=input_shape)
     if isinstance(self.layers, list) and len(self.layers) == 0:
         self.layer.build(input_shape)
         config = self.layer.get_config()
         name = config['name']
         self.layers = []
         for i in range(self.layer_num):
             copied = copy.copy(config)
             copied['name'] = name + '_{}'.format(i + 1)
             self.layers.append(self.layer.__class__.from_config(copied))
     for layer in self.layers:
         layer.build(input_shape)
     if self.hidden_dim is not None:
         self.W = self.add_weight(
             shape=(int(input_shape[-1]), self.hidden_dim * self.layer_num),
             name='{}_W'.format(self.name),
             initializer=keras.initializers.get('uniform'),
         )
         if self.use_bias:
             self.b = self.add_weight(
                 shape=(self.hidden_dim * self.layer_num, ),
                 name='{}_b'.format(self.name),
                 initializer=keras.initializers.get('zeros'),
             )
     if self.reg_index:
         for i, (index, interval, weight) in enumerate(
                 zip(self.reg_index, self.reg_slice, self.reg_weight)):
             weights = []
             if type(interval) is slice:
                 interval = (interval, )
             for layer in self.layers:
                 if interval is None:
                     weights.append(K.flatten(layer.get_weights()[index]))
                 else:
                     weights.append(
                         K.flatten(layer.get_weights()[index][interval]))
             weights = K.stack(weights)
             self.add_loss(weight * K.sum(
                 K.square(
                     K.dot(weights, K.transpose(weights)) -
                     K.eye(len(self.layers)))))
     super(MultiHead, self).build(input_shape)
Code Example #15
File: multi_head.py Project: colabnlp/MedicalQA
 def build(self, input_shape):
     if type(input_shape) == list:
         self.input_spec = list(
             map(lambda x: keras.engine.InputSpec(shape=x), input_shape))
     else:
         self.input_spec = keras.engine.InputSpec(shape=input_shape)
     if not self.layers:
         self.layers = [
             copy.deepcopy(self.layer) for _ in range(self.layer_num)
         ]
     if self.hidden_dim is not None:
         self.W = self.add_weight(
             shape=(input_shape[-1], self.hidden_dim * self.layer_num),
             name='{}_W'.format(self.name),
             initializer=keras.initializers.get('uniform'),
         )
         if self.use_bias:
             self.b = self.add_weight(
                 shape=(self.hidden_dim * self.layer_num, ),
                 name='{}_b'.format(self.name),
                 initializer=keras.initializers.get('zeros'),
             )
         input_shape = input_shape[:-1] + (self.hidden_dim, )
     for i, layer in enumerate(self.layers):
         if not layer.built:
             if self.rename:
                 layer.name = layer.name + '_%d' % (i + 1)
             layer.build(input_shape)
     if self.reg_index:
         for i, (index, interval, weight) in enumerate(
                 zip(self.reg_index, self.reg_slice, self.reg_weight)):
             weights = []
             if type(interval) is slice:
                 interval = (interval, )
             for layer in self.layers:
                 if interval is None:
                     weights.append(K.flatten(layer.get_weights()[index]))
                 else:
                     weights.append(
                         K.flatten(layer.get_weights()[index][interval]))
             weights = K.stack(weights)
             self.add_loss(weight * K.sum(
                 K.square(
                     K.dot(weights, K.transpose(weights)) -
                     K.eye(len(self.layers)))))
     super(MultiHead, self).build(input_shape)
Code Example #16
    def build(self, input_shape):
        input_dim = input_shape[-1]

        # TODO: add regularizers

        self.encoders = self.add_weight(
            name='encoders',
            shape=(input_dim, self.units),
            initializer=self.encoder_initializer,
            trainable=self.trainable_encoders)

        self.dt = self.add_weight(
            name='dt',
            shape=(1,),
            initializer=self.dt_initializer,
            trainable=self.trainable_dt)

        self.decoders = self.add_weight(
            name='decoders',
            shape=(self.units*self.order, self.output_size),
            initializer=self.decoder_initializer,
            trainable=self.trainable_decoders)

        self.AT = self.add_weight(
            name='AT',
            shape=(self.order, self.order),
            initializer=Constant(self._A.T),  # note: transposed
            trainable=self.trainable_A)

        self.B = self.add_weight(
            name='B',
            shape=(1, 1, self.order),  # system is SISO
            initializer=Constant(self._B[None, None, :]),
            trainable=self.trainable_B)

        self.I = K.eye(self.order)
        self.zero_padding = K.zeros((1, self.order + 1))

        self.built = True
Code Example #17
File: glm.py Project: psorus/grapa
    def call(self, x):
        mat = x[0]  # Matrix A
        val = x[1]

        eye = K.eye(self.gs)

        p1 = kron(eye, self.selfintact)
        p2 = kron_b1(mat, self.neigintact)
        p = p1 + p2

        v = K.reshape(val, (-1, self.gs * self.param))

        for i in range(self.iterations):
            v = K.batch_dot(p, v)
            if self.activate:
                v = self.advrelu(v, self.activation)

        ret = K.reshape(v, (-1, self.gs, self.param))  # no idea if the orientation is right, but it seems to fit

        return ret
Code Example #18
File: glim.py Project: psorus/grapa
    def call(self, x):
        mat = x[0]  # Matrix A
        val = x[1]

        eye = K.eye(self.gs)

        p1 = kron(eye, self.selfintact)
        p2 = kron_b1(mat, self.neigintact)
        p = p1 + p2

        # Invert the matrix p; this is only truly the inverse of glm if activate=False
        p = t.linalg.inv(p)

        v = K.reshape(val, (-1, self.gs * self.param))

        for i in range(self.iterations):
            v = K.batch_dot(p, v)
            if self.activate:
                v = self.advrelu(v, self.activation)

        ret = K.reshape(v, (-1, self.gs, self.param))  # no idea if the orientation is right, but it seems to fit

        return ret
Code Example #19
File: main.py Project: qianrenjian/jodie-tensorflow
# INITIALIZE EMBEDDING
initial_user_embedding = tf.Variable(
    K.l2_normalize(K.random_uniform((1, args.embedding_dim)))
)  # the initial user and item embeddings are learned during training as well
initial_item_embedding = tf.Variable(
    K.l2_normalize(K.random_uniform((1, args.embedding_dim))))

user_embeddings = K.repeat_elements(
    initial_user_embedding, num_users,
    0)  # initialize all users to the same embedding
item_embeddings = K.repeat_elements(
    initial_item_embedding, num_items,
    0)  # initialize all items to the same embedding
item_embedding_static = tf.Variable(
    K.eye(num_items))  # one-hot vectors for static embeddings
user_embedding_static = tf.Variable(
    K.eye(num_users))  # one-hot vectors for static embeddings

# RUN THE JODIE MODEL
'''
THE MODEL IS TRAINED FOR SEVERAL EPOCHS. IN EACH EPOCH, JODIE USES THE TRAINING SET OF INTERACTIONS TO UPDATE ITS PARAMETERS.
'''
print("*** Training the JODIE model for %d epochs ***" % args.epochs)

with trange(args.epochs) as progress_bar_1:
    for ep in progress_bar_1:
        progress_bar_1.set_description('Epoch %d of %d' % (ep, args.epochs))
        # INITIALIZE EMBEDDING TRAJECTORY STORAGE
        user_embeddings_timeseries = tf.constant(
            tf.zeros([num_interactions, args.embedding_dim], tf.float32))
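The K.eye calls above rely on row i of an identity matrix being the one-hot vector for index i, which is exactly a fixed (static) embedding table. A minimal illustration:

from tensorflow.keras import backend as K

static = K.eye(4)         # 4 items, 4-dimensional one-hot embeddings
print(K.eval(static)[2])  # [0. 0. 1. 0.] - the static embedding of item 2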
Code Example #20
    def compile_backprop(self, proj_type='l2'):
        self.proj_type = proj_type

        layers = [layer for layer in self.model.layers if layer.get_weights()]

        A = [Input(layer.output_shape[1:]) for layer in layers[:-1]]

        # Store the constraint gradients and biases for each layer.
        grads, biases = [], []
        prev_grad, prev_bias = None, None

        for i, layer in enumerate(layers):
            W, b = layer.weights

            if isinstance(layer, Dense):
                if i > 0 and K.ndim(A[i - 1]) == 4 and (K.image_data_format()
                                                        == 'channels_first'):

                    # The `Flatten` layer doesn't respect the channels-first
                    # dimension ordering, so it mixes up our dimensions. We need
                    # to correct for that here.
                    _, ch, h, w = K.int_shape(A[i - 1])
                    _, n_out = K.int_shape(W)
                    W = K.reshape(
                        K.permute_dimensions(K.reshape(W, (h, w, ch, n_out)),
                                             (2, 0, 1, 3)),
                        (ch * h * w, n_out))

                if len(grads) == 0:
                    grad = K.transpose(W)
                    bias = b

                    # Expand to batch shape.
                    grad = grad[None] * K.ones_like(A[i])[:, :, None]
                    bias = bias[None] * K.ones_like(A[i])

                else:
                    A_i = K.reshape(
                        A[i - 1], [-1, np.prod(K.int_shape(A[i - 1])[1:])])

                    grad = (K.transpose(W)[None] * A_i[:, None]) @ grads[-1]
                    bias = (biases[-1] * A_i) @ W + b[None]

                grads.append(grad)
                biases.append(bias)

            else:
                if K.image_data_format() == 'channels_first':
                    _, ch_in, h_in, w_in = layer.input_shape
                    _, ch_out, h_out, w_out = layer.output_shape
                else:
                    _, h_in, w_in, ch_in = layer.input_shape
                    _, h_out, w_out, ch_out = layer.output_shape

                if len(grads) == 0:
                    if K.image_data_format() == 'channels_first':
                        grad = K.conv2d(K.reshape(
                            K.eye(ch_in * h_in * w_in),
                            [ch_in * h_in * w_in, ch_in, h_in, w_in]),
                                        W,
                                        padding=layer.padding,
                                        strides=layer.strides)

                        bias = K.tile(b[:, None, None], [1, h_out, w_out])

                    else:
                        grad = K.conv2d(K.reshape(
                            K.eye(ch_in * h_in * w_in),
                            [ch_in * h_in * w_in, h_in, w_in, ch_in]),
                                        W,
                                        padding=layer.padding,
                                        strides=layer.strides)

                        bias = K.tile(b[None, None], [h_out, w_out, 1])

                    # Expand to batch shape.
                    grad = grad[None] * K.ones_like(A[i])[:, None]
                    bias = bias[None] * K.ones_like(A[i])

                else:

                    n = np.prod(self.input_shape)

                    if K.image_data_format() == 'channels_first':
                        grad = K.reshape(
                            K.conv2d(K.reshape(grad * A[i - 1][:, None],
                                               (-1, ch_in, h_in, w_in)),
                                     W,
                                     padding=layer.padding,
                                     strides=layer.strides),
                            (-1, n, ch_out, h_out, w_out))

                        bias = K.conv2d(bias * A[i - 1],
                                        W,
                                        padding=layer.padding,
                                        strides=layer.strides) + b[None, :,
                                                                   None, None]

                    else:
                        grad = K.reshape(
                            K.conv2d(K.reshape(grad * A[i - 1][:, None],
                                               (-1, h_in, w_in, ch_in)),
                                     W,
                                     padding=layer.padding,
                                     strides=layer.strides),
                            (-1, n, h_out, w_out, ch_out))

                        bias = K.conv2d(bias * A[i - 1],
                                        W,
                                        padding=layer.padding,
                                        strides=layer.strides) + b[None, None,
                                                                   None]

                grads.append(
                    K.permute_dimensions(
                        K.reshape(grad, (-1, n, ch_out * h_out * w_out)),
                        (0, 2, 1)))
                biases.append(K.batch_flatten(bias))

        # Handle the softmax constraints.
        c = K.placeholder((1, ), dtype='int32')

        softmax_grads = grads[-1]
        softmax_biases = biases[-1]

        c_grad = K.permute_dimensions(
            K.gather(K.permute_dimensions(softmax_grads, (1, 0, 2)), c),
            (1, 0, 2))

        c_bias = K.transpose(K.gather(K.transpose(softmax_biases), c))

        grads[-1] = softmax_grads - c_grad
        biases[-1] = softmax_biases - c_bias

        grads_no_first_layer = K.concatenate(grads[1:], axis=1)
        biases_no_first_layer = K.concatenate(biases[1:], axis=1)

        grads = K.concatenate(grads, axis=1)
        biases = K.concatenate(biases, axis=1)

        # Calculate distances.
        x = K.placeholder(self.input_shape)

        distances = proj_dist(proj_type, K.reshape(x, (1, -1)), grads, biases)

        distances_no_first_layer = proj_dist(proj_type, K.reshape(x, (1, -1)),
                                             grads_no_first_layer,
                                             biases_no_first_layer)

        self._grad_f = K.function(A + [c], [grads])
        self._bias_f = K.function(A + [c], [biases])
        self._dist_f = K.function(A + [c, x], [distances])
        self._all_f = K.function(A + [c, x], [
            distances, grads[:, -self.n_classes:], biases[:, -self.n_classes:]
        ])

        self._all_except_first_f = K.function(A + [c, x], [
            distances_no_first_layer, grads_no_first_layer[:,
                                                           -self.n_classes:],
            biases_no_first_layer[:, -self.n_classes:]
        ])

        self.compiled = True

        return self
Code Example #21
File: gadd1.py Project: psorus/grapa
 def call(self, x):
     x = x[0]
     return x + K.eye(self.gs)
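This layer adds self-loops to a batch of adjacency matrices: K.eye(self.gs) has shape (gs, gs) and broadcasts across the batch dimension of x, shape (batch, gs, gs). A minimal numpy check of the broadcast, with gs assumed to be 3:

import numpy as np

x = np.zeros((2, 3, 3))   # batch of 2 empty adjacency matrices
out = x + np.eye(3)       # the identity broadcasts over the batch
print(out[0].diagonal())  # [1. 1. 1.] - self-loops added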
Code Example #22
def constrained_l2_proj_distance(x, grads, biases):
    u, w, b = x, grads, -biases[:, :, None]

    ndim = K.int_shape(u)[1]
    neurons = K.int_shape(w)[1]
    w = K.reshape(w, (-1, ndim))
    b = K.reshape(b, (-1, 1))

    # Handy naming to make code a bit more readable.
    def is_(x):
        return K.cast(x, 'float32')

    def where(x):
        return K.cast(x, 'float32')

    # Check feasibility.
    # The problem is infeasible if `q` (defined below) can be brought to
    # infinity as `lam` -> +/-infinity. We see that as `lam` -> +infinity,
    # `x_star[i]` becomes 0 if `w[i]` is positive and 1 if `w[i]` is negative.
    # Thus, dq/dlam = -b + sum_{i : w_i < 0}{w_i}. If dq/dlam is positive, then
    # `q` will go to infinity. The other case comes from the symmetric case for
    # when `lam` -> -infinity.
    infeasible = (is_(K.sum(w * where(w > 0), axis=1) < b[:, 0]) +
                  is_(K.sum(w * where(w < 0), axis=1) > b[:, 0]))

    feasible = 1. - infeasible

    # Get the order (as lambda goes from -infinity to 0) in which each dimension
    # transitions to the I stage.
    I_in_order = tf.argsort(u / w * where(w < 0) + (u - 1) / w * where(w > 0))

    # Get the order (as lambda goes from 0 to +infinity) in which each dimension
    # transitions out of the I stage.
    I_out_order = tf.argsort(u / w * where(w > 0) + (u - 1) / w * where(w < 0))

    eye = K.eye(ndim)

    mask_I = K.cumsum(K.concatenate(
        (K.gather(eye, I_in_order), -K.gather(eye, I_out_order)), axis=1),
                      axis=1)

    mask_I = tf.gather(mask_I, np.arange(2 * ndim - 1), axis=1)

    mask_1 = is_(w > 0)[:, None] + K.cumsum(K.concatenate(
        (-tf.gather(
            eye[None] * where(w > 0)[:, None], I_in_order, batch_dims=1),
         tf.gather(
             eye[None] * where(w < 0)[:, None], I_out_order, batch_dims=1)),
        axis=1),
                                            axis=1)

    mask_1 = tf.gather(mask_1, np.arange(2 * ndim - 1), axis=1)

    # Here we collect the dimensions in `w` and `u` that are in the I and 1
    # phase in state `i`. An analysis of the cases shows that given these
    # dimension, the argmax of `q` can be found by the equation below.
    w_I = w[:, None] * mask_I
    u_I = u[:, None] * mask_I
    w_1 = w[:, None] * mask_1

    argmaxes = ((K.sum(w_I * u_I, axis=2) + K.sum(w_1, axis=2) - b) /
                K.sum(w_I * w_I, axis=2))

    # Find the inflection points in `q`.
    inflections = K.concatenate(((u - 1) / w, u / w))

    lam = K.concatenate((argmaxes, inflections))

    x_star_candidates = K.clip(u[:, None] - lam[:, :, None] * w[:, None], 0.,
                               1.)

    # Evaluate `q` on all the candidate points for the max. Select the point
    # that is in fact the max (`opt_index`).
    max_q_candidates = (K.sum(.5 * (x_star_candidates - u[:, None])**2 +
                              lam[:, :, None] * w[:, None] * x_star_candidates,
                              axis=2) - lam * b)

    opt_index = K.argmax(max_q_candidates, axis=1)

    # `x_star` is the projection satisfying the constraints.
    x_star = K.sum(K.one_hot(opt_index, 4 * ndim - 1)[:, :, None] *
                   x_star_candidates,
                   axis=1)

    d = tf.norm(x_star - u, axis=1) / feasible

    return K.reshape(d, (-1, neurons))
Code Example #23
def testBackEnd():
    x = tf.zeros([3, 4], tf.int32)
    print('x=', x)
    x = tf.zeros((3, 4), tf.int32)
    print('x=', x)
    return

    a = tf.constant([1, 2, 3, 4, 5, 6, 7, 8], dtype=tf.float32)
    a = K.reshape(a, (2, 4))  # 8 elements reshape to 2x4
    print('a=', a)

    a = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
    #a = K.abs(-1)
    print('a=', type(a), a)  #a= tf.Tensor(1, shape=(), dtype=int32)
    # a = a.numpy()
    # print('a=',a)

    # a = tf.zeros([0, 3])
    # a = tf.concat([a, [[1, 2, 3], [5, 6, 8]]], axis=0)
    # print('a=',type(a),a)

    b = tf.constant([[1, 8], [2, 3]], dtype=tf.float32)

    c = K.square(a - b)
    print('c=', c)
    d = K.sum(c, axis=0)
    print('d=', d)
    d = K.sum(c, axis=1)
    print('d=', d)

    d = K.sum(c, axis=[0, 1])
    print('d=', d)
    return

    a = K.abs([-1, 0, 9, -10])
    print('a=', a)  #a= tf.Tensor([ 1  0  9 10], shape=(4,), dtype=int32)

    a = K.abs(np.array([-1, 0, 9, -10]))
    print('a=', a)  #a= tf.Tensor([ 1  0  9 10], shape=(4,), dtype=int32)

    a = K.all(np.array([-1, 0, 9, -10]), axis=0)
    print('a=', a)  #a= tf.Tensor(False, shape=(), dtype=bool)

    a = K.all(np.array([[-1, -2, -1], [-1, 0, 9]]), axis=0)  #x axis
    print('a=', a)  #a= tf.Tensor([ True False  True], shape=(3,), dtype=bool)
    a = K.all(np.array([[-1, -2, -1], [-1, 0, 9]]), axis=1)  #y axis
    print('a=', a)  #a= tf.Tensor([ True False], shape=(2,), dtype=bool)

    a = K.arange(1, 100, 10)
    print(
        'a=', a
    )  #a= tf.Tensor([ 1 11 21 31 41 51 61 71 81 91], shape=(10,), dtype=int32)

    a = K.sum(np.array([-1, 0, 9, -10]))
    print('a=', a)  #a= tf.Tensor(-2, shape=(), dtype=int32)

    a = K.square(np.array([-1, 0, 9, -10]))
    print('a=', a)  #a= tf.Tensor([  1   0  81 100], shape=(4,), dtype=int32)

    x = K.placeholder(shape=(2, 3))
    y = K.placeholder(shape=(3, 4))
    xy = K.dot(x, y)
    shape = K.int_shape(xy)
    print('xy=', xy)  #xy= Tensor("MatMul:0", shape=(2, 4), dtype=float32)
    print('xy shape=', shape)  #xy shape= (2, 4)

    kvar = K.eye(3)
    #K.eval(kvar)
    print('kvar=', kvar)
    '''
    array([[1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.]], dtype=float32)>
    '''

    a = np.array([[1, 2], [3, 4]])
    a = K.transpose(a)
    print('a=', a)
    '''
    a= tf.Tensor(
            [[1 3]
            [2 4]], shape=(2, 2), dtype=int32)
    '''

    a = K.clip(np.array([-1, 0, 1, 2, 3, 4, 5]), min_value=0, max_value=3)
    print('a=', a)  #a= tf.Tensor([0 0 1 2 3 3 3], shape=(7,), dtype=int32)
Code Example #24
def get_sub_mask(s):  # todo: check it later
    len_s = K.shape(s)[1]
    mask = K.cumsum(K.eye(len_s), 1)
    return mask
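Cumulative-summing the identity along axis 1 turns row i into ones from column i onward, i.e. an upper-triangular mask including the diagonal. For reference, tf.linalg.band_part builds the same mask; a quick equivalence check with a fixed size:

import tensorflow as tf

n = 4
mask = tf.cumsum(tf.eye(n), axis=1)                # upper triangular, incl. diagonal
band = tf.linalg.band_part(tf.ones((n, n)), 0, -1)
print(bool(tf.reduce_all(mask == band)))           # True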
Code Example #25
File: gtopk.py Project: psorus/grapa
    def call(self, x):
        x = x[0]

        #print("!x",x.shape)

        gs = self.gs
        k = self.k
        param = self.param
        C = self.numericalC

        #print("gs",gs,"k",k,"param",param,"C",C)

        t.print("calling onetopk", self.metrik, output_stream=sys.stdout)

        #exit()

        mata = K.constant(self.getmata(gs))
        matb = K.constant(self.getmatb(gs))

        #print("mata",mata.shape)
        #print("matb",matb.shape)

        xp = K.permute_dimensions(x, (0, 2, 1))
        #print("xp",xp.shape)

        xa = K.dot(xp, mata)
        xb = K.dot(xp, matb)
        #print("xa",xa.shape,"xb",xb.shape)
        #exit()

        isval = xa[:, self.flag, :] * xb[:, self.flag, :]

        #return isval,isval
        #print("isval",isval.shape)
        #exit()

        ds = xa - xb
        #print("ds",ds.shape)

        dsp = K.permute_dimensions(ds, (0, 2, 1))
        #print("dsp",dsp.shape)

        dspsq = K.square(dsp)
        #print("dspsq",dspsq.shape)
        #print("self.metrik",self.metrik.shape)

        delt = K.dot(dspsq, self.metrik)
        #print("delt",delt.shape)

        delt = K.reshape(
            delt, (-1, self.gs * self.gs)) + (1 - isval) * self.emptyconst

        d = K.reshape(delt, (-1, gs, gs))
        #print("d",d.shape)

        #return d,d

        #####no self interactions
        if not self.self_interaction:
            one = K.eye(gs)
            #print("one",one.shape)
            d += self.self_interaction_const * one
        #####end no self interactions

        v, _ = t.math.top_k(-d, k=k)
        #print("v",v.shape)
        #return v,v

        vb = v[:, :, -1]
        #print("vb",vb.shape)

        vbs = K.reshape(vb, (-1, gs, 1))
        #print("vbs",vbs.shape)

        su = d + vbs  #plus since top_k(-d)
        #print("su",su.shape)

        #map anything above 0 to 0 and anything below to 1, also map 0 to 1
        #p(-x)=C*d_C(-x)
        #     =d(-C*x)
        #     =1-r(Cx-1)+r(Cx)
        #experimentally:
        #   r(1-Cx)-r(-Cx)

        rel = K.relu(1 - C * su) - K.relu(-C * su)
        #print("rel",rel.shape)

        rel = K.relu(rel) - K.relu(rel - 1)

        #return rel,rel

        dez1 = K.reshape(rel, (-1, self.gs * self.gs))
        #print("dez1",dez1.shape)
        dez2 = dez1 * isval
        #print("dez2",dez2.shape)
        rel = K.reshape(dez2, (-1, self.gs, self.gs))
        print("rel", rel.shape)

        numnei = K.sum(rel, axis=-1)
        print("numnei", numnei.shape)
        factor = self.k / (numnei + 1e-11)  # guard against division by zero

        #return K.concatenate((numnei,factor),axis=-1),factor#,factor#numnei,numnei

        print("factor", factor.shape)
        refactor = K.repeat(factor, self.gs)
        print("refactor", refactor.shape)

        refactor = K.permute_dimensions(refactor, (0, 2, 1))

        #return refactor,refactor

        rel = rel * refactor
        print("rel", rel.shape)

        #exit()

        if self.free == 0: return rel, x
        zero1 = K.zeros_like(x[:, :, 0])
        zero1 = K.reshape(zero1, (-1, x.shape[1], 1))
        #print("!",zero1.shape)
        zerolis = []
        for i in range(self.free):
            zerolis.append(zero1)
        zeros = K.concatenate(zerolis, axis=-1)
        #print(zeros.shape)

        return rel, K.concatenate((x, zeros), axis=-1)
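The construction rel = K.relu(1 - C*su) - K.relu(-C*su) used above is a differentiable step function: it returns 1 for inputs <= 0, falls linearly to 0 on (0, 1/C], and stays 0 after, so a large C sharpens it toward a hard threshold while keeping gradients defined almost everywhere. A numpy illustration:

import numpy as np

def soft_step(x, C=1000.0):
    relu = lambda v: np.maximum(v, 0.0)
    return relu(1 - C * x) - relu(-C * x)

print(soft_step(np.array([-0.5, 0.0, 0.0005, 0.01])))
# -> [1.  1.  0.5 0. ]: ~1 for x <= 0, ~0 once x >> 1/C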
Code Example #26
    def call(self, inputs):
        """
        Creates the layer as a Keras graph.

        Note that the inputs are tensors with a batch dimension of 1:
        Keras requires this batch dimension, and for full-batch methods
        we only have a single "batch".

        There are three inputs required, the node features, the output
        indices (the nodes that are to be selected in the final layer)
        and the graph adjacency matrix

        Notes:
            This does not add self loops to the adjacency matrix.
            The output indices are only used when ``final_layer=True``

        Args:
            inputs (list): list of inputs with 3 items:
            node features (size 1 x N x F),
            output indices (size 1 x M),
            graph adjacency matrix (size N x N),
            where N is the number of nodes in the graph,
                  F is the dimensionality of node features
                  M is the number of output nodes
        """
        X = inputs[0]  # Node features (1 x N x F)
        out_indices = inputs[1]  # output indices (1 x M)
        A = inputs[2]  # Adjacency matrix (N x N)
        N = K.int_shape(A)[-1]

        batch_dim, n_nodes, _ = K.int_shape(X)
        if batch_dim != 1:
            raise ValueError(
                "Currently full-batch methods only support a batch dimension of one"
            )

        else:
            # Remove singleton batch dimension
            X = K.squeeze(X, 0)
            out_indices = K.squeeze(out_indices, 0)

        outputs = []
        for head in range(self.attn_heads):
            kernel = self.kernels[head]  # W in the paper (F x F')
            attention_kernel = self.attn_kernels[
                head]  # Attention kernel a in the paper (2F' x 1)

            # Compute inputs to attention network
            features = K.dot(X, kernel)  # (N x F')

            # Compute feature combinations
            # Note: [[a_1], [a_2]]^T [[Wh_i], [Wh_j]] = [a_1]^T [Wh_i] + [a_2]^T [Wh_j]
            attn_for_self = K.dot(
                features, attention_kernel[0])  # (N x 1), [a_1]^T [Wh_i]
            attn_for_neighs = K.dot(
                features, attention_kernel[1])  # (N x 1), [a_2]^T [Wh_j]

            # Attention head a(Wh_i, Wh_j) = a^T [[Wh_i], [Wh_j]]
            dense = attn_for_self + K.transpose(
                attn_for_neighs)  # (N x N) via broadcasting

            # Add nonlinearity
            dense = LeakyReLU(alpha=0.2)(dense)

            # Mask values before activation (Vaswani et al., 2017)
            # YT: this only works for 'binary' A, not for 'weighted' A!
            # YT: if A does not have self-loops, the node itself will be masked, so A should have self-loops
            # YT: this is ensured by setting the diagonal elements of A tensor to 1 above
            if not self.saliency_map_support:
                mask = -10e9 * (1.0 - A)
                self.A = A
                dense += mask
                dense = K.softmax(dense)  # (N x N), Eq. 3 of the paper

            else:
                # dense = dense - tf.reduce_max(dense)
                # GAT with support for saliency calculations
                W = (self.delta * A) * K.exp(
                    dense - K.max(dense, axis=1, keepdims=True)
                ) * (1 - self.non_exist_edge) + self.non_exist_edge * (
                    A + self.delta *
                    (K.ones(shape=[N, N], dtype="float") - A) + K.eye(N)
                ) * K.exp(dense - K.max(dense, axis=1, keepdims=True))
                dense = W / K.sum(W, axis=1, keepdims=True)

            # Apply dropout to features and attention coefficients
            dropout_feat = Dropout(self.in_dropout_rate)(features)  # (N x F')
            dropout_attn = Dropout(self.attn_dropout_rate)(dense)  # (N x N)

            # Linear combination with neighbors' features [YT: see Eq. 4]
            node_features = K.dot(dropout_attn, dropout_feat)  # (N x F')

            if self.use_bias:
                node_features = K.bias_add(node_features, self.biases[head])

            # Add output of attention head to final output
            outputs.append(node_features)

        # Aggregate the heads' output according to the reduction method
        if self.attn_heads_reduction == "concat":
            output = K.concatenate(outputs)  # (N x KF')
        else:
            output = K.mean(K.stack(outputs), axis=0)  # (N x F')

        # Nonlinear activation function
        output = self.activation(output)

        # On the final layer we gather the nodes referenced by the indices
        if self.final_layer:
            output = K.gather(output, out_indices)

        # Add batch dimension back if we removed it
        if batch_dim == 1:
            output = K.expand_dims(output, 0)

        return output
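For reference, the masking step in this layer (mask = -10e9 * (1.0 - A)) is the standard additive-mask trick: non-edges receive a large negative score, so the softmax assigns them roughly zero attention. A small numpy illustration:

import numpy as np

def softmax(v):
    e = np.exp(v - v.max())
    return e / e.sum()

scores = np.array([2.0, 1.0, 3.0])
adj = np.array([1.0, 0.0, 1.0])        # the middle node is not a neighbor
masked = scores + -10e9 * (1.0 - adj)
print(softmax(masked).round(3))        # [0.269 0.    0.731]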