Example #1
    def call(self, inputs):
        # Repeat each input image once per affine transform so that all
        # transforms can be applied in a single batched call.
        expanded_tensor = ktf.expand_dims(inputs[0], -1)
        multiples = [1, self.number_of_transforms, 1, 1, 1]
        tiled_tensor = ktf.tile(expanded_tensor, multiples=multiples)
        repeated_tensor = ktf.reshape(tiled_tensor, ktf.shape(inputs[0]) * np.array([self.number_of_transforms, 1, 1, 1]))

        affine_transforms = inputs[1] / self.affine_mul

        affine_transforms = ktf.reshape(affine_transforms, (-1, 8))
        transformed = tf_affine_transform(repeated_tensor, affine_transforms)
        res = ktf.reshape(transformed, [-1, self.number_of_transforms] + self.image_size)
        res = ktf.transpose(res, [0, 2, 3, 1, 4])

        # Use masks
        if len(inputs) == 3:
            mask = ktf.transpose(inputs[2], [0, 2, 3, 1])
            mask = ktf.image.resize_images(mask, self.image_size[:2], method=ktf.image.ResizeMethod.NEAREST_NEIGHBOR)
            res = res * ktf.expand_dims(mask, axis=-1)


        if self.aggregation_fn == 'none':
            res = ktf.reshape(res, [-1] + self.image_size[:2] + [self.image_size[2] * self.number_of_transforms])
        elif self.aggregation_fn == 'max':
            res = ktf.reduce_max(res, axis=-2)
        elif self.aggregation_fn == 'avg':
            # 'avg' assumes the mask input is present (len(inputs) == 3);
            # otherwise mask is undefined here.
            counts = ktf.reduce_sum(mask, axis=-1)
            counts = ktf.expand_dims(counts, axis=-1)
            res = ktf.reduce_sum(res, axis=-2)
            res /= counts
            res = ktf.where(ktf.is_nan(res), ktf.zeros_like(res), res)
        return res
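
The expand_dims/tile/reshape sequence at the top of call is a batched repeat: every input image is duplicated number_of_transforms times so that all affine transforms can be applied in one call. A minimal standalone sketch of the same pattern on a toy rank-2 tensor (names here are illustrative):

import tensorflow as tf

x = tf.reshape(tf.range(6, dtype=tf.float32), (2, 3))  # "batch" of 2 items
n = 4                                                  # repeats per item

expanded = tf.expand_dims(x, 1)        # (2, 1, 3)
tiled = tf.tile(expanded, [1, n, 1])   # (2, 4, 3)
repeated = tf.reshape(tiled, (-1, 3))  # (8, 3): item 0 four times, then item 1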
Example #2
def sparse_bool_mask(x, mask, axis=0):
    # Only necessary if indices may have non-unique elements
    indices = tf.boolean_mask(tf.range(tf.shape(x)[axis]), mask)
    n_indices = tf.size(indices)
    # Get indices for the axis
    idx = x.indices[:, axis]
    # Find where indices match the selection
    eq = tf.equal(tf.expand_dims(idx, 1),
                  tf.cast(indices, tf.int64))  # TODO this has quadratic cost
    # Mask for selected values
    sel = tf.reduce_any(eq, axis=1)
    # Selected values
    values_new = tf.boolean_mask(x.values, sel, axis=0)
    # New index value for selected elements
    n_indices = tf.cast(n_indices, tf.int64)
    idx_new = tf.reduce_sum(tf.cast(eq, tf.int64) * tf.range(n_indices),
                            axis=1)
    idx_new = tf.boolean_mask(idx_new, sel, axis=0)
    # New full indices tensor
    indices_new = tf.boolean_mask(x.indices, sel, axis=0)
    indices_new = tf.concat([
        indices_new[:, :axis],
        tf.expand_dims(idx_new, 1), indices_new[:, axis + 1:]
    ],
                            axis=1)
    # New shape
    shape_new = tf.concat(
        [x.dense_shape[:axis], [n_indices], x.dense_shape[axis + 1:]], axis=0)
    return tf.SparseTensor(indices_new, values_new, shape_new)
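
A usage sketch for sparse_bool_mask, keeping rows 0 and 2 of a small sparse matrix (values here are illustrative):

import tensorflow as tf

x = tf.SparseTensor(indices=[[0, 0], [1, 1], [2, 2]],
                    values=[1.0, 2.0, 3.0],
                    dense_shape=[3, 3])
mask = tf.constant([True, False, True])

y = sparse_bool_mask(x, mask, axis=0)
# y.dense_shape is [2, 3]: row 1 is dropped and row 2 is renumbered to row 1.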
Example #3
def radon_transform(x, theta):

    x = tf.cast(x, dtype=tf.float32)

    x_shape = tf.shape(x)
    n_cols = x_shape[2]
    n_rows = x_shape[1]
    n_frames = x_shape[0]
    n_angles = tf.shape(theta)[0]

    x = tf.reshape(x, (-1, 1, n_rows, n_cols, 1))
    x = tf.tile(x, (1, n_angles, 1, 1, 1))
    x = tf.reshape(x, (-1, n_rows, n_cols, 1))

    repeated_theta = repeat_theta(theta, n_angles, n_frames)

    # Rotate every repeated frame by its angle (TF 1.x contrib API).
    x = tf.contrib.image.rotate(x, repeated_theta, interpolation='BILINEAR')

    # Sum along the row axis: one projection per angle (the sinogram).
    x = tf.reshape(x, (-1, n_angles, n_rows, n_cols, 1))
    x = tf.reduce_sum(x, 2)

    return x
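
repeat_theta is not defined in this snippet. Given the frame-major flattening above (frame 0 at every angle, then frame 1, and so on), a plausible implementation simply tiles the angle vector once per frame; this helper is an assumption, not the original code:

def repeat_theta(theta, n_angles, n_frames):
    # Hypothetical helper: one copy of theta per frame, matching the
    # (n_frames * n_angles, n_rows, n_cols, 1) layout used in radon_transform.
    return tf.tile(theta, [n_frames])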
Example #4
    def get_gradient_penalty_loss(self, for_discriminator=True):
        if self.gradient_penalty_weight == 0:
            return []

        inp = self.discriminator_input if for_discriminator else self.generator_input
        if isinstance(inp, list):
            batch_size = ktf.shape(inp[0])[0]
        else:
            batch_size = ktf.shape(inp)[0]

        points = self.grad_generator_output

        gp_list = []
        disc_out = self.discriminator([points])
        if not isinstance(disc_out, list):
            disc_out = [disc_out]
        gradients = ktf.gradients(disc_out[0], points)

        for gradient in gradients:
            if gradient is None:
                continue
            gradient = ktf.reshape(gradient, (batch_size, -1))
            gradient_l2_norm = ktf.sqrt(ktf.reduce_sum(ktf.square(gradient), axis=1))
            if for_discriminator:
                gradient_penalty = self.gradient_penalty_weight * ktf.square(1 - gradient_l2_norm)
            else:
                gradient_penalty = -self.gradient_penalty_weight_generator * gradient_l2_norm
            gp_list.append(ktf.reduce_mean(gradient_penalty))

        if for_discriminator:
            for i in range(len(gp_list)):
                self.discriminator_metric_names.append('gp_loss_' + str(i))
        return gp_list
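
The two branches implement different objectives: for the discriminator, the standard two-sided WGAN-GP term weight * (1 - ||g||)^2 pulls gradient norms toward 1; for the generator, the negated norm rewards large gradients. A toy numeric check of the discriminator branch (values here are illustrative):

import tensorflow as tf

g = tf.constant([[3.0, 4.0]])                        # one gradient with L2 norm 5
norm = tf.sqrt(tf.reduce_sum(tf.square(g), axis=1))  # -> [5.0]
penalty = 10.0 * tf.square(1 - norm)                 # weight 10 -> [160.0]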
Example #5
    def nn_loss(self, reference, target, neighborhood_size=(3, 3)):
        v_pad = neighborhood_size[0] // 2
        h_pad = neighborhood_size[1] // 2
        val_pad = ktf.pad(reference,
                          [[0, 0], [v_pad, v_pad], [h_pad, h_pad], [0, 0]],
                          mode='CONSTANT',
                          constant_values=-10000)

        reference_tensors = []
        for i_begin in range(0, neighborhood_size[0]):
            i_end = i_begin - neighborhood_size[0] + 1
            i_end = None if i_end == 0 else i_end
            for j_begin in range(0, neighborhood_size[1]):
                j_end = j_begin - neighborhood_size[1] + 1
                j_end = None if j_end == 0 else j_end
                sub_tensor = val_pad[:, i_begin:i_end, j_begin:j_end, :]
                reference_tensors.append(ktf.expand_dims(sub_tensor, -1))
        reference = ktf.concat(reference_tensors, axis=-1)
        target = ktf.expand_dims(target, axis=-1)

        abs_diff = ktf.abs(reference - target)
        norms = ktf.reduce_sum(abs_diff, axis=-2)
        loss = ktf.reduce_min(norms, axis=-1)

        return loss
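
The loop builds one shifted view of the padded reference per neighborhood offset, so the per-pixel nearest-neighbour search becomes a single reduce_min over a new trailing axis. A standalone sketch of the shifted-slice trick for a 3x3 neighbourhood (shapes illustrative; tf.stack stands in for the expand_dims/concat pair):

import tensorflow as tf

ref = tf.random.normal((1, 8, 8, 16))  # NHWC feature map
padded = tf.pad(ref, [[0, 0], [1, 1], [1, 1], [0, 0]])

views = []
for di in range(3):
    for dj in range(3):
        views.append(padded[:, di:di + 8, dj:dj + 8, :])
neighbours = tf.stack(views, axis=-1)  # (1, 8, 8, 16, 9): one slice per offset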
Example #6
    def get_gradient_penalty_loss(self):
        if self.gradient_penalty_weight == 0:
            return []

        if isinstance(self.discriminator_input, list):
            batch_size = ktf.shape(self.discriminator_input[0])[0]
            ranks = [len(inp.get_shape().as_list()) for inp in self.discriminator_input]
        else:
            batch_size = ktf.shape(self.discriminator_input)[0]
            ranks = [len(self.discriminator_input.get_shape().as_list())]

        def cast_all(values, reference_type_vals):
            return [ktf.cast(alpha, dtype=ref.dtype) for alpha, ref in zip(values, reference_type_vals)]

        def std_if_not_int(val):
            if val.dtype.is_integer:
                return 0
            else:
                return ktf.stop_gradient(K.std(val, keepdims=True))

        def points_for_wgan_gp():
            weights = ktf.random_uniform((batch_size, 1), minval=0, maxval=1)
            weights = [ktf.reshape(weights, (-1, ) + (1, ) * (rank - 1)) for rank in ranks]
            weights = cast_all(weights, self.discriminator_input)
            points = [(w * r) + ((1 - w) * f) for r, f, w in zip(self.discriminator_input, self.generator_output, weights)]
            return points

        def points_for_dragan():
            alphas = ktf.random_uniform((batch_size, 1), minval=0, maxval=1)
            alphas = [ktf.reshape(alphas, (-1, ) + (1, ) * (rank - 1)) for rank in ranks]
            alphas = cast_all(alphas, self.discriminator_input)
            # DRAGAN perturbs around the real samples: x + 0.5 * std(x) * U(0, 1).
            noise = [ktf.random_uniform(ktf.shape(t), minval=0, maxval=1) * std_if_not_int(t) * 0.5
                     for t in self.discriminator_input]
            noise = cast_all(noise, self.discriminator_input)
            fake = [t + n for t, n in zip(self.discriminator_input, noise)]

            points = [(w * r) + ((1 - w) * f) for r, f, w in zip(self.discriminator_input, fake, alphas)]
            return points

        # Build only the subgraph for the selected penalty type.
        points_fn = {'wgan-gp': points_for_wgan_gp, 'dragan': points_for_dragan}
        points = points_fn[self.gradient_penalty_type]()

        gp_list = []
        disc_out = self.discriminator(points)
        if not isinstance(disc_out, list):
            disc_out = [disc_out]
        gradients = ktf.gradients(disc_out[0], points)

        for gradient in gradients:
            if gradient is None:
                continue
            gradient = ktf.reshape(gradient, (batch_size, -1))
            gradient_l2_norm = ktf.sqrt(ktf.reduce_sum(ktf.square(gradient), axis=1))
            gradient_penalty = self.gradient_penalty_weight * ktf.square(1 - gradient_l2_norm)
            gp_list.append(ktf.reduce_mean(gradient_penalty))

        for i in range(len(gp_list)):
            self.discriminator_metric_names.append('gp_loss_' + str(i))
        return gp_list
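
points_for_wgan_gp draws one interpolation weight per sample and broadcasts it over the remaining axes, placing the penalty points on straight lines between real and generated samples. A minimal sketch with a single rank-4 input (shapes here are illustrative):

import tensorflow as tf

real = tf.random.normal((8, 32, 32, 3))
fake = tf.random.normal((8, 32, 32, 3))

w = tf.random.uniform((8, 1, 1, 1))  # one weight per sample, broadcast over H, W, C
points = w * real + (1 - w) * fake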
Example #7
def degrees(A):
    """
    Computes the degrees of each node in A, dealing with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor of rank k - 1 (dense in both cases, since
        tf.sparse.reduce_sum returns a dense Tensor).
    """
    if K.is_sparse(A):
        D = tf.sparse.reduce_sum(A, axis=-1)
    else:
        D = tf.reduce_sum(A, axis=-1)

    return D
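
An illustrative call on a dense 3-node adjacency matrix (K inside degrees is assumed to be the Keras backend imported by the defining module):

import tensorflow as tf

A = tf.constant([[0., 1., 1.],
                 [1., 0., 0.],
                 [1., 0., 0.]])
D = degrees(A)  # dense degree vector: [2., 1., 1.]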
Example #8
    def call(self, inputs):
        # Note that I is useless, because the layer cannot be used in graph
        # batch mode.
        if len(inputs) == 3:
            X, A, I = inputs
        else:
            X, A = inputs
            I = None

        N = K.shape(A)[-1]
        # Check if the layer is operating in batch mode (X and A have rank 3)
        batch_mode = K.ndim(A) == 3

        # Get normalized adjacency
        if K.is_sparse(A):
            I_ = tf.sparse.eye(N, dtype=A.dtype)
            A_ = tf.sparse.add(A, I_)
        else:
            I_ = tf.eye(N, dtype=A.dtype)
            A_ = A + I_
        fltr = ops.normalize_A(A_)

        # Node embeddings
        Z = K.dot(X, self.kernel_emb)
        Z = ops.filter_dot(fltr, Z)
        if self.activation is not None:
            Z = self.activation(Z)

        # Compute cluster assignment matrix
        S = K.dot(X, self.kernel_pool)
        S = ops.filter_dot(fltr, S)
        S = activations.softmax(S, axis=-1)  # softmax applied row-wise

        # Link prediction loss
        S_gram = ops.matmul_A_BT(S, S)
        if K.is_sparse(A):
            LP_loss = tf.sparse.add(
                A, -S_gram)  # A/tf.norm(A) - S_gram/tf.norm(S_gram)
        else:
            LP_loss = A - S_gram
        LP_loss = tf.norm(LP_loss, axis=(-1, -2))
        if batch_mode:
            LP_loss = K.mean(LP_loss)
        self.add_loss(LP_loss)

        # Entropy loss
        entr = tf.negative(
            tf.reduce_sum(tf.multiply(S, K.log(S + K.epsilon())), axis=-1))
        entr_loss = K.mean(entr, axis=-1)
        if batch_mode:
            entr_loss = K.mean(entr_loss)
        self.add_loss(entr_loss)

        # Pooling
        X_pooled = ops.matmul_AT_B(S, Z)
        A_pooled = ops.matmul_AT_B_A(S, A)

        output = [X_pooled, A_pooled]

        if I is not None:
            I_mean = tf.segment_mean(I, I)
            I_pooled = ops.tf_repeat_1d(I_mean, tf.ones_like(I_mean) * self.k)
            output.append(I_pooled)

        if self.return_mask:
            output.append(S)

        return output
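
The pooling step contracts the node dimension down to the number of clusters: with S of shape (N, k) and Z of shape (N, channels), S^T Z gives the pooled features and S^T A S the pooled adjacency. A shape check in plain TensorFlow, standing in for the ops helpers (sizes here are illustrative):

import tensorflow as tf

N, k, channels = 6, 2, 4
S = tf.nn.softmax(tf.random.normal((N, k)), axis=-1)  # row-wise cluster assignments
Z = tf.random.normal((N, channels))                   # node embeddings
A = tf.random.normal((N, N))

X_pooled = tf.matmul(S, Z, transpose_a=True)                # (k, channels)
A_pooled = tf.matmul(tf.matmul(S, A, transpose_a=True), S)  # (k, k)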