Example #1
File: models.py Project: hyzcn/cnn_graph
    def _inference(self, x, dropout):
        with tf.name_scope('conv1'):
            # Transform to Fourier domain
            x_2d = tf.reshape(x, [-1, 28, 28])
            x_2d = tf.complex(x_2d, 0)
            xf_2d = tf.fft2d(x_2d)
            xf = tf.reshape(xf_2d, [-1, NFEATURES])
            xf = tf.expand_dims(xf, 1)  # NSAMPLES x 1 x NFEATURES
            xf = tf.transpose(xf)  # NFEATURES x 1 x NSAMPLES
            # Filter
            Wreal = self._weight_variable([int(NFEATURES/2), self.F, 1])
            Wimg = self._weight_variable([int(NFEATURES/2), self.F, 1])
            W = tf.complex(Wreal, Wimg)
            xf = xf[:int(NFEATURES/2), :, :]
            yf = tf.matmul(W, xf)  # for each feature
            yf = tf.concat([yf, tf.conj(yf)], axis=0)
            yf = tf.transpose(yf)  # NSAMPLES x NFILTERS x NFEATURES
            yf_2d = tf.reshape(yf, [-1, 28, 28])
            # Transform back to spatial domain
            y_2d = tf.ifft2d(yf_2d)
            y_2d = tf.real(y_2d)
            y = tf.reshape(y_2d, [-1, self.F, NFEATURES])
            # Bias and non-linearity
            b = self._bias_variable([1, self.F, 1])
#            b = self._bias_variable([1, self.F, NFEATURES])
            y += b  # NSAMPLES x NFILTERS x NFEATURES
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*NFEATURES, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*NFEATURES])
            y = tf.matmul(y, W) + b
        return y
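The conv1 block above learns filters on only the first half of the spectrum and fills in the second half with conjugates so that the inverse FFT comes out (approximately) real. A minimal NumPy sketch of the conjugate symmetry being relied on, independent of the snippet's TF version:

import numpy as np

x = np.random.randn(8)  # real signal
X = np.fft.fft(x)
# The spectrum of a real signal is conjugate-symmetric: X[k] == conj(X[N-k]),
# which is why only half of the coefficients need to be parameterized.
assert np.allclose(X[1:], np.conj(X[1:][::-1]))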
  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    shape = list(shape)
    diag_shape = shape[:-1]

    # Upper triangle will be ignored.
    # Use a diagonal that ensures this matrix is well conditioned.
    tril = tf.random_normal(shape=shape, dtype=dtype.real_dtype)
    diag = tf.random_uniform(
        shape=diag_shape, dtype=dtype.real_dtype, minval=2., maxval=3.)
    if dtype.is_complex:
      tril = tf.complex(
          tril, tf.random_normal(shape, dtype=dtype.real_dtype))
      diag = tf.complex(
          diag, tf.random_uniform(
              shape=diag_shape, dtype=dtype.real_dtype, minval=2., maxval=3.))

    tril = tf.matrix_set_diag(tril, diag)

    tril_ph = tf.placeholder(dtype=dtype)

    if use_placeholder:
      # Evaluate the tril here because (i) you cannot feed a tensor, and (ii)
      # tril is random and we want the same value used for both mat and
      # feed_dict.
      tril = tril.eval()
      operator = linalg.LinearOperatorTriL(tril_ph)
      feed_dict = {tril_ph: tril}
    else:
      operator = linalg.LinearOperatorTriL(tril)
      feed_dict = None

    mat = tf.matrix_band_part(tril, -1, 0)

    return operator, mat, feed_dict
    def __loss__(self):
        """
        Calculate loss
        :return:
        """

        # Context loss L2
        predict_image = tf.abs(tf.complex(real=self.predict_g2['real'], imag=self.predict_g2['imag']))
        label_image = tf.abs(tf.complex(real=self.labels['real'], imag=self.labels['imag']))
        self.context_loss = tf.reduce_mean(tf.square(tf.contrib.layers.flatten(predict_image - label_image)))

        # self.context_loss = tf.reduce_mean(tf.square(real_diff) + tf.square(imag_diff), name='Context_loss_mean')
        print("You are using L2 loss")

        tf.summary.scalar('g_loss_context_only', self.context_loss, collections='G2')

        self.g_loss = self.FLAGS.gen_loss_context * self.context_loss
        # self.g_loss = self.FLAGS.gen_loss_adversarial * g_loss + self.FLAGS.gen_loss_context * context_loss
        tf.summary.scalar('g_loss_plus_context', self.g_loss, collections='G2')

        # if len(self.regularization_values) > 0:
        # reg_loss_g = self.reg_w * tf.reduce_sum(self.regularization_values)
        self.reg_loss_g = self.get_weights_regularization(dump=self.FLAGS.dump_debug, collection='G2')
        self.g_loss_no_reg = self.g_loss
        self.g_loss += self.reg_loss_g
        if self.FLAGS.dump_debug:
            tf.summary.scalar('g_loss_plus_context_plus_reg', self.g_loss, collections='G2')
            tf.summary.scalar('g_loss_reg_only', self.reg_loss_g, collections='D')
    def __D__(self, input_d):
        """
        Define the discriminator
        """
        # input_d holds real & imaginary values; the discriminator's decision is based on the reconstructed image.
        input_to_discriminator = input_d
        org = input_to_discriminator[0]
        fake = input_to_discriminator[1]

        rec_org = tf.abs(tf.expand_dims(input=tf.complex(real=org[:, 0, :, :], imag=org[:, 1, :, :]), dim=1))
        rec_fake = tf.abs(tf.expand_dims(input=tf.complex(real=fake[:, 0, :, :], imag=fake[:, 1, :, :]), dim=1))
        tf.summary.image('D_x_input_reconstructed' + 'Original', tf.transpose(rec_org, (0,2,3,1)), collections='D', max_outputs=4)
        tf.summary.image('D_x_input_reconstructed' + 'Fake', tf.transpose(rec_fake, (0,2,3,1)), collections='G2', max_outputs=4)
        input_to_discriminator = tf.concat(input_to_discriminator, axis=0)

        # Model convolutions
        out_dim = 8  # 128x128
        self.conv_1_d = ops.conv2d(input_to_discriminator, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1, name="D_conv_1")
        self.pool_1_d = tf.layers.max_pooling2d(self.conv_1_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_1")
        self.conv_1_bn_d = ops.batch_norm(self.pool_1_d, self.train_phase, decay=0.98, name="D_bn1")
        # self.relu_1_d = tf.nn.relu(self.conv_1_bn_d)
        self.relu_1_d = ops.lrelu(self.conv_1_bn_d, name="D_relu1")

        out_dim = 16  # 64x64
        self.conv_2_d = ops.conv2d(self.relu_1_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
                                            name="D_conv_2")
        self.pool_2_d = tf.layers.max_pooling2d(self.conv_2_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_2")
        self.conv_2_bn_d = ops.batch_norm(self.pool_2_d, self.train_phase, decay=0.98, name="D_bn2")
        # self.relu_2_d = tf.nn.relu(self.conv_2_bn_d)
        self.relu_2_d = ops.lrelu(self.conv_2_bn_d, name="D_relu2")

        # out_dim = 32  # 32x32
        out_dim = 8  # 32x32
        self.conv_3_d = ops.conv2d(self.relu_2_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
                                            name="D_conv_3")
        self.pool_3_d = tf.layers.max_pooling2d(self.conv_3_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_3")
        self.conv_3_bn_d = ops.batch_norm(self.pool_3_d, self.train_phase, decay=0.98, name="D_bn3")
        # self.relu_3_d = tf.nn.relu(self.conv_3_bn_d)
        self.relu_3_d = ops.lrelu(self.conv_3_bn_d, name="D_relu3")

        # out_dim = 16  # 16x16
        # self.conv_4_d = ops.conv2d(self.relu_3_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
        #                                     name="D_conv_4")
        #self.pool_4_d = tf.layers.max_pooling2d(self.conv_4_d, pool_size=[2, 2], strides=2, padding='same',
        #                                      data_format='channels_first',name="D_pool_4")
        # self.conv_4_bn_d = ops.batch_norm(self.pool_4_d, self.train_phase, decay=0.98, name="D_bn4")
        # # self.relu_4_d = tf.nn.relu(self.conv_4_bn_d)
        # self.relu_4_d = ops.lrelu(self.conv_4_bn_d)

        out_dim = 1
        self.affine_1_d = ops.linear(tf.contrib.layers.flatten(self.relu_3_d), output_size=out_dim, scope="D_affine_1")
        predict_d = self.affine_1_d
        # Dump prediction out

        return tf.nn.sigmoid(predict_d), predict_d
Example #5
    def bilinear_pool(self, x1, x2):

        p1 = tf.matmul(x1, self.C[0])
        p2 = tf.matmul(x2, self.C[1])
        pc1 = tf.complex(p1, tf.zeros_like(p1))
        pc2 = tf.complex(p2, tf.zeros_like(p2))

        conved = tf.batch_ifft(tf.batch_fft(pc1) * tf.batch_fft(pc2))
        return tf.real(conved)
    def __loss__(self):
        """
        Calculate loss
        :return:
        """
        with tf.variable_scope("discriminator") as scope:
            self.d_loss_real = tf.reduce_mean(self.predict_d_logits)
            tf.summary.scalar('d_loss_real', self.d_loss_real, collections='D')
            # scope.reuse_variables()
            self.d_loss_fake = tf.reduce_mean(self.predict_d_logits_for_g)
            tf.summary.scalar('d_loss_fake', self.d_loss_fake, collections='D')

            if self.FLAGS.dump_debug:
                tf.summary.image('D_predict_real', tf.transpose(tf.reshape(self.predict_d_logits,(-1,1,1,1)), (0, 2, 3, 1)), collections='D')
                tf.summary.image('D_predict_fake', tf.transpose(tf.reshape(self.predict_d_logits_for_g, (-1,1,1,1)), (0, 2, 3, 1)), collections='D')

        self.d_loss = self.d_loss_fake - self.d_loss_real
        tf.summary.scalar('d_loss', self.d_loss, collections='D')

        self.reg_loss_d = self.get_weights_regularization(dump=self.FLAGS.dump_debug, collection='D')
        self.d_loss_no_reg = self.d_loss
        self.d_loss += self.reg_loss_d
        if self.FLAGS.dump_debug:
            tf.summary.scalar('d_loss_plus_reg', self.d_loss, collections='D')
            tf.summary.scalar('d_loss_reg_only', self.reg_loss_d, collections='D')

        # Generative loss
        # g_loss = tf.reduce_mean(ops.binary_cross_entropy(preds=self.predict_d_for_g, targets=tf.ones_like(self.predict_d_for_g)))
        g_loss = -tf.reduce_mean(self.predict_d_logits_for_g)

        tf.summary.scalar('g_loss', g_loss, collections='G2')

        # Context loss L2
        predict_image = tf.abs(tf.complex(real=self.predict_g2['real'], imag=self.predict_g2['imag']))
        label_image = tf.abs(tf.complex(real=self.labels['real'], imag=self.labels['imag']))
        self.context_loss = tf.reduce_mean(tf.square(tf.contrib.layers.flatten(predict_image - label_image)))

        # self.context_loss = tf.reduce_mean(tf.square(real_diff) + tf.square(imag_diff), name='Context_loss_mean')
        print("You are using L2 loss")

        tf.summary.scalar('g_loss_context_only', self.context_loss, collections='G2')

        self.g_loss = self.adv_loss_w * g_loss + self.FLAGS.gen_loss_context * self.context_loss
        # self.g_loss = self.FLAGS.gen_loss_adversarial * g_loss + self.FLAGS.gen_loss_context * context_loss
        tf.summary.scalar('g_loss_plus_context', self.g_loss, collections='G2')

        # if len(self.regularization_values) > 0:
        # reg_loss_g = self.reg_w * tf.reduce_sum(self.regularization_values)
        self.reg_loss_g = self.get_weights_regularization(dump=self.FLAGS.dump_debug, collection='G2')
        self.g_loss_no_reg = self.g_loss
        self.g_loss += self.reg_loss_g
        if self.FLAGS.dump_debug:
            tf.summary.scalar('g_loss_plus_context_plus_reg', self.g_loss, collections='G2')
            tf.summary.scalar('g_loss_reg_only', self.reg_loss_g, collections='D')

        tf.summary.scalar('diff-loss', tf.abs(self.d_loss - self.g_loss), collections='G2')
Example #7
    def __D__(self, input_d, input_type):
        """
        Define the discriminator
        """
        # Dump input image out
        input_real = tf.concat(axis=0, values=[input_d[0]['real'], input_d[1]['real']])
        input_imag = tf.concat(axis=0, values=[input_d[0]['imag'], input_d[1]['imag']])
        input_to_discriminator = tf.concat([input_real, input_imag], axis=1)

        org, fake = tf.split(input_to_discriminator, num_or_size_splits=2, axis=0)
        #
        org = tf.reshape(tf.abs(tf.complex(real=tf.squeeze(org[:,0,:,:]), imag=tf.squeeze(org[:,1,:,:]))), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        fake = tf.reshape(tf.abs(tf.complex(real=tf.squeeze(fake[:,0,:,:]), imag=tf.squeeze(fake[:,1,:,:]))), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        #
        tf.summary.image('D_x_input_reconstructed' + 'Original', tf.transpose(org, (0,2,3,1)), collections='D', max_outputs=4)
        tf.summary.image('D_x_input_reconstructed' + 'Fake', tf.transpose(fake, (0,2,3,1)), collections='G', max_outputs=4)

        # Model convolutions
        out_dim = 8  # 128x128
        self.conv_1_d = ops.conv2d(input_to_discriminator, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1, name="D_conv_1")
        self.pool_1_d = tf.layers.max_pooling2d(self.conv_1_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_1")
        self.conv_1_bn_d = ops.batch_norm(self.pool_1_d, self.train_phase, decay=0.98, name="D_bn1")
        # self.relu_1_d = tf.nn.relu(self.conv_1_bn_d)
        self.relu_1_d = ops.lrelu(self.conv_1_bn_d)

        out_dim = 16  # 64x64
        self.conv_2_d = ops.conv2d(self.relu_1_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
                                            name="D_conv_2")
        self.pool_2_d = tf.layers.max_pooling2d(self.conv_2_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_2")
        self.conv_2_bn_d = ops.batch_norm(self.pool_2_d, self.train_phase, decay=0.98, name="D_bn2")
        # self.relu_2_d = tf.nn.relu(self.conv_2_bn_d)
        self.relu_2_d = ops.lrelu(self.conv_2_bn_d)

        # out_dim = 32  # 32x32
        out_dim = 8  # 32x32
        self.conv_3_d = ops.conv2d(self.relu_2_d, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1,
                                            name="D_conv_3")
        self.pool_3_d = tf.layers.max_pooling2d(self.conv_3_d, pool_size=[2, 2], strides=2, padding='same',
                                              data_format='channels_first',name="D_pool_3")
        self.conv_3_bn_d = ops.batch_norm(self.pool_3_d, self.train_phase, decay=0.98, name="D_bn3")
        self.relu_3_d = ops.lrelu(self.conv_3_bn_d)

        out_dim = 1
        self.affine_1_d = ops.linear(tf.contrib.layers.flatten(self.relu_3_d), output_size=out_dim, scope="D_affine_1")
        predict_d = self.affine_1_d
        # Dump prediction out

        return tf.nn.sigmoid(predict_d), predict_d
Example #8
    def __D__(self, input_d, input_type):
        """
        Define the discriminator
        """
        # Dump input image out
        input_real = tf.concat(axis=0, values=[input_d[0]['real'], input_d[1]['real']])
        input_imag = tf.concat(axis=0, values=[input_d[0]['imag'], input_d[1]['imag']])

        # input_d holds real & imaginary values; the discriminator's decision is based on the reconstructed image.
        input_to_discriminator = self.get_reconstructed_image(real=input_real, imag=input_imag, name='Both')

        org, fake = tf.split(input_to_discriminator, num_or_size_splits=2, axis=0)

        org = tf.reshape(tf.abs(tf.complex(real=tf.squeeze(org[:, 0, :, :]), imag=tf.squeeze(org[:, 1, :, :]))),
                         shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        fake = tf.reshape(tf.abs(tf.complex(real=tf.squeeze(fake[:, 0, :, :]), imag=tf.squeeze(fake[:, 1, :, :]))),
                          shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

        tf.summary.image('D_x_input_reconstructed' + 'Original', tf.transpose(org, (0, 2, 3, 1)), collections='D',
                         max_outputs=4)
        tf.summary.image('D_x_input_reconstructed' + 'Fake', tf.transpose(fake, (0, 2, 3, 1)), collections='G',
                         max_outputs=4)

        # Model convolutions
        out_dim = 8    # 256x256 => 128x128
        conv1, pool1 = ops.conv_conv_pool(input_to_discriminator, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                          training=self.train_phase, name='D_block_1')

        out_dim = 16   # 128x128 => 64x64
        conv2, pool2 = ops.conv_conv_pool(pool1, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                          training=self.train_phase, name='D_block_2')

        out_dim = 32   # 64x64 => 32x32
        conv3, pool3 = ops.conv_conv_pool(pool2, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                          training=self.train_phase, name='D_block_3')

        out_dim = 64   # 32x32 => 16x16
        conv4, pool4 = ops.conv_conv_pool(pool3, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                          training=self.train_phase, name='D_block_4')

        out_dim = 128  # 16x16
        conv5 = ops.conv_conv_pool(pool4, n_filters=[out_dim, out_dim], activation=tf.nn.relu,
                                   training=self.train_phase, name='D_block_5', pool=False)

        out_dim = 1
        self.affine_1_d = ops.linear(tf.contrib.layers.flatten(conv5), output_size=out_dim, scope="D_affine_1")
        predict_d = self.affine_1_d
        # Dump prediction out

        return tf.nn.sigmoid(predict_d), predict_d
 def random_spatial_to_spectral(self, channels, filters, height, width):
     # Create a truncated random image, then compute the FFT of that image and return its values.
     # Used to initialize spectrally parameterized filters;
     # an alternative is to initialize directly in the spectral domain.
     w = tf.truncated_normal([channels, filters, height, width], mean=0, stddev=0.01)
     fft = tf.batch_fft2d(tf.complex(w, 0.0 * w), name='spectral_initializer')
     return fft.eval(session=self.sess)
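tf.batch_fft2d comes from a very old TF release; a rough modern equivalent of the same initializer, as a sketch assuming TF 2.x's tf.signal API and example dimensions:

w = tf.random.truncated_normal([3, 8, 16, 16], stddev=0.01)  # channels, filters, h, w
init = tf.signal.fft2d(tf.complex(w, tf.zeros_like(w)))      # complex64, same shape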
    def __D__(self, input_d):
        """
        Define the discriminator
        """
        # input_d holds real & imaginary values; the discriminator's decision is based on the reconstructed image.
        input_to_discriminator = input_d
        org = input_to_discriminator[0]
        fake = input_to_discriminator[1]

        rec_org = tf.abs(tf.expand_dims(input=tf.complex(real=org[:, 0, :, :], imag=org[:, 1, :, :]), dim=1))
        rec_fake = tf.abs(tf.expand_dims(input=tf.complex(real=fake[:, 0, :, :], imag=fake[:, 1, :, :]), dim=1))
        tf.summary.image('D_x_input_reconstructed' + 'Original', tf.transpose(rec_org, (0,2,3,1)), collections='D', max_outputs=4)
        tf.summary.image('D_x_input_reconstructed' + 'Fake', tf.transpose(rec_fake, (0,2,3,1)), collections='G2', max_outputs=4)
        input_to_discriminator = tf.concat(input_to_discriminator, axis=0)

        return None
 def test_complex_tensor_with_nonzero_imag_raises(self):
   x = tf.convert_to_tensor([1., 2, 0])
   y = tf.convert_to_tensor([1., 2, 0])
   z = tf.complex(x, y)
   with self.test_session():
     with self.assertRaisesOpError("ABC123"):
       linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    shape = list(shape)
    diag_shape = shape[:-1]

    diag = tf.random_normal(diag_shape, dtype=dtype.real_dtype)
    if dtype.is_complex:
      diag = tf.complex(
          diag, tf.random_normal(diag_shape, dtype=dtype.real_dtype))

    diag_ph = tf.placeholder(dtype=dtype)

    if use_placeholder:
      # Evaluate the diag here because (i) you cannot feed a tensor, and (ii)
      # diag is random and we want the same value used for both mat and
      # feed_dict.
      diag = diag.eval()
      operator = linalg.LinearOperatorDiag(diag_ph)
      feed_dict = {diag_ph: diag}
    else:
      operator = linalg.LinearOperatorDiag(diag)
      feed_dict = None

    mat = tf.matrix_diag(diag)

    return operator, mat, feed_dict
    def get_reconstructed_image(self, real, imag, name=None):
        """
        :param real:
        :param imag:
        :param name:
        :return:
        """
        complex_k_space_label = tf.complex(real=tf.squeeze(real), imag=tf.squeeze(imag), name=name+"_complex_k_space")
        rec_image_complex = tf.expand_dims(tf.ifft2d(complex_k_space_label), axis=1)
        
        rec_image_real = tf.reshape(tf.real(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        rec_image_imag = tf.reshape(tf.imag(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

        # Shifting
        top, bottom = tf.split(rec_image_real, num_or_size_splits=2, axis=2)
        top_left, top_right = tf.split(top, num_or_size_splits=2, axis=3)
        bottom_left, bottom_right = tf.split(bottom, num_or_size_splits=2, axis=3)

        top_shift = tf.concat(axis=3, values=[bottom_right, bottom_left])
        bottom_shift = tf.concat(axis=3, values=[top_right, top_left])
        shifted_image = tf.concat(axis=2, values=[top_shift, bottom_shift])


        # Shifting
        top_imag, bottom_imag = tf.split(rec_image_imag, num_or_size_splits=2, axis=2)
        top_left_imag, top_right_imag = tf.split(top_imag, num_or_size_splits=2, axis=3)
        bottom_left_imag, bottom_right_imag = tf.split(bottom_imag, num_or_size_splits=2, axis=3)

        top_shift_imag = tf.concat(axis=3, values=[bottom_right_imag, bottom_left_imag])
        bottom_shift_imag = tf.concat(axis=3, values=[top_right_imag, top_left_imag])
        shifted_image_imag = tf.concat(axis=2, values=[top_shift_imag, bottom_shift_imag])

        shifted_image_two_channels = tf.stack([shifted_image[:,0,:,:], shifted_image_imag[:,0,:,:]], axis=1)
        return shifted_image_two_channels
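The two "Shifting" blocks above implement an FFT-shift by hand: for even-sized planes, swapping the four quadrants is exactly np.fft.fftshift on the spatial axes. A quick NumPy check of that equivalence:

import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
top, bottom = np.split(x, 2, axis=2)
top_left, top_right = np.split(top, 2, axis=3)
bottom_left, bottom_right = np.split(bottom, 2, axis=3)
shifted = np.concatenate([
    np.concatenate([bottom_right, bottom_left], axis=3),
    np.concatenate([top_right, top_left], axis=3),
], axis=2)
assert np.array_equal(shifted, np.fft.fftshift(x, axes=(2, 3)))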
 def test_assert_positive_definite_does_not_raise_if_pd_and_complex(self):
   with self.test_session():
     x = [1., 2.]
     y = [1., 0.]
     diag = tf.complex(x, y)  # Re[diag] > 0.
     # Should not fail
     linalg.LinearOperatorDiag(diag).assert_positive_definite().run()
 def test_assert_non_singular_does_not_raise_for_complex_nonsingular(self):
   with self.test_session():
     x = [1., 0.]
     y = [0., 1.]
     diag = tf.complex(x, y)
     # Should not raise.
     linalg.LinearOperatorDiag(diag).assert_non_singular().run()
    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            unitary_hidden_state, secondary_cell_hidden_state = tf.split(1, 2, state)

            mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size*2])
            mat_out = tf.get_variable('mat_out', [self.state_size*2, self.output_size])
            in_proj = tf.matmul(inputs, mat_in)
            # tf.complex takes separate real and imaginary tensors.
            in_proj_c = tf.complex(*tf.split(1, 2, in_proj))
            out_state = modReLU(in_proj_c +
                ulinear(unitary_hidden_state, self.state_size),
                tf.get_variable(name='bias', dtype=tf.float32,
                                shape=unitary_hidden_state.get_shape(),
                                initializer=tf.constant_initializer(0.)),
                scope=scope)

        with tf.variable_scope('unitary_output'):
            '''computes data linear, unitary linear and summation -- TODO: should be complex output'''
            unitary_linear_output_real = linear.linear([tf.real(out_state), tf.imag(out_state), inputs], True, 0.0)

        with tf.variable_scope('scale_nonlinearity'):
            # hidden_bias was undefined in the original snippet; declared here.
            hidden_bias = tf.get_variable('hidden_bias', [self.state_size],
                                          initializer=tf.constant_initializer(0.))
            modulus = tf.complex_abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        # Transition to data shortcut connection.
        # out_bias was undefined in the original snippet; declared here.
        out_bias = tf.get_variable('out_bias', [self.output_size],
                                   initializer=tf.constant_initializer(0.))
        out_ = tf.matmul(tf.concat(1, [tf.real(out_state), tf.imag(out_state)]), mat_out) + out_bias

        # Hidden state is complex but the output is completely real.
        return out_, out_state
 def test_complex_tensor_with_imag_zero_doesnt_raise(self):
   x = tf.convert_to_tensor([1., 0, 3])
   y = tf.convert_to_tensor([0., 0, 0])
   z = tf.complex(x, y)
   with self.test_session():
     # Should not raise.
     linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
def random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY,  where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape:  `TensorShape` or Python list.  Shape of the returned tensor.
    mean:  `Tensor` giving mean of normal to sample from.
    stddev:  `Tensor` giving stdev of normal to sample from.
    dtype:  `TensorFlow` `dtype` or numpy dtype
    seed:  Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = tf.as_dtype(dtype)

  with tf.name_scope("random_normal"):
    samples = tf.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = tf.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = tf.complex(samples, more_samples)
    return samples
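A usage sketch for the helper above (hypothetical shapes; under TF 1.x the tensors would be evaluated inside a session):

r = random_normal([2, 3], dtype=tf.float32, seed=0)    # plain N(0, 1) samples
c = random_normal([2, 3], dtype=tf.complex64, seed=0)  # real and imaginary parts drawn independently
# with tf.Session() as sess:
#     print(sess.run([r, c]))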
 def test_nonzero_complex_tensor_doesnt_raise(self):
   x = tf.convert_to_tensor([1., 0, 3])
   y = tf.convert_to_tensor([1., 2, 0])
   z = tf.complex(x, y)
   with self.test_session():
     # Should not raise.
     linear_operator_util.assert_no_entries_with_modulus_zero(
         z, message="ABC123").run()
 def test_zero_complex_tensor_raises(self):
   x = tf.convert_to_tensor([1., 2, 0])
   y = tf.convert_to_tensor([1., 2, 0])
   z = tf.complex(x, y)
   with self.test_session():
     with self.assertRaisesOpError("ABC123"):
       linear_operator_util.assert_no_entries_with_modulus_zero(
           z, message="ABC123").run()
 def test_assert_self_adjoint_raises_if_diag_has_complex_part(self):
   with self.test_session():
     x = [1., 0.]
     y = [0., 1.]
     diag = tf.complex(x, y)
     operator = linalg.LinearOperatorDiag(diag)
     with self.assertRaisesOpError("imaginary.*not self-adjoint"):
       operator.assert_self_adjoint().run()
 def test_assert_positive_definite_raises_for_negative_real_eigvalues(self):
   with self.test_session():
     diag_x = [1.0, -2.0]
     diag_y = [0., 0.]  # Imaginary eigenvalues should not matter.
     diag = tf.complex(diag_x, diag_y)
     operator = linalg.LinearOperatorDiag(diag)
     with self.assertRaisesOpError("non-positive real.*not positive definite"):
       operator.assert_positive_definite().run()
Example #23
def random_herm(D, dtype):
    if dtype.is_complex:
        h = tf.complex(
            tf.random_normal((D, D), dtype=dtype.real_dtype),
            tf.random_normal((D, D), dtype=dtype.real_dtype))
    else:
        h = tf.random_normal((D, D), dtype=dtype)
    return 0.5 * (h + tf.linalg.adjoint(h))
 def test_assert_self_adjoint_does_not_raise_for_diag_with_zero_imag(self):
   with self.test_session():
     x = [1., 0.]
     y = [0., 0.]
     diag = tf.complex(x, y)
     operator = linalg.LinearOperatorDiag(diag)
     # Should not raise
     operator.assert_self_adjoint().run()
Example #25
 def _compareMake(self, real, imag, use_gpu):
   np_ans = real + (1j) * imag
   with self.test_session(use_gpu=use_gpu):
     real = tf.convert_to_tensor(real)
     imag = tf.convert_to_tensor(imag)
     tf_ans = tf.complex(real, imag)
     out = tf_ans.eval()
   self.assertAllEqual(np_ans, out)
   self.assertShapeEqual(np_ans, tf_ans)
Example #26
    def __G2__(self, g2_input):
        """
        This network gets the generator's output (estimated k-space) and
        fine-tunes the reconstructed image
        :return:
        """

        # g2_input holds real & imaginary values; G2 operates on the reconstructed image.
        reconstructed_image = self.get_reconstructed_image(real=g2_input['real'], imag=g2_input['imag'], name='Both')

        reconstructed_image = tf.expand_dims(input=tf.complex(real=reconstructed_image[:, 0, :, :],
                                                             imag=reconstructed_image[:, 1, :, :]), dim=1)
        reconstructed_image = tf.abs(reconstructed_image)

        tf.summary.image('G2_reconstructed_input' + 'Fake_from_G', tf.transpose(reconstructed_image, (0, 2, 3, 1)),
                         collections='G', max_outputs=4)

        out_dim = 16  # -> 128x128
        # self.conv_1_g2 = ops.conv2d(reconstructed_image, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1, name="G_2_conv_1")
        self.conv_1_g2 = ops.conv2d(reconstructed_image, output_dim=out_dim, k_h=3, k_w=3, d_h=2, d_w=2, name="G_2_conv_1")
        self.conv_1_bn_g2 = ops.batch_norm(self.conv_1_g2, self.train_phase, decay=0.98, name="G_2_bn1")
        self.relu_1_g2 = tf.nn.relu(self.conv_1_bn_g2)

        out_dim = 32  # -> 64x64
        # self.conv_2_g2 = ops.conv2d(self.relu_1_g2, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1, name="G_2_conv_2")
        self.conv_2_g2 = ops.conv2d(self.relu_1_g2, output_dim=out_dim, k_h=3, k_w=3, d_h=2, d_w=2, name="G_2_conv_2")
        self.conv_2_bn_g2 = ops.batch_norm(self.conv_2_g2, self.train_phase, decay=0.98, name="G_2_bn2")
        self.relu_2_g2 = tf.nn.relu(self.conv_2_bn_g2)

        out_dim = 48  # -> 128x128
        out_shape = [self.batch_size, out_dim, 128, 128]
        self.conv_3_g2 = ops.conv2d_transpose(self.relu_2_g2, output_shape=out_shape, k_h=3, k_w=3, d_h=2, d_w=2,
                                              name="G_2_deconv_3")
        self.conv_3_bn_g2 = ops.batch_norm(self.conv_3_g2, self.train_phase, decay=0.98, name="G_3_bn3")
        self.relu_3_g2 = tf.nn.relu(self.conv_3_bn_g2)

        out_dim = 16  # -> 256x256
        out_shape = [self.batch_size, out_dim, 256, 256]
        self.conv_4_g2 = ops.conv2d_transpose(self.relu_3_g2, output_shape=out_shape, k_h=3, k_w=3, d_h=2, d_w=2,
                                              name="G_2_deconv_4")
        self.conv_4_bn_g2 = ops.batch_norm(self.conv_4_g2, self.train_phase, decay=0.98, name="G_3_bn4")
        self.relu_4_g2 = tf.nn.relu(self.conv_4_bn_g2)

        out_dim = 1
        self.conv_5_g2 = ops.conv2d(self.relu_4_g2, output_dim=out_dim, k_h=3, k_w=3, d_h=1, d_w=1, name="G_2_conv_5")

        self.g2_out = reconstructed_image - self.conv_5_g2

        if self.FLAGS.dump_debug:
            tf.summary.image('G_2_output_no_res', tf.transpose(self.conv_5_g2, (0, 2, 3, 1)), collections='G', max_outputs=4)
            tf.summary.image('G_2_output_plus_res', tf.transpose(self.g2_out, (0, 2, 3, 1)), collections='G', max_outputs=4)
            tf.summary.image('G_2_with_artifacts', tf.transpose(reconstructed_image, (0, 2, 3, 1)), collections='G', max_outputs=4)

        tf.add_to_collection("predict", self.g2_out)

        return self.g2_out
def bilinear_pool(x1, x2, output_size):
    """ Computes approximation of bilinear pooling with respect to x1, x2.
    For a detailed explanation, see the paper (https://arxiv.org/abs/1511.06062)

    Args:
      x1: A `Tensor` with shape (batch_size, x1_size).
      x2: A `Tensor` with shape (batch_size, x2_size).
      output_size: Output projection size. (`int`)

    Returns:
       A Tensor with shape (batch_size, output_size).
    """

    p1 = count_sketch(x1, output_size)
    p2 = count_sketch(x2, output_size)
    pc1 = tf.complex(p1, tf.zeros_like(p1))
    pc2 = tf.complex(p2, tf.zeros_like(p2))

    return tf.batch_ifft(tf.batch_fft(pc1) * tf.batch_fft(pc2))
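The element-wise product in the Fourier domain implements circular convolution of the two count-sketch vectors (the convolution theorem), which is the core of the compact bilinear pooling approximation. A self-contained NumPy sketch of that identity; note the helper returns a complex tensor, so callers typically take tf.real of it, as the class method earlier on this page does:

import numpy as np

a = np.random.randn(8)
b = np.random.randn(8)
via_fft = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
direct = np.array([sum(a[j] * b[(i - j) % 8] for j in range(8))
                   for i in range(8)])
assert np.allclose(via_fft, direct)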
Example #28
  def __init__(self, **kwargs):
    """
    """
    super(AlternatingRealToComplexLayer, self).__init__(**kwargs)

    input_placeholder = self.input_data.get_placeholder_as_batch_major()

    real_value = tf.strided_slice(input_placeholder, [0, 0, 0], tf.shape(input_placeholder), [1, 1, 2])
    imag_value = tf.strided_slice(input_placeholder, [0, 0, 1], tf.shape(input_placeholder), [1, 1, 2])
    self.output.placeholder = tf.complex(real_value, imag_value)
    self.output.size_placeholder = {0: self.input_data.size_placeholder[self.input_data.time_dim_axis_excluding_batch]}
Example #29
def random_isometry(D1, D2, dtype=tf.complex64):
    assert D1 <= D2
    if dtype.is_complex:
        A = tf.complex(
            tf.random_normal((D1, D2), dtype=dtype.real_dtype),
            tf.random_normal((D1, D2), dtype=dtype.real_dtype)) / math.sqrt(2)
    else:
        A = tf.random_normal((D1, D2), dtype=dtype)

    A_inv, _ = _uinv_decomp(tf.matmul(A, A, adjoint_b=True))

    return A_inv @ A
Example #30
def vector_environ(vec, spec, complex=False):
    loc = 0
    vd = {}
    for (nm, sz) in spec.items():
        vd[nm] = tf.slice(vec, [loc], [sz], name=nm)
        loc += sz
    if complex:
        for (nm, sz) in spec.items():
            rv = vd[nm]
            cv = tf.slice(vec, [loc], [sz], name=nm+'_j')
            vd[nm] = tf.complex(rv, cv, name=nm+'_c')
            loc += sz
    return vd
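A usage sketch for vector_environ with hypothetical values: the real parts of all named slices come first, followed by the matching imaginary parts in the same order.

vec = tf.constant([1., 2., 3., 4., 5., 10., 20., 30., 40., 50.])
vd = vector_environ(vec, {'a': 2, 'b': 3}, complex=True)
# vd['a'] -> [1+10j, 2+20j], vd['b'] -> [3+30j, 4+40j, 5+50j]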
Example #31
def real2complex(x):
    channel = x.shape[-1] // 2
    if x.shape.ndims == 3:
        return tf.complex(x[:, :, :channel], x[:, :, channel:])
    elif x.shape.ndims == 4:
        return tf.complex(x[:, :, :, :channel], x[:, :, :, channel:])
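Shape intuition for real2complex, with NumPy as a stand-in for the 4-D case (a sketch: real channels are stacked first, imaginary channels second):

import numpy as np

x = np.random.randn(2, 8, 8, 6)    # 3 complex channels, stored as 6 real ones
z = x[..., :3] + 1j * x[..., 3:]   # what real2complex computes for ndims == 4
assert z.shape == (2, 8, 8, 3)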
Example #32
# W9 = tf.get_variable("W9", shape=[layers, layers], initializer=tf.contrib.layers.xavier_initializer())
# b9 = tf.Variable(tf.random_normal([layers]))
# L9 = tf.nn.tanh(tf.matmul(L8, W9) + b9)
# L9 = tf.layers.batch_normalization(L9)

W5 = tf.get_variable("W5",
                     shape=[layers, Nsubc * 2],
                     initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([Nsubc * 2]))

hypothesis = tf.matmul(L2, W5) + b5

X_real = hypothesis[:, 0:Nsubc]
X_imag = hypothesis[:, Nsubc:2 * Nsubc]
X_symbol = tf.complex(X_real, X_imag)
# X_symbol = tf.transpose(X_symbol)

# X_symbol = np.vstack((X_symbol,np.zeros((batch_size,Nsubc*(L-1)))))

X_symbol_ifft = tf.ifft(X_symbol)

encoded_symbol_mean = tf.reduce_mean(tf.abs(X_symbol_ifft)**2, axis=1)
RCM = tf.sqrt(
    tf.reduce_mean(
        (tf.abs(X_symbol_ifft)**6), axis=1) / encoded_symbol_mean**3)
# cost = 0.01*tf.reduce_mean(tf.abs(peak_power_symbol)) + tf.reduce_sum(tf.square(tf.abs(hypothesis-X)))/tf.reduce_sum(tf.square(tf.abs(X))) + tf.contrib.layers.l2_regularizer(.5)(W1)
cost = 0.1 * tf.reduce_mean(tf.abs(RCM)) + tf.reduce_sum(
    tf.square(tf.abs(hypothesis - X))) / tf.reduce_sum(tf.square(tf.abs(X)))
# cost = 0.01*tf.reduce_mean(tf.abs(peak_power_symbol)) + tf.reduce_sum(tf.square(tf.abs(hypothesis-X)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
Example #33
 def cplx(r, i):  # Combine to a complex vector
   return tf.complex(r, i)
Example #34
    def call(self, inputs,training=False):
        channel_axis = 1 if self.data_format == 'channels_first' else -1
        input_dim    = tf.shape(inputs)[channel_axis] // 2
        if self.rank == 1:
            f_real   = self.kernel[:, :, :self.filters]
            f_imag   = self.kernel[:, :, self.filters:]
        elif self.rank == 2:
            f_real   = self.kernel[:, :, :, :self.filters]
            f_imag   = self.kernel[:, :, :, self.filters:]
        elif self.rank == 3:
            f_real   = self.kernel[:, :, :, :, :self.filters]
            f_imag   = self.kernel[:, :, :, :, self.filters:]
        #self.add_loss(1e-2*tf.reduce_sum((f_real-f_imag)**2))
        if self.data_format == "channels_last":
            self.data_format = "NHWC"
        elif self.data_format == "channels_first":
            self.data_format = "NCHW"

        convArgs = {"strides":       self.strides[0]       if self.rank == 1 else self.strides,
                    "padding":       self.padding.upper(),
                    "data_format":   self.data_format,
                    "dilations": self.dilation_rate[0] if self.rank == 1 else self.dilation_rate}
        convFunc = {1: tf.nn.conv1d,
                    2: tf.nn.conv2d,
                    3: tf.nn.conv3d}[self.rank]

        # processing if the weights are assumed to be represented in the spectral domain

        if self.spectral_parametrization:
            if   self.rank == 1:
                f_real = tf.transpose(f_real, perm=(2,1,0))
                f_imag = tf.transpose(f_imag, perm=(2,1,0))
                f      = tf.complex(f_real,f_imag)#tf.concat([f_real, f_imag], axis=0)
                fshape = tf.shape(f)
                f      = tf.reshape(f, (fshape[0] * fshape[1], fshape[2]))
                f      = tf.signal.ifft(f)
                f      = tf.reshape(f, fshape)
                f_real = tf.math.real(f)#f[:fshape[0]//2]
                f_imag = tf.math.imag(f)#f[fshape[0]//2:]
                f_real = tf.transpose(f_real, perm=(2,1,0))
                f_imag = tf.transpose(f_imag,perm=(2,1,0))
            elif self.rank == 2:
                f_real = tf.transpose(f_real, perm=(3,2,0,1))
                f_imag = tf.transpose(f_imag, perm=(3,2,0,1))
                f      = tf.complex(f_real,f_imag)#tf.concat([f_real, f_imag], axis=0)
                fshape = tf.shape(f)
                f      = tf.reshape(f, (fshape[0] * fshape[1], fshape[2], fshape[3]))
                f      = tf.signal.ifft2d(f)
                f      = tf.reshape(f, fshape)
                f_real = tf.math.real(f)#f[:fshape[0]//2]
                f_imag = tf.math.imag(f)#f[fshape[0]//2:]
                f_real = tf.transpose(f_real, perm=(2,3,1,0))
                f_imag = tf.transpose(f_imag, perm=(2,3,1,0))

        # In case of weight normalization, real and imaginary weights are normalized

        if self.normalize_weight:
            ker_shape = self.kernel_shape
            nb_kernels = ker_shape[-2] * ker_shape[-1]

            kernel_shape_4_norm = (np.prod(self.kernel_size), nb_kernels)
            reshaped_f_real = tf.reshape(f_real, kernel_shape_4_norm)
            reshaped_f_imag = tf.reshape(f_imag, kernel_shape_4_norm)
            reduction_axes = list(range(2))
            del reduction_axes[-1]
            mu_real = tf.reduce_mean(reshaped_f_real, axis=reduction_axes)
            mu_imag = tf.reduce_mean(reshaped_f_imag, axis=reduction_axes)

            broadcast_mu_shape = [1] * 2
            broadcast_mu_shape[-1] = nb_kernels
            broadcast_mu_real = tf.reshape(mu_real, broadcast_mu_shape)
            broadcast_mu_imag = tf.reshape(mu_imag, broadcast_mu_shape)
            reshaped_f_real_centred = reshaped_f_real - broadcast_mu_real
            reshaped_f_imag_centred = reshaped_f_imag - broadcast_mu_imag
            Vrr = tf.reduce_mean(reshaped_f_real_centred ** 2, axis=reduction_axes) + self.epsilon
            Vii = tf.reduce_mean(reshaped_f_imag_centred ** 2, axis=reduction_axes) + self.epsilon
            Vri = tf.reduce_mean(reshaped_f_real_centred * reshaped_f_imag_centred,
                         axis=reduction_axes) + self.epsilon
            
            normalized_weight = complex_normalization(
                tf.concat([reshaped_f_real, reshaped_f_imag], axis=-1),
                Vrr, Vii, Vri,
                beta = None,
                gamma_rr = self.gamma_rr,
                gamma_ri = self.gamma_ri,
                gamma_ii = self.gamma_ii,
                scale=True,
                center=False,
                axis=-1
            )

            normalized_real = normalized_weight[:, :nb_kernels]
            normalized_imag = normalized_weight[:, nb_kernels:]
            f_real = tf.reshape(normalized_real, self.kernel_shape)
            f_imag = tf.reshape(normalized_imag, self.kernel_shape)

        # Performing complex convolution

        f_real._keras_shape = self.kernel_shape
        f_imag._keras_shape = self.kernel_shape

        cat_kernels_4_real = tf.concat([f_real, -f_imag], axis=-2)
        cat_kernels_4_imag = tf.concat([f_imag,  f_real], axis=-2)
        cat_kernels_4_complex = tf.concat([cat_kernels_4_real, cat_kernels_4_imag], axis=-1)
        #cat_kernels_4_complex._keras_shape = self.kernel_size + (2 * input_dim, 2 * self.filters)

        output = convFunc(inputs, cat_kernels_4_complex, **convArgs)

        if self.use_bias:
            output = tf.nn.bias_add(
                output,
                self.bias,
                data_format=self.data_format
            )

        if self.activation is not None:
            output = self.activation(output)

        return output
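The kernel concatenation under "Performing complex convolution" encodes complex multiplication as a single real-valued convolution with the block matrix [[R, -I], [I, R]]. A minimal NumPy sketch of that identity, with matrices standing in for the convolutions:

import numpy as np

a, b = np.random.randn(3, 3), np.random.randn(3, 3)  # Re/Im of the weights
x, y = np.random.randn(3), np.random.randn(3)        # Re/Im of the input
W = np.block([[a, -b], [b, a]])
out = W @ np.concatenate([x, y])
z = (a + 1j * b) @ (x + 1j * y)
assert np.allclose(out[:3], z.real) and np.allclose(out[3:], z.imag)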
Example #35
 tf_regloss += tf_zernloss
 tf_regloss += tf_icshiftloss
 
 '''Define Optimizer'''
                                         
 '''Negativity Constraint'''                                          
 #tf_negsqrloss = reg.Reg_NegSqr(tf_helper.extract(tf.cast(muscat.TF_obj, tf.float32), muscat.mysize))#-tf.minimum(tf.reduce_min(muscat.TF_obj-1.),0) 
 tf_negsqrloss = reg.Reg_NegSqr(muscat.TF_obj-myparams.nEmbb-myparams.dn)#-tf.minimum(tf.reduce_min(muscat.TF_obj-1.),0) 
 tf_negsqrloss += reg.Reg_NegSqr(muscat.TF_obj_absorption)
 tf_negsqrloss *= experiments.lambda_neg
 
 
 # Correct the fwd model - not good here!
 tf_glob_real = tf.constant(0.,name='tf_glob_real')
 tf_glob_imag = tf.constant(0.,name='tf_glob_imag')
 tf_norm = tf.complex(tf_glob_real, tf_glob_imag) 
 
 '''Define Loss-function'''
 if(0):
     print('-------> Losstype is L1')
     tf_fidelity = tf.reduce_mean((tf.abs((muscat.tf_meas) - tf_fwd))) # allow a global phase parameter to avoid unwrapping effects
 elif(0):
     print('-------> Losstype mixed L2 ')
     tf_fidelity = tf.reduce_mean(tf_helper.tf_abssqr(tf.real(muscat.tf_meas) - tf.real(tf_fwd))+tf_helper.tf_abssqr(tf.imag(muscat.tf_meas) - tf.imag(tf_fwd))) # allow a global phase parameter to avoid unwrapping effects
 elif(1):
     print('-------> Losstype is L2')
     tf_fidelity = tf.reduce_mean(tf_helper.tf_abssqr(muscat.tf_meas - tf_fwd + tf_norm)) # allow a global phase parameter to avoid unwrapping effects
 elif(0):
     print('-------> Losstype is L2')
     tf_fidelity_real = tf.reduce_mean(tf.losses.mean_squared_error(tf.real(muscat.tf_meas), tf.real(tf_fwd))) # allow a global phase parameter to avoid unwrapping effects
     tf_fidelity_imag = tf.reduce_mean(tf.losses.mean_squared_error(tf.imag(muscat.tf_meas), tf.imag(tf_fwd))) # allow a global phase parameter to avoid unwrapping effects
Example #36
def ACC_tf(img, Rn=None):  # (256,256,12)
    """
    Coil Combine according to DO Walsh et al. Adaptive Reconstruction of
    Phased Array MR Imagery, MRM, 2000.
    Img: 2D images of individual coil (row * column * TEs * coils)
    Rn: noise correlation matrix
    """
    if img.shape.ndims == 4:
        H, W, TEs, coils = (img.shape[0].value, img.shape[1].value,
                            img.shape[2].value, img.shape[3].value)
    elif img.shape.ndims == 3:
        TEs = 1
        H, W, coils = (img.shape[0].value, img.shape[1].value,
                       img.shape[2].value)
        img = tf.expand_dims(img, axis=2)
    else:
        raise ValueError('Invalid image data!')

    if Rn is None:
        Rn = tf.eye(coils)
    kernel_size = 7
    iRn = tf.matrix_inverse(Rn)

    Rs = tf.zeros([H, W, coils, coils], dtype=tf.complex64)
    i_list = []
    for i in range(coils):
        j_list = []
        for j in range(coils):
            e = Rs[:, :, i, j]
            for n in range(TEs):
                a1 = img[:, :, n, i]
                a2 = tf.math.conj(img[:, :, n, j])
                a = a1 * a2
                a = tf.expand_dims(tf.expand_dims(a, 0), -1)
                a_complex = tf.concat([tf.real(a), tf.imag(a)], -1)
                b = tf.ones([kernel_size, kernel_size], dtype=tf.complex64)
                b = tf.expand_dims(tf.expand_dims(b, -1), -1)
                b_real = tf.concat([tf.real(b), -1 * tf.imag(b)], -2)
                b_imag = tf.concat([tf.imag(b), tf.real(b)], -2)
                b_complex = tf.concat([b_real, b_imag], -1)
                c = tf.nn.convolution(a_complex, b_complex, padding='SAME')
                c = tf.complex(c[:, :, :, :1], -c[:, :, :, 1:])
                d = tf.squeeze(c)
                e = e + d
            j_list.append(e)
        Rs_j = tf.stack(j_list, -1)
        i_list.append(Rs_j)
    Rs_ij = tf.stack(i_list, -2)
    Rs = tf.transpose(Rs_ij, [0, 1, 3, 2])

    v = tf.ones([H, W, coils], dtype=tf.complex64)
    N = 2
    for i in range(N):
        a = tf.tile(tf.expand_dims(v, -1), [1, 1, 1, coils])
        b = tf.reduce_sum(Rs * a, axis=2)
        v = tf.squeeze(
            tf.reduce_sum(Rs *
                          tf.tile(tf.expand_dims(v, -1), [1, 1, 1, coils]),
                          axis=2))
        d = tf.cast(tf.sqrt(tf.reduce_sum(v * tf.conj(v), axis=2)), tf.float32)
        e = tf.ones_like(d) * np.spacing(1)
        d = tf.cast(tf.where(tf.less_equal(d, e), e, d), tf.complex64)
        v = tf.divide(v, tf.tile(tf.expand_dims(d, -1), [1, 1, coils]))

    v = tf.expand_dims(v, 2)

    C_img = tf.squeeze(tf.reduce_sum(img * v, axis=3))

    return C_img
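A hypothetical smoke test for ACC_tf (TF 1.x style, so this only builds the graph; evaluation needs a session): combine a 3-D stack of coil images into a single complex image.

img = tf.complex(tf.random_normal((32, 32, 4)),
                 tf.random_normal((32, 32, 4)))  # H x W x coils
combined = ACC_tf(img)                           # -> 32 x 32 complex image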
Example #37
dur = 10
dt = 0.05
time = tf.range(dur, delta=dt, dtype=tf.float64)
nosc = 2

canonical_hopf_params = {'alpha': tf.Variable(tf.random.uniform((), minval=-0.00001, maxval=0.00001, dtype=tf.float64), trainable=True, dtype=tf.float64),
                        'beta1': tf.Variable(tf.random.uniform((), minval=-0.00001, maxval=0.00001, dtype=tf.float64), trainable=True, dtype=tf.float64),
                        'beta2': tf.Variable(tf.random.uniform((), minval=-0.00001, maxval=0, dtype=tf.float64), trainable=True, dtype=tf.float64),
                        'epsilon': tf.Variable(tf.random.uniform((), minval=0, maxval=0.00001, dtype=tf.float64), trainable=True, dtype=tf.float64)}

l1 = tg.oscillators(name='l1', osctype=canonical_hopf_params, 
                    freqspacing='log', freqlims=(1.0, 2.0), nosc=nosc, 
                    #initconds=tf.complex(tf.constant(1.0, dtype=tf.float64, shape=(nosc,)), 
                    #                    tf.constant(0.0, dtype=tf.float64, shape=(nosc,))))
                    initconds=tf.complex(tf.random.truncated_normal((nosc,), stddev=1.0,  dtype=tf.float64),
                                            tf.random.truncated_normal((nosc,), dtype=tf.float64, stddev=1.0)))

s1 = tg.stimulus(name='s1', values=tf.complex(0*time, 0*time), fs=1/dt) # no stimulus
target = tf.constant(1, dtype=tf.float64, shape=(6,1))

l1 = tg.connect(source=s1, target=l1, matrixinit=0.0+1j*0.0)

GrFNN = tg.Model(name='GrFNN', layers=[l1], stim=s1, time=time)

def train_step(GrFNN, target):
    with tf.GradientTape() as tape:
        GrFNN = GrFNN.integrate()
        mse = tf.losses.MeanSquaredError()
        curr_loss = mse(target,
                        tf.abs(GrFNN.layers[0].allsteps[-6:,0]))
    grads = tape.gradient(curr_loss, [GrFNN.layers[0].params['alpha'], GrFNN.layers[0].params['beta1'],
Example #38
def complex_softmax(real, imag):

    magnitude = tf.abs(tf.complex(real, imag))
    magnitude = tf.keras.layers.Softmax()(magnitude)

    return magnitude
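A usage sketch for complex_softmax, assuming TF 2.x eager execution: magnitudes are computed first, then an ordinary softmax over them.

r = tf.constant([3.0, 0.0])
i = tf.constant([4.0, 0.0])
probs = complex_softmax(r, i)  # |3+4j| = 5, |0+0j| = 0 -> softmax([5., 0.])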