Example #1
    def antenna_jones(lm, stokes, alpha, ref_freq):
        """
        Compute the jones terms for each antenna.

        lm, stokes and alpha are the source variables.
        """

        # Compute the complex phase
        cplx_phase = rime.phase(lm, D.uvw, D.frequency, CT=CT)

        # Check for nans/infs in the complex phase
        phase_msg = ("Check that '1 - l**2  - m**2 >= 0' holds "
                    "for all your lm coordinates. This is required "
                    "for 'n = sqrt(1 - l**2 - m**2) - 1' "
                    "to be finite.")

        phase_real = tf.check_numerics(tf.real(cplx_phase), phase_msg)
        phase_imag = tf.check_numerics(tf.imag(cplx_phase), phase_msg)

        # Compute the square root of the brightness matrix
        # (as well as the sign)
        bsqrt, sgn_brightness = rime.b_sqrt(stokes, alpha,
            D.frequency, ref_freq, CT=CT,
            polarisation_type=polarisation_type)

        # Check for nans/infs in the bsqrt
        bsqrt_msg = ("Check that your stokes parameters "
                    "satisfy I**2 >= Q**2 + U**2 + V**2. "
                    "Montblanc performs a cholesky decomposition "
                    "of the brightness matrix and the above must "
                    "hold for this to produce valid values.")

        bsqrt_real = tf.check_numerics(tf.real(bsqrt), bsqrt_msg)
        bsqrt_imag = tf.check_numerics(tf.imag(bsqrt), bsqrt_msg)

        # Compute the direction dependent effects from the beam
        ejones = rime.e_beam(lm, D.frequency,
            D.pointing_errors, D.antenna_scaling,
            beam_sin, beam_cos,
            D.beam_extents, D.beam_freq_map, D.ebeam)

        deps = [phase_real, phase_imag, bsqrt_real, bsqrt_imag]
        deps = [] # Do nothing for now

        # Combine the brightness square root, complex phase,
        # feed rotation and beam dde's
        with tf.control_dependencies(deps):
            antenna_jones = rime.create_antenna_jones(bsqrt, cplx_phase,
                                                    feed_rotation, ejones, FT=FT)
            return antenna_jones, sgn_brightness
Example #2
    def __call__(self, inputs, state, scope=None ):
        with tf.variable_scope(scope or type(self).__name__):
            unitary_hidden_state, secondary_cell_hidden_state = tf.split(1,2,state)


            mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size*2])
            mat_out = tf.get_variable('mat_out', [self.state_size*2, self.output_size])
            in_proj = tf.matmul(inputs, mat_in)            
            in_proj_c = tf.complex(*tf.split(1, 2, in_proj))
            out_state = modReLU( in_proj_c + 
                ulinear(unitary_hidden_state, self.state_size),
                tf.get_variable(name='bias', dtype=tf.float32, shape=[self.state_size], initializer=tf.constant_initializer(0.)),
                scope=scope)


        with tf.variable_scope('unitary_output'):
            '''computes data linear, unitary linear and summation -- TODO: should be complex output'''
            unitary_linear_output_real = linear.linear([tf.real(out_state), tf.imag(out_state), inputs], True, 0.0)
        

        with tf.variable_scope('scale_nonlinearity'):
            modulus = tf.complex_abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        #transition to data shortcut connection


        #out_ = tf.matmul(tf.concat(1,[tf.real(out_state), tf.imag(out_state), ] ), mat_out) + out_bias

        #hidden state is complex but output is completely real
        return out_, out_state #complex 
Example #3
    def get_reconstructed_image(self, real, imag, name=None):
        """
        :param real:
        :param imag:
        :param name:
        :return:
        """
        complex_k_space_label = tf.complex(real=tf.squeeze(real), imag=tf.squeeze(imag), name=name+"_complex_k_space")
        rec_image_complex = tf.expand_dims(tf.ifft2d(complex_k_space_label), axis=1)
        
        rec_image_real = tf.reshape(tf.real(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        rec_image_imag = tf.reshape(tf.imag(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

        # Shifting
        top, bottom = tf.split(rec_image_real, num_or_size_splits=2, axis=2)
        top_left, top_right = tf.split(top, num_or_size_splits=2, axis=3)
        bottom_left, bottom_right = tf.split(bottom, num_or_size_splits=2, axis=3)

        top_shift = tf.concat(axis=3, values=[bottom_right, bottom_left])
        bottom_shift = tf.concat(axis=3, values=[top_right, top_left])
        shifted_image = tf.concat(axis=2, values=[top_shift, bottom_shift])


        # Shifting
        top_imag, bottom_imag = tf.split(rec_image_imag, num_or_size_splits=2, axis=2)
        top_left_imag, top_right_imag = tf.split(top_imag, num_or_size_splits=2, axis=3)
        bottom_left_imag, bottom_right_imag = tf.split(bottom_imag, num_or_size_splits=2, axis=3)

        top_shift_imag = tf.concat(axis=3, values=[bottom_right_imag, bottom_left_imag])
        bottom_shift_imag = tf.concat(axis=3, values=[top_right_imag, top_left_imag])
        shifted_image_imag = tf.concat(axis=2, values=[top_shift_imag, bottom_shift_imag])

        shifted_image_two_channels = tf.stack([shifted_image[:,0,:,:], shifted_image_imag[:,0,:,:]], axis=1)
        return shifted_image_two_channels
Example #4
def sparse_dot_product0(emb, tuples, use_matmul=True, output_type='real'):
    """
    Compute the dot product of complex vectors.
    It uses complex vectors but tensorflow does not optimize in the complex space (or there is a bug in the gradient
    propagation with complex numbers...)
    :param emb: embeddings
    :param tuples: indices at which we compute dot products
    :return: scores (dot products)
    """
    n_t = tuples.get_shape()[0].value
    rk = emb.get_shape()[1].value
    emb_sel_a = tf.gather(emb, tuples[:, 0])
    emb_sel_b = tf.gather(emb, tuples[:, 1])
    if use_matmul:
        pred_cplx = tf.squeeze(tf.batch_matmul(
                tf.reshape(emb_sel_a, [n_t, rk, 1]),
                tf.reshape(emb_sel_b, [n_t, rk, 1]), adj_x=True))
    else:
        pred_cplx = tf.reduce_sum(tf.mul(tf.conj(emb_sel_a), emb_sel_b), 1)
    if output_type == 'complex':
        return pred_cplx
    elif output_type == 'real':
        return tf.real(pred_cplx) + tf.imag(pred_cplx)
    elif output_type == 'abs':
        return tf.abs(pred_cplx)
    elif output_type == 'angle':
        raise NotImplementedError('No argument or inverse-tanh function for complex number in Tensorflow')
    else:
        raise NotImplementedError()
Example #5
  def _compareMulGradient(self, data):
    # data is a float matrix of shape [n, 4].  data[:, 0], data[:, 1],
    # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
    # x, real parts of y and imaginary parts of y.
    with self.test_session():
      inp = tf.convert_to_tensor(data)
      xr, xi, yr, yi = tf.split(1, 4, inp)

      def vec(x):  # Reshape to a vector
        return tf.reshape(x, [-1])
      xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

      def cplx(r, i):  # Combine to a complex vector
        return tf.complex(r, i)
      x, y = cplx(xr, xi), cplx(yr, yi)
      # z is x times y in complex plane.
      z = x * y
      # Defines the loss function as the sum of all coefficients of z.
      loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
      epsilon = 0.005
      jacob_t, jacob_n = tf.test.compute_gradient(inp,
                                                  list(data.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=data,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Example #6
 def _compareRealImag(self, cplx, use_gpu):
   np_real, np_imag = np.real(cplx), np.imag(cplx)
   with self.test_session(use_gpu=use_gpu) as sess:
     inx = tf.convert_to_tensor(cplx)
     tf_real = tf.real(inx)
     tf_imag = tf.imag(inx)
     tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
   self.assertAllEqual(np_real, tf_real_val)
   self.assertAllEqual(np_imag, tf_imag_val)
   self.assertShapeEqual(np_real, tf_real)
   self.assertShapeEqual(np_imag, tf_imag)
Example #7
def tf_complex2real(input_data):
    """
    Parameters
    ----------
    input_data : nrow x ncol.

    Returns
    -------
    outputs concatenated real and imaginary parts as nrow x ncol x 2

    """

    return tf.stack([tf.real(input_data), tf.imag(input_data)], axis=-1)
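A minimal usage sketch (not part of the original snippet), assuming TensorFlow 1.x and the tf_complex2real function above in scope; the input values are illustrative only.

import numpy as np
import tensorflow as tf

# 2x2 complex input; the last axis of the output holds [real, imag].
z = tf.constant(np.array([[1 + 2j, 3 - 1j],
                          [0 + 1j, 2 + 0j]], dtype=np.complex64))
stacked = tf_complex2real(z)  # shape (2, 2, 2)

with tf.Session() as sess:
    print(sess.run(stacked))  # [..., 0] is the real part, [..., 1] the imaginary part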
Example #8
def TF_NUFT(A, SN, Kd, P):
    # A is the data, e.g. of size H,W,nMaps
    # SN should be the scaling factors from Fessler's NUFFT, elementwise-multiplied by the channel maps; so finally H,W,nMaps
    # Kd is the final (oversampled) FFT size, e.g. H*2,W*2
    # P is a sparse interpolation matrix of nTraj x H*W ; <101x16320 sparse matrix of type '<class 'numpy.complex128'>' with 2525 stored elements in Compressed Sparse Column format>

    # MData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/gUM/ForTFNUFT.mat')
    # A=MData['A']
    # SN=MData['SN']
    # Kd=MData['Kd']
    # P=MData['P']

    # NUbyFS3=MData['NUbyFS3'].T

    ToPad = [Kd[0, 0] - A.shape[0], Kd[0, 1] - A.shape[1]]

    paddings = tf.constant([[0, ToPad[0]], [0, ToPad[1]], [0, 0]])
    # paddings = tf.constant([[0, 68], [0, 60]])
    nMaps = 2  # A.shape[1]

    Idx = scipy.sparse.find(P)
    I2 = np.vstack([Idx[0], Idx[1]]).T

    sp_R = tf.SparseTensor(I2, tf.cast(np.real(Idx[2]), tf.float32),
                           [101, 16320])
    sp_I = tf.SparseTensor(I2, tf.cast(np.imag(Idx[2]), tf.float32),
                           [101, 16320])

    SNx = tf.constant(tf.cast(SN, tf.complex64))
    Ax = tf.constant(tf.cast(A, tf.complex64))

    SNx = tf.reshape(SNx, [SNx.shape[0], SNx.shape[1], 1])
    Step1 = tf.multiply(Ax, SNx)
    Padded = tf.pad(Step1, paddings, "CONSTANT")
    Step2 = tf.transpose(tf.fft(
        tf.transpose(tf.fft(tf.transpose(Padded, perm=[2, 0, 1])),
                     perm=[0, 2, 1])),
                         perm=[1, 2, 0])
    # Step2=tf.fft(tf.transpose(tf.fft(Padded),perm=[1,0]))
    Col = tf.reshape(Step2, [-1, nMaps])
    ColR = tf.real(Col)
    ColI = tf.imag(Col)
    RR = tf.sparse_tensor_dense_matmul(sp_R, ColR)
    RI = tf.sparse_tensor_dense_matmul(sp_R, ColI)
    IR = tf.sparse_tensor_dense_matmul(sp_I, ColR)
    II = tf.sparse_tensor_dense_matmul(sp_I, ColI)
    R = RR - II
    I = RI + IR
    C = tf.complex(R, I)

    return C
Example #9
def rfl_mul(h, state_size, no, reuse):
    """
    Multiplication with a reflection.
    Implementing the Householder reflection R = I - 2 * (vv*/|v|^2)
    Input:
        h: hidden state_vector.
        state_size: The RNN state size.
        reuse: True if graph variables should be reused.
    Returns:
        R*h
    """
    with tf.variable_scope("reflection_v_" + str(no), reuse=reuse):
        vr = tf.get_variable('vr',
                             shape=[state_size, 1],
                             dtype=tf.float32,
                             initializer=tf.glorot_uniform_initializer())
        vi = tf.get_variable('vi',
                             shape=[state_size, 1],
                             dtype=tf.float32,
                             initializer=tf.glorot_uniform_initializer())

    with tf.variable_scope("ref_mul_" + str(no), reuse=reuse):
        hr = tf.real(h)
        hi = tf.imag(h)
        vstarv = tf.reduce_sum(vr**2 + vi**2)
        hr_vr = tf.matmul(hr, vr)
        hr_vi = tf.matmul(hr, vi)
        hi_vr = tf.matmul(hi, vr)
        hi_vi = tf.matmul(hi, vi)

        # tf.matmul with transposition is the same as T.outer
        # we need something of the shape [batch_size, state_size] in the end
        a = tf.matmul(hr_vr - hi_vi, vr, transpose_b=True)
        b = tf.matmul(hr_vi + hi_vr, vi, transpose_b=True)
        c = tf.matmul(hr_vr - hi_vi, vi, transpose_b=True)
        d = tf.matmul(hr_vi + hi_vr, vr, transpose_b=True)

        # the thing we return is:
        # return_re = hr - (2/vstarv)(d - c)
        # return_im = hi - (2/vstarv)(a + b)
        new_hr = hr - (2.0 / vstarv) * (a + b)
        new_hi = hi - (2.0 / vstarv) * (d - c)
        new_state = tf.complex(new_hr, new_hi)

        # v = tf.complex(vr, vi)
        # vstarv = tf.complex(tf.reduce_sum(vr**2 + vi**2), 0.0)
        # # vstarv = tf.matmul(tf.transpose(tf.conj(v)), v)
        # vvstar = tf.matmul(v, tf.transpose(tf.conj(v)))
        # refsub = (2.0/vstarv)*vvstar
        # R = tf.identity(refsub) - refsub
        return new_state
Example #10
    def phase(self, y_true_comb, y_pred_comb):
        y_true, y_mask = tf.split(y_true_comb, [1, 1], axis=-1)
        y_pred, y_blah = tf.split(y_pred_comb, [1, 1], axis=-1)

        print('true comb', y_true_comb.shape)
        print('pred comb', y_pred_comb.shape)

        #print(tf.reshape(y_pred,[-1,y_pred.shape[1],y_pred.shape[2]]).shape)
        pred_fft = tf.signal.fft2d(
            tf.cast(tf.reshape(y_pred, [-1, y_pred.shape[1], y_pred.shape[2]]),
                    dtype=tf.complex64))
        pred_fft_shifted = self.fftshift(pred_fft, axes=(1, 2))
        true_fft = tf.signal.fft2d(
            tf.cast(tf.reshape(y_true, [-1, y_pred.shape[1], y_pred.shape[2]]),
                    dtype=tf.complex64))
        true_fft_shifted = self.fftshift(true_fft, axes=(1, 2))

        #print(pred_fft.dtype)
        #print(tf.convert_to_tensor(self.np_mask,dtype=tf.complex64).dtype)
        #mask = tf.cast(tf.convert_to_tensor(self.np_mask),dtype=tf.complex64)
        #mask_b = tf.broadcast_to(mask,tf.shape(true_fft))

        #masked_true = tf.ragged.boolean_mask(data=true_fft,mask=self.np_mask.reshape((x,y,2)))
        #masked_pred = tf.ragged.boolean_mask(data=pred_fft,mask=self.np_mask.reshape((x,y,2)))
        y_mask_comp = tf.cast(tf.reshape(
            y_mask, [-1, y_pred.shape[1], y_pred.shape[2]]),
                              dtype=tf.complex64)
        #y_mask_comp = tf.cast(y_mask,dtype=tf.complex64)
        masked_true = tf.multiply(y_mask_comp, true_fft_shifted)
        masked_pred = tf.multiply(y_mask_comp, pred_fft_shifted)

        pred_ifft = tf.signal.ifft2d(self.ifftshift(masked_pred, axes=(1, 2)))
        true_ifft = tf.signal.ifft2d(self.ifftshift(masked_true, axes=(1, 2)))

        pred_phase = tf.atan2(tf.imag(pred_ifft), tf.real(pred_ifft))
        true_phase = tf.atan2(tf.imag(true_ifft), tf.real(true_ifft))
        #print(y_pred.shape)
        return pred_phase, true_phase
Example #11
def colnorm(y, pnorm=2):
    if iscomplex(y):
        yr = tf.real(y)
        yi = tf.imag(y)
        m2 = yr * yr + yi * yi
    else:
        m2 = y * y

    outshape = (1, int(y.get_shape()[1]))
    if pnorm == 2:
        y = tf.sqrt(tf.reduce_sum(m2, 0))
    elif pnorm == 0:
        y = tf.reduce_sum(tf.to_float(m2 > 0), 0)
    return tf.reshape(y, outshape)
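colnorm relies on an iscomplex helper that is not shown in this example; a minimal sketch of one possible implementation, assuming it only needs to inspect the tensor's dtype (the helper's behaviour is inferred from the call site above, not taken from the original project):

import tensorflow as tf

def iscomplex(y):
    # Hypothetical helper: True when the tensor carries a complex dtype.
    return y.dtype in (tf.complex64, tf.complex128)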
Example #12
def mod_sigmoid_gamma(z, scope='', reuse=None):
    """
    ModSigmoid implementation, with uncoupled and unbounded
    alpha and beta.
    """
    with tf.variable_scope('mod_sigmoid_beta_' + scope, reuse=reuse):
        alpha = tf.get_variable('alpha', [],
                                dtype=tf.float32,
                                initializer=tf.constant_initializer(0.0))
        beta = tf.get_variable('beta', [],
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(1.0))
        pre_act = alpha * tf.real(z) + beta * tf.imag(z)
        return tf.complex(tf.nn.sigmoid(pre_act), tf.zeros_like(pre_act))
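A short usage sketch (not part of the original snippet), assuming TensorFlow 1.x graph mode and the mod_sigmoid_gamma function above in scope; the input value is illustrative only.

import numpy as np
import tensorflow as tf

z = tf.constant(np.array([0.5 - 0.5j], dtype=np.complex64))
out = mod_sigmoid_gamma(z, scope='demo')  # sigmoid(alpha*Re(z) + beta*Im(z)) as a complex number

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(out))  # with the initial alpha=0, beta=1: sigmoid(-0.5) ~ 0.378 + 0j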
Example #13
def get_power(signal, axis=-2):
    """Calculates power for `signal`

    Args:
        signal (tf.Tensor): Single frequency signal with shape (D, T) or (F, D, T).
        axis: reduce_mean axis
    Returns:
        tf.Tensor: Power with shape (T,) or (F, T)

    """
    power = tf.real(signal)**2 + tf.imag(signal)**2
    power = tf.reduce_mean(power, axis=axis)

    return power
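A minimal usage sketch (not part of the original snippet), assuming TensorFlow 1.x and the get_power function above in scope:

import numpy as np
import tensorflow as tf

D, T = 3, 5
signal = tf.constant((np.random.randn(D, T) + 1j * np.random.randn(D, T)).astype(np.complex64))
power = get_power(signal, axis=-2)  # mean of |signal|**2 over the channel axis -> shape (T,)

with tf.Session() as sess:
    print(sess.run(power))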
Example #14
def cRelu(input):
    '''
    Args:
        input: complex tensor
    Return:
        output
    '''
    input_real = tf.real(input)
    input_imag = tf.imag(input)
    output_real = tf.nn.relu(input_real)
    output_imag = tf.nn.relu(input_imag)
    output = tf.complex(output_real,output_imag)

    return output
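A short usage sketch (not part of the original snippet), assuming TensorFlow 1.x and the cRelu function above in scope:

import numpy as np
import tensorflow as tf

z = tf.constant(np.array([-1 + 2j, 3 - 4j], dtype=np.complex64))
out = cRelu(z)  # ReLU applied to the real and imaginary parts independently

with tf.Session() as sess:
    print(sess.run(out))  # expected: [0.+2.j, 3.+0.j]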
Example #15
def modrelu(z, b, comp):
    if comp:
        z_norm = tf.sqrt(tf.square(tf.real(z)) +
                         tf.square(tf.imag(z))) + 0.00001
        step1 = z_norm + b
        step2 = tf.complex(tf.nn.relu(step1), tf.zeros_like(z_norm))
        step3 = z / tf.complex(z_norm, tf.zeros_like(z_norm))
    else:
        z_norm = tf.abs(z) + 0.00001
        step1 = z_norm + b
        step2 = tf.nn.relu(step1)
        step3 = tf.sign(z)

    return tf.multiply(step3, step2)
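A minimal usage sketch (not part of the original snippet), assuming TensorFlow 1.x and the modrelu function above in scope; with comp=True it shrinks the modulus by |b| while preserving the phase:

import numpy as np
import tensorflow as tf

z = tf.constant(np.array([3 + 4j], dtype=np.complex64))  # modulus 5
b = tf.constant([-1.0])
out = modrelu(z, b, comp=True)

with tf.Session() as sess:
    print(sess.run(out))  # approximately [2.4 + 3.2j] (modulus 4, same phase)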
Example #16
def mod_sigmoid_sum_beta(z, scope='', reuse=None):
    """ Probably not a good idea. """
    with tf.variable_scope('mod_sigmoid_sum_beta_' + scope, reuse=reuse):
        alpha = tf.get_variable('alpha', [],
                                dtype=tf.float32,
                                initializer=tf.constant_initializer(0.0))
        beta = tf.get_variable('beta', [],
                               dtype=tf.float32,
                               initializer=tf.constant_initializer(0.0))
        sig_alpha = tf.nn.sigmoid(alpha)
        sig_beta = tf.nn.sigmoid(beta)
        sig_sum = (sig_alpha * tf.nn.sigmoid(tf.real(z)) +
                   sig_beta * tf.nn.sigmoid(tf.imag(z)))
        return tf.complex(sig_sum, tf.zeros_like(sig_sum))
Example #17
 def __call__(self, inputs, state, scope=None ):
     zero_initer = tf.constant_initializer(0.)
     with tf.variable_scope(scope or type(self).__name__):
         mat_in = tf.get_variable('W_in', [self.input_size, self.state_size*2])
         mat_out = tf.get_variable('W_out', [self.state_size*2, self.output_size])
         in_proj = tf.matmul(inputs, mat_in)
         in_proj_c = tf.complex( in_proj[:, :self.state_size], in_proj[:, self.state_size:] )
         out_state = modrelu_c( in_proj_c + 
             ulinear_c(state,transform=self.transform),
             tf.get_variable(name='B', dtype=tf.float32, shape=[self.state_size], initializer=zero_initer)
             )
         out_bias = tf.get_variable(name='B_out', dtype=tf.float32, shape=[self.output_size], initializer = zero_initer)
         out = tf.matmul( tf.concat(1,[tf.real(out_state), tf.imag(out_state)] ), mat_out ) + out_bias
     return out, out_state
Example #18
File: models.py  Project: corcra/uRNN
def relu_mod(state, state_size, scope=None, real=False, name=None):
    """
    Rectified linear unit for complex-valued state.
    (Equation 8 in http://arxiv.org/abs/1511.06464)
    """
    batch_size = state.get_shape()[0]
    with vs.variable_scope(scope or "ReLU_mod"):
        if not real:
            # WARNING: complex_abs has no gradient registered in the docker version for some reason
            # [[ LookupError: No gradient defined for operation 'RNN/complex_RNN_99/ReLU_mod/ComplexAbs' (op type: ComplexAbs) ]]
            #modulus = tf.complex_abs(state)
            modulus = tf.sqrt(tf.real(state)**2 + tf.imag(state)**2)
            bias_term = vs.get_variable(
                "Bias",
                dtype=tf.float32,
                initializer=tf.constant(np.random.uniform(low=-0.01,
                                                          high=0.01,
                                                          size=(state_size)),
                                        dtype=tf.float32,
                                        shape=[state_size]))
            #        bias_tiled = tf.tile(bias_term, [1, batch_size])

            rescale = tf.complex(
                tf.maximum(modulus + bias_term, 0) /
                (modulus + 1e-5 * tf.ones_like(modulus)),
                tf.zeros_like(modulus))
            #rescale = tf.complex(tf.maximum(modulus + bias_term, 0) / ( modulus + 1e-5), 0.0)
        else:
            # state is [state_re, state_im]
            hidden_size = state_size // 2
            state_re = tf.slice(state, [0, 0], [-1, hidden_size])
            state_im = tf.slice(state, [0, hidden_size], [-1, hidden_size])
            modulus = tf.sqrt(state_re**2 + state_im**2)
            # this is [batch_size, hidden_size] in shape, now...
            bias_re = vs.get_variable(
                "Bias",
                dtype=tf.float32,
                initializer=tf.constant(np.random.uniform(low=-0.01,
                                                          high=0.01,
                                                          size=(hidden_size)),
                                        dtype=tf.float32,
                                        shape=[hidden_size]))
            rescale = tf.maximum(modulus + bias_re,
                                 0) / (modulus + 1e-5 * tf.ones_like(modulus))


#            bias_term = tf.concat(0, [bias_re, tf.zeros_like(bias_re)])
#            rescale = tf.maximum(modulus + bias_term, 0) / ( modulus + 1e-5*tf.ones_like(modulus) )
        output = tf.mul(state, tf.tile(rescale, [1, 2]), name=name)
    return output
Example #19
def data_consistency(generated, X_k, mask):
    gene_complex = real2complex(generated)
    gene_complex = tf.transpose(gene_complex, [0, 3, 1, 2])
    mask = tf.transpose(mask, [0, 3, 1, 2])
    X_k = tf.transpose(X_k, [0, 3, 1, 2])
    gene_fft = tf.fft2d(gene_complex)
    out_fft = X_k + gene_fft * (1.0 - mask)
    output_complex = tf.ifft2d(out_fft)
    output_complex = tf.transpose(output_complex, [0, 2, 3, 1])
    output_real = tf.cast(tf.real(output_complex), dtype=tf.float32)
    output_imag = tf.cast(tf.imag(output_complex), dtype=tf.float32)
    output = tf.concat([output_real, output_imag], axis=-1)

    return output
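data_consistency calls a real2complex helper that is not shown in this example; a minimal sketch of one possible implementation, assuming the generated tensor stores the real and imaginary parts as the two halves of its last (channel) axis (the layout is inferred from the surrounding code, not taken from the original project):

import tensorflow as tf

def real2complex(x):
    # Hypothetical helper: split the channel axis into real/imaginary halves and recombine.
    real, imag = tf.split(x, num_or_size_splits=2, axis=-1)
    return tf.complex(real, imag)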
Example #20
def normalize(state):
    """Normalizes the fermionic states.

    Arguments:
        state (tensor of shape (batch_size, rank, num_fermions, num_matrices, N, N)): fermionic states

    Returns:
        state_normalized (tensor of same shape as state)
    """
    num_fermions = int(state.shape[-4])
    norm = lambda x: tf.cast(tf.sqrt(tf.reduce_sum(tf.real(x) * tf.real(x) + tf.imag(x) * tf.imag(x), axis=-1)), tf.complex64)
    state = state / norm(tf.reshape(state, state.shape.as_list()[:-3] + [-1]))[:, :, :, tf.newaxis, tf.newaxis, tf.newaxis]
    norm = tf.cast(tf.pow(tf.real(overlap(state, state)), 0.5 / num_fermions), tf.complex64)
    return state / norm[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
Example #21
def c2q1d(x):
    """ An internal function to convert a 1D Complex vector back to a real
    array,  which is twice the height of x.
    """
    # Input has shape [batch, r, c, 2]
    r, c = x.get_shape().as_list()[1:3]
    x1 = tf.real(x)
    x2 = tf.imag(x)
    # Stack 2 inputs of shape [batch, r, c] to [batch, r, 2, c]
    y = tf.stack([x1, x2], axis=-2)
    # Reshaping interleaves the results
    y = tf.reshape(y, [-1, 2 * r, c])

    return y
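A short usage sketch (not part of the original snippet), assuming TensorFlow 1.x and the c2q1d function above in scope:

import numpy as np
import tensorflow as tf

x = tf.constant((np.arange(6).reshape(1, 3, 2) + 1j * np.ones((1, 3, 2))).astype(np.complex64))
y = c2q1d(x)  # shape (1, 6, 2): rows of the real and imaginary parts interleaved

with tf.Session() as sess:
    print(sess.run(y).shape)  # (1, 6, 2)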
Example #22
def spectral_filter_gen(c_in,
                        c_out_total,
                        basic_len,
                        len_list,
                        use_bias,
                        name='spectral_filter'):
    with tf.variable_scope(name):
        c_out = int(c_out_total) // len(len_list)
        if CONV_KERNEL_INIT == 'freq':
            kernel_r = tf.get_variable(
                'kernel_real',
                shape=[1, basic_len, c_in, c_out],
                initializer=tf.contrib.layers.xavier_initializer())
            kernel_i = tf.get_variable(
                'kernel_imag',
                shape=[1, basic_len, c_in, c_out],
                initializer=tf.contrib.layers.xavier_initializer())
        elif CONV_KERNEL_INIT == 'time':
            kernel = tf.get_variable(
                'kernel',
                shape=[1, basic_len, c_out, 2 * (c_in + 1)],
                initializer=tf.contrib.layers.xavier_initializer())
            kernel_c = tf.fft(tf.complex(kernel, 0. * kernel))
            kernel_c = kernel_c[:, :, :, 1:(c_in + 1)]
            kernel_c = tf.transpose(kernel_c, [0, 1, 3, 2])
            kernel_r = tf.real(kernel_c)
            kernel_i = tf.imag(kernel_c)
        kernel_dict = {}
        for filter_len in len_list:
            if filter_len == basic_len:
                kernel_dict[filter_len] = [kernel_r, kernel_i]
            else:
                kernel_exp_r = tf.image.resize_bilinear(kernel_r,
                                                        [filter_len, c_in],
                                                        align_corners=True)
                kernel_exp_i = tf.image.resize_bilinear(kernel_i,
                                                        [filter_len, c_in],
                                                        align_corners=True)
                kernel_dict[filter_len] = [kernel_exp_r, kernel_exp_i]
        if use_bias:
            bias_complex_r = tf.get_variable(
                'bias_real', shape=[c_out], initializer=tf.zeros_initializer())
            bias_complex_i = tf.get_variable(
                'bias_imag', shape=[c_out], initializer=tf.zeros_initializer())
            bias_complex = tf.complex(bias_complex_r,
                                      bias_complex_i,
                                      name='bias')
            return kernel_dict, bias_complex
        else:
            return kernel_dict
Example #23
def angle(z):
    if z.dtype == tf.complex128:
        dtype = tf.float64
    else:
        dtype = tf.float32
    x = tf.real(z)
    y = tf.imag(z)
    xneg = tf.cast(x < 0.0, dtype)
    yneg = tf.cast(y < 0.0, dtype)
    ypos = tf.cast(y >= 0.0, dtype)

    offset = xneg * (ypos - yneg) * np.pi

    return tf.atan(y / x) + offset
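A minimal usage sketch (not part of the original snippet), assuming TensorFlow 1.x and the angle function above in scope; note that the atan-plus-offset formulation breaks down when the real part is exactly zero, so the test values avoid that case:

import numpy as np
import tensorflow as tf

z_np = np.array([1 + 1j, -1 + 1j, -1 - 1j, 1 - 1j], dtype=np.complex64)
z = tf.constant(z_np)

with tf.Session() as sess:
    print(sess.run(angle(z)))  # ~ [0.785, 2.356, -2.356, -0.785]
    print(np.angle(z_np))      # reference values from NumPy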
Example #24
def compute_cost(Prediction, Ju, PhaseNumber):

    true_abs = tf.sqrt(
        tf.square(tf.real(coil_imgs)) + tf.square(tf.imag(coil_imgs)) + 1e-12)
    true_sum = tf.sqrt(tf.reduce_sum(tf.square(true_abs), 1))
    ui_0_abs = tf.sqrt(
        tf.square(tf.real(ui_0)) + tf.square(tf.imag(ui_0)) + 1e-12)
    ui_0_sum = tf.sqrt(tf.reduce_sum(tf.square(ui_0_abs), 1))
    cost_0 = tf.reduce_mean(tf.abs(ui_0_sum - true_sum))

    cost = tf.reduce_mean(tf.abs(Ju[-1] - true_sum))

    pred_abs = tf.sqrt(
        tf.square(tf.real(Prediction[-1])) +
        tf.square(tf.imag(Prediction[-1])))
    pred_sum = tf.sqrt(tf.reduce_sum(tf.square(pred_abs), 1))
    cost_ui = tf.reduce_mean(tf.abs(pred_sum - true_sum))

    #    ui_real =  tf.abs((tf.real(Prediction[-1]) - tf.real(coil_imgs)))
    #    ui_imag =  tf.abs((tf.imag(Prediction[-1]) - tf.imag(coil_imgs)))
    #    cost_ui = tf.reduce_mean( ui_real + ui_imag )

    # ssim
    output_abs = tf.expand_dims(tf.abs(Ju[-1]), -1)
    target_abs = tf.expand_dims(tf.abs(target), -1)
    L = tf.reduce_max(target_abs, axis=(1, 2, 3),
                      keepdims=True) - tf.reduce_min(
                          target_abs, axis=(1, 2, 3), keepdims=True)
    ssim = Utils.ssim(output_abs, target_abs, L=L)

    # MSE_VN  prediction vs. target 8.0
    target_abs = tf.sqrt(tf.real((target) * tf.conj(target)) + 1e-12)
    output_abs = tf.sqrt(tf.real((Ju[-1]) * tf.conj(Ju[-1])) + 1e-12)
    energy = tf.reduce_mean(tf.reduce_sum(
        ((output_abs - target_abs)**2))) / batch_size

    return [cost_0, cost, ssim, cost_ui, energy]
Example #25
def beamsplitter_matrix(t, r, D, batched=False, save=False, directory=None):
    """creates the two mode beamsplitter matrix"""
    if not batched:
        # put in a fake batch dimension for broadcasting convenience
        t = tf.expand_dims(t, 0)
        r = tf.expand_dims(r, 0)
    t = tf.cast(tf.reshape(t, [-1, 1, 1, 1, 1, 1]), def_type)
    r = tf.cast(tf.reshape(r, [-1, 1, 1, 1, 1, 1]), def_type)
    mag_t = tf.cast(t, tf.float32)
    mag_r = tf.abs(r)
    phase_r = tf.atan2(tf.imag(r), tf.real(r))

    rng = tf.range(D, dtype=tf.float32)
    N = tf.reshape(rng, [1, -1, 1, 1, 1, 1])
    n = tf.reshape(rng, [1, 1, -1, 1, 1, 1])
    M = tf.reshape(rng, [1, 1, 1, -1, 1, 1])
    k = tf.reshape(rng, [1, 1, 1, 1, 1, -1])
    n_minus_k = n - k
    N_minus_k = N - k
    M_minus_n_plus_k = M - n + k
    # need to deal with 0*(-n) for n integer
    n_minus_k = tf.where(tf.greater(n, k), n_minus_k, tf.zeros_like(n_minus_k))
    N_minus_k = tf.where(tf.greater(N, k), N_minus_k, tf.zeros_like(N_minus_k))
    M_minus_n_plus_k = tf.where(tf.greater(M_minus_n_plus_k, 0),
                                M_minus_n_plus_k,
                                tf.zeros_like(M_minus_n_plus_k))

    powers = tf.cast(
        tf.pow(mag_t, k) * tf.pow(mag_r, n_minus_k) *
        tf.pow(mag_r, N_minus_k) * tf.pow(mag_t, M_minus_n_plus_k),
        def_type,
    )
    phase = tf.exp(1j * tf.cast(phase_r * (n - N), def_type))

    # load parameter-independent prefactors
    prefac = get_prefac_tensor(D, directory, save)

    if prefac.graph != phase.graph:
        # if cached prefactors live on another graph, we'll have to reload them into this graph.
        # In future versions, if 'copy_variable_to_graph' comes out of contrib, consider using that
        get_prefac_tensor.cache_clear()
        prefac = get_prefac_tensor(D, directory, save)

    BS_matrix = tf.reduce_sum(phase * powers * prefac, -1)

    if not batched:
        # drop artificial batch index
        BS_matrix = tf.squeeze(BS_matrix, [0])
    return BS_matrix
Example #26
def LSEnet(model, Ip, u1p, u2p):
    n_pair = 2
    # computation graph that defines least squared estimation of the electric field
    delta_Ep_pred = tf.cast(tf.tensordot(u1p, model.G1_real, axes=[[-1], [1]]) + tf.tensordot(u2p, model.G2_real, axes=[[-1], [1]]), tf.complex128) + \
        1j * tf.cast(tf.tensordot(u1p, model.G1_imag, axes=[[-1], [1]]) + tf.tensordot(u2p, model.G2_imag, axes=[[-1], [1]]), tf.complex128)

    delta_Ep_expand = tf.expand_dims(delta_Ep_pred, 2)
    delta_Ep_expand_diff = delta_Ep_expand[:, 1::2, :, :] - delta_Ep_expand[:, 2::2, :, :]

    # amp_square = 0.5 * (Ip[:, 1::2, :]+Ip[:, 2::2, :]) - tf.tile(tf.expand_dims(Ip[:, 0, :], 1), [1, n_pair, 1])
    # amp = tf.sqrt(tf.maximum(amp_square, tf.zeros_like(amp_square)))
    # amp_expand = tf.expand_dims(amp, 2)
    # delta_Ep_expand_diff = delta_Ep_expand_diff * tf.cast(amp_expand / tf.abs(delta_Ep_expand_diff), tf.complex128)

    y = tf.transpose(Ip[:, 1::2, :] - Ip[:, 2::2, :], [0, 2, 1])

    H = tf.concat(
        [2 * tf.real(delta_Ep_expand_diff), 2 * tf.imag(delta_Ep_expand_diff)],
        axis=2)
    H = tf.transpose(H, [0, 3, 1, 2])
    Ht_H = tf.matmul(tf.transpose(H, [0, 1, 3, 2]), H)
    Ht_H_inv_Ht = tf.matmul(
        tf.matrix_inverse(Ht_H + tf.eye(2, dtype=tf.float64) * 1e-12),
        tf.transpose(H, [0, 1, 3, 2]))
    x_new = tf.squeeze(tf.matmul(Ht_H_inv_Ht, tf.expand_dims(y, -1)), -1)

    n_observ = model.n_observ
    contrast_p = tf.reduce_mean(Ip, axis=2)

    d_contrast_p = tf.reduce_mean(tf.abs(delta_Ep_pred)**2, axis=2)

    # Rp = tf.tensordot(tf.expand_dims(model.R0 + model.R1*contrast_p + 4*(model.Q0+model.Q1*d_contrast_p)*contrast_p, axis=-1),
    # tf.ones((1, model.num_pix), dtype=tf.float64), axes=[[-1], [0]]) + 1e-24
    Rp = tf.tensordot(tf.expand_dims(model.R0 * tf.ones_like(contrast_p),
                                     axis=-1),
                      tf.ones((1, model.num_pix), dtype=tf.float64),
                      axes=[[-1], [0]]) + 1e-24
    Rp = tf.transpose(Rp, [0, 2, 1])
    R_diff = Rp[:, :, 1::2] + Rp[:, :, 2::2]
    R = tf.matrix_set_diag(
        tf.concat([tf.expand_dims(tf.zeros_like(R_diff), -1)] *
                  (n_observ // 2), -1), R_diff)
    P_new = tf.matmul(tf.matmul(Ht_H_inv_Ht, R),
                      tf.transpose(Ht_H_inv_Ht, [0, 1, 3, 2]))
    Enp_pred_new = tf.cast(x_new[:, :, 0], dtype=tf.complex128) + 1j * tf.cast(
        x_new[:, :, 1], dtype=tf.complex128)
    return Enp_pred_new, P_new, H
Example #27
    def call(self, inputs, state):
        """The most basic URNN cell.
        Args:
            inputs (Tensor - batch_sz x num_in): One batch of cell input.
            state (Tensor - batch_sz x num_units): Previous cell state: COMPLEX
        Returns:
        A tuple (outputs, state):
            outputs (Tensor - batch_sz x num_units*2): Cell outputs on the whole batch.
            state (Tensor - batch_sz x num_units): New state of the cell.
        """
        #print("cell.call inputs:", inputs.shape, inputs.dtype)
        #print("cell.call state:", state.shape, state.dtype)

        # prepare input linear combination
        inputs_mul = tf.matmul(inputs, tf.transpose(
            self.w_ih))  # [batch_sz, 2*num_units]
        inputs_mul_c = tf.complex(inputs_mul[:, :self._num_units],
                                  inputs_mul[:, self._num_units:])
        # [batch_sz, num_units]

        with tf.name_scope("hidden"):
            # prepare state linear combination (always complex!)
            state_c = tf.complex(state[:, :self._num_units],
                                 state[:, self._num_units:])

            state_mul = self.D1.mul(state_c)
            state_mul = mat.FFT(state_mul)
            state_mul = self.R1.mul(state_mul)
            state_mul = self.P.mul(state_mul)
            state_mul = self.D2.mul(state_mul)
            state_mul = mat.IFFT(state_mul)
            state_mul = self.R2.mul(state_mul)
            state_mul = self.D3.mul(state_mul)
            # [batch_sz, num_units]

            # calculate preactivation
            preact = inputs_mul_c + state_mul
            # [batch_sz, num_units]

        new_state_c = mat.modReLU(preact, self.b_h)  # [batch_sz, num_units] C
        new_state = tf.concat(
            [tf.real(new_state_c), tf.imag(new_state_c)],
            1)  # [batch_sz, 2*num_units] R
        # outside network (last dense layer) is ready for 2*num_units -> num_out
        output = new_state
        # print("cell.call output:", output.shape, output.dtype)
        # print("cell.call new_state:", new_state.shape, new_state.dtype)

        return output, new_state
Example #28
def layers_conv1d_transpose_complex(inputs,
                                    filters,
                                    kernal,
                                    strides=1,
                                    padding='valid'):
    '''
    Implement 1-D complex convolution layer based on real 2-D convolution layer
    :param inputs: 4-D real or 3-D complex tensor, [batch, size, channel, IQ(2)]
    :param filters: number of filters
    :param kernal: size of kernal
    :param strides:
    :param padding:
    :return: 4-D real or 3-D complex tensor, [batch, size, filters, IQ(2)]
    '''
    shapes = inputs.get_shape()
    rank = len(shapes)
    dtype = inputs.dtype
    assert (type(kernal) == int)
    if rank == 3 and dtype == tf.complex64:
        inputs_re = tf.real(inputs)
        inputs_im = tf.imag(inputs)
        inputs_re = tf.reshape(inputs_re, [-1, shapes[1], shapes[2], 1])
        inputs_im = tf.reshape(inputs_im, [-1, shapes[1], shapes[2], 1])
        inputs = tf.concat([inputs_re, inputs_im], axis=3)
        complex_flag = True
    elif rank == 4 and shapes[-1] == 2:
        complex_flag = False
        pass
    else:
        raise NameError('Check input tensor dtypes or shape')

    conv = tf.transpose(inputs, perm=[0, 1, 3, 2])

    conv = tf.layers.conv2d_transpose(conv,
                                      filters * 2, (kernal, 1),
                                      strides=strides,
                                      padding=padding)
    conv = tf.reshape(conv, [-1, shapes[1], shapes[3] * 2, filters])
    shapes_conv = conv.get_shape().as_list()
    assert (shapes[3] == 2)
    conv_re = conv[:, :, 0, :] - conv[:, :, 3, :]
    conv_im = conv[:, :, 1, :] - conv[:, :, 2, :]
    conv_re = tf.reshape(conv_re, [-1, shapes_conv[1], 1, filters])
    conv_im = tf.reshape(conv_im, [-1, shapes_conv[1], 1, filters])
    output = tf.concat([conv_re, conv_im], axis=2)
    output = tf.transpose(output, perm=[0, 1, 3, 2])
    if complex_flag:
        output = tf.complex(output[:, :, :, 0], output[:, :, :, 1])
    return output
Example #29
def upsample(x, mask):

    image_complex = tf.ifft2d(x)
    image_size = [FLAGS.batch_size, FLAGS.sample_size,
                  FLAGS.sample_size_y]  #tf.shape(image_complex)

    #get real and imaginary parts
    image_real = tf.reshape(tf.real(image_complex),
                            [image_size[0], image_size[1], image_size[2], 1])
    image_imag = tf.reshape(tf.imag(image_complex),
                            [image_size[0], image_size[1], image_size[2], 1])

    out = tf.concat([image_real, image_imag], 3)

    return out
Example #30
    def angle(self, z):
        if z.dtype == tf.complex128:
            dtype = tf.float64
        elif z.dtype == tf.complex64:
            dtype = tf.float32
        else:
            raise ValueError('input z must be of type complex64 or complex128')

        x = tf.real(z)
        y = tf.imag(z)
        x_neg = tf.cast(x < 0.0, dtype)
        y_neg = tf.cast(y < 0.0, dtype)
        y_pos = tf.cast(y >= 0.0, dtype)
        offset = x_neg * (y_pos - y_neg) * np.pi
        return tf.atan(y / x) + offset
Example #31
 def squeeze(self, z, mode):
     """
     Apply the single-mode squeezing operator to the specified mode.
     """
     with self._graph.as_default():
         z = tf.cast(z, ops.def_type)
         r = tf.abs(z)
         x = tf.real(z)
         y = tf.imag(z)
         theta = tf.atan2(y, x)
         r = self._maybe_batch(r)
         theta = self._maybe_batch(theta)
         self._check_incompatible_batches(r, theta)
         new_state = ops.squeezer(r, theta, mode, self._state, self._cutoff_dim, self._state_is_pure, self._batched)
         self._update_state(new_state)
Example #32
 def single_memory_gate(self, h, x, scope, bias_init):
     """
     Use the real and imaginary parts of the gate equation to do the gating.
     """
     with tf.variable_scope(scope, self._reuse):
         if self._real:
             raise ValueError('Real cells cannot be single gated.')
         else:
             ghs = complex_matmul(h,
                                  self._num_units,
                                  scope='ghs',
                                  reuse=self._reuse)
             gxs = complex_matmul(x,
                                  self._num_units,
                                  scope='gxs',
                                  reuse=self._reuse,
                                  bias=True,
                                  bias_init_c=bias_init,
                                  bias_init_r=bias_init)
             gs = ghs + gxs
             return (tf.complex(tf.nn.sigmoid(tf.real(gs)),
                                tf.zeros_like(tf.real(gs))),
                     tf.complex(tf.nn.sigmoid(tf.imag(gs)),
                                tf.zeros_like(tf.imag(gs))))
Example #33
def complex_to_channels(image,
                        data_format='channels_last',
                        name='complex2channels'):
    """Convert data from complex to channels."""
    if len(image.shape) != 3 and len(image.shape) != 4:
        raise TypeError('Input data must be have 3 or 4 dimensions')

    axis_c = -1 if data_format == 'channels_last' else -3

    if image.dtype is not tf.complex64 and image.dtype is not tf.complex128:
        raise TypeError('Input data must be complex')

    with tf.name_scope(name):
        image_out = tf.concat((tf.real(image), tf.imag(image)), axis_c)
    return image_out
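A short usage sketch (not part of the original snippet), assuming TensorFlow 1.x and the complex_to_channels function above in scope:

import numpy as np
import tensorflow as tf

image = tf.constant((np.random.randn(8, 8, 2) + 1j * np.random.randn(8, 8, 2)).astype(np.complex64))
channels = complex_to_channels(image)  # channels_last: real and imaginary parts concatenated

with tf.Session() as sess:
    print(sess.run(channels).shape)  # (8, 8, 4)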
Example #34
def calRx(H_hat, Tx, hparams):
    H_hat = tf.squeeze(H_hat * hparams.scale_factor)
    H_hat_real = tf.slice(H_hat, [0, 0, 0], [-1, -1, 1])
    H_hat_imag = tf.slice(H_hat, [0, 0, 1], [-1, -1, 1])

    Tx_real = tf.slice(Tx, [0, 0, 0], [-1, -1, 1])
    Tx_imag = tf.slice(Tx, [0, 0, 1], [-1, -1, 1])

    H_hat_complex = tf.complex(H_hat_real, H_hat_imag, name='H_hat_complex')
    Tx_complex = tf.complex(Tx_real, Tx_imag, name='Tx_complex')

    Rx_complex = tf.multiply(H_hat_complex, Tx_complex)
    Rx = tf.concat([tf.real(Rx_complex), tf.imag(Rx_complex)], axis=2)

    return Rx
Example #35
def add_convolution_params(params, const_params, config):
    def generate_random_numbers(config, zero_mean=True):
        init = np.random.randn(config['num_stages'],
                               config['filter_size'],
                               config['filter_size'],
                               config['features_in'],
                               config['features_out']).astype(np.float32) / \
               np.sqrt(config['filter_size'] ** 2 * config['features_in'])
        if zero_mean:
            init -= np.mean(init, axis=(1, 2, 3), keepdims=True)

        return init

    # define prox calculations
    if 'prox_zero_mean' in config and config['prox_zero_mean'] == False:
        prox_zero_mean = False
    else:
        prox_zero_mean = True

    if 'prox_norm' in config and config['prox_norm'] == False:
        prox_norm = False
    else:
        prox_norm = True

    print('kernel {}'.format(config['name']))
    print('  prox_zero_mean: ', prox_zero_mean)
    print('  prox_norm: ', prox_norm)

    # filter kernels
    k_0 = generate_random_numbers(config) + 1j * generate_random_numbers(config)
    k = tf.Variable(initial_value=k_0, dtype=tf.complex64, name=config['name'])

    prox_k = proxmaps.zero_mean_norm_ball(k, zero_mean=prox_zero_mean, normalize=prox_norm, axis=(1,2,3))

    params.add(k, prox=prox_k)

    # add kernels to summary
    def get_kernel_img(k):
        _, _, n_f_in, n_f_out = k.shape
        k_img = tf.concat([tf.concat([k[:, :, in_f, out_f] for in_f in range(n_f_in)], axis=0)
                           for out_f in range(n_f_out)], axis=1)
        k_img = tf.expand_dims(tf.expand_dims(k_img, -1), 0)
        return k_img

    with tf.variable_scope('kernel_%s_summary' % config['name']):
        for i in range(config['num_stages']):
            tf.summary.image('%s_%d_real' % (config['name'], i + 1), get_kernel_img(tf.real(k[i])), collections=['images'])
            tf.summary.image('%s_%d_imag' % (config['name'], i + 1), get_kernel_img(tf.imag(k[i])), collections=['images'])
Example #36
  def get_mu_tensor(self):
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact) )        
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)
    
    # filter out the correct root
    root_idx = tf.logical_and(tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0) ),
      tf.less(tf.real(roots), tf.constant(1.0) ) ), tf.less(tf.abs(tf.imag(roots) ), 1e-5) )
    # in case there are two duplicated roots satisfying the above condition
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx) ), tf.constant(0) ), shape=[] )
    tf.assert_equal(tf.size(root), tf.constant(1) )

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2, ( (tf.sqrt(dr) - 1)/(tf.sqrt(dr) + 1) )**2)    
    return mu
Example #37
def get_power_inverse(signal, channel_axis=0):
    """Calculates inverse power for `signal`

    Args:
        signal (tf.Tensor): Single frequency signal with shape (D, T).
        channel_axis (int): Axis of the channel dimension. Will be averaged.

    Returns:
        tf.Tensor: Inverse power with shape (T,)

    """
    power = tf.reduce_mean(
        tf.real(signal) ** 2 + tf.imag(signal) ** 2, axis=channel_axis)
    eps = 1e-10 * tf.reduce_max(power)
    inverse_power = tf.reciprocal(tf.maximum(power, eps))
    return inverse_power
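A minimal usage sketch (not part of the original snippet), assuming TensorFlow 1.x and the get_power_inverse function above in scope:

import numpy as np
import tensorflow as tf

D, T = 4, 6
signal = tf.constant((np.random.randn(D, T) + 1j * np.random.randn(D, T)).astype(np.complex64))
inverse_power = get_power_inverse(signal)  # averaged over the channel axis -> shape (T,)

with tf.Session() as sess:
    print(sess.run(inverse_power))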
Example #38
 def _compareGradient(self, x):
     # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
     # complex numbers. Then, we extract real and imag parts and
     # computes the squared sum. This is obviously the same as sum(real
     # * real) + sum(imag * imag). We just want to make sure the
     # gradient function is checked.
     with self.test_session():
         inx = tf.convert_to_tensor(x)
         real, imag = tf.split(1, 2, inx)
         real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
         cplx = tf.complex(real, imag)
         cplx = tf.conj(cplx)
         loss = tf.reduce_sum(tf.square(tf.real(cplx))) + tf.reduce_sum(tf.square(tf.imag(cplx)))
         epsilon = 1e-3
         jacob_t, jacob_n = tf.test.compute_gradient(inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
     self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Example #39
  def get_mu_tensor(self):
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact) )        
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)
    
    # filter out the correct root
    root_idx = tf.logical_and(tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0) ),
      tf.less(tf.real(roots), tf.constant(1.0) ) ), tf.less(tf.abs(tf.imag(roots) ), 1e-5) )
    # in case there are two duplicated roots satisfying the above condition
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx) ), tf.constant(0) ), shape=[] )
    #tf.assert_equal(tf.size(root), tf.constant(1) )

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2, ( (tf.sqrt(dr) - 1)/(tf.sqrt(dr) + 1) )**2)    
    return mu
Example #40
    def __call__(self, inputs, state, scope=None ):
        zero_initer = tf.constant_initializer(0.)
        with tf.variable_scope(scope or type(self).__name__):

            #nick there are these two matrix multiplications and they are used to convert regular input sizes to complex outputs -- makes sense -- we can further modify this for lstm configurations
            mat_in = tf.get_variable('W_in', [self.input_size, self.state_size*2])
            mat_out = tf.get_variable('W_out', [self.state_size*2, self.output_size])

            in_proj = tf.matmul(inputs, mat_in)
            in_proj_c = tf.complex( in_proj[:, :self.state_size], in_proj[:, self.state_size:] )
            out_state = modrelu_c( in_proj_c + 
                ulinear_c(state,transform=self.transform),
                tf.get_variable(name='B', dtype=tf.float32, shape=[self.state_size], initializer=zero_initer)
                )
            out_bias = tf.get_variable(name='B_out', dtype=tf.float32, shape=[self.output_size], initializer = zero_initer)
            out = tf.matmul( tf.concat(1,[tf.real(out_state), tf.imag(out_state)] ), mat_out ) + out_bias
        return out, out_state
Example #41
  def __init__(self, **kwargs):
    """
    """
    def _interleaveVectors(vec1, vec2):
        vec1 = tf.expand_dims(vec1, 3)
        vec2 = tf.expand_dims(vec2, 3)
        interleaved = tf.concat([vec1, vec2], 3)
        interleaved = tf.reshape(interleaved, (tf.shape(vec1)[0], tf.shape(vec1)[1], tf.shape(vec1)[2] * 2))
        return interleaved
    super(ComplexToAlternatingRealLayer, self).__init__(**kwargs)

    input_placeholder = self.input_data.get_placeholder_as_batch_major()

    real_value = tf.real(input_placeholder)
    imag_value = tf.imag(input_placeholder)
    self.output.placeholder = _interleaveVectors(real_value, imag_value)
    self.output.size_placeholder = {0: self.input_data.size_placeholder[self.input_data.time_dim_axis_excluding_batch]}
Example #42
    def _optimize_2s_local(self,
                           thresh=1E-10,
                           D=None,
                           ncv=40,
                           Ndiag=10,
                           landelta=1E-5,
                           landeltaEta=1E-5,
                           verbose=0):
        raise NotImplementedError()
        mpol = self.mpo[self.mpo.pos - 1]
        mpor = self.mpo[self.mpo.pos]
        Ml, Mc, dl, dlp = mpol.shape
        Mc, Mr, dr, drp = mpor.shape
        mpo = tf.reshape(
            ncon.ncon([mpol, mpor], [[-1, 1, -3, -5], [1, -2, -4, -6]]),
            [Ml, Mr, dl * dr, dlp * drp])
        initial = ncon.ncon([
            self.mps[self.mps.pos-1],
            self.mps.mat,
            self.mps[self.mps.pos]],
                            [[-1,-2,1],[1,2],[2,-3,-4]]
        )
        Dl,dl,dr,Dr=initial.shape
        tf.reshape(initial,[Dl,dl*dr,Dr])
        if self.walltime_log:
            t1=time.time()
            
        nit, vecs, alpha, beta = LZ.do_lanczos(
            L=self.left_envs[self.mps.pos - 1],
            mpo=mpo,
            R=self.right_envs[self.mps.pos],
            initial_state=initial,
            ncv=ncv,
            delta=landelta
        )
        if self.walltime_log:
            self.walltime_log(lan=[(time.time()-t1)/float(nit)]*int(nit),QR=[],add_layer=[],num_lan=[int(nit)])                        
        
        temp = tf.reshape(
            tf.reshape(opt, [
                self.mps.D[self.mps.pos - 1], dlp, drp,
                self.mps.D[self.mps.pos + 1]
            ]), [])
        opt.split(mps_merge_data).transpose(0, 2, 3, 1).merge([[0, 1], [2, 3]])

        U, S, V = temp.svd(truncation_threshold=thresh, D=D)
        Dnew = S.shape[0]
        if verbose > 0:
            stdout.write(
                "\rTS-DMRG it=%i/%i, sites=(%i,%i)/%i: optimized E=%.16f+%.16f at D=%i"
                % (self._it, self.Nsweeps, self.mps.pos - 1, self.mps.pos,
                   len(self.mps), tf.real(e), tf.imag(e), Dnew))
            stdout.flush()
        if verbose > 1:
            print("")
        
        Z = np.sqrt(ncon.ncon([S, S], [[1], [1]]))
        self.mps.mat = S.diag() / Z

        self.mps[self.mps.pos - 1] = U.split([merge_data[0],
                                              [U.shape[1]]]).transpose(0, 2, 1)
        self.mps[self.mps.pos] = V.split([[V.shape[0]],
                                          merge_data[1]]).transpose(0, 2, 1)
        self.left_envs[self.mps.pos] = self.add_layer(
            B=self.left_envs[self.mps.pos - 1],
            mps_tensor=self.mps[self.mps.pos - 1],
            mpo_tensor=self.mpo[self.mps.pos - 1],
            conj_mps_tensor=self.mps[self.mps.pos - 1],
            direction=1
        )
        
        self.right_envs[self.mps.pos - 1] = self.add_layer(
            B=self.right_envs[self.mps.pos],
            mps_tensor=self.mps[self.mps.pos],
            mpo_tensor=self.mpo[self.mps.pos],
            conj_mps_tensor=self.mps[self.mps.pos],
            direction=-1
        )
        return e
Example #43
def stack_real_imag(x):

    stack_axis = len(x.get_shape().as_list())
    return tf.stack((tf.real(x), tf.imag(x)), axis=stack_axis)
Example #44
def complex_mul_real( z, r ):
    return tf.complex(tf.real(z)*r, tf.imag(z)*r)
Example #45
def abs2_c(z):
    return tf.real(z)*tf.real(z)+tf.imag(z)*tf.imag(z)
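A short usage sketch (not part of the original snippets), assuming TensorFlow 1.x and the three small helpers above in scope:

import numpy as np
import tensorflow as tf

z = tf.constant(np.array([1 + 2j, 3 - 4j], dtype=np.complex64))

with tf.Session() as sess:
    print(sess.run(stack_real_imag(z)))        # [[ 1.  2.] [ 3. -4.]]
    print(sess.run(complex_mul_real(z, 2.0)))  # [2.+4.j  6.-8.j]
    print(sess.run(abs2_c(z)))                 # [ 5. 25.]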
Example #46
def compose(input, rank=3):
    return input
    real = tf.real(input)
    imag = tf.imag(input)
    return tf.concat(rank, [real, imag])
Example #47
def build_func():
    slices_tensor = tf.placeholder(dtype=tf.complex64, shape=[None, None])
    S_tensor = tf.placeholder(dtype=tf.complex64, shape=[None, None])
    envelope_tensor = tf.placeholder(dtype=tf.float32, shape=[None])
    ctf_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None])
    d_tensor = tf.placeholder(dtype=tf.complex64, shape=[None, None])
    logW_S_tensor = tf.placeholder(dtype=tf.float32, shape=[None])
    logW_I_tensor = tf.placeholder(dtype=tf.float32, shape=[None])
    logW_R_tensor = tf.placeholder(dtype=tf.float32, shape=[None])
    div_in_tensor = tf.placeholder(dtype=tf.float32, shape=[])
    sigma2_coloured_tensor = tf.placeholder(dtype=tf.float32, shape=[None])

    cproj = tf.expand_dims(slices_tensor, 1) * tf.complex(ctf_tensor, tf.zeros_like(ctf_tensor)) # r * i * t
    cim = tf.expand_dims(S_tensor, 1) * d_tensor  # s * i * t
    correlation_I = tf.real(tf.expand_dims(cproj, 1)) * tf.real(cim) \
                    + tf.imag(tf.expand_dims(cproj, 1)) * tf.imag(cim)  # r * s * i * t
    power_I = tf.real(cproj) ** 2 + tf.imag(cproj) ** 2  # r * i * t

    g_I =tf.complex(envelope_tensor, tf.zeros_like(envelope_tensor)) * tf.expand_dims(cproj, 1) - cim  # r * s * i * t

    sigma2_I = tf.real(g_I) ** 2 + tf.imag(g_I) ** 2  # r * s * i * t

    tmp = tf.reduce_sum(sigma2_I / sigma2_coloured_tensor, reduction_indices=3)  # r * s * i

    e_I = div_in_tensor * tmp + logW_I_tensor  # r * s * i

    g_I *= tf.complex(ctf_tensor, tf.zeros_like(ctf_tensor))  # r * s * i * t


    etmp = my_logsumexp_tensorflow(e_I)  # r * s
    e_S = etmp + logW_S_tensor  # r * s

    tmp = logW_S_tensor + tf.expand_dims(logW_R_tensor, 1)  # r * s
    phitmp = tf.exp(e_I - tf.expand_dims(etmp, 2))  # r * s * i
    I_tmp = tf.expand_dims(tmp, 2) + e_I

    correlation_S = tf.reduce_sum(tf.expand_dims(phitmp, 3) * correlation_I, reduction_indices=2)  # r * s * t
    power_S = tf.reduce_sum(tf.expand_dims(phitmp, 3) * tf.expand_dims(power_I, 1), reduction_indices=2)  # r * s * t
    sigma2_S = tf.reduce_sum(tf.expand_dims(phitmp, 3) * sigma2_I, reduction_indices=2)  # r * s * t
    g_S = tf.reduce_sum(tf.complex(tf.expand_dims(phitmp, 3), tf.zeros_like(tf.expand_dims(phitmp, 3)))
        * g_I, reduction_indices=2)  # r * s * t

    etmp = my_logsumexp_tensorflow(e_S)  # r
    e_R = etmp + logW_R_tensor  # r

    tmp = logW_R_tensor  # r
    phitmp = tf.exp(e_S - tf.expand_dims(etmp, 1))  # r * s
    S_tmp = tf.expand_dims(tmp, 1) + e_S
    correlation_R = tf.reduce_sum(tf.expand_dims(phitmp, 2) * correlation_S, reduction_indices=1)  # r * t
    power_R = tf.reduce_sum(tf.expand_dims(phitmp, 2) * power_S, reduction_indices=1)  # r * t
    sigma2_R = tf.reduce_sum(tf.expand_dims(phitmp, 2) * sigma2_S, reduction_indices=1)  # r * t

    g = tf.reduce_sum(tf.complex(tf.expand_dims(phitmp, 2), tf.zeros_like(tf.expand_dims(phitmp, 2)))
        * g_S, reduction_indices=1)  # r * t

    tmp = -2.0 * div_in_tensor
    nttmp = tmp * envelope_tensor / sigma2_coloured_tensor

    e = my_logsumexp_tensorflow(e_R)
    lse_in = -e

    # Noise estimate
    phitmp = e_R - e  # r
    R_tmp = phitmp
    phitmp = tf.exp(phitmp)

    sigma2_est = tf.squeeze(tf.matmul(tf.expand_dims(phitmp, 0), sigma2_R), squeeze_dims=[0])
    correlation = tf.squeeze(tf.matmul(tf.expand_dims(phitmp, 0), correlation_R), squeeze_dims=[0])
    power = tf.squeeze(tf.matmul(tf.expand_dims(phitmp, 0), power_R), squeeze_dims=[0])

    global func
    global inputs
    global objectives
    func = tf.Session()
    inputs = [slices_tensor,
              S_tensor,
              envelope_tensor,
              ctf_tensor,
              d_tensor,
              logW_S_tensor,
              logW_I_tensor,
              logW_R_tensor,
              div_in_tensor,
              sigma2_coloured_tensor]
    objectives = [g, I_tmp, S_tmp, R_tmp, sigma2_est, correlation, power, nttmp, lse_in, phitmp]
Example #48
File: ops.py  Project: kestrelm/tfdeploy
 def test_Imag(self):
     t = tf.imag(self.random(3, 4, complex=True))
     self.check(t)