Example #1
def fft_cost(true, pred, conf, fft_weights=None):

    #loop over the color channels:
    cost = 0.
    true_fft_abssum = 0
    pred_fft_abssum = 0
    for i in range(3):

        slice_true = tf.slice(true, [0, 0, 0, i], [-1, -1, -1, 1])
        slice_pred = tf.slice(pred, [0, 0, 0, i], [-1, -1, -1, 1])

        slice_true = tf.squeeze(
            tf.complex(slice_true, tf.zeros_like(slice_true)))
        slice_pred = tf.squeeze(
            tf.complex(slice_pred, tf.zeros_like(slice_pred)))

        true_fft = tf.fft2d(slice_true)
        pred_fft = tf.fft2d(slice_pred)

        if 'fft_emph_highfreq' in conf:
            abs_diff = tf.mul(tf.complex_abs(true_fft - pred_fft), fft_weights)
            cost += tf.reduce_sum(tf.square(abs_diff)) / tf.to_float(
                tf.size(pred_fft))
        else:
            cost += tf.reduce_sum(
                tf.square(tf.complex_abs(true_fft - pred_fft))) / tf.to_float(
                    tf.size(pred_fft))

        true_fft_abssum += tf.complex_abs(true_fft)
        pred_fft_abssum += tf.complex_abs(pred_fft)

    return cost, true_fft_abssum, pred_fft_abssum
def buildModel_fft(input_dim):
    # This network is used to pre-train the optical flow.
    input_ = Input(shape=(input_dim))
    # =========================================================================
    act_ = net_base(input_, nb_filter=64)
    # =========================================================================
    density_pred = Convolution2D(1, 1, 1, bias=False, activation='linear',
                                 init='orthogonal', name='pred', border_mode='same')(act_)

    imageMean = tf.reduce_mean(density_pred)
    node4 = tf.reshape(density_pred, [1, 1, 128, 128])
    fftstack = tf.fft2d(tf.complex(node4, tf.zeros((1, 1, 128, 128))))
    out = (tf.cast(tf.complex_abs(tf.ifft2d(fftstack * tf.conj(fftstack))),
                   dtype=tf.float32) / imageMean**2 / (128 * 128)) - 1

    def count(out):
        sigma = 4.0
        rough_sig = sigma * 2.3588 * 0.8493218
        return (128 * 128) / (
            (tf.reduce_max(out) - tf.reduce_min(out)) * np.pi * (rough_sig**2))

    #lam.build((1,1))
    y_true_cast = K.placeholder(shape=(1, 1, 128, 128), dtype='float32')
    #K.set_value(y_true_cast,out)

    # =========================================================================
    model = Model(input=input_, output=density_pred)
    opt = SGD(lr=1e-2, momentum=0.9, nesterov=True)
    model.compile(optimizer=opt, loss='mse')
    return model
Example #3
    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            unitary_hidden_state, secondary_cell_hidden_state = tf.split(
                1, 2, state)

            mat_in = tf.get_variable('mat_in',
                                     [self.input_size, self.state_size * 2])
            mat_out = tf.get_variable('mat_out',
                                      [self.state_size * 2, self.output_size])
            in_proj = tf.matmul(inputs, mat_in)
            # tf.split returns a list of two tensors; unpack them as the real and imaginary parts.
            in_proj_c = tf.complex(*tf.split(1, 2, in_proj))
            out_state = modReLU(
                in_proj_c + ulinear(unitary_hidden_state, self.state_size),
                tf.get_variable(name='bias',
                                dtype=tf.float32,
                                shape=tf.shape(unitary_hidden_state),
                                initializer=tf.constant_initializer(0.)),
                scope=scope)

        with tf.variable_scope('unitary_output'):
            '''computes data linear, unitary linear and summation -- TODO: should be complex output'''
            unitary_linear_output_real = linear.linear(
                [tf.real(out_state),
                 tf.imag(out_state), inputs], True, 0.0)

        with tf.variable_scope('scale_nonlinearity'):
            modulus = tf.complex_abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        #transition to data shortcut connection

        #out_ = tf.matmul(tf.concat(1,[tf.real(out_state), tf.imag(out_state), ] ), mat_out) + out_bias

        # hidden state is complex but the output is completely real
        # NOTE: out_ is only defined if the commented-out matmul above is restored
        # (and an out_bias variable is created); as written, this returns an undefined name.
        return out_, out_state  #complex
Example #5
    def _testGrad(self,
                  shape,
                  dtype=None,
                  max_error=None,
                  bias=None,
                  sigma=None):
        np.random.seed(7)
        if dtype in (tf.complex64, tf.complex128):
            value = tf.complex(
                self._biasedRandN(shape, bias=bias, sigma=sigma),
                self._biasedRandN(shape, bias=bias, sigma=sigma))
        else:
            value = tf.convert_to_tensor(self._biasedRandN(shape, bias=bias),
                                         dtype=dtype)

        for use_gpu in [True, False]:
            with self.test_session(use_gpu=use_gpu):
                if dtype in (tf.complex64, tf.complex128):
                    output = tf.complex_abs(value)
                else:
                    output = tf.abs(value)
                error = tf.test.compute_gradient_error(
                    value, shape, output,
                    output.get_shape().as_list())
        self.assertLess(error, max_error)
Example #6
File: fft.py  Project: diggerdu/Mendelssohn
def istft(spec, overlap=4):
    assert (spec.shape[0] > 1)
    S = tf.placeholder(dtype=tf.complex64, shape=spec.shape)
    X = tf.complex_abs(tf.concat(0, [tf.ifft(frame) \
            for frame in tf.unstack(S)]))
    sess = tf.Session()
    return sess.run(X, feed_dict={S: spec})
Example #7
def modrelu_c(in_c, bias):
    if not in_c.dtype.is_complex:
        raise(ValueError('modrelu_c: Argument in_c must be complex type'))
    if bias.dtype.is_complex:
        raise(ValueError('modrelu_c: Argument bias must be real type'))
    n = tf.complex_abs(in_c)
    scale = 1./(n+1e-5)
    return complex_mul_real(in_c, ( tf.nn.relu(n+bias)*scale ))
Example #8
 def fft_discriminator(self, inp):
     scaled_inp = inp / 256
     shuffled_inp = tf.transpose(scaled_inp, perm=[0, 3, 1, 2])
     inp_fft = tf.fft2d(tf.cast(shuffled_inp, tf.complex64))
     amp = tf.complex_abs(inp_fft)
     with tf.variable_scope("fft_dense1"):
         h = dense_block(amp, leaky_relu=True, output_size=1024)
     with tf.variable_scope("fft_dense2"):
         h = dense_block(h, output_size=1)
     return h
Example #9
 def testArithmeticRenames(self):
     with self.test_session() as s:
         stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
         vals = s.run(stuff)
         self.assertAllEqual(vals, [[[1, 2], [4, 5]], [[3, 4], [6, 7]]])
         self.assertAllEqual(tf.neg(tf.mul(tf.add(1, 2), tf.sub(5, 3))).eval(), -6)
         self.assertAllEqual(s.run(tf.listdiff([1, 2, 3], [3, 3, 4]))[0], [1, 2])
         self.assertAllEqual(tf.list_diff([1, 2, 3], [3, 3, 4])[0].eval(), [1, 2])
         a = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
         foo = np.where(np.less(a, 2), np.negative(a), a)
         self.assertAllEqual(tf.select(tf.less(a, 2), tf.neg(a), a).eval(), foo)
         self.assertAllEqual(tf.complex_abs(tf.constant(3 + 4.0j)).eval(), 5)
Example #10
    def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
        np.random.seed(7)
        if dtype in (tf.complex64, tf.complex128):
            value = tf.complex(
                self._biasedRandN(shape, bias=bias, sigma=sigma), self._biasedRandN(shape, bias=bias, sigma=sigma)
            )
        else:
            value = tf.convert_to_tensor(self._biasedRandN(shape, bias=bias), dtype=dtype)

        with self.test_session(use_gpu=True):
            if dtype in (tf.complex64, tf.complex128):
                output = tf.complex_abs(value)
            else:
                output = tf.abs(value)
            error = tf.test.compute_gradient_error(value, shape, output, output.get_shape().as_list())
        self.assertLess(error, max_error)
Example #11
 def testArithmeticRenames(self):
     with self.cached_session() as s:
         stuff = tf.split(1, 2, [[1, 2, 3, 4], [4, 5, 6, 7]])
         vals = s.run(stuff)
         self.assertAllEqual(vals, [[[1, 2], [4, 5]], [[3, 4], [6, 7]]])
         self.assertAllEqual(
             tf.neg(tf.mul(tf.add(1, 2), tf.sub(5, 3))).eval(), -6)
         self.assertAllEqual(
             s.run(tf.listdiff([1, 2, 3], [3, 3, 4]))[0], [1, 2])
         self.assertAllEqual(
             tf.list_diff([1, 2, 3], [3, 3, 4])[0].eval(), [1, 2])
         a = [[1., 2., 3.], [4., 5., 6.]]
         foo = np.where(np.less(a, 2), np.negative(a), a)
         self.assertAllEqual(
             tf.select(tf.less(a, 2), tf.neg(a), a).eval(), foo)
         self.assertAllEqual(tf.complex_abs(tf.constant(3 + 4.j)).eval(), 5)
Example #12
File: rotate.py  Project: ringw/MetaOMR
def get_angle(page):
  img = tf.cast(page.image, tf.float32)
  square = get_square(img)
  f = tf.complex_abs(tf.fft2d(tf.cast(square, tf.complex64))[:MAX_SIZE//2, :])
  x_arr = (
      tf.cast(tf.concat(0,
                        [tf.range(MAX_SIZE // 2),
                         tf.range(1, MAX_SIZE // 2 + 1)[::-1]]),
              tf.float32))[None, :]
  y_arr = tf.cast(tf.range(MAX_SIZE // 2), tf.float32)[:, None]
  f = tf.select(x_arr * x_arr + y_arr * y_arr < 32 * 32, tf.zeros_like(f), f)
  m = tf.argmax(tf.reshape(f, [-1]), dimension=0)
  x = tf.cast((m + MAX_SIZE // 4) % (MAX_SIZE // 2) - (MAX_SIZE // 4), tf.float32)
  y = tf.cast(tf.floordiv(m, MAX_SIZE // 2), tf.float32)
  return(tf.cond(
      y > 0, lambda: tf.atan(x / y), lambda: tf.constant(np.nan, tf.float32)),
      square)
Example #13
def get_angle(page):
    img = tf.cast(page.image, tf.float32)
    square = get_square(img)
    f = tf.complex_abs(
        tf.fft2d(tf.cast(square, tf.complex64))[:MAX_SIZE // 2, :])
    x_arr = (tf.cast(
        tf.concat(
            0, [tf.range(MAX_SIZE // 2),
                tf.range(1, MAX_SIZE // 2 + 1)[::-1]]), tf.float32))[None, :]
    y_arr = tf.cast(tf.range(MAX_SIZE // 2), tf.float32)[:, None]
    f = tf.select(x_arr * x_arr + y_arr * y_arr < 32 * 32, tf.zeros_like(f), f)
    m = tf.argmax(tf.reshape(f, [-1]), dimension=0)
    x = tf.cast((m + MAX_SIZE // 4) % (MAX_SIZE // 2) - (MAX_SIZE // 4),
                tf.float32)
    y = tf.cast(tf.floordiv(m, MAX_SIZE // 2), tf.float32)
    return (tf.cond(y > 0, lambda: tf.atan(x / y),
                    lambda: tf.constant(np.nan, tf.float32)), square)
Example #14
File: fft.py  Project: diggerdu/Mendelssohn
def stft(wav, n_fft=1024, overlap=4, dt=tf.int32, absp=False):
    assert (wav.shape[0] > n_fft)
    X = tf.placeholder(dtype=dt, shape=wav.shape)
    X = tf.cast(X, tf.float32)
    hop = n_fft // overlap  # integer hop size so it can be used as a range() step

    ## prepare constant variable
    Pi = tf.constant(np.pi, dtype=tf.float32)
    W = tf.constant(scipy.hanning(n_fft), dtype=tf.float32)
    S = tf.pack([tf.fft(tf.cast(tf.multiply(W,X[i:i+n_fft]),\
            tf.complex64)) for i in range(1, wav.shape[0] - n_fft, hop)])
    abs_S = tf.complex_abs(S)
    sess = tf.Session()
    if absp:
        return sess.run(abs_S, feed_dict={X: wav})
    else:
        return sess.run(S, feed_dict={X: wav})
Example #15
def test_fft():


    img = Image.open('test.png')
    img.show()
    img = np.asarray(img)


    input_image = tf.placeholder(tf.float32, shape= [64, 64, 3])
    img = img.astype(np.float32) / 255.

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        feed_dict = {input_image: img,
                     }

        fft_abs = 0

        for i in range(3):
            input_slice = tf.slice(input_image, [0, 0, i], [-1, -1, 1])
            input_slice = tf.squeeze(tf.complex(input_slice, tf.zeros_like(input_slice)))
            fft = tf.fft2d(input_slice)
            fft_abs += tf.complex_abs(fft)

        fft_res = sess.run([fft_abs], feed_dict= feed_dict)
        fft_res = np.clip(fft_res, 0, 10)
        fft_res = np.squeeze(fft_res)

        # fft_res = fft_res[5:59, 5:59]
        plt.imshow(fft_res)
        plt.colorbar()
        plt.show()


        res_img = Image.fromarray((fft_res*255.).astype(np.uint8))
        res_img.show()
Example #16
# JULIA SET
Y, X = np.mgrid[-2:2:0.005, -2:2:0.005]

# Define the current point
Z = X + 1j * Y
c = tf.constant(Z.astype("complex64"))

zs = tf.Variable(c)
ns = tf.Variable(tf.zeros_like(c, "float32"))

# c = complex(0.0,0.75)
# c = complex(-1.5,-1.5)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

# Compute the new values of z: z^2 + x
zs_ = zs * zs + c
# zs_ = zs*zs - c

# Have we diverged with this new value?
not_diverged = tf.complex_abs(zs_) < 4

step = tf.group(zs.assign(zs_), ns.assign_add(tf.cast(not_diverged,
                                                      "float32")))

for i in range(200):
    step.run()

plt.imshow(ns.eval())
plt.show()
Example #17
File: ops.py  Project: kestrelm/tfdeploy
 def test_ComplexAbs(self):
     t = tf.complex_abs(self.random(3, 4, complex=True))
     self.check(t)
Example #18
    tf.matmul(a, b, transpose_a=False,
    transpose_b=False, a_is_sparse=False,
    b_is_sparse=False, name=None)    Matrix multiplication.
    tf.matrix_determinant(input, name=None)    Returns the determinant of a square matrix.
    tf.matrix_inverse(input, adjoint=None, name=None)    Computes the inverse of a square matrix; when adjoint is True, the inverse of the conjugate (adjoint) of the input is computed.
    tf.cholesky(input, name=None)    Cholesky decomposition of the input square matrix,
    i.e. factoring a symmetric positive-definite matrix A into a lower-triangular matrix L times its transpose, A = LL^T.
    tf.matrix_solve(matrix, rhs, adjoint=None, name=None)    Solves systems of linear equations;
    matrix is a square matrix of shape [M, M], rhs has shape [M, K], and the output has shape [M, K].

4. Complex number operations
    tf.complex(real, imag, name=None)    Converts two real tensors into a complex tensor.
    # tensor 'real' is [2.25, 3.25]
    # tensor 'imag' is [4.75, 5.75]
    tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
    tf.complex_abs(x, name=None)    Computes the absolute value (magnitude) of a complex tensor.
    # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
    tf.complex_abs(x) ==> [5.25594902, 6.60492229]
    tf.conj(input, name=None)    Computes the complex conjugate.
    tf.imag(input, name=None)
    tf.real(input, name=None)    Extract the imaginary and real parts of a complex tensor.
    tf.fft(input, name=None)    Computes the 1-D discrete Fourier transform; the input must be complex64.

5. Reduction operations
    tf.reduce_sum(input_tensor, reduction_indices=None,
    keep_dims=False, name=None)    Computes the sum of the elements of the input tensor, or sums along the axes given by reduction_indices.
    # 'x' is [[1, 1, 1]
    #         [1, 1, 1]]
    tf.reduce_sum(x) ==> 6
    tf.reduce_sum(x, 0) ==> [2, 2, 2]
    tf.reduce_sum(x, 1) ==> [3, 3]
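
A minimal sketch (not from the original listing) that exercises a few of the ops above, assuming a TensorFlow version old enough to still provide tf.complex_abs (later releases folded it into tf.abs):

import tensorflow as tf

a = tf.constant([[1., 2.], [3., 4.]])
det = tf.matrix_determinant(a)               # -2.0
prod = tf.matmul(a, tf.matrix_inverse(a))    # approximately the identity matrix

real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
z = tf.complex(real, imag)                   # [2.25+4.75j, 3.25+5.75j]
mag = tf.complex_abs(z)                      # [5.2559..., 6.6049...]
zbar = tf.conj(z)
spec = tf.fft(z)                             # 1-D DFT; some older builds only had a GPU kernel

x = tf.constant([[1, 1, 1], [1, 1, 1]])
sums = [tf.reduce_sum(x), tf.reduce_sum(x, 0), tf.reduce_sum(x, 1)]  # 6, [2, 2, 2], [3, 3]

with tf.Session() as sess:
    print(sess.run([det, mag, sums]))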
Example #19
def get_reg_loss(tfs):
    
    # Regulizer
    with tf.name_scope('reg_errors'):
        
        reg_loss = tfs.loss
        
        # amplitude
        if 'amplitude' in tfs.sys_para.reg_coeffs:
            amp_reg_alpha_coeff = tfs.sys_para.reg_coeffs['amplitude']
            amp_reg_alpha = amp_reg_alpha_coeff / float(tfs.sys_para.steps)
            reg_loss = reg_loss + amp_reg_alpha * tf.nn.l2_loss(tfs.ops_weight)
        
        # gaussian envelope
        if 'envelope' in tfs.sys_para.reg_coeffs:
            reg_alpha_coeff = tfs.sys_para.reg_coeffs['envelope']
            reg_alpha = reg_alpha_coeff / float(tfs.sys_para.steps)
            reg_loss = reg_loss + reg_alpha * tf.nn.l2_loss(
                tf.mul(tfs.tf_one_minus_gaussian_envelope, tfs.ops_weight))

        # Limiting the dwdt of control pulse
        if 'dwdt' in tfs.sys_para.reg_coeffs:
            zeros_for_training = tf.zeros([tfs.sys_para.ops_len, 2])
            new_weights = tf.concat(1, [tfs.ops_weight, zeros_for_training])
            new_weights = tf.concat(1, [zeros_for_training, new_weights])
            dwdt_reg_alpha_coeff = tfs.sys_para.reg_coeffs['dwdt']
            dwdt_reg_alpha = dwdt_reg_alpha_coeff / float(tfs.sys_para.steps)
            reg_loss = reg_loss + dwdt_reg_alpha * tf.nn.l2_loss(
                (new_weights[:, 1:] - new_weights[:, :tfs.sys_para.steps + 3]) / tfs.sys_para.dt)

        # Limiting the d2wdt2 of control pulse
        if 'd2wdt2' in tfs.sys_para.reg_coeffs:
            d2wdt2_reg_alpha_coeff = tfs.sys_para.reg_coeffs['d2wdt2']
            d2wdt2_reg_alpha = d2wdt2_reg_alpha_coeff / float(tfs.sys_para.steps)
            reg_loss = reg_loss + d2wdt2_reg_alpha * tf.nn.l2_loss(
                (new_weights[:, 2:]
                 - 2 * new_weights[:, 1:tfs.sys_para.steps + 3]
                 + new_weights[:, :tfs.sys_para.steps + 2]) / (tfs.sys_para.dt ** 2))
        # bandpass filter on the control    
        if 'bandpass' in tfs.sys_para.reg_coeffs:
            ## currently does not support bandpass reg for CPU (no CPU kernel for FFT)
            if not tfs.sys_para.use_gpu:
                raise ValueError('currently does not support bandpass reg for CPU (no CPU kernel for FFT)')
            
            bandpass_reg_alpha_coeff = tfs.sys_para.reg_coeffs['bandpass']
            bandpass_reg_alpha = bandpass_reg_alpha_coeff / float(tfs.sys_para.steps)

            tf_u = tf.cast(tfs.ops_weight, dtype=tf.complex64)
            tf_fft = tf.complex_abs(tf.fft(tf_u))

            band = np.array(tfs.sys_para.reg_coeffs['band'])
            band_id = (band * tfs.sys_para.total_time).astype(int)
            half_id = int(tfs.sys_para.steps / 2)

            fft_loss = bandpass_reg_alpha * (
                tf.reduce_sum(tf_fft[:, 0:band_id[0]]) +
                tf.reduce_sum(tf_fft[:, band_id[1]:half_id]))

            reg_loss = reg_loss + fft_loss
        

        # Limiting the access to forbidden states
        if 'forbidden' in tfs.sys_para.reg_coeffs:
            inter_reg_alpha_coeff = tfs.sys_para.reg_coeffs['forbidden']
            inter_reg_alpha = inter_reg_alpha_coeff / float(tfs.sys_para.steps)
            if tfs.sys_para.is_dressed:
                v_sorted = tf.constant(c_to_r_mat(np.reshape(sort_ev(tfs.sys_para.v_c, tfs.sys_para.dressed_id),
                                                             [len(tfs.sys_para.dressed_id), len(tfs.sys_para.dressed_id)])),
                                       dtype=tf.float32)

            for inter_vec in tfs.inter_vecs:
                if tfs.sys_para.is_dressed and ('forbid_dressed' in tfs.sys_para.reg_coeffs and tfs.sys_para.reg_coeffs['forbid_dressed']):
                    inter_vec = tf.matmul(tf.transpose(v_sorted), inter_vec)
                for state in tfs.sys_para.reg_coeffs['states_forbidden_list']:
                    forbidden_state_pop = tf.square(inter_vec[state, :]) + \
                                          tf.square(inter_vec[tfs.sys_para.state_num + state, :])
                    reg_loss = reg_loss + inter_reg_alpha * tf.nn.l2_loss(forbidden_state_pop)
                    
        # Speeding up the gate time
        if 'speed_up' in tfs.sys_para.reg_coeffs:
            speed_up_reg_alpha_coeff = - tfs.sys_para.reg_coeffs['speed_up']
            speed_up_reg_alpha = speed_up_reg_alpha_coeff / float(tfs.sys_para.steps)
            
            target_vecs_all_timestep = tf.tile(
                tf.reshape(tfs.target_vecs,
                           [2 * tfs.sys_para.state_num, 1, len(tfs.inter_vecs)]),
                [1, tfs.sys_para.steps + 1, 1])

            target_vecs_inner_product = tfs.get_inner_product_3D(
                tfs.inter_vecs_packed, target_vecs_all_timestep)
            reg_loss = reg_loss + speed_up_reg_alpha * tf.nn.l2_loss(target_vecs_inner_product)
            

        return reg_loss
                    
Example #20
def main():

# Use NumPy to create a 2D array of complex numbers on [-2,1]x[-1.3,1.3]

    Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
    print 'Y shape: ', Y.shape
    print 'X shape: ', X.shape

    Z = X+1j*Y

    xs = tf.constant(Z.astype("complex64"))
    zs = tf.Variable(xs)
    ns = tf.Variable(tf.zeros_like(xs, "float32"))
    not_diverged = tf.Variable(np.ones(Z.shape, dtype=np.bool))
    Z_mod_at_div = tf.Variable(2 * tf.ones_like(xs, "float32"))

    for i in range(MAX_ITERS):
        # Compute the new values of z: z^2 + x
        zs_ = zs*zs + xs
        # Have we diverged with this new value?
        cur_mod = tf.complex_abs(zs_)
        not_diverged_ = cur_mod < 4
        # Operation to update the zs and the iteration count.

        # Note: We keep computing zs after they diverge! This
        #       is very wasteful! There are better, if a little
        #       less simple, ways to do this.
        ns_ = ns + tf.cast(not_diverged_, "float32")
        diverged_this_step = tf.logical_and(tf.logical_not(not_diverged_), not_diverged)
        Z_mod_at_div = tf.select(diverged_this_step, cur_mod, Z_mod_at_div)

        zs = zs_

        ns = ns_
        not_diverged = not_diverged_
    mus = tf.select(not_diverged, ns, ns + 1 - tf.log(tf.log(Z_mod_at_div)) / np.log(2))

    with tf.Session() as sess:
        tf.initialize_all_variables().run()


        print 'running!'
        ns_evaled, Z_mod_at_div_evaled, mus_evaled = sess.run([ns, Z_mod_at_div, mus])
        print 'done running!'
        print Z_mod_at_div_evaled
        non_zeros_z_mod = np.where(np.abs(Z_mod_at_div_evaled) > 0.01)
        print non_zeros_z_mod
        print 'max mod: %f, min mod: %f' % (np.max(Z_mod_at_div_evaled), np.min(Z_mod_at_div_evaled[non_zeros_z_mod]))

        print 'diff between mus and ns: '
        diff = mus_evaled - ns_evaled
        print diff
        print 'max: %f, min: %f' % (np.max(diff), np.min(diff))

    DisplayFractal(mus_evaled, 'mandelbrot.png')
    DisplayFractal(ns_evaled, 'mandelbrot_notfrac.png')

    img_int_ext = interior_exterior_map(ns_evaled)
    print "img_int_ext.max, %f, img_int_ext.min: %f" % \
        (img_int_ext.max(), img_int_ext.min())

    location = (X.shape[0] / 2, (4 * X.shape[1]) / 5)
    radius = (X.shape[0] / 10)
    ext_radius = (X.shape[0] / 70)
    img_int_ext_pendant = add_pendant(img_int_ext, location, radius, ext_radius)

    DisplayFractal(img_int_ext_pendant, "mandelbrot_int_ext_pendant.png")

    img_int_ext_pendant_noend = np.copy(img_int_ext_pendant)
    for i in xrange(X.shape[1] / 5):
        for j in xrange(X.shape[0]):
            img_int_ext_pendant_noend[j, i] = 1

    DisplayFractal(img_int_ext_pendant_noend, "mandelbrot_int_ext_pendant_noend.png")

    img_int_ext_pendant_noend_bigmiddle = np.copy(img_int_ext_pendant_noend)
    location_bigmiddle = np.array((X.shape[0] / 2, int(X.shape[1] / 2.5)))
    radius_bigmiddle = X.shape[1] / 25
    for i in xrange(X.shape[1]):
        for j in xrange(X.shape[0]):
            dist = norm(np.array((j, i)) - location_bigmiddle)
            if dist < radius_bigmiddle:
                img_int_ext_pendant_noend_bigmiddle[j, i] = -1

    DisplayFractal(img_int_ext_pendant_noend_bigmiddle, "mandelbrot_int_ext_pendant_noend_bigmiddle.png")

    
    print "img_int_ext: "
    print img_int_ext
    DisplayFractal(img_int_ext, "mandelbrot_int_ext.png")
    contours = measure.find_contours(img_int_ext_pendant_noend_bigmiddle, 0.0)
    len_contours = [len(contour_i) for contour_i in contours]
    print len_contours
    contours_sorted_by_size = sorted(contours, key=len)
    len_contours = [len(contour_i) for contour_i in contours_sorted_by_size]
    print len_contours
    bigcontour = contours_sorted_by_size[0]
    #embed()

    #bigcontour_int_ext = contour_to_int_ext_map(bigcontour, X, Y)

    #embed()

    #fig, ax = plt.subplots()
    #ax.imshow(img_int_ext, interpolation='nearest', cmap=plt.cm.gray)
    #for n, contour in enumerate(contours):
        #ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
    #ax.plot(bigcontour[:, 1], bigcontour[:, 0], linewidth=2)

    #ax.axis('image')
    #ax.set_xticks([])
    #ax.set_yticks([])


    dwg = svgwrite.Drawing('mandelbrot.svg', profile='tiny')
    for j in [-1, -2]:
        contour = contours_sorted_by_size[j]
        for i in xrange(contour.shape[0] - 1):
            #print tuple(bigcontour[i])
            dwg.add(dwg.line(tuple(contour[i]), tuple(contour[i + 1]), \
                    stroke=svgwrite.rgb(10, 10, 16, '%')))
    #dwg.add(dwg.text('Test', insert=(0, 0.2), fill='red'))
    dwg.save()
    #plt.show()

    #error = 100 * np.abs(mus_evaled - ns_evaled)
    #print error.shape
    #error = error.reshape(list(error.shape)+[1])
    #print error.shape
    #error_img = np.concatenate([error, error, error], 2)
    #error_img = np.uint8(np.clip(error_img, 0, 255))
    #scipy.misc.imsave('mandelbrot_errors.png', error_img)

    # 3d mandelbrot!

    max_dist = 10
    #tsdf = gen_tsdf(img_int_ext_pendant_noend_bigmiddle, max_dist)
    tsdf = np.load("tsdf_10.npy")
    #np.save("tsdf_10", tsdf)
    #plt.imshow(tsdf)
    #plt.show()

    # a = -3 / 20 
    # b = 53 / 20 
    # c = -1
    # for 10.5 at 10, 8.5 at 5, and 1.5 at 1
    # with a * x**2 + b * x + c formula for height
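    # check: -0.15*100 + 2.65*10 - 1 = 10.5, -0.15*25 + 2.65*5 - 1 = 8.5,
    #        -0.15*1 + 2.65*1 - 1 = 1.5, matching the three target heights above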

    int_ext_3d_map = gen_int_ext_3d_map_from_tsdf(tsdf, max_dist)
    #embed()

    vertices, triangles = mcubes.marching_cubes(int_ext_3d_map, 0)
    mcubes.export_mesh(vertices, triangles, "mandelbrot_smoothed.dae", "Mandelbrot_pendant")
    #embed()

    from mayavi import mlab
    mlab.triangular_mesh(
        vertices[:, 0], vertices[:, 1], vertices[:, 2],
        triangles)
    mlab.show()
Example #21
def inference(images, isTraining=False):
    """Build the grka model.
    Args:
      images: Images returned from distorted_inputs() or inputs().
    Returns:
      Logits.
    """
    # We instantiate all variables using tf.get_variable() instead of
    # tf.Variable() in order to share variables across multiple GPU
    # training runs.
    # If we only ran this model on a single GPU, we could simplify this function
    # by replacing all instances of tf.get_variable() with tf.Variable().
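    # A sketch (not part of the original model) of the sharing pattern tf.get_variable
    # enables in TF1: create a variable once under a scope, then reuse it by name:
    #     with tf.variable_scope('tower'):
    #         w = tf.get_variable('w', shape=[3, 3])
    #     with tf.variable_scope('tower', reuse=True):
    #         w_shared = tf.get_variable('w')  # returns the existing variable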

    images = tf.identity(images, name="inference_input")

    batch_size = images.get_shape()[0].value

    keepProp = tf.identity(FLAGS.dropout_keep_probability, name="keepProb")

    spect = tf.complex_abs(
        tf.split(1, 2,
                 tf.fft(tf.complex(images * hamming,
                                   tf.zeros_like(images))))[0])

    window_width = 2205
    shift = 735

    data = tf.reshape([
        tf.split(
            1, 2,
            tf.pad(
                tf.fft(
                    tf.reshape(
                        tf.complex(
                            images[:, i:i + window_width] * hamming2,
                            tf.zeros_like(images[:, i:i + window_width])),
                        [batch_size, window_width])), [[0, 0], [0, 1]]))[0]
        for i in range(0, IMAGE_SIZE - window_width + 1, shift)
    ], [4, batch_size, 1103])

    data = tf.reshape(tf.transpose(tf.complex_abs(data), [1, 0, 2]),
                      [batch_size, 1103 * 4])

    spect = tf.concat(1, [spect, data])

    tf.summary.image('images',
                     tf.reshape(spect, [batch_size, 1, 6617, 1]),
                     max_outputs=16)

    spect = tf.reshape(spect, [batch_size, 6617, 1])

    with tf.variable_scope('conv1') as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        # reshape = tf.reshape(spect, [FLAGS.batch_size, -1])
        kernel = _variable_with_weight_decay('weights',
                                             shape=[3, 1, 64],
                                             connections=3 + 64,
                                             wd=WEIGHT_DECAY)
        conv = tf.nn.conv1d(spect, kernel, 1, padding='SAME')
        bias = batch_norm_wrapper(conv,
                                  is_training=isTraining,
                                  shape=[0, 1, 2])
        conv1 = tf.nn.elu(bias, name=scope.name)
        _activation_summary(conv1)

        # grid = put_activations_on_grid(tf.reshape(conv, [batch_size, 1, 6617,
        #                                                  64]), (8, 8))
        # tf.summary.image('conv1/activations', grid, max_outputs=1)

        pool1 = tf.reshape(
            tf.nn.max_pool(tf.reshape(conv1, [batch_size, 6617, 1, 64]),
                           ksize=[1, 2, 1, 1],
                           strides=[1, 2, 1, 1],
                           padding='SAME',
                           name='pool1'), [batch_size, 3309, 64])

    with tf.variable_scope('conv2') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[3, 64, 64],
                                             connections=3 * 64 + 64,
                                             wd=WEIGHT_DECAY)
        conv = tf.nn.conv1d(pool1, kernel, 1, padding='SAME')
        bias = batch_norm_wrapper(conv,
                                  is_training=isTraining,
                                  shape=[0, 1, 2])
        conv2 = tf.nn.elu(bias, name=scope.name)
        _activation_summary(conv2)
        pool2 = tf.reshape(
            tf.nn.max_pool(tf.reshape(conv2, [batch_size, 3309, 1, 64]),
                           ksize=[1, 2, 1, 1],
                           strides=[1, 2, 1, 1],
                           padding='SAME',
                           name='pool2'), [batch_size, 1655, 64])

    with tf.variable_scope('conv3') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[3, 64, 64],
                                             connections=3 * 64 + 64,
                                             wd=WEIGHT_DECAY)
        conv = tf.nn.conv1d(pool2, kernel, 1, padding='SAME')
        bias = batch_norm_wrapper(conv,
                                  is_training=isTraining,
                                  shape=[0, 1, 2])
        conv3 = tf.nn.elu(bias, name=scope.name)
        _activation_summary(conv3)
        pool3 = tf.reshape(
            tf.nn.max_pool(tf.reshape(conv3, [batch_size, 1655, 1, 64]),
                           ksize=[1, 2, 1, 1],
                           strides=[1, 2, 1, 1],
                           padding='SAME',
                           name='pool3'), [batch_size, 828, 64])

    with tf.variable_scope('conv4') as scope:
        kernel = _variable_with_weight_decay('weights',
                                             shape=[3, 64, 128],
                                             connections=3 * 64 + 128,
                                             wd=WEIGHT_DECAY)
        conv = tf.nn.conv1d(pool3, kernel, 1, padding='SAME')
        bias = batch_norm_wrapper(conv,
                                  is_training=isTraining,
                                  shape=[0, 1, 2])
        conv4 = tf.nn.elu(bias, name=scope.name)
        _activation_summary(conv4)

        # grid = put_activations_on_grid(tf.reshape(conv, [batch_size, 1, 828,
        #                                                  128]), (16, 8))
        # tf.summary.image('conv1/activations', grid, max_outputs=1)

        pool4 = tf.reshape(
            tf.nn.max_pool(tf.reshape(conv4, [batch_size, 828, 1, 128]),
                           ksize=[1, 2, 1, 1],
                           strides=[1, 2, 1, 1],
                           padding='SAME',
                           name='pool4'), [batch_size, -1])

        # local3
        with tf.variable_scope('local1') as scope:
            # Move everything into depth so we can perform a single matrix multiply.
            # reshape = tf.reshape(spect, [FLAGS.batch_size, -1])
            reshape = tf.reshape(pool4, [batch_size, -1])
            dim = reshape.get_shape()[1].value

            weights = _variable_with_weight_decay('weights',
                                                  shape=[dim, 6624],
                                                  connections=dim + 6624,
                                                  wd=WEIGHT_DECAY)
            bn1 = batch_norm_wrapper(tf.matmul(reshape, weights),
                                     is_training=isTraining)
            local1 = tf.nn.elu(bn1, name=scope.name)
            local1 = tf.nn.dropout(local1, keepProp)
            _activation_summary(local1)

        # softmax, i.e. softmax(WX + b)
        with tf.variable_scope('softmax_linear') as scope:
            weights = _variable_with_weight_decay(
                'weights', [6624, NUM_CLASSES + 1],
                connections=6624 + NUM_CLASSES + 1,
                wd=0.0)
            biases = _variable_on_cpu('biases', [NUM_CLASSES + 1],
                                      tf.constant_initializer(0.0))
            softmax_linear = tf.add(tf.matmul(local1, weights),
                                    biases,
                                    name=scope.name)
            _activation_summary(softmax_linear)

        return softmax_linear
Example #22
# Use NumPy to create a 2D array of complex numbers on [-2,1]x[-1.3,1.3]

Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
Z = X+1j*Y

xs = tf.constant(Z.astype(np.complex64))
zs = tf.Variable(xs)
ns = tf.Variable(tf.zeros_like(xs, tf.float32))

sess = tf.InteractiveSession()  # needed so .run()/.eval() below have a default session
tf.global_variables_initializer().run()

# Compute the new values of z: z^2 + x
zs_ = zs*zs + xs

# Have we diverged with this new value?
not_diverged = tf.complex_abs(zs_) < 4

# Operation to update the zs and the iteration count.
#
# Note: We keep computing zs after they diverge! This
#       is very wasteful! There are better, if a little
#       less simple, ways to do this.
#
step = tf.group(
  zs.assign(zs_),
  ns.assign_add(tf.cast(not_diverged, tf.float32))
  )
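# A possible (untested) refinement of the note above: freeze points once they
# diverge so their z values stop changing, e.g.
#   zs_frozen = tf.select(not_diverged, zs_, zs)
#   step = tf.group(zs.assign(zs_frozen),
#                   ns.assign_add(tf.cast(not_diverged, tf.float32)))
# zs*zs is still evaluated everywhere, but diverged entries no longer grow.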

for i in range(200): step.run()

DisplayFractal(ns.eval())
Example #23
 def complex_mod_of_real(x):
     xshp = x.get_shape().as_list()
     assert xshp[1] % 2 == 0
     xcplx = tf.complex(x[:, 0:xshp[1] // 2], x[:, xshp[1] // 2:])  # integer division keeps slice indices ints
     return tf.complex_abs(xcplx)