Example #1
def compare_nufft1d1( nufft_func1, nufft_func2, ms=1000, mc=100000 ):
    # Test vs the direct method
    print(30 * '-')
    rng = np.random.RandomState(0)
    x = 100 * rng.rand(mc)
    c = 1j*np.sin(x) + 1j*np.sin(10*x)
    for df in [1, 2.0]:
        for iflag in [1, -1]:
            print ("testing 1d df=%f, iflag=%f"% (df,iflag))
            F1 = nufft_func1(x, c, ms, df=df, iflag=iflag)
            F2 = nufft_func2(x, c, ms, df=df, iflag=iflag)
            ut.plot(np.absolute(F1))
            ut.plot(np.absolute(F2))
            assert np.allclose(F1, F2, rtol=1e-02, atol=1e-02)
    print("- Results match the 1d DFT")
Example #2
def compare_nufft1d21( nufft_func1, nufft_func2, ms=1000, mc=100000, Reptime=5 ):
    rng = np.random.RandomState(0)
    # Time the nufft function
    x = 100 * rng.rand(mc)
    c0 = np.sin(3*x) + 0.1*1j*np.sin(4.5*x)
    F1 = nufft_func1(x, c0, ms)
    times = []
    for i in range(Reptime):
        t0 = time()
        F2 = nufft_func2(x, F1, ms)
        t1 = time()
        times.append(t1 - t0)
        ut.plot(np.absolute(F1))
        ut.plot(np.absolute(F2))
        #ut.plot(np.real(F1),np.real(F2),'o')
    print("- Execution time (M={0}): {1:.2g} sec".format(mc, np.median(times)))
Example #3
def test1():
    model = tf_wrap.tf_model_top([None,  2*960],  [None,  4], tf_prediction_func, tf_optimize_func, tf_error_func)

    batch_size = 800
    # load far and trr: read the rf and tr arrays from the mat file
    mat_contents  = sio.loadmat(pathdat+'mrf_t1t2b0pd_mrf_randphasecyc_traintest.mat')
    far           = np.array(mat_contents["rf"].astype(np.complex128).squeeze())
    trr           = np.array(mat_contents["trr"].astype(np.float64).squeeze())
    # prepare for sequence simulation, y->x_hat
    Nk            = far.shape[0]
    ti            = 10 #ms
    M0            = np.array([0.0,0.0,1.0]).astype(np.float64)

    # to run tensorflow on cpu only, set the gpu device count to 0 (option commented out)
    config = tf.ConfigProto()#(device_count = {'GPU': 0})
    # allow tensorflow to grow gpu memory allocation as needed
    config.gpu_options.allow_growth = True

    #run for 2000
    for i in range(2000):
        batch_ys           = np.random.uniform(0,1,(batch_size,4)).astype(np.float64)
        #batch_ys[:,2]      = np.zeros(batch_size)
        #batch_ys[:,3]      = np.ones(batch_size)
        batch_xs   = np.zeros((batch_size,2*Nk), dtype = np.float64)
        # initial seq simulation with t1t2b0 values
        #seq_data = ssad.irssfp_arrayin_data( batch_size, Nk ).set( batch_ys )
        T1r, T2r, dfr, PDr = ssmrf.set_par(batch_ys)
        batch_xs_c         = ssmrf.bloch_sim_batch_cuda( batch_size, 100, Nk, PDr,\
         T1r, T2r, dfr, M0, trr, far, ti )
        # separate real/imag parts or abs/angle parts, no-noise output
        batch_xs[:,0:Nk] = np.real(batch_xs_c)
        batch_xs[:,Nk:2*Nk] = np.imag(batch_xs_c)

        #input with noise
        #batch_xsnoise = batch_xs  + np.random.uniform(-0.01,0.01,(batch_size,2*Nk))

        #train_step.run(feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
        model.train(batch_xs, batch_ys)
        if i%10 == 0:
            prey = model.prediction(batch_xs,np.zeros(batch_ys.shape))
            ut.plot(prey[...,0], batch_ys[...,0], line_type = '.', pause_close = 1)
            ut.plot(prey[...,1], batch_ys[...,1], line_type = '.', pause_close = 1)
            ut.plot(prey[...,2], batch_ys[...,2], line_type = '.', pause_close = 1)
            ut.plot(prey[...,3], batch_ys[...,3], line_type = '.', pause_close = 1)            
            model.test(batch_xs, batch_ys)
        if i%100 == 0:
            model.save('../save_data/MRF_encoder_t1t2b0')
Example #4
def test4():
    # read rf and tr arrays from mat file
    mat_contents  = sio.loadmat('../save_data/MRF_far_trr.mat')
    far           = np.array(mat_contents["far"].astype(np.complex128).squeeze())
    trr           = np.array(mat_contents["trr"].astype(np.float64).squeeze())

    Nk            = far.shape[0]#960#
    model = tf_wrap.tf_model_top( [None,  2 * Nk], [None,  4], tf_prediction_func, tf_optimize_func, tf_error_func, 1.0)
    model.restore('../save_data/MRF_encoder_t1t2b0')

    mat_contents2 = sio.loadmat(pathdat+'test_outy.mat')
    data_y = mat_contents2["test_outy"]

    batch_size = data_y.shape[0]
    # load far and trr

    # prepare for sequence simulation, y->x_hat
    ti            = 10 #ms
    M0            = np.array([0.0,0.0,1.0]).astype(np.float64)

    # to run tensorflow on cpu only, set the gpu device count to 0 (option commented out)
    config = tf.ConfigProto()#(device_count = {'GPU': 0})
    # allow tensorflow to grow gpu memory allocation as needed
    config.gpu_options.allow_growth = True

    batch_ys           = data_y#np.random.uniform(0,1,(batch_size,4)).astype(np.float64)
    #batch_ys[:,2] = np.zeros(batch_size)
    #batch_ys[:,3]      = np.ones(batch_size)
    batch_xs   = np.zeros((batch_size, 2 * Nk), dtype = np.float64)
    batch_xs_c = np.zeros((batch_size, Nk),     dtype = np.complex128)
    # initial seq simulation with t1t2b0 values
    #seq_data = ssad.irssfp_arrayin_data( batch_size, Nk ).set( batch_ys )
    T1r, T2r, dfr, PDr        = ssmrf.set_par(batch_ys)
    batch_xs_c[...,0:Nk]      = ssmrf.bloch_sim_batch_cuda( batch_size, 100, Nk, PDr,\
     T1r, T2r, dfr, M0, trr, far, ti )
    # separate real/imag parts or abs/angle parts, no-noise output
    batch_xs[:,0:Nk] = np.real(batch_xs_c)
    batch_xs[:,Nk:2*Nk] = np.imag(batch_xs_c)
    #input with noise
    #batch_xsnoise = batch_xs  #+ np.random.uniform(-0.05,0.05,(batch_size,2*Nk))
    prey = model.prediction(batch_xs,np.zeros(batch_ys.shape))
    model.test(batch_xs, batch_ys)
    ut.plot(prey[...,0], batch_ys[...,0], line_type = '.')
    ut.plot(prey[...,1], batch_ys[...,1], line_type = '.')
    ut.plot(prey[...,2], batch_ys[...,2], line_type = '.')
    ut.plot(prey[...,3], batch_ys[...,3], line_type = '.')

    sio.savemat(pathdat+'out_cnn_testdata3.mat', {'testim_outy': prey})
Example #5
def time_nufft1d2( nufft_func1, nufft_func2, ms=1000, mc=100000, Reptime=5 ):
    rng = np.random.RandomState(0)
    # Time the nufft function
    x = 100 * rng.rand(mc)
    c0 = np.sin(x)
    F = nufft_func1(x, c0, ms)
    times = []
    for i in range(Reptime):
        t0 = time()
        c = nufft_func2(x, F, ms)
        t1 = time()
        times.append(t1 - t0)
    ut.plot(x,np.real(c0),'o')
    ut.plot(x,np.real(c),'o')
    ut.plot(np.real(c0),np.real(c),'o')
    print("- Execution time (M={0}): {1:.2g} sec".format(mc, np.median(times)))
Example #6
def test3():
    mat_contents = sio.loadmat(pathdat + 'dict_pca.mat')
    dictall = np.array(mat_contents["avedictall"].astype(np.float32))
    label = np.array(mat_contents["dict_label"].astype(np.float32))

    #dictall = dictall/np.ndarray.max(dictall.flatten())
    for i in range(dictall.shape[0]):
        tc = dictall[i, :] - np.mean(dictall[i, :])
        dictall[i, :] = tc / np.linalg.norm(tc)
    dictall = 1000 * dictall / np.ndarray.max(dictall.flatten())

    model = tf_wrap.tf_model_top([None, 13], [None, 3], tf_prediction_func,
                                 tf_optimize_func, tf_error_func)
    model.restore('../save_data/test_model_save')
    model.test(dictall, label)
    prey = model.prediction(dictall, np.zeros(label.shape))
    ut.plot(prey[..., 0], label[..., 0], line_type='.')
    ut.plot(prey[..., 1], label[..., 1], line_type='.')
    ut.plot(prey[..., 2], label[..., 2], line_type='.')
Example #7
def test1():
    mat_contents = sio.loadmat(pathdat + 'dict_pca_2fa_2freq.mat')
    #dict_pca
    coeff = np.array(mat_contents["coeff"].astype(np.float32))

    par = mat_contents["par"]

    batch_size = 800
    Nk = par[0]['irfreq'][0][0][0]  #892#far.shape[0]#par.irfreq#
    Ndiv = coeff.shape[1]  #par[0]['ndiv'][0][0][0]#16
    Nscan = 3
    orig_Ndiv = coeff.shape[0] // Nscan
    npar = 8  ##
    model = tf_wrap.tf_model_top([None, Ndiv], [None, npar],
                                 tf_prediction_func,
                                 tf_optimize_func,
                                 tf_error_func,
                                 arg=0.5)
    #model.restore(pathdat + 'test_model_save_2fa_2freq')

    fa1 = 45.0  #par[0]['fa1'][0][0][0].astype(np.float32)#35#30 #deg
    fa2 = 30.0  #par[0]['fa2'][0][0][0].astype(np.float32)
    tr = par[0]['tr'][0][0][0].astype(np.float32)  #3.932#4.337 #ms
    ti = par[0]['ti'][0][0][0].astype(np.float32)  #11.0 #ms
    te = par[0]['te'][0][0][0].astype(np.float32)  #1.5 #ms

    far, trr, ter = simut.rftr_const(Nk, 1.0, tr, te)

    M0 = simut.def_M0()

    # to run tensorflow on cpu only, set the gpu device count to 0 (option commented out)
    config = tf.ConfigProto()  #(device_count = {'GPU': 0})
    # allow tensorflow to grow gpu memory allocation as needed
    config.gpu_options.allow_growth = True
    t1t2_group = np.zeros((4, 2), dtype=np.float64)
    t1t2_group[0, 0] = 600.0 / 5000.0  #600     #0.12
    t1t2_group[0, 1] = 40.0 / 500.0  #40     #0.08
    t1t2_group[1, 0] = 1000.0 / 5000.0  #1000   #0.2
    t1t2_group[1, 1] = 80.0 / 500.0  #80      #0.16
    t1t2_group[2, 0] = 3000.0 / 5000.0  #3000  #0.6
    t1t2_group[2, 1] = 200.0 / 500.0  #200     #0.4
    t1t2_group[3, 0] = 0.0 / 5000.0
    t1t2_group[3, 1] = 0.0 / 500.0

    for i in range(1000000):
        batch_ys = np.random.uniform(0, 1,
                                     (batch_size, npar)).astype(np.float64)
        batch_ys[:, 2] = np.random.uniform(0, 1.0 / tr, (batch_size)).astype(
            np.float64)  #np.zeros(batch_size)#

        batch_ys_tmp = np.random.uniform(0, 4, (batch_size))

        for k in range(batch_size):
            if batch_ys_tmp[k] <= 4 and batch_ys_tmp[k] > 3:
                batch_ys[k, 0] = t1t2_group[0, 0] + np.random.uniform(
                    -0.05, 0.025)  #0.07 to 0.145
                batch_ys[k, 1] = t1t2_group[0, 1] + np.random.uniform(
                    -0.05, 0.025)  #0.04 to 0.105
                batch_ys[k, 4] = 1.0
                batch_ys[k, 5] = 0.0
                batch_ys[k, 6] = 0.0
            elif batch_ys_tmp[k] <= 3 and batch_ys_tmp[k] > 2:
                batch_ys[k, 0] = t1t2_group[1, 0] + np.random.uniform(
                    -0.035, 0.035)  #-0.035,0.035  #0.145 to 0.255
                batch_ys[k, 1] = t1t2_group[1, 1] + np.random.uniform(
                    -0.035, 0.035)  #0.105 to 0.215
                batch_ys[k, 4] = 0.0
                batch_ys[k, 5] = 1.0
                batch_ys[k, 6] = 0.0
            elif batch_ys_tmp[k] <= 2 and batch_ys_tmp[k] > 1:
                batch_ys[k, 0] = t1t2_group[2, 0] + np.random.uniform(
                    -0.15, 0.25)  #-0.15,0.25        #0.45  to 0.85
                batch_ys[k, 1] = t1t2_group[2, 1] + np.random.uniform(
                    -0.15, 0.25)  #0.55  to 0.65
                batch_ys[k, 4] = 0.0
                batch_ys[k, 5] = 0.0
                batch_ys[k, 6] = 1.0
            else:
                batch_ys[k, 0] = t1t2_group[3,
                                            0]  #+ np.random.uniform(-0.0,0.02)
                batch_ys[k, 1] = t1t2_group[3,
                                            1]  #+ np.random.uniform(-0.0,0.08)
                batch_ys[k, 4] = 0.0
                batch_ys[k, 5] = 0.0
                batch_ys[k, 6] = 0.0

        T1r, T2r, dfr, PDr = ssmrf.set_par(batch_ys[..., 0:4])
        b1r = 2.0 * batch_ys[..., 7] + 0.5
        batch_xs_c1 = ssmrf.bloch_sim_batch_cuda3(batch_size, 100, Nk, PDr,
                                                  T1r, T2r, dfr, b1r, M0, trr,
                                                  ter, fa1 * far, ti)
        batch_xs_c2 = ssmrf.bloch_sim_batch_cuda3(batch_size, 100, Nk, PDr,
                                                  T1r, T2r, dfr, b1r, M0, trr,
                                                  ter, fa2 * far, ti)
        #batch_xs_c3         = ssmrf.bloch_sim_batch_cuda2( batch_size, 100, Nk, PDr, T1r, T2r, dfr, M0, trr, ter, np.absolute(fa2*far), ti )
        batch_xs_c3 = ssmrf.bloch_sim_batch_cuda3(batch_size, 100, Nk, PDr,
                                                  T1r, T2r, dfr + 107.0, b1r,
                                                  M0, trr, ter, fa2 * far, ti)

        #ut.plot(np.absolute(batch_xs_c[0,:]))
        batch_xs = np.zeros((batch_size, Nscan * orig_Ndiv),
                            dtype=batch_xs_c1.dtype)
        if orig_Ndiv != Nk:
            batch_xs[:, 0:orig_Ndiv] = (
                simut.average_dict(batch_xs_c1, orig_Ndiv)
            )  #(np.dot(np.absolute(simut.average_dict(batch_xs_c, Ndiv)), coeff))
            batch_xs[:, orig_Ndiv:2 * orig_Ndiv] = (
                simut.average_dict(batch_xs_c2, orig_Ndiv)
            )  #(np.dot(np.absolute(simut.average_dict(batch_xs_c, Ndiv)), coeff))
            batch_xs[:, 2 * orig_Ndiv:3 * orig_Ndiv] = (
                simut.average_dict(batch_xs_c3, orig_Ndiv)
            )  #(np.dot(np.absolute(simut.average_dict(batch_xs_c, Ndiv)), coeff))

        batch_xs = batch_xs + np.random.ranf(1)[0] * np.random.uniform(
            -0.002, 0.002, (batch_xs.shape))
        batch_xs = np.absolute(batch_xs)

        if 1:
            batch_xs = np.dot(batch_xs, coeff)

        else:
            batch_xs = batch_xs

        for dd in range(batch_xs.shape[0]):
            tc1 = batch_xs[dd, :]

            normtc1 = np.linalg.norm(tc1)

            if normtc1 > 0.01:
                batch_xs[dd, :] = tc1
            else:
                batch_ys[dd, :] = np.zeros([1, npar])
        batch_xs = batch_xs / np.ndarray.max(batch_xs.flatten())

        model.test(batch_xs, batch_ys)
        model.train(batch_xs, batch_ys)

        if i % 100 == 0:
            prey = model.prediction(batch_xs, np.zeros(batch_ys.shape))
            for nn in range(npar):
                ut.plot(prey[..., nn],
                        batch_ys[..., nn],
                        line_type='.',
                        pause_close=1)
            model.save(pathdat + 'test_model_save_b1_2fa_2freq')
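The per-sample if/elif chain above, which assigns T1/T2 group centers and one-hot tissue labels, can also be written in vectorized form. The sketch below reuses the same group centers and jitter ranges from t1t2_group; the function name, the randint group draw, and leaving columns 2, 3 and 7 untouched are illustrative choices rather than the original logic.

import numpy as np

def sample_t1t2_groups(batch_size, npar, t1t2_group, rng=np.random):
    """Draw batch_ys with group-dependent T1/T2 centers (columns 0 and 1) and
    one-hot labels (columns 4:7); a sketch of the loop above, not a drop-in
    replacement."""
    ys = rng.uniform(0, 1, (batch_size, npar))
    g = rng.randint(0, 4, batch_size)              # group index 0..3
    jitter = np.array([[-0.05,  0.025],            # group 0
                       [-0.035, 0.035],            # group 1
                       [-0.15,  0.25],             # group 2
                       [ 0.0,   0.0]])             # group 3: fixed at its center
    ys[:, 0] = t1t2_group[g, 0] + rng.uniform(jitter[g, 0], jitter[g, 1])
    ys[:, 1] = t1t2_group[g, 1] + rng.uniform(jitter[g, 0], jitter[g, 1])
    onehot = np.zeros((batch_size, 3))
    keep = g < 3                                   # group 3 keeps an all-zero label
    onehot[keep] = np.eye(3)[g[keep]]
    ys[:, 4:7] = onehot
    return ys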
Example #8
def espirit_3d( xcrop, x_shape, nsingularv = 150, hkwin_shape = (16,16,16),\
    pad_before_espirit = 0, pad_fact = 1, sigv_th = 0.01, nsigv_th = 0.2 ):
    #ft = op.FFTnd((0,1,2))#3d fft operator
    ft = op.FFTWnd((0, 1, 2))  #3d fft operator
    timing = utc.timing()
    # multidimensional tensor as the block hankel matrix
    # first 3 dims are x, y, z with rolling window size hkwin_shape
    # last dim is the coil dimension, with stride of 1
    # output dims are: (3_hankel_dims + 1_coil_dim)_win_size + (3_hankel_dims + 1_coil_dim)_rolling_times
    timing.start()
    h = hk.hankelnd_r(xcrop,
                      (hkwin_shape[0], hkwin_shape[1], hkwin_shape[2], 1))
    timing.stop().display('Create Hankel ').start()
    dimh = h.shape
    # flatten the tensor to create a matrix = [flatten(first 4 dims), flatten(last 4 dims)]
    # the second dim of hmtx contains the coil information, i.e. dimh[3] = 1, dimh[7] = N_coils
    hmtx = h.reshape(( dimh[0]* dimh[1]* dimh[2]* dimh[3], dimh[4], dimh[5], dimh[6], dimh[7])).\
              reshape((dimh[0]* dimh[1]* dimh[2]* dimh[3], dimh[4]* dimh[5]* dimh[6]* dimh[7]))
    timing.stop().display('Reshape Hankel ').start()
    #svd, could try other approaches
    # V has the coil information since the second dim of hmtx has coil data
    U, s, V = np.linalg.svd(hmtx, full_matrices=False)
    #U, s, V = randomized_svd(hmtx, n_components=nsingularv,n_iter=5,random_state=None)
    #U, s, V = scipy.sparse.linalg.svds(hmtx, nsingularv )
    timing.stop().display('SVD ').start()
    #S = np.diag(s)
    #ut.plotim1(np.absolute(V[:,0:150]).T)#plot V singular vectors
    ut.plot(s.T)  #plot singular values
    for k in range(len(s)):
        if s[k] > s[0] * nsigv_th:
            nsingularv = k
    print('extract %g out of %g singular vectors' % (nsingularv, len(s)))
    #invh = np.zeros(x.shape,complex)
    #print h.shape
    #hk.invhankelnd(h,invh,(2,3,1))

    #reshape vn to generate k-space vn tensor
    #first dim is singular vector, which is transposed to the second last dimension
    vn = V[0:nsingularv, :].reshape(
        (nsingularv, dimh[4], dimh[5], dimh[6], dimh[7])).transpose(
            (1, 2, 3, 0, 4))

    # zero-pad vn, the matrix of reshaped singular vectors
    # dims of vn: nx, ny, nz, nsingularv, ncoil
    # do the padding before espirit to reduce the memory requirement
    if pad_before_espirit == 0:
        nx = min(pad_fact * xcrop.shape[0], x_shape[0])
        ny = min(pad_fact * xcrop.shape[1], x_shape[1])
        nz = min(pad_fact * xcrop.shape[2], x_shape[2])
    else:
        nx = x_shape[0]
        ny = x_shape[1]
        nz = x_shape[2]
    #coil dim
    nc = x_shape[3]
    #create hamming window
    hwin = hamming3d(vn.shape[0], vn.shape[1], vn.shape[2])
    # apply hamming window
    vn = np.multiply(vn, hwin[:, :, :, np.newaxis, np.newaxis])
    #zero pad
    vn = ut.pad3d(vn, nx, ny, nz)
    # plot first singular vector Vn[0]
    imvn = ft.backward(vn)
    #ut.plotim3(np.absolute(imvn[:,:,0,:].squeeze()))#spatial feature of V[:,1] singular vector
    sim = np.zeros((nx, ny, nz), dtype=vn.dtype)
    Vim = np.zeros((nx, ny, nz, nc), dtype=vn.dtype)
    # espirit loop: Vim holds eigenvectors, sim holds eigenvalues; this is a pixel-wise PCA on vn
    for ix in range(nx):
        for iy in range(ny):
            for iz in range(nz):
                vpix = imvn[ix, iy, iz, :, :].squeeze()
                vpix = np.matrix(vpix).transpose()
                vvH = vpix.dot(vpix.getH())
                U, s, V = np.linalg.svd(vvH, full_matrices=False)
                #s, V = numpy.linalg.eig(vvH)
                #U, s, V = randomized_svd(vvH, n_components=2,n_iter=5,random_state=None)
                sim[ix, iy, iz] = s[0]
                Vim[ix, iy, iz, :] = V[0, :].squeeze()

    Vim = np.conj(Vim)
    timing.stop().display('ESPIRIT ')
    #pad the image after espirit
    if pad_before_espirit == 0:
        Vim = ft.backward(
            ut.pad3d(ft.forward(Vim), x_shape[0], x_shape[1], x_shape[2]))
        sim = ft.backward(
            ut.pad3d(ft.forward(sim), x_shape[0], x_shape[1], x_shape[2]))
    # plot the first eigenvector (the coil sensitivity map) and the eigenvalue map
    ut.plotim3(np.absolute(Vim[Vim.shape[0] // 2, :, :, :].squeeze()))
    ut.plotim1(np.absolute(sim[Vim.shape[0] // 2, :, :].squeeze()))
    #Vim_dims_name = ['x', 'y', 'z', 'coil']
    #sim_dims_name = ['x', 'y', 'z']
    Vimnorm = np.linalg.norm(Vim, axis=3)
    Vim = np.divide(Vim, 1e-6 + Vimnorm[:, :, :, np.newaxis])

    sim = sim / np.max(sim.flatten())
    for ix in range(x_shape[0]):
        for iy in range(x_shape[1]):
            for iz in range(x_shape[2]):
                if sim[ix, iy, iz] < sigv_th:
                    Vim[ix, iy, iz, :] = np.zeros(nc)
    return Vim, np.absolute(sim)  #, Vim_dims_name, sim_dims_name
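hamming3d is called above but not defined in this snippet. A separable construction from 1-D Hamming windows is one plausible way to implement it; the sketch below is an assumption, and the real helper may differ.

import numpy as np

def hamming3d(nx, ny, nz):
    """Separable 3-D Hamming window built as an outer product of 1-D windows
    (sketch of the helper used by espirit_3d above)."""
    wx, wy, wz = np.hamming(nx), np.hamming(ny), np.hamming(nz)
    return wx[:, None, None] * wy[None, :, None] * wz[None, None, :]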
Example #9
def test1():
    mat_contents  = sio.loadmat(pathdat+'dict_pca.mat')  # dict_pca
    #dictall       = np.array(mat_contents["avedictall"].astype(np.float32))
    #label         = np.array(mat_contents["dict_label"].astype(np.float32))
    coeff         = np.array(mat_contents["coeff"].astype(np.float32))
    #cn_orders     = np.array(mat_contents["cn_orders"].astype(np.float32))
    par           = mat_contents["par"]

    batch_size = 800
    Nk         = par[0]['irfreq'][0][0][0]#892#far.shape[0]#par.irfreq#
    Ndiv       = coeff.shape[1]#par[0]['ndiv'][0][0][0]#16
    orig_Ndiv  = coeff.shape[0] 
    npar       = 7
    model = tf_wrap.tf_model_top([None,  Ndiv], [None,  npar], tf_prediction_func, tf_optimize_func, tf_error_func, arg = 0.5)


    fa         = par[0]['fa'][0][0][0].astype(np.float32)#35#30 #deg
    tr         = par[0]['tr'][0][0][0].astype(np.float32)#3.932#4.337 #ms
    ti         = par[0]['ti'][0][0][0].astype(np.float32)#11.0 #ms
    te         = 1.5 #ms
    #print(fa)
    #print(tr)
    #print(ti)
    #print(Nk)
    #print(Ndiv)

    far, trr,ter   = simut.rftr_const(Nk, fa, tr, te)
    M0         = simut.def_M0()

    # to run tensorflow on cpu only, set the gpu device count to 0 (option commented out)
    config     = tf.ConfigProto()#(device_count = {'GPU': 0})
    # allow tensorflow to grow gpu memory allocation as needed
    config.gpu_options.allow_growth = True
    t1t2_group = np.zeros((4, 2),dtype = np.float64)
    t1t2_group[0,0] = 600.0/5000.0
    t1t2_group[0,1] = 40.0/500.0
    t1t2_group[1,0] = 1000.0/5000.0
    t1t2_group[1,1] = 80.0/500.0     
    t1t2_group[2,0] = 3000.0/5000.0
    t1t2_group[2,1] = 200.0/500.0 
    t1t2_group[3,0] = 0.0/5000.0
    t1t2_group[3,1] = 0.0/500.0 

    for i in range(1000000):
        batch_ys           = np.random.uniform(0,1,(batch_size,npar)).astype(np.float64)
        batch_ys[:,2]      = np.zeros(batch_size)#np.random.uniform(0,1.0/tr,(batch_size)).astype(np.float64)
        batch_ys_tmp       = np.random.uniform(0,4,(batch_size))

        for k in range(batch_size):
            if batch_ys_tmp[k] <= 4 and batch_ys_tmp[k] > 3:
                batch_ys[k,0] = t1t2_group[0,0] + np.random.uniform(-0.05,0.025)
                batch_ys[k,1] = t1t2_group[0,1] + np.random.uniform(-0.05,0.025)
                batch_ys[k,4] = 1.0
                batch_ys[k,5] = 0.0
                batch_ys[k,6] = 0.0
            elif batch_ys_tmp[k] <= 3 and batch_ys_tmp[k] > 2:
                batch_ys[k,0] = t1t2_group[1,0] + np.random.uniform(-0.025,0.025)
                batch_ys[k,1] = t1t2_group[1,1] + np.random.uniform(-0.025,0.025)   
                batch_ys[k,4] = 0.0
                batch_ys[k,5] = 1.0
                batch_ys[k,6] = 0.0                            
            elif batch_ys_tmp[k] <= 2 and batch_ys_tmp[k] > 1:
                batch_ys[k,0] = t1t2_group[2,0] + np.random.uniform(-0.15,0.25)
                batch_ys[k,1] = t1t2_group[2,1] + np.random.uniform(-0.15,0.25) 
                batch_ys[k,4] = 0.0
                batch_ys[k,5] = 0.0
                batch_ys[k,6] = 1.0                             
            else: 
                batch_ys[k,0] = t1t2_group[3,0]
                batch_ys[k,1] = t1t2_group[3,1] 
                batch_ys[k,4] = 0.0
                batch_ys[k,5] = 0.0
                batch_ys[k,6] = 0.0                              


        T1r, T2r, dfr, PDr = ssmrf.set_par(batch_ys[...,0:4])
        batch_xs_c         = ssmrf.bloch_sim_batch_cuda2( batch_size, 100, Nk, PDr, T1r, T2r, dfr, M0, trr, ter, far, ti )


        #ut.plot(np.absolute(batch_xs_c[0,:]))   
        if orig_Ndiv != Nk:
            batch_xs = np.absolute(simut.average_dict(batch_xs_c, orig_Ndiv))#(np.dot(np.absolute(simut.average_dict(batch_xs_c, Ndiv)), coeff)) 
        else:
            batch_xs = np.absolute(batch_xs_c)


        #ut.plot(np.absolute(batch_xs[0,:]))  
        #batch_xt = batch_xs
        batch_xs = batch_xs + np.random.ranf(1)[0]*np.random.uniform(-0.1,0.1,(batch_xs.shape))

        #batch_xs = batch_xs/np.ndarray.max(batch_xs.flatten())
        if 1:
            batch_xs = np.dot(batch_xs, coeff)
            #batch_xt = np.dot(batch_xt, coeff)
        else:
            batch_xs = batch_xs
            #batch_xt = batch_xs
        for dd in range(batch_xs.shape[0]):
            tc1 = batch_xs[dd,:] #- np.mean(imall[i,:])
            #tc2 = batch_xt[dd,:]        
            normtc1 = np.linalg.norm(tc1)
            #normtc2 = np.linalg.norm(tc2)
            if normtc1  > 0.05: #and batch_ys[dd,0]*5000 > 3*500*batch_ys[dd,1] and batch_ys[dd,0]*5000 < 20*500*batch_ys[dd,1]
                batch_xs[dd,:] = tc1#/normtc1
                #batch_xt[dd,:] = tc2#/normtc2
            else:
        #        batch_xs[dd,:] = np.zeros([1,Ndiv])
        #        batch_xt[dd,:] = np.zeros([1,Ndiv])
                batch_ys[dd,:] = np.zeros([1,npar])
        batch_xs = batch_xs/np.ndarray.max(batch_xs.flatten())
        #batch_xt = batch_xt/np.ndarray.max(batch_xt.flatten())
        #for kk in range(batch_xs.shape[1]):
        #    batch_xs [:,kk] = (batch_xs[:,kk])/np.std(batch_xs[:,kk] )#- np.mean(batch_xs[:,kk])
        #    batch_xt [:,kk] = (batch_xt[:,kk])/np.std(batch_xt[:,kk] )#- np.mean(batch_xt[:,kk])

        #ut.plot(np.real(batch_xs[0,:]),pause_close = 1)

        #batch_xs = batch_xs *  batch_ys[0,3] #* np.random.ranf(1)[0]#
        model.test(batch_xs, batch_ys)        
        model.train(batch_xs, batch_ys)

        if i % 100 == 0:
            prey = model.prediction(batch_xs,np.zeros(batch_ys.shape))
            ut.plot(prey[...,0], batch_ys[...,0], line_type = '.', pause_close = 1)
            ut.plot(prey[...,1], batch_ys[...,1], line_type = '.', pause_close = 1)
            ut.plot(prey[...,2], batch_ys[...,2], line_type = '.', pause_close = 1)
            ut.plot(prey[...,3], batch_ys[...,3], line_type = '.', pause_close = 1)
            ut.plot(prey[...,4], batch_ys[...,4], line_type = '.', pause_close = 1)
            model.save(pathdat + 'test_model_save')
Example #10
def test1():
    mat_contents = sio.loadmat(pathdat + 'dict_pca.mat')
    #dict_pca
    #dictall       = np.array(mat_contents["avedictall"].astype(np.float32))
    #label         = np.array(mat_contents["dict_label"].astype(np.float32))
    coeff = np.array(mat_contents["coeff"].astype(np.float32))
    #cn_orders     = np.array(mat_contents["cn_orders"].astype(np.float32))
    par = mat_contents["par"]

    batch_size = 800
    Nk = par[0]['irfreq'][0][0][0]  #892#far.shape[0]#par.irfreq#
    Ndiv = coeff.shape[1]  #par[0]['ndiv'][0][0][0]#16
    orig_Ndiv = coeff.shape[0]
    npar = 4
    model = tf_wrap.tf_model_top([None, Ndiv], [None, npar],
                                 tf_prediction_func,
                                 tf_optimize_func,
                                 tf_error_func,
                                 arg=0.5)

    fa = par[0]['fa'][0][0][0].astype(np.float32)  #35#30 #deg
    tr = par[0]['tr'][0][0][0].astype(np.float32)  #3.932#4.337 #ms
    ti = par[0]['ti'][0][0][0].astype(np.float32)  #11.0 #ms
    #print(fa)
    #print(tr)
    #print(ti)
    #print(Nk)
    #print(Ndiv)

    far, trr = simut.rftr_const(Nk, fa, tr)
    M0 = simut.def_M0()

    # to run tensorflow on cpu only, set the gpu device count to 0 (option commented out)
    config = tf.ConfigProto()  #(device_count = {'GPU': 0})
    # allow tensorflow to grow gpu memory allocation as needed
    config.gpu_options.allow_growth = True

    for i in range(1000000):
        batch_ys = np.random.uniform(0, 1, (batch_size, 4)).astype(np.float64)
        #batch_ys[:,0]      = batch_ys[:,0] + 1.0*batch_ys[:,1]/10.0
        #batch_ys[:,0]      = np.random.uniform(0.07,1.0,(batch_size)).astype(np.float64)
        #batch_ys[:,1]      = np.random.uniform(0.0,0.2,(batch_size)).astype(np.float64)
        batch_ys[:, 2] = np.random.uniform(0, 1.0 / tr,
                                           (batch_size)).astype(np.float64)
        #batch_ys[:,2]      = np.zeros(batch_size)
        #batch_ys[:,3]      = np.ones(batch_size)#np.random.uniform(0.4,1,(batch_size)).astype(np.float64)#

        #batch_ys[:,0] = np.round(batch_ys[:,0]*20)/20
        #batch_ys[:,1] = np.round(batch_ys[:,1]*20)/20
        #batch_ys[:,2] = np.round(batch_ys[:,2]*20)/20
        #batch_ys[:,3] = np.round(batch_ys[:,3]*5)/5
        #batch_ys[:,3] = np.round(batch_ys[:,3]*5)/5
        # initial seq simulation with t1t2b0 values
        #seq_data = ssad.irssfp_arrayin_data( batch_size, Nk ).set( batch_ys )
        T1r, T2r, dfr, PDr = ssmrf.set_par(batch_ys)
        batch_xs_c = ssmrf.bloch_sim_batch_cuda(batch_size, 100, Nk, PDr, T1r,
                                                T2r, dfr, M0, trr, far, ti)

        #ut.plot(np.absolute(batch_xs_c[0,:]))
        if orig_Ndiv != Nk:
            batch_xs = np.absolute(
                simut.average_dict(batch_xs_c, orig_Ndiv)
            )  #(np.dot(np.absolute(simut.average_dict(batch_xs_c, Ndiv)), coeff))
            #batch_xs  = np.absolute(simut.average_dict_cnorders(batch_xs_c, cn_orders)) #np.absolute(np.dot(batch_xs_c, cn_orders))
        else:
            batch_xs = np.absolute(batch_xs_c)

        #ut.plot(np.absolute(batch_xs[0,:]))
        batch_xt = batch_xs
        batch_xs = batch_xs + np.random.ranf(1)[0] * np.random.uniform(
            -0.1, 0.1, (batch_xs.shape))

        #batch_xs = batch_xs/np.ndarray.max(batch_xs.flatten())
        if 1:
            batch_xs = np.dot(batch_xs, coeff)
            batch_xt = np.dot(batch_xt, coeff)
        else:
            batch_xs = batch_xs
            batch_xt = batch_xs
        #batch_ys[:,3]      = np.zeros(batch_size)
        for dd in range(batch_xs.shape[0]):
            tc1 = batch_xs[dd, :]  #- np.mean(imall[i,:])
            tc2 = batch_xt[dd, :]
            normtc1 = np.linalg.norm(tc1)
            normtc2 = np.linalg.norm(tc2)
            if normtc2 > 0.1:  #and batch_ys[dd,0]*5000 > 3*500*batch_ys[dd,1] and batch_ys[dd,0]*5000 < 20*500*batch_ys[dd,1]
                batch_xs[dd, :] = tc1  #/normtc1
                batch_xt[dd, :] = tc2  #/normtc2
            else:
                #        batch_xs[dd,:] = np.zeros([1,Ndiv])
                #        batch_xt[dd,:] = np.zeros([1,Ndiv])
                batch_ys[dd, :] = np.zeros([1, npar])

        batch_xs = batch_xs / np.ndarray.max(batch_xs.flatten())
        batch_xt = batch_xt / np.ndarray.max(batch_xt.flatten())
        #for kk in range(batch_xs.shape[1]):
        #    batch_xs [:,kk] = (batch_xs[:,kk])/np.std(batch_xs[:,kk] )#- np.mean(batch_xs[:,kk])
        #    batch_xt [:,kk] = (batch_xt[:,kk])/np.std(batch_xt[:,kk] )#- np.mean(batch_xt[:,kk])

        #ut.plot(np.real(batch_xs[0,:]),pause_close = 1)

        #batch_ys[:,3]      = np.ones(batch_size) * np.random.ranf(1)[0]
        #batch_xs = batch_xs *  batch_ys[0,3] #* np.random.ranf(1)[0]#
        model.test(batch_xs, batch_ys)
        model.train(batch_xt, batch_ys)
        model.train(batch_xs, batch_ys)

        if i % 100 == 0:
            prey = model.prediction(batch_xs, np.zeros(batch_ys.shape))
            ut.plot(prey[..., 0],
                    batch_ys[..., 0],
                    line_type='.',
                    pause_close=1)
            ut.plot(prey[..., 1],
                    batch_ys[..., 1],
                    line_type='.',
                    pause_close=1)
            ut.plot(prey[..., 2],
                    batch_ys[..., 2],
                    line_type='.',
                    pause_close=1)
            ut.plot(prey[..., 3],
                    batch_ys[..., 3],
                    line_type='.',
                    pause_close=1)
            model.save(pathdat + 'test_model_save')
Example #11
def test1():
    mat_contents  = sio.loadmat(pathdat+'mrf_t1t2b0pd_mrf_randphasecyc_traintest.mat')
    far           = np.array(mat_contents["rf"].astype(np.complex128).squeeze())
    trr           = np.array(mat_contents["trr"].astype(np.float64).squeeze())

    Nk     = far.shape[0]#960#
    model  = tf_wrap.tf_model_top( [None,  2 * Nk], [None,  4], tf_prediction_func, tf_optimize_func, tf_error_func, 0.5)

    batch_size = 800
    # generate far and trr
    #far_amp    = np.random.uniform(0, 15.0/180.0 * np.pi, (Nk,))
    #far_phase  = np.random.uniform(-np.pi,         np.pi, (Nk,))
    #far        = np.multiply(far_amp, np.exp(far_phase)).astype(np.complex128).squeeze()
    #trr        = np.random.uniform(3.0, 16.0, (Nk,)).astype(np.float64).squeeze()

    #far, trr   = simut.rftr_const(Nk, 15.0, 4.0)
    #far,trr    = simut.rftr_rand(Nk, fa, 3, 16)
    # prepare for sequence simulation, y->x_hat
    ti         = 10 #ms
    M0         = np.array([0.0,0.0,1.0]).astype(np.float64)

    # to run tensorflow on cpu only, set the gpu device count to 0 (option commented out)
    config     = tf.ConfigProto()#(device_count = {'GPU': 0})
    # allow tensorflow to grow gpu memory allocation as needed
    config.gpu_options.allow_growth = True
    
    Nite       = 200000
    #run for 2000
    for i in range(Nite):
        batch_ys         = np.random.uniform(0,1,(batch_size,4)).astype(np.float64)
        #batch_ys         = np.random.randint(1,10,(batch_size,4)).astype(np.float64)/10.0
        
        #batch_ys[:,2]    = np.random.uniform(0,0.2,(batch_size)).astype(np.float64)#np.zeros(batch_size)
        #batch_ys[:,3]      = np.ones(batch_size)

        batch_xs   = np.zeros((batch_size,2 * Nk), dtype = np.float64)
        batch_xs_c = np.zeros((batch_size, Nk),    dtype = np.complex128)

        # initial seq simulation with t1t2b0 values
        #seq_data = ssad.irssfp_arrayin_data( batch_size, Nk ).set( batch_ys )
        T1r, T2r, dfr, PDr        = ssmrf.set_par(batch_ys)
        batch_xs_c[...,0:Nk]      = ssmrf.bloch_sim_batch_cuda( batch_size, 100, Nk, PDr,\
         T1r, T2r, dfr, M0, trr, far, ti )
        # separate real/imag parts or abs/angle parts, no-noise output
        batch_xs[:,0:Nk]    = np.real(batch_xs_c)
        batch_xs[:,Nk:2*Nk] = np.imag(batch_xs_c)

        #input with noise
        batch_xsnoise = batch_xs  + np.random.ranf(1)[0]*np.random.uniform(-0.4,0.4,(batch_size,2*Nk))
        model.train(batch_xsnoise, batch_ys)
        if i%10 == 0:
            model.test(batch_xsnoise, batch_ys)
        if i%100 == 0 or i >= (Nite - 1):
            model.save('../save_data/MRF_encoder_t1t2b0')
            sio.savemat('../save_data/MRF_far_trr.mat', {'far':far, 'trr':trr})
        if i % 100 == 0:
            prey = model.prediction(batch_xs,np.zeros(batch_ys.shape))
            ut.plot(prey[...,0], batch_ys[...,0], line_type = '.', pause_close = 1)
            ut.plot(prey[...,1], batch_ys[...,1], line_type = '.', pause_close = 1)
            ut.plot(prey[...,2], batch_ys[...,2], line_type = '.', pause_close = 1)
            ut.plot(prey[...,3], batch_ys[...,3], line_type = '.', pause_close = 1)
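Several of these tests pack the complex Bloch simulation into a real feature vector by writing the real part to columns 0:Nk and the imaginary part to columns Nk:2*Nk. A pair of small helpers capturing that convention might look like the sketch below; the helper names are hypothetical.

import numpy as np

def pack_complex(xc):
    """(batch, Nk) complex -> (batch, 2*Nk) real features: [real | imag]."""
    return np.concatenate([np.real(xc), np.imag(xc)], axis=1)

def unpack_complex(xr):
    """Inverse of pack_complex: recombine the two halves into a complex array."""
    nk = xr.shape[1] // 2
    return xr[:, :nk] + 1j * xr[:, nk:]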
Example #12
def test():
    ft = opt.FFT2d()

    # simulated image
    mat_contents = sio.loadmat('data/brain_8ch.mat')
    x = mat_contents["DATA"]

    #ut.plotim1(np.absolute(x[:,:,0]))

    im = ft.backward(x[:, :, :])
    ut.plotim3(np.absolute(im[:, :, :]))

    #shape of x
    nx, ny, nc = x.shape

    xcrop = ut.crop2d(x, 30)

    #ksp = ft.forward(im)
    #ut.plotim1(np.absolute(ksp[:,:]))

    # multidimensional tensor as the block hankel matrix
    h = hk.hankelnd_r(xcrop, (16, 16, 1))
    dimh = h.shape
    #flatten the tensor to create a matrix
    hmtx = h.reshape(
        (dimh[0] * dimh[1] * dimh[2], dimh[3], dimh[4], dimh[5])).reshape(
            (dimh[0] * dimh[1] * dimh[2], dimh[3] * dimh[4] * dimh[5]))

    #svd, could try other approaches
    U, s, V = np.linalg.svd(hmtx, full_matrices=False)
    #S = np.diag(s)
    #ut.plotim1(np.absolute(V[:,0:150]).T)#plot V singular vectors
    ut.plot(s)  # plot singular values
    #invh = np.zeros(x.shape,complex)
    #print h.shape
    #hk.invhankelnd(h,invh,(2,3,1))

    #reshape vn to generate k-space vn tensor
    nsingular = 150  # number of truncated singular vectors
    vn = V[0:nsingular, :].reshape(
        (nsingular, dimh[3], dimh[4], dimh[5])).transpose((1, 2, 0, 3))

    #zero pad vn, vn matrix of reshaped singular vectors,
    #dims of vn: nx,ny,nsingular,ncoil
    vn = ut.pad2d(vn, nx, ny)
    # plot first singular vector Vn[0]
    imvn = ft.forward(vn)
    #ut.plotim3(np.absolute(imvn[:,:,0,:].squeeze()))#spatial feature of V[:,1] singular vector
    sim = 1j * np.zeros((nx, ny))
    Vim = 1j * np.zeros((nx, ny, nc))
    #Uim = 1j*np.zeros((nx,ny,nc))

    for ix in range(nx):
        for iy in range(ny):
            vpix = imvn[ix, iy, :, :].squeeze()
            vpix = np.matrix(vpix).transpose()
            vvH = vpix.dot(vpix.getH())
            U, s, V = np.linalg.svd(vvH, full_matrices=False)
            sim[ix, iy] = s[0]
            Vim[ix, iy, :] = V[0, :].squeeze()
            #Uim[ix,iy,:] = U[:,0].squeeze()

    # plot the first eigenvector and eigenvalue maps
    ut.plotim3(np.absolute(Vim))
    #ut.plotim3(np.absolute(Uim))
    ut.plotim1(np.absolute(sim))
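Example #8 goes on to conjugate, normalize, and threshold its eigenvector maps; the same post-processing could be applied to the 2-D Vim/sim computed here. The sketch below mirrors that step; the function name and the default threshold (borrowed from espirit_3d's sigv_th) are assumptions.

import numpy as np

def normalize_sense_maps(Vim, sim, sigv_th=0.01):
    """Normalize eigenvector maps to unit coil-norm and zero out pixels whose
    leading eigenvalue is small, mirroring the post-processing in espirit_3d."""
    Vim = np.conj(Vim)
    Vimnorm = np.linalg.norm(Vim, axis=-1)             # per-pixel coil norm
    Vim = Vim / (1e-6 + Vimnorm[..., np.newaxis])
    sim = np.absolute(sim) / np.max(np.absolute(sim))  # normalized eigenvalue map
    Vim[sim < sigv_th] = 0                             # mask low-eigenvalue pixels
    return Vim, sim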
Example #13
def test1():
    mat_contents = sio.loadmat(pathdat + 'dict_pca.mat')
    #dict_pca
    #dictall       = np.array(mat_contents["avedictall"].astype(np.float32))
    #label         = np.array(mat_contents["dict_label"].astype(np.float32))

    par = mat_contents["par"]

    read_coeff_flag = 1  # 0: do not use coeff; 1: read coeff from the mat file; 2: generate coeff by pca
    abs_flag = 0  # if set, apply pca to the absolute value of the time course
    batch_size = 800
    Nk = par[0]['irfreq'][0][0][0]  #892#far.shape[0]#par.irfreq#

    if read_coeff_flag == 1:
        coeff = np.array(mat_contents["coeff"].astype(np.float32))
        Ndiv = coeff.shape[1]  #Nk #par[0]['ndiv'][0][0][0]#16
        orig_Ndiv = coeff.shape[0]  #Nk
    elif read_coeff_flag == 2:
        Ndiv = 20  #20 pca
        orig_Ndiv = Nk  #coeff.shape[0]#Nk
    else:
        Ndiv = Nk  #coeff.shape[1]#Nk #par[0]['ndiv'][0][0][0]#16
        orig_Ndiv = Nk  #coeff.shape[0]#Nk

    npar = 4
    model = tf_wrap.tf_model_top([None, Ndiv], [None, npar],
                                 tf_prediction_func, tf_optimize_func,
                                 tf_error_func)

    fa = par[0]['fa'][0][0][0].astype(np.float32)  #35#30 #deg
    tr = par[0]['tr'][0][0][0].astype(np.float32)  #3.932#4.337 #ms
    ti = par[0]['ti'][0][0][0].astype(np.float32)  #11.0 #ms
    #print(fa)
    #print(tr)
    #print(ti)
    #print(Nk)
    #print(Ndiv)

    far, trr = simut.rftr_const(Nk, fa, tr)
    #far,trr    = simut.rftr_rand(Nk, fa, tr, 2*tr)
    M0 = simut.def_M0()

    # to run tensorflow on cpu only, set the gpu device count to 0 (option commented out)
    config = tf.ConfigProto()  #(device_count = {'GPU': 0})
    # allow tensorflow to grow gpu memory allocation as needed
    config.gpu_options.allow_growth = True

    #compute pca
    if read_coeff_flag == 2:
        batch_ys = np.random.uniform(0, 1, (batch_size, 4)).astype(np.float64)
        #batch_ys[:,0]      = np.random.uniform(0.1,0.6,(batch_size)).astype(np.float64)
        #batch_ys[:,1]      = np.random.uniform(0.1,0.3,(batch_size)).astype(np.float64)
        #batch_ys[:,2]      = np.random.uniform(0,1.0/tr,(batch_size)).astype(np.float64)
        #batch_ys[:,3]      = np.random.uniform(0.1,1.0,(batch_size)).astype(np.float64)# np.ones(batch_size)## np.random.uniform(0.5,1,(batch_size)).astype(np.float64)#

        # initial seq simulation with t1t2b0 values
        npca = Ndiv
        T1r, T2r, dfr, PDr = ssmrf.set_par(batch_ys)
        batch_xs_c = ssmrf.bloch_sim_batch_cuda(batch_size, 100, Nk, PDr, T1r,
                                                T2r, dfr, M0, trr, far, ti)
        #ut.plot(np.absolute(batch_xs_c[0,:]),pause_close =1)
        if orig_Ndiv < Nk:
            batch_xs = simut.average_dict(
                batch_xs_c, orig_Ndiv
            )  #(np.dot(np.absolute(simut.average_dict(batch_xs_c, Ndiv)), coeff))
        else:
            batch_xs = batch_xs_c
        pca_mtx = np.dot(np.matrix(batch_xs).getH(), batch_xs)
        U, s, V = scipy.sparse.linalg.svds(pca_mtx, npca)
        coeff = U[:, npca - 1::-1]
        sio.savemat(pathdat + 'MRF_pca_coeff.mat', {
            'coeff': coeff,
            'dict': batch_xs
        })

    for i in range(1000000):
        batch_ys = np.random.uniform(0, 1, (batch_size, 4)).astype(np.float64)
        #batch_ys[:,0]      = np.random.uniform(0.1,0.6,(batch_size)).astype(np.float64)
        #batch_ys[:,1]      = np.random.uniform(0.1,0.3,(batch_size)).astype(np.float64)
        batch_ys[:, 2] = np.random.uniform(0, 1.0 / tr,
                                           (batch_size)).astype(np.float64)
        #batch_ys[:,3]      = np.ones(batch_size)#np.random.uniform(0.1,1.0,(batch_size)).astype(np.float64)# # np.random.uniform(0.5,1,(batch_size)).astype(np.float64)#
        #batch_ys[:,2]      = np.zeros(batch_size)
        #batch_ys[:,2]      = np.random.uniform(0.19,0.21,(batch_size)).astype(np.float64)#0.2*np.ones(batch_size)
        # initial seq simulation with t1t2b0 values
        #seq_data = ssad.irssfp_arrayin_data( batch_size, Nk ).set( batch_ys )
        T1r, T2r, dfr, PDr = ssmrf.set_par(batch_ys)
        batch_xs_c = ssmrf.bloch_sim_batch_cuda(batch_size, 100, Nk, PDr, T1r,
                                                T2r, dfr, M0, trr, far, ti)

        #ut.plot(np.absolute(batch_xs_c[0,:]),pause_close =1)
        if orig_Ndiv < Nk:
            batch_xs = simut.average_dict(
                batch_xs_c, orig_Ndiv
            )  #(np.dot(np.absolute(simut.average_dict(batch_xs_c, Ndiv)), coeff))
        else:
            batch_xs = batch_xs_c
        batch_xs = batch_xs + np.random.ranf(1)[0] * np.random.uniform(
            -0.005, 0.005, (batch_xs.shape))

        #batch_xs = batch_xs/np.ndarray.max(batch_xs.flatten())
        if read_coeff_flag == 1:
            if abs_flag:
                batch_xs = np.dot(np.absolute(batch_xs), coeff)
            else:
                batch_xs = np.absolute(np.dot(batch_xs, coeff))
        elif read_coeff_flag == 2:
            batch_xs = np.absolute(np.dot(batch_xs, coeff))
        else:
            batch_xs = np.absolute(batch_xs)

        for dd in range(batch_xs.shape[0]):
            tc1 = batch_xs[dd, :]  #- np.mean(imall[i,:])
            normtc1 = np.linalg.norm(tc1)
            if normtc1 > 0.04 and batch_ys[dd, 0] * 5000 > 3 * 500 * batch_ys[
                    dd, 1]:
                batch_xs[dd, :] = tc1  #/normtc1
            else:
                batch_ys[dd, :] = np.zeros([1, npar])

        batch_xs = 1000 * batch_xs
        #ut.plot(np.absolute(batch_xs[0,:]),pause_close =1)
        #batch_ys[:,2]      = np.zeros(batch_size)
        #batch_ys[:,3]      = np.zeros(batch_size)

        model.train(batch_xs, batch_ys)
        model.test(batch_xs, batch_ys)
        if i % 100 == 0:
            prey = model.prediction(batch_xs, np.zeros(batch_ys.shape))
            ut.plot(prey[..., 0],
                    batch_ys[..., 0],
                    line_type='.',
                    pause_close=1)
            ut.plot(prey[..., 1],
                    batch_ys[..., 1],
                    line_type='.',
                    pause_close=1)
            ut.plot(prey[..., 2],
                    batch_ys[..., 2],
                    line_type='.',
                    pause_close=1)
            ut.plot(prey[..., 3],
                    batch_ys[..., 3],
                    line_type='.',
                    pause_close=1)
            model.save(pathdat + 'test_model_savecnn')
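The read_coeff_flag == 2 branch above builds coeff from svds of the Gram matrix A^H A and then flips the column order. An equivalent sketch using an economy SVD of the dictionary itself is shown below; the helper name is hypothetical, and it assumes the same layout as batch_xs (rows are time courses), returning columns ordered by decreasing singular value.

import numpy as np

def pca_coeff(dictionary, npca):
    """Top-npca right singular vectors of the dictionary, playing the role of
    coeff above (project time courses with np.dot(batch_xs, coeff))."""
    # economy SVD: dictionary = U @ diag(s) @ Vh, with s sorted in descending order
    _, _, Vh = np.linalg.svd(dictionary, full_matrices=False)
    return np.conj(Vh[:npca, :]).T                 # shape (orig_Ndiv, npca)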