Example #1
    def __get_excit_wfm(filepath):
        """
        Returns the excitation BE waveform present in the more parms.mat file
        
        Parameters
        ------------
        filepath : String / unicode
            Absolute filepath of the .mat parameter file
        
        Returns
        -----------
        ex_wfm : 1D numpy float array
            Band Excitation waveform

        """
        if not path.exists(filepath):
            warn('BEPSndfTranslator - NO more_parms.mat file found')
            return np.zeros(1000, dtype=np.float32)

        if 'more_parms' in filepath:
            matread = loadmat(filepath, variable_names=['FFT_BE_wave'])
            fft_full = np.complex64(np.squeeze(matread['FFT_BE_wave']))
            bin_inds = None
            fft_full_rev = None
        else:
            matread = loadmat(filepath, variable_names=['FFT_BE_wave', 'FFT_BE_rev_wave', 'BE_bin_ind'])
            bin_inds = np.uint(np.squeeze(matread['BE_bin_ind'])) - 1
            fft_full = np.complex64(np.squeeze(matread['FFT_BE_wave']))
            fft_full_rev = np.complex64(np.squeeze(matread['FFT_BE_rev_wave']))

        return fft_full, fft_full_rev, bin_inds
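A minimal usage sketch (the file path below is hypothetical, not from the source); the helper returns a 3-tuple whose last two entries are only populated when the file is not a more_parms.mat file:

# Hypothetical usage; the path is an assumption for illustration.
fft_full, fft_full_rev, bin_inds = __get_excit_wfm('/data/beps/parms.mat')
print(fft_full.dtype)  # complex64 when the file exists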
Example #2
File: test_chash.py Project: lebedov/chash
    def test_numpy(self):
        assert chash(np.bool_(True)) == chash(np.bool_(True))

        assert chash(np.int8(1)) == chash(np.int8(1))
        assert chash(np.int16(1)) == chash(np.int16(1))
        assert chash(np.int32(1)) == chash(np.int32(1))
        assert chash(np.int64(1)) == chash(np.int64(1))

        assert chash(np.uint8(1)) == chash(np.uint8(1))
        assert chash(np.uint16(1)) == chash(np.uint16(1))
        assert chash(np.uint32(1)) == chash(np.uint32(1))
        assert chash(np.uint64(1)) == chash(np.uint64(1))

        assert chash(np.float32(1)) == chash(np.float32(1))
        assert chash(np.float64(1)) == chash(np.float64(1))
        assert chash(np.float128(1)) == chash(np.float128(1))

        assert chash(np.complex64(1+1j)) == chash(np.complex64(1+1j))
        assert chash(np.complex128(1+1j)) == chash(np.complex128(1+1j))
        assert chash(np.complex256(1+1j)) == chash(np.complex256(1+1j))

        assert chash(np.datetime64('2000-01-01')) == chash(np.datetime64('2000-01-01'))
        assert chash(np.timedelta64(1,'W')) == chash(np.timedelta64(1,'W'))

        self.assertRaises(ValueError, chash, np.object())

        assert chash(np.array([[1, 2], [3, 4]])) == \
            chash(np.array([[1, 2], [3, 4]]))
        assert chash(np.array([[1, 2], [3, 4]])) != \
            chash(np.array([[1, 2], [3, 4]]).T)
        assert chash(np.array([1, 2, 3])) == chash(np.array([1, 2, 3]))
        assert chash(np.array([1, 2, 3], dtype=np.int32)) != \
            chash(np.array([1, 2, 3], dtype=np.int64))
Example #3
	def runTest(self):
		node_list = ['n1', 'n2', 'n3']
		V = np.complex64(( 1.02500000+0.0j, 1.00057074-0.03669173j, 1.02970624+0.0245978j))
		phase = np.complex64((0.0, -0.03665437, 0.02388363))

		P = np.complex64((1.00010597, -4.0, 3.0))
		Q = np.complex64((0.90512173, -2.0, 1.36935805))

		netlist = Netlist()

		
		# Check values are close to what we expect
		

		try:
			netlist.load_from_file("testing/inputfile2.txt")
			netlist.run()

			# print abs(netlist.result.V)- abs(V)

			print(netlist.result.V)
			print(netlist.result.P)


			# assert (np.allclose(netlist.result.V, V, rtol=1e-3, atol=1e-3))
			# assert (np.allclose(netlist.result.phase, phase, rtol=1e-3, atol=1e-3))
			# assert (np.allclose(netlist.result.P, P, rtol=1e-3, atol=1e-3))
			# assert (np.allclose(netlist.result.Q, Q, rtol=1e-3, atol=1e-3))
		finally:
			del netlist
Example #4
  def testCountingLoopHandrolledC64(self):
    # Define a function for the loop body
    @function.Defun(dtypes.int32, dtypes.complex64)
    def loop_body(step, rsum):
      step_out = step + constant_op.constant(1, dtype=dtypes.int32)
      sum_out = rsum + constant_op.constant(1.5 + 2j, dtype=dtypes.complex64)
      return step_out, sum_out

    # Define a function for the loop condition
    @function.Defun(dtypes.int32, dtypes.complex64)
    def loop_cond(step, rsum):
      del rsum
      return step < 10

    with self.cached_session() as sess:
      init_index = array_ops.placeholder(dtypes.int32, [])
      init_sum = array_ops.placeholder(dtypes.complex64, [])
      with self.test_scope():
        loop_outputs = xla.while_loop([init_index, init_sum], loop_cond,
                                      loop_body)

      result = sess.run(loop_outputs, {init_index: 0, init_sum: 0.0})
      self.assertAllClose(result[1], np.complex64(15 + 20j), rtol=1e-3)
      no_iters_result = sess.run(loop_outputs, {init_index: 10, init_sum: 0.0})
      self.assertAllClose(no_iters_result[1], np.complex64(0), rtol=1e-3)
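For reference, a plain-Python sketch of what the hand-rolled XLA loop above computes:

# Equivalent plain-Python loop: ten iterations, each adding (1.5 + 2j).
step, rsum = 0, np.complex64(0)
while step < 10:
    step, rsum = step + 1, rsum + np.complex64(1.5 + 2j)
assert rsum == np.complex64(15 + 20j)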
Example #5
    def generate(self, width, height, real_axis_range, imag_axis_range, tasks):
        if not is_gpu_accelerated():
            self._logger.error(
                'No GPU acceleration is available, please use CPU.')
            return

        iterations = np.empty(width * height, np.int32)
        iterations_gpu = gpuarray.to_gpu(iterations)

        z_values = np.empty(width * height, np.float32)
        z_values_gpu = gpuarray.to_gpu(z_values)

        cmin = complex(real_axis_range[0], imag_axis_range[0])
        cmax = complex(real_axis_range[1], imag_axis_range[1])
        dc = cmax - cmin

        dx, mx = divmod(width, self._block_size[0])
        dy, my = divmod(height, self._block_size[1])
        grid_size = ((dx + (mx > 0)), (dy + (my > 0)))

        self._get_pixel_iterations(
            iterations_gpu, z_values_gpu,
            np.int32(width), np.int32(height),
            np.complex64(cmin), np.complex64(dc),
            block=self._block_size, grid=grid_size)

        return (iterations_gpu, z_values_gpu, abs(dc))
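The divmod arithmetic above is a ceiling division: the grid must contain enough blocks to cover every pixel even when the image size is not a multiple of the block size. A minimal equivalent, for illustration:

# Ceiling division: blocks needed to cover `extent` pixels with blocks
# of `block` threads; same result as the divmod idiom above.
def blocks_needed(extent, block):
    return (extent + block - 1) // block

assert blocks_needed(1024, 16) == 64
assert blocks_needed(1025, 16) == 65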
Example #6
File: test_idl.py Project: hitej/meta-core
    def test_arrays_replicated_3d(self):
        s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)

        # Check column types
        assert_(s.arrays_rep.a.dtype.type is np.object_)
        assert_(s.arrays_rep.b.dtype.type is np.object_)
        assert_(s.arrays_rep.c.dtype.type is np.object_)
        assert_(s.arrays_rep.d.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.d.shape, (4, 3, 2))

        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.a[i, j, k],
                                           np.array([1, 2, 3], dtype=np.int16))
                    assert_array_identical(s.arrays_rep.b[i, j, k],
                                           np.array([4., 5., 6., 7.],
                                                    dtype=np.float32))
                    assert_array_identical(s.arrays_rep.c[i, j, k],
                                           np.array([np.complex64(1+2j),
                                                     np.complex64(7+8j)]))
                    assert_array_identical(s.arrays_rep.d[i, j, k],
                                           np.array([b"cheese", b"bacon", b"spam"],
                                                    dtype=np.object))
Example #7
def test_dump_np_scalars():
	data = [
		int8(-27),
		complex64(exp(1)+37j),
		(
			{
				'alpha': float64(-exp(10)),
				'str-only': complex64(-1-1j),
			},
			uint32(123456789),
			float16(exp(-1)),
			{
				int64(37),
				uint64(-0),
			},
		),
	]
	replaced = encode_scalars_inplace(deepcopy(data))
	json = dumps(replaced)
	rec = loads(json)
	print(data)
	print(rec)
	assert data[0] == rec[0]
	assert data[1] == rec[1]
	assert data[2][0] == rec[2][0]
	assert data[2][1] == rec[2][1]
	assert data[2][2] == rec[2][2]
	assert data[2][3] == rec[2][3]
	assert data[2] == tuple(rec[2])
Example #8
    def test_cublasCgemmBatched(self):
        l, m, k, n = 11, 7, 5, 3
        A = (np.random.rand(l, m, k)+1j*np.random.rand(l, m, k)).astype(np.complex64)
        B = (np.random.rand(l, k, n)+1j*np.random.rand(l, k, n)).astype(np.complex64)

        C_res = np.einsum('nij,njk->nik', A, B)

        a_gpu = gpuarray.to_gpu(A)
        b_gpu = gpuarray.to_gpu(B)
        c_gpu = gpuarray.empty((l, m, n), np.complex64)

        alpha = np.complex64(1.0)
        beta = np.complex64(0.0)

        a_arr = bptrs(a_gpu)
        b_arr = bptrs(b_gpu)
        c_arr = bptrs(c_gpu)

        cublas.cublasCgemmBatched(self.cublas_handle, 'n','n',
                                  n, m, k, alpha,
                                  b_arr.gpudata, n,
                                  a_arr.gpudata, k,
                                  beta, c_arr.gpudata, n, l)

        assert np.allclose(C_res, c_gpu.get())
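Passing b before a (with 'n', 'n' and leading dimension n) is the standard trick for driving column-major cuBLAS from row-major NumPy arrays: computing B^T A^T in column-major storage yields A B in row-major storage. A quick NumPy check of the transpose identity this relies on:

# (B^T A^T)^T == A B, the identity behind the swapped operand order above.
import numpy as np
A = np.random.rand(4, 5).astype(np.complex64)
B = np.random.rand(5, 3).astype(np.complex64)
assert np.allclose((B.T @ A.T).T, A @ B)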
Example #9
def hiss_removal(audio):
    pend = len(audio)-(4410+1102)
    song = sonify(audio, 44100) 
    song.FrameGenerator().__next__()
    song.window() 
    song.Spectrum()
    noise_fft = song.fft(song.windowed_x)[:song.H+1]
    noise_power = np.log10(np.abs(noise_fft + 2 ** -16))
    noise_floor = np.exp(2.0 * noise_power.mean())                                    
    mn = song.magnitude_spectrum
    e_n = energy(mn)   
    pin = 0                
    output = np.zeros(len(audio))
    hold_time = 0
    ca = 0
    cr = 0
    amp = audio.max()
    while pin < pend:
        selection = pin+2048
        song.frame = audio[pin:selection] 
        song.window()     
        song.M = 2048            
        song.Spectrum()
        e_m = energy(song.magnitude_spectrum)
        SNR = 10 * np.log10(e_m / e_n)
        ft = song.fft(song.windowed_x)[:song.H+1]
        power_spectral_density = np.abs(ft) ** 2
        song.Envelope()
        song.AttackTime()
        rel_time = rel(song.attack_time)
        rel_coef = to_coef(rel_time, 44100)
        at_coef = to_coef(song.attack_time, 44100)
        ca = ca + song.attack_time
        cr = cr + rel_time 
        if SNR > 0:                
            np.add.at(output, range(pin, selection), audio[pin:selection])                                
        else:                    
            if np.any(power_spectral_density < noise_floor):                                    
                gc = dyn_constraint_satis(ft, [power_spectral_density, noise_floor], 0.12589254117941673) 
                if ca > hold_time:
                    gc = np.complex64([at_coef * gc[i- 1] + (1 - at_coef) * x if x > gc[i- 1] else x for i,x in enumerate(gc)])
                if ca <= hold_time:
                    gc = np.complex64([gc[i- 1] for i,x in enumerate(gc)])
                if cr > hold_time:
                    gc = np.complex64([rel_coef * gc[i- 1] + (1 - rel_coef) * x if x <= gc[i- 1] else x for i,x in enumerate(gc)])
                if cr <= hold_time:
                    gc = np.complex64([gc[i- 1] for i,x in enumerate(gc)])
                print ("Reducing noise floor, this is taking some time")
                song.Phase(song.fft(song.windowed_x))
                song.phase = song.phase[:song.magnitude_spectrum.size]
                ft *= gc
                song.magnitude_spectrum = np.sqrt(pow(ft.real,2) + pow(ft.imag,2))
                np.add.at(output, range(pin, selection), song.ISTFT(song.magnitude_spectrum))
            else:
                np.add.at(output, range(pin, selection), audio[pin:selection])                                              
        pin = pin + song.H
        hold_time += selection/44100
    hissless = amp * output / output.max() #amplify to normal level                                                 
    return np.float32(hissless) 
Example #10
from numpy import complex64, sqrt
from scipy.special import ellipk, ellipe

def energy_density_at_origin(k):
    K = complex64(ellipk(k**2))
    E = complex64(ellipe(k**2))
    k1 = sqrt(1-k**2)

    A = 32*(k**2 *(-K**2 * k**2 +E**2-4*E*K+3* K**2 + k**2)-2*(E-K)**2)**2/(k**8 * K**4 * k1**2)

    return A.real
Example #11
	def _update_filtered(self, translated):
		if translated is not None and self._taps_n is not None and self._taps_p is not None:
			filtered_data_1 = TimeData(numpy.complex64(scipy.signal.lfilter(self._taps_n, 1, translated.samples)), translated.sampling_rate)
			filtered_data_2 = TimeData(numpy.complex64(scipy.signal.lfilter(self._taps_p, 1, translated.samples)), translated.sampling_rate)
			self.eye_view.data = (filtered_data_1.abs, filtered_data_2.abs)
			self.burst.filtered = TimeData(filtered_data_2.abs.samples - filtered_data_1.abs.samples, filtered_data_1.sampling_rate)
		else:
			self.eye_view.data = (None, None)
			self.burst.filtered = None
Example #12
def sc_complex_dot(x_gpu, y_gpu, c_gpu, transa='N', transb='N', handle=None):
    """
    modified version of linalg.dot which allows for the target output array to be specified.
    This function does not return anything.
    """
    if handle is None:
        handle = scikits.cuda.misc._global_cublas_handle

    assert len(x_gpu.shape) == 2
    assert len(y_gpu.shape) == 2
    assert len(c_gpu.shape) == 2
    assert x_gpu.dtype == np.complex64
    assert y_gpu.dtype == np.complex64 
    assert c_gpu.dtype == np.complex64

    # Get the shapes of the arguments
    x_shape = x_gpu.shape
    y_shape = y_gpu.shape
    
    # Perform matrix multiplication for 2D arrays:
    alpha = np.complex64(1.0)
    beta = np.complex64(0.0)
    
    transa = transa.lower()
    transb = transb.lower()

    if transb in ['t', 'c']:
        m, k = y_shape
    elif transb in ['n']:
        k, m = y_shape
    else:
        raise ValueError('invalid value for transb')

    if transa in ['t', 'c']:
        l, n = x_shape
    elif transa in ['n']:
        n, l = x_shape
    else:
        raise ValueError('invalid value for transa')

    if l != k:
        raise ValueError('objects are not aligned')

    if transb == 'n':
        lda = max(1, m)
    else:
        lda = max(1, k)

    if transa == 'n':
        ldb = max(1, k)
    else:
        ldb = max(1, n)

    ldc = max(1, m)

    cublas.cublasCgemm(handle, transb, transa, m, n, k, alpha, y_gpu.gpudata,
                lda, x_gpu.gpudata, ldb, beta, c_gpu.gpudata, ldc)
Example #13
def add_vdot(M, v, out, beta=0.0, transM='N', handle=None):
    if handle is None:
        handle = scm._global_cublas_handle

    assert M.strides[1] <= M.strides[0], 'only C-order arrays supported'

    transM = transM.lower()
    if transM == 'n':
        trans = 't'
        m = M.shape[1]
        n = M.shape[0]
        alpha = 1.0
        lda = M.strides[0] // M.dtype.itemsize
        if v.shape[0] != M.shape[1] or out.shape[0] != M.shape[0]:
            raise ValueError('dimension mismatch: %s %s %s' %
                             (M.shape, v.shape, out.shape))
    elif transM == 't':
        trans = 'n'
        m = M.shape[1]
        n = M.shape[0]
        alpha = 1.0
        lda = M.strides[0] // M.dtype.itemsize
        if v.shape[0] != M.shape[0] or out.shape[0] != M.shape[1]:
            raise ValueError('dimension mismatch: %s %s %s' %
                             (M.shape, v.shape, out.shape))
    else:
        raise ValueError('transM must be n or t')

    if (M.dtype == np.complex64 and v.dtype == np.complex64):
        cublas_func = scikits.cuda.cublas.cublasCgemv
        alpha = np.complex64(alpha)
        beta = np.complex64(beta)
    elif (M.dtype == np.float32 and v.dtype == np.float32):
        cublas_func = scikits.cuda.cublas.cublasSgemv
        alpha = np.float32(alpha)
        beta = np.float32(beta)
    elif (M.dtype == np.complex128 and v.dtype == np.complex128):
        cublas_func = scikits.cuda.cublas.cublasZgemv
        alpha = np.complex128(alpha)
        beta = np.complex128(beta)
    elif (M.dtype == np.float64 and v.dtype == np.float64):
        cublas_func = scikits.cuda.cublas.cublasDgemv
        alpha = np.float64(alpha)
        beta = np.float64(beta)
    else:
        raise ValueError('unsupported combination of input types')

    incx = 1
    incy = 1
    cublas_func(handle,
                trans, m, n,
                alpha,
                M.gpudata, lda,
                v.gpudata, incx,
                beta,
                out.gpudata, incy)
Example #14
    def __init__(self,timeSeries=None,
                 lenSeries=2**18,
                 numChannels=1,
                 fMin=400,fMax=800,
                 sampTime=None,
                 noiseRMS=0.1):
        """ Initializes the AmplitudeTimeSeries instance. 
        If a array is not passed, then a random whitenoise dataset is generated.
        Inputs: 
        Len -- Number of time data points (usually a power of 2) 2^38 gives about 65 seconds 
        of 400 MHz sampled data
        The time binning is decided by the bandwidth
        fMin -- lowest frequency (MHz)
        fMax -- highest frequency (MHz)
        noiseRMS -- RMS value of noise (TBD)
        noiseAlpha -- spectral slope (default is white noise) (TBD)
        ONLY GENERATES WHITE NOISE RIGHT NOW!
        """
        self.shape = (np.uint(numChannels),np.uint(lenSeries))
        self.fMax = fMax
        self.fMin = fMin        
        
        if sampTime is None:
            self.sampTime = np.uint(numChannels)*1E-6/(fMax-fMin)
        else:
            self.sampTime = sampTime

        if timeSeries is None:
            # then use the rest of the data to generate a random timeseries
            if VERBOSE:
                print "AmplitudeTimeSeries __init__ did not get new data, generating white noise data"

            self.timeSeries = np.complex64(noiseRMS*(np.float16(random.standard_normal(self.shape))
                                                     +np.float16(random.standard_normal(self.shape))*1j)/np.sqrt(2))
            
        else:
            if VERBOSE:
                print "AmplitudeTimeSeries __init__ got new data, making sure it is reasonable."

            if len(timeSeries.shape) == 1:
                self.shape = (1,timeSeries.shape[0])
                
            else:
                self.shape = timeSeries.shape

            self.timeSeries = np.reshape(np.complex64(timeSeries),self.shape)
            
            self.fMin = fMin
            self.fMax = fMax

            if sampTime is None:
                self.sampTime = numChannels*1E-6/(fMax-fMin)
            else:
                self.sampTime = sampTime

        return None
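A usage sketch, assuming the enclosing class is named AmplitudeTimeSeries as the docstring suggests; with no timeSeries argument the constructor synthesizes complex white noise:

# Hypothetical usage: generate a single channel of complex white noise.
ats = AmplitudeTimeSeries(lenSeries=2**18, numChannels=1, noiseRMS=0.1)
print(ats.shape, ats.sampTime)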
Example #15
 def test_compressed(self):
     s = idlsave.read(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
     assert_identical(s.i8u, np.uint8(234))
     assert_identical(s.f32, np.float32(-3.1234567e+37))
     assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
     assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
     assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
     assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
     assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
     assert_identical(s.arrays.d[0], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object))
Example #16
def test_dtypes():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.int32(a), numpy.int32(b))
    fassert(afnumpy.complex64(a), numpy.complex64(b))
    assert afnumpy.float(a.sum()) == numpy.float(b.sum())
    fassert(afnumpy.complex64(b), numpy.complex64(a))
    assert type(afnumpy.complex64(b)) == afnumpy.multiarray.ndarray
    assert type(afnumpy.complex64([1,2,3])) == afnumpy.multiarray.ndarray
    assert type(afnumpy.bool8(True)) == numpy.bool_
Example #17
    def get_workspace(self, n):
        from pyfft.cuda import Plan as pycufftplan
        import pycuda.gpuarray as gpuarray

        ws = self.get(n)
        if ws: return ws
        return self.setdefault(n,
            (pycufftplan(int(n), stream=self.stream, normalize=False),
             gpuarray.empty(n, dtype=complex64(0.).dtype),
             gpuarray.empty(n, dtype=complex64(0.).dtype)))
Example #18
from numpy import complex64, sqrt
from scipy.special import ellipk, ellipe

def dmus(zeta, x, k):

    K = complex64(ellipk(k**2))

    E = complex64(ellipe(k**2))

    cm= (2*E-K)/K

    k1 = sqrt(1-k**2)

    xp = x[0]+complex(0,1)*x[1]
    xm = x[0]-complex(0,1)*x[1]
    S =  sqrt(K**2-4*xp*xm)
    SP = sqrt(K**2-4*xp**2)
    SM = sqrt(K**2-4*xm**2)
    SPM = sqrt(-k1**2*(K**2*k**2-4*xm*xp)+(xm-xp)**2)
    R = 2*K**2*k1**2-S**2-8*x[2]**2
    RM = complex(0,1)*SM**2*(xm*(2*k1**2-1)+xp)-(16*complex(0,1))*xm*x[2]**2
    RP = complex(0,1)*SM**2*(xp*(2*k1**2-1)+xm)+(16*complex(0,1))*xp*x[2]**2
    RMBAR=-complex(0,1)*SP**2*( xp*(2*k1**2-1)+xm ) +16*complex(0,1)*xp*x[2]**2
    RPBAR=-complex(0,1)*SP**2*( xm*(2*k1**2-1)+xp ) -16*complex(0,1)*xm*x[2]**2
    r=sqrt(x[0]**2+x[1]**2+x[2]**2)

    DM = [None,None,None,None]

    DM[0] = [None]*3
    DM[1] = [None]*3
    DM[2] = [None]*3
    DM[3] = [None]*3

    DM[0][0] = complex(0, 1) * (E * K * zeta[0] ** 2 - K ** 2 * zeta[0] ** 2 + 4 * zeta[0] ** 2 * x[0] ** 2 + complex(0, -4) * zeta[0] ** 2 * x[1] * x[0] + E * K - K ** 2 + 4 * x[0] ** 2 + complex(0, -8) * zeta[0] * x[2] * x[0] + complex(0, 4) * x[0] * x[1]) / (4 * zeta[0] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[0] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[0] - K ** 2 * zeta[0] ** 3 + 4 * x[0] ** 2 * zeta[0] + complex(0, -12) * zeta[0] ** 2 * x[0] * x[2] - 4 * zeta[0] ** 3 * x[1] ** 2 - 12 * zeta[0] ** 2 * x[1] * x[2] - K ** 2 * zeta[0] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[0] - 8 * zeta[0] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[1][0] = complex(0, 1) * (E * K * zeta[1] ** 2 - K ** 2 * zeta[1] ** 2 + 4 * zeta[1] ** 2 * x[0] ** 2 + complex(0, -4) * zeta[1] ** 2 * x[1] * x[0] + E * K - K ** 2 + 4 * x[0] ** 2 + complex(0, -8) * zeta[1] * x[2] * x[0] + complex(0, 4) * x[0] * x[1]) / (4 * zeta[1] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[1] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[1] - K ** 2 * zeta[1] ** 3 + 4 * x[0] ** 2 * zeta[1] + complex(0, -12) * zeta[1] ** 2 * x[0] * x[2] - 4 * zeta[1] ** 3 * x[1] ** 2 - 12 * zeta[1] ** 2 * x[1] * x[2] - K ** 2 * zeta[1] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[1] - 8 * zeta[1] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[2][0] = complex(0, 1) * (E * K * zeta[2] ** 2 - K ** 2 * zeta[2] ** 2 + 4 * zeta[2] ** 2 * x[0] ** 2 + complex(0, -4) * zeta[2] ** 2 * x[1] * x[0] + E * K - K ** 2 + 4 * x[0] ** 2 + complex(0, -8) * zeta[2] * x[2] * x[0] + complex(0, 4) * x[0] * x[1]) / (4 * zeta[2] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[2] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[2] - K ** 2 * zeta[2] ** 3 + 4 * x[0] ** 2 * zeta[2] + complex(0, -12) * zeta[2] ** 2 * x[0] * x[2] - 4 * zeta[2] ** 3 * x[1] ** 2 - 12 * zeta[2] ** 2 * x[1] * x[2] - K ** 2 * zeta[2] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[2] - 8 * zeta[2] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[3][0] = complex(0, 1) * (E * K * zeta[3] ** 2 - K ** 2 * zeta[3] ** 2 + 4 * zeta[3] ** 2 * x[0] ** 2 + complex(0, -4) * zeta[3] ** 2 * x[1] * x[0] + E * K - K ** 2 + 4 * x[0] ** 2 + complex(0, -8) * zeta[3] * x[2] * x[0] + complex(0, 4) * x[0] * x[1]) / (4 * zeta[3] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[3] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[3] - K ** 2 * zeta[3] ** 3 + 4 * x[0] ** 2 * zeta[3] + complex(0, -12) * zeta[3] ** 2 * x[0] * x[2] - 4 * zeta[3] ** 3 * x[1] ** 2 - 12 * zeta[3] ** 2 * x[1] * x[2] - K ** 2 * zeta[3] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[3] - 8 * zeta[3] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[0][1] = (complex(0, 4) * zeta[0] ** 2 * x[1] * x[0] + E * K * zeta[0] ** 2 + 4 * zeta[0] ** 2 * x[1] ** 2 + complex(0, 4) * x[0] * x[1] + 8 * zeta[0] * x[1] * x[2] - E * K - 4 * x[1] ** 2) / (4 * zeta[0] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[0] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[0] - K ** 2 * zeta[0] ** 3 + 4 * x[0] ** 2 * zeta[0] + complex(0, -12) * zeta[0] ** 2 * x[0] * x[2] - 4 * zeta[0] ** 3 * x[1] ** 2 - 12 * zeta[0] ** 2 * x[1] * x[2] - K ** 2 * zeta[0] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[0] - 8 * zeta[0] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[1][1] = (complex(0, 4) * zeta[1] ** 2 * x[1] * x[0] + E * K * zeta[1] ** 2 + 4 * zeta[1] ** 2 * x[1] ** 2 + complex(0, 4) * x[0] * x[1] + 8 * zeta[1] * x[1] * x[2] - E * K - 4 * x[1] ** 2) / (4 * zeta[1] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[1] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[1] - K ** 2 * zeta[1] ** 3 + 4 * x[0] ** 2 * zeta[1] + complex(0, -12) * zeta[1] ** 2 * x[0] * x[2] - 4 * zeta[1] ** 3 * x[1] ** 2 - 12 * zeta[1] ** 2 * x[1] * x[2] - K ** 2 * zeta[1] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[1] - 8 * zeta[1] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[2][1] = (complex(0, 4) * zeta[2] ** 2 * x[1] * x[0] + E * K * zeta[2] ** 2 + 4 * zeta[2] ** 2 * x[1] ** 2 + complex(0, 4) * x[0] * x[1] + 8 * zeta[2] * x[1] * x[2] - E * K - 4 * x[1] ** 2) / (4 * zeta[2] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[2] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[2] - K ** 2 * zeta[2] ** 3 + 4 * x[0] ** 2 * zeta[2] + complex(0, -12) * zeta[2] ** 2 * x[0] * x[2] - 4 * zeta[2] ** 3 * x[1] ** 2 - 12 * zeta[2] ** 2 * x[1] * x[2] - K ** 2 * zeta[2] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[2] - 8 * zeta[2] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[3][1] = (complex(0, 4) * zeta[3] ** 2 * x[1] * x[0] + E * K * zeta[3] ** 2 + 4 * zeta[3] ** 2 * x[1] ** 2 + complex(0, 4) * x[0] * x[1] + 8 * zeta[3] * x[1] * x[2] - E * K - 4 * x[1] ** 2) / (4 * zeta[3] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[3] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[3] - K ** 2 * zeta[3] ** 3 + 4 * x[0] ** 2 * zeta[3] + complex(0, -12) * zeta[3] ** 2 * x[0] * x[2] - 4 * zeta[3] ** 3 * x[1] ** 2 - 12 * zeta[3] ** 2 * x[1] * x[2] - K ** 2 * zeta[3] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[3] - 8 * zeta[3] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[0][2] = 2 * (-K ** 2 * k1 ** 2 * zeta[0] + complex(0, 2) * zeta[0] ** 2 * x[0] * x[2] + 2 * zeta[0] ** 2 * x[1] * x[2] + E * K * zeta[0] + complex(0, 2) * x[0] * x[2] + 4 * zeta[0] * x[2] ** 2 - 2 * x[1] * x[2]) / (4 * zeta[0] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[0] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[0] - K ** 2 * zeta[0] ** 3 + 4 * x[0] ** 2 * zeta[0] + complex(0, -12) * zeta[0] ** 2 * x[0] * x[2] - 4 * zeta[0] ** 3 * x[1] ** 2 - 12 * zeta[0] ** 2 * x[1] * x[2] - K ** 2 * zeta[0] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[0] - 8 * zeta[0] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[1][2] = 2 * (-K ** 2 * k1 ** 2 * zeta[1] + complex(0, 2) * zeta[1] ** 2 * x[0] * x[2] + 2 * zeta[1] ** 2 * x[1] * x[2] + E * K * zeta[1] + complex(0, 2) * x[0] * x[2] + 4 * zeta[1] * x[2] ** 2 - 2 * x[1] * x[2]) / (4 * zeta[1] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[1] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[1] - K ** 2 * zeta[1] ** 3 + 4 * x[0] ** 2 * zeta[1] + complex(0, -12) * zeta[1] ** 2 * x[0] * x[2] - 4 * zeta[1] ** 3 * x[1] ** 2 - 12 * zeta[1] ** 2 * x[1] * x[2] - K ** 2 * zeta[1] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[1] - 8 * zeta[1] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[2][2] = 2 * (-K ** 2 * k1 ** 2 * zeta[2] + complex(0, 2) * zeta[2] ** 2 * x[0] * x[2] + 2 * zeta[2] ** 2 * x[1] * x[2] + E * K * zeta[2] + complex(0, 2) * x[0] * x[2] + 4 * zeta[2] * x[2] ** 2 - 2 * x[1] * x[2]) / (4 * zeta[2] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[2] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[2] - K ** 2 * zeta[2] ** 3 + 4 * x[0] ** 2 * zeta[2] + complex(0, -12) * zeta[2] ** 2 * x[0] * x[2] - 4 * zeta[2] ** 3 * x[1] ** 2 - 12 * zeta[2] ** 2 * x[1] * x[2] - K ** 2 * zeta[2] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[2] - 8 * zeta[2] * x[2] ** 2 + 4 * x[1] * x[2])

    DM[3][2] = 2 * (-K ** 2 * k1 ** 2 * zeta[3] + complex(0, 2) * zeta[3] ** 2 * x[0] * x[2] + 2 * zeta[3] ** 2 * x[1] * x[2] + E * K * zeta[3] + complex(0, 2) * x[0] * x[2] + 4 * zeta[3] * x[2] ** 2 - 2 * x[1] * x[2]) / (4 * zeta[3] ** 3 * x[0] ** 2 + complex(0, -8) * zeta[3] ** 3 * x[0] * x[1] + 2 * K ** 2 * k1 ** 2 * zeta[3] - K ** 2 * zeta[3] ** 3 + 4 * x[0] ** 2 * zeta[3] + complex(0, -12) * zeta[3] ** 2 * x[0] * x[2] - 4 * zeta[3] ** 3 * x[1] ** 2 - 12 * zeta[3] ** 2 * x[1] * x[2] - K ** 2 * zeta[3] + complex(0, -4) * x[0] * x[2] + 4 * x[1] ** 2 * zeta[3] - 8 * zeta[3] * x[2] ** 2 + 4 * x[1] * x[2])

    return DM
Example #19
    def test_compressed(self):
        s = readsav(path.join(DATA_PATH, "various_compressed.sav"), verbose=False)

        assert_identical(s.i8u, np.uint8(234))
        assert_identical(s.f32, np.float32(-3.1234567e37))
        assert_identical(s.c64, np.complex128(1.1987253647623157e112 - 5.1987258887729157e307j))
        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
        assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_identical(s.arrays.b[0], np.array([4.0, 5.0, 6.0, 7.0], dtype=np.float32))
        assert_identical(s.arrays.c[0], np.array([np.complex64(1 + 2j), np.complex64(7 + 8j)]))
        assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=np.object))
Example #20
    def test_output_dtype(self):
        '''Test to see if the output_dtype property returns the correct thing
        '''
        self.assertEqual(self.fft.output_dtype, self.output_array.dtype)

        new_input_array = numpy.complex64(self.input_array)
        new_output_array = numpy.complex64(self.output_array)

        new_fft = FFTW(new_input_array, new_output_array)

        self.assertEqual(new_fft.output_dtype, new_output_array.dtype)
Example #21
 def test_cublasCgemv(self):
     a = (np.random.rand(2, 3)+1j*np.random.rand(2, 3)).astype(np.complex64)
     x = (np.random.rand(3, 1)+1j*np.random.rand(3, 1)).astype(np.complex64)
     a_gpu = gpuarray.to_gpu(a.T.copy())
     x_gpu = gpuarray.to_gpu(x)
     y_gpu = gpuarray.empty((2, 1), np.complex64)
     alpha = np.complex64(1.0)
     beta = np.complex64(0.0)
     cublas.cublasCgemv('n', 2, 3, alpha, a_gpu.gpudata, 2, x_gpu.gpudata,
                        1, beta, y_gpu.gpudata, 1)
     assert np.allclose(y_gpu.get(), np.dot(a, x))
Example #22
from numpy import complex64, sqrt
from scipy.special import ellipk, ellipe

def dzetas(zeta, x, k):

    K = complex64(ellipk(k**2))

    E = complex64(ellipe(k**2))

    cm= (2*E-K)/K

    k1 = sqrt(1-k**2)

    xp = x[0]+complex(0,1)*x[1]
    xm = x[0]-complex(0,1)*x[1]
    S =  sqrt(K**2-4*xp*xm)
    SP = sqrt(K**2-4*xp**2)
    SM = sqrt(K**2-4*xm**2)
    SPM = sqrt(-k1**2*(K**2*k**2-4*xm*xp)+(xm-xp)**2)
    R = 2*K**2*k1**2-S**2-8*x[2]**2
    RM = complex(0,1)*SM**2*(xm*(2*k1**2-1)+xp)-(16*complex(0,1))*xm*x[2]**2
    RP = complex(0,1)*SM**2*(xp*(2*k1**2-1)+xm)+(16*complex(0,1))*xp*x[2]**2
    RMBAR=-complex(0,1)*SP**2*( xp*(2*k1**2-1)+xm ) +16*complex(0,1)*xp*x[2]**2
    RPBAR=-complex(0,1)*SP**2*( xm*(2*k1**2-1)+xp ) -16*complex(0,1)*xm*x[2]**2
    r=sqrt(x[0]**2+x[1]**2+x[2]**2)

    DZ = [None]*4
    DZ[0] = [None]*3
    DZ[1] = [None]*3
    DZ[2] = [None]*3
    DZ[3] = [None]*3

    DZ[0][0] = -2 * ((x[1] + complex(0, 1) * x[0]) * zeta[0] ** 2 + 2 * x[2] * zeta[0] - x[1] + complex(0, 1) * x[0]) * (complex(0, 1) * zeta[0] ** 2 + complex(0, 1)) / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[0] ** 2 + 2 * x[2] * zeta[0] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[0] + 2 * x[2]) + K ** 2 * (4 * zeta[0] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[0]) / 4)

    DZ[1][0] = -2 * ((x[1] + complex(0, 1) * x[0]) * zeta[1] ** 2 + 2 * zeta[1] * x[2] - x[1] + complex(0, 1) * x[0]) * (complex(0, 1) * zeta[1] ** 2 + complex(0, 1)) / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[1] ** 2 + 2 * zeta[1] * x[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[1] + 2 * x[2]) + K ** 2 * (4 * zeta[1] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[1]) / 4)

    DZ[2][0] = -2 * ((x[1] + complex(0, 1) * x[0]) * zeta[2] ** 2 + 2 * x[2] * zeta[2] - x[1] + complex(0, 1) * x[0]) * (complex(0, 1) * zeta[2] ** 2 + complex(0, 1)) / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[2] ** 2 + 2 * x[2] * zeta[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[2] + 2 * x[2]) + K ** 2 * (4 * zeta[2] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[2]) / 4)

    DZ[3][0] = -2 * ((x[1] + complex(0, 1) * x[0]) * zeta[3] ** 2 + 2 * zeta[3] * x[2] - x[1] + complex(0, 1) * x[0]) * (complex(0, 1) * zeta[3] ** 2 + complex(0, 1)) / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[3] ** 2 + 2 * zeta[3] * x[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[3] + 2 * x[2]) + K ** 2 * (4 * zeta[3] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[3]) / 4)

    DZ[0][1] = -2 * ((x[1] + complex(0, 1) * x[0]) * zeta[0] ** 2 + 2 * x[2] * zeta[0] - x[1] + complex(0, 1) * x[0]) * (zeta[0] ** 2 - 1) / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[0] ** 2 + 2 * x[2] * zeta[0] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[0] + 2 * x[2]) + K ** 2 * (4 * zeta[0] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[0]) / 4)

    DZ[1][1] = -2 * ((x[1] + complex(0, 1) * x[0]) * zeta[1] ** 2 + 2 * zeta[1] * x[2] - x[1] + complex(0, 1) * x[0]) * (zeta[1] ** 2 - 1) / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[1] ** 2 + 2 * zeta[1] * x[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[1] + 2 * x[2]) + K ** 2 * (4 * zeta[1] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[1]) / 4)

    DZ[2][1] = -2 * ((x[1] + complex(0, 1) * x[0]) * zeta[2] ** 2 + 2 * x[2] * zeta[2] - x[1] + complex(0, 1) * x[0]) * (zeta[2] ** 2 - 1) / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[2] ** 2 + 2 * x[2] * zeta[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[2] + 2 * x[2]) + K ** 2 * (4 * zeta[2] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[2]) / 4)

    DZ[3][1] = -2 * ((x[1] + complex(0, 1) * x[0]) * zeta[3] ** 2 + 2 * zeta[3] * x[2] - x[1] + complex(0, 1) * x[0]) * (zeta[3] ** 2 - 1) / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[3] ** 2 + 2 * zeta[3] * x[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[3] + 2 * x[2]) + K ** 2 * (4 * zeta[3] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[3]) / 4)

    DZ[0][2] = -4 * ((x[1] + complex(0, 1) * x[0]) * zeta[0] ** 2 + 2 * x[2] * zeta[0] - x[1] + complex(0, 1) * x[0]) * zeta[0] / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[0] ** 2 + 2 * x[2] * zeta[0] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[0] + 2 * x[2]) + K ** 2 * (4 * zeta[0] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[0]) / 4)

    DZ[1][2] = -4 * ((x[1] + complex(0, 1) * x[0]) * zeta[1] ** 2 + 2 * zeta[1] * x[2] - x[1] + complex(0, 1) * x[0]) * zeta[1] / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[1] ** 2 + 2 * zeta[1] * x[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[1] + 2 * x[2]) + K ** 2 * (4 * zeta[1] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[1]) / 4)

    DZ[2][2] = -4 * ((x[1] + complex(0, 1) * x[0]) * zeta[2] ** 2 + 2 * x[2] * zeta[2] - x[1] + complex(0, 1) * x[0]) * zeta[2] / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[2] ** 2 + 2 * x[2] * zeta[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[2] + 2 * x[2]) + K ** 2 * (4 * zeta[2] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[2]) / 4)

    DZ[3][2] = -4 * ((x[1] + complex(0, 1) * x[0]) * zeta[3] ** 2 + 2 * zeta[3] * x[2] - x[1] + complex(0, 1) * x[0]) * zeta[3] / (2 * ((x[1] + complex(0, 1) * x[0]) * zeta[3] ** 2 + 2 * zeta[3] * x[2] - x[1] + complex(0, 1) * x[0]) * (2 * (x[1] + complex(0, 1) * x[0]) * zeta[3] + 2 * x[2]) + K ** 2 * (4 * zeta[3] ** 3 + 4 * (-2 * k1 ** 2 + 1) * zeta[3]) / 4)

    return DZ
Example #23
def testing_components_energy_density(k, x1, x2, x3):


    if (is_awc_multiple_root(k, x1, x2, x3) ):
        return 4

    if (is_awc_branch_point(k, x1, x2, x3) ):
        return 4

    zeta = calc_zeta(k ,x1, x2, x3)
    eta = calc_eta(k, x1, x2, x3)
    abel = calc_abel(k, zeta, eta)
    mu = calc_mu(k, x1, x2, x3, zeta, abel)
    x=[x1,x2,x3]

    K = complex64(ellipk(k**2))

    E = complex64(ellipe(k**2))

    cm= (2*E-K)/K

    k1 = sqrt(1-k**2)

    xp = x[0]+complex(0,1)*x[1]
    xm = x[0]-complex(0,1)*x[1]
    S =  sqrt(K**2-4*xp*xm)
    SP = sqrt(K**2-4*xp**2)
    SM = sqrt(K**2-4*xm**2)
    SPM = sqrt(-k1**2*(K**2*k**2-4*xm*xp)+(xm-xp)**2)
    R = 2*K**2*k1**2-S**2-8*x[2]**2
    RM = complex(0,1)*SM**2*(xm*(2*k1**2-1)+xp)-(16*complex(0,1))*xm*x[2]**2
    RP = complex(0,1)*SM**2*(xp*(2*k1**2-1)+xm)+(16*complex(0,1))*xp*x[2]**2
    RMBAR=-complex(0,1)*SP**2*( xp*(2*k1**2-1)+xm ) +16*complex(0,1)*xp*x[2]**2
    RPBAR=-complex(0,1)*SP**2*( xm*(2*k1**2-1)+xp ) -16*complex(0,1)*xm*x[2]**2
    r=sqrt(x[0]**2+x[1]**2+x[2]**2)

    DM = dmus(zeta, x, k)
    DZ = dzetas(zeta, x,k)
    DDM = ddmus(zeta, x, k)
    DDZ = ddzetas(zeta, x,k)

    GNUM = grams(zeta, mu, [x1, x2, x3], k)

    # DGS1 = dgrams1(zeta, mu, DM, DZ, x, k)
    #
    # DGS2 = dgrams2(zeta, mu, DM, DZ, x, k)
    #
    # DGS3 = dgrams3(zeta, mu, DM, DZ, x, k)

    # return  exp(-6 * mu[0])
    print(zeta, '\n', abel, '\n', mu, '\n')  # mu[0], abel[0], exp(-6*mu[0])
    return
Example #24
File: test_idl.py Project: hitej/meta-core
    def test_compressed(self):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message="warning: empty strings")
            s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)

        assert_identical(s.i8u, np.uint8(234))
        assert_identical(s.f32, np.float32(-3.1234567e+37))
        assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
        assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
        assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
        assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
        assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
        assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=np.object))
Example #25
 def test_cublasCgeam(self):
     a = (np.random.rand(2, 3)+1j*np.random.rand(2, 3)).astype(np.complex64)
     b = (np.random.rand(2, 3)+1j*np.random.rand(2, 3)).astype(np.complex64)
     a_gpu = gpuarray.to_gpu(a.copy())
     b_gpu = gpuarray.to_gpu(b.copy())
     c_gpu = gpuarray.zeros_like(a_gpu)
     alpha = np.complex64(np.random.rand()+1j*np.random.rand())
     beta = np.complex64(np.random.rand()+1j*np.random.rand())
     cublas.cublasCgeam(self.cublas_handle, 'n', 'n', 2, 3,
                        alpha, a_gpu.gpudata, 2,
                        beta, b_gpu.gpudata, 2,
                        c_gpu.gpudata, 2)
     assert np.allclose(c_gpu.get(), alpha*a+beta*b)
Example #26
    def transform_raw_data(self, data, n_int, fill_conjugate=False):
        """ Transform dada data into a useful visibility matrix

        Parameters
        ----------
        data: np.ndarray
            data array to transform into a visibility matrix
        n_int:
            number of integrations in data array
        fill_conjugate: bool
            Default False. Will compute upper half (conjugate) of full visibilty
            matrix if set to True.

        Notes
        -----
        This function dominates data processing time. Unless you need a full visibility
        matrix, do not run this with fill_conjugate=True, as this approximately doubles
        the amount of processing time.

        TODO: This may break if system endianness is different
        TODO: Add support for outputting in upper/lower triangular format
        """
        #print "#Converting", self.filename

        data = data.view(dtype=self.dtype).astype(np.float32)
        # Note: The real and imag components are stored separately
        data = data.reshape((n_int, 2, self.n_chans, self.matlen))

        # Scatter values into new full matrix
        fullmatrix = np.zeros((n_int, self.n_chans, self.n_input, self.n_input),
                              dtype=np.complex64)

        if not fill_conjugate:
            # Note cols then rows and conjugation (-1j) -- this is required
            data = data[..., 0, :, :] + np.complex64(-1j) * data[..., 1, :, :]
            fullmatrix[..., self.matcols, self.matrows] = data
        else:
            # Fill out the other (conjugate) triangle
            # Note rows then cols and no conjugation -- in contrast to above
            data = data[..., 0, :, :] + np.complex64(1j) * data[..., 1, :, :]
            fullmatrix[..., self.matrows, self.matcols] = data
            tri_inds = np.arange(self.n_input * (self.n_input + 1) // 2, dtype=np.uint32)
            rows, cols = self.triangular_coords(tri_inds)
            fullmatrix[..., cols, rows] = np.conj(fullmatrix[..., rows, cols])

        # Reorder so that pol products change fastest
        fullmatrix = fullmatrix.reshape(n_int, self.n_chans,
                                        self.n_station, self.n_pol,
                                        self.n_station, self.n_pol)
        fullmatrix = fullmatrix.transpose([0, 2, 4, 1, 3, 5])
        self.data = fullmatrix
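The scatter-and-conjugate step in the fill_conjugate branch amounts to filling one triangle of a Hermitian matrix and mirroring the conjugates into the other. A standalone sketch with hypothetical sizes:

# Fill the lower triangle of a 3x3 matrix, then mirror conjugates into
# the upper triangle, as the fill_conjugate branch does per channel.
import numpy as np
n = 3
rows, cols = np.tril_indices(n)
full = np.zeros((n, n), np.complex64)
full[rows, cols] = (rows + cols) + 1j * (rows - cols)  # real diagonal
full[cols, rows] = np.conj(full[rows, cols])
assert np.allclose(full, full.conj().T)  # Hermitian, as expected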
Example #27
    def test_different_dtypes_fail(self):
        in_shape = self.input_shapes["2d"]
        out_shape = self.output_shapes["2d"]

        axes = (-1,)
        a, b = self.create_test_arrays(in_shape, out_shape)

        a_ = numpy.complex64(a)
        b_ = numpy.complex128(b)
        self.assertRaisesRegex(ValueError, "Invalid scheme", FFTW, *(a_, b_))

        a_ = numpy.complex128(a)
        b_ = numpy.complex64(b)
        self.assertRaisesRegex(ValueError, "Invalid scheme", FFTW, *(a_, b_))
Example #28
File: main.py Project: lucasp0927/Cscalc
 def testc():
     print('-------------------------------------------')
     A = np.random.random((N,N))+1.0j*np.random.random((N,N))
     B = np.random.random((N,N))+1.0j*np.random.random((N,N))
     A = np.complex64(A)
     B = np.complex64(B)
     t1 = time.time()
     C_h = np.dot(A,B)
     t2 = time.time()
     print('np.dot took %0.3f ms' % ((t2-t1)*1000.0))
     t1 = time.time()
     C = cuda_cdot(A,B)
     t2 = time.time()
     print('cuda_dot took %0.3f ms' % ((t2-t1)*1000.0))
     print('norm', np.linalg.norm(C-C_h))
Example #29
 def test_cublasCscal(self):
     x = (np.random.rand(5)+1j*np.random.rand(5)).astype(np.complex64)
     x_gpu = gpuarray.to_gpu(x)
     alpha = np.complex64(np.random.rand()+1j*np.random.rand())
     cublas.cublasCscal(self.cublas_handle, x_gpu.size, alpha,
                        x_gpu.gpudata, 1)
     assert np.allclose(x_gpu.get(), alpha*x)
Example #30
    def test_byteswap(self):
        import numpy as np

        assert np.int64(123).byteswap() == 8863084066665136128
        a = np.complex64(1 + 2j).byteswap()
        assert repr(a.real).startswith("4.60060")
        assert repr(a.imag).startswith("8.96831")
Example #31
    def __init__(self, data_vec, harm=1):
        """
        Initializes the pixel instance by parsing the provided data. 
        
        Parameters
        ----------
        data_vec : 1D float numpy array
            Data contained within each pixel
        harm : unsigned int
            Harmonic of the BE waveform; absolute value of the wave type used to
            normalize the response waveform.
        """

        harm = abs(harm)
        if harm > 3 or harm < 1:
            harm = 1
            warn(
                'Error in BEPSndfPixel: invalid wave type / harmonic provided.'
            )

        # Begin parsing data:
        self.spatial_index = int(data_vec[1]) - 1

        self.spectrogram_length = int(data_vec[0])

        # calculate indices for parsing
        s1 = int(data_vec[2])  # total rows in pixel
        s2 = int(data_vec[3])  # total cols in pixel
        data_vec1 = data_vec[2:self.spectrogram_length]
        data_mat1 = data_vec1.reshape(s1, s2)
        spect_size1 = int(data_mat1[1, 0])  # total rows in spectrogram set
        self.num_bins = int(spect_size1 / 2)  # or, len(BE_bin_w)
        self.num_steps = int(data_mat1[1, 1])  # total cols in spectrogram set
        s3 = int(s1 - spect_size1)  #row index of beginning of spectrogram set
        s4 = int(s2 -
                 self.num_steps)  #col index of beginning of spectrogram set

        self.wave_label = data_mat1[2, 0]  # This is useless
        self.wave_modulation_type = data_mat1[
            2, 1]  # this is the one with useful information
        #print 'Pixel #',self.spatial_index,' Wave label: ', self.wave_label, ', Wave Type: ', self.wave_modulation_type

        # First get the information from the columns:
        FFT_BE_wave_real = data_mat1[s3:s3 - 0 + self.num_bins,
                                     1]  #real part of excitation waveform
        FFT_BE_wave_imag = data_mat1[s3 + self.num_bins:s3 - 0 + spect_size1,
                                     1]  #imaginary part of excitation waveform

        # Though typecasting the combination of the real and imaginary data looks fine in HDFviewer and Spyder, Labview sees such data as an array of clusters having "r" and "i" elements
        #self.FFT_BE_wave = np.complex64(FFT_BE_wave_real + 1j*FFT_BE_wave_imag)

        #complex excitation waveform !!! due to a problem in the acquisition software, this may not be normalized properly
        self.FFT_BE_wave = np.zeros(self.num_bins, dtype=np.complex64)
        self.FFT_BE_wave.real = FFT_BE_wave_real
        self.FFT_BE_wave.imag = FFT_BE_wave_imag

        del FFT_BE_wave_real, FFT_BE_wave_imag

        self.BE_bin_w = data_mat1[s3:s3 - 0 + self.num_bins,
                                  2]  # vector of band frequencies
        self.BE_bin_ind = data_mat1[
            s3 + self.num_bins:s3 - 0 + spect_size1,
            2]  # vector of band indices (out of all accessible frequencies below the Nyquist frequency)

        # Now look at the top few rows to get more information:
        self.DAQ_channel = data_mat1[2, 2]
        self.num_x_steps = int(data_mat1[3, 0])
        self.num_y_steps = int(data_mat1[4, 0])
        self.num_z_steps = int(data_mat1[5, 0])
        self.z_index = int(data_mat1[5, 1] - 1)
        self.y_index = int(data_mat1[4, 1] - 1)
        self.x_index = int(data_mat1[3, 1] - 1)

        self.step_ind_vec = data_mat1[0, s4:]  # vector of step indices
        self.DC_off_vec = data_mat1[1, s4:]  # vector of dc offset voltages
        self.AC_amp_vec = data_mat1[2, s4:]  # vector of ac amplitude voltages
        self.noise_floor_mat = data_mat1[
            3:6,
            s4:]  # matrix of noise floor data. Use this information to exclude bins during fitting
        #plot_group_list_mat = data_mat1[6:s3-2,s4:] # matrix of plot groups

        # Here come the optional parameter rows:
        self.deflVolt_vec = data_mat1[
            s3 - 2, s4:]  # vector of dc cantilever deflection
        # I think this is how the defl setpoint vec should be fixed:
        self.deflVolt_vec[np.isnan(self.deflVolt_vec)] = 0
        # so far, this vector seemed to match the DC offset vector....

        self.laser_spot_pos_vec = data_mat1[s3 - 1, s4:]  # NEVER used

        # Actual data for this pixel:
        spectrogram_real_mat = data_mat1[
            s3:s3 + self.num_bins, s4:]  #real part of response spectrogram
        spectrogram_imag_mat = data_mat1[
            s3 + self.num_bins:s3 + spect_size1,
            s4:]  #imaginary part of response spectrogram
        # Be consistent and ensure that the data is also stored as 64 bit complex as in the array creation
        self.spectrogram_mat = np.complex64(
            spectrogram_real_mat +
            1j * spectrogram_imag_mat)  #complex part of response spectrogram
        del spectrogram_real_mat, spectrogram_imag_mat

        self.spectrogram_mat = normalizeBEresponse(self.spectrogram_mat,
                                                   self.FFT_BE_wave, harm)

        #  Reshape as one column (it's free in Python anyway):
        temp_mat = self.spectrogram_mat.transpose()
        self.spectrogram_vec = temp_mat.reshape(self.spectrogram_mat.size)
Example #32
 def test_rec_read_c(self):
     self._test_rec_read(np.complex64(4.0 + 5.0j), record_read_c, 'c')
Example #33
import bart

#define forward and backward mapping between image space and k-space
def k2i(K):
    X = np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(K.copy(),axes=(0,1,2)),axes=(0,1,2),norm="ortho"),axes=(0,1,2))
    return X
def i2k(X):
    K = np.fft.ifftshift(np.fft.fftn(np.fft.fftshift(X.copy(),axes=(0,1,2)),axes=(0,1,2),norm="ortho"),axes=(0,1,2))
    return K

#load kspace data and sensitivity maps

f = h5py.File(os.path.join(sys.path[0], "data.h5"),'r')

#f = h5py.File('data.h5','r')
K = np.complex64(f["K"])
sMaps = np.complex64(f["sMaps"])
f.close()

print('Successfully loaded data, starting recon')

#combine coil data according to Roemer and take max intensity of image to scale regularization weight accordingly
sos = np.sqrt(np.sum(np.abs(sMaps[:,:,:,:,None,None,None])**2,3))+1e-6
rcomb = np.sum(k2i(K)*np.conj(sMaps[:,:,:,:,None,None,None]),3)/sos
regFactor = np.max(np.abs(rcomb.flatten()))
print('scaling Factor for recon: ', regFactor)

#set up Recon
regWeight = 0.012 #lambda in Cost function
blk = 16 #block size for locally low rank recon
bart_string = 'pics -u1 -w 1 -H -d5 -e -i 80 -R L:7:7:%.2e -b %d' % (regFactor*regWeight,blk) #define Bart strings
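The snippet is truncated before the reconstruction itself; with the BART Python wrapper the call would plausibly follow the bart(nargout, cmd, *arrays) convention, sketched here as an assumption rather than taken from the source:

#hypothetical continuation: run the pics recon on the loaded data
recon = bart.bart(1, bart_string, K, sMaps)
print('recon shape:', recon.shape)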
Example #34
File: libmat.py Project: hokiedsp/libmatpy
    def data(self):
        if self._infoonly:
            raise InfoOnlyArray()

        sz = self.size()
        dims = self.shape()

        def to_nd_array(c_pointer, dtype):
            return make_nd_array(c_pointer, sz, dims, dtype)

        if _libmx.mxIsChar_800(self._pm):
            s = cast(_libmx.mxGetChars_800(self._pm),
                     POINTER(c_wchar * sz)).contents.value
            if sz == 0:
                return ""
            elif len(dims) == 2 and dims[0] == 1:  # single string
                return s
            else:  # array of characters
                return np.array(list(s)).reshape(dims)

                # return to_nd_array(_libmx.mxGetChars_800(self._pm), 'U1')
        elif _libmx.mxIsStruct_800(self._pm):
            nfields = _libmx.mxGetNumberOfFields_800(self._pm)
            if sz == 0:  # create empty dictionary
                return dict()
            elif sz == 1:  # create dictionary
                return dict([(
                    _libmx.mxGetFieldNameByNumber_800(self._pm,
                                                      i).decode("utf-8"),
                    matlab_array(
                        _libmx.mxGetFieldByNumber_800(self._pm, 0, i),
                        False,
                        False,
                    ),
                ) for i in range(nfields)])
            else:
                return np.array(
                    [
                        matlab_array(
                            _libmx.mxGetFieldByNumber_800(self._pm, 0, i),
                            False, False) for i in range(sz)
                    ],
                    [(
                        _libmx.mxGetFieldNameByNumber_800(self._pm,
                                                          i).decode("utf-8"),
                        np.object_,
                    ) for i in range(nfields)],
                ).reshape(dims)
        elif _libmx.mxIsEmpty_800(self._pm):
            return None
        elif _libmx.mxIsCell_800(self._pm):  # create ndarray of matlab_arrays

            def cellisstr(i):
                pcm = _libmx.mxGetCell_800(self._pm, i)
                return (_libmx.mxIsChar_800(pcm)
                        and _libmx.mxGetNumberOfDimensions_800(pcm) == 2
                        and _libmx.mxGetDimensions_800(pcm)[0] == 1)

            if next((i for i in range(sz) if not cellisstr(i)), -1) < 0:
                # found cellstr
                return np.array([
                    cast(
                        _libmx.mxGetChars_800(_libmx.mxGetCell_800(
                            self._pm, i)),
                        POINTER(c_wchar * sz),
                    ).contents.value for i in range(sz)
                ]).reshape(dims)
            elif sz == 1:
                return matlab_array(_libmx.mxGetCell_800(self._pm, 0), False,
                                    False)
            else:
                return np.array(
                    [
                        matlab_array(_libmx.mxGetCell_800(self._pm, i), False,
                                     False) for i in range(sz)
                    ],
                    np.object_,
                ).reshape(dims)
        elif _libmx.mxIsLogical_800(self._pm):
            if sz == 1:
                return _libmx.mxIsLogicalScalarTrue_800(self._pm)
            else:
                return to_nd_array(_libmx.mxGetLogicals_800(self._pm), np.bool)
        else:  # numeric
            iscplx = _libmx.mxIsComplex_800(self._pm)

            cfgs = {
                0: {
                    "istype": _libmx.mxIsDouble_800,
                    "getdata": _libmx.mxGetDoubles_800,
                    "getcdata": _libmx.mxGetComplexDoubles_800,
                    "type": np.float64,
                    "ctype": np.complex128,
                },
                1: {
                    "istype": _libmx.mxIsSingle_800,
                    "getdata": _libmx.mxGetSingles_800,
                    "getcdata": _libmx.mxGetComplexSingles_800,
                    "type": np.float32,
                    "ctype": np.complex64,
                },
                2: {
                    "istype": _libmx.mxIsInt8_800,
                    "getdata": _libmx.mxGetInt8s_800,
                    "getcdata": _libmx.mxGetComplexInt8s_800,
                    "type": np.int8,
                    "ctype": np.dtype([("real", np.int8), ("imag", np.int8)]),
                },
                3: {
                    "istype": _libmx.mxIsUint8_800,
                    "getdata": _libmx.mxGetUint8s_800,
                    "getcdata": _libmx.mxGetComplexUint8s_800,
                    "type": np.uint8,
                    "ctype": np.dtype([("real", np.uint8),
                                       ("imag", np.uint8)]),
                },
                4: {
                    "istype": _libmx.mxIsInt16_800,
                    "getdata": _libmx.mxGetInt16s_800,
                    "getcdata": _libmx.mxGetComplexInt16s_800,
                    "type": np.int16,
                    "ctype": np.dtype([("real", np.int16),
                                       ("imag", np.int16)]),
                },
                5: {
                    "istype": _libmx.mxIsUint16_800,
                    "getdata": _libmx.mxGetUint16s_800,
                    "getcdata": _libmx.mxGetComplexUint16s_800,
                    "type": np.uint16,
                    "ctype": np.dtype([("real", np.uint16),
                                       ("imag", np.uint16)]),
                },
                6: {
                    "istype": _libmx.mxIsInt32_800,
                    "getdata": _libmx.mxGetInt32s_800,
                    "getcdata": _libmx.mxGetComplexInt32s_800,
                    "type": np.int32,
                    "ctype": np.dtype([("real", np.int32),
                                       ("imag", np.int32)]),
                },
                7: {
                    "istype": _libmx.mxIsUint32_800,
                    "getdata": _libmx.mxGetUint32s_800,
                    "getcdata": _libmx.mxGetComplexUint32s_800,
                    "type": np.uint32,
                    "ctype": np.dtype([("real", np.uint32),
                                       ("imag", np.uint32)]),
                },
                8: {
                    "istype": _libmx.mxIsInt64_800,
                    "getdata": _libmx.mxGetInt64s_800,
                    "getcdata": _libmx.mxGetComplexInt64s_800,
                    "type": np.int64,
                    "ctype": np.dtype([("real", np.int64),
                                       ("imag", np.int64)]),
                },
                9: {
                    "istype": _libmx.mxIsUint64_800,
                    "getdata": _libmx.mxGetUint64s_800,
                    "getcdata": _libmx.mxGetComplexUint64s_800,
                    "type": np.uint64,
                    "ctype": np.dtype([("real", np.uint64),
                                       ("imag", np.uint64)]),
                },
            }

            index = next(
                (i for i in range(len(cfgs)) if cfgs[i]["istype"](self._pm)),
                len(cfgs))
            if index < len(cfgs):
                cfg = cfgs[index]
                pdata = (cfg["getcdata"](self._pm)
                         if iscplx else cfg["getdata"](self._pm))
                if sz == 1:
                    if iscplx:
                        if index == 0:
                            return complex(pdata[0].real, pdata[0].imag)
                        elif index == 1:
                            return np.complex64(
                                1j * pdata[0].imag) + np.float32(pdata[0].real)
                        else:
                            return {
                                "real": pdata[0].real,
                                "imag": pdata[0].imag
                            }
                    else:
                        return pdata[0]
                return to_nd_array(pdata,
                                   (cfg["ctype"] if iscplx else cfg["type"]))
            else:
                raise Exception("Unsupported data type")
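The block above is the tail of a MATLAB mxArray reader: a dict maps MATLAB class IDs to a type predicate ("istype"), data accessors, and the matching NumPy dtypes, and the first predicate that fires selects the handler. A minimal sketch of the same dispatch-table pattern, with all names (readers, read_value) hypothetical rather than taken from the source:

import numpy as np

# Hypothetical dispatch table mirroring the istype/getdata/type layout above.
readers = {
    "int8": (lambda raw: isinstance(raw, bytes) and len(raw) == 1,
             lambda raw: np.frombuffer(raw, dtype=np.int8)),
    "int16": (lambda raw: isinstance(raw, bytes) and len(raw) == 2,
              lambda raw: np.frombuffer(raw, dtype=np.int16)),
}

def read_value(raw):
    # First matching predicate wins, as in the next(...) lookup above.
    for name, (istype, getdata) in readers.items():
        if istype(raw):
            return getdata(raw)
    raise TypeError("Unsupported data type")

print(read_value(b"\x7f"))        # [127]
print(read_value(b"\x00\x01"))    # [256]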
def funarrayscalar():
    import numpy
    return numpy.complex64(2 + 3j), numpy.float32(1.), numpy.int8(
        123), numpy.bool_(True)  # numpy.bool8 was removed in NumPy 2.0; numpy.bool_ is the portable spelling
Example #36
0
        ],
        [
            "testarr",
            np.array([12, 14, 90]),
            "testarr in extra_keywords is a list, array or dict",
            "Extra keyword testarr is of <class 'numpy.ndarray'>",
        ],
        [
            "test_long_key",
            True,
            "key test_long_key in extra_keywords is longer than 8 characters",
            None,
        ],
        [
            "complex1",
            np.complex64(5.3 + 1.2j),
            None,
            "Extra keyword complex1 is of <class 'numpy.complex64'>",
        ],
        [
            "complex2",
            6.9 + 4.6j,
            None,
            "Extra keyword complex2 is of <class 'complex'>",
        ],
    ),
)
def test_miriad_extra_keywords_errors(
    uv_in_paper, tmp_path, kwd_name, kwd_value, warnstr, errstr
):
    uv_in, uv_out, testfile = uv_in_paper
Example #37
0
    def test_complex(self):
        s = compute_fingerprint(1j)
        self.assertEqual(s, compute_fingerprint(1 + 0j))
        s = compute_fingerprint(np.complex64())
        self.assertEqual(compute_fingerprint(np.complex64(2.0)), s)
        self.assertNotEqual(compute_fingerprint(np.complex128()), s)
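compute_fingerprint itself is not shown in this extract; judging by the assertions, it fingerprints a value's type (and NumPy dtype) rather than its value, so 1j and 1 + 0j agree while complex64 and complex128 differ. A minimal stand-in consistent with that behavior, with all names hypothetical:

import numpy as np

def compute_fingerprint(value):
    # Hypothetical stand-in: depend only on the type family and dtype,
    # never on the value itself.
    if isinstance(value, np.generic):
        return ("numpy", value.dtype.name)
    return ("python", type(value).__name__)

assert compute_fingerprint(1j) == compute_fingerprint(1 + 0j)
assert compute_fingerprint(np.complex64(2.0)) == compute_fingerprint(np.complex64())
assert compute_fingerprint(np.complex128()) != compute_fingerprint(np.complex64())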
Example #38
0
File: arithmetic.py Project: zoj613/numpy
from typing import Any, List
import numpy as np
import numpy.typing as npt

# Can't directly import `np.float128` as it is not available on all platforms
f16: np.floating[npt._128Bit]

c16 = np.complex128()
f8 = np.float64()
i8 = np.int64()
u8 = np.uint64()

c8 = np.complex64()
f4 = np.float32()
i4 = np.int32()
u4 = np.uint32()

dt = np.datetime64(0, "D")
td = np.timedelta64(0, "D")

b_ = np.bool_()

b = bool()
c = complex()
f = float()
i = int()

AR_b: np.ndarray[Any, np.dtype[np.bool_]]
AR_u: np.ndarray[Any, np.dtype[np.uint32]]
AR_i: np.ndarray[Any, np.dtype[np.int64]]
AR_f: np.ndarray[Any, np.dtype[np.float64]]
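On the float128 caveat above: np.float128 is only defined on platforms whose long double is wide enough, whereas np.longdouble and np.clongdouble always exist and resolve to the platform's extended-precision types. A small portability check (a sketch, not part of the source stubs):

import numpy as np

print(np.longdouble, np.finfo(np.longdouble).bits)  # platform-dependent width
print(np.clongdouble)                               # complex counterpart
print("np.float128 available:", hasattr(np, "float128"))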
Example #39
0
    def test_byteswap(self):
        import numpy as np
        assert np.int64(123).byteswap() == 8863084066665136128
        a = np.complex64(1+2j).byteswap()
        assert repr(a.real).startswith('4.60060')
        assert repr(a.imag).startswith('8.96831')
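The constants asserted above follow directly from the byte layout; a short check of where they come from (the comments assume little-endian IEEE-754 storage):

import numpy as np

# int64: 123 is 0x7b in the lowest byte; swapping moves it to the highest
# byte, i.e. 123 << 56 == 8863084066665136128.
assert np.int64(123).byteswap() == np.int64(123 << 56)

# complex64: byteswap acts on the real and imaginary float32 halves
# independently. 1.0f is 0x3f800000; its reversed bytes, 0x0000803f, decode
# as the subnormal 32831 * 2**-149, roughly 4.6006e-41.
a = np.complex64(1 + 2j).byteswap()
print(a.real, a.imag)  # ~4.60060e-41, ~8.96831e-44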
Example #40
0
        expected = np.array([True])

        self._check_behavior(arr, expected)


m8_units = ['as', 'ps', 'ns', 'us', 'ms', 's', 'm', 'h', 'D', 'W', 'M', 'Y']

na_vals = [
    None,
    NaT,
    float('NaN'),
    complex('NaN'),
    np.nan,
    np.float64('NaN'),
    np.float32('NaN'),
    np.complex64(np.nan),
    np.complex128(np.nan),
    np.datetime64('NaT'),
    np.timedelta64('NaT'),
] + [np.datetime64('NaT', unit) for unit in m8_units
     ] + [np.timedelta64('NaT', unit) for unit in m8_units]

inf_vals = [
    float('inf'),
    float('-inf'),
    complex('inf'),
    complex('-inf'),
    np.inf,
    np.NINF,
]
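These lists feed a parametrized pandas test; the _check_behavior helper belongs to the source's test harness and is not shown. A minimal sketch of the property being exercised, using the public pandas.isna:

import numpy as np
import pandas as pd

# Every NA-like value is detected, at any datetime64/timedelta64 resolution;
# infinities are not considered NA.
for v in [None, np.nan, np.complex64(np.nan), np.datetime64('NaT'),
          np.timedelta64('NaT', 'ns')]:
    assert pd.isna(v)

for v in [float('inf'), float('-inf'), complex('inf'), np.inf]:
    assert not pd.isna(v)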
Example #41
0
def train_model(train_data):
    td = train_data

    summaries = tf.summary.merge_all()
    RestoreSession = False
    if not RestoreSession:
        td.sess.run(tf.global_variables_initializer())

    # lrval       = FLAGS.learning_rate_start
    learning_rate_start = myParams.myDict['learning_rate_start']
    lrval = myParams.myDict['learning_rate_start']
    start_time = time.time()
    last_summary_time = time.time()
    last_checkpoint_time = time.time()
    done = False
    batch = 0

    print("lrval %f" % (lrval))

    # assert FLAGS.learning_rate_half_life % 10 == 0

    # Cache test features and labels (they are small)
    test_feature, test_label = td.sess.run([td.test_features, td.test_labels])
    # test_label = td.sess.run([td.test_features, td.test_labels])

    G_LossV = np.zeros((1000000), dtype=np.float32)
    filename = os.path.join(myParams.myDict['train_dir'], 'TrainSummary.mat')

    feed_dictOut = {td.gene_minput: test_feature}
    gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)
    # _summarize_progress(td, test_label, gene_output, batch, 'out')

    feed_dict = {td.learning_rate: lrval}
    opsx = [td.gene_minimize, td.gene_loss]
    _, gene_loss = td.sess.run(opsx, feed_dict=feed_dict)

    # opsy = [td.gene_loss]
    # gene_loss = td.sess.run(opsy, feed_dict=feed_dict)

    # ops = [td.gene_minimize, td.disc_minimize, td.gene_loss, td.disc_real_loss, td.disc_fake_loss]
    # _, _, gene_loss, disc_real_loss, disc_fake_loss = td.sess.run(ops, feed_dict=feed_dict)

    batch += 1

    # run_metadata = tf.RunMetadata()
    gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)
    # gene_output = td.sess.run(td.gene_moutput, options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE, output_partition_graphs=True), feed_dict=feed_dictOut,run_metadata=run_metadata)
    # _summarize_progress(td, test_label, gene_output, batch, 'out')
    # with open("/tmp/run2.txt", "w") as out:
    #     out.write(str(run_metadata))

    # fetched_timeline = timeline.Timeline(run_metadata.step_stats)
    # chrome_trace = fetched_timeline.generate_chrome_trace_format()
    # with open('timeline_01.json', 'w') as f:
    #     f.write(chrome_trace)

    # tl = timeline.Timeline(run_metadata.step_stats)
    # print(tl.generate_chrome_trace_format(show_memory=True))
    # trace_file = tf.gfile.Open(name='timeline', mode='w')
    # trace_file.write(tl.generate_chrome_trace_format(show_memory=True))

    feed_dict = {td.learning_rate: lrval}
    # ops = [td.gene_minimize, td.disc_minimize, td.gene_loss, td.disc_real_loss, td.disc_fake_loss]

    opsx = [td.gene_minimize, td.gene_loss]
    _, gene_loss = td.sess.run(opsx, feed_dict=feed_dict)

    batch += 1

    gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)
    # _summarize_progress(td, test_label, gene_output, batch, 'out')

    # load model
    #saver.restore(sess,tf.train.latest_checkpoint('./'))
    # running model on data:test_feature
    RunOnData = False
    if RunOnData:
        filenames = tf.gfile.ListDirectory('DataAfterpMat')
        filenames = sorted(filenames)
        #filenames = [os.path.join('DataAfterpMat', f) for f in filenames]
        Ni = len(filenames)
        OutBase = myParams.myDict['SessionName'] + '_OutMat'
        tf.gfile.MakeDirs(OutBase)

        #pdb.set_trace()

        for index in range(Ni):
            print(index)
            print(filenames[index])
            CurData = scipy.io.loadmat(
                os.path.join('DataAfterpMat', filenames[index]))
            Data = CurData['CurData']
            Data = Data.reshape((1, 64, 64, 1))
            test_feature = np.kron(np.ones((16, 1, 1, 1)), Data)
            #test_feature = np.array(np.random.choice([0, 1], size=(16,64,64,1)), dtype='float32')

            feed_dictOut = {td.gene_minput: test_feature}
            gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)

            filenameOut = os.path.join(OutBase,
                                       filenames[index][:-4] + '_out.mat')

            SOut = {}
            SOut['X'] = gene_output[0]
            scipy.io.savemat(filenameOut, SOut)

    # pdb.set_trace()

    #_summarize_progress(td, test_feature, test_label, gene_output, batch, 'out')
    # to get value of var:
    # ww=td.sess.run(td.gene_var_list[1])

    if GT.getparam('ShowRealData') > 0:
        # ifilename=os.path.join('RealData', 'b.mat')

        if GT.getparam('InputMode') == 'RegridTry3F_B0T2S_ITS_MB':
            MB = GT.getparam('MB')
            nCh = GT.getparam('nccToUse')
            nTSC = GT.getparam('nTimeSegments')
            batch_size = myParams.myDict['batch_size']

            channelsIn = myParams.myDict['channelsIn']
            channelsOut = myParams.myDict['channelsOut']
            LabelsH = myParams.myDict['LabelsH']
            LabelsW = myParams.myDict['LabelsW']
            H = LabelsH
            W = LabelsW

            TimePoints_ms = GT.getparam('TimePoints_ms')
            SN = GT.getparam('SN')  # H,W,f8
            P = GT.getparam('P')  # H,W,f8
            nTraj = GT.getparam('nTraj')

            cCAIPIVecZ = GT.getparam('cCAIPIVecZ')  # MB,nTraj complex128
            TSBF = GT.getparam('TSBF')  # nTS,nTraj f64

            Rec0FN = '/opt/data/RealDataMB/meas_MID426_gBP_2dSpiral_multiecho_ASL_2mm_iso_run1_FID34170_2Echos/Sli06_Rec0.mat'
            B0TSFFN = '/opt/data/RealDataMB/meas_MID426_gBP_2dSpiral_multiecho_ASL_2mm_iso_run1_FID34170_2Echos/Sli06/B0TS.mat'
            SensDataFN = '/opt/data/RealDataMB/meas_MID426_gBP_2dSpiral_multiecho_ASL_2mm_iso_run1_FID34170_2Echos/Sli06/SensCC1.mat'
            RealDtFN = '/opt/data/RealDataMB/meas_MID426_gBP_2dSpiral_multiecho_ASL_2mm_iso_run1_FID34170_2Echos/Sli06/RealDataForNN.mat'

            Rec0F = scipy.io.loadmat(Rec0FN)
            Rec0 = Rec0F['Rec0']  # (96, 192) complex128
            Rec0MB = np.transpose(np.reshape(Rec0, [H, MB, W]),
                                  (0, 2, 1))  # H,W,MB complex128

            B0TSF = scipy.io.loadmat(B0TSFFN)
            CurB0 = B0TSF['CurB0']  # (96, 96, 2) f8, in Hz
            # B0TSF.keys()

            SensData = scipy.io.loadmat(SensDataFN)
            # SensData.keys()
            SensCCA = SensData['SensCCA']  # (96, 96, 16) complex128
            SensCCB = SensData['SensCCB']  # (96, 96, 16) complex128
            RealSensCCMB = np.stack((SensCCA, SensCCB),
                                    axis=3)  # (96, 96, 16,2) complex128
            RealSensCCMB = RealSensCCMB[:, :, :nCh, :]  # H,W,nCh,MB

            RealData = scipy.io.loadmat(RealDtFN)
            DataCC = RealData['DataCC']  # (10238, 16) (nTraj, ncc) complex64
            DataCC = DataCC[:, :nCh]  # (nTraj, nCh) complex64

            CurB0_Hz = CurB0[:, :, np.newaxis, :]

            RealTSC0 = np.complex64(
                np.exp(1j * 2 * np.pi * CurB0_Hz *
                       GT.NP_addDim(TimePoints_ms) / 1000))  # (96, 96, 7, 2)
            # Apply an estimated T2* decay of 20 ms on top of the B0 phase term
            T2S_est_ms = 20
            RealTSC0 = RealTSC0 * np.exp(
                -GT.NP_addDim(TimePoints_ms) / T2S_est_ms)

            WarmStart_ITS = Rec0MB[:, :,
                                   np.newaxis, :] * RealTSC0  # H,W,nTS,MB

            TSBFCAIPI = np.transpose(cCAIPIVecZ[:, :, np.newaxis],
                                     (1, 2, 0)) * np.transpose(
                                         TSBF[:, :, np.newaxis],
                                         (1, 0, 2))  # nTraj,nTS,MB c128
            cTSBFCAIPI = np.conj(
                np.transpose(
                    TSBFCAIPI[:, :, :, np.newaxis, np.newaxis],
                    (0, 1, 3, 2, 4)))  # nTraj,nTS,/nCh/,MB,/batch_size/

            DataCCMB = DataCC[:, np.newaxis, :, np.newaxis,
                              np.newaxis]  # nTraj,/nTS/,nCh,/MB/,/batch_size/
            DataCCMB = DataCCMB * cTSBFCAIPI  # nTraj,nTS,nCh,MB,/batch_size/

            # Padded=sps_x_dense_vecs(np.conj(np.transpose(P)),DataCCMB)
            Padded = np.conj(np.transpose(P)) * np.reshape(
                DataCCMB, (nTraj, -1))
            Padded = np.reshape(Padded, (H * 2, W * 2, nTSC, nCh, MB, 1))
            Padded = np.transpose(Padded, (2, 3, 4, 5, 0, 1))
            # Padded=np.reshape(Padded,(nTSC,nCh,MB,1,H*2,W*2))
            IFPadded = np.fft.ifft2(Padded, axes=(-2, -1))

            Cropped = IFPadded[:, :, :, :, :H, :
                               W]  # nTS,nCh,MB,/batch_size/,H,W
            CroppedP = np.transpose(
                Cropped, (5, 4, 1, 2, 0, 3))  # H,W,nCh,MB,nTS,/batch_size/
            Real_NUFFTHSig = np.sum(
                CroppedP *
                np.conj(RealSensCCMB[:, :, :, :, np.newaxis, np.newaxis]),
                axis=2)  # H,W,MB,nTS,/batch_size/
            # RealSensCCMB=RealSensCCMB[:,:,:nCh,:] # H,W,nCh,MB
            Real_NUFFTHSig = np.transpose(Real_NUFFTHSig, (4, 0, 1, 3, 2))
            Real_NUFFTHSig = Real_NUFFTHSig * np.conj(
                SN[np.newaxis, :, :, np.newaxis,
                   np.newaxis])  # /batch_size/,H,W,nTS,MB

            RealSensCCMB1D = GT.NP_ConcatRIOn0(
                np.reshape(RealSensCCMB, (-1, 1, 1)))
            Real_feature = RealSensCCMB1D
            RealTSC01D = GT.NP_ConcatRIOn0(np.reshape(RealTSC0, (-1, 1, 1)))
            Real_feature = np.concatenate((Real_feature, RealTSC01D), axis=0)
            WarmStart_ITS1D = GT.NP_ConcatRIOn0(
                np.reshape(WarmStart_ITS, (-1, 1, 1)))
            Real_feature = np.concatenate((Real_feature, WarmStart_ITS1D),
                                          axis=0)
            Real_NUFFTHSig1D = GT.NP_ConcatRIOn0(
                np.reshape(Real_NUFFTHSig, (-1, 1, 1)))
            Real_feature = np.concatenate((Real_feature, Real_NUFFTHSig1D),
                                          axis=0)

            Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

        if myParams.myDict['InputMode'] == 'SPEN_FC':
            ifilename = myParams.myDict['RealDataFN']
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']
            # RealData=np.reshape(RealData,)
            #             RealData=RealData
            Real_feature = RealData

        if myParams.myDict['InputMode'] == 'SPEN_Local':
            ifilename = myParams.myDict['RealDataFN']
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']
            #             RealData=RealData
            Real_feature = RealData

        if False:
            if RealData.ndim == 2:
                RealData = RealData.reshape(
                    (RealData.shape[0], RealData.shape[1], 1, 1))
            if RealData.ndim == 3:
                RealData = RealData.reshape(
                    (RealData.shape[0], RealData.shape[1], RealData.shape[2],
                     1))

        if myParams.myDict['InputMode'] in ('RegridTry1', 'RegridTry2'):
            ifilename = myParams.myDict['RealDataFN']
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']
            # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
            FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
            NMapCR = FullData['NMapCR']

            batch_size = myParams.myDict['batch_size']

            Real_feature = np.reshape(RealData[0], [RealData.shape[1]])
            Real_feature = np.take(Real_feature, NMapCR)
            Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

        if myParams.myDict['InputMode'] in ('RegridTry3', 'RegridTry3M',
                                            'RegridTry3F', 'RegridTry3FMB',
                                            'RegridTry3FME'):
            ifilename = myParams.myDict['RealDataFN']
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']
            batch_size = myParams.myDict['batch_size']
            nTraj = myParams.myDict['nTraj']
            RealDatancc = myParams.myDict['RealDatancc']
            nccInData = myParams.myDict['nccInData']

            # RealData=RealData
            # RealData=np.reshape(RealData,[batch_size,RealDatancc,nTraj,2])
            # RealData=RealData[:,0:nccInData,:,:]
            # RealData=np.reshape(RealData,[batch_size,nTraj,RealDatancc,2])
            # RealData=RealData[:,:,0:nccInData,:]
            # RealData=np.reshape(RealData,[batch_size,-1])
            RealData = RealData[0, :]
            RealData = np.tile(RealData, (batch_size, 1))
            Real_feature = np.reshape(
                RealData, [RealData.shape[0], RealData.shape[1], 1, 1])

        Real_dictOut = {td.gene_minput: Real_feature}

    # LearningDecayFactor=np.power(2,(-1/FLAGS.learning_rate_half_life))
    learning_rate_half_life = myParams.myDict['learning_rate_half_life']
    LearningDecayFactor = np.power(2, (-1 / learning_rate_half_life))

    # train_time=FLAGS.train_time
    train_time = myParams.myDict['train_time']

    QuickFailureTimeM = myParams.myDict['QuickFailureTimeM']
    QuickFailureThresh = myParams.myDict['QuickFailureThresh']

    summary_period = myParams.myDict['summary_period']  # in Minutes
    checkpoint_period = myParams.myDict['checkpoint_period']  # in Minutes

    DiscStartMinute = myParams.myDict['DiscStartMinute']

    gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)

    if myParams.myDict['ShowRealData'] > 0:
        gene_RealOutput = td.sess.run(td.gene_moutput, feed_dict=Real_dictOut)
        gene_output[0] = gene_RealOutput[0]

    Asuffix = 'out_%06.4f' % (gene_loss)
    _summarize_progress(td, test_label, gene_output, batch, Asuffix)

    print("Adding to saver:")
    var_listX = td.gene_var_list
    var_listX = [v for v in var_listX if "Bank" not in v.name]
    #     var_listX = tf.sort(var_listX)
    for line in var_listX:
        print("Adding " + line.name + '           ' +
              str(line.shape.as_list()))
    print("Saver var list end")

    saver = tf.train.Saver(var_listX)
    # _save_checkpoint(td, batch,G_LossV,saver)

    tf.get_default_graph().finalize()

    while not done:
        batch += 1
        gene_loss = disc_real_loss = disc_fake_loss = -1.234

        # elapsed = int(time.time() - start_time)/60
        CurTime = time.time()
        elapsed = (time.time() - start_time) / 60

        # Update learning rate
        lrval *= LearningDecayFactor
        if (learning_rate_half_life < 1000):  # in minutes
            lrval = learning_rate_start * np.power(
                0.5, elapsed / learning_rate_half_life)
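        # (added note) The per-batch multiply by 2**(-1/half_life) above halves
        # lrval once every learning_rate_half_life batches; this closed form
        # instead halves it once every learning_rate_half_life minutes of wall
        # time, overwriting the multiplicative decay whenever half_life < 1000.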

        #print("batch %d gene_l1_factor %f' " % (batch,FLAGS.gene_l1_factor))
        # if batch==200:
        if elapsed > DiscStartMinute:
            FLAGS.gene_l1_factor = 0.9

        RunDiscriminator = FLAGS.gene_l1_factor < 0.999

        feed_dict = {td.learning_rate: lrval}
        if RunDiscriminator:
            ops = [
                td.gene_minimize, td.disc_minimize, td.gene_loss,
                td.disc_real_loss, td.disc_fake_loss
            ]
            _, _, gene_loss, disc_real_loss, disc_fake_loss = td.sess.run(
                ops, feed_dict=feed_dict)
        else:
            ops = [
                td.gene_minimize, td.gene_loss, td.MoreOut, td.MoreOut2,
                td.MoreOut3
            ]
            _, gene_loss, MoreOut, MoreOut2, MoreOut3 = td.sess.run(
                ops, feed_dict=feed_dict)

        G_LossV[batch] = gene_loss

        # ggg: Force phase only var
        # VR = [v for v in tf.global_variables() if v.name == "gene/GEN_L004/add_Mult2DMCyCSharedOverFeat_weightR:0"][0]
        # VI = [v for v in tf.global_variables() if v.name == "gene/GEN_L004/add_Mult2DMCyCSharedOverFeat_weightI:0"][0]
        # VRX=td.sess.run(VR);
        # VIX=td.sess.run(VI);
        # VC=VRX+1J*VIX
        # Norm=np.abs(VC)
        # Norm[Norm == 0] = 0.00001
        # VRX=VRX/Norm
        # VIX=VIX/Norm
        # VR.load(VRX, td.sess)
        # VI.load(VIX, td.sess)

        # VR = [v for v in tf.global_variables() if v.name == "gene/GEN_L005/add_Mult2DMCxCSharedOverFeat_weightR:0"][0]
        # VI = [v for v in tf.global_variables() if v.name == "gene/GEN_L005/add_Mult2DMCxCSharedOverFeat_weightI:0"][0]
        # VRX=td.sess.run(VR);
        # VIX=td.sess.run(VI);
        # VC=VRX+1J*VIX
        # Norm=np.abs(VC)
        # Norm[Norm == 0] = 0.00001
        # VRX=VRX/Norm
        # VIX=VIX/Norm
        # VR.load(VRX, td.sess)
        # VI.load(VIX, td.sess)

        # VR = [v for v in tf.global_variables() if v.name == "gene/GEN_L004/einsum_weightR:0"][0]
        # VI = [v for v in tf.global_variables() if v.name == "gene/GEN_L004/einsum_weightI:0"][0]
        # VRX=td.sess.run(VR);
        # VIX=td.sess.run(VI);
        # HmngWnd=np.power(np.hamming(98),1)
        # HmngWnd=np.reshape(HmngWnd,[98,1,1])
        # VC=VRX +1j*VIX

        # FVC=GT.gfft(VC,dim=0)
        # FVC=FVC*HmngWnd
        # VC=GT.gifft(FVC,dim=0)
        # VYR=np.real(VC)
        # VYI=np.imag(VC)
        # VR.load(VYR, td.sess)
        # VI.load(VYI, td.sess)

        if batch % 10 == 0:

            # pdb.set_trace()

            # Show we are alive
            #print('Progress[%3d%%], ETA[%4dm], Batch [%4d], G_Loss[%3.3f], D_Real_Loss[%3.3f], D_Fake_Loss[%3.3f]' %
            #      (int(100*elapsed/train_time), train_time - int(elapsed), batch, gene_loss, disc_real_loss, disc_fake_loss))

            print(
                'Progress[%3d%%], ETA[%4dm], Batch [%4d], G_Loss[%3.3f], D_Real_Loss[%3.3f], D_Fake_Loss[%3.3f], MoreOut[%3.3f, %3.3f, %3.3f]'
                % (int(100 * elapsed / train_time), train_time - int(elapsed),
                   batch, gene_loss, disc_real_loss, disc_fake_loss, MoreOut,
                   MoreOut2, MoreOut3))

            # VLen=td.gene_var_list.__len__()
            # for i in range(0, VLen):
            #     print(td.gene_var_list[i].name);

            # print(VRX.dtype)
            # print(VRX)
            # exit()
            # var_23 = [v for v in tf.global_variables() if v.name == "gene/GEN_L020/C2D_weight:0"][0]
            # tmp=td.sess.run(td.gene_var_list[i])
            # v.load([2, 3], td.sess)

            if np.isnan(gene_loss):
                print('NAN!!')
                done = True

            # ggg: quick failure test
            if elapsed > QuickFailureTimeM:
                if gene_loss > QuickFailureThresh:
                    print('Quick failure!!')
                    done = True
                else:
                    QuickFailureTimeM = 10000000

            # Finished?
            current_progress = elapsed / train_time
            if current_progress >= 1.0:
                done = True

            StopFN = '/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/stop.a'
            if os.path.isfile(StopFN):
                print('Stop file used!!')
                done = True
                try:
                    tf.gfile.Remove(StopFN)
                except Exception:
                    pass

            # Update learning rate
            # if batch % FLAGS.learning_rate_half_life == 0:
            #     lrval *= .5

        # if batch % FLAGS.summary_period == 0:
        if (CurTime - last_summary_time) / 60 > summary_period:
            # Show progress with test features
            # feed_dict = {td.gene_minput: test_feature}
            gene_output = td.sess.run(td.gene_moutput, feed_dict=feed_dictOut)

            if myParams.myDict['ShowRealData'] > 0:
                gene_RealOutput = td.sess.run(td.gene_moutput,
                                              feed_dict=Real_dictOut)
                gene_output[0] = gene_RealOutput[0]

            Asuffix = 'out_%06.4f' % (gene_loss)
            _summarize_progress(td, test_label, gene_output, batch, Asuffix)

            last_summary_time = time.time()

        # if batch % FLAGS.checkpoint_period == 0:
        SaveCheckpoint_ByTime = (CurTime -
                                 last_checkpoint_time) / 60 > checkpoint_period
        CheckpointFN = '/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/save.a'
        SaveCheckPointByFile = os.path.isfile(CheckpointFN)
        if SaveCheckPointByFile:
            tf.gfile.Remove(CheckpointFN)

        if SaveCheckpoint_ByTime or SaveCheckPointByFile:
            last_checkpoint_time = time.time()
            # Save checkpoint
            _save_checkpoint(td, batch, G_LossV, saver)

        RunOnAllFN = '/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/RunOnAll.a'
        RunOnAllFNByFile = os.path.isfile(RunOnAllFN)
        if RunOnAllFNByFile:
            tf.gfile.Remove(RunOnAllFN)

            for r in range(1, 81):
                ifilenamePrefix = myParams.myDict['LoadAndRunOnData_Prefix']
                #                 ifilename=ifilenamePrefix +  f'{r:02}' + '.mat'
                ifilename = ifilenamePrefix + '%02d.mat' % (r)
                RealData = scipy.io.loadmat(ifilename)
                RealData = RealData['Data']

                if RealData.ndim == 2:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1], 1, 1))
                if RealData.ndim == 3:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1],
                         RealData.shape[2], 1))

                Real_feature = RealData

                if myParams.myDict['InputMode'] in ('RegridTry1',
                                                    'RegridTry2'):
                    # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
                    FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
                    NMapCR = FullData['NMapCR']

                    batch_size = myParams.myDict['batch_size']

                    Real_feature = np.reshape(RealData[0], [RealData.shape[1]])
                    Real_feature = np.take(Real_feature, NMapCR)
                    Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

                Real_dictOut = {td.gene_minput: Real_feature}

                gene_RealOutput = td.sess.run(td.gene_moutput,
                                              feed_dict=Real_dictOut)

                OnRealData = {}
                OnRealDataM = gene_RealOutput
                #                 filenamex = 'OnRealData' + f'{r:02}' + '.mat'
                filenamex = 'OnRealData' + '%02d.mat' % (r)

                LoadAndRunOnData_OutP = myParams.myDict[
                    'LoadAndRunOnData_OutP']
                filename = os.path.join(LoadAndRunOnData_OutP, filenamex)
                OnRealData['x'] = OnRealDataM
                scipy.io.savemat(filename, OnRealData)

            print('Saved recon of real data')

    _save_checkpoint(td, batch, G_LossV, saver)

    print('Finished training!')
Example #42
0
def complex(x, y):
  return x + np.complex64(1j) * y
Example #43
0
def conj(x):
  return np.conj(x) + np.complex64(0)
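These two one-liners look like kernels for a JIT/tracing test in the source project: one builds a complex64 from two reals, the other conjugates while forcing a complex dtype. A quick check of the identity they satisfy together, assuming float32 inputs:

import numpy as np

x, y = np.float32(1.5), np.float32(-2.0)
z = x + np.complex64(1j) * y                    # complex(x, y) from above
assert z.dtype == np.complex64
# conj(complex(x, y)) == complex(x, -y)
assert np.conj(z) + np.complex64(0) == x + np.complex64(1j) * (-y)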
Example #44
0
    def test_numpy_scalar_complex(self):
        x = np.complex64(np.random.rand() + 1j * np.random.rand())
        x_rec = self.encode_decode(x)
        assert_equal(x, x_rec)
        assert_equal(type(x), type(x_rec))
Example #45
0
    def GiveModelTessel(self,
                        Image,
                        DicoImager,
                        iFacet,
                        NormIm,
                        Sphe,
                        SpacialWeight,
                        ToGrid=False,
                        ChanSel=None,
                        ApplyNorm=True):

        nch, npol, NPixOut, _ = Image.shape

        N1 = DicoImager[iFacet]["NpixFacetPadded"]
        N1NonPadded = DicoImager[iFacet]["NpixFacet"]  # unpadded facet size; the extract had "NpixFacetPadded" here, which would force dx == 0
        dx = (N1 - N1NonPadded) // 2

        xc, yc = DicoImager[iFacet]["pixCentral"]
        #x0,x1,y0,y1=DicoImager[iFacet]["pixExtent"]
        #xc,yc=(x0+x1)//2,(y0+y1)//2

        Aedge, Bedge = GiveEdges(xc, yc, NPixOut, N1 // 2, N1 // 2, N1)
        #Bedge,Aedge=GiveEdges(N1//2,N1//2,N1,yc,xc,NPixOut)
        x0d, x1d, y0d, y1d = Aedge
        x0p, x1p, y0p, y1p = Bedge
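        # (added note) Aedge and Bedge are matching (x0,x1,y0,y1) windows:
        # Aedge indexes the full NPixOut-wide image around (xc, yc), Bedge the
        # corresponding region of the N1-wide padded facet, so the per-channel
        # copy below pastes the facet cutout into the right place.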
        #print "xxA:",x0d,x1d
        #print "xxB:",x0p,x1p
        SumFlux = 1.
        ModelIm = np.zeros((nch, npol, N1, N1), dtype=np.float32)

        T = ClassTimeIt.ClassTimeIt("ClassImToGrid")
        T.disable()

        if ChanSel is None:
            CSel = range(nch)
        else:
            CSel = ChanSel

        SumFlux = 0

        for ch in CSel:
            for pol in range(npol):
                #ModelIm[ch,pol][x0p:x1p,y0p:y1p]=Image[ch,pol].T[::-1,:].real[x0d:x1d,y0d:y1d]
                #ModelIm[ch,pol][x0p:x1p,y0p:y1p]=Image[ch,pol].real[x0d:x1d,y0d:y1d]

                ModelIm[ch, pol][x0p:x1p, y0p:y1p] = Image[ch,
                                                           pol][x0d:x1d,
                                                                y0d:y1d].real

                if (ModelIm[ch, pol] == 0).all():
                    continue

                T.timeit("0")

                M = ModelIm[ch, pol][dx:dx + N1NonPadded + 1,
                                     dx:dx + N1NonPadded + 1].copy()
                T.timeit("1")

                ModelIm[ch, pol].fill(0)
                T.timeit("2")
                ModelIm[ch, pol][dx:dx + N1NonPadded + 1,
                                 dx:dx + N1NonPadded + 1] = M[:, :]

                #ModelCutOrig=ModelIm[ch,pol].copy()

                T.timeit("3")
                #ind =np.where(np.abs(ModelIm)==np.max(np.abs(ModelIm)))

                ##print "!!!!!!!!!!!!!!!!!!!!!!"
                if ApplyNorm:
                    # #print NormIm.max()
                    # #print np.count_nonzero(np.isnan(NormIm))
                    # #print np.count_nonzero(np.isinf(NormIm))
                    # print NormIm.min()
                    # np.save("NormIm",NormIm)
                    # stop
                    ModelIm[ch, pol][x0p:x1p, y0p:y1p] /= NormIm[x0d:x1d,
                                                                 y0d:y1d].real

                #ModelCutOrig_GNorm=NormIm[x0d:x1d,y0d:y1d].real.copy()

                T.timeit("4")
                if ApplyNorm:
                    ModelIm[ch, pol][x0p:x1p,
                                     y0p:y1p] *= SpacialWeight[x0p:x1p,
                                                               y0p:y1p]
                indPos = np.where(ModelIm[ch, pol] > 0)
                SumFlux += np.sum(ModelIm[ch, pol][indPos])

                ModelCutOrig_SW = SpacialWeight[x0p:x1p, y0p:y1p].copy()

                #ModelCutOrig_GNorm_SW_Sphe_CorrT=ModelIm[ch,pol].copy()
                T.timeit("5")
                #SumFlux=np.sum(ModelIm)

                if ApplyNorm:
                    ModelIm[ch, pol][x0p:x1p, y0p:y1p] /= Sphe[x0p:x1p,
                                                               y0p:y1p].real
                    # ModelIm[ch, pol][x0p:x1p, y0p:y1p] *= ModelCutOrig_SW / Sphe[x0p:x1p,
                    #                                                         y0p:y1p].real  # LB - added *SW

                #ModelCutOrig_Sphe=Sphe[x0p:x1p,y0p:y1p].real.copy()

                T.timeit("6")
                ModelIm[ch, pol][Sphe < 1e-3] = 0
                T.timeit("7")
                ModelIm[ch, pol] = ModelIm[ch, pol].T[::-1, :]
                T.timeit("8")
                #ModelCutOrig_GNorm_SW_Sphe_CorrT=ModelIm[ch,pol].copy()

                #return True, ModelCutOrig, ModelCutOrig_GNorm, ModelCutOrig_SW, ModelCutOrig_Sphe, ModelCutOrig_GNorm_SW_Sphe_CorrT

        #print iFacet,DicoImager[iFacet]["l0m0"],DicoImager[iFacet]["NpixFacet"],DicoImager[iFacet]["NpixFacetPadded"],SumFlux
        # if np.max(np.abs(ModelIm))>1:
        #     print ind

        #if np.abs(SumFlux)>1: stop

        # #print iFacet,np.max(ModelIm)

        # #return ModelIm, None
        # #Padding=self.GD["Image"]["Padding"]

        T.timeit("9")
        SumFlux /= nch

        if ToGrid:
            ModelIm *= (self.OverS * N1)**2
            if SumFlux != 0:
                Grid = np.complex64(
                    self.FFTWMachine.fft(np.complex64(ModelIm), ChanList=CSel))
            else:
                Grid = np.complex64(ModelIm)

            return Grid, SumFlux
        elif ApplyNorm:
            ModelIm *= (self.OverS * N1)**2

            return ModelIm, SumFlux
        else:
            return ModelIm, SumFlux
Example #46
0
# Assumed imports for this extract (not shown in the source):
#   from numpy import complex64, sqrt, conj, trace, matmul, matrix, mat
#   from scipy.special import ellipk, ellipe
def energy_density(k, x1, x2, x3):   # Returns sentinels on failure: -1 for a multiple root, -2 for a branch point, -3 if the evaluation raises.

    try:
        if (is_awc_multiple_root(k, x1, x2, x3) ):
            return -1

        if (is_awc_branch_point(k, x1, x2, x3) ):
            return -2

        zeta = calc_zeta(k ,x1, x2, x3)
        eta = calc_eta(k, x1, x2, x3)
        abel = calc_abel(k, zeta, eta)
        mu = calc_mu(k, x1, x2, x3, zeta, abel)
        x=[x1,x2,x3]

        K = complex64(ellipk(k**2))

        E = complex64(ellipe(k**2))

        cm= (2*E-K)/K

        k1 = sqrt(1-k**2)

        xp = x[0]+complex(0,1)*x[1]
        xm = x[0]-complex(0,1)*x[1]
        S =  sqrt(K**2-4*xp*xm)
        SP = sqrt(K**2-4*xp**2)
        SM = sqrt(K**2-4*xm**2)
        SPM = sqrt(-k1**2*(K**2*k**2-4*xm*xp)+(xm-xp)**2)
        R = 2*K**2*k1**2-S**2-8*x[2]**2
        RM = complex(0,1)*SM**2*(xm*(2*k1**2-1)+xp)-(16*complex(0,1))*xm*x[2]**2
        RP = complex(0,1)*SM**2*(xp*(2*k1**2-1)+xm)+(16*complex(0,1))*xp*x[2]**2
        RMBAR=-complex(0,1)*SP**2*( xp*(2*k1**2-1)+xm ) +16*complex(0,1)*xp*x[2]**2
        RPBAR=-complex(0,1)*SP**2*( xm*(2*k1**2-1)+xp ) -16*complex(0,1)*xm*x[2]**2
        r=sqrt(x[0]**2+x[1]**2+x[2]**2)

        DM = dmus(zeta, x, k)
        DZ = dzetas(zeta, x,k)
        DDM = ddmus(zeta, x, k)
        DDZ = ddzetas(zeta, x,k)

        GNUM = grams(zeta, mu, [x1, x2, x3], k)

        inv_gram = matrix(GNUM).I

        higgs = phis(zeta, mu, [x1, x2, x3], k)

        DGS1 = dgrams1(zeta, mu, DM, DZ, x, k)

        DGS2 = dgrams2(zeta, mu, DM, DZ, x, k)

        DGS3 = dgrams3(zeta, mu, DM, DZ, x, k)


        # Using the hermiticity properties it is faster to evaluate each matrix
        # entry just once: having evaluated the *12 element, the *21 element is
        # minus its conjugate.
        # DHS1 = mat([[ dphis111(zeta, mu, DM, DZ, [x1, x2, x3], k), dphis112(zeta, mu, DM, DZ, [x1, x2, x3], k)],
        #            [ dphis121(zeta, mu, DM, DZ, [x1, x2, x3], k), dphis122(zeta, mu, DM, DZ, [x1, x2, x3], k)]])
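        # (added note) Concretely, every 2x2 block below is assembled as
        #     mat([[a11, a12], [-conj(a12), a22]])
        # so only the 11, 12 and 22 entries are ever evaluated.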

        DH112 = dphis112(zeta, mu, DM, DZ, [x1, x2, x3], k)

        DHS1 = mat([[ dphis111(zeta, mu, DM, DZ, [x1, x2, x3], k), DH112   ],
                    [ -conj(DH112), dphis122(zeta, mu, DM, DZ, [x1, x2, x3], k)]])

        DH212 = dphis212(zeta, mu, DM, DZ, [x1, x2, x3], k)

        DHS2 = mat([[ dphis211(zeta, mu, DM, DZ, [x1, x2, x3], k), DH212   ],
                    [ -conj(DH212), dphis222(zeta, mu, DM, DZ, [x1, x2, x3], k)]])

        DH312 = dphis312(zeta, mu, DM, DZ, [x1, x2, x3], k)

        DHS3 = mat([[ dphis311(zeta, mu, DM, DZ, [x1, x2, x3], k), DH312   ],
                    [ -conj(DH312), dphis322(zeta, mu, DM, DZ, [x1, x2, x3], k)]])

        DDGS112 = ddgrams112(zeta, mu, DM, DZ, DDM, DDZ, [x1, x2, x3], k)

        DDGS1 = mat([[ ddgrams111(zeta, mu, DM, DZ, DDM,  DDZ, [x1, x2, x3], k), DDGS112 ],
                     [ -conj(DDGS112), ddgrams122(zeta, mu, DM, DZ, DDM, DDZ, [x1, x2, x3], k)]])

        DDGS212 = ddgrams212(zeta, mu, DM, DZ, DDM, DDZ, [x1, x2, x3], k)

        DDGS2 = mat([[ ddgrams211(zeta, mu, DM, DZ, DDM,  DDZ, [x1, x2, x3], k), DDGS212 ],
                     [ -conj(DDGS212) , ddgrams222(zeta, mu, DM, DZ, DDM, DDZ, [x1, x2, x3], k)]])

        DDGS312 = ddgrams312(zeta, mu, DM, DZ, DDM, DDZ, [x1, x2, x3], k)

        DDGS3 = mat([[ ddgrams311(zeta, mu, DM, DZ, DDM,  DDZ, [x1, x2, x3], k), DDGS312 ],
                     [ -conj(DDGS312), ddgrams322(zeta, mu, DM, DZ, DDM, DDZ, [x1, x2, x3], k)]])

        DDHS111 = ddphis111(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        DDHS112 = ddphis112(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        # DDHS121 = ddphis121(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        DDHS122 = ddphis122(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)

        DDHS1 = mat( [[DDHS111, DDHS112], [ -conj(DDHS112),DDHS122]])

        DDHS211 = ddphis211(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        DDHS212 = ddphis212(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        # DDHS221 = ddphis221(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        DDHS222 = ddphis222(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)

        DDHS2 = mat( [[DDHS211, DDHS212], [-conj(DDHS212),DDHS222]])

        DDHS311 = ddphis311(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        DDHS312 = ddphis312(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        # DDHS321 = ddphis321(zeta, mu,DM, DZ, DDM,  DDZ, [x1, x2, x3], k)
        DDHS322 = ddphis322(zeta, mu, DM, DZ, DDM,  DDZ, [x1, x2, x3], k)

        DDHS3 = mat( [[DDHS311, DDHS312], [-conj(DDHS312),DDHS322]])

        ed1 = trace(matmul( matmul(DDHS1, inv_gram) -2* matmul( matmul(DHS1 , inv_gram), matmul(DGS1, inv_gram)) \
                            + matmul(higgs, matmul( 2* matmul(matmul(inv_gram,DGS1), matmul(inv_gram, DGS1)), inv_gram)  - matmul(matmul(inv_gram, DDGS1), inv_gram)),
                            matmul(higgs,inv_gram)) ) \
              + trace( matmul(matmul(DHS1, inv_gram) - matmul(matmul(higgs, inv_gram), matmul(DGS1, inv_gram)),
                              matmul(DHS1, inv_gram) - matmul(matmul(higgs, inv_gram), matmul(DGS1, inv_gram))))

        ed2 = trace(matmul( matmul(DDHS2, inv_gram) -2* matmul( matmul(DHS2 , inv_gram), matmul(DGS2, inv_gram)) \
                            + matmul(higgs, matmul( 2* matmul(matmul(inv_gram,DGS2), matmul(inv_gram, DGS2)), inv_gram)  - matmul(matmul(inv_gram, DDGS2), inv_gram)),
                            matmul(higgs,inv_gram)) ) \
              + trace( matmul(matmul(DHS2, inv_gram) - matmul(matmul(higgs, inv_gram), matmul(DGS2, inv_gram)),
                              matmul(DHS2, inv_gram) - matmul(matmul(higgs, inv_gram), matmul(DGS2, inv_gram))))

        ed3 = trace(matmul( matmul(DDHS3, inv_gram) -2* matmul( matmul(DHS3 , inv_gram), matmul(DGS3, inv_gram)) \
                            + matmul(higgs, matmul( 2* matmul(matmul(inv_gram,DGS3), matmul(inv_gram, DGS3)), inv_gram)  - matmul(matmul(inv_gram, DDGS3), inv_gram)),
                            matmul(higgs,inv_gram)) ) \
              + trace( matmul(matmul(DHS3, inv_gram) - matmul(matmul(higgs, inv_gram), matmul(DGS3, inv_gram)),
                              matmul(DHS3, inv_gram) - matmul(matmul(higgs, inv_gram), matmul(DGS3, inv_gram))))

        # energy_density = -(ed1 + ed2 + ed3).real

        return  -(ed1 + ed2 + ed3).real
    except Exception:  # any numerical failure is mapped to the -3 sentinel
        return -3
Example #47
0
def _train():

    # LoadAndRunOnData=False
    LoadAndRunOnData = myParams.myDict['LoadAndRunOnData'] > 0
    if LoadAndRunOnData:
        # Setup global tensorflow state
        sess, summary_writer = setup_tensorflow()

        # Prepare directories
        # filenames = prepare_dirs(delete_train_dir=False)

        # Setup async input queues
        # features, labels = srez_input.setup_inputs(sess, filenames)
        features, labels = srez_input.setup_inputs(sess, 1)

        # Create and initialize model
        [gene_minput, gene_moutput,
         gene_output, gene_var_list,
         disc_real_output, disc_fake_output, disc_var_list] = \
                srez_modelBase.create_model(sess, features, labels)

        # Restore variables from checkpoint
        print("Adding to saver:")
        var_listX = gene_var_list
        var_listX = [v for v in var_listX if "Bank" not in v.name]
        for line in var_listX:
            print("Adding " + line.name + '           ' +
                  str(line.shape.as_list()))
        print("Saver var list end")

        saver = tf.train.Saver(var_listX)
        # saver = tf.train.Saver()
        filename = 'checkpoint_new'
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS2_dataNeighborhoodRCB0__2018-06-08_16-17-56_checkpoint', filename)
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS2_dataNeighborhoodRCB0__2018-06-09_19-44-17_checkpoint', filename)
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS__2018-06-29_10-39-13_checkpoint', filename)
        checkpointP = myParams.myDict['LoadAndRunOnData_checkpointP']
        filename = os.path.join(checkpointP, filename)

        saver.restore(sess, filename)

        if myParams.myDict['Mode'] in ('RegridTry1', 'RegridTry1C',
                                       'RegridTry1C2', 'RegridTry1C2_TS',
                                       'RegridTry1C2_TS2'):
            FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
            NMapCR = FullData['NMapCR']

        for r in range(1, myParams.myDict['HowManyToRun']):
            # ifilename='/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RealData/b_Ben14May_Sli5_r' +  f'{r:02}' + '.mat'
            # ifilename='/media/a/DATA/14May18/Ben/meas_MID109_gBP_VD11_U19_4min_FID17944/RealData/Sli11_r' +  f'{r:02}' + '.mat'
            if myParams.myDict['InputMode'] == 'Cart3SB':
                # print('Loaded SensMaps, Shape %d %d %d %d' % (SensMapsSz[0],SensMapsSz[1],SensMapsSz[2],SensMapsSz[3]))
                print('Cart3SB running on real data %d' % r)
                # feature  shape: (1048576, 1, 1) <dtype: 'float32'>
                batch_size = myParams.myDict['batch_size']
                # RealData=np.zeros((batch_size,1048576, 1, 1),np.float32)
                # RealData=np.zeros((batch_size,640000, 1, 1),np.float32)

                # Simulating RealData from ITS, Sens
                MB = GT.getparam('MB')
                TimePoints_ms = GT.getparam('TimePoints_ms')
                nTSC = TimePoints_ms.shape[0]
                nCh = GT.getparam('nccToUse')

                LabelsH = myParams.myDict['LabelsH']
                LabelsW = myParams.myDict['LabelsW']

                H = LabelsH
                W = LabelsW

                SnsFN = '/opt/data/CCSensMaps.mat'
                fS = h5py.File(SnsFN, 'r')
                SensMaps = fS['SensCC']
                SensMaps = SensMaps['real'] + 1j * SensMaps['imag']
                SensMapsSz = SensMaps.shape
                print('r Loaded SensMaps, Shape %d %d %d %d' %
                      (SensMapsSz[0], SensMapsSz[1], SensMapsSz[2],
                       SensMapsSz[3]))
                SensMaps = SensMaps[:, :, :, :nCh]

                NumSensMapsInFile = SensMaps.shape[0]
                # IdxS=15
                for b in range(0, MB):
                    # if b==1:
                    #     IdxB2=tf.random_uniform([1],minval=12,maxval=19,dtype=tf.int32)
                    #     IdxS=IdxS+IdxB2[0]
                    #     IdxS=tf.cond(IdxS[0]>=NumSensMapsInFile, lambda: IdxS-NumSensMapsInFile, lambda: IdxS)

                    # Sens=np.squeeze(SensMaps[IdxS,:,:,:],axis=0)
                    Sens = (SensMaps[15, :, :, :])

                    Sens = Sens[:H, :W, :nCh]

                    # Sens = tf.image.random_flip_left_right(Sens)
                    # Sens = tf.image.random_flip_up_down(Sens)
                    # uS=tf.random_uniform([1])
                    # Sens=tf.cond(uS[0]<0.5, lambda: tf.identity(Sens), lambda: tf.image.rot90(Sens))
                    SensMsk = GT.NP_addDim(
                        np.sum(np.abs(Sens), axis=2) > 0).astype(np.complex64)
                    Sens = GT.NP_addDim(Sens)

                    if b == 0:
                        SensMB = Sens
                        SensMskMB = SensMsk

                    # else:
                    #     SensMB=tf.concat([SensMB,Sens],axis=3) #     SensMB H W nCh MB
                    #     SensMskMB=tf.concat([SensMskMB,SensMsk],axis=2) #     SensMskMB H W MB

                # nToLoad=myParams.myDict['nToLoad']
                # LoadAndRunOnData=myParams.myDict['LoadAndRunOnData']>0
                # if LoadAndRunOnData:
                nToLoad = 300

                print('r loading images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
                GREBaseP = '/opt/data/'
                SFN = GREBaseP + 'All_Orientation-0x.mat'
                f = h5py.File(SFN, 'r')
                I = f['CurSetAll'][0:nToLoad]
                print('r Loaded images ' + time.strftime("%Y-%m-%d %H:%M:%S"))

                SendTSCest = GT.getparam('SendTSCest') > 0
                HamPow = GT.getparam('HamPow')

                # def TFexpix(X): return tf.exp(tf.complex(tf.zeros_like(X),X))
                def NPexpix(X):
                    return np.exp(1j * X)

                for b in range(0, MB):
                    # TFI = tf.constant(np.int16(I))
                    # Idx=tf.random_uniform([1],minval=0,maxval=I.shape[0],dtype=tf.int32)
                    Idx = 133

                    Data4 = (I[Idx, :, :, :])
                    # Data4=tf.squeeze(tf.slice(I,[Idx[0],0,0,0],[1,-1,-1,-1]),axis=0)
                    # Data4 = tf.image.random_flip_left_right(Data4)
                    # Data4 = tf.image.random_flip_up_down(Data4)

                    # u1=tf.random_uniform([1])
                    # Data4=tf.cond(u1[0]<0.5, lambda: tf.identity(Data4), lambda: tf.image.rot90(Data4))

                    # Data4 = tf.random_crop(Data4, [H, W, 4])
                    # Data4 = tf.random_crop(Data4, [:H, :W, 4])
                    Data4 = Data4[:H, :W, :]

                    # M=tf.slice(Data4,[0,0,0],[-1,-1,1])
                    # Ph=tf.slice(Data4,[0,0,1],[-1,-1,1])
                    # feature=tf.cast(M,tf.complex64)*TFexpix(Ph)
                    M = Data4[:, :, 0]
                    Ph = Data4[:, :, 1]
                    feature = M.astype(np.complex64) * NPexpix(Ph)

                    feature = GT.NP_addDim(feature) * SensMskMB[:, :, b:b + 1]

                    T2S_ms = Data4[:, :, 2]
                    # T2S_ms = tf.where( T2S_ms<1.5, 10000 * tf.ones_like( T2S_ms ), T2S_ms )
                    T2S_ms[T2S_ms < 1.5] = 10000

                    B0_Hz = Data4[:, :, 3]
                    # B0_Hz=M*0

                    # T2S_ms = tf.where( tf.is_nan(T2S_ms), 10000 * tf.ones_like( T2S_ms ), T2S_ms )
                    T2S_ms[np.isnan(T2S_ms)] = 10000
                    # B0_Hz = tf.where( tf.is_nan(B0_Hz), tf.zeros_like( B0_Hz ), B0_Hz )
                    B0_Hz[np.isnan(B0_Hz)] = 0

                    if SendTSCest:
                        # HamPowA=10
                        HamPowA = HamPow
                        HamA = np.roll(np.hamming(H), np.int32(H / 2))
                        HamA = np.power(HamA, HamPowA)
                        HamXA = np.reshape(HamA, (1, H, 1))
                        HamYA = np.reshape(HamA, (1, 1, W))

                        B0_Hz_Smoothed = np.transpose(
                            GT.NP_addDim(B0_Hz.astype(np.complex64)),
                            (2, 0, 1))
                        B0_Hz_Smoothed = np.fft.fft2(B0_Hz_Smoothed)
                        B0_Hz_Smoothed = B0_Hz_Smoothed * HamXA
                        B0_Hz_Smoothed = B0_Hz_Smoothed * HamYA
                        B0_Hz_Smoothed = np.fft.ifft2(B0_Hz_Smoothed)
                        B0_Hz_Smoothed = np.transpose(B0_Hz_Smoothed,
                                                      (1, 2, 0))
                        B0_Hz_Smoothed = np.real(B0_Hz_Smoothed)
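                        # (added note) Multiplying by a Hamming taper in
                        # k-space (fft2 -> taper -> ifft2) low-pass filters
                        # the field map, so B0_Hz_Smoothed is a smooth B0
                        # estimate from which the TSCest phase terms are built.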

                        TSCest = np.exp(1j * 2 * np.pi *
                                        (B0_Hz_Smoothed * TimePoints_ms /
                                         1000).astype(np.complex64))
                        # TSCest=np.ones(TSCest.shape).astype(np.complex64)
                        print('TSCest shape: ' + str(TSCest.shape))
                        # TSCest=TSCest*0+1
                        # print('TSCest shape: ' + str(TSCest.shape))
                        # print('reducing B0')
                        # print('B0_Hz shape: ' + str(B0_Hz.shape))
                        # print('B0_Hz_Smoothed shape: ' + str(B0_Hz_Smoothed.shape))
                        # B0_Hz=B0_Hz-np.squeeze(B0_Hz_Smoothed)
                        # print('B0_Hz shape: ' + str(B0_Hz.shape))

                    # urand_ms=tf.random_uniform([1])*12
                    # urand_sec=(tf.random_uniform([1])*2-1)*3/1000

                    # feature=feature*tf.cast(tf.exp(-urand_ms/T2S_ms),tf.complex64)
                    # feature=feature*TFexpix(2*np.pi*B0_Hz*urand_sec)

                    mx = M.max()
                    mx = np.maximum(mx, 1)
                    mx = mx.astype(np.complex64)

                    feature = feature / mx

                    CurIWithPhase = feature

                    TSCM = np.exp(-TimePoints_ms / GT.NP_addDim(T2S_ms))
                    TSCP = np.exp(1j * 2 * np.pi *
                                  (GT.NP_addDim(B0_Hz) * TimePoints_ms /
                                   1000).astype(np.complex64))
                    TSC = TSCM.astype(np.complex64) * TSCP
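                    # (added note) TSC is the per-voxel signal-evolution model:
                    # magnitude decay exp(-t/T2*) times phase accrual
                    # exp(1j*2*pi*B0*t), sampled at each entry of TimePoints_ms.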

                    ITSbase = CurIWithPhase * TSC  # ITSbase is H,W,nTSC

                    TSC = GT.NP_addDim(TSC)
                    ITSbase = GT.NP_addDim(ITSbase)
                    if b == 0:
                        CurIWithPhaseMB = CurIWithPhase
                        TSCMB = TSC
                        ITSbaseMB = ITSbase
                        if SendTSCest:
                            TSCest = GT.NP_addDim(TSCest)
                            TSCMBest = TSCest
                    # else:
                    #     CurIWithPhaseMB=tf.concat([CurIWithPhaseMB,CurIWithPhase],axis=2) #     CurIWithPhaseMB H W MB
                    #     TSCMB=tf.concat([TSCMB,TSC],axis=3) #     TSCMB H W nTSC MB
                    #     ITSbaseMB=tf.concat([ITSbaseMB,ITSbase],axis=3) #     ITSbaseMB H W nTSC MB
                    #     if SendTSCest:
                    #         TSCMBest=tf.stack([TSCMBest,TSCest],axis=3)
                print('r ok 2')
                ITS_P = np.transpose(
                    GT.NP_addDim(ITSbaseMB),
                    (4, 0, 1, 2, 3))  # /batch_size/,H,W,nTSC,MB

                Msk3 = np.zeros((H, W, nTSC, 1, 1, 1))

                PEShifts = GT.getparam('PEShifts')
                PEJump = GT.getparam('PEJump')
                print('r Using PEShifts')
                for i in range(nTSC):
                    Msk3[PEShifts[i]::PEJump, :, i, :, :, :] = 1

                Msk3 = np.complex64(Msk3)
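                # (added note) Msk3 is a per-time-segment Cartesian sampling
                # mask: segment i keeps every PEJump-th phase-encode row,
                # offset by PEShifts[i], giving an interleaved undersampling
                # pattern across the nTSC segments.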

                # GT.setparam('CartMask',Msk3)

                Sens6 = SensMB[:, :, np.newaxis, :, :,
                               np.newaxis]  # H,W,/nTS/,nCh,MB,/batch_size/

                # AHA_ITS=GT.Cartesian_OPHOP_ITS_MB(ITS_P,Sens6,Msk3)

                ITS = np.transpose(ITSbaseMB, (0, 3, 2, 1))  # H, nTSC, W
                ITS = np.reshape(ITS, (H, W * nTSC * MB, 1))
                ITS_RI = GT.NP_ConcatRIOn2(ITS)

                Sensc = SensMB
                Sens1D = GT.NP_ConcatRIOn0(np.reshape(Sensc, (-1, 1, 1)))
                feature = Sens1D

                AHA_ITS = GT.NP_Cartesian_OPHOP_ITS_MB(ITS_P, Sens6, Msk3)
                # new simpler approach
                if SendTSCest:
                    TSCMBest_P = np.transpose(
                        GT.NP_addDim(TSCMBest),
                        (4, 0, 1, 2, 3))  # /batch_size/,H,W,nTSC,MB
                    AHA_ITS = AHA_ITS * np.conj(TSCMBest_P)

                #         send AHA_ITS
                AHA_ITS_1D = GT.NP_ConcatRIOn0(np.reshape(AHA_ITS, (-1, 1, 1)))
                feature = np.concatenate((feature, AHA_ITS_1D), axis=0)

                if SendTSCest:
                    TSCest1D = GT.NP_ConcatRIOn0(
                        np.reshape(TSCMBest_P, (-1, 1, 1)))
                    feature = np.concatenate((feature, TSCest1D), axis=0)

                RealData = np.tile(feature, (batch_size, 1, 1, 1))

                # End simulating RealData
                Real_feature = RealData
            else:
                ifilenamePrefix = myParams.myDict['LoadAndRunOnData_Prefix']
                #             ifilename=ifilenamePrefix +  f'{r:02}' + '.mat'
                ifilename = ifilenamePrefix + '%02d.mat' % (r)
                RealData = scipy.io.loadmat(ifilename)
                RealData = RealData['Data']

                if RealData.ndim == 2:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1], 1, 1))
                if RealData.ndim == 3:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1],
                         RealData.shape[2], 1))

                Real_feature = RealData

                # if myParams.myDict['Mode'] == 'RegridTry1' or myParams.myDict['Mode'] == 'RegridTry1C' or myParams.myDict['Mode'] == 'RegridTry1C2' or myParams.myDict['Mode'] == 'RegridTry1C2_TS' or myParams.myDict['Mode'] == 'RegridTry1C2_TS2':
                #     batch_size=myParams.myDict['batch_size']

                #     Real_feature=np.reshape(RealData[0],[RealData.shape[1]])
                #     Real_feature=np.take(Real_feature,NMapCR)
                #     Real_feature=np.tile(Real_feature, (batch_size,1,1,1))

            if myParams.myDict['InputMode'] in ('RegridTry1', 'RegridTry2'):
                # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
                FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
                NMapCR = FullData['NMapCR']

                batch_size = myParams.myDict['batch_size']

                Real_feature = np.reshape(RealData[0], [RealData.shape[1]])
                Real_feature = np.take(Real_feature, NMapCR)
                Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

            Real_dictOut = {gene_minput: Real_feature}

            gene_RealOutput = sess.run(gene_moutput, feed_dict=Real_dictOut)

            OnRealData = {}
            OnRealDataM = gene_RealOutput
            #             filenamex = 'OnRealData' + f'{r:02}' + '.mat'
            filenamexBase = 'OnRealData' + '%02d' % (r)
            filenamex = filenamexBase + '.mat'

            LoadAndRunOnData_OutP = myParams.myDict['LoadAndRunOnData_OutP']
            filename = os.path.join(LoadAndRunOnData_OutP, filenamex)
            OnRealData['x'] = OnRealDataM
            scipy.io.savemat(filename, OnRealData)

            image = np.sqrt(
                np.square(OnRealDataM[0, -H:, :(W * 3), 0]) +
                np.square(OnRealDataM[0, -H:, :(W * 3), 1]))
            filenamep = filenamexBase + '.png'
            filename = os.path.join(LoadAndRunOnData_OutP, filenamep)
            imageio.imwrite(filename, image)

        print('Saved recon of real data')
        exit()

    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=True)

    # Separate training and test sets
    #train_filenames = all_filenames[:-FLAGS.test_vectors]
    train_filenames = all_filenames
    #test_filenames  = all_filenames[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    #pdb.set_trace()

    # ggg Signal Bank stuff:
    if myParams.myDict['BankSize'] > 0:
        if myParams.myDict['InputMode'] == 'RegridTry3FMB':
            BankSize = myParams.myDict['BankSize'] * 2

            # BankInit=np.zeros([BankSize,myParams.myDict['DataH'],1,1])
            # LBankInit=np.zeros([BankSize,myParams.myDict['LabelsH'],myParams.myDict['LabelsW'], 2])
            with tf.variable_scope("aaa"):
                Bank = tf.get_variable(
                    "Bank",
                    shape=[BankSize, myParams.myDict['DataH'], 1, 1],
                    dtype=tf.float32,
                    trainable=False)
                LBank = tf.get_variable("LBank",
                                        shape=[
                                            BankSize,
                                            myParams.myDict['LabelsH'],
                                            myParams.myDict['LabelsW'], 2
                                        ],
                                        dtype=tf.float32,
                                        trainable=False)
                # LBank=tf.get_variable("LBank",initializer=tf.cast(LBankInit, tf.float32),dtype=tf.float32,trainable=False)
        else:
            BankSize = myParams.myDict['BankSize']

            BankInit = np.zeros([BankSize, myParams.myDict['DataH'], 1, 1])
            LBankInit = np.zeros([
                BankSize, myParams.myDict['LabelsH'],
                myParams.myDict['LabelsW'], 2
            ])
            with tf.variable_scope("aaa"):
                # Bank=tf.get_variable("Bank",initializer=tf.cast(BankInit, tf.float32),dtype=tf.float32)
                Bank = tf.get_variable(
                    "Bank",
                    shape=[BankSize, myParams.myDict['DataH'], 1, 1],
                    dtype=tf.float32,
                    trainable=False)
                LBank = tf.get_variable("LBank",
                                        shape=[
                                            BankSize,
                                            myParams.myDict['LabelsH'],
                                            myParams.myDict['LabelsW'], 2
                                        ],
                                        dtype=tf.float32,
                                        trainable=False)
                # LBank=tf.get_variable("LBank",initializer=tf.cast(LBankInit, tf.float32),dtype=tf.float32)

        init_new_vars_op = tf.variables_initializer([Bank, LBank])
        sess.run(init_new_vars_op)
    # ggg end Signal Bank stuff:

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(
        sess, train_filenames)
    # test_features, test_labels = srez_input.setup_inputs(sess, train_filenames,TestStuff=True)
    test_features = train_features
    test_labels = train_labels
    #test_features,  test_labels  = srez_input.setup_inputs(sess, test_filenames)

    print('starting ' + time.strftime("%Y-%m-%d %H:%M:%S"))
    print('train_features %s' % (train_features))
    print('train_labels %s' % (train_labels))

    # Add some noise during training (think denoising autoencoders)
    noise_level = myParams.myDict['noise_level']
    AddNoise = noise_level > 0.0
    if AddNoise:
        noisy_train_features = train_features + tf.random_normal(
            train_features.get_shape(), stddev=noise_level)
    else:
        noisy_train_features = train_features

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
            srez_modelBase.create_model(sess, noisy_train_features, train_labels)

    # gene_VarNamesL=[];
    # for line in gene_var_list: gene_VarNamesL.append(line.name+'           ' + str(line.shape.as_list()))
    # gene_VarNamesL.sort()

    # for line in gene_VarNamesL: print(line)
    # # var_23 = [v for v in tf.global_variables() if v.name == "gene/GEN_L020/C2D_weight:0"][0]

    # for line in sess.graph.get_operations(): print(line)
    # Gen3_ops=[]
    # for line in sess.graph.get_operations():
    #     if 'GEN_L003' in line.name:
    #         Gen3_ops.append(line)

    #     # LL=QQQ.outputs[0]

    # for x in Gen3_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    # GenC2D_ops= [v for v in sess.graph.get_operations()]

    # GenC2D_ops= [v for v in tf.get_operations() if "weight" in v.name]
    # GenC2D_ops= [v for v in GenC2D_ops if "C2D" in v.name]
    # for x in GenC2D_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    # for x in GenC2D_ops: print(x.name)

    AEops = [
        v for v in sess.graph.get_operations()
        if "AE" in v.name and not ("_1/" in v.name)
    ]
    # AEops = [v for v in td.sess.graph.get_operations() if "Pixel" in v.name and not ("_1/" in v.name) and not ("opti" in v.name) and not ("Assign" in v.name) and not ("read" in v.name) and not ("Adam" in v.name)]
    AEouts = [v.outputs[0] for v in AEops]
    varsForL1 = AEouts
    # varsForL1=AEouts[0:-1]
    # varsForL1=AEouts[1:]

    # for line in sess.graph.get_operations():
    #     if 'GEN_L003' in line.name:
    #         Gen3_ops.append(line)

    #     # LL=QQQ.outputs[0]

    # for x in Gen3_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    print("Vars for l2 loss:")
    varws = [
        v for v in tf.global_variables()
        if (("weight" in v.name) or ("ConvNet" in v.name))
    ]
    varsForL2 = [v for v in varws if "C2D" in v.name]
    varsForL2 = [v for v in varsForL2 if "disc" not in v.name]
    varsForL2 = [v for v in varsForL2 if "bias" not in v.name]
    for line in varsForL2:
        print(line.name + '           ' + str(line.shape.as_list()))

    print("Vars for Phase-only loss:")
    varws = [v for v in tf.global_variables() if "weight" in v.name]
    varsForPhaseOnly = [v for v in varws if "SharedOverFeat" in v.name]
    for line in varsForPhaseOnly:
        print(line.name + '           ' + str(line.shape.as_list()))

    # pdb.set_trace()

    gene_loss, MoreOut, MoreOut2, MoreOut3 = srez_modelBase.create_generator_loss(
        disc_fake_output, gene_output, train_features, train_labels, varsForL1,
        varsForL2, varsForPhaseOnly)
    disc_real_loss, disc_fake_loss = \
                     srez_modelBase.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_modelBase.create_optimizers(gene_loss, gene_var_list, disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())

    #pdb.set_trace()
    # ggg: to restore session
    RestoreSession = False
    if RestoreSession:
        saver = tf.train.Saver()
        filename = 'checkpoint_new'
        filename = os.path.join(myParams.myDict['checkpoint_dir'], filename)
        saver.restore(sess, filename)

    srez_train.train_model(train_data)
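
# --- Editor's sketch (illustrative, not from the original repo) ---
# The "Signal Bank" block above uses a TF1.x pattern worth isolating: declare
# non-trainable variables, then initialize only them with tf.variables_initializer
# so the rest of the graph's variables are left untouched. Names and shapes below
# are hypothetical.
import tensorflow as tf

sess = tf.Session()
with tf.variable_scope("bank_demo"):
    bank = tf.get_variable("Bank", shape=[8, 128, 1, 1],
                           dtype=tf.float32, trainable=False)
sess.run(tf.variables_initializer([bank]))          # initialize just this variable
print(sess.run(tf.is_variable_initialized(bank)))   # True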
Example #48
import os
import sys
import argparse
import datetime as dt
import h5py
import numpy as np

from mintpy.objects import timeseries, geometry, sensor
from mintpy.defaults.template import get_template_content
from mintpy.utils import ptime, readfile
from mintpy import info

BOOL_ZERO = np.bool_(0)
INT_ZERO = np.int16(0)
FLOAT_ZERO = np.float32(0.0)
CPX_ZERO = np.complex64(0.0)
COMPRESSION = 'lzf'

################################################################
TEMPLATE = get_template_content('hdfeos5')

EXAMPLE = """example:
  save_hdfeos5.py geo/geo_timeseries_ERA5_ramp_demErr.h5
  save_hdfeos5.py timeseries_ERA5_ramp_demErr.h5 --tc temporalCoherence.h5 --asc avgSpatialCoh.h5 -m maskTempCoh.h5 -g inputs/geometryGeo.h5
"""


def create_parser():
    parser = argparse.ArgumentParser(
        description='Convert MintPy timeseries product into HDF-EOS5 format\n'
        +
Example #49
def multiply(masname,
             slvname,
             outname,
             rngname,
             fact,
             masterFrame,
             flatten=True,
             alks=3,
             rlks=7,
             virtual=True):

    masImg = isceobj.createSlcImage()
    masImg.load(masname + '.xml')

    width = masImg.getWidth()
    length = masImg.getLength()

    if not virtual:
        master = np.memmap(masname,
                           dtype=np.complex64,
                           mode='r',
                           shape=(length, width))
    else:
        master = loadVirtualArray(masname + '.vrt')

    slave = np.memmap(slvname,
                      dtype=np.complex64,
                      mode='r',
                      shape=(length, width))

    if os.path.exists(rngname):
        rng2 = np.memmap(rngname,
                         dtype=np.float32,
                         mode='r',
                         shape=(length, width))
    else:
        print('No range offsets provided')
        rng2 = np.zeros((length, width))

    cJ = np.complex64(-1j)

    # Zero out anything outside the valid region:
    ifg = np.memmap(outname,
                    dtype=np.complex64,
                    mode='w+',
                    shape=(length, width))
    firstS = masterFrame.firstValidSample
    lastS = masterFrame.firstValidSample + masterFrame.numValidSamples - 1
    firstL = masterFrame.firstValidLine
    lastL = masterFrame.firstValidLine + masterFrame.numValidLines - 1
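    # Cross-multiply master with the conjugated slave, one line at a time, within the valid window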
    for kk in range(firstL, lastL + 1):
        ifg[kk, firstS:lastS + 1] = master[kk, firstS:lastS + 1] * np.conj(
            slave[kk, firstS:lastS + 1])
        if flatten:
            phs = np.exp(cJ * fact * rng2[kk, firstS:lastS + 1])
            ifg[kk, firstS:lastS + 1] *= phs

    ####
    master = None
    slave = None
    ifg = None

    objInt = isceobj.createIntImage()
    objInt.setFilename(outname)
    objInt.setWidth(width)
    objInt.setLength(length)
    objInt.setAccessMode('READ')
    objInt.renderHdr()

    try:
        takeLooks(objInt, alks, rlks)
    except Exception as exc:
        raise Exception('Failed to multilook ifg: {0}'.format(objInt.filename)) from exc

    return objInt
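
# --- Editor's sketch (illustrative): the per-line interferogram math above, in isolation ---
# master * conj(slave) gives the interferometric phase difference; multiplying by
# exp(-1j * fact * rng2) removes ("flattens") the known range-dependent phase.
# Array sizes and fact are placeholders.
import numpy as np

length, width = 4, 8
rand_c64 = lambda: (np.random.rand(length, width)
                    + 1j * np.random.rand(length, width)).astype(np.complex64)
master, slave = rand_c64(), rand_c64()
rng2 = np.random.rand(length, width).astype(np.float32)
fact = 0.5  # hypothetical range-offset-to-phase conversion factor

ifg = master * np.conj(slave)                        # raw interferogram
ifg = ifg * np.exp(np.complex64(-1j) * fact * rng2)  # flattened interferogram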
Example #50
 def test_numpy_scalar_complex(self):
     x = np.complex64(np.random.rand() + 1j * np.random.rand())
     x_rec = self.encode_decode(x)
     self.assertTrue(np.allclose(x, x_rec) and type(x) == type(x_rec))
Example #51
def test_extra_keywords():
    beam_in = UVBeam()
    beam_out = UVBeam()
    casa_file = os.path.join(DATA_PATH, 'HERABEAM.FITS')
    testfile = os.path.join(DATA_PATH, 'test/outtest_beam.fits')
    beam_in.read_beamfits(casa_file, run_check=False)

    # fill in missing parameters
    beam_in.data_normalization = 'peak'
    beam_in.feed_name = 'casa_ideal'
    beam_in.feed_version = 'v0'
    beam_in.model_name = 'casa_airy'
    beam_in.model_version = 'v0'

    # this file is actually in sine projection RA/DEC at zenith at a particular time.
    # For now pretend it's in sine projection of az/za
    beam_in.pixel_coordinate_system = 'sin_zenith'

    # check for warnings & errors with extra_keywords that are dicts, lists or arrays
    beam_in.extra_keywords['testdict'] = {'testkey': 23}
    uvtest.checkWarnings(
        beam_in.check,
        message=['testdict in extra_keywords is a '
                 'list, array or dict'])
    nt.assert_raises(TypeError,
                     beam_in.write_beamfits,
                     testfile,
                     run_check=False)
    beam_in.extra_keywords.pop('testdict')

    beam_in.extra_keywords['testlist'] = [12, 14, 90]
    uvtest.checkWarnings(
        beam_in.check,
        message=['testlist in extra_keywords is a '
                 'list, array or dict'])
    nt.assert_raises(TypeError,
                     beam_in.write_beamfits,
                     testfile,
                     run_check=False)
    beam_in.extra_keywords.pop('testlist')

    beam_in.extra_keywords['testarr'] = np.array([12, 14, 90])
    uvtest.checkWarnings(
        beam_in.check,
        message=['testarr in extra_keywords is a '
                 'list, array or dict'])
    nt.assert_raises(TypeError,
                     beam_in.write_beamfits,
                     testfile,
                     run_check=False)
    beam_in.extra_keywords.pop('testarr')

    # check for warnings with extra_keywords keys that are too long
    beam_in.extra_keywords['test_long_key'] = True
    uvtest.checkWarnings(beam_in.check,
                         message=[
                             'key test_long_key in extra_keywords '
                             'is longer than 8 characters'
                         ])
    uvtest.checkWarnings(
        beam_in.write_beamfits, [testfile], {
            'run_check': False,
            'clobber': True
        },
        message=[
            'key test_long_key in extra_keywords is longer than 8 characters'
        ])
    beam_in.extra_keywords.pop('test_long_key')

    # check handling of boolean keywords
    beam_in.extra_keywords['bool'] = True
    beam_in.extra_keywords['bool2'] = False
    beam_in.write_beamfits(testfile, clobber=True)
    beam_out.read_beamfits(testfile, run_check=False)

    nt.assert_equal(beam_in, beam_out)
    beam_in.extra_keywords.pop('bool')
    beam_in.extra_keywords.pop('bool2')

    # check handling of int-like keywords
    beam_in.extra_keywords['int1'] = np.int64(5)
    beam_in.extra_keywords['int2'] = 7
    beam_in.write_beamfits(testfile, clobber=True)
    beam_out.read_beamfits(testfile, run_check=False)

    nt.assert_equal(beam_in, beam_out)
    beam_in.extra_keywords.pop('int1')
    beam_in.extra_keywords.pop('int2')

    # check handling of float-like keywords
    beam_in.extra_keywords['float1'] = np.float64(5.3)
    beam_in.extra_keywords['float2'] = 6.9
    beam_in.write_beamfits(testfile, clobber=True)
    beam_out.read_beamfits(testfile, run_check=False)

    nt.assert_equal(beam_in, beam_out)
    beam_in.extra_keywords.pop('float1')
    beam_in.extra_keywords.pop('float2')

    # check handling of complex-like keywords
    beam_in.extra_keywords['complex1'] = np.complex64(5.3 + 1.2j)
    beam_in.extra_keywords['complex2'] = 6.9 + 4.6j
    beam_in.write_beamfits(testfile, clobber=True)
    beam_out.read_beamfits(testfile, run_check=False)

    nt.assert_equal(beam_in, beam_out)
Example #52
def main(_):
    # CHECK-LABEL: TEST: abs int32[]
    # CHECK: mhlo.abs
    # CHECK-SAME: tensor<i32>
    print_ir(np.int32(0))(lax.abs)

    # CHECK-LABEL: TEST: add float32[] float32[]
    # CHECK: mhlo.add
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.add)

    # CHECK-LABEL: TEST: acos float32[]
    # CHECK: mhlo.atan2
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1))(lax.acos)

    # CHECK-LABEL: TEST: acosh float32[]
    # CHECK: chlo.acosh
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.acosh)

    # CHECK-LABEL: TEST: asin float32[]
    # CHECK: mhlo.atan2
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1))(lax.asin)

    # CHECK-LABEL: TEST: asinh float32[]
    # CHECK: chlo.asinh
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.asinh)

    # CHECK-LABEL: TEST: atan float32[]
    # CHECK: mhlo.atan2
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1))(lax.atan)

    # CHECK-LABEL: TEST: atanh float32[]
    # CHECK: chlo.atanh
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.atanh)

    # CHECK-LABEL: TEST: atan2 float64[] float64[]
    # CHECK: mhlo.atan2
    # CHECK-SAME: tensor<f64>
    print_ir(np.float64(1), np.float64(2))(lax.atan2)

    # CHECK-LABEL: TEST: bessel_i0e float32[]
    # CHECK: xla_fallback_bessel_i0e
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.bessel_i0e)

    # CHECK-LABEL: TEST: bessel_i1e float32[]
    # CHECK: xla_fallback_bessel_i1e
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.bessel_i1e)

    # CHECK-LABEL: TEST: betainc float32[] float32[] float32[]
    # CHECK: xla_fallback_regularized_incomplete_beta
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0), np.float32(0), np.float32(0))(lax.betainc)

    # CHECK-LABEL: TEST: bitcast_convert_type uint32[7]
    # CHECK: mhlo.bitcast_convert
    # CHECK-SAME: tensor<7xui32>
    # CHECK-SAME: tensor<7xf32>
    print_ir(np.empty((7, ), np.uint32))(partial(lax.bitcast_convert_type,
                                                 new_dtype=np.float32))

    # CHECK-LABEL: TEST: bitwise_and int32[] int32[]
    # CHECK: mhlo.and
    # CHECK-SAME: tensor<i32>
    print_ir(np.int32(1), np.int32(2))(lax.bitwise_and)

    # CHECK-LABEL: TEST: bitwise_and bool[] bool[]
    # CHECK: mhlo.and
    # CHECK-SAME: tensor<i1>
    print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_and)

    # CHECK-LABEL: TEST: bitwise_or int32[] int32[]
    # CHECK: mhlo.or
    # CHECK-SAME: tensor<i32>
    print_ir(np.int32(1), np.int32(2))(lax.bitwise_or)

    # CHECK-LABEL: TEST: bitwise_or bool[] bool[]
    # CHECK: mhlo.or
    # CHECK-SAME: tensor<i1>
    print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_or)

    # CHECK-LABEL: TEST: bitwise_xor int32[] int32[]
    # CHECK: mhlo.xor
    # CHECK-SAME: tensor<i32>
    print_ir(np.int32(1), np.int32(2))(lax.bitwise_xor)

    # CHECK-LABEL: TEST: bitwise_xor bool[] bool[]
    # CHECK: mhlo.xor
    # CHECK-SAME: tensor<i1>
    print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_xor)

    # CHECK-LABEL: TEST: cbrt bfloat16[]
    # CHECK: mhlo.cbrt
    # CHECK-SAME: tensor<bf16>
    print_ir(jnp.bfloat16(0))(lax.cbrt)

    # CHECK-LABEL: TEST: clamp bfloat16[] bfloat16[] bfloat16[]
    # CHECK: mhlo.clamp
    # CHECK-SAME: tensor<bf16>
    print_ir(jnp.bfloat16(0), jnp.bfloat16(0), jnp.bfloat16(0))(lax.clamp)

    # CHECK-LABEL: TEST: ceil float16[7]
    # CHECK: mhlo.ceil
    # CHECK-SAME: tensor<7xf16>
    print_ir(np.empty((7, ), np.float16))(lax.ceil)

    # CHECK-LABEL: TEST: convert_element_type float16[7]
    # CHECK: mhlo.convert
    # CHECK-SAME: tensor<7xf16>
    # CHECK-SAME: tensor<7xf32>
    print_ir(np.empty((7, ), np.float16))(partial(lax.convert_element_type,
                                                  new_dtype=np.float32))

    # CHECK-LABEL: TEST: convert_element_type complex64[7]
    # CHECK: mhlo.real
    # CHECK-SAME: tensor<7xcomplex<f32>>
    # CHECK-SAME: tensor<7xf32>
    print_ir(np.empty((7, ), np.complex64))(partial(lax.convert_element_type,
                                                    new_dtype=np.float32))

    # CHECK-LABEL: TEST: convert_element_type float32[7]
    # CHECK: mhlo.compare
    # CHECK-SAME: tensor<7xf32>
    # CHECK-SAME: tensor<7xi1>
    print_ir(np.empty((7, ), np.float32))(partial(lax.convert_element_type,
                                                  new_dtype=np.bool_))

    # CHECK-LABEL: TEST: clz uint32[]
    # CHECK: mhlo.count_leading_zeros
    # CHECK-SAME: tensor<ui32>
    print_ir(np.uint32(0))(lax.clz)

    # CHECK-LABEL: TEST: conj complex64[]
    # CHECK-DAG: mhlo.real
    # CHECK-DAG: mhlo.imag
    # CHECK-DAG: mhlo.neg
    # CHECK-DAG: mhlo.complex
    # CHECK-SAME: tensor<complex<f32>>
    print_ir(np.complex64(0))(lax.conj)

    # CHECK-LABEL: TEST: cos float32[]
    # CHECK: mhlo.cos
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.cos)

    # CHECK-LABEL: TEST: cosh float32[]
    # CHECK: chlo.cosh
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.cosh)

    # CHECK-LABEL: TEST: digamma float32[]
    # CHECK: chlo.digamma
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.digamma)

    # CHECK-LABEL: TEST: div float32[] float32[]
    # CHECK: mhlo.div
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.div)

    # CHECK-LABEL: TEST: eq float32[] float32[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction EQ">
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.eq)

    # CHECK-LABEL: TEST: eq complex128[] complex128[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction EQ">
    # CHECK-SAME: tensor<complex<f64>>
    print_ir(np.complex128(1), np.complex128(2))(lax.eq)

    # CHECK-LABEL: TEST: eq int64[] int64[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type SIGNED">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction EQ">
    # CHECK-SAME: tensor<i64>
    print_ir(np.int64(1), np.int64(2))(lax.eq)

    # CHECK-LABEL: TEST: eq uint16[] uint16[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type UNSIGNED">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction EQ">
    # CHECK-SAME: tensor<ui16>
    print_ir(np.uint16(1), np.uint16(2))(lax.eq)

    # CHECK-LABEL: TEST: erf float32[]
    # CHECK: chlo.erf
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.erf)

    # CHECK-LABEL: TEST: erfc float32[]
    # CHECK: chlo.erfc
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.erfc)

    # CHECK-LABEL: TEST: erf_inv float32[]
    # CHECK: xla_fallback_erf_inv
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.erf_inv)

    # CHECK-LABEL: TEST: exp float16[]
    # CHECK: mhlo.exp
    # CHECK-SAME: tensor<f16>
    print_ir(np.float16(0))(lax.exp)

    # CHECK-LABEL: TEST: expm1 bfloat16[]
    # CHECK: mhlo.exponential_minus_one
    # CHECK-SAME: tensor<bf16>
    print_ir(jnp.bfloat16(0))(lax.expm1)

    # CHECK-LABEL: TEST: floor bfloat16[2,3]
    # CHECK: mhlo.floor
    # CHECK-SAME: tensor<2x3xbf16>
    print_ir(np.empty((2, 3), jnp.bfloat16))(lax.floor)

    # CHECK-LABEL: TEST: ge float32[] float32[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction GE">
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.ge)

    # CHECK-LABEL: TEST: gt float32[] float32[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction GT">
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.gt)

    # CHECK-LABEL: TEST: igamma float32[] float32[]
    # CHECK: xla_fallback_igamma
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0), np.float32(0))(lax.igamma)

    # CHECK-LABEL: TEST: igammac float32[] float32[]
    # CHECK: xla_fallback_igammac
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0), np.float32(0))(lax.igammac)

    # CHECK-LABEL: TEST: igamma_grad_a float32[] float32[]
    # CHECK: xla_fallback_igamma_grad_a
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0), np.float32(0))(lax.igamma_grad_a)

    # CHECK-LABEL: TEST: imag complex64[]
    # CHECK: mhlo.imag
    # CHECK-SAME: tensor<complex<f32>>
    print_ir(np.complex64(0))(lax.imag)

    # CHECK-LABEL: TEST: integer_pow float32[]
    # CHECK-DAG: mhlo.mul
    # CHECK-SAME: tensor<f32>
    @print_ir(np.float32(1))
    def integer_pow(x):
        return lax.integer_pow(x, 3)

    # CHECK-LABEL: TEST: is_finite float64[]
    # CHECK: mhlo.is_finite
    # CHECK-SAME: tensor<f64>
    print_ir(np.float64(0))(lax.is_finite)

    # CHECK-LABEL: TEST: le float32[] float32[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction LE">
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.le)

    # CHECK-LABEL: TEST: lgamma float32[]
    # CHECK: chlo.lgamma
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.lgamma)

    # CHECK-LABEL: TEST: log float32[]
    # CHECK: mhlo.log
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.log)

    # CHECK-LABEL: TEST: log1p float32[]
    # CHECK: mhlo.log_plus_one
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.log1p)

    # CHECK-LABEL: TEST: lt float32[] float32[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction LT">
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.lt)

    # CHECK-LABEL: TEST: max float32[] float32[]
    # CHECK: mhlo.max
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.max)

    # CHECK-LABEL: TEST: min float32[] float32[]
    # CHECK: mhlo.min
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.min)

    # CHECK-LABEL: TEST: mul float32[] float32[]
    # CHECK: mhlo.mul
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.mul)

    # CHECK-LABEL: TEST: ne float32[] float32[]
    # CHECK: mhlo.compare
    # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
    # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction NE">
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.ne)

    # CHECK-LABEL: TEST: neg int64[]
    # CHECK: mhlo.negate
    # CHECK-SAME: tensor<i64>
    print_ir(np.int64(0))(lax.neg)

    # CHECK-LABEL: TEST: nextafter float32[] float32[]
    # CHECK: chlo.next_after
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0), np.float32(0))(lax.nextafter)

    # CHECK-LABEL: TEST: bitwise_not int64[]
    # CHECK: mhlo.not
    # CHECK-SAME: tensor<i64>
    print_ir(np.int64(0))(lax.bitwise_not)

    # CHECK-LABEL: TEST: bitwise_not bool[]
    # CHECK: mhlo.not
    # CHECK-SAME: tensor<i1>
    print_ir(np.bool_(0))(lax.bitwise_not)

    # CHECK-LABEL: TEST: population_count uint32[]
    # CHECK: mhlo.popcnt
    # CHECK-SAME: tensor<ui32>
    print_ir(np.uint32(0))(lax.population_count)

    # CHECK-LABEL: TEST: pow float32[] float32[]
    # CHECK: mhlo.power
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.pow)

    # CHECK-LABEL: TEST: random_gamma_grad float32[] float32[]
    # CHECK: xla_fallback_random_gamma_grad
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0), np.float32(0))(lax.random_gamma_grad)

    # CHECK-LABEL: TEST: real complex128[]
    # CHECK: mhlo.real
    # CHECK-SAME: tensor<complex<f64>>
    print_ir(np.complex128(0))(lax.real)

    # CHECK-LABEL: TEST: reduce_precision bfloat16[]
    # CHECK: mhlo.reduce_precision
    # CHECK-SAME: tensor<bf16>
    print_ir(jnp.bfloat16(0))(partial(lax.reduce_precision,
                                      exponent_bits=2,
                                      mantissa_bits=2))

    # CHECK-LABEL: TEST: rem float32[] float32[]
    # CHECK: mhlo.rem
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.rem)

    # CHECK-LABEL: TEST: round float64[7,1]
    # CHECK: mhlo.round
    # CHECK-SAME: tensor<7x1xf64>
    print_ir(np.empty((7, 1), np.float64))(partial(
        lax.round, rounding_method=lax.RoundingMethod.AWAY_FROM_ZERO))

    # CHECK-LABEL: TEST: rsqrt complex64[]
    # CHECK: mhlo.rsqrt
    # CHECK-SAME: tensor<complex<f32>>
    print_ir(jnp.complex64(0))(lax.rsqrt)

    # CHECK-LABEL: TEST: shift_left uint32[] uint32[]
    # CHECK: mhlo.shift_left
    # CHECK-SAME: tensor<ui32>
    print_ir(np.uint32(0), np.uint32(0))(lax.shift_left)

    # CHECK-LABEL: TEST: shift_right_arithmetic uint8[] uint8[]
    # CHECK: mhlo.shift_right_arithmetic
    # CHECK-SAME: tensor<ui8>
    print_ir(np.uint8(0), np.uint8(0))(lax.shift_right_arithmetic)

    # CHECK-LABEL: TEST: shift_right_logical uint16[] uint16[]
    # CHECK: mhlo.shift_right_logical
    # CHECK-SAME: tensor<ui16>
    print_ir(np.uint16(0), np.uint16(0))(lax.shift_right_logical)

    # CHECK-LABEL: TEST: sign int64[]
    # CHECK: mhlo.sign
    # CHECK-SAME: tensor<i64>
    print_ir(np.int64(0))(lax.sign)

    # CHECK-LABEL: TEST: sign uint32[]
    # CHECK: mhlo.compare
    # CHECK-SAME: tensor<ui32>
    print_ir(np.uint32(0))(lax.sign)

    # CHECK-LABEL: TEST: sin float32[]
    # CHECK: mhlo.sin
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.sin)

    # CHECK-LABEL: TEST: sinh float32[]
    # CHECK: chlo.sinh
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.sinh)

    # CHECK-LABEL: TEST: sub float32[] float32[]
    # CHECK: mhlo.sub
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(1), np.float32(2))(lax.sub)

    # CHECK-LABEL: TEST: sqrt bfloat16[]
    # CHECK: mhlo.sqrt
    # CHECK-SAME: tensor<bf16>
    print_ir(jnp.bfloat16(0))(lax.sqrt)

    # CHECK-LABEL: TEST: tan float16[]
    # CHECK: chlo.tan
    # CHECK-SAME: tensor<f16>
    print_ir(np.float16(0))(lax.tan)

    # CHECK-LABEL: TEST: tanh float32[]
    # CHECK: mhlo.tanh
    # CHECK-SAME: tensor<f32>
    print_ir(np.float32(0))(lax.tanh)
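
# --- Editor's sketch (illustrative): inspecting lowered IR outside the FileCheck
# harness above. The exact API has moved between jax releases; this assumes a
# version where jit(...).lower(...).compiler_ir(dialect="mhlo") is available.
import numpy as np
import jax
from jax import lax

lowered = jax.jit(lax.add).lower(np.float32(1), np.float32(2))
print(lowered.compiler_ir(dialect="mhlo"))  # module containing mhlo.add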
Example #53
import fractions

import numpy as np
import pytest
import sympy

import cirq


@pytest.mark.parametrize(
    'val',
    [
        3.2,
        np.float32(3.2),
        int(1),
        np.int32(45),
        np.float64(6.3),
        np.int32(2),
        np.complex64(1j),
        np.complex128(2j),
        complex(1j),
        fractions.Fraction(3, 2),
    ],
)
def test_value_of_pass_through_types(val):
    _assert_consistent_resolution(val, val)


@pytest.mark.parametrize(
    'val,resolved',
    [(sympy.pi, np.pi), (sympy.S.NegativeOne, -1), (sympy.S.Half, 0.5),
     (sympy.S.One, 1)],
)
def test_value_of_transformed_types(val, resolved):
    _assert_consistent_resolution(val, resolved)
Example #54
# stubs should instead do
#
# np.array([1.0, 0.0, 0.0], dtype=np.float32)
# np.array([], dtype=np.complex64)
#
# See e.g. the discussion on the mailing list
#
# https://mail.python.org/pipermail/numpy-discussion/2020-April/080566.html
#
# and the issue
#
# https://github.com/numpy/numpy-stubs/issues/41
#
# for more context.
np.float32([1.0, 0.0, 0.0])  # E: incompatible type
np.complex64([])  # E: incompatible type

np.complex64(1, 2)  # E: Too many arguments
# TODO: protocols (can't check for non-existent protocols w/ __getattr__)

np.datetime64(0)  # E: non-matching overload


class A:
    def __float__(self):
        return 1.0


np.int8(A())  # E: incompatible type
np.int16(A())  # E: incompatible type
np.int32(A())  # E: incompatible type
Example #55
 def test_complex32(self):
     s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'),
                 verbose=False)
     assert_identical(s.c32, np.complex64(3.124442e13 - 2.312442e31j))
Example #56
            uv_in.write_uvfits(testfile, run_check=False)
    else:
        with pytest.warns(UserWarning, match=warnstr):
            uv_in.write_uvfits(testfile, run_check=False)


@pytest.mark.filterwarnings(
    "ignore:The uvw_array does not match the expected values")
@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.parametrize(
    "kwd_names,kwd_values",
    (
        [["bool", "bool2"], [True, False]],
        [["int1", "int2"], [np.int(5), 7]],
        [["float1", "float2"], [np.int64(5.3), 6.9]],
        [["complex1", "complex2"], [np.complex64(5.3 + 1.2j), 6.9 + 4.6j]],
        [
            ["str", "comment"],
            [
                "hello",
                "this is a very long comment that will be broken into several "
                "lines\nif everything works properly.",
            ],
        ],
    ),
)
def test_extra_keywords(casa_uvfits, tmp_path, kwd_names, kwd_values):
    uv_in = casa_uvfits
    uv_out = UVData()
    testfile = str(tmp_path / "outtest_casa.uvfits")
Example #57
        if not fixed:
            converter = converter.vararray_type(field, converter, arraysize,
                                                config)

    return converter


numpy_dtype_to_field_mapping = {
    np.float64().dtype.num: 'double',
    np.float32().dtype.num: 'float',
    np.bool_().dtype.num: 'bit',
    np.uint8().dtype.num: 'unsignedByte',
    np.int16().dtype.num: 'short',
    np.int32().dtype.num: 'int',
    np.int64().dtype.num: 'long',
    np.complex64().dtype.num: 'floatComplex',
    np.complex128().dtype.num: 'doubleComplex',
    np.unicode_().dtype.num: 'unicodeChar'
}

numpy_dtype_to_field_mapping[np.bytes_().dtype.num] = 'char'
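
# Illustrative lookup (editor's example): resolve a column's VOTable datatype
# name from its numpy dtype number via the mapping above.
col = np.array([1, 2, 3], dtype=np.int32)
print(numpy_dtype_to_field_mapping[col.dtype.num])  # -> 'int'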


def _all_bytes(column):
    for x in column:
        if not isinstance(x, bytes):
            return False
    return True


def _all_unicode(column):
    for x in column:
        if not isinstance(x, str):
            return False
    return True
Example #58
lk = 1  #total length k space
dk = lk / N
h = np.array(range(0,
                   int(N / 2) + 1), dtype=np.float64)  # vector of wavenumbers
h = np.append(h, np.array(range(1 - int(N / 2), 0), dtype=np.float64))
h = h * dk
k = h
l = h
H, K, L = np.meshgrid(h, k, l)
# reciprocal space coordinates
HH = H * b1[0] + K * b2[0] + L * b3[0]
KK = H * b1[1] + K * b2[1] + L * b3[1]
LL = H * b1[2] + K * b2[2] + L * b3[2]
'''Initial condition '''
nr0 = np.complex64(c + 0.05 *
                   (np.random.rand(h.shape[0], h.shape[0], h.shape[0]) - 0.5))

nk0 = np.complex64(np.fft.fftn(nr0))
'''Parameters'''
R = 8.3144598
T = 600
T0 = 1100
Tr1 = T / T0
w1 = (592 + T / R + 205 * (c - c2) / 2)
pi = tf.constant(math.pi, dtype=tf.complex64)
'''Lk = -2*sum(L1*sin^2(1/2*k*r))'''
Lk1 = 0
coordl = [-1, 1]
for xx in coordl:
    Lk1 = Lk1 - 2 * ((np.sin(math.pi * (HH * xx)))**2)
    Lk1 = Lk1 - 2 * ((np.sin(math.pi * (KK * xx)))**2)
Example #59
def sc_complex_dot_batched(bx_gpu,
                           by_gpu,
                           bc_gpu,
                           transa='N',
                           transb='N',
                           handle=None):
    """
    Uses cublasCgemmBatched to compute a bunch of complex dot products
    in parallel.

    """
    if handle is None:
        handle = scikits.cuda.misc._global_cublas_handle

    assert len(bx_gpu.shape) == 3
    assert len(by_gpu.shape) == 3
    assert len(bc_gpu.shape) == 3
    assert bx_gpu.dtype == np.complex64
    assert by_gpu.dtype == np.complex64
    assert bc_gpu.dtype == np.complex64

    # Get the shapes of the arguments
    bx_shape = bx_gpu.shape
    by_shape = by_gpu.shape

    # Perform matrix multiplication for 2D arrays:
    alpha = np.complex64(1.0)
    beta = np.complex64(0.0)

    transa = transa.lower()
    transb = transb.lower()

    if transb in ['t', 'c']:
        N, m, k = by_shape
    elif transb in ['n']:
        N, k, m = by_shape
    else:
        raise ValueError('invalid value for transb')

    if transa in ['t', 'c']:
        N2, l, n = bx_shape
    elif transa in ['n']:
        N2, n, l = bx_shape
    else:
        raise ValueError('invalid value for transa')

    if l != k:
        raise ValueError('objects are not aligned')

    if N != N2:
        raise ValueError('batch sizes are not the same')

    if transb == 'n':
        lda = max(1, m)
    else:
        lda = max(1, k)

    if transa == 'n':
        ldb = max(1, k)
    else:
        ldb = max(1, n)

    ldc = max(1, m)

    # construct pointer arrays needed for cublasCgemmBatched
    bx_arr = bptrs(bx_gpu)
    by_arr = bptrs(by_gpu)
    bc_arr = bptrs(bc_gpu)

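    # cuBLAS is column-major, so the operands are passed swapped (by before bx):
    # computing B^T * A^T in column-major yields the row-major product A * B.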
    cublas.cublasCgemmBatched(handle, transb, transa, m, n, k, alpha,
                              by_arr.gpudata, lda, bx_arr.gpudata, ldb, beta,
                              bc_arr.gpudata, ldc, N)
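
# --- Editor's sketch (illustrative usage; assumes pycuda + scikits.cuda as above) ---
import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray

N, n, k, m = 4, 2, 3, 5  # hypothetical batch of (2x3) @ (3x5) complex products
bx = (np.random.rand(N, n, k) + 1j * np.random.rand(N, n, k)).astype(np.complex64)
by = (np.random.rand(N, k, m) + 1j * np.random.rand(N, k, m)).astype(np.complex64)
bx_gpu, by_gpu = gpuarray.to_gpu(bx), gpuarray.to_gpu(by)
bc_gpu = gpuarray.empty((N, n, m), np.complex64)

sc_complex_dot_batched(bx_gpu, by_gpu, bc_gpu)  # default transa=transb='N'
assert np.allclose(bc_gpu.get(), bx @ by, atol=1e-4)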
Example #60
    def __initializeDataSet(self, num_pix, current_pixels):
        """
        Creates and initializes the primary (and auxiliary) datasets and datagroups
        to hold the raw data for the current set of experimental parameters.
        
        Parameters
        ----------
        num_pix : unsigned int
            Number of pixels this datagroup is expected to hold
        current_pixels : dictionary of BEPSndfPixel objects
            Extracted data for the first pixel in this group
            
        Returns
        ---------
        h5_refs : list of HDF5group and HDF5Dataset references 
            references of the written H5 datasets
        """

        tot_bins = 0
        tot_pts = 0
        # Each wavetype can have different number of bins
        for pixl in current_pixels.values():
            tot_bins += pixl.num_bins
            tot_pts += pixl.num_bins * pixl.num_steps

        # Need to halve the number of steps when only in / out field is acquired:
        if self.halve_udvs_steps:
            tot_pts = int(tot_pts / 2)

        # Populate information from the columns within the pixels such as the FFT, bin freq, indices, etc.
        bin_freqs = np.zeros(shape=(tot_bins), dtype=np.float32)
        bin_inds = np.zeros(shape=(tot_bins), dtype=np.uint32)
        bin_FFT = np.zeros(shape=(tot_bins), dtype=np.complex64)
        exec_bin_vec = np.zeros(shape=(tot_bins), dtype=np.int32)
        pixel_bins = {}  # Might be useful later
        stind = 0
        for wave_type in self.__unique_waves__:
            pixl = current_pixels[wave_type]
            exec_bin_vec[stind:stind +
                         pixl.num_bins] = wave_type * np.ones(pixl.num_bins)
            bin_inds[stind:stind + pixl.num_bins] = pixl.BE_bin_ind
            bin_freqs[stind:stind + pixl.num_bins] = pixl.BE_bin_w
            bin_FFT[stind:stind + pixl.num_bins] = pixl.FFT_BE_wave
            pixel_bins[wave_type] = [stind, pixl.num_bins]
            stind += pixl.num_bins
        del pixl, stind

        # Make the index matrix that has the UDVS step number and bin indices
        spec_inds = np.zeros(shape=(2, tot_pts), dtype=np.uint32)
        stind = 0
        # Need to go through the UDVS file and reconstruct chronologically
        for step_index, wave_type in enumerate(self.excit_type_vec):
            if self.halve_udvs_steps and self.udvs_mat[
                    step_index, 2] < 1E-3:  # invalid AC amplitude
                continue  # skip
            vals = pixel_bins[wave_type]
            spec_inds[1, stind:stind +
                      vals[1]] = step_index * np.ones(vals[1])  # UDVS step
            spec_inds[0, stind:stind + vals[1]] = np.arange(
                vals[0], vals[0] + vals[1])  # Bin step
            stind += vals[1]
        del stind, wave_type, step_index

        self.spec_inds = spec_inds  # will need this for plot group generation

        ds_ex_wfm = MicroDataset('Excitation_Waveform', self.BE_wave)
        ds_bin_freq = MicroDataset('Bin_Frequencies', bin_freqs)
        ds_bin_inds = MicroDataset(
            'Bin_Indices', bin_inds - 1,
            dtype=np.uint32)  # From Matlab (base 1) to Python (base 0)
        ds_bin_FFT = MicroDataset('Bin_FFT', bin_FFT)
        ds_wfm_typ = MicroDataset('Bin_Wfm_Type', exec_bin_vec)
        ds_bin_steps = MicroDataset('Bin_Step',
                                    np.arange(tot_bins, dtype=np.uint32))

        curr_parm_dict = self.parm_dict
        # Some very basic information that can help the processing crew
        curr_parm_dict['num_bins'] = tot_bins
        curr_parm_dict['num_pix'] = num_pix

        # technically should change the date, etc.
        self.current_group = '{:s}'.format('Measurement_')
        meas_grp = MicroDataGroup(self.current_group, '/')
        meas_grp.attrs = curr_parm_dict

        chan_grp = MicroDataGroup('Channel_')
        chan_grp.attrs['Channel_Input'] = curr_parm_dict['IO_Analog_Input_1']
        meas_grp.addChildren([chan_grp])

        udvs_slices = dict()
        for col_ind, col_name in enumerate(self.udvs_labs):
            udvs_slices[col_name] = (slice(None), slice(col_ind, col_ind + 1))
            #print('UDVS column index {} = {}'.format(col_ind,col_name))
        ds_UDVS = MicroDataset('UDVS', self.udvs_mat)
        ds_UDVS.attrs['labels'] = udvs_slices
        ds_UDVS.attrs['units'] = self.udvs_units

        actual_udvs_steps = self.num_udvs_steps
        if self.halve_udvs_steps:
            actual_udvs_steps = actual_udvs_steps // 2

        curr_parm_dict['num_udvs_steps'] = actual_udvs_steps

        ds_UDVS_inds = MicroDataset('UDVS_Indices', self.spec_inds[1])
        #         ds_UDVS_inds.attrs['labels'] = {'UDVS_step':(slice(None),)}
        """
        Create the Spectroscopic Values tables
        """
        spec_vals, spec_inds, spec_vals_labs, spec_vals_units, spec_vals_labs_names = \
            createSpecVals(self.udvs_mat, spec_inds, bin_freqs, exec_bin_vec,
                           curr_parm_dict, np.array(self.udvs_labs), self.udvs_units)

        spec_vals_slices = dict()
        for row_ind, row_name in enumerate(spec_vals_labs):
            spec_vals_slices[row_name] = (slice(row_ind,
                                                row_ind + 1), slice(None))
        ds_spec_vals_mat = MicroDataset('Spectroscopic_Values',
                                        np.array(spec_vals, dtype=np.float32))
        ds_spec_vals_mat.attrs['labels'] = spec_vals_slices
        ds_spec_vals_mat.attrs['units'] = spec_vals_units
        ds_spec_mat = MicroDataset('Spectroscopic_Indices',
                                   spec_inds,
                                   dtype=np.uint32)
        ds_spec_mat.attrs['labels'] = spec_vals_slices
        ds_spec_mat.attrs['units'] = spec_vals_units
        for entry in spec_vals_labs_names:
            label = entry[0] + '_parameters'
            names = entry[1]
            ds_spec_mat.attrs[label] = names
            ds_spec_vals_mat.attrs[label] = names
        """
        New Method for chunking the Main_Data dataset.  Chunking is now done in N-by-N squares of UDVS steps by pixels.
        N is determined dinamically based on the dimensions of the dataset.  Currently it is set such that individual
        chunks are less than 10kB in size.
        
        Chris Smith -- [email protected]
        """
        max_bins_per_pixel = max(val[1] for val in pixel_bins.values())
        pixel_chunking = maxReadPixels(10240, num_pix, max_bins_per_pixel,
                                       np.dtype('complex64').itemsize)
        chunking = int(np.floor(np.sqrt(pixel_chunking)))
        chunking = max(1, chunking)
        chunking = min(actual_udvs_steps, num_pix, chunking)
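        # Note: calc_chunks below supersedes the square-chunk estimate above;
        # unit_chunks keeps (1 pixel x one UDVS step's bins) as the minimum chunk.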
        beps_chunks = calc_chunks([num_pix, tot_pts],
                                  np.complex64(0).itemsize,
                                  unit_chunks=(1, max_bins_per_pixel))
        ds_main_data = MicroDataset('Raw_Data',
                                    np.zeros(shape=(1, tot_pts),
                                             dtype=np.complex64),
                                    chunking=beps_chunks,
                                    resizable=True,
                                    compression='gzip')

        ds_noise = MicroDataset('Noise_Floor',
                                np.zeros(shape=(1, actual_udvs_steps),
                                         dtype=nf32),
                                chunking=(1, actual_udvs_steps),
                                resizable=True,
                                compression='gzip')
        # noise_labs = ['super_band','inter_bin_band','sub_band']
        # noise_slices = dict()
        # for col_ind, col_name in enumerate(noise_labs):
        #     noise_slices[col_name] = (slice(None),slice(col_ind,col_ind+1), slice(None))
        # ds_noise.attrs['labels'] = noise_slices
        # ds_noise.attrs['units'] = ['','','']
        #         ds_noise_labs = MicroDataset('Noise_Labels',np.array(noise_labs))
        # Allocate space for the first pixel for now and write along with the complete tree...
        # Positions CANNOT be written at this time since we don't know if the parameter changed

        chan_grp.addChildren([
            ds_main_data, ds_noise, ds_ex_wfm, ds_spec_mat, ds_wfm_typ,
            ds_bin_steps, ds_bin_inds, ds_bin_freq, ds_bin_FFT, ds_UDVS,
            ds_spec_vals_mat, ds_UDVS_inds
        ])

        #meas_grp.showTree()
        h5_refs = self.hdf.writeData(meas_grp)

        self.ds_noise = getH5DsetRefs(['Noise_Floor'], h5_refs)[0]
        self.ds_main = getH5DsetRefs(['Raw_Data'], h5_refs)[0]

        #self.dset_index += 1 # raise dset index after closing only
        self.ds_pixel_index = 0

        # Use this for plot groups:
        self.mean_resp = np.zeros(shape=(tot_pts), dtype=np.complex64)

        # Used for Histograms
        self.max_resp = np.zeros(shape=(num_pix), dtype=np.float32)
        self.min_resp = np.zeros(shape=(num_pix), dtype=np.float32)

        return h5_refs