Example 1
def update_data(start, end):
    global step
    dur = end - start
    if dur <= 2.0:
        newstep = 1
    elif dur <= 5.0:
        newstep = np.int16(np.round(len(au) / width / 8))
    elif dur <= 10.0:
        newstep = np.int16(np.round(len(au) / width / 4))
    else:
        newstep = np.int16(np.round(len(au) / width / 2))
    if newstep != step:
        sys.stderr.write('Updating source data\n')
        step = newstep
        newsource = dict()
        newsource['x'] = timepts[0::step]
        newsource['au'] = au[0::step]
        newsource['p1'] = lp_p1[0::step]
        newsource['raw_lp_decim_p1'] = raw_lp_decim_p1[0::step]
        newsource['p2'] = lp_p2[0::step]
        newsource['raw_lp_decim_p2'] = raw_lp_decim_p2[0::step]
        source.data = newsource
        for renderer in ch0.select(dict(tag='update_ts')):
            renderer.data_source.data = newsource
        # TODO: should this trigger fire?
        #source.trigger('change')
    else:
        sys.stderr.write('Step did not change\n')
    print(dur, step, len(au), width)
Example 2
    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings(record=True) as l:  # record=True so l holds the caught warnings
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
Example 3
def _search_fine(sino, srad, step, init_cen, ratio, drop):
    """
    Fine search for finding the rotation center.
    """
    Nrow, Ncol = sino.shape
    centerfliplr = (Ncol + 1.0) / 2.0 - 1.0

    # Shift used to align the flipped sinogram with the raw CoR.
    shiftsino = np.int16(2 * (init_cen - centerfliplr))
    _copy_sino = np.roll(np.fliplr(sino[1:]), shiftsino, axis=1)
    lefttake = 0
    righttake = Ncol - 1
    if init_cen <= centerfliplr:
        lefttake = np.ceil(srad + 1)
        righttake = np.floor(2 * init_cen - srad - 1)
    else:
        lefttake = np.ceil(
            init_cen - (Ncol - 1 - init_cen) + srad + 1)
        righttake = np.floor(Ncol - 1 - srad - 1)
    Ncol1 = righttake - lefttake + 1
    mask = _create_mask(2 * Nrow - 1, Ncol1, 0.5 * ratio * Ncol, drop)
    numshift = np.int16((2 * srad + 1.0) / step)
    listshift = np.linspace(-srad, srad, num=numshift)
    listmetric = np.zeros(len(listshift), dtype='float32')
    num1 = 0
    for i in listshift:
        _sino = ndimage.interpolation.shift(
            _copy_sino, (0, i), prefilter=False)
        sinojoin = np.vstack((sino, _sino))
        listmetric[num1] = np.sum(np.abs(np.fft.fftshift(
            pyfftw.interfaces.numpy_fft.fft2(
                sinojoin[:, lefttake:righttake + 1]))) * mask)
        num1 = num1 + 1
    minpos = np.argmin(listmetric)
    return init_cen + listshift[minpos] / 2.0
Example 4
def test_is_int():
    # is int
    assert isinstance(1, int) is True
    assert isinstance(np.int(1), int) is True
    assert isinstance(np.int8(1), int) is False
    assert isinstance(np.int16(1), int) is False

    if PY3:
        assert isinstance(np.int32(1), int) is False
    elif PY2:
        assert isinstance(np.int32(1), int) is True

    assert isinstance(np.int64(1), int) is False

    # is np.int
    assert isinstance(np.int(1), np.int) is True
    assert isinstance(np.int8(1), np.int) is False
    assert isinstance(np.int16(1), np.int) is False

    if PY3:
        assert isinstance(np.int32(1), np.int) is False
    elif PY2:
        assert isinstance(np.int32(1), np.int) is True

    assert isinstance(np.int64(1), np.int) is False
Example 5
	def loadADCFile(self,filename,newLimits=[-30,30]):
			print ('Loading ',filename)
			INPUTNAME = filename.split('_')[1]
			GAIN = filename.split('_')[2].split('x')[0]

			data = np.loadtxt('%s/%s'%(self.dirname,filename))
			X=data[:,0];Y=data[:,1];
			source=self.analogInputSource(INPUTNAME)
			source.setGain(int(GAIN))
			X2=[];Y2=[]
			for B in range(len(X)):
				if source.__conservativeInRange__(X[B]) and X[B]>newLimits[0] and X[B]<newLimits[1]:
					X2.append(X[B]);Y2.append(Y[B])
			X=np.array(X2);Y=np.array(Y2)
			RAW = source.voltToCode12(Y)  								#convert back to ADC codes for testing
			avg_shifts=(self.adc_shifts[np.int16(np.floor(RAW))]+self.adc_shifts[np.int16(np.ceil(RAW))])/2.  # mean shift (in code units) of the ADC INL at each code,
			# removed on the next line before fitting slope & intercept for the channel under process
			OFFSET_REMOVED = RAW-4095*(avg_shifts*self.INL_SLOPE - self.INL_INTERCEPT)/3.3  # apply the ADC's INL calibration; no slope correction yet
			#OFFSET_REMOVED = source.calPoly12(OFFSET_REMOVED)	#convert to voltage values
			
			fitvals = np.polyfit(OFFSET_REMOVED[1:],X[1:],3)
			self.results[INPUTNAME][int(GAIN)]=fitvals
			fitfn = np.poly1d(fitvals)
			print (filename,fitvals,fitfn(0),fitfn(4095))

			self.rawCurves[filename].setData(np.array(X),X-Y)
			self.cleanCurves[filename].setData(np.array(X),X-fitfn(OFFSET_REMOVED))	
Example 6
    def testInt(self):
        num = np.int(2562010)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example 7
    def testIntMax(self):
        num = np.int(np.iinfo(np.int).max)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example 8
def hitrandom(n,NS,FS,scaledata,bp):
	
	#Randomly creates 1 or 2 hits per time step
	n = np.random.randint(1,3)
	detpow = np.random.uniform(20,100,n) 
	meanpow = np.random.uniform(1,20,n)
	
	bzero3 = scaledata[0]
	bscale3 = scaledata[1]
	bzero4 = scaledata[2]
	bscale4 = scaledata[3]

	corchan = np.int16(np.array([i-bzero3 for i in np.random.randint(1,NS,size=n)]))
	finchan = np.int32(np.array([i-bzero4 for i in np.random.randint(1,FS,size=n)]))

	#ET Signal in a given beam
	beam = [0,1,2]
	if(bp in beam):	
		detpow = np.append(detpow,1200.0)
		detpow = np.append(detpow,1300.0)
		meanpow = np.append(meanpow,10.0)
		meanpow = np.append(meanpow,10.0)
		corchan = np.append(corchan,np.int16(125-bzero3))
		corchan = np.append(corchan,np.int16(225-bzero3))
		finchan= np.append(finchan,np.int32(0-bzero4))
		finchan= np.append(finchan,np.int32(0-bzero4))

	c1 = pyfits.Column(name='DETPOW',format='1E',array=detpow)
	c2 = pyfits.Column(name='MEANPOW',format='1E',array=meanpow)
	#c3 = pyfits.Column(name='COARCHAN',format='1I',array=corchan,bzero=bzero3,bscale=bscale3)
	c3 = pyfits.Column(name='COARCHAN',format='1I',array=corchan)
	#c4 = pyfits.Column(name='FINECHAN',format='1J',array=finchan,bzero=bzero4,bscale=bscale4)
	c4 = pyfits.Column(name='FINECHAN',format='1J',array=finchan)
	tbhdu = pyfits.new_table([c1, c2, c3, c4])
	return  tbhdu
Example 9
 def test_numpy_scalar_conversion_values(self):
     self.assertEqual(nd.as_py(nd.array(np.bool_(True))), True)
     self.assertEqual(nd.as_py(nd.array(np.bool_(False))), False)
     self.assertEqual(nd.as_py(nd.array(np.int8(100))), 100)
     self.assertEqual(nd.as_py(nd.array(np.int8(-100))), -100)
     self.assertEqual(nd.as_py(nd.array(np.int16(20000))), 20000)
     self.assertEqual(nd.as_py(nd.array(np.int16(-20000))), -20000)
     self.assertEqual(nd.as_py(nd.array(np.int32(1000000000))), 1000000000)
     self.assertEqual(nd.as_py(nd.array(np.int64(-1000000000000))),
                      -1000000000000)
     self.assertEqual(nd.as_py(nd.array(np.int64(1000000000000))),
                      1000000000000)
     self.assertEqual(nd.as_py(nd.array(np.int32(-1000000000))),
                      -1000000000)
     self.assertEqual(nd.as_py(nd.array(np.uint8(200))), 200)
     self.assertEqual(nd.as_py(nd.array(np.uint16(50000))), 50000)
     self.assertEqual(nd.as_py(nd.array(np.uint32(3000000000))), 3000000000)
     self.assertEqual(nd.as_py(nd.array(np.uint64(10000000000000000000))),
                      10000000000000000000)
     self.assertEqual(nd.as_py(nd.array(np.float32(2.5))), 2.5)
     self.assertEqual(nd.as_py(nd.array(np.float64(2.5))), 2.5)
     self.assertEqual(nd.as_py(nd.array(np.complex64(2.5-1j))), 2.5-1j)
     self.assertEqual(nd.as_py(nd.array(np.complex128(2.5-1j))), 2.5-1j)
     if np.__version__ >= '1.7':
         self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13'))),
                          date(2000, 12, 13))
Example 10
def computeRho(x,y,z,rho1):

        rho = rho1.copy()

        x = x.flatten()
        y = y.flatten()
        z = z.flatten()

        ip = np.int16(x); jp = np.int16(y); kp = np.int16(z)
        dx = x - ip; dy = y - jp; dz = z - kp

        for i in range(len(x)):
            ix = ip[i]; iy = jp[i]; iz = kp[i]
            dxi = dx[i]; dyi = dy[i]; dzi = dz[i]
            txi = 1.0-dxi; tyi = 1.0-dyi; tzi = 1.0-dzi

            # boundary condition: periodic wrap
            ix = ix%N; iy = iy%N; iz = iz%N
            ixp1 = (ix+1)%N; iyp1 = (iy+1)%N; izp1 = (iz+1)%N;

            rho[ix,iy,iz] = rho[ix,iy,iz] + mp*txi*tyi*tzi;
            rho[ix,iyp1,iz] = rho[ix,iyp1,iz] + mp*txi*dyi*tzi;
            rho[ix,iy,izp1] = rho[ix,iy,izp1] + mp*txi*tyi*dzi;
            rho[ix,iyp1,izp1] = rho[ix,iyp1,izp1] + mp*txi*dyi*dzi;
            rho[ixp1,iy,iz] = rho[ixp1,iy,iz] + mp*dxi*tyi*tzi;
            rho[ixp1,iyp1,iz] = rho[ixp1,iyp1,iz] + mp*dxi*dyi*tzi;
            rho[ixp1,iy,izp1] = rho[ixp1,iy,izp1] + mp*dxi*tyi*dzi;
            rho[ixp1,iyp1,izp1] = rho[ixp1,iyp1,izp1] + mp*dxi*dyi*dzi;

        return rho
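
A quick sanity check of the trilinear (cloud-in-cell) weights used above: for any fractional offsets in [0, 1) the eight deposit weights sum to 1, so each particle adds exactly mp of mass in total. A minimal sketch:

dx, dy, dz = 0.25, 0.5, 0.75
tx, ty, tz = 1.0 - dx, 1.0 - dy, 1.0 - dz
weights = [tx*ty*tz, tx*dy*tz, tx*ty*dz, tx*dy*dz,
           dx*ty*tz, dx*dy*tz, dx*ty*dz, dx*dy*dz]
print(sum(weights))  # 1.0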
Example 11
    def test_valid(self):
        prop = bcpp.Int()

        assert prop.is_valid(None)

        assert prop.is_valid(0)
        assert prop.is_valid(1)

        assert prop.is_valid(np.int8(0))
        assert prop.is_valid(np.int8(1))
        assert prop.is_valid(np.int16(0))
        assert prop.is_valid(np.int16(1))
        assert prop.is_valid(np.int32(0))
        assert prop.is_valid(np.int32(1))
        assert prop.is_valid(np.int64(0))
        assert prop.is_valid(np.int64(1))
        assert prop.is_valid(np.uint8(0))
        assert prop.is_valid(np.uint8(1))
        assert prop.is_valid(np.uint16(0))
        assert prop.is_valid(np.uint16(1))
        assert prop.is_valid(np.uint32(0))
        assert prop.is_valid(np.uint32(1))
        assert prop.is_valid(np.uint64(0))
        assert prop.is_valid(np.uint64(1))

        # TODO (bev) should fail
        assert prop.is_valid(False)
        assert prop.is_valid(True)
Example 12
def wavwriteStereo(yLeft, yRight, fs, filename, inputSound):
    """
    Write a stereo sound file from two one-dimensional arrays of channel samples.
    yLeft: floating point array of one dimension
    yRight: floating point array of one dimension
    fs: sampling rate
    filename: name of file to create
    inputSound: original sound, used for auto-attenuation of the output sound
    """
    if yLeft.size != yRight.size: raise ValueError('wavwriteStereo: Left and Right sound samples input arrays have different sizes')
    yMaxMagnitude = max(abs(max(yLeft)),abs(max(yRight)),abs(min(yLeft)),abs(min(yRight)))
    inputMaxMagnitude = max(abs(max(inputSound)),abs(min(inputSound)))
    attenuationRatio = inputMaxMagnitude / yMaxMagnitude
#    print 'yMaxMagnitude ==',yMaxMagnitude
#    print 'inputMaxMagnitude ==',inputMaxMagnitude
#    print 'attenuationRatio ==',attenuationRatio
    xLeft = copy.deepcopy(yLeft)                     # copy array
    xLeft *= attenuationRatio                        # scale output sound to the input sound range
    xLeft *= INT16_FAC                               # scaling floating point -1 to 1 range signal to int16 range
    xLeft = np.int16(xLeft)                          # converting to int16 type
    xRight = copy.deepcopy(yRight)                   # copy array
    xRight *= attenuationRatio                       # scale output sound to the input sound range
    xRight *= INT16_FAC                              # scaling floating point -1 to 1 range signal to int16 range
    xRight = np.int16(xRight)                        # converting to int16 type
    xStereo = np.transpose(np.array([xLeft,xRight]))
    write(filename, fs, xStereo)
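
A hypothetical usage sketch for wavwriteStereo (it assumes the module-level INT16_FAC constant and the wavfile write import that the function relies on; the tones and filename are made up for illustration):

import numpy as np
fs = 44100
t = np.arange(fs) / float(fs)                # one second of audio
left = 0.5 * np.sin(2 * np.pi * 440.0 * t)   # 440 Hz in the left channel
right = 0.5 * np.sin(2 * np.pi * 660.0 * t)  # 660 Hz in the right channel
wavwriteStereo(left, right, fs, 'stereo_test.wav', inputSound=left)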
Example 13
 def readData(self):
     # Read out the data, request 6 bytes, translate, and scale
     data = self.x.read(6)
     ax = ((np.int16(data[0] | (data[1] << 8))) >> 2 ) / self.divider
     ay = ((np.int16(data[2] | (data[3] << 8))) >> 2 ) / self.divider
     az = ((np.int16(data[4] | (data[5] << 8))) >> 2 ) / self.divider
     return ax, ay, az
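
A worked example of the byte assembly in readData (the byte values are hypothetical). The device returns left-justified 14-bit two's-complement samples in little-endian byte pairs; the arithmetic right shift by 2 drops the padding bits while preserving the sign. Recent NumPy versions reject out-of-range Python ints in np.int16(...), so the reinterpretation is spelled out through uint16 here:

import numpy as np
lo, hi = 0x40, 0xFF                               # hypothetical raw bytes
raw = np.uint16(lo | (hi << 8)).astype(np.int16)  # 0xFF40 reinterpreted as -192
print(raw >> 2)                                   # -48: sign-preserving shift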
Example 14
def get_R_Minus_B_Value(img, mask):
    """
    Calculate (r-b) value of each contour area

    Args:
        img: original image in each spark rdd element
        mask: sky region mask
    Returns:
        tuples: (id, (r-b) of each contour area)
    """
    contour0 = 0
    contour1 = 0
    contour2 = 0
    for x in range(0, HEIGHT):
        for y in range(0, WIDTH):
            grey_level = mask[x, y] / 80
            b = np.int16(img.item(x, y, 0))
            g = np.int16(img.item(x, y, 1))
            r = np.int16(img.item(x, y, 2))
            if grey_level == 1:
                contour0 += np.absolute(r - b)
            elif grey_level == 2:
                contour1 += np.absolute(r - b)
            elif grey_level == 3:
                contour2 += np.absolute(r - b)
    return (0, contour0), (1, contour1), (2, contour2)
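
The per-pixel double loop above can be vectorized; a sketch under the same assumptions (BGR channel order, mask grey levels at exact multiples of 80):

import numpy as np

def get_r_minus_b_vectorized(img, mask):
    b = img[:, :, 0].astype(np.int16)
    r = img[:, :, 2].astype(np.int16)
    diff = np.abs(r - b)
    level = mask // 80
    return tuple((k, int(diff[level == k + 1].sum())) for k in range(3))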
Example 15
	def getRaw(self):
		'''
		This method must be defined if you want GUIs to use this class to generate
		plots on the fly.
		It must return a set of values read from the sensor, such as X, Y, Z acceleration.
		The length of this list must not change, and must be given in the variable NUMPLOTS.

		GUIs will create that many plots, and the data returned from this method will be appended to them appropriately.
		'''
		vals=self.getVals(0x3B,14)
		if vals:
			if len(vals)==14:
				raw=[0]*7
				for a in range(3):raw[a] = 1.*int16(vals[a*2]<<8|vals[a*2+1])/self.ACCEL_SCALING[self.AR]
				for a in range(4,7):raw[a] = 1.*int16(vals[a*2]<<8|vals[a*2+1])/self.GYRO_SCALING[self.GR]
				raw[3] = int16(vals[6]<<8|vals[7])/340. + 36.53
				if not self.K:
					return raw
				else:
					for b in range(self.NUMPLOTS):
						self.K[b].input_latest_noisy_measurement(raw[b])
						raw[b]=self.K[b].get_latest_estimated_measurement()
					return raw

			else:
				return False
		else:
			return False
Example 16
def rand_bits(fs):
    baud = 300  # symbol rate
    Ns = int(fs / baud)  # samples per symbol; np.tile below needs an integer count
    f0 = 1800

    code = np.array((-2-2j,
        -2-1j,-2+2j,-2+1j,-1-2j,-1-1j,-1+2j,-1+1j,+2-2j,+2-1j,+2+2j,+2+1j,+1-2j,+1-1j,+1+2j,+1+1j))/2

    np.random.seed(seed=1)
    rbits = np.int16(np.random.rand(6,1)*15)
    prefix = np.array([[0],[2],[10],[8]])
    bits = np.int16(np.random.rand(26,1)*15)

    Nbits = len(rbits) + len(prefix) + len(bits)  # number of bits
    bits = np.array(rbits.tolist() + prefix.tolist() + bits.tolist())
    N = Nbits * Ns

    M = np.tile(code[bits],(1,Ns))
    t = np.r_[0.0:N]/fs

    np.save('data/real.npy', M.real.ravel())
    np.save('data/imag.npy', M.imag.ravel())

    QAM = (M.real.ravel()*np.cos(2*np.pi*f0*t) -
            M.imag.ravel()*np.sin(2*np.pi*f0*t))/2/np.sqrt(2)

    return QAM
Example 17
def imslant(img):
    aa=np.transpose(img)
    dimx=aa.shape[0]
    dimy=aa.shape[1]
    dimxh=aa.shape[0]/2
    dimyh=aa.shape[1]/2
    ii=np.where(aa>0)
    cc=len(ii[0])
    xa=np.mean(ii[0])
    ya=np.mean(ii[1])
    slopenum=np.sum(ii[1]*(ii[0]-xa))
    slopeden=np.sum(ii[1]*(ii[1]-ya))
    slope=slopenum/slopeden
    #print slope, xa, ya
    ii=np.outer(range(28),np.ones(28))
    jj=np.outer(np.ones(28),range(28))
    fx=ii-xa+(jj-ya)*slope+dimxh
    fy=jj-ya+dimyh
    x=np.int16(fx)
    y=np.int16(fy)
    a=fx-x
    b=fy-y
    xx1=x+1
    yy1=y+1
    timg=np.zeros((2*dimx+img.shape[0],2*dimy+img.shape[1]))
    timg[dimx:(dimx+img.shape[0]),dimy:(dimy+img.shape[1])]=aa
    imout=a*b*timg[xx1+dimx,yy1+dimy]+a*(1.-b)*timg[xx1+dimx,y+dimy]+(1.-a)*b*timg[x+dimx,yy1+dimy]+(1.-a)*(1.-b)*timg[x+dimx,y+dimy]
    #imout=timg[x+dimxh,y+dimyh]
    
    return(np.ubyte(np.transpose(imout)))
Example 18
    def pre_process(self):
        out_dataset = self.get_out_datasets()[0]
        in_pData = self.get_plugin_in_datasets()
        width_dim = in_pData[0].get_data_dimension_by_axis_label('detector_x')
        height_dim = in_pData[0].get_data_dimension_by_axis_label(
            'rotation_angle')
        sino_shape = list(in_pData[0].get_shape())
        self.width = sino_shape[width_dim]
        self.height = sino_shape[height_dim]
        center_manu = self.parameters['center']
        if center_manu != 0:
            self.center = center_manu
        self.mid_width = self.width / 2.0
        if (self.center <= 0) or (self.center > self.width):
            self.center = self.mid_width
        center_int = np.int16(np.floor(self.center))
        self.subpixel_shift = self.center - center_int
        if self.center < self.mid_width:
            self.overlap = 2 * center_int
            self.cor = self.width + center_int
        else:
            self.overlap = 2 * (self.width - center_int)
            self.cor = center_int

        self.new_height = np.int16(np.ceil(self.height / 2.0))
        list_angle = out_dataset.meta_data.get("rotation_angle")
        list_angle = list_angle[0:self.new_height]

        out_dataset.meta_data.set("rotation_angle", list_angle)
        list_wedge = np.linspace(1.0, 0.0, self.overlap)
        self.mat_wedge_left = \
            np.ones((self.new_height, self.width), dtype=np.float32)
        self.mat_wedge_left[:, -self.overlap:] = np.float32(list_wedge)
        self.mat_wedge_right = np.fliplr(self.mat_wedge_left)
Example 19
def CreateUsEnvPack(RawData):

	print RawData
	f = open(RawData, "r")
	a = np.fromfile(f, dtype=np.uint32)
	t = a[-1]
	V = a[:-1].copy()
	T = [ (( x * t ) / ( 1000.0*len(V) )) for x in 2*range(len(V))]
	Fech = 1000.0*len(V)/t # in MHz

	M = GetV2(V)[0]
	M2 = GetV1(V)[0]
	Ma = M - np.average(M[500:1000])
	Mb = M2 - np.average(M2[500:1000])

	raw = []
	rawSig = []
	for k in range(len(Ma)):
	    rawSig.append(Mb[k])
	    rawSig.append(Ma[k])
	    raw.append(M)
	    raw.append(M2)

	Interest = rawSig

	tableDataH = np.asarray(Interest).reshape((1000,2*2500))

	plt.figure(figsize=(15,20))
	plt.imshow(tableDataH)
	plt.savefig(RawData.split("/")[-1]+'raw-enveloppe.jpg', bbox_inches='tight')
	plt.show()

	np.savez(RawData.split("/")[-1].split(".")[0]+".npz", np.int16(rawSig), np.int16(tableDataH) )

	return rawSig, tableDataH, raw
Example 20
def save_data_to_h5(in_file, out_file):
    maxlength = 100
    with open(in_file, 'r') as f:
        total_num = sum(line.count('\n') for line in block(f)) - 1
        print('Total num from', in_file, ':', total_num)

        h5file = h5py.File(out_file, 'w')
        h5file.create_dataset("premise", shape=(total_num, maxlength), dtype=np.int32)
        h5file.create_dataset("p_len", shape=(total_num, ), dtype=np.int16)
        h5file.create_dataset("hypothesis", shape=(total_num, maxlength), dtype=np.int32)
        h5file.create_dataset("h_len", shape=(total_num, ), dtype=np.int16)
        h5file.create_dataset("label", shape=(total_num, ), dtype=np.int16)

        wdgts = [pgb.SimpleProgress(), ' ',
             pgb.Bar(marker='∎', left='|', right='|'), ' ',
             pgb.Timer(), ' ',
             pgb.ETA()]

        f.seek(0, 0)
        all_lines = whole(f).split('\n')
        all_lines.pop(0)
        print(len(all_lines))

        with pgb.ProgressBar(widgets=wdgts, maxval=total_num) as p:
            for i, line in enumerate(all_lines):
                if not line:
                    break

                line = line.split('|')
                # sentence1 : premise
                s_ids_1 = line[0]

                s_ids_1 = np.fromstring(s_ids_1, dtype=np.int32, sep=' ')
                pad = lambda a, i: a[0: i] if a.shape[0] > i else np.hstack((a, np.zeros(i - a.shape[0], dtype=np.int32)))
                s_ids_1 = pad(s_ids_1, maxlength)
                h5file['premise'][i] = s_ids_1

                s_len_1 = np.int16(line[1]) if np.int16(line[1]) < maxlength else maxlength
                h5file['p_len'][i] = s_len_1

                # sentence2 : hypothesis
                s_ids_2 = line[2]

                s_ids_2 = np.fromstring(s_ids_2, dtype=np.int32, sep=' ')
                pad = lambda a, i: a[0: i] if a.shape[0] > i else np.hstack((a, np.zeros(i - a.shape[0], dtype=np.int32)))
                s_ids_2 = pad(s_ids_2, maxlength)
                h5file['hypothesis'][i] = s_ids_2

                s_len_2 = np.int16(line[3]) if np.int16(line[3]) < maxlength else maxlength
                h5file['h_len'][i] = s_len_2

                # label
                p_label = line[4]
                h5file['label'][i] = np.int16(p_label)

                h5file.flush()

                p.update(i)

        h5file.close()
Example 21
def Pyramid(img):
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2YCR_CB)
	YUV = cv2.resize(YUV,(40,40))
	Y,U,V = cv2.split(YUV)
	YUV = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	img = cv2.resize(YUV,(26,26))
	kernel1 = np.ones((3,1),np.float32)
	kernel2 = np.ones((1,3),np.float32)
	kernel1[0] = -1
	kernel1[1] = 0
	kernel2[0] = [-1,0,1]
	dst = cv2.filter2D(img,cv2.CV_16S,kernel1)
	dstv1 = np.int16(dst)
	dstv2 = cv2.pow(dstv1,2)
	dst = cv2.filter2D(img,cv2.CV_16S,kernel2)
	dsth1 = np.int16(dst)
	dsth2 = cv2.pow(dsth1,2)
	dst1 = dsth2 + dstv2
	dst1 = np.float32(dst1)
	dstfinal = cv2.sqrt(dst1).astype(np.uint8)
	finalh =  dsth1
	finalv = dstv1
	finalm = dstfinal
	UporDown = (finalv > 0 ).astype(int)
	LeftorRight = 2*(finalh > 0).astype(int)
	absh = map(abs, finalh)
	absv = map(abs, finalv)
	absv[:] = [x*1.732 for x in absv]
	absh = np.float32(absh)
	absv = np.float32(absv)
	high = 4*(absv > absh).astype(int)
	out = high + LeftorRight + UporDown
	features = []
	for x in range(6):
		hrt = np.zeros(out.shape[:2],np.uint8)
		features.append(hrt)
	for x in range(out.shape[:2][0]):
		for y in range(out.shape[:2][1]):
			z = out[x][y]
			if z == 4 or z == 6:
#				print "a",z
				features[4][x][y] = finalm[x][y]
			elif z == 5 or z == 7:
				features[5][x][y] = finalm[x][y]
#				print "b",z
			else:
				features[z][x][y] = finalm[x][y]
#				print z
	kernelg1 = 0.125*np.ones((4,4),np.float32)
	kernelg2 = 0.25*np.ones((2,2),np.float32)
	lastFeatures = []	
	for img in features:
		tote = cv2.sumElems(img)[0]  # sumElems returns a 4-tuple; take channel 0
		tote = tote/img.size
		img = img/tote
		print img
		print cv2.sumElems(img)
		print img.size
		lastFeatures.append(img)
	return lastFeatures
Example 22
def reconstruct(pitch,fs,coffs,syllable):
	gain = coffs[0]
	coffs[0] = 1
	x = np.double(fs)/np.double(pitch)
	num = np.ceil((pitch*3)/10.0)
	# 300 ms of excitation, i.e. pitch*3/10 pitch pulses
	if syllable != "s":

		ex_input = np.zeros(int(num*x))
		for i in range(0, int(num)):
			ex_input[int(i*x)-1] = 1

	    # Filtering the signal
		
		out = signal.filtfilt([gain],coffs,ex_input)
		d_num = [1]
		d_den = [1,-0.9]
		out = signal.filtfilt(d_num,d_den,out)  # De-emphasis
		out = np.int16(out/np.max(np.abs(out)) * 32767)
	else:
		ex_input = np.random.normal(0, 1, int(num))
		out = signal.filtfilt([gain],coffs,ex_input)
		out = np.int16(out/np.max(np.abs(out)) * 32767)

	return out	
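
A hypothetical usage sketch for reconstruct (the coefficient layout follows the function above: coffs[0] is the gain and the remaining entries the all-pole denominator; the values are illustrative, not from a real LPC analysis, and note that the function overwrites coffs[0]):

import numpy as np
coffs = np.array([0.8, -0.9])                # gain 0.8, one stable pole at 0.9
voiced = reconstruct(100, 8000, coffs, "a")  # ~300 ms of a 100 Hz voiced sound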
Example 23
    def fit_eyes(self):
        def midpoint(a, b):
            return np.mean([a, b], axis=0)

        def vector_cos(a, b):
            return np.dot(
                a, b)/(0.001+np.linalg.norm(a+0.001)*np.linalg.norm(b+0.001))

        size = tuple(self.exconf["eyefitting"]["size"])
        avg_eye_left = np.array([size[0]/2 - size[0]/5, size[1]*0.3])
        avg_eye_right = np.array([size[0]/2 + size[0]/5, size[1]*0.3])

        m = self.imread()
        if m is None:
            return None

        avg_eye_v = np.int16(avg_eye_right - avg_eye_left)
        eye_v = np.int16(self.eye_right - self.eye_left)

        angd = np.rad2deg(np.arccos(vector_cos(avg_eye_v, eye_v)))

        scale_f = np.linalg.norm(
            0.001+avg_eye_v)/(0.001+np.linalg.norm(0.001+eye_v))

        M = cv2.getRotationMatrix2D(
            tuple(
                midpoint(
                    self.eye_left, self.eye_right)), -angd, scale_f)
        M[:, 2] += midpoint(avg_eye_left, avg_eye_right) - \
            midpoint(self.eye_left, self.eye_right)

        m2 = cv2.warpAffine(m, M, size)
        #m3 = self.preproc(m2)
        return m2
Example 24
 def pre_process(self):
     self.drop = np.int16(self.parameters['row_drop'])
     self.smin, self.smax = np.int16(self.parameters['search_area'])
     self.search_radius = np.float32(self.parameters['search_radius'])
     self.search_step = np.float32(self.parameters['step'])
     self.ratio = np.float32(self.parameters['ratio'])
     self.est_cor = self.parameters['start_pixel']
     self.broadcast_method = str(self.parameters['broadcast_method'])
     self.error_msg_1 = ""
     self.error_msg_2 = ""
     self.error_msg_3 = "" 
     if self.broadcast_method not in ('mean', 'median', 'linear_fit', 'nearest'):
         logging.warn("!!! WARNING !!! Selected broadcasting method is"
                      " out of the list. Use 'median' instead")
         self.error_msg_3 = "!!! WARNING !!! Selected broadcasting "\
             "method is out of the list. Use the default option: 'median'"
         self.broadcast_method = 'median'
     in_pData = self.get_plugin_in_datasets()[0]
     data = self.get_in_datasets()[0]
     starts,stops,steps = data.get_preview().get_starts_stops_steps()[0:3]
     start_ind = starts[1]
     stop_ind = stops[1]
     step_ind = steps[1]
     name = data.get_name()
     pre_start = self.exp.meta_data.get(name + '_preview_starts')[1]
     pre_stop = self.exp.meta_data.get(name + '_preview_stops')[1]
     pre_step = self.exp.meta_data.get(name + '_preview_steps')[1]        
     self.origin_prev = np.arange(pre_start,pre_stop, pre_step)
     self.plugin_prev = self.origin_prev[start_ind:stop_ind:step_ind]
Example 25
 def process_frames(self, data):        
     if len(data[0].shape)>2:
         sino = np.mean(data[0],axis=1)
     else:
         sino = data[0]
     (nrow, ncol) = sino.shape
     dsp_row = 1
     dsp_col = 1                    
     if ncol>2000:
         dsp_col = 4             
     if nrow>2000:
         dsp_row = 2        
     # Denoising 
     # There's a critical reason to use different window sizes
     # between coarse and fine search.
     sino_csearch = ndi.gaussian_filter(sino, (3,1), mode='reflect')
     sino_fsearch = ndi.gaussian_filter(sino, (2,2), mode='reflect')
     sino_dsp = self._downsample(sino_csearch, dsp_row, dsp_col)
     fine_srange = max(self.search_radius, dsp_col)
     off_set = 0.5*dsp_col if dsp_col>1 else 0.0
     if self.est_cor is None:
         self.est_cor = (ncol-1.0)/2.0
     else:
         self.est_cor = np.float32(self.est_cor)
     start_cor = np.int16(
         np.floor(1.0 * (self.est_cor + self.smin) / dsp_col))        
     stop_cor = np.int16(
         np.ceil(1.0 * (self.est_cor + self.smax) / dsp_col))
     raw_cor = self._coarse_search(sino_dsp, start_cor, stop_cor,
                                    self.ratio, self.drop)
     cor = self._fine_search(
         sino_fsearch, raw_cor*dsp_col + off_set, fine_srange,
          self.search_step, self.ratio, self.drop)
     return [np.array([cor]), np.array([cor])]
Example 26
def pitch_strength_all_candidates(f, L, pc):
	# Create pitch strength matrix
	S = np.zeros((pc.size, L.shape[1]))

	# Define integration regions
	k = np.zeros(pc.size + 1)

	for j in xrange(0, k.size - 1):
		k[j+1] = k[j] + np.argmax(f[np.int16(k[j]):] > pc[j]/4)

	k = k[1:]

	# Create loudness normalization matrix
	N = np.sqrt(np.flipud(np.cumsum(np.flipud(L * L), 0)))

	for j in xrange(0, pc.size):
		# Normalize loudness
		n = N[np.int16(k[j]),:]
		n[n == 0] = -np.inf # to make zero-loudness equal zero after normalization
		NL = L[np.int16(k[j]) :] / np.tile(n, (L.shape[0] - k[j], 1))

		# Compute pitch strength
		S[j] = pitch_strength_one_candidate(f[np.int16(k[j]) :], NL, pc[j])

	return S
Example 27
 def _fine_search(self, sino, start_cor, search_radius,
                   search_step, ratio, drop):
     """
     Fine search for finding the rotation center.
     """
     # Denoising        
     (nrow, ncol) = sino.shape
     flip_sino = np.fliplr(sino)
     search_radius = np.clip(np.abs(search_radius), 1, ncol//10 - 1)
     search_step = np.clip(np.abs(search_step), 0.1, 1.1)
     start_cor = np.clip(start_cor, search_radius, ncol - search_radius - 1)
     cen_fliplr = (ncol - 1.0) / 2.0
     list_cor = start_cor + np.arange(
             -search_radius, search_radius + search_step, search_step)
     comp_sino = np.flipud(sino) # Used to avoid local minima
     list_metric = np.zeros(len(list_cor), dtype = np.float32)
     mask = self._create_mask(2 * nrow, ncol, 0.5 * ratio * ncol, drop)
     for i, cor in enumerate(list_cor):
         shift = 2.0*(cor - cen_fliplr)
         sino_shift = ndi.interpolation.shift(
             flip_sino, (0, shift), order = 3, prefilter = True)
         if shift>=0:
             shift_int = np.int16(np.ceil(shift))
             sino_shift[:,:shift_int] = comp_sino[:,:shift_int]
         else:
             shift_int = np.int16(np.floor(shift))
             sino_shift[:,shift_int:] = comp_sino[:,shift_int:]
         mat1 = np.vstack((sino, sino_shift))
         list_metric[i] = np.mean(
             np.abs(np.fft.fftshift(fft.fft2(mat1)))*mask)
     min_pos = np.argmin(list_metric)
     cor = list_cor[min_pos]
     return cor
Example 28
def bbox(image, scale=256, margin=1, fix_points='all'):

    image = np.hstack([np.zeros_like(image),image, np.zeros_like(image)])
    image = np.vstack([np.zeros_like(image),image, np.zeros_like(image)])


    lm = landmarks(image)
    lm = lm[:,p[fix_points]]
    if np.any(np.isnan(lm)):
        return np.zeros([scale,scale,image.shape[2]]).astype(np.uint8), lm

    m = lm.mean(1)
    s = lm.std(1).mean()
    x1 = np.int16(m+2*s+(1+margin)*s)
    x0 = np.int16(m-2*s-(1+margin)*s)

    lm = lm-x0[:,None]
    image = image[x0[1]:x1[1],x0[0]:x1[0]]
    s1 = image.shape[0]

    image = transform.resize(image,[scale,scale,3])
    image = np.uint8(image*255)
    s2 = image.shape[0]

    lm = lm*(float(s2)/s1)

    return image,lm
Example 29
def conv_int16(value):
    if len(value) == 0:
        rval = imiss
    else:
        rval = int(value)
    assert numpy.int16(rval) == numpy.int32(rval), "conv_int16: value out of range"
    return numpy.int16(rval)
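
A short usage sketch (imiss is assumed to be the module's missing-value sentinel and to fit in int16):

print(conv_int16('123'))   # -> 123 as numpy.int16
print(conv_int16(''))      # -> imiss
# conv_int16('70000') trips the assertion: 70000 does not fit in int16.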
Example 30
 def _fine_search(self, sino, raw_cor):
     (Nrow, Ncol) = sino.shape
     centerfliplr = (Ncol + 1.0)/2.0-1.0
     # Shift used to align sino2 (the flipped sinogram) with the raw CoR
     shiftsino = np.int16(2*(raw_cor-centerfliplr))
     sino2 = np.roll(np.fliplr(sino[1:]), shiftsino, axis=1)
     lefttake = 0
     righttake = Ncol-1
     search_rad = self.parameters['search_radius']
     if raw_cor <= centerfliplr:
         lefttake = np.ceil(search_rad+1)
         righttake = np.floor(2*raw_cor-search_rad-1)
     else:
         lefttake = np.ceil(raw_cor-(Ncol-1-raw_cor)+search_rad+1)
         righttake = np.floor(Ncol-1-search_rad-1)
     Ncol1 = righttake-lefttake + 1
     mask = self._create_mask(2*Nrow-1, Ncol1,
                              0.5*self.parameters['ratio']*Ncol)
     numshift = np.int16((2*search_rad+1.0)/self.parameters['step'])
     listshift = np.linspace(-search_rad, search_rad, num=numshift)
     listmetric = np.zeros(len(listshift), dtype=np.float32)
     num1 = 0
     for i in listshift:
         logging.debug("list shift %d", i)
         sino2a = ndi.interpolation.shift(sino2, (0, i), prefilter=False)
         sinojoin = np.vstack((sino, sino2a))
         listmetric[num1] = np.sum(np.abs(fft.fftshift(
             fft.fft2(sinojoin[:, lefttake:righttake + 1])))*mask)
         num1 = num1 + 1
     minpos = np.argmin(listmetric)
     rotcenter = raw_cor + listshift[minpos]/2.0
     return rotcenter, listmetric
Example 31
def lshr16(x, y):
    return np.int16(np.uint16(x) >> np.uint16(y))
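
lshr16 is a logical shift right: both operands are reinterpreted as unsigned 16-bit values, so zeros are shifted in rather than copies of the sign bit. A quick check (sketch):

import numpy as np
print(lshr16(np.int16(-2), 1))  # 32767: 0xFFFE >> 1 == 0x7FFF
print(np.int16(-2) >> 1)        # -1: the arithmetic shift, for contrast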
Example 32
    def test_array_1d(self):
        A = np.arange(1050)

        ctx = t.Ctx()
        dom = t.Domain(ctx,
                       t.Dim(ctx, domain=(0, 1049), tile=100, dtype=np.int64))
        att = t.Attr(ctx, dtype=A.dtype)
        T = t.DenseArray(ctx, self.path("foo"), domain=dom, attrs=(att, ))

        self.assertEqual(len(A), len(T))
        self.assertEqual(A.ndim, T.ndim)
        self.assertEqual(A.shape, T.shape)

        self.assertEqual(1, T.nattr)
        self.assertEqual(A.dtype, T.attr(0).dtype)

        # check empty array
        B = T[:]

        self.assertEqual(A.shape, B.shape)
        self.assertEqual(A.dtype, B.dtype)
        self.assertIsNone(T.nonempty_domain())

        # check set array
        T[:] = A

        self.assertEqual(((0, 1049), ), T.nonempty_domain())

        # check slicing
        assert_array_equal(A, np.array(T))
        assert_array_equal(A, T[:])
        assert_array_equal(A, T[...])
        assert_array_equal(A, T[slice(None)])
        assert_array_equal(A[:10], T[:10])
        assert_array_equal(A[10:20], T[10:20])
        assert_array_equal(A[-10:], T[-10:])

        # ellipsis
        assert_array_equal(A[:10, ...], T[:10, ...])
        assert_array_equal(A[10:50, ...], T[10:50, ...])
        assert_array_equal(A[-50:, ...], T[-50:, ...])
        assert_array_equal(A[..., :10], T[..., :10])
        assert_array_equal(A[..., 10:20], T[..., 10:20])
        assert_array_equal(A[..., -50:], T[..., -50:])

        # across tiles
        assert_array_equal(A[:150], T[:150])
        assert_array_equal(A[-250:], T[-250:])

        # point index
        self.assertEqual(A[0], T[0])
        self.assertEqual(A[-1], T[-1])

        # point index with all index types
        self.assertEqual(A[123], T[np.int8(123)])
        self.assertEqual(A[123], T[np.uint8(123)])
        self.assertEqual(A[123], T[np.int16(123)])
        self.assertEqual(A[123], T[np.uint16(123)])
        self.assertEqual(A[123], T[np.int64(123)])
        self.assertEqual(A[123], T[np.uint64(123)])
        self.assertEqual(A[123], T[np.int32(123)])
        self.assertEqual(A[123], T[np.uint32(123)])

        # basic step
        assert_array_equal(A[:50:2], T[:50:2])
        assert_array_equal(A[:2:50], T[:2:50])
        assert_array_equal(A[10:-1:50], T[10:-1:50])

        # indexing errors
        with self.assertRaises(IndexError):
            T[:, :]
        with self.assertRaises(IndexError):
            T[:, 50]
        with self.assertRaises(IndexError):
            T[50, :]
        with self.assertRaises(IndexError):
            T[0, 0]

        # check single ellipsis
        with self.assertRaises(IndexError):
            T[..., 1:5, ...]

        # check partial assignment
        B = np.arange(1e5, 2e5).astype(A.dtype)
        T[190:310] = B[190:310]

        assert_array_equal(A[:190], T[:190])
        assert_array_equal(B[190:310], T[190:310])
        assert_array_equal(A[310:], T[310:])
Example 33
def fptosi_T_i16(x):
    return np.int16(np.trunc(x))
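
fptosi truncates toward zero before casting (the name follows LLVM's fptosi mnemonic). A quick check (sketch):

print(fptosi_T_i16(-3.7))  # -3
print(fptosi_T_i16(3.7))   # 3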
Example 34
def zext_i64_i16(x):
    return np.int16(np.uint64(x))
Example 35
def zext_i32_i16(x):
    return np.int16(np.uint32(x))
Example 36
def zext_i16_i16(x):
    return np.int16(np.uint16(x))
Example 37
def sext_T_i16(x):
    return np.int16(x)
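
A quick behavioral check of the zero/sign-extension helpers above (a sketch; the names follow LLVM-style zext/sext mnemonics):

import numpy as np
print(zext_i16_i16(np.int16(-1)))   # -1: the 16-bit pattern is unchanged
print(zext_i32_i16(np.int32(255)))  # 255: low 16 bits of the zero-extended value
print(sext_T_i16(np.int8(-1)))      # -1: widening keeps the sign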
Example 38
# (The opening of this snippet is truncated in the source: the keyword arguments
# below belong to a cv2.StereoSGBM_create(...) call assigned to left_matcher,
# whose leading arguments were lost.)
left_matcher = cv2.StereoSGBM_create(speckleWindowSize=100,
                                     speckleRange=32,
                                     preFilterCap=63,
                                     mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)

right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
#WLS Filter
lmbda = 80000
sigma = 1.7
visual_multiplier = 1.0
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(left, right)
dispr = right_matcher.compute(right, left)
displ = np.int16(displ)
dispr = np.int16(dispr)
filteredImg = wls_filter.filter(displ, left, None, dispr)
# cv2.imshow('Filtered Map', filteredImg)
filteredImg = cv2.normalize(src=filteredImg,
                            dst=filteredImg,
                            beta=0,
                            alpha=255,
                            norm_type=cv2.NORM_MINMAX)
# print(filteredImg[:10,:])
filteredImg = np.uint8(filteredImg)
# cv2.imshow('Disparity Map', filteredImg)
# cv2.waitKey(30)
print "Computed Disparity"
# print("Shape:",filteredImg.shape)
x = filteredImg.reshape((filteredImg.shape[0] * filteredImg.shape[1], 1))
Example 39
    QSTRING: 's',
    QSYMBOL: 'S',
    QCHAR: 'b',
    QMONTH: 'i',
    QDATE: 'i',
    QDATETIME: 'd',
    QMINUTE: 'i',
    QSECOND: 'i',
    QTIME: 'i',
    QTIMESPAN: 'q',
    QTIMESTAMP: 'q',
}

# null definitions
_QNULL1 = numpy.int8(-2**7)
_QNULL2 = numpy.int16(-2**15)
_QNULL4 = numpy.int32(-2**31)
_QNULL8 = numpy.int64(-2**63)
_QNAN32 = numpy.frombuffer(b'\x00\x00\xc0\x7f', dtype=numpy.float32)[0]
_QNAN64 = numpy.frombuffer(b'\x00\x00\x00\x00\x00\x00\xf8\x7f',
                           dtype=numpy.float64)[0]
_QNULL_BOOL = numpy.bool_(False)
_QNULL_SYM = numpy.string_('')
_QNULL_GUID = uuid.UUID('00000000-0000-0000-0000-000000000000')

QNULLMAP = {
    QGUID: ('0Ng', _QNULL_GUID, lambda v: v == _QNULL_GUID),
    QBOOL: ('0b', _QNULL_BOOL, lambda v: v == numpy.bool_(False)),
    QBYTE: ('0x00', _QNULL1, lambda v: v == _QNULL1),
    QSHORT: ('0Nh', _QNULL2, lambda v: v == _QNULL2),
    QINT: ('0N', _QNULL4, lambda v: v == _QNULL4),
Example 40
def highest_power_of_2(x):
    """Given a number x, find the highest power of 2 that is <= x."""
    res = np.power(2, np.floor(np.log2(x)))
    res = np.int16(res)  # note: the int16 cast caps usable inputs below 2**15
    return res
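
Usage sketch:

print(highest_power_of_2(100))   # 64
print(highest_power_of_2(4096))  # 4096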
Example 41
import h5py
import numpy as np

try:
    from skimage.transform import resize
except ImportError:
    raise ImportError('Could not import skimage!')

from mintpy.objects import (dataTypeDict,
                            geometryDatasetNames,
                            ifgramDatasetNames)
from mintpy.utils import readfile, ptime, utils0 as ut


BOOL_ZERO = np.bool_(0)
INT_ZERO = np.int16(0)
FLOAT_ZERO = np.float32(0.0)
CPX_ZERO = np.complex64(0.0)

dataType = np.float32


########################################################################################
class ifgramStackDict:
    '''
    IfgramStack object for a set of InSAR pairs from the same platform and track.

    Example:
        from mintpy.objects.insarobj import ifgramStackDict
        pairsDict = {('20160524','20160530'):ifgramObj1,
                     ('20160524','20160605'):ifgramObj2,
Example 42
 def identity_charge() -> np.ndarray:
   return np.int16(0)
Example 43
 def getLastResults(self):
     return int16(self.readRegister(
         self.REG_POINTER_CONVERT)) * self.gain_scaling[self.gain]
Example 44
    def read2h5(self, infile):
        # main routine
        #         print(infile)

        try:
            f = h5py.File(infile, 'r')
        except:
            errordir = os.path.dirname(infile) + os.sep + 'error'
            if not os.path.exists(errordir):
                os.makedirs(errordir)
            errors = glob.glob(infile[0:-3] + '*')
            for error in errors:
                shutil.move(error, errordir + os.sep + os.path.basename(error))
            return

        # output file name
        millisecond = f['Millisecond/Millisecond'][()]
        hour = np.min(millisecond) // (1000 * 60 * 60)
        minute = (np.min(millisecond) - hour * (1000 * 60 * 60)) // (1000 * 60)
        second = (np.min(millisecond) - hour *
                  (1000 * 60 * 60) - minute * 1000 * 60) // (1000)

        time1 = datetime.datetime(int('20' + os.path.basename(infile)[6:8]), 1, 1) + \
                datetime.timedelta(days=int(os.path.basename(infile)[8:11]) - 1) + \
                datetime.timedelta(hours=int(hour)) + datetime.timedelta(minutes=int(minute)) + \
                datetime.timedelta(seconds=int(second))

        hour = np.max(millisecond) // (1000 * 60 * 60)
        minute = (np.max(millisecond) - hour * (1000 * 60 * 60)) // (1000 * 60)
        second = (np.max(millisecond) - hour *
                  (1000 * 60 * 60) - minute * 1000 * 60) // (1000)

        time2 = datetime.datetime(int('20' + os.path.basename(infile)[6:8]), 1, 1) + \
                datetime.timedelta(days=int(os.path.basename(infile)[8:11]) - 1) + \
                datetime.timedelta(hours=int(hour)) + datetime.timedelta(minutes=int(minute)) + \
                datetime.timedelta(seconds=int(second))

        # del millisecond

        tree = ET.ElementTree(file=infile[0:-2] + 'xml')
        root = tree.getroot()
        node = root[0].find("OrbitNumber")

        outfile = os.path.dirname(infile) + os.sep + 'H1A_OPER_OCT_L1B_' + time1.strftime("%Y%m%dT%H%M%S") + '_' + \
                  time2.strftime("%Y%m%dT%H%M%S") + '_' + node.text + '_10.h5'

        if os.access(outfile, os.R_OK):
            return outfile

        f_new = h5py.File(outfile, 'a')

        # Calibration
        calibration = f_new.create_group('Calibration')
        data = np.zeros((10, 1))
        calibration.create_dataset('Calibration Coefficients Offsets factor',
                                   (data.shape[0], data.shape[1]),
                                   dtype='f',
                                   data=data)
        del data
        data = np.ones((10, 1))
        calibration.create_dataset('Calibration Coefficients Scale factor',
                                   (data.shape[0], data.shape[1]),
                                   dtype='f',
                                   data=data)
        del data
        data = np.ones((10, 1))
        calibration.create_dataset('Mirror-side Correction Scale Factors',
                                   (data.shape[0], 1),
                                   dtype='f',
                                   data=data[:, 0])
        data = np.zeros((10, 1))
        calibration.create_dataset('Mirror-side Correction Offsets Factors',
                                   (data.shape[0], 1),
                                   dtype='f',
                                   data=data[:, 0])
        del data
        data = np.zeros((10, 1))
        calibration.create_dataset('Time-dependent Correction Constant Terms',
                                   (data.shape[0], data.shape[1]),
                                   dtype='f',
                                   data=data)
        del data
        data = np.ones((10, 1))
        calibration.create_dataset(
            'Time-dependent Correction Linear Coefficients',
            (data.shape[0], data.shape[1]),
            dtype='f',
            data=data)
        del data
        data = np.ones((10, 1))
        calibration.create_dataset(
            'Time-dependent Correction Quadratic Coefficients',
            (data.shape[0], data.shape[1]),
            dtype='f',
            data=data)
        del data
        data = np.array([[1], [1], [1], [1], [1], [1], [1], [1]])
        calibration.create_dataset('Vicarious Calibration gan factor',
                                   (data.shape[0], data.shape[1]),
                                   dtype='f',
                                   data=data)
        del data
        calibration.attrs['Calibration Entry Year'] = np.int16(2019)
        calibration.attrs['Calibration Entry Day'] = np.int16(330)
        calibration.attrs['Calibration Reference Year'] = np.int16(0)
        calibration.attrs['Calibration Reference Day'] = np.int16(0)
        calibration.attrs['Calibration Reference Minute'] = np.int32(0)
        calibration.attrs['Visible Channel Radiance Data Unit'] = np.string_(
            'mWcm-2 um-1 sr-1')
        calibration.attrs['Infrared Channel Radiance Data Unit'] = np.string_(
            'mWcm-2 um-1 sr-1')

        # geophysical Data
        group = f_new.create_group('Geophysical Data')
        for j, dataset_of_group in enumerate(f['Scan image data'].items()):
            dataset = group.create_dataset(dataset_of_group[0],
                                           dataset_of_group[1].shape,
                                           dtype=np.float32,
                                           data=dataset_of_group[1][()])
            dataset.attrs['Unit'] = np.string_('None')
            dataset.attrs['long_name'] = np.string_('Top of Atmosphere B' +
                                                    dataset_of_group[0][2:] +
                                                    'nm/um radiance counts')
        del group, dataset

        # Extra Data
        group = f_new.create_group('Extra Data')
        Ext_xxx = [
            '412', '443', '490', '520', '565', '670', '750', '865', '11', '12'
        ]
        try:
            L0file = glob.glob(
                os.path.dirname(infile) + os.sep +
                os.path.basename(infile)[0:-6] + 'L0.bz2')[0]
            L0 = L0parase()
            data = L0.run_this_function(L0file)
        #         print('LA')
        except:
            data = np.zeros(shape=(10, dataset_of_group[1].shape[0], 43))
        for j, ext_x in enumerate(Ext_xxx):
            ext = group.create_dataset('Ext_' + ext_x, (data.shape[1], 43),
                                       dtype='uint16',
                                       data=data[j, :, :])
            if len(ext_x) == 2:
                ext.attrs['long_name'] = np.string_('B' + ext_x +
                                                    'um Extra data counts')
            else:
                ext.attrs['long_name'] = np.string_('B' + ext_x +
                                                    'nm Extra data counts')
        del group, ext

        # navigaton data
        group = f_new.create_group('Navigation Data')

        # compute the four angles (solar/satellite zenith and azimuth)

        # =======================================================================================================================
        lat = f['Pixels location data/latitude'][()]
        lon = f['Pixels location data/longitude'][()]
        millisecond = f['Millisecond/Millisecond'][()]
        # year = np.ones(shape=lat.shape) * int('20' + os.path.basename(infile)[6:8])
        # # year= np.array([[2003,2003],[2003,2004]])
        # # month=np.array([[1,2],[6,12]])
        # # day=np.array([[2,16],[14,2]])
        # DOY = np.ones(shape=lat.shape) * int(os.path.basename(infile)[8:11])
        #
        # millisecond = np.repeat(millisecond, 102, axis=1)
        # # hour = np.array([[1, 2], [3, 4]])
        # hour = np.trunc(millisecond / 1000 / 3600)
        # minu = np.trunc((millisecond - hour * 1000 * 3600) / 1000 / 60)
        # sec = (millisecond - hour * 1000 * 3600 - minu * 1000 * 60) / 1000
        # # minu=np.array([[23,23],[23,22]])
        # # sec=np.array([[1,2],[24,55]])
        #
        # TimeZone = np.trunc((lon - np.sign(lon) * 7.5) / 15 + np.sign(lon))
        #
        # # N0   sitar=θ
        # N0 = 79.6764 + 0.2422 * (year - 1985) - np.trunc((year - 1985) / 4.0)
        # sitar = 2 * np.pi * (DOY - N0) / 365.2422
        # ED1 = 0.3723 + 23.2567 * np.sin(sitar) + 0.1149 * np.sin(2 * sitar) - 0.1712 * np.sin(
        #     3 * sitar) - 0.758 * np.cos(
        #     sitar) + 0.3656 * np.cos(2 * sitar) + 0.0201 * np.cos(3 * sitar)
        # ED = ED1 * np.pi / 180  # ED本身有符号
        #
        # dLon = (lon - TimeZone * 15.0) * np.sign(lon)
        #
        # # 时差
        # Et = 0.0028 - 1.9857 * np.sin(sitar) + 9.9059 * np.sin(2 * sitar) - 7.0924 * np.cos(sitar) - 0.6882 * np.cos(
        #     2 * sitar)
        # gtdt1 = hour + minu / 60.0 + sec / 3600.0 + dLon / 15  # 地方时
        # gtdt = gtdt1 + Et / 60.0
        # dTimeAngle1 = 15.0 * (gtdt - 12)
        # dTimeAngle = dTimeAngle1 * np.pi / 180
        # latitudeArc = lat * np.pi / 180
        #
        # # 高度角计算公式
        # HeightAngleArc = np.arcsin(
        #     np.sin(latitudeArc) * np.sin(ED) + np.cos(latitudeArc) * np.cos(ED) * np.cos(dTimeAngle))
        # # 方位角计算公式
        # CosAzimuthAngle = (np.sin(HeightAngleArc) * np.sin(latitudeArc) - np.sin(ED)) / np.cos(HeightAngleArc) / np.cos(
        #     latitudeArc)
        # AzimuthAngleArc = np.arccos(CosAzimuthAngle)
        # HeightAngle = HeightAngleArc * 180 / np.pi
        # sza = 90 - HeightAngle
        # AzimuthAngle1 = AzimuthAngleArc * 180 / np.pi
        # saa = 180 + AzimuthAngle1 * np.sign(dTimeAngle)
        # ========================================================================================================================
        # compute sza/saa with pysolar
        time = [*map(
            lambda t: datetime.datetime(int('20' + os.path.basename(infile)[6:8]), 1, 1, tzinfo=datetime.timezone.utc) + \
                      datetime.timedelta(days=(int(os.path.basename(infile)[8:11]) - 1)) + \
                      datetime.timedelta(milliseconds=t[0]), millisecond.tolist())]

        time = np.repeat(np.array(time).reshape(-1, 1), 102, axis=1)
        time = time.flatten()
        lat = lat.flatten()
        lon = lon.flatten()

        sza = np.array([
            *map(lambda sx, sy, t: 90 - get_altitude(sx, sy, t), lat, lon,
                 time)
        ])
        sza = sza.reshape(-1, 102)
        saa = np.array(
            [*map(lambda sx, sy, t: get_azimuth(sx, sy, t), lat, lon, time)])
        saa = saa.reshape(-1, 102)

        # sza = np.array([*map(lambda x, y, t: 90.0 - get_altitude(x, y, t), lat, lon, time)]).reshape(-1, 102)
        # saa = np.array([*map(lambda x, y, t: get_azimuth(x, y, t), lat, lon, time)]).reshape(-1, 102)
        center_lat = f['Center Latitude/Center Latitude'][()]
        center_lon = f['Center Longitude/Center Longitude'][()]
        center_lat = np.repeat(center_lat, 102, axis=1)
        center_lon = np.repeat(center_lon, 102, axis=1)
        center_lat = center_lat.flatten()
        center_lon = center_lon.flatten()
        # pyorbital.orbital.get_observer_look(sat_lon, sat_lat, sat_alt, utc_time, lon, lat, alt)
        view_angle = np.array([
            *map(
                lambda sx, sy, t, x, y: orbital.get_observer_look(
                    np.atleast_1d(sx), np.atleast_1d(sy), np.atleast_1d(798),
                    t, np.atleast_1d(x), np.atleast_1d(y), np.atleast_1d(0)),
                center_lon, center_lat, time, lon, lat)
        ])
        vaa = (view_angle[:, 0]).reshape(-1, 102)
        vza = (90 - view_angle[:, 1]).reshape(-1, 102)
        lon = lon.reshape(-1, 102)
        lat = lat.reshape(-1, 102)

        parameters = [lon, lat, sza, saa, vza, vaa]
        parameter_ID = [
            'Longitude', 'Latitude', 'Solar Zenith Angle',
            'Solar Azimuth Angle', 'Satellite Zenith Angle',
            'Satellite Azimuth Angle'
        ]

        for ID, parameter in enumerate(parameters):
            nl = np.arange(parameter.shape[0])
            # when the longitude crosses the 180° meridian,
            if parameter_ID[ID] == 'Longitude':
                if parameter.max() - parameter.min() > 300:  # crossing the 180° meridian
                    parameter[parameter < 0] = parameter[
                        parameter < 0] + 360.0  # make both sides continuous so interpolation is correct
                    parameter_inter = np.array(
                        [*map(lambda n: self.interp(parameter[n, :]), nl)])
                    parameter_inter[parameter_inter > 180] = parameter_inter[
                        parameter_inter > 180] - 360.0  # shift back to [-180, 180]
                    group.create_dataset(
                        parameter_ID[ID],
                        (parameter_inter.shape[0], parameter_inter.shape[1]),
                        dtype=np.float32,
                        data=parameter_inter)
                    continue  # longitude already written; nothing more to do
            # compute row by row
            parameter_inter = np.array(
                [*map(lambda n: self.interp(parameter[n, :]), nl)])
            group.create_dataset(
                parameter_ID[ID],
                (parameter_inter.shape[0], parameter_inter.shape[1]),
                dtype='f',
                data=parameter_inter)

        group.attrs['Navigation Point Counts'] = np.int32(
            parameter_inter.shape[1])
        group.attrs['First Navigation Points'] = np.int32(1)
        group.attrs['Pixel-intervals of Navigation Point'] = np.int32(1)
        del group, parameters

        #         QC Attributes
        group = f_new.create_group('QC Attributes')
        parameters = ['Staturated Pixel Counts', 'Zero Pixel Counts']
        data = f['Saturated Pixels/Saturated Pixels'][()]
        group.create_dataset('Staturated Pixel Counts',
                             (data.shape[0], data.shape[1]),
                             dtype='uint16',
                             data=data)
        data = f['Zero Pixels/Zero Pixels'][()]
        group.create_dataset('Zero Pixel Counts',
                             (data.shape[0], data.shape[1]),
                             dtype='uint16',
                             data=data)
        group.attrs['Missing Frame Counts'] = np.int32(0)
        del group, data

        #         Scan Line Attributes
        group = f_new.create_group('Scan Line Attributes')
        # data = (f.select('Attitude Parameters')).get()
        # group.create_dataset('Attitude Parameters', (data.shape[0], data.shape[1]), dtype=type(data[0][0]), data=data)
        # del data
        data = f['Center Latitude/Center Latitude'][()]
        group.create_dataset('Center Latitude', (data.shape[0], data.shape[1]),
                             dtype=type(data[0][0]),
                             data=data)
        del data

        data = f['Center Longitude/Center Longitude'][()]
        group.create_dataset('Center Longitude',
                             (data.shape[0], data.shape[1]),
                             dtype=type(data[0][0]),
                             data=data)
        del data

        data = f['Center Solar Zenith/Center Solar Zenith'][()]
        group.create_dataset('Center Solar Zenith Angle',
                             (data.shape[0], data.shape[1]),
                             dtype=type(data[0][0]),
                             data=data)
        del data

        data = f['Start Latitude/Start Latitude'][()]
        group.create_dataset('Start Latitude', (data.shape[0], data.shape[1]),
                             dtype=type(data[0][0]),
                             data=data)
        del data

        data = f['Start Longitude/Start Longitude'][()]
        group.create_dataset('Start Longitude', (data.shape[0], data.shape[1]),
                             dtype=type(data[0][0]),
                             data=data)
        del data

        data = f['End Latitude/End Latitude'][()]
        group.create_dataset('End Latitude', (data.shape[0], data.shape[1]),
                             dtype=type(data[0][0]),
                             data=data)
        del data

        data = f['End Longitude/End Longitude'][()]
        group.create_dataset('End Longitude', (data.shape[0], data.shape[1]),
                             dtype=type(data[0][0]),
                             data=data)

        frame = (np.arange(data.shape[0])) % 8
        frame.shape = (frame.shape[0], 1)
        group.create_dataset('Frame Number', (frame.shape[0], frame.shape[1]),
                             dtype='int16',
                             data=frame)
        del data

        # data = (f.select('Infrared Channel Calibration Data')).get()
        # group.create_dataset('Infrared Channel Calibration Data', (data.shape[0], data.shape[1], data.shape[2]),
        #                      dtype=type(data[0][0][0]), data=data)
        # del data

        data = f['Millisecond/Millisecond'][()]
        millisecond = group.create_dataset('Millisecond',
                                           (data.shape[0], data.shape[1]),
                                           dtype=np.float64,
                                           data=data)
        millisecond.attrs['Fillin_value'] = -999.0
        millisecond.attrs['Unit'] = np.string_(
            'milliseconds since 00:00:00 on this day')
        del data

        # data = (f.select('Mirror-side Flag')).get()
        # group.create_dataset('Mirror-side Flag', (data.shape[0], data.shape[1]), dtype='|S1', data=data)
        # del data
        # no ORB_VEC dataset in HDF
        # data = (f.select('ORB_VEC')).get()
        # group.create_dataset('ORB_VEC', (data.shape[0], data.shape[1]), dtype=type(data[0][0]), data=data)
        # del data
        group.attrs['Instrument Parameters'] = np.string_(
            '412nm,443nm,490nm,520nm,565nm,670nm,750nm,865nm,10.3-11.4um,'
            '11.5-12.5um')

        #         file attributes

        f_new.attrs.create('Calibration Flag',
                           'None',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Calibration Version',
                           '1.00',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('DayorNight', 'D', shape=(1, ), dtype='S10')
        # Earth-Sun distance in AU: 1 - 0.01672*cos(0.9856*(DOY - 4)); the
        # 0.9856 deg/day factor is in degrees, so convert before np.cos
        distance = 1 - 0.01672 * np.cos(
            np.deg2rad(0.9856 * (int(os.path.basename(infile)[8:11]) - 4)))
        f_new.attrs.create('Earth-Sun Distance',
                           distance,
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Easternmost Longitude',
                           np.max(lon),
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('End Center Longitude',
                           center_lon[-1],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('End Center Latitude',
                           center_lat[-1],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('GEO Correction Method',
                           'Unknown',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Input File',
                           os.path.basename(infile),
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Latitude Unit', 'degree', shape=(1, ), dtype='S10')
        f_new.attrs.create('Longitude Unit',
                           'degree',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Lower Left Latitude',
                           lat[-1, 0],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Lower Left Longitude',
                           lon[-1, 0],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Lower Right Latitude',
                           lat[-1, -1],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Lower Right Longitude',
                           lon[-1, -1],
                           shape=(1, ),
                           dtype=np.float32)

        node = root[0].find("NodeCrossingTime")
        time = datetime.datetime(int('20' + node.text[0:2]), 1, 1) + datetime.timedelta(days=int(node.text[2:5]) - 1) + \
               datetime.timedelta(hours=int(node.text[5:7])) + datetime.timedelta(minutes=int(node.text[7:9]))
        f_new.attrs.create('Node Crossing Time',
                           time.strftime("%Y-%m-%dT%H-%M-%S"),
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Northernmost Latitude',
                           np.max(lat),
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Number of Bands', 16, shape=(1, ), dtype=np.int16)
        f_new.attrs.create('Number of Scan Lines',
                           f['Scan image data/L_412'].shape[0],
                           shape=(1, ),
                           dtype=np.int32)
        f_new.attrs.create('Orbit Node Longitude',
                           root[0].find("OrbitNodeLongitude").text,
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Orbit Number',
                           root[0].find("OrbitNumber").text,
                           shape=(1, ),
                           dtype=np.int32)
        f_new.attrs.create('Pixels Per Scan Line',
                           1024,
                           shape=(1, ),
                           dtype=np.int32)
        f_new.attrs.create('Processing Center',
                           'NSOAS',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Processing Control',
                           'IMG',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create(
            'Processing Time',
            datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S"),
            shape=(1, ),
            dtype='S10')
        f_new.attrs.create('Product Name',
                           os.path.basename(outfile),
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Radiometric Method',
                           'unknown',
                           shape=(1, ),
                           dtype='S10')

        day = datetime.datetime(int('20' + os.path.basename(infile)[6:8]), 1, 1) + \
              datetime.timedelta(days=int(os.path.basename(infile)[8:11]) - 1)
        f_new.attrs.create('Range Beginning Date',
                           day.strftime("%Y%m%d") + ' ' +
                           os.path.basename(infile)[8:11],
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Range Ending Date',
                           day.strftime("%Y%m%d") + ' ' +
                           os.path.basename(infile)[8:11],
                           shape=(1, ),
                           dtype='S10')
        hour = np.min(millisecond) // (1000 * 60 * 60)
        minute = (np.min(millisecond) - hour * (1000 * 60 * 60)) // (1000 * 60)
        second = (np.min(millisecond) - hour *
                  (1000 * 60 * 60) - minute * 1000 * 60) // (1000)
        f_new.attrs.create('Range Beginning Time',
                           str(hour) + ':' + str(minute) + ':' + str(second),
                           shape=(1, ),
                           dtype='S10')

        hour = np.max(millisecond) // (1000 * 60 * 60)
        minute = (np.max(millisecond) - hour * (1000 * 60 * 60)) // (1000 * 60)
        second = (np.max(millisecond) - hour *
                  (1000 * 60 * 60) - minute * 1000 * 60) // (1000)
        f_new.attrs.create('Range Ending Time',
                           str(hour) + ':' + str(minute) + ':' + str(second),
                           shape=(1, ),
                           dtype='S10')
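        # Sanity-check sketch for the H:M:S arithmetic above, using divmod on
        # a known value (37230500 ms is 10:20:30.5; illustrative only):
        _h, _rem = divmod(37230500, 1000 * 60 * 60)
        _m, _rem = divmod(_rem, 1000 * 60)
        assert (_h, _m, _rem / 1000) == (10, 20, 30.5)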
        f_new.attrs.create('Realtime Delay Flag',
                           'Unknown',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Receiving End Time',
                           'Unknown',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Receiving Start Time',
                           'Unknown',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Ref Band Number', 6, shape=(1, ), dtype=np.int32)

        node = root[0].find("MissionCharacter")
        f_new.attrs.create('Satellite Character',
                           node.text,
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Satellite Name', 'HY-1A', shape=(1, ), dtype='S10')
        node = root[0].find("SceneCenterLatitude")
        f_new.attrs.create('Scene Center Latitude',
                           node.text,
                           shape=(1, ),
                           dtype=np.float32)
        node = root[0].find("SceneCenterLongitude")
        f_new.attrs.create('Scene Center Longitude',
                           node.text,
                           shape=(1, ),
                           dtype=np.float32)
        node = root[0].find("SceneCenterSolarZenith")
        f_new.attrs.create('Scene Center Solar Zenith',
                           node.text,
                           shape=(1, ),
                           dtype=np.float32)
        node = root[0].find("SceneCenterTime")
        time = datetime.datetime(int('20' + node.text[0:2]), 1, 1) + datetime.timedelta(days=int(node.text[2:5]) - 1) + \
               datetime.timedelta(hours=int(node.text[5:7])) + datetime.timedelta(minutes=int(node.text[7:9]))
        f_new.attrs.create('Scene Center Solar Time',
                           time.strftime("%Y-%m-%dT%H-%M-%S"),
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Sensor Mode', 'Unknown', shape=(1, ), dtype='S10')
        f_new.attrs.create(
            'Sensor Name',
            'COCTS, Chinese Ocean Color and Temperature Scanner',
            shape=(1, ),
            dtype='S10')
        f_new.attrs.create('Sensor Pitch Element',
                           'Unknown',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Sensor Yaw Element',
                           'Unknown',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Software Version',
                           '01.00',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Southernmost Latitude',
                           np.min(lat),
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Start Center Longitude',
                           center_lon[0],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Start Center Latitude',
                           center_lat[0],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('TLE', '', shape=(1, ), dtype='S10')
        f_new.attrs.create(
            'The Parameters of Sensor Characteristics',
            '412nm,443nm,490nm,520nm,565nm,670nm,750nm,865nm,10.3-11.4um,'
            '11.5-12.5um',
            shape=(1, ),
            dtype='S10')
        f_new.attrs.create('Title',
                           'HY-1A OCT Level-1B',
                           shape=(1, ),
                           dtype='S10')
        f_new.attrs.create('Upper Left Latitude',
                           lat[0, 0],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Upper Left Longitude',
                           lon[0, 0],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Upper Right Latitude',
                           lat[0, -1],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Upper Right Longitude',
                           lon[0, -1],
                           shape=(1, ),
                           dtype=np.float32)
        f_new.attrs.create('Westernmost Longitude',
                           np.min(lon),
                           shape=(1, ),
                           dtype=np.float32)
        f.close()
        f_new.close()
        return outfile
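
A minimal read-back sketch for the file produced above, using h5py (only
names that are actually written in this function are assumed):

import h5py

with h5py.File(outfile, 'r') as f_check:
    # attributes were created with shape=(1,), hence the [0]
    print(f_check.attrs['Satellite Name'][0])       # b'HY-1A'
    print(f_check.attrs['Number of Bands'][0])      # 16
    scan = f_check['Scan Line Attributes']
    print(scan['Millisecond'].shape, scan['Millisecond'].attrs['Unit'])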
Esempio n. 45
0
    def filter_py(self, x, is_aligned=False, delay_common=None):
        """Filter the input signal in pure Python.
        Args:
        x: signal with shape [x_len, n_chann]; if x has a single dimension,
        a channel axis of size 1 is added
        is_aligned: align the peaks of the Gammatone filter impulse responses
        delay_common: if aligned, use the same delay for every channel;
        by default each band is compensated by its own peak delay
        Returns:
        filter result with shape [n_band, x_len, n_chann]
        """
        # constant variables
        tpt = 2*np.pi*(1.0/self.fs)

        # check inputs
        if not isinstance(x, np.ndarray):
            raise Exception('x must be a numpy array')

        x = x.copy()
        if len(x.shape) > 2:
            raise Exception('too many dimensions for x')
        # ensure x is 2-D array
        x_len = x.shape[0]
        if len(x.shape) == 1:
            x = np.reshape(x, [-1, 1])
        n_chann = x.shape[1]

        if is_aligned is True:
            # peak delay of each band's impulse response, in seconds
            delays = np.round(3.0/(2.0*np.pi*self.bws)*self.fs)/self.fs
            if delay_common is not None:
                # apply the same delay to every channel instead of the
                # per-band peak delays
                delays = delay_common*np.ones(self.n_band)
        else:
            delays = np.zeros(self.n_band)

        x_filtered = np.zeros((self.n_band, x_len, n_chann))

        # IIR and FIR filters outputs
        out_a = np.zeros((5, n_chann), dtype=complex)
        coefs_a = np.zeros(5)
        out_b = 0
        coefs_b = np.zeros(4)

        # pad x with zeros to make the delay handling convenient
        x_padd = np.concatenate((x, np.zeros((
                                        np.max(np.int16(delays*self.fs)),
                                        n_chann))),
                                axis=0)
        norm_factors = np.zeros(self.n_band)
        y = np.zeros((self.n_band, x_len, n_chann), dtype=float)
        for band_i in range(self.n_band):
            bw = self.bws[band_i]
            cf = self.cfs[band_i]
            k = np.exp(-tpt*bw)
            # filter coefs
            coefs_a = [1, 4*k, -6*k**2, 4*k**3, -k**4]
            coefs_b = [1, 1, 4*k, k**2]
            #
            norm_factors[band_i] = (1-k)**4/(1+4*k+k**2)*2
            delay_len_band = int(delays[band_i]*self.fs)
            phi_0 = (-2*np.pi*cf
                     * int(self.fs*3.0/(2.0*np.pi*self.bws[band_i]))
                     / self.fs)
            # 3.0/(2.0*np.pi*self.bws[band_i])
            # phi_0 = 0
            for sample_i in range(x_len+delay_len_band):
                # 2*pi*cf*n/fs -2*pi*cf*delay
                freq_shiftor = np.exp(-1j*(tpt*cf*sample_i))
                # IIR part
                out_a[0, :] = x_padd[sample_i, :]*freq_shiftor*np.exp(1j*phi_0)
                for order_i in range(1, 5):
                    out_a[0, :] = (out_a[0, :]
                                   + coefs_a[order_i]*out_a[order_i, :])
                    # if np.max(np.abs(out_a[order_i])) > 20:
                    #     print(out_a[order_i])
                # FIR part
                out_b = 0
                for order_i in range(1, 4):
                    out_b = out_b+coefs_b[order_i]*out_a[order_i, :]
                    #
                if sample_i > delay_len_band:
                    y[band_i, sample_i-delay_len_band, :] = (
                                    norm_factors[band_i]
                                    * np.real(out_b
                                              * np.conjugate(freq_shiftor)))
                # update IIR output
                for order_i in range(4, 0, -1):
                    out_a[order_i, :] = out_a[order_i-1, :]
        return np.squeeze(y)
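
A minimal usage sketch; the filterbank class itself is not shown in this
snippet, so its constructor below is hypothetical:

import numpy as np

# gtf = GammatoneFilterBank(fs=16000, cf_low=80, cf_high=5000, n_band=32)
fs = 16000
x = np.random.randn(fs)          # 1 s of noise, single channel
# y = gtf.filter_py(x)           # -> (n_band, x_len) after np.squeeze
# y_aligned = gtf.filter_py(x, is_aligned=True)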
Esempio n. 46
0
        cmax = gdal.Info(infile, computeMinMax=True,
                         format="json")["bands"][0]['computedMax']

    ### cast float -> int when the value is integral
    if np.mod(cmin, 1) == 0: cmin = int(cmin)
    if np.mod(cmax, 1) == 0: cmax = int(cmax)

    #%% Set file name
    if not outfile:
        outfile = infile.replace('.tif',
                                 '.{}_{}_{}.tif'.format(cmap_name, cmin, cmax))

    #%% Create color table
    ### Format: value R G B alpha
    colorfile = '{}_{}_{}.txt'.format(cmap_name, cmin, cmax)
    cmap_RGB = np.int16(np.round(cmap(np.linspace(0, 1, n_color)) * 255))
    with open(colorfile, "w") as f:
        for i in range(n_color):
            print("{} {} {} {} 255".format(
                cmin + i * (cmax - cmin) / (n_color - 1), cmap_RGB[i, 0],
                cmap_RGB[i, 1], cmap_RGB[i, 2]),
                  file=f)
        print("nv 0 0 0 0", file=f)

    #%% gdal dem
    gdal.DEMProcessing(outfile,
                       infile,
                       "color-relief",
                       colorFilename=colorfile,
                       format="GTiff",
                       creationOptions=gdal_option,
Esempio n. 47
0
def _read_int16(f):
    '''Read a signed 16-bit integer stored in the low two bytes of a
    4-byte big-endian field'''
    return np.int16(struct.unpack('>h', f.read(4)[2:4])[0])
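
An equivalent formulation that unpacks the whole 4-byte field at once (a
sketch under the same layout assumption, i.e. the value lives in the low
two bytes):

import struct
import numpy as np

def _read_int16_alt(f):
    # '>hh' splits the field into two big-endian shorts; keep the second
    return np.int16(struct.unpack('>hh', f.read(4))[1])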
Esempio n. 48
0
 def __readInt__(self, addr):
     return int16(self.__readUInt__(addr))
Esempio n. 49
0
 def year_index(self):
     return np.int16(self.dates.year - self.dates.year[0])
Esempio n. 50
0
class A:
    def __float__(self) -> float:
        return 4.0


np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")

np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")

np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")

if sys.version_info >= (3, 8):
Esempio n. 51
0
    RP = skimage.measure.regionprops(minutiaeTerm)
    minutiaeTerm = removeSpuriousMinutiae(RP, np.uint8(img), 10)

    # 8-connected labeling (neighbors=8 is connectivity=2 in current skimage)
    BifLabel = skimage.measure.label(minutiaeBif, connectivity=2)
    TermLabel = skimage.measure.label(minutiaeTerm, connectivity=2)

    minutiaeBif = minutiaeBif * 0
    minutiaeTerm = minutiaeTerm * 0

    (rows, cols) = skel.shape
    DispImg = np.zeros((rows, cols, 3), np.uint8)
    DispImg[:, :, 0] = skel
    DispImg[:, :, 1] = skel
    DispImg[:, :, 2] = skel

    RP = skimage.measure.regionprops(BifLabel)
    for i in RP:
        (row, col) = np.int16(np.round(i.centroid))
        minutiaeBif[row, col] = 1
        (rr, cc) = skimage.draw.circle_perimeter(row, col, 3)
        skimage.draw.set_color(DispImg, (rr, cc), (255, 0, 0))

    RP = skimage.measure.regionprops(TermLabel)
    for i in RP:
        (row, col) = np.int16(np.round(i.centroid))
        minutiaeTerm[row, col] = 1
        (rr, cc) = skimage.draw.circle_perimeter(row, col, 3)
        skimage.draw.set_color(DispImg, (rr, cc), (0, 0, 255))

    cv2.imshow('a', DispImg)
    cv2.waitKey(0)
Esempio n. 52
0
    def fit(self,
            env,
            nb_steps,
            action_repetition=1,
            callbacks=None,
            verbose=1,
            visualize=False,
            nb_max_start_steps=0,
            start_step_policy=None,
            log_interval=10000,
            nb_max_episode_steps=None):
        """Trains the agent on the given environment.

        # Arguments
            env: (`Env` instance): Environment that the agent interacts with. See [Env](#env) for details.
            nb_steps (integer): Number of training steps to be performed.
            action_repetition (integer): Number of times the agent repeats the same action without
                observing the environment again. Setting this to a value > 1 can be useful
                if a single action only has a very small effect on the environment.
            callbacks (list of `keras.callbacks.Callback` or `rl.callbacks.Callback` instances):
                List of callbacks to apply during training. See [callbacks](/callbacks) for details.
            verbose (integer): 0 for no logging, 1 for interval logging (compare `log_interval`), 2 for episode logging
            visualize (boolean): If `True`, the environment is visualized during training. However,
                this is likely going to slow down training significantly and is thus intended to be
                a debugging instrument.
            nb_max_start_steps (integer): Number of maximum steps that the agent performs at the beginning
                of each episode using `start_step_policy`. Notice that this is an upper limit since
                the exact number of steps to be performed is sampled uniformly from [0, nb_max_start_steps]
                at the beginning of each episode.
            start_step_policy (`lambda observation: action`): The policy
                to follow if `nb_max_start_steps` > 0. If set to `None`, a random action is performed.
            log_interval (integer): If `verbose` = 1, the number of steps that are considered to be an interval.
            nb_max_episode_steps (integer): Number of steps per episode that the agent performs before
                automatically resetting the environment. Set to `None` if each episode should run
                (potentially indefinitely) until the environment signals a terminal state.

        # Returns
            A `keras.callbacks.History` instance that recorded the entire training process.
        """
        if not self.compiled:
            raise RuntimeError(
                'You tried to fit your agent, but it hasn\'t been compiled yet. Please call `compile()` before `fit()`.'
            )
        if action_repetition < 1:
            raise ValueError('action_repetition must be >= 1, is {}'.format(
                action_repetition))

        self.training = True

        callbacks = [] if not callbacks else callbacks[:]

        if verbose == 1:
            callbacks += [TrainIntervalLogger(interval=log_interval)]
        elif verbose > 1:
            callbacks += [TrainEpisodeLogger()]
        if visualize:
            callbacks += [Visualizer()]
        history = History()
        callbacks += [history]
        callbacks = CallbackList(callbacks)
        if hasattr(callbacks, 'set_model'):
            callbacks.set_model(self)
        else:
            callbacks._set_model(self)
        callbacks._set_env(env)
        params = {
            'nb_steps': nb_steps,
        }
        if hasattr(callbacks, 'set_params'):
            callbacks.set_params(params)
        else:
            callbacks._set_params(params)
        self._on_train_begin()
        callbacks.on_train_begin()

        episode = np.int16(0)
        self.step = np.int16(0)
        observation = None
        episode_reward = None
        episode_step = None
        did_abort = False
        try:
            while self.step < nb_steps:
                if observation is None:  # start of a new episode
                    callbacks.on_episode_begin(episode)
                    episode_step = np.int16(0)
                    episode_reward = np.float32(0)

                    # Obtain the initial observation by resetting the environment.
                    self.reset_states()
                    observation = deepcopy(env.reset())
                    if self.processor is not None:
                        observation = self.processor.process_observation(
                            observation)
                    assert observation is not None

                    # Perform random starts at beginning of episode and do not record them into the experience.
                    # This slightly changes the start position between games.
                    nb_random_start_steps = 0 if nb_max_start_steps == 0 else np.random.randint(
                        nb_max_start_steps)
                    for _ in range(nb_random_start_steps):
                        if start_step_policy is None:
                            action = env.action_space.sample()
                        else:
                            action = start_step_policy(observation)
                        if self.processor is not None:
                            action = self.processor.process_action(action)
                        callbacks.on_action_begin(action)
                        observation, reward, done, info = env.step(action)
                        observation = deepcopy(observation)
                        if self.processor is not None:
                            observation, reward, done, info = self.processor.process_step(
                                observation, reward, done, info)
                        callbacks.on_action_end(action)
                        if done:
                            warnings.warn(
                                'Env ended before {} random steps could be performed at the start. You should probably lower the `nb_max_start_steps` parameter.'
                                .format(nb_random_start_steps))
                            observation = deepcopy(env.reset())
                            if self.processor is not None:
                                observation = self.processor.process_observation(
                                    observation)
                            break

                # At this point, we expect to be fully initialized.
                assert episode_reward is not None
                assert episode_step is not None
                assert observation is not None

                # Run a single step.
                callbacks.on_step_begin(episode_step)
                # This is where all of the work happens. We first perceive and compute the action
                # (forward step) and then use the reward to improve (backward step).
                action = self.forward(observation)
                if self.processor is not None:
                    action = self.processor.process_action(action)
                reward = np.float32(0)
                accumulated_info = {}
                raw_info = []
                done = False
                for _ in range(action_repetition):
                    callbacks.on_action_begin(action)
                    observation, r, done, info = env.step(action)
                    observation = deepcopy(observation)
                    if self.processor is not None:
                        observation, r, done, info = self.processor.process_step(
                            observation, r, done, info)
                    raw_info.append(info)
                    for key, value in info.items():
                        if not np.isreal(value):
                            continue
                        if key not in accumulated_info:
                            accumulated_info[key] = np.zeros_like(value)
                        accumulated_info[key] += value
                    callbacks.on_action_end(action)
                    reward += r
                    if done:
                        break
                if nb_max_episode_steps and episode_step >= nb_max_episode_steps - 1:
                    # Force a terminal state.
                    done = True
                metrics = self.backward(reward, terminal=done)
                episode_reward += reward

                step_logs = {
                    'action': action,
                    'observation': observation,
                    'reward': reward,
                    'metrics': metrics,
                    'episode': episode,
                    'info': accumulated_info,
                    'raw_info': raw_info
                }
                callbacks.on_step_end(episode_step, step_logs)
                episode_step += 1
                self.step += 1

                if done:
                    # We are in a terminal state but the agent hasn't yet seen it. We therefore
                    # perform one more forward-backward call and simply ignore the action before
                    # resetting the environment. We need to pass in `terminal=False` here since
                    # the *next* state, that is the state of the newly reset environment, is
                    # always non-terminal by convention.
                    self.forward(observation)
                    self.backward(0., terminal=False)

                    # This episode is finished, report and reset.
                    episode_logs = {
                        'episode_reward': episode_reward,
                        'nb_episode_steps': episode_step,
                        'nb_steps': self.step,
                    }
                    callbacks.on_episode_end(episode, episode_logs)

                    episode += 1
                    observation = None
                    episode_step = None
                    episode_reward = None
        except KeyboardInterrupt:
            # We catch keyboard interrupts here so that training can be safely aborted.
            # This is so common that we've built this right into this function, which ensures that
            # the `on_train_end` method is properly called.
            did_abort = True
        callbacks.on_train_end(logs={'did_abort': did_abort})
        self._on_train_end()

        return history
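
A typical call, sketched under the usual keras-rl conventions (the agent
construction and compilation are outside this snippet, so the names below
are hypothetical):

# agent = DQNAgent(model=model, memory=memory, policy=policy,
#                  nb_actions=nb_actions)
# agent.compile(Adam(lr=1e-3), metrics=['mae'])
# history = agent.fit(env, nb_steps=50000, visualize=False, verbose=1,
#                     log_interval=10000)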
Esempio n. 53
0
            x = power2resize(x)

            z = largeforward(net, x.unsqueeze(0))
            z = globalresize(z)
            z = (z[0, 1, :, :] > z[0, 0, :, :]).float()

            cm[k] += digitanie.confusion(y, z, size=size)

            if False:
                debug = digitanie.torchTOpil(globalresize(x))
                debug = PIL.Image.fromarray(numpy.uint8(debug))
                debug.save("build/" + city + str(i) + "_x.png")
                debug = y.float()
                debug = debug * 2 * (1 - digitanie.isborder(y, size=size))
                debug = debug + digitanie.isborder(y, size=size)
                debug *= 127
                debug = debug.cpu().numpy()
                debug = PIL.Image.fromarray(numpy.uint8(debug))
                debug.save("build/" + city + str(i) + "_y.png")
                debug = z.cpu().numpy() * 255
                debug = PIL.Image.fromarray(numpy.uint8(debug))
                debug.save("build/" + city + str(i) + "_z.png")

        print("perf=", digitanie.perf(cm[k]))

perfs = digitanie.perf(cm)
print("digitanie", perfs[-1])
print(perfs)
numpy.savetxt(name, numpy.int16(perfs.cpu().numpy() * 10), fmt="%i", delimiter="\t")
os._exit(0)
Esempio n. 54
0
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.fftpack import fft, ifft,rfft,irfft
import struct as st
import os

files = os.listdir('/media/esolera/ExtraDrive1/TEC/cuatri_1/Adquisicion/proyecto2/entregable2/prueba/audio_files')  # directory with the input .wav chunks
number_files = len(files)


for i in range(0,number_files-1):
	fs, data = wavfile.read('/media/esolera/ExtraDrive1/TEC/cuatri_1/Adquisicion/proyecto2/entregable1/audio_files/chunk%i.wav'%i)
	k = data.T[0]  # two channels; keep the first one

	we = np.int16(k)
	print(we)

	with open('/media/esolera/ExtraDrive1/TEC/cuatri_1/Adquisicion/proyecto2/entregable2/prueba/textfiles/Datos%i.txt'%i, 'w') as file:
		for a in range(64):
			file.write("%i\n" % we[a])


Esempio n. 55
0
def DownloadData(Dir, Startdate, Enddate, latlim, lonlim, Waitbar, cores,
                 TimeCase):
    """
    This function downloads TRMM daily or monthly data

    Keyword arguments:
    Dir -- 'C:/file/to/path/'
    Startdate -- 'yyyy-mm-dd'
    Enddate -- 'yyyy-mm-dd'
    latlim -- [ymin, ymax] (values must be between -50 and 50)
    lonlim -- [xmin, xmax] (values must be between -180 and 180)
    cores -- The number of cores used to run the routine. It can be 'False'
             to avoid using parallel computing routines.
    TimeCase -- String equal to 'daily' or 'monthly'
    Waitbar -- 1 (Default) will print a waitbar
    """
    # String Parameters
    if TimeCase == 'daily':
        TimeFreq = 'D'
        output_folder = os.path.join(Dir, 'Precipitation', 'TRMM', 'Daily')
    elif TimeCase == 'monthly':
        TimeFreq = 'MS'
        output_folder = os.path.join(Dir, 'Precipitation', 'TRMM', 'Monthly')
    else:
        raise KeyError("The input time interval is not supported")

    # Make directory
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    # Check variables
    if not Startdate:
        Startdate = pd.Timestamp('1998-01-01')
    if not Enddate:
        Enddate = pd.Timestamp('Now')
    Dates = pd.date_range(Startdate, Enddate, freq=TimeFreq)

    # Create Waitbar
    if Waitbar == 1:
        import watools.Functions.Start.WaitbarConsole as WaitbarConsole
        total_amount = len(Dates)
        amount = 0
        WaitbarConsole.printWaitBar(amount,
                                    total_amount,
                                    prefix='Progress:',
                                    suffix='Complete',
                                    length=50)

    if latlim[0] < -50 or latlim[1] > 50:
        print('Latitude above 50N or below 50S is not possible.'
              ' Value set to maximum')
        latlim[0] = max(latlim[0], -50)
        latlim[1] = min(latlim[1], 50)
    if lonlim[0] < -180 or lonlim[1] > 180:
        print('Longitude must be between 180E and 180W.'
              ' Now value is set to maximum')
        lonlim[0] = max(lonlim[0], -180)
        lonlim[1] = min(lonlim[1], 180)

    # Define IDs on the 0.25-degree TRMM grid (lat -50..50, lon -180..180)
    yID = np.int16(
        np.array(
            [np.ceil((latlim[0] + 50) * 4),
             np.floor((latlim[1] + 50) * 4)]))
    xID = np.int16(
        np.array([np.floor((lonlim[0]) * 4),
                  np.ceil((lonlim[1]) * 4)]) + 720)

    # Pass variables to parallel function and run
    args = [output_folder, TimeCase, xID, yID, lonlim, latlim]

    if not cores:
        for Date in Dates:
            RetrieveData(Date, args)
            if Waitbar == 1:
                amount += 1
                WaitbarConsole.printWaitBar(amount,
                                            total_amount,
                                            prefix='Progress:',
                                            suffix='Complete',
                                            length=50)
        results = True
    else:
        results = Parallel(n_jobs=cores)(delayed(RetrieveData)(Date, args)
                                         for Date in Dates)

    return results
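
A hedged usage sketch (paths and dates are illustrative; RetrieveData is
defined elsewhere in this module):

# Download monthly TRMM precipitation for a small window, single core:
# DownloadData(Dir='C:/data/', Startdate='2005-01-01', Enddate='2005-12-31',
#              latlim=[-5, 5], lonlim=[30, 42], Waitbar=1, cores=False,
#              TimeCase='monthly')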
def detector(frames: list) -> dict:
    """
    Returns:
        (dict) a dictionary describing the condition of the eyes, or None if
        no strabismus is detected.

        Content of the dictionary:
            - horizontalDeviation: if positive, the left eye sits closer to the right eye; farther otherwise
            - verticalDeviation: if positive, the left eye sits below the right eye; above otherwise

    """
    
    # img = cv2.VideoCapture(0)
    threshold = 2

    dist_acc = []
    dist_x = []
    dist_y = []

    sum_left_x = 0
    sum_right_x = 0
    sum_left_y = 0
    sum_right_y = 0

    for a in range(len(frames)):
        # ret, frame = img.read()
        frame = frames[a]
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        eye = eye_cascade.detectMultiScale(gray, 1.3, 5)
        # print(eye)
        strabismus = []
        x = 0
        dist_main = []

        while len(eye) == 2 and x < 2:
            # ensuring first index stores left eye
            if eye[0][0] > eye[1][0]:
                temp = np.zeros((len(eye[0])))
                temp[:] = eye[0, :]
                eye[0, :] = eye[1, :]
                eye[1, :] = temp[:]

            ex, ey, ew, eh = eye[x]
            cv2.rectangle(frame, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
            eye_im = frame[ey:ey+eh, ex:ex+ew]
            eye_gray = gray[ey:ey+eh, ex:ex+ew]

            # ret, eye_gray = cv2.threshold(eye_g,55,150,cv2.THRESH_BINARY_INV)
            # ret,eye_gray = cv2.threshold(eye_g,55,255,1)
            # im2, contours, hierarchy = cv2.findContours(eye_gray,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
            rec_center = [ex+ew/2, ey+eh/2]
            x += 1

            try:
                circles = cv2.HoughCircles(eye_gray, 
                    cv2.HOUGH_GRADIENT, 1.25, 20,
                    param1=50, 
                    param2=50, 
                    minRadius=CIRCLE_DETECTION_MIN_RADIUS, 
                    maxRadius=CIRCLE_DETECTION_MAX_RADIUS
                )
                
                circles = np.int16(np.around(circles))
                if len(circles[0]) == 1:
                    i = circles[0][0]
                    if i[0] == 0:
                        continue

                    cv2.circle(eye_im, (i[0], i[1]), i[2], (0, 255, 0), 2)
                    cir_center = [ex + i[0], ey + i[1]]
                    xcor_dev = (cir_center[0] - rec_center[0])
                    ycor_dev = ((cir_center[1] - rec_center[1]) ** 2) ** .5
                    if x == 1:
                        sum_right_x += xcor_dev
                        sum_right_y += ycor_dev

                    else:
                        sum_left_x += xcor_dev
                        sum_left_y += ycor_dev

                    dist=((xcor_dev) ** 2 + (ycor_dev) ** 2) ** .5

                    if x == 1:
                        dist_x.append(dist)

                    else:
                        dist_y.append(dist)

                    dist_main.append(dist)
            except Exception:
                continue

        print(dist_main)
        # if len(dist_main) == 2:
        #    dist_acc.append(dist_main)
        # cv2.imshow("img", frame)
        # cv2.imshow("eye_gray", eye_gray)

        if (len(dist_x) >= 40 and len(dist_y) >= 40):
            break
        # cv2.waitKey(10)
    # x = np.array(dist_acc)
    x = np.array(dist_x)
    y = np.array(dist_y)

    sum_right_x = sum_right_x / len(dist_x)
    sum_right_y = sum_right_y / len(dist_x)
    sum_left_x = sum_left_x / len(dist_y)
    sum_left_y = sum_left_y / len(dist_y)

    deviation=std_deviation(x, y)

    if (abs(deviation) > threshold):
        '''
        if horizontal deviation is -ve, left eye is closer to the right eye
        if horizontal deviation is +ve, left eye is away from the right eye
        if vertical deviation is -ve, left eye is above the right eye
        if vertical deviation is +ve, left eye is below the right eye
        '''
        print("strabismus")
        
        result = {
            "result": "strabismus",
            "horizontalDeviation": sum_left_x - sum_right_x,
            "verticalDeviation": sum_left_y - sum_right_y
        }
        
        return result
        
    return None
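
Interpreting the result (a usage sketch; acquiring frames via
cv2.VideoCapture is illustrative, any list of BGR frames works):

# cap = cv2.VideoCapture('patient.mp4')
# frames = []
# while True:
#     ok, frame = cap.read()
#     if not ok:
#         break
#     frames.append(frame)
# report = detector(frames)
# if report is not None:
#     print('horizontal:', report['horizontalDeviation'],
#           'vertical:', report['verticalDeviation'])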
        
def rescale_im_to_hu(image, intercept, slope):
    # -2000 marks pixels outside the scanner field of view; map them to air
    # (about -1024 HU after the rescale below)
    image[image == -2000] = -1024 - intercept
    # apply the DICOM linear rescale: HU = slope * stored_value + intercept
    image = slope * image.astype(np.float64)
    image = image.astype(np.int16) + np.int16(intercept)

    return image
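
A usage sketch with pydicom (the file path is illustrative; RescaleIntercept
and RescaleSlope are the standard CT rescale tags):

import numpy as np
import pydicom

ds = pydicom.dcmread('slice_0001.dcm')               # illustrative path
hu = rescale_im_to_hu(ds.pixel_array.astype(np.int16),
                      int(ds.RescaleIntercept),
                      float(ds.RescaleSlope))
print(hu.min(), hu.max())   # air is about -1000 HU, dense bone above +400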
Esempio n. 58
0
)

import soundfile as sf
sf.write("new_wav.wav", generated_wav.astype(np.float32),
         synthesizer.sample_rate)

from scipy.io.wavfile import write

save_path = 'C:/Users/hp/Desktop/cours cs 3A/projects/voice cloning app/Real-Time-Voice-Cloning'
write(
    'output.wav',
    rate=synthesizer.sample_rate,
    data=generated_wav_normalized,
)

y = np.int16(generated_wav_new * 2**15)
song = pydub.AudioSegment(y.tobytes(),
                          frame_rate=synthesizer.sample_rate,
                          sample_width=2,
                          channels=1)
song.export('output_new.wav', format='wav', bitrate="320k")

song.frame_rate

scaled = np.int16(generated_wav_new / np.max(np.abs(generated_wav_new)) *
                  32767)
write('test.wav', 16000, scaled)

from scipy.io.wavfile import write

scaled = np.int16(data / np.max(np.abs(data)) * 32767)
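
The scaling above maps a float waveform in [-1, 1] into the signed 16-bit
range; writing the result back is then just (same names as above):

# write('normalized.wav', synthesizer.sample_rate, scaled)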
Esempio n. 59
0
def find_golf():
    global bgr, depth, mask_gripper
    position = Point()
    position.x = 100
    position.z = 100
    position.y = 100
    # while not rospy.is_shutdown():
    if bgr is None:
        return position
    gray = cv.cvtColor(bgr, cv.COLOR_BGR2GRAY)
    # r,c = gray.shape
    # mask_gripper = cv.resize(mask_gripper, (r,c))
    bg = cv.medianBlur(gray, 61)
    fg = cv.medianBlur(gray, 5)
    sub_sign = np.int16(fg) - np.int16(bg)
    sub_pos = np.clip(sub_sign.copy(), 0, sub_sign.copy().max())
    sub_neg = np.clip(sub_sign.copy(), sub_sign.copy().min(), 0)

    sub_pos = normalize(sub_pos)
    sub_neg = normalize(sub_neg)

    # cv.imshow('sub_pos',sub_pos)
    # cv.imshow('sub_neg',sub_neg)

    _, obj = cv.threshold(sub_pos, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    print("obj", obj.shape)
    print("mask", mask_gripper.shape)
    obj = cv.bitwise_and(obj, obj, mask=mask_gripper)

    # OpenCV 3 signature; in OpenCV 4 findContours returns (contours, hierarchy)
    _, contours, _ = cv.findContours(obj.copy(), cv.RETR_EXTERNAL,
                                     cv.CHAIN_APPROX_NONE)
    display = cv.cvtColor(sub_neg.copy(), cv.COLOR_GRAY2BGR)
    r = 0
    circle = []
    for cnt in contours:
        area_cnt = cv.contourArea(cnt)
        (x, y), radius = cv.minEnclosingCircle(cnt)
        center = (int(x), int(y))
        area_cir = math.pi * (radius**2)
        if area_cir <= 0 or area_cnt / area_cir < 0.8:
            continue
        cv.circle(display, center, int(radius), (255, 0, 0), -1)
        circle.append([x, y, radius])

    row, col = gray.shape
    if len(circle) > 0:
        circle = sorted(circle, key=itemgetter(2), reverse=True)
        circle = circle[0]
        x, y, radius = circle
        cv.circle(display, (int(x), int(y)), int(radius), (0, 0, 255), 2)
        diameter = 2. * radius
        pixel_per_cm = diameter / 4.3
        print("radius", radius)
        print("pixel_per_cm", pixel_per_cm)
        print("depth", depth)
        if True:
            x_distance_pixel = row / 2. - y
            y_distance_pixel = col / 2. - x
            print("row col", row, col)
            print("x y", x, y)
            print("x_distance_pixel y_distance_pixel:", x_distance_pixel,
                  y_distance_pixel)
            x_distance_cm = float(x_distance_pixel) / pixel_per_cm
            y_distance_cm = float(y_distance_pixel) / pixel_per_cm
            print("x_distance_cm:", x_distance_cm)
            print("y_distance_cm:", y_distance_cm)
            x_distance_meter = x_distance_cm / 100.
            y_distance_meter = y_distance_cm / 100.
            print("x_meter:", x_distance_meter)
            print("y_meter:", y_distance_meter)
            cv.circle(display, (int(x), int(y)), int(radius), (0, 255, 255),
                      -1)

            # cv.imshow('obj',obj)

            bg = np.uint8(bg)
            fg = np.uint8(fg)

            # cv.imshow('original_bgr', bgr)
            # cv.imshow('bg', bg)
            # cv.imshow('fg', fg)
            # cv.imshow('display', display)

            position.x = x_distance_meter
            position.y = y_distance_meter
            return position
    # k = cv.waitKey(1) & 0xff
    return position
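
normalize is defined elsewhere; a minimal sketch consistent with how it is
used above (stretching the clipped difference image to 8-bit so Otsu
thresholding can be applied) might be:

import numpy as np

def normalize(img):
    # hypothetical helper: stretch to the full 0..255 range as uint8
    img = img.astype(np.float64) - img.min()
    if img.max() > 0:
        img = img / img.max()
    return np.uint8(img * 255)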
Esempio n. 60
0
from scipy.io.wavfile import read
import numpy as np
import sys

count = 0

filename = str(sys.argv[1])
lineSize = int(sys.argv[2])

sr, samples = read(filename)

lines = samples.size // lineSize

noext = filename.split(".", 1)[0]

with open(noext + ".txt", "w") as out:

    for i in range(lines * lineSize):
        if i % lineSize == 0:
            if count != 0:
                out.write("};\n")
            out.write("const Int16 samples" + str(count) +
                      "[SIGNAL_SIZE] = {")
            count = count + 1
        out.write(str(np.int16(round(samples[i]))))
        if i % lineSize != lineSize - 1:
            out.write(",")
    out.write("};\n")

print("Saved as " + noext + ".txt")