Example #1
def train_D(x_real):
    G.train()
    D.train()

    z = torch.randn(args.batch_size, args.z_dim, 1, 1).to(device)
    x_fake = G(z).detach()

    # fake image 1d power spectrum
    psd1D_img = np.zeros([x_fake.shape[0], N])
    for t in range(x_fake.shape[0]):
        gen_imgs = x_fake.permute(0, 2, 3, 1)
        img_numpy = gen_imgs[t, :, :, :].cpu().detach().numpy()
        img_gray = RGB2gray(img_numpy)
        fft = np.fft.fft2(img_gray)
        fshift = np.fft.fftshift(fft)
        fshift += epsilon
        magnitude_spectrum = 20 * np.log(np.abs(fshift))
        psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)
        psd1D = (psd1D - np.min(psd1D)) / (np.max(psd1D) - np.min(psd1D))
        psd1D_img[t, :] = psd1D

    psd1D_img = torch.from_numpy(psd1D_img).float()
    psd1D_img = Variable(psd1D_img, requires_grad=True).to(device)

    # real image 1d power spectrum
    psd1D_rec = np.zeros([x_real.shape[0], N])
    for t in range(x_real.shape[0]):
        gen_imgs = x_real.permute(0, 2, 3, 1)
        img_numpy = gen_imgs[t, :, :, :].cpu().detach().numpy()
        img_gray = RGB2gray(img_numpy)
        fft = np.fft.fft2(img_gray)
        fshift = np.fft.fftshift(fft)
        fshift += epsilon
        magnitude_spectrum = 20 * np.log(np.abs(fshift))
        psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)
        psd1D = (psd1D - np.min(psd1D)) / (np.max(psd1D) - np.min(psd1D))
        psd1D_rec[t, :] = psd1D

    psd1D_rec = torch.from_numpy(psd1D_rec).float()
    psd1D_rec = Variable(psd1D_rec, requires_grad=True).to(device)

    loss_freq = criterion_freq(psd1D_rec, psd1D_img.detach())

    x_real_d_logit = D(x_real)
    x_fake_d_logit = D(x_fake)

    x_real_d_loss, x_fake_d_loss = d_loss_fn(x_real_d_logit, x_fake_d_logit)
    gp = gan.gradient_penalty(D,
                              x_real,
                              x_fake,
                              mode=args.gradient_penalty_mode)

    D_loss = (x_real_d_loss + x_fake_d_loss
              ) + gp * args.gradient_penalty_weight + 2 * loss_freq

    D.zero_grad()
    D_loss.backward()
    D_optimizer.step()

    return {'d_loss': x_real_d_loss + x_fake_d_loss, 'gp': gp}
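The per-image spectrum loop above is duplicated for the fake and real batches, and again in train_G below. A minimal sketch of a shared helper, assuming the same RGB2gray, radialProfile, N, and epsilon as the snippet, and that azimuthalAverage returns exactly n_bins values for these image sizes:

import numpy as np
import torch

def batch_psd1d(x, n_bins, eps=1e-8):
    """Min-max normalised 1D power spectrum of each image in a (B, C, H, W)
    batch, returned as a (B, n_bins) float tensor."""
    imgs = x.permute(0, 2, 3, 1).detach().cpu().numpy()
    out = np.zeros((imgs.shape[0], n_bins))
    for t in range(imgs.shape[0]):
        gray = RGB2gray(imgs[t])                        # luminance conversion
        fshift = np.fft.fftshift(np.fft.fft2(gray)) + eps
        spectrum = 20 * np.log(np.abs(fshift))
        psd = radialProfile.azimuthalAverage(spectrum)
        out[t] = (psd - psd.min()) / (psd.max() - psd.min())
    return torch.from_numpy(out).float()

# e.g. psd1D_img = batch_psd1d(x_fake, N).to(device)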
Example #2
def train_G(x_real):
    G.train()
    D.train()

    z = torch.randn(args.batch_size, args.z_dim, 1, 1).to(device)
    x_fake = G(z)

    x_fake_d_logit = D(x_fake)
    g_loss = g_loss_fn(x_fake_d_logit)

    # fake image 1d power spectrum
    psd1D_img = np.zeros([x_fake.shape[0], N])
    for t in range(x_fake.shape[0]):
        gen_imgs = x_fake.permute(0, 2, 3, 1)
        img_numpy = gen_imgs[t, :, :, :].cpu().detach().numpy()
        img_gray = RGB2gray(img_numpy)
        fft = np.fft.fft2(img_gray)
        fshift = np.fft.fftshift(fft)
        fshift += epsilon
        magnitude_spectrum = 20 * np.log(np.abs(fshift))
        psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)
        psd1D = (psd1D - np.min(psd1D)) / (np.max(psd1D) - np.min(psd1D))
        psd1D_img[t, :] = psd1D

    psd1D_img = torch.from_numpy(psd1D_img).float()
    psd1D_img = Variable(psd1D_img, requires_grad=True).to(device)

    # real image 1d power spectrum
    psd1D_rec = np.zeros([x_real.shape[0], N])
    for t in range(x_real.shape[0]):
        gen_imgs = x_real.permute(0, 2, 3, 1)
        img_numpy = gen_imgs[t, :, :, :].cpu().detach().numpy()
        img_gray = RGB2gray(img_numpy)
        fft = np.fft.fft2(img_gray)
        fshift = np.fft.fftshift(fft)
        fshift += epsilon
        magnitude_spectrum = 20 * np.log(np.abs(fshift))
        psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)
        psd1D = (psd1D - np.min(psd1D)) / (np.max(psd1D) - np.min(psd1D))
        psd1D_rec[t, :] = psd1D

    psd1D_rec = torch.from_numpy(psd1D_rec).float()
    psd1D_rec = Variable(psd1D_rec, requires_grad=True).to(device)

    loss_freq = criterion_freq(psd1D_rec, psd1D_img.detach())
    datalossBCE.append(loss_freq.detach())  # .data is deprecated in modern PyTorch

    G_loss = loss_freq + g_loss

    G.zero_grad()
    G_loss.backward()
    G_optimizer.step()

    return {'g_loss': G_loss}
Example #3
def getpowerspectrum(g):
    ghat = np.fft.fft2(g)
    ghatshift = fftpack.fftshift(ghat)
    #    ghatshift = ghat
    gabs = np.abs(ghatshift)**2
    ps2 = radialProfile.azimuthalAverage(gabs)
    return ps2
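Every example on this page delegates the radial binning to radialProfile.azimuthalAverage, which is never shown. A minimal sketch of what such a function typically does (means over 1-pixel-wide integer-radius annuli around the image centre); the actual module may differ in its binning and masking options:

import numpy as np

def azimuthal_average(image, center=None):
    """Mean of 'image' over 1-pixel-wide annuli around 'center'."""
    y, x = np.indices(image.shape)
    if center is None:
        center = ((image.shape[1] - 1) / 2.0, (image.shape[0] - 1) / 2.0)
    r = np.hypot(x - center[0], y - center[1]).astype(int)   # radius bin per pixel
    sums = np.bincount(r.ravel(), weights=image.ravel())     # sum per radius
    counts = np.bincount(r.ravel())                          # pixels per radius
    return sums / counts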
Example #4
def FP_profile(im, xcen, ycen, trim_rad=None, mask=0):
    """
    calculate the radial profile and include options for using r or r**2
    for the profile.  might want to display vs. r, but fit voigt profile vs.
    r**2. also provide options to trim radius to exclude outside the FOV and
    to set a mask value.

    Parameters
    ----------
    im : 2D ndarray containing image (usually read in using pyfits)
    xcen : float - X position of ring center
    ycen : float - Y position of ring center
    trim_rad : float - maximum radial extent of profile
    mask : int - data value to use for masking bad data

    Returns
    -------
    prof, r : numpy arrays containing radial profile and radii, respectively
    """
    prof = azimuthalAverage(im, center=[xcen, ycen], maskval=mask)
    r = np.arange(len(prof))
    if trim_rad:
        prof = prof[0:trim_rad]
        r = r[0:trim_rad]
    return prof, r
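A hypothetical call, using a synthetic Gaussian ring instead of a FITS image (this assumes azimuthalAverage from the same module is importable, as FP_profile requires):

import numpy as np

yy, xx = np.indices((512, 512))
ring = np.exp(-((np.hypot(xx - 256.0, yy - 256.0) - 120.0) ** 2) / 50.0)
prof, r = FP_profile(ring, xcen=256.0, ycen=256.0, trim_rad=200)
# prof peaks near r = 120, the ring radius; both arrays have length 200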
Example #6
def fft1D(imagen):
    nimage = (imagen - np.median(imagen)) / np.std(imagen)
    nimage = nimage.astype(float)

    nsize = 3
    image = np.zeros((nimage.shape[0] * nsize, nimage.shape[1] * nsize))
    image[0:nimage.shape[0], 0:nimage.shape[1]] = nimage
    # image[nimage.shape[0]:,0:nimage.shape[1]] = np.rot90(nimage)
    # image[nimage.shape[0]:,nimage.shape[1]:] = np.rot90(np.rot90(nimage))
    # image[0:nimage.shape[0],nimage.shape[1]:] = np.rot90(np.rot90(np.rot90(nimage)))

    T = float(image.shape[0])

    # Take the fourier transform of the image.
    F1 = fftpack.fft2(image)
    F2 = fftpack.fftshift(F1)

    # Calculate a 2D power spectrum
    psf2D = np.abs(F2)**2.

    # Calculate the azimuthally averaged 1D power spectrum
    psf1D = radialProfile.azimuthalAverage(psf2D,
                                           center=(int(T / 2), int(T / 2)))
    v = np.arange(len(psf1D)) / T
    vmax = psf2D.shape[0] / 2. / T
    ii = list(v).index(vmax)
    return v[1:ii], psf1D[1:ii]
Example #7
def FP_profile(im, xcen, ycen, trim_rad=None, mask=0):
    """
    wrap calculating the radial profile to include options for using
    r or r**2 for the profile. want to display vs. r, but fit voigt
    profile vs. r**2.  also provide options to trim radius
    to exclude outside the FOV and to set a mask value.
    """
    prof = azimuthalAverage(im, center=[xcen, ycen], maskval=mask)
    r = np.arange(len(prof))
    if trim_rad:
        prof = prof[0:trim_rad]
        r = r[0:trim_rad]
    return prof, r
Example #8
def psd_input_sdens(file_in,nnn,data_type):
	boxsize = 3.0
	boxb = 1.5

	ai = np.fromfile(file_in,dtype=data_type)
	ai = ai.reshape((nnn,nnn))/np.mean(ai)-1.0
	ai_crop = ai[nnn//4:nnn*3//4, nnn//4:nnn*3//4]

	fi = np.fft.fft2(ai_crop)
	fi = np.fft.fftshift(fi)
	psdi = np.abs(fi)**2
	psdir = radialProfile.azimuthalAverage(psdi)

	rrr = np.linspace(1,len(psdir),len(psdir))/boxb*2.0*np.pi

	return rrr,psdir/boxb**2.0/np.max(psdir)
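A hypothetical invocation; the file name, grid size, and dtype below are placeholders for whatever the simulation wrote to disk:

import numpy as np

k, psd = psd_input_sdens('sdens_snapshot.bin', nnn=1024, data_type=np.float32)
# k is in units of 2*pi/boxb; psd is the peak-normalised 1D power spectrum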
Example #9
def do_autocorr_power_spectrum(image_for_fft):
    '''
    This function performs an auto-correlation of an input image and
    outputs a normalized 1D azimuthally averaged power as a function of spatial frequency (PSD).
    :param image_for_fft: input image for which the user wishes a 1D PSD
    :return: a 1D PSD.
    '''
    Fnoise = fftpack.fft2(image_for_fft)  # Fast fourier transform
    Fnoise_shifted = fftpack.fftshift(
        Fnoise)  # Shifting such that m=0 is at the center of the image.
    psd2D_noise = np.abs(Fnoise_shifted)**2  # 2D Power map..
    psd1D_noise = radialProfile.azimuthalAverage(
        psd2D_noise)  # azimuthally averaged 1D power profile.
    #NOTE_TO_SELF: There is an alternative function for 1D PSD, which i should explore...
    psd1d_noise_normed = psd1D_noise / np.max(np.cumsum(
        psd1D_noise))  # Normalizing by the cumulative area under 1D PSD.
    return psd1d_noise_normed  # return normalized 1D PSD.
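A quick sanity check on white noise (assuming scipy.fftpack is imported as fftpack and radialProfile is on the path, as the snippet requires); white noise should give a roughly flat profile away from the DC bin:

import numpy as np

noise = np.random.normal(size=(256, 256))
psd = do_autocorr_power_spectrum(noise)
print(psd.shape, psd[1:6])   # roughly constant values after the DC term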
Example #10
    def update_model(self,data):
        ### prepare data ###
        self.real, sourceD, targetD = self.prepare_image(data)
        sourceDC, self.sourceIndex = self.get_domain_code(sourceD)
        targetDC, self.targetIndex = self.get_domain_code(targetD)
        sourceC, targetC = sourceDC, targetDC
        ### generate image ###
        if self.E is not None:
            c_enc, mu, logvar = self.E(self.real,sourceDC)
            c_rand = self.sample_latent_code(c_enc.size())
            sourceC = torch.cat([sourceDC, c_enc],1)
            targetC = torch.cat([targetDC, c_rand],1)
        self.fake = self.G(self.real,targetC)
        self.cyc = self.G(self.fake,sourceC)
        if self.E is not None:
            _, mu_enc, _ = self.E(self.fake,targetDC)
        if self.opt.lambda_ide > 0:
            self.ide = self.G(self.real,sourceC)
        ### update D ###
        self.errDs = []
        for i in range(self.opt.d_num):
            errD = self.update_D(self.Ds[i], self.Ds_opt[i], self.real.index_select(0,self.sourceIndex[i]), self.fake.index_select(0,self.targetIndex[i]))
            self.errDs.append(errD)

        ### update G ###
        self.errGs, self.errKL, self.errCode, errG_total = [], 0, 0, 0
        self.G.zero_grad()

        # change: g_loss accumulates the pure generator (adversarial) loss
        g_loss = 0
        for i in range(self.opt.d_num):  # d_num = number of domains
            errG = self.calculate_G(self.Ds[i], self.fake.index_select(0,self.targetIndex[i]))
            errG_total += errG
            g_loss += errG
            self.errGs.append(errG)

        self.errCyc = torch.mean(torch.abs(self.cyc-self.real)) *  self.opt.lambda_cyc

        errG_total += self.errCyc

        if self.opt.lambda_ide > 0:
            self.errIde = torch.mean(torch.abs(self.ide-self.real)) *  self.opt.lambda_ide
            errG_total += self.errIde

        # change: add a frequency (1D power spectrum) loss term
        # fake image 1d power spectrum

        N = 179
        epsilon = 1e-8
        psd1D_img = np.zeros([self.fake.shape[0], N])
        for t in range(self.fake.shape[0]):
            gen_imgs = self.fake.permute(0, 2, 3, 1)
            img_numpy = gen_imgs[t, :, :, :].cpu().detach().numpy()
            #img_gray = self.RGB2gray(img_numpy)
            img_gray = 0.2989 * img_numpy[:, :, 0] + 0.5870 * img_numpy[:, :, 1] + 0.1140 * img_numpy[:, :, 2]
            fft = np.fft.fft2(img_gray)
            fshift = np.fft.fftshift(fft)
            fshift += epsilon
            magnitude_spectrum = 20 * np.log(np.abs(fshift))
            psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)
            psd1D = (psd1D - np.min(psd1D)) / (np.max(psd1D) - np.min(psd1D))
            psd1D_img[t, :] = psd1D

        psd1D_img = torch.from_numpy(psd1D_img).float()
        psd1D_img = Variable(psd1D_img, requires_grad=True).to("cuda")

        # real image 1d power spectrum
        psd1D_rec = np.zeros([self.real.shape[0], N])
        for t in range(self.real.shape[0]):
            gen_imgs = self.real.permute(0, 2, 3, 1)
            img_numpy = gen_imgs[t, :, :, :].cpu().detach().numpy()
            #img_gray = self.RGB2gray(img_numpy)
            img_gray = 0.2989 * img_numpy[:, :, 0] + 0.5870 * img_numpy[:, :, 1] + 0.1140 * img_numpy[:, :, 2]
            fft = np.fft.fft2(img_gray)
            fshift = np.fft.fftshift(fft)
            fshift += epsilon
            magnitude_spectrum = 20 * np.log(np.abs(fshift))
            psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)
            psd1D = (psd1D - np.min(psd1D)) / (np.max(psd1D) - np.min(psd1D))
            psd1D_rec[t, :] = psd1D

        psd1D_rec = torch.from_numpy(psd1D_rec).float()
        psd1D_rec = Variable(psd1D_rec, requires_grad=True).to('cuda')
        criterion_freq = nn.BCELoss()
        loss_freq = criterion_freq(psd1D_rec, psd1D_img.detach())
        loss_freq *= g_loss      # change: scale the spectral loss by the generator loss
        lambda_freq = 0.5        # change: weight of the spectral term
        errG_total += loss_freq * lambda_freq



        if self.E is not None:
            self.E.zero_grad()
            self.errKL = KL_loss(mu,logvar) * self.opt.lambda_kl
            errG_total += self.errKL
            errG_total.backward(retain_graph=True)
            self.G_opt.step()
            self.E_opt.step()
            self.G.zero_grad()
            self.E.zero_grad()
            self.errCode = torch.mean(torch.abs(mu_enc - c_rand)) * self.opt.lambda_c
            self.errCode.backward()
            self.G_opt.step()
        else:
            errG_total.backward()
            self.G_opt.step()

        return errD, errG_total, loss_freq
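One detail worth noting: nn.BCELoss requires both of its inputs to lie in [0, 1], and it is the per-spectrum min-max normalisation above that makes the call valid. A standalone check:

import torch
import torch.nn as nn

a = torch.rand(4, 179)            # stands in for psd1D_rec (values in [0, 1])
b = torch.rand(4, 179)            # stands in for psd1D_img.detach()
print(nn.BCELoss()(a, b))         # finite scalar, since both inputs are in [0, 1]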
Example #11
def faraday_theory_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands_name,beam=False,polar_mask=False):
	print "Computing Cross Correlations for Bands "+str(bands_name)

#	radio_file='/data/wmap/faraday_MW_realdata.fits'
#	cl_file='/home/matt/wmap/simul_scalCls.fits'
#	nside=1024
#	npix=hp.nside2npix(nside)
#	
#	cls=hp.read_cl(cl_file)
#	simul_cmb=hp.sphtfunc.synfast(cls,nside,fwhm=0.,new=1,pol=1);
#	
#	alpha_radio=hp.read_map(radio_file,hdu='maps/phi');
#	alpha_radio=hp.ud_grade(alpha_radio,nside_out=nside,order_in='ring',order_out='ring')
#	bands=[43.1,94.5]
	q_fwhm=[27.3,11.7]
#	wl=np.array([299792458./(band*1e9) for band in bands])
#	num_wl=len(wl)
#	t_array=np.zeros((num_wl,npix))	
#	q_array=np.zeros((num_wl,npix))
#	u_array=np.zeros((num_wl,npix))
#	for i in range(num_wl):
#		tmp_cmb=rotate_tqu.rotate_tqu(simul_cmb,wl[i],alpha_radio);
#		t_array[i],q_array[i],u_array[i]=tmp_cmb
#	iqu_band_i=[t_array[0],q_array[0],u_array[0]]	
#	iqu_band_j=[t_array[1],q_array[1],u_array[1]]	


	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	alpha_radio=hp.ud_grade(alpha_radio,1024)
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	field_pixels=hdu_i['SQUARE PIXELS'].data
	hdu_i.close()
	hdu_j.close()
	
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]
	
	if polar_mask:
		P=np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)
		bad_pix=np.where( P < .2e-6)
		Delta_Q[bad_pix]=0
		Delta_U[bad_pix]=0
		alpha_u[bad_pix]=0
		alpha_q[bad_pix]=0

	cross1_array=[]
	cross2_array=[]
	L=15*(np.pi/180.)
	k=np.arange(1,np.round(500*(L/(2*np.pi))))
	l=2*np.pi*k/L
	Bl_factor=np.repeat(1,len(k))
	if beam:
		Bl_factor=hp.gauss_beam(np.pi/180.,383)
	for field1 in range(4):
		pix_cmb=field_pixels.field(field1)
		nx=int(np.sqrt(pix_cmb.shape[0]))
		flat_dq=np.reshape(Delta_Q[pix_cmb],(nx,nx))	
		flat_du=np.reshape(Delta_U[pix_cmb],(nx,nx))
		flat_aq=np.reshape(alpha_q[pix_cmb],(nx,nx))	
		flat_au=np.reshape(alpha_u[pix_cmb],(nx,nx))	
		
		dq_alm=fft.fftshift(fft.fft2(flat_dq,shape=[450,450]))
		du_alm=fft.fftshift(fft.fft2(flat_du,shape=[450,450]))
		aq_alm=fft.fftshift(fft.fft2(flat_aq,shape=[450,450]))
		au_alm=fft.fftshift(fft.fft2(flat_au,shape=[450,450]))
	
		pw2d_qau=np.real(dq_alm*np.conjugate(au_alm))		
		pw2d_uaq=np.real(du_alm*np.conjugate(aq_alm))		
		pw1d_qau=radialProfile.azimuthalAverage(pw2d_qau)
		pw1d_uaq=radialProfile.azimuthalAverage(pw2d_uaq)
		tmp_cl1=pw1d_qau[k.astype(int)-1]*L**2
		tmp_cl2=pw1d_uaq[k.astype(int)-1]*L**2
		#	index=np.where( (np.sqrt(x**2+y**2) <= k[num_k] +1)  & ( np.sqrt(x**2 + y**2) >= k[num_k] -1) )
		#	tmp1= np.sum(pw2d_qau[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp2= np.sum(pw2d_uaq[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp_cl1[num_k]=L**2*tmp1
		#	tmp_cl2[num_k]=L**2*tmp2
		cross1_array.append(tmp_cl1/Bl_factor)
		cross2_array.append(tmp_cl2/Bl_factor)

	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands_name+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands_name+'_FR_UxaQ.fits',cross2)
	return (cross1,cross2)
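The band-power binning above relies on the flat-sky correspondence ell = 2*pi*k / L for a square patch of side L radians. A self-contained sketch of that mapping with the same 15-degree patch:

import numpy as np

L_patch = 15 * (np.pi / 180.)                        # patch side in radians
k = np.arange(1, np.round(500 * (L_patch / (2 * np.pi))))
ell = 2 * np.pi * k / L_patch                        # multipole of each band
print(ell[:5])                                       # first band-power centres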
Example #12
def faraday_correlate_quiet(i_file,j_file,wl_i,wl_j,alpha_file,bands,beam=False,polar_mask=False):
	print "Computing Cross Correlations for Bands "+str(bands)

	hdu_i=fits.open(i_file)
	hdu_j=fits.open(j_file)
	alpha_radio=hp.read_map(alpha_file,hdu='maps/phi')
	alpha_radio=hp.ud_grade(alpha_radio,1024)
	iqu_band_i=hdu_i['stokes iqu'].data
	iqu_band_j=hdu_j['stokes iqu'].data
	sigma_i=hdu_i['Q/U UNCERTAINTIES'].data
	sigma_j=hdu_j['Q/U UNCERTAINTIES'].data
	field_pixels=hdu_i['SQUARE PIXELS'].data
	
	q_fwhm=[27.3,11.7]
	noise_const=np.array([36./f for f in q_fwhm])*1e-6
	npix=hp.nside2npix(1024)
	sigma_i=[noise_const[0]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]
	sigma_j=[noise_const[0]*np.random.normal(0,1,npix),noise_const[1]*np.random.normal(0,1,npix)]
	iqu_band_i[1]+=sigma_i[0]
	iqu_band_i[2]+=sigma_i[1]
	iqu_band_j[1]+=sigma_j[0]
	iqu_band_j[2]+=sigma_j[1]
	hdu_i.close()
	hdu_j.close()
	
	
	iqu_band_i=hp.smoothing(iqu_band_i,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[0])**2)*np.pi/(180.*60.),lmax=383)
	iqu_band_j=hp.smoothing(iqu_band_j,pol=1,fwhm=np.sqrt((smoothing_scale)**2-(q_fwhm[1])**2)*np.pi/(180.*60.),lmax=383)
	#alpha_radio=hp.smoothing(alpha_radio,fwhm=np.pi/180.,lmax=383)

	const=2.*(wl_i**2-wl_j**2)	

	Delta_Q=(iqu_band_i[1]-iqu_band_j[1])/const
	Delta_U=(iqu_band_i[2]-iqu_band_j[2])/const
	alpha_u=alpha_radio*iqu_band_j[2] 
	alpha_q=-alpha_radio*iqu_band_j[1]
	
	if polar_mask:
		P=np.sqrt(iqu_band_j[1]**2+iqu_band_j[2]**2)
		bad_pix=np.where( P < .2e-6)
		Delta_Q[bad_pix]=0
		Delta_U[bad_pix]=0
		alpha_u[bad_pix]=0
		alpha_q[bad_pix]=0

	cross1_array=[]
	cross2_array=[]
	cross3_array=[]
	L=15*(np.pi/180.)
	k=np.arange(1,np.round(500*(L/(2*np.pi))))
	l=2*np.pi*k/L
	Bl_factor=np.repeat(1,len(k))
	if beam:
		Bl_factor=hp.gauss_beam(np.pi/180.,383)
	for field1 in range(4):
		pix_cmb=field_pixels.field(field1)
		nx=int(np.sqrt(pix_cmb.shape[0]))
		flat_dq=np.reshape(Delta_Q[pix_cmb],(nx,nx))	
		flat_du=np.reshape(Delta_U[pix_cmb],(nx,nx))
		flat_aq=np.reshape(alpha_q[pix_cmb],(nx,nx))	
		flat_au=np.reshape(alpha_u[pix_cmb],(nx,nx))	
		
		dq_alm=fft.fftshift(fft.fft2(flat_dq,shape=[450,450]))
		du_alm=fft.fftshift(fft.fft2(flat_du,shape=[450,450]))
		aq_alm=fft.fftshift(fft.fft2(flat_aq,shape=[450,450]))
		au_alm=fft.fftshift(fft.fft2(flat_au,shape=[450,450]))
		pw2d_qau=np.real(dq_alm*np.conjugate(au_alm))		
		pw2d_uaq=np.real(du_alm*np.conjugate(aq_alm))		
		pw1d_qau=radialProfile.azimuthalAverage(pw2d_qau)
		pw1d_uaq=radialProfile.azimuthalAverage(pw2d_uaq)
		tmp_cl1=pw1d_qau[k.astype(int)-1]*L**2
		tmp_cl2=pw1d_uaq[k.astype(int)-1]*L**2
		#	index=np.where( (np.sqrt(x**2+y**2) <= k[num_k] +1)  & ( np.sqrt(x**2 + y**2) >= k[num_k] -1) )
		#	tmp1= np.sum(pw2d_qau[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp2= np.sum(pw2d_uaq[index])/(np.pi*( (k[num_k]+1)**2 -(k[num_k]-1)**2 ) )
		#	tmp_cl1[num_k]=L**2*tmp1
		#	tmp_cl2[num_k]=L**2*tmp2
		cross1_array.append(tmp_cl1/Bl_factor)
		cross2_array.append(tmp_cl2/Bl_factor)
	
	cross1=np.mean(cross1_array,axis=0)	##Average over all Cross Spectra
	cross2=np.mean(cross2_array,axis=0)	##Average over all Cross Spectra
	hp.write_cl('cl_'+bands+'_FR_QxaU.fits',cross1)
	hp.write_cl('cl_'+bands+'_FR_UxaQ.fits',cross2)
	return (cross1,cross2)
Example #13
def singleImageAnalysis(model_path):

    print("Loading the training set...")
    train_img = os.path.join(VanHateren.DATA_DIR, 'train.pkl')
    train_set = serial.load(train_img)
    patch_size = (32, 32)

    # Run the model and visualize
    print("Loading the model...")
    model = serial.load(model_path)

    list_of_images = []
    list_of_reconstructed = []

    print("Beginning the fft analysis...")

    for ii in np.arange(train_set.X.shape[0]):
        img_vector = train_set.X[ii, :]

        # Part for reconstructed
        [tensor_var] = model.reconstruct([img_vector])
        reconstructed_vector = tensor_var.eval()

        # Save off human-viewable image patches
        img_patch = train_set.denormalize_image(img_vector)
        img_patch = img_patch.reshape(patch_size)
        list_of_images.append(img_patch)

        reconstructed_patch = train_set.denormalize_image(reconstructed_vector)
        reconstructed_patch = reconstructed_patch.reshape(patch_size)
        list_of_reconstructed.append(reconstructed_patch)

    average_frequency = fft2AverageOnImageSet(list_of_images)
    average_reconstructed = fft2AverageOnImageSet(list_of_reconstructed)

    # Show the 2D Power Analysis
    fh = plt.figure()
    fh.add_subplot(1, 2, 1)
    plt.imshow(np.log(average_frequency))
    plt.title('Original Images')

    fh.add_subplot(1, 2, 2)
    plt.imshow(np.log(average_reconstructed))
    plt.title('Reconstructed Images')

    plt.savefig('fft2D.png')
    os.system('eog fft2D.png &')

    psd1D = radialProfile.azimuthalAverage(average_frequency)
    reconstructed_psd1D = radialProfile.azimuthalAverage(average_reconstructed)

    fg = plt.figure()
    fg.add_subplot(1, 2, 1)
    plt.plot(np.log(psd1D))
    plt.title('Original Images')

    fg.add_subplot(1, 2, 2)
    plt.plot(np.log(reconstructed_psd1D))
    plt.title('Reconstructed Images')

    plt.savefig('fft1D.png')
    os.system('eog fft1D.png &')
Example #14
            A13 = A13 / np.max(A13)
            A14 = A14 / np.max(A14)
            A15 = A15 / np.max(A15)
            A16 = A16 / np.max(A16)
            A17 = A17 / np.max(A17)
            A18 = A18 / np.max(A18)
            A19 = A19 / np.max(A19)
            A20 = A20 / np.max(A20)

print(A1)
sys.exit()

A1F1 = np.fft.fft2(A1)
A1F2 = np.fft.fftshift(A1F1)
A1psd2D = np.abs(A1F2)**2
A1psd1D = radialProfile.azimuthalAverage(A1psd2D)

A2F1 = np.fft.fft2(A2)
A2F2 = np.fft.fftshift(A2F1)
A2psd2D = np.abs(A2F2)**2
A2psd1D = radialProfile.azimuthalAverage(A2psd2D)

A3F1 = np.fft.fft2(A3)
A3F2 = np.fft.fftshift(A3F1)
A3psd2D = np.abs(A3F2)**2
A3psd1D = radialProfile.azimuthalAverage(A3psd2D)

A4F1 = np.fft.fft2(A4)
A4F2 = np.fft.fftshift(A4F1)
A4psd2D = np.abs(A4F2)**2
A4psd1D = radialProfile.azimuthalAverage(A4psd2D)
Example #15
def compute(d=100,
            n=1,
            w0=10,
            cla="high",
            test=False,
            output="./",
            truedir="./true/",
            falsedir="./false/",
            number=3000,
            feature_num=300):
    data = {}
    epsilon = 1e-8
    N = feature_num
    y = []
    error = []
    if not test:
        number_iter = number

        psd1D_total = np.zeros([number_iter, N])
        label_total = np.zeros([number_iter])
        psd1D_org_mean = np.zeros(N)
        psd1D_org_std = np.zeros(N)

        cont = 0

        # fake data
        rootdir = falsedir

        for subdir, dirs, files in os.walk(rootdir):
            for file in files:

                filename = os.path.join(subdir, file)

                img = cv2.imread(filename, 0)
                # windows1 = gauss_window(3, 1.0)
                # img = correl2d(img, windows1)
                # n = 3
                # img = ndimage.maximum_filter(img, (n, n))
                # img_sobel = filters.sobel(img)
                # img = img + img_sobel
                # img = img_sobel
                # img_laplace = filters.laplace(img, ksize=3, mask=None)
                # img = img + img_laplace
                # img = img_laplace
                # we crop the center
                h = int(img.shape[0] / 3)
                w = int(img.shape[1] / 3)
                img = img[h:-h, w:-w]

                # f = np.fft.fft2(img)
                # fshift = np.fft.fftshift(f)
                if cla == "bandpass":
                    fshift = butterworth_bandpass_filter(img, d, w0, n)
                else:
                    fshift = butterworthPassFilter(img, d, n, cla)
                fshift += epsilon
                magnitude_spectrum = 20 * np.log(np.abs(fshift))
                psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)

                # Calculate the azimuthally averaged 1D power spectrum
                points = np.linspace(0, N, num=psd1D.size)  # coordinates of a
                xi = np.linspace(0, N, num=N)  # coordinates for interpolation

                interpolated = griddata(points, psd1D, xi, method='cubic')
                interpolated /= interpolated[0]

                psd1D_total[cont, :] = interpolated
                label_total[cont] = 0
                cont += 1

                if cont == number_iter:
                    break
            if cont == number_iter:
                break

        for x in range(N):
            psd1D_org_mean[x] = np.mean(psd1D_total[:, x])
            psd1D_org_std[x] = np.std(psd1D_total[:, x])

        ## real data
        psd1D_total2 = np.zeros([number_iter, N])
        label_total2 = np.zeros([number_iter])
        psd1D_org_mean2 = np.zeros(N)
        psd1D_org_std2 = np.zeros(N)

        cont = 0
        rootdir2 = truedir

        for subdir, dirs, files in os.walk(rootdir2):
            for file in files:

                filename = os.path.join(subdir, file)
                parts = filename.split("/")

                img = cv2.imread(filename, 0)
                # windows1 = gauss_window(3, 1.0)
                # img = correl2d(img, windows1)
                # n = 3
                # img = ndimage.maximum_filter(img, (n, n))
                # img_sobel = filters.sobel(img)
                # img = img + img_sobel
                # img = img_sobel
                # img_laplace = filters.laplace(img, ksize=3, mask=None)
                # img = img + img_laplace
                # img = img_laplace
                # we crop the center
                h = int(img.shape[0] / 3)
                w = int(img.shape[1] / 3)
                img = img[h:-h, w:-w]

                # f = np.fft.fft2(img)
                # fshift = np.fft.fftshift(f)

                if cla == "bandpass":
                    fshift = butterworth_bandpass_filter(img, d, w0, n)
                else:
                    fshift = butterworthPassFilter(img, d, n, cla)
                fshift += epsilon

                magnitude_spectrum = 20 * np.log(np.abs(fshift))

                # Calculate the azimuthally averaged 1D power spectrum
                psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)

                points = np.linspace(0, N, num=psd1D.size)  # coordinates of a
                xi = np.linspace(0, N, num=N)  # coordinates for interpolation

                interpolated = griddata(points, psd1D, xi, method='cubic')
                interpolated /= interpolated[0]

                psd1D_total2[cont, :] = interpolated
                label_total2[cont] = 1
                cont += 1

                if cont == number_iter:
                    break
            if cont == number_iter:
                break

        for x in range(N):
            psd1D_org_mean2[x] = np.mean(psd1D_total2[:, x])
            psd1D_org_std2[x] = np.std(psd1D_total2[:, x])

        y.append(psd1D_org_mean)
        y.append(psd1D_org_mean2)

        error.append(psd1D_org_std)
        error.append(psd1D_org_std2)

        psd1D_total_final = np.concatenate((psd1D_total, psd1D_total2), axis=0)
        label_total_final = np.concatenate((label_total, label_total2), axis=0)

        data["data"] = psd1D_total_final
        data["label"] = label_total_final

        out_file = open(output + 'train_3200_2.pkl', 'wb')
        pickle.dump(data, out_file)
        out_file.close()

        print("DATA Saved")

        # load feature file
        pkl_file = open(output + 'train_3200_2.pkl', 'rb')
        data = pickle.load(pkl_file)
        pkl_file.close()
        X = data["data"]
        y = data["label"]

        num = int(X.shape[0] / 2)
        num_feat = X.shape[1]

        psd1D_org_0 = np.zeros((num, num_feat))
        psd1D_org_1 = np.zeros((num, num_feat))
        psd1D_org_0_mean = np.zeros(num_feat)
        psd1D_org_0_std = np.zeros(num_feat)
        psd1D_org_1_mean = np.zeros(num_feat)
        psd1D_org_1_std = np.zeros(num_feat)

        cont_0 = 0
        cont_1 = 0

        # We separate real and fake using the label
        for x in range(X.shape[0]):
            if y[x] == 0:
                psd1D_org_0[cont_0, :] = X[x, :]
                cont_0 += 1
            elif y[x] == 1:
                psd1D_org_1[cont_1, :] = X[x, :]
                cont_1 += 1

        # We compute statistics
        for x in range(num_feat):
            psd1D_org_0_mean[x] = np.mean(psd1D_org_0[:, x])
            psd1D_org_0_std[x] = np.std(psd1D_org_0[:, x])
            psd1D_org_1_mean[x] = np.mean(psd1D_org_1[:, x])
            psd1D_org_1_std[x] = np.std(psd1D_org_1[:, x])

        # Plot
        x = np.arange(0, num_feat, 1)
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.plot(x,
                psd1D_org_0_mean,
                alpha=0.5,
                color='red',
                label='Fake',
                linewidth=2.0)
        ax.fill_between(x,
                        psd1D_org_0_mean - psd1D_org_0_std,
                        psd1D_org_0_mean + psd1D_org_0_std,
                        color='red',
                        alpha=0.2)
        ax.plot(x,
                psd1D_org_1_mean,
                alpha=0.5,
                color='blue',
                label='Real',
                linewidth=2.0)
        ax.fill_between(x,
                        psd1D_org_1_mean - psd1D_org_1_std,
                        psd1D_org_1_mean + psd1D_org_1_std,
                        color='blue',
                        alpha=0.2)
        plt.tick_params(axis='x', labelsize=20)
        plt.tick_params(axis='y', labelsize=20)
        ax.legend(loc='best', prop={'size': 20})
        plt.xlabel("Spatial Frequency", fontsize=20)
        plt.ylabel("Power Spectrum", fontsize=20)
        # plt.show()
        plt.savefig(output + 'deepfake_celeb_DF' + cla + '_' + str(d) + '_' +
                    str(w0) + '_' + str(n) + '.png',
                    bbox_inches='tight')

    num = 3
    SVM_r = 0
    precision_SVM_r = 0
    recall_SVM_r = 0
    f1_score_SVM_r = 0
    if test:
        testset(d, n, w0, cla, output, truedir, falsedir, number, feature_num)
    for z in range(num):
        try:

            from sklearn.model_selection import train_test_split
            from sklearn.metrics import accuracy_score
            if test:
                pkl_file2 = open(output + 'test_deepfake.pkl', 'rb')
                data2 = pickle.load(pkl_file2)
                X = data2["data"]
                y = data2["label"]
                X_train, X_test, y_train, y_test = train_test_split(
                    X, y, test_size=0.4)
            else:
                pkl_file = open(output + 'train_3200_2.pkl', 'rb')
                data = pickle.load(pkl_file)
                pkl_file.close()
                X = data["data"]
                y = data["label"]
                X_train, X_test, y_train, y_test = train_test_split(
                    X, y, test_size=0.4)
            from sklearn.svm import SVC

            svclassifier_r = SVC(C=6.37, kernel='rbf', gamma=0.86)
            svclassifier_r.fit(X_train, y_train)
            print("train loop " + str(z) + " finished")
            # print('Accuracy on test set: {:.3f}'.format(svclassifier_r.score(X_test, y_test)))

            y_pred = svclassifier_r.predict(X_test)
            n_correct = sum(y_pred == y_test)
            # confusion_matrix(y_train,y_pred)
            # print("准确率: ", n_correct/len(y_pred))
            precision_SVM_r += precision_score(y_test, y_pred)
            recall_SVM_r += recall_score(y_test, y_pred)
            SVM_r += accuracy_score(y_test, y_pred)
            f1_score_SVM_r += f1_score(y_test, y_pred)
        except Exception:
            num -= 1
            print(num)

    print("Average SVM_r: " + str(SVM_r / num))
    print("Precision SVM_r: " + str(precision_SVM_r / num))
    print("Recall SVM_r: " + str(recall_SVM_r / num))
    print("F1_score SVM_r: " + str(f1_score_SVM_r / num))
Example #16
def hemisphericalDifferences(left_model_path, right_model_path, plotting=None):
    """
    """
    print("Loading the training set...")
    train_img = os.path.join(VanHateren.DATA_DIR, 'train.pkl')
    train_set = serial.load(train_img)
    patch_size = (32, 32)

    # Load both models
    print("Loading the models...")
    left_model = serial.load(left_model_path)
    right_model = serial.load(right_model_path)

    # Create empty arrays to hold images
    image_patches = {
        'orig': [],
        left_model: [],
        right_model: [],
    }

    print("Beginning the fft analysis...")

    # Go through all the images and add them to the lists
    for ii in np.arange(train_set.X.shape[0]):
        img_vector = train_set.X[ii, :]
        for model in [left_model, right_model]:

            # Part for reconstructed
            [tensor_var] = model.reconstruct([img_vector])
            reconstructed_vector = tensor_var.eval()

            reconstructed_patch = train_set.denormalize_image(
                reconstructed_vector)
            reconstructed_patch = reconstructed_patch.reshape(patch_size)
            image_patches[model].append(reconstructed_patch)

        # Save off results
        img_patch = train_set.denormalize_image(img_vector)
        img_patch = img_patch.reshape(patch_size)
        image_patches['orig'].append(img_patch)

    # Run 2D Analysis
    average_frequency = fft2AverageOnImageSet(image_patches['orig'])
    average_left_reconstructed = fft2AverageOnImageSet(
        image_patches[left_model])
    average_right_reconstructed = fft2AverageOnImageSet(
        image_patches[right_model])

    # Run 1D Analysis
    psd1D = radialProfile.azimuthalAverage(average_frequency)
    reconstructed_left_psd1D = radialProfile.azimuthalAverage(
        average_left_reconstructed)
    reconstructed_right_psd1D = radialProfile.azimuthalAverage(
        average_right_reconstructed)

    # Get difference of differences
    left_difference = abs(reconstructed_left_psd1D - psd1D)
    right_difference = abs(reconstructed_right_psd1D - psd1D)
    total_difference = left_difference - right_difference  # RH better: > 0

    # Plot 3 1D images: The Original, the reconstructions from LH and RH
    if plotting:
        fig = plt.figure(figsize=(12, 6))
        fig.add_subplot(1, 2, 1)
        plt.plot(
            np.asarray([
                np.log(psd1D),
                np.log(reconstructed_left_psd1D),
                np.log(reconstructed_right_psd1D),
            ]).T)
        plt.legend([
            'Original',
            r'LH ($\sigma$=%.2f)' % np.asarray(left_model.sigma).max(),
            r'RH ($\sigma$=%.2f)' % np.asarray(right_model.sigma).max()
        ])
        plt.xlabel('Spatial frequency')
        plt.ylabel('Power (log(amplitude))')

        fig.add_subplot(1, 2, 2)
        plt.plot(total_difference)
        plt.plot(np.arange(len(total_difference)),
                 np.zeros(total_difference.shape))  # show X-axis
        plt.title('Closeness in power differences (RH - LH)')
        plt.xlabel('Spatial frequency')
        plt.ylabel('Power difference')

        if isinstance(plotting, str):
            plt.savefig(plotting)
        else:
            plt.show()

    return total_difference
Example #17
lam = '70'
num, xx, yy = np.loadtxt('target.txt', unpack=True)
#images = glob.glob('*.fits')
i = 0
f = open('photauto.txt', 'w')
for i in np.arange(len(num)):
    if num[i] < 100:
        str_num = '0' + str(int(num[i]))
    else:
        str_num = str(int(num[i]))
    data, hdr = fits.getdata('destripe_l' + str_num + '_blue_wgls_rcal.fits',
                             0,
                             header=True)
    delt = abs(hdr['CDELT1'])
    a = radialProfile.azimuthalAverage(data, center=[int(xx[i]), int(yy[i])])
    grid = annulus_index.go([data.shape[1], data.shape[0]],
                            [int(xx[i]), int(yy[i])], 25, 10)
    bg = np.median(data[grid])

    if np.size(np.where(a - bg < 0.1 * (np.max(a[0:5]) - bg))) == 0:
        print >> f, str_num, 'none'
        continue
    radius = np.min([
        np.where(a - bg < 0.1 * (np.max(a[0:5]) - bg))[0][0],
        np.where(a <= 2 * bg)[0][0]
    ])
    print(radius, radius * abs(hdr['CDELT1']) * 3600.)

    position = (int(xx[i]), int(yy[i]))
    aperture = CircularAperture(position, r=radius)
Example #18
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
print("FFT", f)
print("FFT shift", fshift)
# print(len(fshift), len(fshift[0]))
filtr = [0] * 100
#
# print(2000*np.log(np.abs(fshift)))
# print(filtr)

# fshift = fshift * filtr

magnitude_spectrum2 = np.abs(fshift)**2
magnitude_spectrum = 20 * np.log(np.abs(fshift))

psd1D = rp.azimuthalAverage(magnitude_spectrum)

print(psd1D)

py.figure(1)
py.clf()
py.imshow(img)
py.figure(2)
py.clf()
py.imshow(magnitude_spectrum)
py.figure(3)
py.clf()
py.semilogy(psd1D)
py.xlabel("Spatial Frequency")
py.ylabel("Power Spectrum")
py.show()
Example #19
def getpowerspectrum(g):
    gback = fftpack.fft2(g)
    gback2 = fftpack.fftshift(gback)
    gback2 = np.abs(gback2)**2
    ps2 = radialProfile.azimuthalAverage(gback2)
    return ps2
Example #20
def pwspin(NRO, Nup, a, mu0, rgrid, Lpow, deriv, seqnum, data, offsetx, offsety, vmx):

 ### Universal constants read from file

 ###number of geodesics
 nn=len(data)//(Nup+1)  # integer division: number of geodesics

 ###total length of padded image
 Npad=int(NRO*10)  # int((nn)/20); must be an integer to size the array below

 ###start of nonzero entries in padded image
 cor=int(Npad/2) - int(NRO/2)

 ###outer edge of accretion disk
 rout=15.0#+seqnum/float(seqlen)*12.0

 ### file specific constants
 ufa=zeros(nn)
 alpha=zeros(nn)
 beta=zeros(nn)

 ### Luminosity as a function of Impact Params
 Lra=zeros(( (Npad),(Npad) ))


 ##########################
 #Fill final radius arrays
 for i in range (int((nn-1))):
  ufa[i] = data[Nup+i*(Nup+1)][0]

 #Fill alpha and beta arrays
 for i in range (int((nn-1))):
  alpha[i] = data[i*(Nup+1)][0]
  beta[i]  = data[i*(Nup+1)][1]

 ##########################
 #Define Kerr parameters for ISCO (prograde only!)
 r = 1.0/ufa
 M = 1.0

###Define marginally stable orbits in Kerr spacetime#######
 Z1  = 1+(1-a**2/M**2)**(1.E0/3.E0) * ( ( 1+a/M )**(1.E0/3.E0) + ( 1-a/M )**(1.E0/3.E0) )
 Z2  = ( 3*(a/M)**2+Z1**2 )**(1.E0/2.E0)
 rms = M*( 3+Z2 - ( (3-Z1)*(3+Z1+2*Z2) )**(1.E0/2.E0) )

 C3  = r>=rms


####RELATIVISTIC BEAMING PARAMS#############
 spec_indx = 0.7 #synchrotron spectral index
 phi       = arctan(alpha/beta) #angle around top of brightness profile 

 for i in range (0,len(alpha)):   #fix arctan symmetries
  if (beta[i]>=0.0):
   phi[i]=-phi[i]
###vmx is a user defined parameter = v/c which is passed to PWleastsqrs.py
 vmax = vmx*sin(acos(mu0)) #Fraction of speed of light of disk rotation along LOS
 vr   = (rms/r)**(1.0/2.0)*vmax  #keplerian Disk has v prop to 1/sqrt(r)
 Bet  = vr * numpy.sin( phi )    #Beta=v/c as a function of azimuth (phi)


###Define L(r) with REL BEAMING ##########################
 for i in range(  int(cor), int(cor + NRO) ):
  for j in  range(  int(cor), int(cor + NRO) ):
   if (r[(j-cor)+(i-cor)*NRO]<=rout and C3[(j-cor)+(i-cor)*NRO]):
    #BEAMING EQUATION: L_{obs}=L_{instrinsic}*sqrt[(1+B)/(1-B)]^(3-spec_indx)
    Lra[j][i] = (  (ufa[(j-cor)+(i-cor)*NRO])**( Lpow )  ) * (sqrt( (1+Bet[(j-cor)+(i-cor)*NRO])/(1-Bet[(j-cor)+(i-cor)*NRO]) ))**(3.0-spec_indx)




###Define L(r) without REL BEAMING ##########################
# for i in range(  int(cor), int(cor + NRO) ):
#  for j in  range(  int(cor), int(cor + NRO) ):
#   # Now asign intrinsic luminosity for photons which eminated
#   # from region between rms and rout
#   if (r[(j-cor)+(i-cor)*NRO]<=rout and C3[(j-cor)+(i-cor)*NRO]):
#    Lra[j+offsety][i+offsetx] = (ufa[(j-cor)+(i-cor)*NRO])**( Lpow )
###############################################################
  

#################Some previous Luminosty profiles #####################
#3.0*M*Md/(2.0*(r[(j-cor)+(i-cor)*NRO])**3)*( 1-B*(rms/r[(j-cor)+(i-cor)*NRO])**(1.E0/2.E0) )
#(ufa[(j-cor)+(i-cor)*NRO])**( Lpow )
 ### NY 2008 ADAF two temp ###
##  for i in range(  int(cor), int(cor + NRO) ):
##   for j in  range(  int(cor), int(cor + NRO) ):
##    if (r[(j-cor)+(i-cor)*NRO]<=rout and C3[(j-cor)+(i-cor)*NRO]):
##     if (10.0*ufa[(j-cor)+(i-cor)*NRO] > 1.0):
##      Lra[j][i] = 10.0*(ufa[(j-cor)+(i-cor)*NRO])**( Lpow ) + 1.0
##     if (10.0*ufa[(j-cor)+(i-cor)*NRO] <= 1.0):
##      Lra[j+offset][i+offset] = 20.0*(ufa[(j-cor)+(i-cor)*NRO])**( Lpow ) 
#####################################################################3



 #########################  
 ### FFT ###
 IfLa = fft2(Lra)




 ########################
 ### shift FFT to be centered at 0 ###
 IfLas=fftpack.fftshift(IfLa)


 ### Get rid of extra zeros in 2-d ifft ###
 ii = int(-1)
 jj = int(0)
 IfLas_n=zeros(  ((NRO),(NRO)), 'complex')
 for i in range(  int(cor), int(cor + NRO) ):
  ii = ii+1
  for j in  range(  int(cor), int(cor + NRO) ):
   IfLas_n[jj][ii] = IfLas[j][i]
   jj = jj+1
   if (j==int(cor + NRO-1)):
    jj=0


 ### imaginary part ##
 IfaIm = IfLas_n.imag


 #########################
 ### compute power (sqrd ifft) ###
 pwIm   = abs(IfaIm)**2
 If2mag = abs(IfLas_n)**2



 ########################
 ### Azimuthal Average ###
 pw1dIm  = radialProfile.azimuthalAverage(pwIm)
 pw1dmag = radialProfile.azimuthalAverage(If2mag)
 
 ###########################
 ### ANGULAR AVG ###
 pwang=radialProfile.angularAverage(pwIm)

 ## SMOOTH ###
 from savitzky_golay import *
 numpnts     = 3
 polydeg     = 2
 coeff       = calc_coeff(numpnts, polydeg, diff_order=deriv)
 pwIm_smooth = smooth(pw1dIm, coeff)

### NORMALIZE POW ###
 pwImmx      = max(pwIm_smooth)
 pwmagmx     = max(pw1dmag)
 pwIm_smooth = pwIm_smooth/pwmagmx
 pwang       = pwang/pwmagmx
 pwIm        = pwIm/pwmagmx

 ###########################

 ### The smoothed azimuthally averaged imaginary power spectrum is returned ###
 return pwIm_smooth
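The savitzky_golay module imported above is not shown; scipy ships an equivalent filter. A sketch of the same smoothing with scipy.signal.savgol_filter, using the window and polynomial order from the snippet (this assumes the custom module computes a standard Savitzky-Golay convolution):

import numpy as np
from scipy.signal import savgol_filter

pw = np.random.rand(128)                  # stand-in for pw1dIm
pw_smooth = savgol_filter(pw, window_length=3, polyorder=2, deriv=0)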
Example #23
                        else:
                            count = count + 1

fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(im)
plt.show()

print "Num of Vertical = ", vcount
print "Num of Horizontal = ", hcount
print "Num of Neither = ", fcount

F1 = np.fft.fft2(im)
F2 = np.fft.fftshift(F1)
psd2D = np.abs(F2)**2
psd1D = radialProfile.azimuthalAverage(psd2D)

fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(np.log10(im), cmap=py.cm.Greys)
ax.set_ylabel('Vertical=%d\n Horizontal=%d\nNeither=%d' %
              (vcount, hcount, fcount),
              rotation='horizontal')

fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.imshow(np.log10(psd2D))

fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.semilogy(psd1D)
Example #24
            continue
       
        img = cv2.imread(filename,0)
        
        # we crop the center
        #h = int(img.shape[0]/3)
        #w = int(img.shape[1]/3)
        #img = img[h:-h,w:-w] 


        f = np.fft.fft2(img)
        fshift = np.fft.fftshift(f)


        magnitude_spectrum = 20*np.log(np.abs(fshift))
        psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)
        
    

        
        # Calculate the azimuthally averaged 1D power spectrum
        points = np.linspace(0,N,num=psd1D.size) # coordinates of a
        xi = np.linspace(0,N,num=N) # coordinates for interpolation

        interpolated = griddata(points,psd1D,xi,method='cubic')
        #cv2.imshow("image", interpolated)
        #cv2.waitKey(0)
        interpolated /= interpolated[0]

        psd1D_total[cont,:] = interpolated             
        label_total[cont] = 0
Example #25
def main():
    if (len(sys.argv) != 5):
        print("Not enough arguments")
        print("insert <dir> <features> <max_files> <output filename>")
        exit()

    dir = sys.argv[1]
    N = int(sys.argv[2])
    number_iter = int(sys.argv[3])
    output_filename = str(sys.argv[4]) + ".pkl"

    if os.path.isdir(dir) is False:
        print("this directory does not exist")
        exit(0)

    data = {}
    psd1D_total = np.zeros([number_iter, N])
    label_total = np.zeros([number_iter])
    psd1D_org_mean = np.zeros(N)
    psd1D_org_std = np.zeros(N)

    cont = 0
    rootdir = dir
    for subdir, dirs, files in os.walk(rootdir):
        #print(files)
        for file in files:
            #print("entrey boy fake")
            filename = os.path.join(subdir, file)
            if filename == dir + "\desktop.ini":
                continue

            img = cv2.imread(filename, 0)
            f = np.fft.fft2(img)
            fshift = np.fft.fftshift(f)

            magnitude_spectrum = 20 * np.log(np.abs(fshift))
            psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)

            # Calculate the azimuthally averaged 1D power spectrum
            points = np.linspace(0, N, num=psd1D.size)  # coordinates of a
            xi = np.linspace(0, N, num=N)  # coordinates for interpolation

            interpolated = griddata(points, psd1D, xi, method='cubic')
            interpolated /= interpolated[0]

            psd1D_total[cont, :] = interpolated
            label_total[cont] = 0  #fake
            cont += 1

            if cont == number_iter:
                break
        if cont == number_iter:
            break

    for x in range(N):
        psd1D_org_mean[x] = np.mean(psd1D_total[:, x])
        psd1D_org_std[x] = np.std(psd1D_total[:, x])

    data["data"] = psd1D_total
    data["label"] = label_total

    output = open(output_filename, 'wb')
    pickle.dump(data, output)
    output.close()

    print("DATA Saved")
Example #26
def testset(d=100,
            n=1,
            w0=10,
            cla="high",
            output="./",
            truedir="./true/",
            falsedir="./false/",
            number=3000,
            feature_num=300):
    data = {}
    epsilon = 1e-8
    N = feature_num
    y = []
    error = []

    number_iter = number

    psd1D_total = np.zeros([number_iter, N])
    label_total = np.zeros([number_iter])
    psd1D_org_mean = np.zeros(N)
    psd1D_org_std = np.zeros(N)

    cont = 0

    # fake data
    rootdir = falsedir

    for subdir, dirs, files in os.walk(rootdir):
        for file in files:

            filename = os.path.join(subdir, file)

            img = cv2.imread(filename, 0)
            # we crop the center
            h = int(img.shape[0] / 3)
            w = int(img.shape[1] / 3)
            img = img[h:-h, w:-w]

            f = np.fft.fft2(img)
            fshift = np.fft.fftshift(f)

            if cla == "bandpass":
                fshift = butterworth_bandpass_filter(img, d, w0, n)
            else:
                fshift = butterworthPassFilter(img, d, n, cla)

            fshift += epsilon
            magnitude_spectrum = 20 * np.log(np.abs(fshift))
            psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)

            # Calculate the azimuthally averaged 1D power spectrum
            points = np.linspace(0, N, num=psd1D.size)  # coordinates of a
            xi = np.linspace(0, N, num=N)  # coordinates for interpolation

            interpolated = griddata(points, psd1D, xi, method='cubic')
            interpolated /= interpolated[0]

            psd1D_total[cont, :] = interpolated
            label_total[cont] = 0
            cont += 1

            if cont == number_iter:
                break
        if cont == number_iter:
            break

    for x in range(N):
        psd1D_org_mean[x] = np.mean(psd1D_total[:, x])
        psd1D_org_std[x] = np.std(psd1D_total[:, x])

    ## real data
    psd1D_total2 = np.zeros([number_iter, N])
    label_total2 = np.zeros([number_iter])
    psd1D_org_mean2 = np.zeros(N)
    psd1D_org_std2 = np.zeros(N)

    cont = 0
    rootdir2 = truedir

    for subdir, dirs, files in os.walk(rootdir2):
        for file in files:

            filename = os.path.join(subdir, file)
            parts = filename.split("/")

            img = cv2.imread(filename, 0)
            # we crop the center
            h = int(img.shape[0] / 3)
            w = int(img.shape[1] / 3)
            img = img[h:-h, w:-w]

            f = np.fft.fft2(img)
            fshift = np.fft.fftshift(f)

            if cla == "bandpass":
                fshift = butterworth_bandpass_filter(img, d, w0, n)
            else:
                fshift = butterworthPassFilter(img, d, n, cla)

            fshift += epsilon

            magnitude_spectrum = 20 * np.log(np.abs(fshift))

            # Calculate the azimuthally averaged 1D power spectrum
            psd1D = radialProfile.azimuthalAverage(magnitude_spectrum)

            points = np.linspace(0, N, num=psd1D.size)  # coordinates of a
            xi = np.linspace(0, N, num=N)  # coordinates for interpolation

            interpolated = griddata(points, psd1D, xi, method='cubic')
            interpolated /= interpolated[0]

            psd1D_total2[cont, :] = interpolated
            label_total2[cont] = 1
            cont += 1

            if cont == number_iter:
                break
        if cont == number_iter:
            break

    for x in range(N):
        psd1D_org_mean2[x] = np.mean(psd1D_total2[:, x])
        psd1D_org_std2[x] = np.std(psd1D_total2[:, x])

    y.append(psd1D_org_mean)
    y.append(psd1D_org_mean2)

    error.append(psd1D_org_std)
    error.append(psd1D_org_std2)

    psd1D_total_final = np.concatenate((psd1D_total, psd1D_total2), axis=0)
    label_total_final = np.concatenate((label_total, label_total2), axis=0)

    data["data"] = psd1D_total_final
    data["label"] = label_total_final

    output = open(output + 'test_deepfake.pkl', 'wb')
    pickle.dump(data, output)
    output.close()

    print("Deepfake DATA Saved")
Example #27
         #print "13", count13
         #print "14", count14
         #print "15", count15
         #print "16", count16






print(AW)

F1 = np.fft.fft2(AW)
F2 = np.fft.fftshift(F1)
psd2D = np.abs(F2)**2
psd1D = radialProfile.azimuthalAverage(psd2D)

fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(np.log10(AW), cmap=py.cm.Greys)



fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.imshow(np.log10(psd2D))

fig3 = plt.figure()
ax3 = fig3.add_subplot(111)
ax3.semilogy(psd1D)
ax3.set_xlabel('Spatial Frequency')
Example #28
    F2Lm = fftpack.fftshift(fftLm)
    F2S = fftpack.fftshift(fftS)

    # Not sure why this is squared: www.astrobetter.com/fourier-transforms-of-images-in-python/
    # Or why a log is taken before plotting, from the same source
    F3Lum = abs(F2Lum)**2
    F3Lm = abs(F2Lm)**2
    F3S = abs(F2S)**2

    #Plotting kspace
#    pylab.figure(1)
#    pylab.clf()
#    pylab.imshow(np.log10(F3Lum))

    #Calculating the azimuthally average 1D power spectrum using radialProfile
    A1Lum = radialProfile.azimuthalAverage(F3Lum)
    A1Lm = radialProfile.azimuthalAverage(F3Lm)
    A1S = radialProfile.azimuthalAverage(F3S)

#Plot radial Profile
#    pylab.figure(2)
#    pylab.clf()
#    pylab.semilogy(A1Lum, label = 'Lum')
#    pylab.semilogy(A1Lm, label = 'Lm')
#    pylab.semilogy(A1S, label = 'S')
#    pylab.title(OrigImage)
#    pylab.legend()
#    pylab.xlim(0, 256)
#    pylab.xlabel('Spatial Frequency')
#    pylab.ylabel('Power Spectrum')
#