def get_shots(run_num, sample, qidx, num_shots=3500):
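    # Load PCA-denoised difference correlations for one run/sample at q index
    # qidx; each shot is normalized by the mask autocorrelation at that q.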
    f = h5py.File(
        '/reg/d/psdm/cxi/cxilp6715/scratch/denoise_polar_intensity/diagnostics/all_pca_maskByRun/%s/run%d_PCA-denoise.h5'
        % (sample, run_num), 'r')
    f_mask = h5py.File(
        '/reg/d/psdm/cxi/cxilp6715/scratch/combined_tables/finer_q/run%d.tbl' %
        run_num, 'r')

    mask = f_mask['polar_mask_binned'].value
    mask = (mask == mask.max())
    qs = np.linspace(0, 1, mask.shape[0])
    dc = DiffCorr(mask[None, :, :], qs, 0, pre_dif=True)
    mask_cor = dc.autocorr().mean(0)

    f_mask.close()

    cutoff = f['q%d' % qidx]['num_pca_cutoff'].value
    if num_shots == 'all':
        shots = f['q%d' %
                  qidx]['pca%d' %
                        cutoff]['all_train_difcors'][:] / mask_cor[qidx]
    else:
        shots = f['q%d' % qidx][
            'pca%d' % cutoff]['all_train_difcors'][:num_shots] / mask_cor[qidx]

    return shots[:, 0, :]
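# Hedged usage sketch (the run number, sample name, and q index here are
# hypothetical placeholders, not values from the original experiment):
# shots = get_shots(96, 'h2o', 5, num_shots=1000)   # -> (1000, Nphi) array
# ave_cor = shots.mean(0)                           # average difference correlation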
Example #2
def corr_pair_diff_PI(norm_shots, mask_corr, qs, qidx_pair=25, phi_offset=10):
    """
    Pair polar intensities at one q value using the corr pair method
    Correlations of individual intensities are computed. Shots with similar correlations are paired
    Pairing metrics are euclidean distances between single-shot correlations
    Pairing done by the Stable roommate method

    norm_shots - numpy.array, Nshot*Nq*Nphi, normalized and zeroed polar intensities
    mask_corr - numpy.array, Nq*Nphi, correlations of the mask used for the PI shots
    qs - numpy.array, Nq, q values covered by the PI shots 
    qidx_pair - int, idx of the q values at which to pair shots
    phi_offset - int, number of pixels in phi to ignore on the two extremes of the correlations
    Ignoring these pixels means we are not looking at the very high values near 0 and pi

    """

    print("doing corr pairing...")
    
    num_phi = norm_shots.shape[-1]

    dc = DiffCorr(norm_shots, qs, 0, pre_dif=True)
    corr = dc.autocorr()

    corr /= mask_corr
    corr = corr[:, :, phi_offset:num_phi // 2 - phi_offset]

    eps = distance.cdist(corr[:, qidx_pair], corr[:, qidx_pair],
                         metric='euclidean')
    # inflate the diagonal so it is never the minimum, i.e. a shot cannot pair with itself
    epsI = 1.1 * eps.max(1) * np.identity(eps.shape[0])
    eps += epsI

    shot_preference = np.roll(eps.argsort(1), 1, axis=1)
    pref_dict = {str(E[0]): list(E[1:]) for E in shot_preference.astype(str)}

    print("stable roommate pair....")
    pairs_dict = stable.stableroomate(prefs=pref_dict)

    pairing = np.array(MakeTagPairs._remove_duplicate_pairs(pairs_dict))

    print("computing difference intensities...")
    diff_norm = np.zeros((norm_shots.shape[0] // 2, 1, norm_shots.shape[-1]),
                         dtype=np.float64)

    for index, pp in enumerate(pairing):
        diff_norm[index, 0] = norm_shots[pp[0], qidx_pair] - norm_shots[pp[1], qidx_pair]

    return diff_norm
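# A minimal standalone sketch of the preference construction above, using only
# numpy/scipy on synthetic data (shapes are illustrative, not from the original
# experiment; the stable-roommate call itself is omitted):
import numpy as np
from scipy.spatial import distance

demo_corr = np.random.randn(6, 40)           # 6 shots, 40 correlation points each
eps = distance.cdist(demo_corr, demo_corr, metric='euclidean')
eps += 1.1 * eps.max(1) * np.identity(6)     # inflate diagonal so no shot pairs with itself
# argsort lists partners nearest-to-farthest; the roll moves each shot's own
# (now farthest) index to the front, where it serves as the dict key
shot_preference = np.roll(eps.argsort(1), 1, axis=1)
pref_dict = {str(E[0]): list(E[1:]) for E in shot_preference.astype(str)}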
Example #3
def corr_pair_diff_PI(norm_shots, mask_corr, qs, qidx_pair=25, phi_offset=10):
    """
    Pair polar intensities at one q value using the corr pair method
    Correlations of individual intensities are computed. Shots with similar correlations are paired
    Pairing metrics are euclidean distances between single-shot correlations
    Pairing done by the Stable roommate method

    norm_shots - numpy.array, Nshot*Nq*Nphi, normalized and zeroed polar intensities
    mask_corr - numpy.array, Nq*Nphi, correlations of the mask used for the PI shots
    qs - numpy.array, Nq, q values covered by the PI shots 
    qidx_pair - int, idx of the q values at which to pair shots
    phi_offset - int, number of pixels in phi to ignore on the two extremes of the correlations
    Ignoring these pixels means we are not looking at the very high values near 0 and pi

    """

    print("doing corr pairing...")

    num_phi = norm_shots.shape[-1]

    dc = DiffCorr(norm_shots, qs, 0, pre_dif=True)
    corr = dc.autocorr()

    corr /= mask_corr
    corr = corr[:, :, phi_offset:num_phi // 2 - phi_offset]

    eps = distance.cdist(corr[:, qidx_pair],
                         corr[:, qidx_pair],
                         metric='euclidean')
    # do this so the diagonals are not the minimum, i.e. don't pair shot with itself
    epsI = 1.1 * eps.max(1) * np.identity(eps.shape[0])
    eps += epsI

    shot_preference = np.roll(eps.argsort(1), 1, axis=1)
    pref_dict = {str(E[0]): list(E[1:]) for E in shot_preference.astype(str)}

    print("stable roommate pair....")
    pairs_dict = stable.stableroomate(prefs=pref_dict)

    pairing = np.array(MakeTagPairs._remove_duplicate_pairs(pairs_dict))

    print("computing difference intensities...")
    diff_norm = np.zeros((norm_shots.shape[0] // 2, 1, norm_shots.shape[-1]),
                         dtype=np.float64)

    for index, pp in enumerate(pairing):
        diff_norm[index, 0] = (norm_shots[pp[0], qidx_pair] -
                               norm_shots[pp[1], qidx_pair])

    return diff_norm
Example #4
def pair_diff_PI(norm_shots,
                 mask_corr,
                 qs,
                 qidx_pair=25,
                 phi_offset=0,
                 pair_method='int'):
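    # Pair shots at q index qidx_pair either by single-shot correlations
    # ('corr') or directly by polar intensities ('int'), then return the
    # paired difference intensities and the pairing indices.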

    if pair_method == 'corr':
        print("doing corr pairing...")
        # dummy qs
        num_phi = norm_shots.shape[-1]

        dc = DiffCorr(norm_shots, qs, 0, pre_dif=True)
        corr = dc.autocorr()

        corr /= mask_corr
        corr = corr[:, :, phi_offset:num_phi // 2 - phi_offset]

        eps = distance.cdist(corr[:, qidx_pair],
                             corr[:, qidx_pair],
                             metric='euclidean')

    elif pair_method == 'int':
        print("doing intensity pair...")
        eps = distance.cdist(norm_shots[:, qidx_pair],
                             norm_shots[:, qidx_pair],
                             metric='euclidean')
    # do this so the diagonals are not the minimum, i.e. don't pair shot with itself
    epsI = 1.1 * eps.max(1) * np.identity(eps.shape[0])
    eps += epsI

    shot_preference = np.roll(eps.argsort(1), 1, axis=1)
    pref_dict = {str(E[0]): list(E[1:]) for E in shot_preference.astype(str)}

    print("stable roommate pair....")
    pairs_dict = stable.stableroomate(prefs=pref_dict)

    pairing = np.array(MakeTagPairs._remove_duplicate_pairs(pairs_dict))

    print("computing difference intensities...")
    diff_norm = np.zeros(
        (norm_shots.shape[0] // 2, norm_shots.shape[1], norm_shots.shape[-1]),
        dtype=np.float64)

    for index, pp in enumerate(pairing):
        diff_norm[index] = norm_shots[pp[0]] - norm_shots[pp[1]]

    return diff_norm, pairing
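# Hedged usage sketch (hypothetical shapes; assumes the loki DiffCorr, stable,
# and MakeTagPairs helpers imported by the original script are available):
# diff_norm, pairing = pair_diff_PI(norm_shots, mask_corr, qs,
#                                   qidx_pair=25, pair_method='int')
# diff_norm.shape  ->  (Nshot // 2, Nq, Nphi)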
Example #5
def pair_diff_PI(norm_shots, mask_corr, qs, qidx_pair=25, phi_offset=0, pair_method='int'):

    if pair_method == 'corr':
        print("doing corr pairing...")
        # dummy qs
        num_phi = norm_shots.shape[-1]

        dc = DiffCorr(norm_shots, qs, 0, pre_dif=True)
        corr = dc.autocorr()

        corr /= mask_corr
        corr = corr[:, :, phi_offset:num_phi // 2 - phi_offset]

        eps = distance.cdist(corr[:, qidx_pair], corr[:, qidx_pair],
                             metric='euclidean')
        
    elif pair_method == 'int':
        print("doing intensity pair...")
        eps = distance.cdist(norm_shots[:, qidx_pair], norm_shots[:, qidx_pair],
                             metric='euclidean')
    # do this so the diagonals are not the minimum, i.e. don't pair shot with itself
    epsI = 1.1 * eps.max(1) * np.identity(eps.shape[0])
    eps += epsI

    shot_preference = np.roll(eps.argsort(1), 1, axis=1)
    pref_dict = {str(E[0]): list(E[1:]) for E in shot_preference.astype(str)}

    print("stable roommate pair....")
    pairs_dict = stable.stableroomate(prefs=pref_dict)

    pairing = np.array(MakeTagPairs._remove_duplicate_pairs(pairs_dict))

    print("computing difference intensities...")
    diff_norm = np.zeros((norm_shots.shape[0] // 2, norm_shots.shape[1],
                          norm_shots.shape[-1]), dtype=np.float64)

    for index, pp in enumerate(pairing):
        diff_norm[index] = norm_shots[pp[0]] - norm_shots[pp[1]]

    return diff_norm, pairing
Example #6
def get_shots(run_num, sample, qidx, num_shots=3500):
    f = h5py.File('/reg/d/psdm/cxi/cxilp6715/scratch/denoise_polar_intensity/diagnostics/all_pca_maskByRun/%s/run%d_PCA-denoise.h5'%(sample,run_num),
                  'r')
    f_mask = h5py.File('/reg/d/psdm/cxi/cxilp6715/scratch/combined_tables/finer_q/run%d.tbl'%run_num,'r')

    mask = f_mask['polar_mask_binned'].value
    mask = (mask == mask.max())
    qs = np.linspace(0, 1, mask.shape[0])
    dc = DiffCorr(mask[None, :, :], qs, 0, pre_dif=True)
    mask_cor = dc.autocorr().mean(0)

    f_mask.close()
    
    cutoff = f['q%d' % qidx]['num_pca_cutoff'].value
    if num_shots == 'all':
        shots = f['q%d' % qidx]['pca%d' % cutoff]['all_train_difcors'][:] / mask_cor[qidx]
    else:
        shots = f['q%d' % qidx]['pca%d' % cutoff]['all_train_difcors'][:num_shots] / mask_cor[qidx]

    return shots[:, 0, :]
Example #7
    pc2 = new_rp_protein[class_member_mask, 1]
    if pc2.size % 2 > 0:
        pc2 = pc2[:-1]
    pc2_rank = np.argsort(pc2)
    pairing = pc2_rank.reshape((pc2_rank.size // 2, 2))

    diff_PI = np.zeros((pairing.shape[0], 3, shots.shape[-1]),
                       dtype=np.float64)

    for idx, pp in enumerate(pairing):
        shot_diff = shots[pp[0]] - shots[pp[1]]
        diff_PI[idx, 0] = shot_diff[:10].sum(0)
        diff_PI[idx, 1] = shot_diff[10:20].sum(0)
        diff_PI[idx, 2] = shot_diff[20:].sum(0)

    dc = DiffCorr(diff_PI, qvalues, k_beam, pre_dif=True)
    corr = dc.autocorr().mean(0)

    corrs.append(corr)

    cluster_sizes.append(diff_PI.shape[0])

total_shots = np.sum(cluster_sizes).astype(float)
print("total number of shots used is %d" % (total_shots * 2))

# diff cor for the whole run
cluster_sizes = np.array(cluster_sizes) / total_shots
corrs = np.array(corrs)
ave_corr = (corrs * cluster_sizes[:, None, None]).sum(0)
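# Quick synthetic check of the cluster-size weighting above (numbers made up):
# clusters of 30 and 10 pairs get weights 0.75 and 0.25, so per-cluster
# averages of 2.0 and 6.0 combine to 3.0.
corrs_demo = np.ones((2, 3, 10)) * np.array([2.0, 6.0])[:, None, None]
w_demo = np.array([30, 10]) / 40.0
assert np.allclose((corrs_demo * w_demo[:, None, None]).sum(0), 3.0)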

# save ave diff cor
Example #8
            mask = make_mask(ss, zero_sigma=0.0)
        ss *= mask

        mean_ss = ss.sum(-1) / mask.sum(-1)

        ss = ss - mean_ss[:, None]
        norm_shots[idx] = np.nan_to_num(ss * mask)

    #clean up a bit
    del shots

    diff_norm = pair_diff_PI(norm_shots, qidx_pair=qidx4pairing)

    # dummy qvalues
    qs = np.linspace(0.1, 1.0, diff_norm.shape[1])
    dc = DiffCorr(diff_norm, qs, 0, pre_dif=True)
    ac = dc.autocorr().mean(0)
    norm_corrs.append(ac)
    shot_nums_per_set.append(diff_norm.shape[0])

    # save difference int
    f_out.create_dataset('norm_diff_%d' % shot_set_num, data=diff_norm)
    shot_set_num += 1
##############
# Debugging
# break
##############
ave_norm_corr = (norm_corrs *
                 (np.array(shot_nums_per_set) / float(np.sum(shot_nums_per_set)))[:, None, None]).sum(0)
# if use_basic_mask:
Example #9
else:
    qpair_inds = range(qmin, qmax + 1)  # qmax is included

# load q values
qs = np.load(os.path.join(args.data_dir, 'qvalues.npy'))[qpair_inds]

# now combine basic mask and streak mask
print("making streak_mask")
sample_shots = PI[::80, 0, :]
num_bins = int(sample_shots.shape[0] / 10)
streak_mask = make_streak_mask(sample_shots, num_bins=num_bins)

mask = mask[qpair_inds, :]
mask = mask * streak_mask[None, :]

mask_dc = DiffCorr(mask[None, :], qs, 0, pre_dif=True)
mask_corr = mask_dc.autocorr()

# get pulse energy, max pos, max height
print("getting pulse energy per shot...")
pulse_energy = np.nan_to_num(
    (f['gas_detector']['f_21_ENRC'].value + f['gas_detector']['f_22_ENRC'].value) / 2.)

# extract radial profile max and max pos
print("getting rad prof max pos and max height vals...")
num_shots = f['radial_profs'].shape[0]
max_val = np.zeros(num_shots)
max_pos = np.zeros(num_shots)
for idx in range(num_shots):
    y = f['radial_profs'][idx]
    y_interp = smooth(y, beta=0.1, window_size=50)
Example #10
        mask2 = mask.copy()
        sm_mask2 = all_streak_masks[
            streak_mask_labels[np.where(all_shot_tags == pp[1])[0][0]]]
        mask2[:10, :] = mask[:10, :] * sm_mask2[None, :]

        sm_mask12 = sm_mask1 * sm_mask2

        norm_shot1 = normalize_shot(shot1, mask1)
        norm_shot2 = normalize_shot(shot2, mask2)

        diff_shot12 = norm_shot1 - norm_shot2
        random_diff_norm[chunk_idx] = diff_shot12
        random_diff_streak_masks[chunk_idx] = sm_mask12

    dc = DiffCorr(random_diff_norm, qs, 0, pre_dif=True)

    all_diff_masks = np.array([mask] * random_diff_norm.shape[0])
    all_diff_masks[:, :10, :] = (random_diff_streak_masks[:, None, :] *
                                 all_diff_masks[:, :10, :])

    mask_dc = DiffCorr(all_diff_masks.copy(), qs, 0, pre_dif=True)
    mask_corr = mask_dc.autocorr()
    print "mask corr shape:"
    print mask_corr.shape

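    # divide by the mask autocorrelation to correct for masked-out pixels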
    ac = dc.autocorr() / mask_corr
    norm_corrs.append(ac.mean(0))
Example #11
            mean_ss = ss.sum(-1) / mask.sum(-1)

            ss = ss - mean_ss[:, None]

            # meanNorm_shots[idx] = np.nan_to_num(ss/mean_ss[:,None]*mask)
            norm_shots[idx] = np.nan_to_num(ss * mask)

        #clean up a bit
        del shots

        diff_norm = norm_shots[1::2] - norm_shots[::2]
        # diff_masked = masked_shots[1::2] - masked_shots[::2]
        # diff_meanNorm = meanNorm_shots[1::2] - meanNorm_shots[::2]

        qs = f['q_intervals'].value[1:, 0]
        dc = DiffCorr(mask[None, :, :], qs, 0, pre_dif=True)
        mask_ac = dc.autocorr().mean(0)

        dc = DiffCorr(diff_norm, qs, 0, pre_dif=True)
        ac = dc.autocorr().mean(0) / mask_ac
        f_out.create_dataset('%s/norm_corr' % model_name, data=ac)

        del diff_norm

        # dc = DiffCorr(diff_masked,qs,0,pre_dif=True)
        # ac = dc.autocorr().mean(0)/mask_ac
        # f_out.create_dataset('masked_corr',data=ac)

        # dc = DiffCorr(diff_meanNorm,qs,0,pre_dif=True)
        # ac = dc.autocorr().mean(0)/mask_ac
        # f_out.create_dataset('meanNorm_corr',data=ac)
Example #12
else:
    qpair_inds = range(qmin, qmax + 1)  # qmax is included

# load q values
qs = np.load(os.path.join(args.data_dir, 'qvalues.npy'))[qpair_inds]

# now combine basic mask and streak mask
print("making streak_mask")
sample_shots = PI[::80, 0, :]
num_bins = int(sample_shots.shape[0] / 10)
streak_mask = make_streak_mask(sample_shots, num_bins=num_bins)

mask = mask[qpair_inds, :]
mask = mask * streak_mask[None, :]

mask_dc = DiffCorr(mask[None, :], qs, 0, pre_dif=True)
mask_corr = mask_dc.autocorr()

# get pulse energy, max pos, max height
print("getting pulse energy per shot...")
pulse_energy = np.nan_to_num(
    (f['gas_detector']['f_21_ENRC'].value + f['gas_detector']['f_22_ENRC'].value) / 2.)

# extract radial profile max and max pos
print("getting rad prof max pos and max height vals...")
num_shots = f['radial_profs'].shape[0]
max_val = np.zeros(num_shots)
max_pos = np.zeros(num_shots)
for idx in range(num_shots):
    y = f['radial_profs'][idx]
    y_interp = smooth(y, beta=0.1, window_size=50)
Example #13
try:
    f_streak = h5py.File(os.path.join(streak_dir, out_file), 'r')
    streak_mask = f_streak['streak_mask'].value
    print("loaded streak mask from %s" % os.path.join(streak_dir, out_file))

except (IOError, KeyError):
    print("making streak_mask")
    sample_shots = PI[::80, 0, :]
    num_bins = int(sample_shots.shape[0] / 10)
    streak_mask = make_streak_mask(sample_shots, num_bins=num_bins)

mask = mask * streak_mask[None, :]

qs = np.array([0.2, 0.22, 0.24, 0.26])
mask_dc = DiffCorr(mask[None, :], qs, 0, pre_dif=True)
mask_corr = mask_dc.autocorr()

# get pulse energy, max pos, max height
print("getting pulse energy per shot...")
pulse_energy = np.nan_to_num(
    (f['gas_detector']['f_21_ENRC'].value + f['gas_detector']['f_22_ENRC'].value) / 2.)

# extract radial profile max and max pos
print("getting rad prof max height vals, pos, and interpolating rad profs...")
num_shots = f['radial_profs'].shape[0]

interp_rps = np.zeros((num_shots, f['radial_profs'].shape[-1]))
max_pos = np.zeros(num_shots)
for idx in range(num_shots):
    y = f['radial_profs'][idx]
Example #14
for ii in range(x.shape[1]):
    if select is None:
        select = np.abs(x[:, ii] - s[ii]) < m[ii] * 10
    select *= (np.abs(x[:, ii] - s[ii]) < m[ii] * 10)
select = select.astype(bool)
print(select.shape, norm_shots.shape)
print(select.sum() / float(norm_shots.shape[0]))
# f_out.close()

if norm_shots.shape[0] % 2 > 0:
    norm_shots = norm_shots[:-1]
######## load mask and normalize the shots
phi_offset = 30
num_phi = norm_shots.shape[-1]
qs = np.linspace(0, 1, shots.shape[1])
dc = DiffCorr(mask[None, :, :], qs, 0, pre_dif=True)
mask_corr = dc.autocorr()
##### compute single-shot correlations
dc = DiffCorr(norm_shots - norm_shots.mean(0)[None, :, :], qs, 0, pre_dif=True)
corr = dc.autocorr()
print(corr.shape)

corr /= mask_corr
corr = corr[:, :, phi_offset:num_phi // 2 - phi_offset]

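# benchmark: difference correlations of consecutively paired shots, with no
# clustering or preference-based pairing applied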
diff_shots = norm_shots[::2] - norm_shots[1::2]
dc = DiffCorr(diff_shots, qs, 0, pre_dif=True)
no_cluster_ac = (dc.autocorr() / mask_corr).mean(0)
f_out.create_dataset('raw_corrs', data=no_cluster_ac)
Example #15
# output file to save data
cluster_file = run_file.replace('.tbl', '_PCA-cluster.h5')
f_cluster = h5py.File(os.path.join(cluster_dir, cluster_file), 'r')
cluster_set_keys = f_cluster.keys()

out_file = run_file.replace('.tbl', '_cor.h5')
f_out = h5py.File(os.path.join(save_dir, out_file), 'w')

if 'polar_mask_binned' in f.keys():
    mask = np.array(f['polar_mask_binned'].value == f['polar_mask_binned'].value.max(),
                    dtype=int)
else:
    mask = np.load('/reg/d/psdm/cxi/cxilp6715/scratch/water_data/binned_pmask_basic.npy')


qs = np.linspace(0.2, 0.88, mask.shape[0])
dc = DiffCorr(mask[None, :, :], qs, 0, pre_dif=True)
mask_ac = dc.autocorr()

PI = f['polar_imgs']
shot_tags = np.arange(0, PI.shape[0])

for set_key in cluster_set_keys:
    print("computing diff cor for %s..."%set_key)
    qidx = int( set_key.split('q')[1] )
    labels = f_cluster[set_key]['cluster_labels'].value.astype(int)

    f_out.create_group(set_key)

    unique_labels=np.unique(labels)
    cluster_corrs=[]
Example #16
        continue

    shots_to_grab = sorted(shots_to_grab)

    shots = PI[shots_to_grab]
    # mask and normalize the shots

    if shots.dtype != 'float64':
        shots = shots.astype(np.float64)

    for idx, ss in enumerate(shots):
        mask = make_mask(ss, zero_sigma=1.5)

        ss *= mask
        mean_ss = ss.sum(-1) / mask.sum(-1)

        ss = ss - mean_ss[:, None]
        shots[idx] = ss * mask

    f_out.create_dataset('difInt_%d' % ll, data=shots)

    dc = DiffCorr(shots, qvalues, k_beam, pre_dif=False)
    corr = dc.autocorr()

    f_out.create_dataset('difCor_%d' % ll, data=corr)

f_out.close()

f_cluster.close()
f_run.close()
Example #17
	
    shots_to_grab = sorted(shots_to_grab)

    shots = PI[shots_to_grab]
    # mask and normalize the shots

    if shots.dtype != 'float64':
        shots = shots.astype(np.float64)

    for idx, ss in enumerate(shots):
        mask = make_mask(ss, zero_sigma=1.5)

        ss *= mask
        mean_ss = ss.sum(-1) / mask.sum(-1)

        ss = ss - mean_ss[:, None]
        shots[idx] = ss * mask

    f_out.create_dataset('difInt_%d' % ll, data=shots)

    dc = DiffCorr(shots, qvalues, k_beam, pre_dif=False)
    corr = dc.autocorr()

    f_out.create_dataset('difCor_%d' % ll, data=corr)

f_out.close()

f_cluster.close()
f_run.close()
Example #18
        mask_i = make_mask(shot_i, zero_sigma=args.zero_sigma)
        shot_i *= mask_i
        mean_ss = shot_i.sum(-1) / mask_i.sum(-1)
        shot_i = np.nan_to_num((shot_i - mean_ss[:, None]) * mask_i)

        mask_j = make_mask(shot_j, zero_sigma=args.zero_sigma)
        shot_j *= mask_j
        mean_ss = shot_j.sum(-1) / mask_j.sum(-1)
        shot_j = np.nan_to_num((shot_j - mean_ss[:, None]) * mask_j)

        diff_mask[idx] = mask_i * mask_j

        diff_PI[idx] = shot_i - shot_j


    dc = DiffCorr(diff_PI, qvalues, k_beam, pre_dif=True)
    PI_corr = dc.autocorr()

    mask_dc = DiffCorr(diff_mask, qvalues, k_beam, pre_dif=True)
    mask_corr = mask_dc.autocorr()
    #print mask_corr.shape
    #print PI_corr.shape
    #np.save('PI_corr.npy',PI_corr)
    #np.save('mask_corr.npy',mask_corr)


    # deal with the mask part
    # corr = np.nan_to_num((PI_corr/mask_corr)).mean(0)
    corr = PI_corr / mask_corr
    corr[corr == np.inf] = 0
Example #19
        
        mean_ss = ss.sum(-1) / mask.sum(-1)

        ss = ss - mean_ss[:, None]
        norm_shots[idx] = np.nan_to_num(ss * mask)

    #clean up a bit
    del shots


    diff_norm = pair_diff_PI(norm_shots, qidx_pair=qidx4pairing)

    # dummy qvalues
    qs = np.linspace(0.1, 1.0, diff_norm.shape[1])
    dc = DiffCorr(diff_norm, qs, 0, pre_dif=True)
    ac = dc.autocorr().mean(0)
    norm_corrs.append(ac)
    shot_nums_per_set.append(diff_norm.shape[0])

    # save difference int
    f_out.create_dataset('norm_diff_%d' % shot_set_num, data=diff_norm)
    shot_set_num += 1
##############
# Debugging
# break
##############
ave_norm_corr = (norm_corrs *
                 (np.array(shot_nums_per_set) / float(np.sum(shot_nums_per_set)))[:, None, None]).sum(0)
# if use_basic_mask:
Example #20
data_dir = os.path.join(args.data_dir, sample)
save_dir = os.path.join(args.out_dir, sample)

if not os.path.exists(save_dir):
    os.makedirs(save_dir)
print(save_dir)

# load and compute mask dif cor
f_mask = h5py.File(os.path.join(args.mask_dir, 'run%d.tbl' % run_num), 'r')

mask = f_mask['polar_mask_binned'].value
mask = (mask == mask.max())
qs = np.linspace(0, 1, mask.shape[0])
dc = DiffCorr(mask[None, :, :], qs, 0, pre_dif=True)
mask_cor = dc.autocorr().mean(0)
f_mask.close()

# parameters
phi_offset = 15
interp_num_phi = 100
n_comp = 20

# load simulations and interpolate simulations and data
if sample.startswith('GDP'):
    sims = np.load(
        '/reg/d/psdm/cxi/cxilr6716/results/nnmf_filter/GDP_closed_121models.npy'
    )
else:
    sims = np.load(
Example #21
            ss = ss - mean_ss[:, None]
            norm_shots[idx] = np.nan_to_num(ss * mask)

        #clean up a bit
        del shots

        # rank by max pos and pair
        order = np.argsort(max_pos_set)
        sorted_shots = norm_shots[order]
        if sorted_shots.shape[0] % 2 > 0:
            sorted_shots = sorted_shots[:-1]
        diff_norm = sorted_shots[1::2] - sorted_shots[::2]

        # dummy qvalues
        qs = np.linspace(0.1, 1.0, diff_norm.shape[1])
        dc = DiffCorr(diff_norm, qs, 0, pre_dif=True)
        ac = dc.autocorr()
        norm_corrs.append(ac.mean(0))
        shot_nums_per_set.append(diff_norm.shape[0])

        # save difference int
        f_out.create_dataset('autocorr_%d' % shot_set_num, data=ac)

        shot_set_num += 1


ave_norm_corr = (norm_corrs *
                 (np.array(shot_nums_per_set) / float(np.sum(shot_nums_per_set)))[:, None, None]).sum(0)

qs = np.linspace(0.1, 1.0, diff_norm.shape[1])
mask_dc = DiffCorr(mask[None, :], qs, 0, pre_dif=True)
Example #22
    meanNorm_shots[idx] = np.nan_to_num(ss / mean_ss[:, None] * mask)
    norm_shots[idx] = np.nan_to_num(ss * mask)

#clean up a bit
del shots

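# consecutive-shot differences for the three normalization variants:
# zero-mean ('norm'), masked-only ('masked'), and mean-normalized ('meanNorm')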
diff_norm = norm_shots[1::2] - norm_shots[::2]
diff_masked = masked_shots[1::2] - masked_shots[::2]
diff_meanNorm = meanNorm_shots[1::2] - meanNorm_shots[::2]

# save difference int
# f_out.create_dataset('norm_diff', data = diff_norm)
# f_out.create_dataset('masked_diff', data = diff_masked)

qs = np.linspace(0.2, 0.9, PI.shape[1])
dc = DiffCorr(mask[None, :, :], qs, 0, pre_dif=True)
mask_ac = dc.autocorr().mean(0)

dc = DiffCorr(diff_norm, qs, 0, pre_dif=True)
ac = dc.autocorr().mean(0) / mask_ac
f_out.create_dataset('norm_corr', data=ac)

dc = DiffCorr(diff_masked, qs, 0, pre_dif=True)
ac = dc.autocorr().mean(0) / mask_ac
f_out.create_dataset('masked_corr', data=ac)

dc = DiffCorr(diff_meanNorm, qs, 0, pre_dif=True)
ac = dc.autocorr().mean(0) / mask_ac
f_out.create_dataset('meanNorm_corr', data=ac)

f_out.close()
Example #23
import h5py
import numpy as np
from loki.RingData import DiffCorr

f = h5py.File('/reg/d/psdm/cxi/cxilp6715/scratch/rp_clusters/PE_cluster_difCor/h2o/water_diffcorr.tbl','r')
diff_corr = f['diff_corr']

f_out = h5py.File('/reg/d/psdm/cxi/cxilp6715/scratch/rp_clusters/PE_cluster_difCor/h2o/water_sigConverge.tbl','w')


# get mask
mask = np.load('/reg/d/psdm/cxi/cxilp6715/scratch/water_data/binned_pmask_basic.npy')
qs = np.linspace(0.1, 1.0, diff_corr.shape[1])
mask_dc = DiffCorr(mask[None,:], qs, 0, pre_dif=True)
mask_corr = mask_dc.autocorr().mean(0)

num_samples = 200
results = np.zeros((num_samples, diff_corr.shape[1], diff_corr.shape[2]))
shot_inds = np.arange(diff_corr.shape[0])

num_shots = 30000
for nn in range(num_samples):
    np.random.shuffle(shot_inds)

    idx = sorted(shot_inds[:num_shots])

    results[nn] = diff_corr[idx, :, :].mean(0) / mask_corr
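# One possible convergence check on these samples (an assumption, not part of
# the original script): the spread across the random subsets should shrink as
# num_shots grows, e.g. spread = results.std(0).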
f_out.create_dataset('diff_corrs', data=results)
f_out.close()
Example #24
# output file to save data
cluster_file = run_file.replace('.tbl', '_PCA-cluster.h5')
f_cluster = h5py.File(os.path.join(cluster_dir, cluster_file), 'r')
cluster_set_keys = f_cluster.keys()

out_file = run_file.replace('.tbl', '_cor.h5')
f_out = h5py.File(os.path.join(save_dir, out_file), 'w')

if 'polar_mask_binned' in f.keys():
    mask = np.array(f['polar_mask_binned'].value == f['polar_mask_binned'].value.max(),
                    dtype=int)
else:
    mask = np.load('/reg/d/psdm/cxi/cxilp6715/scratch/water_data/binned_pmask_basic.npy')


qs = np.linspace(0.2, 0.88, mask.shape[0])
dc = DiffCorr(mask[None, :, :], qs, 0, pre_dif=True)
mask_ac = dc.autocorr()

PI = f['polar_imgs']
shot_tags = np.arange(0, PI.shape[0])

for set_key in cluster_set_keys:
    print("computing diff cor for %s..."%set_key)
    qidx = int( set_key.split('q')[1] )
    labels = f_cluster[set_key]['cluster_labels'].value.astype(int)
    if 'streak_masks' in f_cluster[set_key].keys():
        print ('using streak masks...')
        streak_masks = f_cluster[set_key]['streak_masks'].value.astype(bool)
    else:
        streak_masks = None
Example #25
            rad_profs_set = rad_profs_set[:-1]
            cluster_shot_tags = sorted(cluster_shot_tags)[:-1]
        # diff_norm=norm_shots[::2]-norm_shots[1::2]

        diff_norm, pairing = pair_diff_PI(norm_shots, rad_profs_set, qs)
        diff_pair = np.zeros((diff_norm.shape[0], 2))
        diff_streak_masks = np.zeros((diff_norm.shape[0], diff_norm.shape[-1]),
                                     dtype=bool)

        for index, pp in enumerate(pairing):
            diff_pair[index, 0] = cluster_shot_tags[pp[0]]
            diff_pair[index, 1] = cluster_shot_tags[pp[1]]
            diff_streak_masks[index] = streak_masks[pp[0]] * streak_masks[
                pp[1]]

        dc = DiffCorr(diff_norm, qs, 0, pre_dif=True)
        all_diff_masks = np.array([mask] * diff_norm.shape[0])
        all_diff_masks[:, :4, :] = (diff_streak_masks[:, None, :] *
                                    all_diff_masks[:, :4, :])
        mask_dc = DiffCorr(all_diff_masks.copy(), qs, 0, pre_dif=True)
        mask_corr = mask_dc.autocorr()
        print "mask corr shape:"
        print mask_corr.shape

        ac = dc.autocorr() / mask_corr
        norm_corrs.append(ac.mean(0))
        shot_nums_per_set.append(diff_norm.shape[0])
        f_out.create_dataset('pairing_%d' % shot_set_num, data=diff_pair)
        f_out.create_dataset('streak_masks_%d' % shot_set_num,
Example #26
    pairs_dict = stable.stableroomate(prefs=pref_dict)

    pairing = np.array(MakeTagPairs._remove_duplicate_pairs(pairs_dict))
    # print pairing.shape

    diff_PI = np.zeros((num_shots // 2, 3, shots_merge.shape[-1]),
                       dtype=np.float64)
    # print diff_PI.shape
    # print shots_merge[pairing[0][0]].shape
    for idx, pp in enumerate(pairing):
        diff_PI[idx] = shots_merge[pp[0]] - shots_merge[pp[1]]

    # diff cor time

    dc = DiffCorr(diff_PI, qvalues, k_beam, pre_dif=True)
    corr = dc.autocorr().mean(0)
    corrs.append(corr)

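    # benchmark: difference of consecutive shots, without preference pairing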
    diff_shots = shots_merge[1::2] - shots_merge[:-1][::2]
    dc = DiffCorr(diff_shots, qvalues, k_beam, pre_dif=True)
    corr = dc.autocorr().mean(0)
    benchmark_corrs.append(corr)
    

    cluster_sizes.append(diff_PI.shape[0])


total_shots = np.sum(cluster_sizes).astype(float)
print ("total number of shots used is %d"% ( total_shots*2 ) )