def get_density(data):
    """Assemble the rung-density profile from a collection of datasets.

    Returns a pyalps DataSet whose y holds, for each rung index j, the
    values summed over all flattened input datasets at that rung; x marks
    the rung centres at j + 0.5.  Filling / hole-count props are derived
    from `L` and `Nup_total`.
    """
    def _stats_at(rung):
        # Collect every measured value belonging to this rung across all
        # flattened datasets, then reduce to (sum, standard deviation).
        values = np.array([])
        for dset in pyalps_dset.flatten(data):
            mask = (dset.x == rung)[:, 0]
            values = np.concatenate([values, dset.y[0][mask]])
        return np.sum(values), np.std(values)

    result = pyalps_dset.DataSet()
    result.props = pyalps_dset.dict_intersect([d.props for d in pyalps_dset.flatten(data)])
    result.props['observable'] = 'Rung density'

    # Read (and thereby validate presence of) the bond dimension prop.
    bond_dim = int(result.props['max_bond_dimension'])
    L = int(result.props['L'])
    # Effective length: one site is dropped when L is odd.
    Leff = L - 1 if L % 2 else L
    nup = result.props['Nup_total']
    nholes = L - nup
    if nup % 2 == 0:
        # Original note: case with two additional full sites.
        filling = float(nup) / Leff
    else:
        filling = float(nup - 1) / Leff
    result.props['nholes'] = nholes
    result.props['filling'] = filling

    profile = np.empty((L, 2))
    for rung in xrange(L):
        profile[rung] = _stats_at(rung)

    result.x = np.arange(0.5, L, 1)
    result.y = profile[:, 0]
    return result
def compute(fname): print "loading", fname data = load_raw_data.loadEigenstateMeasurements( [fname], what=["pair field 1", "pair field 2", "pair field 3", "pair field 4"] ) flat_data = pyalps_dset.flatten(data) flat_data = filter(lambda d: type(d) != list or (len(d) > 0 and type(d[0]) != list), flat_data) if len(flat_data) < 4: print "WARNING:", "Not enough datasets loaded in", fname return None common_props = pyalps_dset.dict_intersect([d.props for d in pyalps_dset.flatten(data)]) L = int(common_props["L"]) W = int(common_props["W"]) if "W" in common_props else 2 idx = index_map(L, W) try: pairfield_1 = select_to_nd(data, "pair field 1", idx, dims=4) pairfield_2 = select_to_nd(data, "pair field 2", idx, dims=4) pairfield_3 = select_to_nd(data, "pair field 3", idx, dims=4) pairfield_4 = select_to_nd(data, "pair field 4", idx, dims=4) except ObservableNotFound: print "WARNING:", "Measurement not found in", fname return None ix_lower = idx[:, 0].reshape(L, 1) ix_upper = idx[:, 1].reshape(L, 1) jx_lower = idx[:, 0].reshape(1, L) jx_upper = idx[:, 1].reshape(1, L) corr = np.zeros((L, L)) corr += +pairfield_1[ix_lower, ix_upper, jx_lower, jx_upper] corr += -pairfield_2[ix_lower, ix_upper, jx_lower, jx_upper] corr += -pairfield_3[ix_lower, ix_upper, jx_lower, jx_upper] corr += +pairfield_4[ix_lower, ix_upper, jx_lower, jx_upper] d = pyalps_dset.DataSet() d.props = deepcopy(common_props) d.props["observable"] = "Pairfield Correlation" d.y = corr d.idx = idx return d
def compute(filling, bond_dim, odd_sizes=False, amplitude_points=None):
    """Finite-size scaling of the density-oscillation amplitude -> Krho.

    Loads density-fit results for all system sizes at the given filling and
    bond dimension, fits log(Amplitude) vs log(L) with a line, and converts
    the slope into the Luttinger parameter Krho = -2 * slope.

    Parameters:
        filling          -- target particle filling (passed to the size iterators)
        bond_dim         -- max bond dimension selecting the datasets
        odd_sizes        -- use the odd-system-size iterator instead of the even one
        amplitude_points -- if set and smaller than the number of sizes, fit
                            only the last `amplitude_points` points (largest L)

    Returns a DataSet of Amplitude vs L with fit results in .props;
    .props is None when fewer than 3 valid densities were found.
    """
    ## Load all system sizes
    if odd_sizes:
        densities = map(lambda parms: to_dataset(*utils.load_or_evaluate('density_fit', density.evaluate_fit, **parms)), utils.iter_odd_system_size(bond_dim=bond_dim, filling=filling))
    else:
        densities = map(lambda parms: to_dataset(*utils.load_or_evaluate('density_fit', density.evaluate_fit, **parms)), utils.iter_system_size(bond_dim=bond_dim, filling=filling))
    # Datasets that failed to load/evaluate come back with props=None.
    densities = filter(lambda d: d.props is not None, densities)
    if len(densities) < 3:
        # Not enough points for a meaningful power-law fit; signal with props=None.
        print 'WARNING:', 'Only got {} valid densities, you need at least 3.'.format(len(densities))
        d = pyalps_dset.DataSet()
        d.props = None
        return d

    # check that particle density in the middle is symmetric, otherwise exclude dataset
    # for L=192 it is better to use only M > 2000
    # def symm_middle_filter(d):
    #     x = int(d.props['L'] / 2)
    #     delta = abs(d.y[x] - d.y[x-1]) if d.props['L']%2==0 else abs(d.y[x+1] - d.y[x-1])
    #     if delta > 5e-5:
    #         print '# Discard L={L:.0f}, n={filling}, tperp={t\'}, M={max_bond_dimension:.0f} : {delta}'.format(delta=delta, **d.props)
    #         return False
    #     return True
    # densities = filter(symm_middle_filter, densities)

    ## Only even number of pairs
    densities = filter(lambda d: d.props['L']%2 != 0 or d.props['Nup_total'] % 2 == 0, densities)

    amplitudes = map(compute_amplitude, densities)
    for d in amplitudes:
        # Warn when the density-fit uncertainty swamps the amplitude itself.
        if d.props['density_fit_error'] > abs(d.y[0]):
            print 'WARNING:', 'Error in density fit is higher than amplitude magnitude.'
            # NOTE(review): "Amplidue" typo kept — it is a runtime string.
            print 'Error per site is', d.props['density_fit_error'], 'Amplidue is', d.y[0]
            print 'Dataset was:', 'L=%s filling=%s t_perp=%s M=%s' % (d.props['L'], d.props['filling'], d.props['t\''], d.props['max_bond_dimension'])

    common_props = pyalps_dset.dict_intersect([d.props for d in amplitudes])
    amp_vs_L = pyalps_dset.collectXY(amplitudes, 'L', 'Amplitude')
    d = amp_vs_L[0]

    ## fit finite size analysis
    # Select the last `amplitude_points` system sizes (largest L) for the fit.
    sel = np.ones(len(d.x), dtype=bool)
    if amplitude_points is not None and amplitude_points < len(d.x):
        sel[:-amplitude_points] = False
    # Covariance stays NaN when too few points are selected for cov=True.
    cov = np.ones((2,2))*np.nan
    if sum(sel) > 2:
        coeff, cov = np.polyfit(np.log(d.x[sel]), np.log(d.y[sel]), deg=1, cov=True)
    else:
        coeff = np.polyfit(np.log(d.x[sel]), np.log(d.y[sel]), deg=1)
    # NOTE(review): R^2 is computed over ALL points, not just the fitted
    # subset — presumably intentional; confirm.
    r2 = rsquared(np.log(d.x), np.log(d.y), coeff)
    fit_range = [d.x[0], d.x[-1]]
    if amplitude_points is not None and amplitude_points < len(d.x):
        fit_range[0] = d.x[-amplitude_points]

    ## compute new Krho
    slope = coeff[0]
    # cov[0,0] is the variance of the slope (highest-degree coefficient).
    errslope = np.sqrt(cov[0,0])
    Krho = -2. * slope
    errKrho = 2. * errslope
    print 'M={} t_perp={} filling={:4.4f} -> Krho={:.3f} +/- {:.3f} : R^2 {:.4f}'.format(common_props['max_bond_dimension'], common_props['t\''], common_props['filling'], Krho, errKrho, r2)

    d.props['fit_range'] = fit_range
    d.props['fit_numpoints'] = amplitude_points
    d.props['fitted_coeff'] = coeff
    d.props['fitted_Krho'] = Krho
    d.props['fitted_Krho_error'] = errKrho
    d.props['fitted_r2'] = r2
    return d
def compute(fname): data = load_raw_data.loadEigenstateMeasurements([fname], what=[ 'dens corr up-up', 'dens corr up-down', 'dens corr down-up', 'dens corr down-down', 'Local density up', 'Local density down', ]) flat_data = pyalps_dset.flatten(data) flat_data = filter(lambda d: type(d) != list or (len(d) > 0 and type(d[0]) != list), flat_data) if len(flat_data) < 6: print 'WARNING:', 'Not enough datasets loaded in', fname return None common_props = pyalps_dset.dict_intersect([d.props for d in pyalps_dset.flatten(data)]) L = int(common_props['L']) W = int(common_props['W']) if 'W' in common_props else 2 idx = index_map(L, W) try: dcor_up_up = select_to_2d(data, 'dens corr up-up' , idx) dcor_up_down = select_to_2d(data, 'dens corr up-down' , idx) dcor_down_up = select_to_2d(data, 'dens corr down-up' , idx) dcor_down_down = select_to_2d(data, 'dens corr down-down', idx) dens_up = select_to_1d(data, 'Local density up' , idx) dens_down = select_to_1d(data, 'Local density down' , idx) except ObservableNotFound: print 'WARNING:', 'Measurement not found in', fname return None ix_lower_chain = idx[:,0] ix_upper_chain = idx[:,1] total_dens = dens_up + dens_down dens_on_rungs = total_dens[ix_lower_chain] + total_dens[ix_upper_chain] dcor = np.zeros((L,L)) ## combine density correlators for <N(i)*N(j)> between rungs dcor += dcor_up_up [np.ix_(ix_lower_chain, ix_lower_chain)] dcor += dcor_up_down [np.ix_(ix_lower_chain, ix_lower_chain)] dcor += dcor_down_up [np.ix_(ix_lower_chain, ix_lower_chain)] dcor += dcor_down_down[np.ix_(ix_lower_chain, ix_lower_chain)] dcor += dcor_up_up [np.ix_(ix_lower_chain, ix_upper_chain)] dcor += dcor_up_down [np.ix_(ix_lower_chain, ix_upper_chain)] dcor += dcor_down_up [np.ix_(ix_lower_chain, ix_upper_chain)] dcor += dcor_down_down[np.ix_(ix_lower_chain, ix_upper_chain)] dcor += dcor_up_up [np.ix_(ix_upper_chain, ix_lower_chain)] dcor += dcor_up_down [np.ix_(ix_upper_chain, ix_lower_chain)] dcor += dcor_down_up [np.ix_(ix_upper_chain, 
ix_lower_chain)] dcor += dcor_down_down[np.ix_(ix_upper_chain, ix_lower_chain)] dcor += dcor_up_up [np.ix_(ix_upper_chain, ix_upper_chain)] dcor += dcor_up_down [np.ix_(ix_upper_chain, ix_upper_chain)] dcor += dcor_down_up [np.ix_(ix_upper_chain, ix_upper_chain)] dcor += dcor_down_down[np.ix_(ix_upper_chain, ix_upper_chain)] ## connected correlator dcor += -np.outer(dens_on_rungs, dens_on_rungs) d = pyalps_dset.DataSet() d.props = deepcopy(common_props) d.props['observable'] = 'Density Correlation' d.y = dcor d.idx = idx return d
def do_extrapolate(data, obs, xgetter, res_props, foreach, deg, num_points, full_output_at=[]): extrap = [] fits = [] obs_vs_extrap = [] groups = pyalps_dset.groupSets(data, for_each=foreach) for gg in groups: if (num_points is not None and num_points < len(gg) and num_points < deg+1) or len(gg) < deg+1: print 'WARNING:', 'Extrapolation not possible.', 'len() < deg+1, len={}, deg={}'.format(len(gg), deg) continue common_props = pyalps_dset.dict_intersect([d.props for d in gg]) extrap_x = [] bond_dims = [] observables = [] xval = None for d in gg: # get x values if xval is None: xval = d.x elif np.all(abs(d.x - xval) > 1e-10): raise Exception('`x` values do not match between the extrapolation group.') # get y values observables.append( d.y ) # get variance extrap_x.append( xgetter(d) ) bond_dims.append(d.props['max_bond_dimension']) extrap_x = np.array(extrap_x) bond_dims = np.array(bond_dims) observables = np.array(observables) order = np.argsort(extrap_x) extrap_x = extrap_x[order] bond_dims = bond_dims[order] observables = observables[order] dd = pyalps_dset.DataSet() dd.props = deepcopy(common_props) dd.props['fit_deg'] = deg dd.props['fit_numpoints'] = num_points dd.x = deepcopy(xval) dd.y = [0.]*len(xval) for i, xi in enumerate(xval): # warnings.filterwarnings('error', category=np.RankWarning) # extrapolate x_i val, err, r2, coeff, xfit, yfit = extrapolate_with_error(extrap_x, observables[:,i], deg, num_points) dd.y[i] = val # except np.RankWarning: # print 'RankWarning: num_points={}, deg={}, xi={}'.format(num_points, deg, xi) # print ' ', ' '.join(['{}={}'.format(k,common_props[k]) for k in foreach]) # dd.y[i] = np.nan # continue if xi in full_output_at: fit_cut = extrap_x[num_points-1] if num_points is not None and num_points < len(extrap_x) else extrap_x[-1] dfit = pyalps_dset.DataSet() dfit.props = deepcopy(common_props) dfit.props['fitted_x'] = xi dfit.props['fitted_r2'] = r2 dfit.props['fit_deg'] = deg dfit.props['fit_numpoints'] = num_points 
dfit.props['fit_cut'] = fit_cut dfit.props['fitted_coeff'] = coeff dfit.x = xfit dfit.y = yfit fits.append(dfit) dvals = pyalps_dset.DataSet() dvals.props = deepcopy(common_props) dvals.props['fitted_x'] = xi dvals.props['line'] = 'scatter' dvals.props['bond_dims'] = deepcopy(bond_dims) dvals.props['fit_deg'] = deg dvals.props['fit_numpoints'] = num_points dvals.props['fit_cut'] = fit_cut dvals.props['fitted_coeff'] = coeff dvals.props['fitted_r2'] = r2 dvals.x = deepcopy(extrap_x) dvals.y = observables[:,i] obs_vs_extrap.append(dvals) dd.props.update(res_props) extrap.append(dd) return extrap, obs_vs_extrap, fits