import numpy as np
from copy import deepcopy

## pyalps_dset, load_raw_data, index_map, select_to_1d/2d/nd, rsquared and
## ObservableNotFound are project-local helpers assumed to be importable here.


def get_density(data):
    def mean_density_at(j):
        ## collect every measured value belonging to index j from all loaded datasets
        all_values = np.array([])
        for d in pyalps_dset.flatten(data):
            all_values = np.concatenate([ all_values, d.y[0][( d.x==j )[:,0]] ])
        ## summed value (rung density) and spread of the contributing measurements
        amp = np.sum(all_values)
        std = np.std(all_values)
        return amp, std
    
    ret = pyalps_dset.DataSet()
    ret.props = pyalps_dset.dict_intersect([d.props for d in pyalps_dset.flatten(data)])
    ret.props['observable'] = 'Rung density'
    
    bond_dim = int(ret.props['max_bond_dimension'])
    L = int(ret.props['L'])
    Leff = L if L%2 == 0 else L-1
    nup = int(ret.props['Nup_total'])
    nholes = L - nup
    if nup % 2 == 0: ## for the case with two additional full sites
        filling = float(nup) / Leff
    else:
        filling = float(nup-1) / Leff
    ret.props['nholes']  = nholes
    ret.props['filling'] = filling
    
    ## density (and spread of the contributing values) on each rung
    density = np.empty((L,2))
    for j in xrange(L):
        density[j] = mean_density_at(j)
    x = np.arange(0.5, L, 1)  ## x positions shifted by half a lattice spacing
    
    ret.x = x
    ret.y = density[:,0]  ## keep the density; the spread in density[:,1] is not exported
    
    return ret
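
## Example usage (sketch): `data` is assumed to hold per-site density measurements
## loaded via load_raw_data.loadEigenstateMeasurements.
# rung_density = get_density(data)
# print rung_density.props['filling'], rung_density.y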
def compute(fname):
    print "loading", fname
    data = load_raw_data.loadEigenstateMeasurements(
        [fname], what=["pair field 1", "pair field 2", "pair field 3", "pair field 4"]
    )
    ## keep only real datasets: drop empty lists and lists that are still nested
    flat_data = pyalps_dset.flatten(data)
    flat_data = filter(lambda d: type(d) != list or (len(d) > 0 and type(d[0]) != list), flat_data)
    if len(flat_data) < 4:  ## one dataset per pair-field component is required
        print "WARNING:", "Not enough datasets loaded in", fname
        return None

    common_props = pyalps_dset.dict_intersect([d.props for d in pyalps_dset.flatten(data)])

    L = int(common_props["L"])
    W = int(common_props["W"]) if "W" in common_props else 2
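    ## idx maps each rung j to the linear site indices of its two legs:
    ## idx[j, 0] = lower-leg site, idx[j, 1] = upper-leg site (layout inferred from its use below)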
    idx = index_map(L, W)

    try:
        pairfield_1 = select_to_nd(data, "pair field 1", idx, dims=4)
        pairfield_2 = select_to_nd(data, "pair field 2", idx, dims=4)
        pairfield_3 = select_to_nd(data, "pair field 3", idx, dims=4)
        pairfield_4 = select_to_nd(data, "pair field 4", idx, dims=4)
    except ObservableNotFound:
        print "WARNING:", "Measurement not found in", fname
        return None

    ## reshape to column/row vectors so the fancy indexing below broadcasts to an (L, L) grid
    ix_lower = idx[:, 0].reshape(L, 1)
    ix_upper = idx[:, 1].reshape(L, 1)
    jx_lower = idx[:, 0].reshape(1, L)
    jx_upper = idx[:, 1].reshape(1, L)

    ## combine the four pair-field components with alternating signs
    corr = np.zeros((L, L))

    corr += +pairfield_1[ix_lower, ix_upper, jx_lower, jx_upper]
    corr += -pairfield_2[ix_lower, ix_upper, jx_lower, jx_upper]
    corr += -pairfield_3[ix_lower, ix_upper, jx_lower, jx_upper]
    corr += +pairfield_4[ix_lower, ix_upper, jx_lower, jx_upper]

    d = pyalps_dset.DataSet()
    d.props = deepcopy(common_props)
    d.props["observable"] = "Pairfield Correlation"
    d.y = corr
    d.idx = idx

    return d
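
## Note: `rsquared(x, y, coeff)` used below is a project-local helper (not shown
## here); it is assumed to return the coefficient of determination (R^2) of the
## polynomial `coeff` evaluated against the data points (x, y).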
def bond_dimension(data, foreach=[], deg=1, num_points=None):
    en_vs_bond = pyalps_dset.collectXY(pyalps_dset.flatten(data), 'max_bond_dimension', 'Energy', foreach=foreach)
    extrap = []
    fits   = []
    for d in en_vs_bond:
        bond_dims = deepcopy(d.x)
        ## extrapolate in 1/chi: the infinite-bond-dimension limit is the fit value at x = 0
        d.x = 1./d.x
        d.x = d.x[::-1]
        d.y = d.y[::-1]
        
        ## optionally use only the first num_points values for the polynomial fit
        sel = np.ones(len(d.x), dtype=bool)
        if num_points is not None and num_points < len(d.x):
            sel[num_points:] = False
        coeff = np.polyfit(d.x[sel], d.y[sel], deg=deg)
        r2 = rsquared(d.x[sel], d.y[sel], coeff)
        ## x value of the last point used in the fit
        fit_cut = d.x[num_points-1] if num_points is not None and num_points < len(d.x) else d.x[-1]
        
        d.props['fitted_r2'] = r2
        d.props['fit_deg'] = deg
        d.props['fit_numpoints'] = num_points
        d.props['fit_cut']       = fit_cut
        d.props['fitted_coeff']  = coeff
        d.props['fitted_energy'] = coeff[-1]  ## constant term = energy extrapolated to 1/chi -> 0
        
        ## dataset with the fitted polynomial, sampled on [0, max(1/chi)] for plotting
        dd = pyalps_dset.DataSet()
        dd.props = deepcopy(d.props)
        dd.props['fitted_r2'] = r2
        dd.props['fit_deg'] = deg
        dd.props['fitted_coeff'] = coeff
        dd.x = np.linspace(0, max(d.x))
        dd.y = np.polyval(coeff, dd.x)
        fits.append(dd)
        
        ## dataset holding only the extrapolated (1/chi -> 0) energy
        dd = pyalps_dset.DataSet()
        dd.props = deepcopy(d.props)
        dd.props['max_bond_dimension'] = 'inf'
        dd.x = np.array([])
        dd.y = np.array([ coeff[-1] ])
        extrap.append(dd)
        
        d.props['line'] = 'scatter'
        d.props['bond_dims'] = bond_dims
        
    
    return extrap, en_vs_bond, fits
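
## Example usage (sketch; the 'Energy' observable and the foreach key 't' are assumptions):
# data = load_raw_data.loadEigenstateMeasurements(files, what=['Energy'])
# extrap, raw, fits = bond_dimension(data, foreach=['t'], deg=2, num_points=4)
# for d in extrap:
#     print d.props['fitted_energy'], d.props['fitted_r2']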
def select_obs(sets, obs):
    sel = pyalps_dset.select(pyalps_dset.flatten(sets), lambda d: d.props['observable'] == obs)
    if len(sel) == 0:
        raise ObservableNotFound()
    return sel[0]
def compute(fname):
    data = load_raw_data.loadEigenstateMeasurements([fname],
                    what=[
                          'dens corr up-up',
                          'dens corr up-down',
                          'dens corr down-up',
                          'dens corr down-down',
                          'Local density up',
                          'Local density down',
                          ])
    ## keep only real datasets: drop empty lists and lists that are still nested
    flat_data = pyalps_dset.flatten(data)
    flat_data = filter(lambda d: type(d) != list or (len(d) > 0 and type(d[0]) != list), flat_data)
    if len(flat_data) < 6:  ## one dataset per requested observable is required
        print 'WARNING:', 'Not enough datasets loaded in', fname
        return None
    
    common_props = pyalps_dset.dict_intersect([d.props for d in pyalps_dset.flatten(data)])
    
    L = int(common_props['L'])
    W = int(common_props['W']) if 'W' in common_props else 2
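    ## idx[j, 0] / idx[j, 1]: lower- and upper-leg site indices of rung j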
    idx = index_map(L, W)
    
    try:
        dcor_up_up     = select_to_2d(data, 'dens corr up-up'    , idx)
        dcor_up_down   = select_to_2d(data, 'dens corr up-down'  , idx)
        dcor_down_up   = select_to_2d(data, 'dens corr down-up'  , idx)
        dcor_down_down = select_to_2d(data, 'dens corr down-down', idx)
        dens_up        = select_to_1d(data, 'Local density up'   , idx)
        dens_down      = select_to_1d(data, 'Local density down' , idx)
    except ObservableNotFound:
        print 'WARNING:', 'Measurement not found in', fname
        return None
    
    ix_lower_chain = idx[:,0]
    ix_upper_chain = idx[:,1]
    
    total_dens = dens_up + dens_down
    ## total particle number on each rung: both spins on both legs
    dens_on_rungs = total_dens[ix_lower_chain] + total_dens[ix_upper_chain]
    
    
    dcor = np.zeros((L,L))
    ## combine density correlators for <N(i)*N(j)> between rungs
    dcor += dcor_up_up    [np.ix_(ix_lower_chain, ix_lower_chain)]
    dcor += dcor_up_down  [np.ix_(ix_lower_chain, ix_lower_chain)]
    dcor += dcor_down_up  [np.ix_(ix_lower_chain, ix_lower_chain)]
    dcor += dcor_down_down[np.ix_(ix_lower_chain, ix_lower_chain)]
    
    dcor += dcor_up_up    [np.ix_(ix_lower_chain, ix_upper_chain)]
    dcor += dcor_up_down  [np.ix_(ix_lower_chain, ix_upper_chain)]
    dcor += dcor_down_up  [np.ix_(ix_lower_chain, ix_upper_chain)]
    dcor += dcor_down_down[np.ix_(ix_lower_chain, ix_upper_chain)]
    
    dcor += dcor_up_up    [np.ix_(ix_upper_chain, ix_lower_chain)]
    dcor += dcor_up_down  [np.ix_(ix_upper_chain, ix_lower_chain)]
    dcor += dcor_down_up  [np.ix_(ix_upper_chain, ix_lower_chain)]
    dcor += dcor_down_down[np.ix_(ix_upper_chain, ix_lower_chain)]
    
    dcor += dcor_up_up    [np.ix_(ix_upper_chain, ix_upper_chain)]
    dcor += dcor_up_down  [np.ix_(ix_upper_chain, ix_upper_chain)]
    dcor += dcor_down_up  [np.ix_(ix_upper_chain, ix_upper_chain)]
    dcor += dcor_down_down[np.ix_(ix_upper_chain, ix_upper_chain)]
    
    ## subtract <N(i)><N(j)> to obtain the connected correlator
    dcor += -np.outer(dens_on_rungs, dens_on_rungs)
    
    
    d = pyalps_dset.DataSet()
    d.props = deepcopy(common_props)
    d.props['observable'] = 'Density Correlation'
    d.y = dcor
    d.idx = idx
    
    return d