Example #1
 def check_value_scalar(self, string, should_be, significance=8, debug=False):
     import pyalps
     val=pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),[string])[0][0].y[0].mean
     err=pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),[string])[0][0].y[0].error
     if debug:
         print('\n')
         print(string+': '+str(val)+' +- '+str(err)+'\t'+str(abs(should_be-val)/max([err,1e-6])))
         print('\n')
     self.assertLess(abs(should_be-val)/max([err,1e-6]),significance)
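A standalone sketch of the same significance check, not part of the original test class; the observable name 'Energy' and the reference value -0.4567 are placeholders, and result files with prefix 'parm' are assumed to exist.

import pyalps

# first scalar observable entry for the placeholder name 'Energy'
obs = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'), ['Energy'])[0][0].y[0]
# deviation of the measured mean from the reference value, in units of its error bar
deviation = abs(-0.4567 - obs.mean) / max(obs.error, 1e-6)
print('deviation in error bars:', deviation)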
Example #2
def get_vector_data(result_files, what):
    '''read data from a VectorObservable into a multidimensional numpy array,
    discarding all info except "mean" and "error".
    '''
    import pyalps
    import numpy as np
    from pyalps.alea import MCScalarData as msd
    data_set = pyalps.loadMeasurements(result_files, what=what)
    return np.array([[msd(i.mean, i.error) for i in j[0].y] for j in data_set])
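A minimal usage sketch for get_vector_data, not part of the original snippet; it assumes result files with prefix 'parm' that contain a vector observable named 'Local Density'.

import pyalps

files = pyalps.getResultFiles(prefix='parm')
local_density = get_vector_data(files, 'Local Density')
# one row of MCScalarData entries (mean and error) per task
print(local_density.shape)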
Example #3
def get_scalar_data(result_files, what):
    '''read data from a ScalarObservable into a numpy array,
    discarding all info except "mean" and "error"
    '''
    import pyalps
    import numpy as np
    from pyalps.alea import MCScalarData as msd
    data_set = pyalps.loadMeasurements(result_files, what=what)
    return np.array([msd(i[0].y[0].mean, i[0].y[0].error) for i in data_set])
Example #4
def read_results_from_file(path, prefix, X, Y, ForEach=None):
    """
    Input
    ------------------
    path:  path to result files
    prefix: filename prefix (files ending in .out.h5); if it ends with a dot (.), a single simulation is assumed,
                            otherwise the separate simulations are identified and merged.
    X : string, name of parameter
    Y : string, name of observable

    returns
    -----------------
    pyalps X-Y-props list
    """

    if prefix[-1]=='.':
        dataset= pyalps.loadMeasurements(pyalps.getResultFiles(dirname=path,prefix=prefix),Y)

        if ForEach is None:
            return pyalps.collectXY(dataset, x=X, y=Y)
        else:
            return pyalps.collectXY(dataset, x=X, y=Y, foreach=[ForEach])

    else:
        raise ValueError("Not yet implemented for multiple runs.")
def ReadResults(path, prefix, X, Y):
    """
    Input
    ------------------
    path:  path to result files
    prefix: filename prefix (files ending in .out.h5); if it ends with a dot (.), a single simulation is assumed,
                            otherwise the separate simulations are identified and merged.
    X : string, name of parameter
    Y : string, name of observable

    returns
    -----------------
    pyalps X-Y-props list
    """

    if prefix[-1]=='.':
        dataset= pyalps.loadMeasurements(pyalps.getResultFiles(dirname=path,prefix=prefix),Y)

        return pyalps.collectXY(dataset, x=X, y=Y, foreach=['IncNo'])
    else:
        all_prefixes=[]
        for f in os.listdir(path):
            if fnmatch.fnmatch(f, prefix+'*.out.h5'):
                before_first_dot=f.split('.')[0]+'.'
                if before_first_dot not in all_prefixes:
                    all_prefixes.append(before_first_dot)
        datasetsXY = []
        for pre in all_prefixes:
            tmp_dset= pyalps.loadMeasurements(pyalps.getResultFiles(dirname=path,prefix=pre),Y)
            datasetsXY.append( pyalps.collectXY(tmp_dset, x=X, y=Y, foreach=['IncNo']) )

        for i,dxy in enumerate(datasetsXY[0]):   #Take the 1st dataset as reference
            rIncNo = dxy.props['IncNo']
            for dslist in datasetsXY[1:]:
                for dxy2nd in dslist:
                    if rIncNo == dxy2nd.props['IncNo']:
                        datasetsXY[0][i] = pyalps.mergeDataSets([datasetsXY[0][i], dxy2nd])   

        return datasetsXY[0]
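A minimal usage sketch for read_results_from_file, not part of the original module; the directory './results', the prefix 'parm.' and the observable 'Magnetization Density' are assumptions (ReadResults works analogously but additionally groups the merged runs by their 'IncNo' parameter).

import pyalps.plot

# single simulation (prefix ends with a dot): collect magnetization vs. field h
xy = read_results_from_file('./results', 'parm.', X='h', Y='Magnetization Density')
print(pyalps.plot.convertToText(xy))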
Example #6
def detectDataType(fname):

    fname = pyalps.make_list(fname)

    # Monte Carlo results
    try:
        data = pyalps.loadMeasurements(fname)
        if len(data[0]) == 0:
            raise RuntimeError

        for task in data:
            for obs in task:
                tmp = obs.y[0].error

    except (RuntimeError, AttributeError, IndexError):
        pass
    else:
        return compareMC

    # mixed type (QWL)
    try:
        data = pyalps.loadMeasurements(fname)
        if len(data[0]) == 0:
            raise RuntimeError

    except (RuntimeError, AttributeError, IndexError):
        pass
    else:
        return compareMixed

    # Epsilon-precise results
    try:
        data = pyalps.loadEigenstateMeasurements(fname)
    except RuntimeError:
        pass
    else:
        return compareEpsilon

    raise Exception("Measurement data type couldn't be detected")
Example #7
File: plot.py Project: hotta1/LRI
name.append(['Energy_normalized','energynormalized'])
name.append(['Specific Heat','specheat'])
name.append(['Specific Heat Conventional','specheatconv'])
name.append(['Specific Heat by FT','specheatft'])
name.append(['Magnetic Susceptibility connected','magsuscon'])
name.append(['Magnetic Susceptibility connected for Scaling','magsusconsca'])
name.append(['Magnetic Susceptibility disconnected','magsusdis'])
name.append(['Magnetic Susceptibility disconnected for Scaling','magsusdissca'])
name.append(['Binder Ratio of Magnetization connected','bindercon'])
name.append(['Binder Ratio of Magnetization disconnected','binderdis'])
name.append(['Binder Ratio of Magnetization 1 connected','binder1con'])
name.append(['Binder Ratio of Magnetization 1 disconnected','binder1dis'])
name.append(['Specific Heat connected','specheatcon'])

for i in range(0,len(name)):
  data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='LRSW_params'),name[i][0])
  for item in pyalps.flatten(data):
    item.props['L'] = int(item.props['L'])
  graph = pyalps.collectXY(data,x='T',y=name[i][0],foreach=['L'])
  graph.sort(key=lambda item: item.props['L'])
  f1 = open(name[i][1]+'.plt','w')
  f1.write(pyalps.plot.makeGnuplotPlot(graph))
  f1.close()
  f2 = open(name[i][1]+'.dat','w')
  for j in graph:
    L=j.props['L']
    for k in range(0,len(j.x)):
      f2.write(str(L)+' '+str(j.x[k])+' '+str(j.y[k].mean)+' '+str(j.y[k].error)+'\n')
  f2.close()
  print 'finished writing ' + name[i][1] + '.plt and ' + name[i][1] + '.dat'
Example #8
File: plot.py Project: kaityo256/alps
import pyalps
import matplotlib.pyplot as plt
import pyalps.plot

data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='params'),(['Value']))
value = pyalps.collectXY(data,x='T', y='Value')
plt.figure()
pyalps.plot.plot(value)
plt.xlabel('T')
plt.ylabel('Value')
plt.title('Test Plot')
plt.show()
Example #9
#prepare the input parameters
parms = [{
    'LATTICE': "square lattice",
    'MODEL': "spin",
    'MEASURE[Correlations]': True,
    'MEASURE[Structure Factor]': True,
    'MEASURE[Green Function]': True,
    'local_S': 0.5,
    'T': 0.3,
    'J': 1,
    'THERMALIZATION': 10000,
    'SWEEPS': 500000,
    'L': 4,
    'h': 0.1
}]

#write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm4', parms)
res = pyalps.runApplication('dirloop_sse', input_file, Tmin=5)

#load all measurements from the result files
data = pyalps.loadMeasurements(pyalps.getResultFiles())

# print all measurements
for s in pyalps.flatten(data):
    if len(s.x) == 1:
        print(s.props['observable'], ' : ', s.y[0])
    else:
        for (x, y) in zip(s.x, s.y):
            print(s.props['observable'], x, ' : ', y)
Example #10
        'MAXSTATES'                 : 100
       } ]

#write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm_spin_one_half',parms)
res = pyalps.runApplication('dmrg',input_file,writexml=True)

#load all measurements for all states
data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix='parm_spin_one_half'))

# print properties of the eigenvector:
for s in data[0]:
    print(s.props['observable'], ' : ', s.y[0])

# load and plot iteration history
iter = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm_spin_one_half'),
                               what=['Iteration Energy','Iteration Truncation Error'])

plt.figure()
pyalps.plot.plot(iter[0][0])
plt.title('Iteration history of ground state energy (S=1/2)')
plt.ylim(-15,0)
plt.ylabel('$E_0$')
plt.xlabel('iteration')

plt.figure()
pyalps.plot.plot(iter[0][1])
plt.title('Iteration history of truncation error (S=1/2)')
plt.yscale('log')
plt.ylabel('error')
plt.xlabel('iteration')
Example #11
def runmain():
    ts = np.linspace(0.01, 0.08, 15).tolist()
    mus = np.linspace(0, 1, 101).tolist()
    # mus = np.linspace(0, 1, 51).tolist()
    # mus = np.linspace(0, 0.25, 15).tolist()
    # ts = [0.01]
    # mus = [mus[1]]
    # mus = mus[0:10]
    # ts = [ts[0]]
    ts = np.linspace(0, 0.01, 11).tolist()
    # mus = [0.5]
    # ts = [np.linspace(0.01, 0.3, 10).tolist()[2]]
    # ts = [0.3]
    # ts = np.linspace(0.3, 0.3, 1).tolist()

    dims = [len(ts), len(mus)]
    ndims = dims + [numsites]

    finished = np.empty(dims, dtype=bool)

    E0res = np.empty(dims, dtype=object)
    fsres = np.empty(dims, dtype=object)
    nres = np.empty(dims, dtype=object)
    n2res = np.empty(dims, dtype=object)
    kres = np.empty(dims, dtype=object)
    nires = np.empty(ndims, dtype=object)
    ninres = np.empty(ndims, dtype=object)
    kires = np.empty(ndims, dtype=object)

    start = datetime.datetime.now()

    with concurrent.futures.ThreadPoolExecutor(max_workers=numthreads) as executor:
        futures = [executor.submit(runmc, i, tmu[0][0], tmu[0][1], tmu[1][0], tmu[1][1]) for i, tmu in
                   enumerate(zip(itertools.product(ts, mus), itertools.product(range(0, len(ts)), range(0, len(mus)))))]
        for future in gprogress(concurrent.futures.as_completed(futures), size=len(futures)):
            pass

    data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix=filenameprefix), measurements)
    for d in data:
        it = int(d[0].props['it'])
        imu = int(d[0].props['imu'])
        outfile = d[0].props['filename'][0:-12] + 'out.xml'
        tree = ET.parse(outfile)
        root = tree.getroot()
        finished[it][imu] = root[0].attrib['status'] == 'finished'
        for s in d:
            for case in switch(s.props['observable']):
                if case('Energy'):
                    E0res[it][imu] = s.y[0]
                    break
                if case('Stiffness'):
                    fsres[it][imu] = L * s.y[0]
                    break
                if case('Density'):
                    nres[it][imu] = s.y[0]
                    break
                if case('Density^2'):
                    n2res[it][imu] = s.y[0]
                    break
                if case('Local Density'):
                    nires[it][imu] = s.y
                    break
                if case('Local Density * Global Density'):
                    ninres[it][imu] = s.y
                    break
        kres[it][imu] = beta * (n2res[it][imu] - numsites * (nres[it][imu] ** 2))
        kires[it][imu] = beta * (ninres[it][imu] - nires[it][imu] * nres[it][imu])

    end = datetime.datetime.now()

    resi = sys.argv[1]
    if sys.platform == 'darwin':
        resfile = '/Users/Abuenameh/Documents/Simulation Results/BH-MC/res.' + str(resi) + '.txt'
    elif sys.platform == 'linux2':
        resfile = '/home/ubuntu/Dropbox/Amazon EC2/Simulation Results/BH-MC/res.' + str(resi) + '.txt'
    resf = open(resfile, 'w')
    res = ''
    res += 'finished[{0}]={1};\n'.format(resi, mathformat(finished))
    res += 'delta[{0}]={1};\n'.format(resi, delta)
    # res += 'dres[{0}]={1};\n'.format(resi, d)
    res += 'Lres[{0}]={1};\n'.format(resi, L)
    res += 'Tres[{0}]={1};\n'.format(resi, T)
    res += 'thermres[{0}]={1};\n'.format(resi, thermalization)
    res += 'sweepsres[{0}]={1};\n'.format(resi, sweeps)
    res += 'limitres[{0}]={1};\n'.format(resi, limit)
    res += 'nmax[{0}]={1};\n'.format(resi, nmax)
    res += 'nures[{0}]={1};\n'.format(resi, mathformat(nu))
    res += 'mures[{0}]={1};\n'.format(resi, mathformat(mus))
    res += 'tres[{0}]={1};\n'.format(resi, mathformat(ts))
    res += 'E0res[{0}]={1:mean};\n'.format(resi, mathformat(E0res))
    res += 'E0reserr[{0}]={1:error};\n'.format(resi, mathformat(E0res))
    res += 'fsres[{0}]={1:mean};\n'.format(resi, mathformat(fsres))
    res += 'fsreserr[{0}]={1:error};\n'.format(resi, mathformat(fsres))
    res += 'nres[{0}]={1:mean};\n'.format(resi, mathformat(nres))
    res += 'nreserr[{0}]={1:error};\n'.format(resi, mathformat(nres))
    res += 'n2res[{0}]={1:mean};\n'.format(resi, mathformat(n2res))
    res += 'n2reserr[{0}]={1:error};\n'.format(resi, mathformat(n2res))
    res += 'kres[{0}]={1:mean};\n'.format(resi, mathformat(kres))
    res += 'kreserr[{0}]={1:error};\n'.format(resi, mathformat(kres))
    res += 'nires[{0}]={1:mean};\n'.format(resi, mathformat(nires))
    res += 'nireserr[{0}]={1:error};\n'.format(resi, mathformat(nires))
    res += 'ninres[{0}]={1:mean};\n'.format(resi, mathformat(ninres))
    res += 'ninreserr[{0}]={1:error};\n'.format(resi, mathformat(ninres))
    res += 'kires[{0}]={1:mean};\n'.format(resi, mathformat(kires))
    res += 'kireserr[{0}]={1:error};\n'.format(resi, mathformat(kires))
    res += 'runtime[{0}]=\"{1}\";\n'.format(resi, end - start)
    resf.write(res)

    # print '{0}'.format(mathformat(finished))
    # print '{0}'.format(mathformat(E0res))
    # print '{0}'.format(mathformat(fsres))
    # print '{0}'.format(mathformat(kres))
    # print '{0}'.format(mathformat(nres))
    # print '{0}'.format(mathformat(n2res))

    gtk.main_quit()
Example #12

# Please run the two other tutorials before running this one. 
# This tutorial relies on the results created in those tutorials

import pyalps
import matplotlib.pyplot as plt
import pyalps.plot

# load all files
data = pyalps.loadMeasurements(pyalps.getResultFiles(),'Magnetization Density')

#flatten the hierarchical structure
data = pyalps.flatten(data)

#load the magnetization and collect it as function of field h
magnetization = pyalps.collectXY(data,x='h',y='Magnetization Density',foreach=['LATTICE'])

#make plot
plt.figure()
pyalps.plot.plot(magnetization)
plt.xlabel('Field $h$')
plt.ylabel('Magnetization $m$')
plt.ylim(0.0,0.5)
plt.legend()
plt.show()
Example #13
File: checkSS.py Project: potass13/ALPS
	val_list = val_list + no_error_val
	
	label = args.label
	
	if not pq in val_list:
		cmd.write('The physical quantity you inputted do not exist.\n')
		cmd.write('You should choose this list, ' + str(val_list) + '\n')
		sys.exit(1)
	
	if not label in no_error_val:
		cmd.write('The label you inputted do not exist or cannot use.\n')
		cmd.write('You should choose this list, ' + str(no_error_val) + '\n')
		sys.exit(1)
	
	cmd.write('Start Z-Hypothesis for beta_1 written in ALPS wiki.\n')
	data = pyalps.loadMeasurements(read_file, args.y)
	css = pyalps.checkSteadyState(data, confidenceInterval=args.gamma)
	if args.debug:
		cmd.write(str(css))
	stat_list = []
	tf = 0
	lb = ['# ' + label]
	i = 0
	z0_wiki = norm.ppf(0.5+0.5*args.gamma)
	
	for ds in css:
		qs = ds.props['checkSteadyState']
		qs = qs['statistics']
		if qs['z'] <= z0_wiki:
			tf = 1
		else:
Example #14
        FILENAME=sys.argv[-1]
        ar=pyalps.h5.archive(FILENAME,'r')
        try:
            CLONE_NR=re.match('.*clone(.*)\..*',sys.argv[-1]).group(1)
        except:
            print('Regex failed...')
            exit()
        data= ar['simulation/realizations/0/clones/'+str(CLONE_NR)+'/results/']
        spins     =data['Last Configuration']['mean']['value']
        coords    =data['Coordinates']['mean']['value']
        is_deleted=data['Is Deleted']['mean']['value']
        T=ar['parameters/T']

    if SSF:
        FILENAME=sys.argv[-1]
        data=pyalps.ResultsToXY(pyalps.loadMeasurements([FILENAME],['Last Configuration']),x='EXMC: Temperature',y='Last Configuration')[0]
        spins=data.y
    if EXMC:
        FILENAME=sys.argv[-2]
        ar=pyalps.h5.archive(FILENAME,'r')
        data= ar['simulation/realizations/0/clones/0/results/sections/'+sys.argv[-1]]
        T         =data['EXMC: Temperature']['mean']['value']
        spins     =data['Last Configuration']['mean']['value']
        coords    =data['Coordinates']['mean']['value']
        is_deleted=data['Is Deleted']['mean']['value']
    if ('--save' in sys.argv):
        SAVE=True
        SAVENAME=FILENAME[:-3]
        if(EXMC):
            SAVENAME=SAVENAME+'.sector'+((sys.argv[-1]).rjust(4,str(0)))
Example #15
    print('Accepted moves:', sim.accepted)
    
    
    analysis = analyzer.Analyzer(model=model)
    
    #==============================================================================
    # DATA ANALYSIS     
    #==============================================================================
    #how to calculate the Binder Ratio within Python:
    resultsDir = data_directory
    dataLocationPattern = data_directory+str(model)

    infiles=pyalps.getResultFiles(pattern=dataLocationPattern)

    data = pyalps.loadMeasurements(pyalps.getResultFiles(pattern=dataLocationPattern+'*'),['E','m^2', 'm^4'])
    m2 = pyalps.collectXY(data,x='BETA',y='m^2',foreach=['L'])
    m4 = pyalps.collectXY(data,x='BETA',y='m^4',foreach=['L'])
    E = pyalps.collectXY(data,x='BETA',y='E',foreach=['L'])
    
    m2plot = []
    eplot = []
    u4=[]
    for i in range(len(m2)):
        d = pyalps.DataSet()
        d.propsylabel='U4'
        d.props = m2[i].props
        d.x= m2[i].x
        d.y = m4[i].y/m2[i].y/m2[i].y
        u4.append(d)
        
Example #16
          'Number of Clusters' : 'cluster',
          }

xnames   = [ 'L', ]
foreachs = [ ['T'], ]
fe_types = [ [float], ]

def extract(data, xname, names, foreach, fe_types):
  if np.isscalar(foreach):
    foreach = [foreach]
  if np.isscalar(fe_types):
    fe_types = [fe_types]
  for name in names:
    for obs in pyalps.collectXY(data, xname, name, foreach=foreach):
      vals = [ typ(obs.props[sym]) for sym, typ in zip(foreach, fe_types) ]
      filename = names[name]
      for sym, val in zip(foreach, vals):
        filename += '-{}{}'.format(sym,val)
      filename += '.dat'
      with open(filename, 'w') as f:
        f.write(plot.convertToText([obs]).replace(' +/- ', ' '))


result_files = pyalps.getResultFiles(prefix='params')

data = pyalps.loadMeasurements(result_files, names.keys())

for xname, fe, fet in zip(xnames, foreachs, fe_types):
  extract(data, xname, names, fe, fet)

Example #17
      'THERMALIZATION': 5000,
      'SWEEPS'         : 50000,
      'ALGORITHM'      : "loop",
      # 'MEASURE[Winding Number]': 1,
      # 'MEASURE_CORRELATIONS[Diagonal spin correlations]':"Sz",
    }
)


#write the input file and run the simulation

input_file = pyalps.writeInputFiles(os.path.join(os.getcwd(), temp, timestamp, lattice_name), parms)

pyalps.runApplication('loop', input_file, writexml=True)

data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix=lattice_name))

results = 'results'

try:
  os.mkdir(results)
except OSError:
  pass

file_name = os.path.join(results, lattice_name + "_beta_{beta}_Nx_{Nx}_Ny_{Ny}_J_{J}_J1_{J1}.xml".format(beta=beta,
                                                                                                         Nx=Nx,
                                                                                                         Ny=Ny,
                                                                                                         J=J,
                                                                                                         J1=J1))

d_xml = data2xml.DataToXML(data=data, looper=True, lattice=LATTICE_LIBRARY)
Example #18
]:
    parms.append({
        'LATTICE': "ladder",
        'T': t,
        'J0': -1,
        'J1': -1,
        'THERMALIZATION': 10000,
        'SWEEPS': 500000,
        'UPDATE': "cluster",
        'MODEL': "Heisenberg",
        'L': 60
    })

#write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm2b', parms)
pyalps.runApplication('spinmc', input_file, Tmin=5)

#load the susceptibility and collect it as function of temperature T
data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm2b'),
                               'Susceptibility')
susceptibility = pyalps.collectXY(data, x='T', y='Susceptibility')

#make plot
plt.figure()
pyalps.plot.plot(susceptibility)
plt.xlabel('Temperature $T/J$')
plt.ylabel('Susceptibility $\chi J$')
plt.ylim(0, 0.22)
plt.title('Heisenberg ladder')
plt.show()
Example #19
input_file = pyalps.writeInputFiles('parm',parms)
#pyalps.runApplication('mc++',input_file,Tmin=5)
# use the following instead if you have MPI
pyalps.runApplication('mc++',input_file,Tmin=5,MPI=1)

def f_alpha(Le, Lo, b=2, d=2):
    return 2-d*log(b)/log(Le)
def f_beta(Le, Lo, b=2, d=2):
    return (d*log(b)-log(Lo))/log(Le) 
def f_gamma(Le, Lo, b=2, d=2):
    return log(b)/log(Le)*(2*log(Lo)/log(b)-d)
def f_nu(Le, Lo, b=2, d=2):
    return log(b)/log(Le)

#load the susceptibility and collect it as function of temperature T
data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),['M', 'c_V', 'BinderCumulant', 'susceptibility'])

Tc0=2.269
Tc_min=2
Tc_max=2.5
le0=2
le_min=1.
le_max=10
lo0=3.66802
lo_min=1
lo_max=15
alpha0=f_alpha(le0,lo0,d=2)
beta0 =f_beta(le0,lo0,d=2)
gamma0=f_gamma(le0,lo0,d=2)
nu0   =f_nu(le0,lo0,d=2)
Example #20
 def check_data_length(self,string,should_be, output=False):
     import pyalps
     l=len(pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),[string])[0][0].y)
     self.assertEqual(l,should_be)
Example #21
    parms.append({
        'LATTICE': "ladder",
        'MODEL': "spin",
        'local_S': 0.5,
        'T': 0.08,
        'J0': 1,
        'J1': 1,
        'THERMALIZATION': 1000,
        'SWEEPS': 10000,
        'L': 20,
        'h': h
    })

#write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm3b', parms)
res = pyalps.runApplication('dirloop_sse', input_file, Tmin=5)

#load the magnetization and collect it as function of field h
data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm3b'),
                               'Magnetization Density')
magnetization = pyalps.collectXY(data, x='h', y='Magnetization Density')

#make plot
plt.figure()
pyalps.plot.plot(magnetization)
plt.xlabel('Field $h$')
plt.ylabel('Magnetization $m$')
plt.ylim(0.0, 0.5)
plt.title('Quantum Heisenberg ladder')
plt.show()
Example #22
 def check_has_observable(self,string, output=False):
     import pyalps
     self.assertGreater(len(pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),[string])[0]), 0)
Example #23
    })

#write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm1', parms)
pyalps.runApplication('spinmc', input_file, Tmin=5, writexml=True)

#get the list of result files
result_files = pyalps.getResultFiles(prefix='parm1')
print("Loading results from the files: ", result_files)

#print the observables stored in those files:
print("The files contain the following mesurements:", end=' ')
print(pyalps.loadObservableList(result_files))

#load a selection of measurements:
data = pyalps.loadMeasurements(result_files,
                               ['|Magnetization|', 'Magnetization^2'])

#make a plot for the magnetization: collect Magnetization as function of T
plotdata = pyalps.collectXY(data, 'T', '|Magnetization|')
plt.figure()
pyalps.plot.plot(plotdata)
plt.xlim(0, 3)
plt.ylim(0, 1)
plt.title('Ising model')
plt.show()

# convert the data to text file for plotting using another tool
print(pyalps.plot.convertToText(plotdata))

# convert the data to grace file for plotting using xmgrace
print(pyalps.plot.makeGracePlot(plotdata))
Example #24

# Please run all four other tutorials before running this one.
# This tutorial relies on the results created in those tutorials

import pyalps
import matplotlib.pyplot as plt
import pyalps.plot

# load all files
data = pyalps.loadMeasurements(pyalps.getResultFiles(), 'Susceptibility')

#flatten the hierarchical structure
data = pyalps.flatten(data)

# collect the susceptibility
susceptibility = pyalps.collectXY(data,
                                  x='T',
                                  y='Susceptibility',
                                  foreach=['MODEL', 'LATTICE'])

# assign labels to the data depending on the properties
for s in susceptibility:
    # print s.props
    if s.props['LATTICE'] == 'chain lattice':
        s.props['label'] = "chain"
Example #25
# Write into XML input file:
input_file = pyalps.writeInputFiles('mc01b',parms)

# and run the application spinmc:
pyalps.runApplication('spinmc', input_file, Tmin=10, writexml=True)

# We first get the list of all hdf5 result files via:
files = pyalps.getResultFiles(prefix='mc01b')

# and then extract, say the timeseries of the |Magnetization| measurements:
ts_M = pyalps.loadTimeSeries(files[0], '|Magnetization|');

# We can then visualize graphically:
import matplotlib.pyplot as plt
plt.plot(ts_M)
plt.show()

# ALPS Python provides a convenient tool to check whether measurement observables have reached steady-state equilibrium.
#
# Here is one example:
print pyalps.checkSteadyState(outfile=files[0], observable='|Magnetization|', confidenceInterval=0.95)
print

# and another one:
observables = pyalps.loadMeasurements(files, ['|Magnetization|', 'Energy'])
observables = pyalps.checkSteadyState(observables, confidenceInterval=0.95)
for o in observables:
    print '{}:\t{}'.format(o.props['observable'], o.props['checkSteadyState'])

Example #26
 def check_value_vector(self, string, index, should_be, significance=8):
     import pyalps
     val=pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),[string])[0][0].y.mean[index]
     err=pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),[string])[0][0].y.error[index]
     self.assertLess(abs(should_be-val)/max(err,1e-6),significance)
Example #27
# Preparing and running the simulation using Python
import pyalps

parms = [{
    'LATTICE': 'inhomogeneous simple cubic lattice',
    'L': 120,
    'MODEL': 'boson Hubbard',
    'Nmax': 20,
    't': 1.,
    'U': 8.11,
    'mu':
    '4.05 - (0.0073752*(x-(L-1)/2.)*(x-(L-1)/2.) + 0.0036849*(y-(L-1)/2.)*(y-(L-1)/2.) + 0.0039068155*(z-(L-1)/2.)*(z-(L-1)/2.))',
    'T': 1.,
    'THERMALIZATION': 1500,
    'SWEEPS': 7000,
    'SKIP': 50,
    'MEASURE[Local Density]': 1
}]

input_file = pyalps.writeInputFiles('parm2a', parms)
res = pyalps.runApplication('dwa', input_file)

# Evaluating and plotting in Python
import pyalps
import pyalps.plot as aplt

data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm2a'),
                               'Local Density')
aplt.plot3D(data, centeredAtOrigin=True)
Example #28
 def check_no_double_values(self,string, output=False):
     import pyalps
     from numpy import diff, sort
     val=pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),[string])[0][0].y
     if len(val)>1:
         self.assertGreater(min(diff(sort(abs(val)))),1e-10)
Example #29
input_file = pyalps.writeInputFiles('parm1',parms)

# The queue is loaded from a configuration file which should either be located in the execution directory or in ~/.batchq/configuration
q = load_queue(LSFBSub, "brutus")
desc = runApplicationBackground('spinmc',input_file,Tmin=5,writexml=True, queue = q, force_resubmit = False )

if not desc.finished():
   print "Your simulations has not yet ended, please run this command again later."
else:
    if desc.failed():
        print "Your submission has failed"
        sys.exit(-1)
    result_files = pyalps.getResultFiles(prefix='parm1')
    print result_files
    print pyalps.loadObservableList(result_files)
    data = pyalps.loadMeasurements(result_files,['|Magnetization|','Magnetization^2'])
    print data
    plotdata = pyalps.collectXY(data,'T','|Magnetization|')
    plt.figure()
    pyalps.plot.plot(plotdata)
    plt.xlim(0,3)
    plt.ylim(0,1)
    plt.title('Ising model')
    plt.show()
    print pyalps.plot.convertToText(plotdata)
    print pyalps.plot.makeGracePlot(plotdata)
    print pyalps.plot.makeGnuplotPlot(plotdata)
    binder = pyalps.DataSet()
    binder.props = pyalps.dict_intersect([d[0].props for d in data])
    binder.x = [d[0].props['T'] for d in data]
    binder.y = [d[1].y[0]/(d[0].y[0]*d[0].y[0]) for d in data]
Example #30
              'J0'             : 1 ,
              'J1'             : 1,
              'J2'             : j2,
              'THERMALIZATION' : 5000,
              'SWEEPS'         : 50000, 
              'MODEL'          : "spin",
              'L'              : 8,
              'W'              : 4
            }
    )
    
#write the input file and run the simulation
input_file = pyalps.writeInputFiles('mc08a',parms)
pyalps.runApplication('loop',input_file)

data = pyalps.loadMeasurements(pyalps.getResultFiles(pattern='mc08a.task*.out.h5'),['Staggered Susceptibility','Susceptibility'])
susc1=pyalps.collectXY(data,x='T',y='Susceptibility', foreach=['J2'])

lines = []
for data in susc1:
    pars = [fw.Parameter(1), fw.Parameter(1)]
    data.y= data.y[data.x < 1]
    data.x= data.x[data.x < 1]
    f = lambda self, x, pars: (pars[0]()/np.sqrt(x))*np.exp(-pars[1]()/x)
    fw.fit(None, f, pars, [v.mean for v in data.y], data.x)
    prefactor = pars[0].get()
    gap = pars[1].get()
    print prefactor,gap
    
    lines += plt.plot(data.x, f(None, data.x, pars))
    lines[-1].set_label('$J_2=%.4s$: $\chi = \frac{%.4s}{T}\exp(\frac{-%.4s}{T})$' % (data.props['J2'], prefactor,gap))
Example #31
File: replace.py Project: potass13/ALPS
		cmd.write('no_error_val is ' + str(no_error_val) + '\n')
		cmd.write('x_val is ' + x_val + '\n')
		cmd.write('y_val is ' + y_val + '\n')
	
	if not x_val in val_list:
		cmd.write('The X-value you inputted do not exist.\n')
		cmd.write('You should choose this list, ' + str(val_list) + '\n')
		sys.exit(1)
	elif not y_val in val_list:
		cmd.write('The Y-value you inputted do not exist.\n')
		cmd.write('You should choose this list, ' + str(val_list) + '\n')
		sys.exit(1)
	
	#XML data file -> gnuplot-form text
	cmd.write('Start to convert the files XML to gnuplot-form.\n')
	data = pyalps.loadMeasurements(read_file, y_val)
	data = pyalps.flatten(data)
	xy_data = pyalps.collectXY(data, x_val, y_val)
	gnu_xy_data = pyalps.plot.makeGnuplotPlot(xy_data)
	if args.debug:
		cmd.write(str(gnu_xy_data))
	cmd.write('Finish to convert the files XML to gnuplot-form.\n')
	
	#gnuplot-form text -> csv-form text
	cmd.write('Start to convert the files gnuplot-form to CSV.\n')
	temp_file = '__tmp_replace__.dat'
	f = open(temp_file, 'w')
	f.write(gnu_xy_data)
	f.close()
	
	head_x = x_val
Example #32
    plt.figure()
    plt.xlabel(r'$\tau$')
    plt.ylabel(r'$G_{flavor=0}(\tau)$')
    plt.title('Simulation at ' +
              r'$\beta = {beta}$'.format(beta=common_props['BETA']))
    pyalps.plot.plot(sim)
    plt.legend()

plt.show()

#################################################
## Display final occupation <n_{flavor=0}>
#################################################
## load the final iteration of G_{flavor=0}(tau)
data_G_tau = pyalps.loadMeasurements(res_files,
                                     respath='/simulation/results/G_tau',
                                     what=listobs,
                                     verbose=False)

print("Occupation in the last iteration at flavor=0")
for d in pyalps.flatten(data_G_tau):
    # obtain occupation using relation: <n_{flavor=0}> = -<G_{flavor=0}(tau=beta)>
    d.y = np.array([-d.y[-1]])
    print("n_0(beta =", d.props['BETA'], ") =", d.y[0])
    d.x = np.array([0])
    d.props['observable'] = 'occupation'

occupation = pyalps.collectXY(data_G_tau, 'BETA', 'occupation')
for d in occupation:
    d.props['line'] = "scatter"

plt.figure()
Example #33
    print '# L:', L, 'N:', N

    # Scan beta range [0,1] in steps of 0.1
    for beta in [0., .1, .2, .3, .4, .5, .6, .7, .8, .9, 1.]:
        for l in [4, 6, 8]:
            print '-----------'
            print 'beta =', beta
            sim = Simulation(beta, l)
            sim.run(N / 2, N)
            sim.save('ising.L_' + str(l) + 'beta_' + str(beta) + '.h5')

    #how to calculate the Binder Ratio within Python:
    infiles = pyalps.getResultFiles(pattern='ising.L')

    data = pyalps.loadMeasurements(pyalps.getResultFiles(pattern='ising.L*'),
                                   ['E', 'm^2', 'm^4'])
    m2 = pyalps.collectXY(data, x='BETA', y='m^2', foreach=['L'])
    m4 = pyalps.collectXY(data, x='BETA', y='m^4', foreach=['L'])

    u = []
    for i in range(len(m2)):
        d = pyalps.DataSet()
        d.propsylabel = 'U4'
        d.props = m2[i].props
        d.x = m2[i].x
        d.y = m4[i].y / m2[i].y / m2[i].y
        u.append(d)

    plt.figure()
    pyalps.plot.plot(u)
    plt.xlabel('Inverse Temperature $\\beta$')
Example #34
        cmd.write('no_error_val is ' + str(no_error_val) + '\n')
        cmd.write('x_val is ' + x_val + '\n')
        cmd.write('y_val is ' + y_val + '\n')

    if not x_val in val_list:
        cmd.write('The X-value you inputted do not exist.\n')
        cmd.write('You should choose this list, ' + str(val_list) + '\n')
        sys.exit(1)
    elif not y_val in val_list:
        cmd.write('The Y-value you inputted do not exist.\n')
        cmd.write('You should choose this list, ' + str(val_list) + '\n')
        sys.exit(1)

    #XML data file -> gnuplot-form text
    cmd.write('Start to convert the files XML to gnuplot-form.\n')
    data = pyalps.loadMeasurements(read_file, y_val)
    data = pyalps.flatten(data)
    xy_data = pyalps.collectXY(data, x_val, y_val)
    gnu_xy_data = pyalps.plot.makeGnuplotPlot(xy_data)
    if args.debug:
        cmd.write(str(gnu_xy_data))
    cmd.write('Finish to convert the files XML to gnuplot-form.\n')

    #gnuplot-form text -> csv-form text
    cmd.write('Start to convert the files gnuplot-form to CSV.\n')
    temp_file = '__tmp_replace__.dat'
    f = open(temp_file, 'w')
    f.write(gnu_xy_data)
    f.close()

    head_x = x_val
Example #35
              'UPDATE'         : "cluster",
              'MODEL'          : "Ising",
              'L'              : l
            }
    )

#write the input file and run the simulation
input_file = pyalps.writeInputFiles('parm7a',parms)
pyalps.runApplication('spinmc',input_file,Tmin=5)
# use the following instead if you have MPI
#pyalps.runApplication('spinmc',input_file,Tmin=5,MPI=2)

pyalps.evaluateSpinMC(pyalps.getResultFiles(prefix='parm7a'))

#load the susceptibility and collect it as function of temperature T
data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm7a'),['|Magnetization|', 'Connected Susceptibility', 'Specific Heat', 'Binder Cumulant', 'Binder Cumulant U2'])
magnetization_abs = pyalps.collectXY(data,x='T',y='|Magnetization|',foreach=['L'])
connected_susc = pyalps.collectXY(data,x='T',y='Connected Susceptibility',foreach=['L'])
spec_heat = pyalps.collectXY(data,x='T',y='Specific Heat',foreach=['L'])
binder_u4 = pyalps.collectXY(data,x='T',y='Binder Cumulant',foreach=['L'])
binder_u2 = pyalps.collectXY(data,x='T',y='Binder Cumulant U2',foreach=['L'])

#make plots
plt.figure()
pyalps.plot.plot(magnetization_abs)
plt.xlabel('Temperature $T$')
plt.ylabel('Magnetization $|m|$')
plt.title('2D Ising model')

plt.figure()
pyalps.plot.plot(connected_susc)
Example #36
File: checkSS.py Project: potass13/ALPS
    val_list = val_list + no_error_val

    label = args.label

    if not pq in val_list:
        cmd.write('The physical quantity you inputted do not exist.\n')
        cmd.write('You should choose this list, ' + str(val_list) + '\n')
        sys.exit(1)

    if not label in no_error_val:
        cmd.write('The label you inputted do not exist or cannot use.\n')
        cmd.write('You should choose this list, ' + str(no_error_val) + '\n')
        sys.exit(1)

    cmd.write('Start Z-Hypothesis for beta_1 written in ALPS wiki.\n')
    data = pyalps.loadMeasurements(read_file, args.y)
    css = pyalps.checkSteadyState(data, confidenceInterval=args.gamma)
    if args.debug:
        cmd.write(str(css))
    stat_list = []
    tf = 0
    lb = ['# ' + label]
    i = 0
    z0_wiki = norm.ppf(0.5 + 0.5 * args.gamma)

    for ds in css:
        qs = ds.props['checkSteadyState']
        qs = qs['statistics']
        if qs['z'] <= z0_wiki:
            tf = 1
        else:
Example #37
    return x_values, y_values, y_errors


parser = argparse.ArgumentParser(description='Evaluate Renyi entropies for range of L', epilog='(C) Johannes Helmes 2014')

parser.add_argument('--infile','-i', help='Prefix of result files',required=True)
parser.add_argument('--foreach','-f',default='h',help='Parameter name, (default h)')
parser.add_argument('--steps','-s',nargs=2,type=int,help='Number of increment steps to complete U and O, (default=1/2 1)')
parser.add_argument('--plot','-p',action='store_true')
parser.add_argument('--verbose','-v',action='store_true')
args=parser.parse_args()

REntropy={}


data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix=args.infile),['EG'])


if args.verbose:
    print data

renyi_dataG = pyalps.collectXY(data, x='IncNo', y='EG', foreach=[args.foreach])

if args.verbose:
    print renyi_dataG

if (args.steps!=None):
    IncNosIItoU=range(args.steps[0])
    IncNosUtoO=range(args.steps[0],args.steps[1])
    #IncNos = [%.1f % i for i in range(args.IncNoRange[0], args.IncNoRange[1])]
    print IncNosIItoU, IncNosUtoO
Example #38

import pyalps
import pyalps.plot as alpsplot
import matplotlib.pyplot as pyplot

data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm9a'), [
    'Specific Heat', 'Magnetization Density^2', 'Binder Ratio of Magnetization'
])
for item in pyalps.flatten(data):
    item.props['L'] = int(item.props['L'])

magnetization2 = pyalps.collectXY(data,
                                  x='T',
                                  y='Magnetization Density^2',
                                  foreach=['L'])
magnetization2.sort(key=lambda item: item.props['L'])

specificheat = pyalps.collectXY(data, x='T', y='Specific Heat', foreach=['L'])
specificheat.sort(key=lambda item: item.props['L'])

binderratio = pyalps.collectXY(data,
                               x='T',
Example #39
              'THERMALIZATION'      : 500,
              'U'                   : u,
              'J'                   : j,
              't0'                  : 0.5,
              't1'                  : 1
        }
        )

# For more precise calculations we propose to enhance the SWEEPS

#write the input file and run the simulation
for p in parms:
    input_file = pyalps.writeParameterFile('parm_u_'+str(p['U'])+'_j_'+str(p['J']),p)
    res = pyalps.runDMFT(input_file)

listobs = ['0', '2']   # flavor 0 is SYMMETRIZED with 1, flavor 2 is SYMMETRIZED with 3
    
data = pyalps.loadMeasurements(pyalps.getResultFiles(pattern='parm_u_*h5'), respath='/simulation/results/G_tau', what=listobs, verbose=True)
for d in pyalps.flatten(data):
    d.x = d.x*d.props["BETA"]/float(d.props["N"])
    d.y = -d.y
    d.props['label'] = r'$U=$'+str(d.props['U'])+'; flavor='+str(d.props['observable'][len(d.props['observable'])-1])
plt.figure()
plt.yscale('log')
plt.xlabel(r'$\tau$')
plt.ylabel(r'$G_{flavor}(\tau)$')
plt.title('DMFT-05: Orbitally Selective Mott Transition on the Bethe lattice')
pyalps.plot.plot(data)
plt.legend()
plt.show()
Example #40
def compareMC(testfiles, reffiles, tol_factor='auto', whatlist=None):
    """ Compare results of Monte Carlo Simulations

    returns True if test succeeded"""

    if tol_factor == 'auto':
        tol_factor = 2.0

    testdata = pyalps.loadMeasurements(testfiles)
    refdata = pyalps.loadMeasurements(reffiles)

    if len(testdata) != len(refdata):
        raise Exception(
            "Comparison Error: test and reference data differ in number of tasks"
        )

    # File level
    compare_list = []
    for testtask, reftask in zip(testdata, refdata):
        testfile = testtask[0].props['filename']
        reffile = reftask[0].props['filename']
        # Ensure we compare equivalent tasks
        if len(testtask) != len(reftask):
            raise Exception("Comparison Error: test and reference data have \
                different number of observables\n\
                (Have both reference and test data been evaluated?)")

        # Observables

        # Select only observables from whatlist if specified
        if whatlist:
            notfoundtest = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in testtask]
            ]
            if notfoundtest:
                print(
                    "The following observables specified for comparison\nhave not been found in test results:"
                )
                print("File:", testfile)
                print(notfoundtest)
                sys.exit(1)

            notfoundref = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in reftask]
            ]
            if notfoundref:
                print(
                    "The following observables specified for comparison\nhave not been found in reference results:"
                )
                print("File:", reffile)
                print(notfoundref)
                sys.exit(1)

            testtask = [
                o for o in testtask if o.props['observable'] in whatlist
            ]
            reftask = [o for o in reftask if o.props['observable'] in whatlist]

        #print("\ncomparing file " + testfile + " against file " + reffile)
        compare_obs = []
        for testobs, refobs in zip(testtask, reftask):

            # Scalar observables
            if pyalps.size(testobs.y[0]) == 1:
                testerr = testobs.y[0].error
                referr = refobs.y[0].error
                tol = np.sqrt(testerr**2 + referr**2) * tol_factor
                diff = np.abs(testobs.y[0].mean - refobs.y[0].mean)
                compare_obs.append(obsdict(tol, diff, testobs.props))

            # Array valued observables
            else:
                tol_list = []
                diff_list = []
                for (ty, ry) in zip(testobs.y[0], refobs.y[0]):
                    tol_list.append(
                        np.sqrt(ty.error**2 + ry.error**2) * tol_factor)
                    diff_list.append(np.abs(ty - ry))

                maxdiff = max(diff_list)
                tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                compare_obs.append(obsdict(tol, maxdiff, testobs.props))

        compare_list.append(compare_obs)

    #writeTest2stdout(compare_list) # or a file, if that has been specified
    succeed_list = [
        obs['passed'] for obs_list in compare_list for obs in obs_list
    ]
    return False not in succeed_list, compare_list
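A minimal usage sketch for compareMC, not part of the original test module; the file prefixes, the reference directory and the observable names are assumptions.

import pyalps

testfiles = pyalps.getResultFiles(prefix='parm1')
reffiles = pyalps.getResultFiles(prefix='parm1', dirname='./ref')
ok, report = compareMC(testfiles, reffiles, tol_factor='auto',
                       whatlist=['Energy', '|Magnetization|'])
print('test passed:', ok)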
Example #41
            'J0': 1,
            'J1': 1,
            'J2': j2,
            'THERMALIZATION': 5000,
            'SWEEPS': 50000,
            'MODEL': "spin",
            'L': l,
            'W': l / 2
        })

#write the input file and run the simulation
input_file = pyalps.writeInputFiles('mc08b', parms)
pyalps.runApplication('loop', input_file)

data = pyalps.loadMeasurements(
    pyalps.getResultFiles(pattern='mc08b.task*.out.h5'),
    ['Binder Ratio of Staggered Magnetization', 'Stiffness'])

binder = pyalps.collectXY(data,
                          x='J2',
                          y='Binder Ratio of Staggered Magnetization',
                          foreach=['L'])
stiffness = pyalps.collectXY(data, x='J2', y='Stiffness', foreach=['L'])

for q in stiffness:
    q.y = q.y * q.props['L']

#make plot
plt.figure()
pyalps.plot.plot(stiffness)
plt.xlabel(r'$J2$')
Example #42
File: plot.py Project: DropD/CQP
import sys, os

import numpy as np
import matplotlib.pyplot as plt
import pyalps
from pyalps.plot import plot

files = pyalps.getResultFiles(dirname='data')
data = pyalps.loadMeasurements(files , ['|m|','m^2', 'Connected Susceptibility', 'Binder Cumulant U2'])

for d in pyalps.flatten(data):
    d.props['M/L'] = d.props['M'] / d.props['L']

m = pyalps.collectXY(data, 'Jx', '|m|', foreach=['L', 'M'])
chi = pyalps.collectXY(data, 'Jx', 'Connected Susceptibility', foreach=['L', 'M'])
binder = pyalps.collectXY(data, 'Jx', 'Binder Cumulant U2', foreach=['L', 'M'])


for d in pyalps.flatten(m):
    d.x = np.exp(2.*d.props['Jy'])*d.x
plt.figure()
plot(m)
plt.xlabel('$J/\\Gamma$')
plt.ylabel('magnetization')
plt.legend(loc='best', frameon=False)


for d in pyalps.flatten(chi):
    d.x = np.exp(2.*d.props['Jy'])*d.x
plt.figure()
plot(chi)
Example #43
def compareMixed(testfiles, reffiles, tol_factor='auto', whatlist=None):
    """ Compare results of QWL, DMRG (ALPS)

    returns True if test succeeded"""

    if tol_factor == 'auto':
        tol_factor = 2.0

    testdata = pyalps.loadMeasurements(testfiles)
    refdata = pyalps.loadMeasurements(reffiles)
    if len(testdata) != len(refdata):
        raise Exception(
            "Comparison Error: test and reference data differ in number of tasks"
        )

    # This is needed by the dmrg example
    try:
        testeig = pyalps.loadEigenstateMeasurements(testfiles)
        refeig = pyalps.loadEigenstateMeasurements(reffiles)
        for ttask, rtask, teig, reig in zip(testdata, refdata, testeig,
                                            refeig):
            ttask += teig
            rtask += reig
    except RuntimeError:
        pass

    # File level
    compare_list = []
    for testtask, reftask in zip(testdata, refdata):
        testfile = testtask[0].props['filename']
        reffile = reftask[0].props['filename']

        # Ensure we compare equivalent tasks
        if len(testtask) != len(reftask):
            raise Exception("Comparison Error: test and reference data have \
                different number of observables\n")

        # Observables

        # Select only observables from whatlist if specified
        if whatlist:
            notfoundtest = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in testtask]
            ]
            if notfoundtest:
                print(
                    "The following observables specified for comparison\nhave not been found in test results:"
                )
                print("File:", testfile)
                print(notfoundtest)
                sys.exit(1)

            notfoundref = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in reftask]
            ]
            if notfoundref:
                print(
                    "The following observables specified for comparison\nhave not been found in reference results:"
                )
                print("File:", reffile)
                print(notfoundref)
                sys.exit(1)

            testtask = [
                o for o in testtask if o.props['observable'] in whatlist
            ]
            reftask = [o for o in reftask if o.props['observable'] in whatlist]

        #print("\ncomparing file " + testfile + " against file " + reffile)
        compare_obs = []
        for testobs, refobs in zip(testtask, reftask):

            # MC if it succeeds
            try:
                # Scalar observables
                if pyalps.size(testobs.y) == 1:
                    testerr = testobs.y[0].error
                    referr = refobs.y[0].error
                    tol = np.sqrt(testerr**2 + referr**2) * tol_factor
                    diff = np.abs(testobs.y[0].mean - refobs.y[0].mean)
                    compare_obs.append(obsdict(tol, diff, testobs.props))

                # Array valued observables
                else:
                    tol_list = []
                    diff_list = []
                    for (ty, ry) in zip(testobs.y, refobs.y):
                        tol_list.append(
                            np.sqrt(ty.error**2 + ry.error**2) * tol_factor)
                        diff_list.append(np.abs(ty - ry))

                    maxdiff = max(diff_list)
                    tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                    compare_obs.append(obsdict(tol, maxdiff, testobs.props))

            # Epsilon otherwise
            except AttributeError:
                # Scalar observables
                if pyalps.size(testobs.y) == 1:
                    tol = max(10e-12,
                              np.abs(refobs.y[0]) * 10e-12) * tol_factor
                    diff = np.abs(testobs.y[0] - refobs.y[0])
                    compare_obs.append(obsdict(tol, diff, testobs.props))

                # Array valued observables
                else:
                    tol_list = []
                    diff_list = []
                    for (ty, ry) in zip(testobs.y, refobs.y):
                        tol_list.append(max(10e-12, ry * 10e-12))
                        diff_list.append(np.abs(ty - ry))

                    maxdiff = max(diff_list)
                    tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                    compare_obs.append(obsdict(tol, maxdiff, testobs.props))

        compare_list.append(compare_obs)

    #writeTest2stdout(compare_list) # or a file, if that has been specified
    succeed_list = [
        obs['passed'] for obs_list in compare_list for obs in obs_list
    ]
    return False not in succeed_list, compare_list
Example #44
def get_vector_mean(result_files, what):
    '''read only means from a VectorObservable into a multidimensional numpy array.
    '''
    data_set = pyalps.loadMeasurements(result_files, what=what)
    return np.array([i[0].y.mean for i in data_set])
Example #45
def createTest(script, inputs=None, outputs=None, prefix=None, refdir='./ref'):
    """ Create reference data, .testin.xml file and execute_test.py

    inputs are:
    -----------
    script: computes results to be tested 

    inputs: Optional list of input files if the application(s)
            called in 'script' rely on them and the input files are in the
            same directory as 'script'. If you specified
            relative paths to another directory, it won't work.

    outputs or prefix: outputs of script can either be specified with
               a complete list of output files or as a prefix 

    creates a script called apptest_name_of_script.py, which can be used to execute the test
    """

    if outputs is not None and prefix is not None:
        raise Exception("Cannot both define outputs and prefix")
    elif outputs is None and prefix is None:
        raise Exception("Script output has to be specified")
    script = os.path.expandvars(script)
    scriptdir = os.path.dirname(script)

    if not os.path.exists(refdir): recursive_mkdir(refdir)

    # Copy input files to refdir to allow execution of script there
    if inputs is not None:

        for f in inputs:
            if not os.path.expandvars(os.path.dirname(f)) == scriptdir:
                print(
                    "Input files to %s should be in the same directory as %s" %
                    (script, script))
                sys.exit(1)

            shutil.copy(f, refdir)

    # execute given script in refdir ( creates reference data )
    pardir = os.getcwd()
    os.chdir(refdir)
    cmdline = [sys.executable, os.path.join(pardir, script)]
    pyalps.executeCommand(cmdline)
    if inputs is not None:
        for f in inputs:
            os.remove(f)
    os.chdir(pardir)

    if prefix is None:
        reffiles = [os.path.join(refdir, os.path.basename(f)) for f in outputs]
    else:
        reffiles = pyalps.getResultFiles(prefix=prefix, dirname=refdir)

    if not reffiles:
        print(
            "Reference files not found. (If you use 'loop' or 'dmrg', try to delete old result files.)"
        )
        sys.exit(1)

    # acquire a list of all observables
    allobs = []
    try:
        eigenstatedata = pyalps.loadEigenstateMeasurements(reffiles)
    except RuntimeError:
        pass
    else:
        try:
            allobs += [o.props['observable'] for o in eigenstatedata[0][0]]

        # DMRG eigenstate data has one level of nesting less
        except TypeError:
            allobs += [o.props['observable'] for o in eigenstatedata[0]]

    try:
        mcdata = pyalps.loadMeasurements(reffiles)
    except RuntimeError:
        pass
    else:
        allobs += [o.props['observable'] for o in mcdata[0]]

    allobs = list(set(allobs))

    scriptname = os.path.basename(script)
    scriptname = os.path.splitext(scriptname)[0]
    scriptname_prefixed = 'apptest_%s.py' % scriptname

    # Write .xml test-input file
    refparms = {
        "TESTNAME": scriptname,
        "TOLERANCE": "auto",
        "WRITE_RESULTS_TO_FILE": "yes",
        "SAVE_OUT_IF_FAIL": "yes"
    }

    testinputfile = writeTestInputFile(script, inputs, refparms, reffiles,
                                       allobs)
    pyalps.tools.copyStylesheet(pardir)

    # Write .py test-start script
    f = open(scriptname_prefixed, 'w')
    f.write('#!/usr/bin/env python\n\n')
    f.write('import sys\n')
    f.write('from pyalps import apptest\n')

    f.write(
        '# Explicitly specify "compMethod=..." and "outputs=..." if needed\n')
    f.write(
        "ret = apptest.runTest( '%s', outputs='auto', compMethod='auto', pyexec='auto' )\n"
        % testinputfile)
    f.write('if not ret: sys.exit(1)\n')

    f.close()
    os.chmod(scriptname_prefixed, 0o755)
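A minimal usage sketch for createTest; 'tutorial1a.py' is a placeholder for a script whose result files start with 'parm1', and both names are assumptions.

# generates reference data under './ref' and a runner script apptest_tutorial1a.py
createTest('tutorial1a.py', prefix='parm1', refdir='./ref')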
Example #46
                 'THERMALIZATION' : 50000,
                 'SWEEPS'         : 15000,
                 'UPDATE'         : "ssf",
                 'cutoff_distance': 3.0,
                 'L'              : l,
                 'structure_factor': True,
                 'Targeted Acceptance Ratio': 0.4,
                 'Each_Measurement': 15
            }
           )
    #write the input file and run the simulation
    input_file = pyalps.writeInputFiles('parm',parms)
if RUN_SIMULATION:
    pyalps.runApplication('mc++',input_file,Tmin=5,MPI=1)
if ANALYZE:
    data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm'),['|Structure Factor|^2'])

    def Get_0k_range(L):
        return range(0,L*L,L)
    def Get_k0_range(L):
        return range(0,L,1)
    def Get_kk_range(L):
        return range(0,L*L,L+1)
    def Get_0k_data(data,L):
        return data.flatten()[Get_0k_range(L)]
    def Get_k0_data(data,L):
        return data.flatten()[Get_k0_range(L)]
    def Get_kk_data(data,L):
        return data.flatten()[Get_kk_range(L)]

    data = pyalps.collectXY(data,x='T',y='|Structure Factor|^2')[0]
Example #47
            'U': 1.0,
            'Nmax': 2,
            'THERMALIZATION': 100000,
            'SWEEPS': 2000000,
            'SKIP': 500,
            'MEASURE[Winding Number]': 1
        })

input_file = pyalps.writeInputFiles('parm1b', parms)
res = pyalps.runApplication('dwa', input_file, Tmin=5, writexml=True)

# Evaluating the simulation and preparing plots using Python
import pyalps
import matplotlib.pyplot as plt
import pyalps.plot as aplt

data = pyalps.loadMeasurements(pyalps.getResultFiles(prefix='parm1b'),
                               'Stiffness')
rhos = pyalps.collectXY(data, x='t', y='Stiffness', foreach=['L'])

for rho in rhos:
    rho.y = rho.y * float(rho.props['L'])

plt.figure()
aplt.plot(rhos)
plt.xlabel('Hopping $t/U$')
plt.ylabel('$\\rho _sL$')
plt.legend()
plt.title('Scaling plot for Bose-Hubbard model')
plt.show()
Example #48
File: mcrg.py Project: domischi/mcpp
    LVsT          =np.zeros([2,mcrg_iteration_depth,num_max_reduction_types, num_max_interactions, num_T]) # (odd|even), iteration, reduction technique, interaction set
    PlotT         =np.zeros([2,mcrg_iteration_depth,num_max_reduction_types, num_max_interactions, num_T]) # (odd|even), iteration, reduction technique, interaction set
    counter_matrix=np.zeros([2,mcrg_iteration_depth,num_max_reduction_types, num_max_interactions])        # (odd|even), iteration, reduction technique, interaction set
    for f in filenames:
        filename=f
        if filename[-4:]=='.xml':
            filename=filename[:-4]+'.h5'
        T=pyalps.loadProperties([filename])[0]['T']
        reduction_type =pyalps.loadProperties([filename])[0]['MCRG Reduction Technique']
        interaction_set=pyalps.loadProperties([filename])[0]['MCRG Interactions']
        add_to_reduction_dict(reduction_type) 
        add_to_interaction_dict(interaction_set) 
        print(index_reduction_type(reduction_type),reduction_type, index_interaction_set(interaction_set), interaction_set ,f)
        for type_of_interaction in ['e', 'o']:
            for it in range(1,mcrg_iteration_depth+1): 
                data = (pyalps.loadMeasurements([filename], ['MCRG S_alpha'+str(it-1),'MCRG S_alpha'+str(it),'MCRG S_alpha'+str(it-1)+' S_beta'+str(it),'MCRG S_alpha'+str(it)+' S_beta'+str(it)]))

                mean_sasb_n=  LoadMean(filename,'MCRG' + type_of_interaction+ ' S_alpha'+str(it)  +' S_beta'+str(it))
                mean_sasb_nm1=LoadMean(filename,'MCRG' + type_of_interaction+ ' S_alpha'+str(it-1)+' S_beta'+str(it))
                mean_sa_n=    LoadMean(filename,'MCRG' + type_of_interaction+ ' S_alpha'+str(it)  )
                mean_sa_nm1=  LoadMean(filename,'MCRG' + type_of_interaction+ ' S_alpha'+str(it-1))
               
                #jknf_sasb_n=  LoadJackknife(filename,'MCRG' + type_of_interaction+ ' S_alpha'+str(it)  +' S_beta'+str(it))
                #jknf_sasb_nm1=LoadJackknife(filename,'MCRG' + type_of_interaction+ ' S_alpha'+str(it-1)+' S_beta'+str(it))
                #jknf_sa_n=    LoadJackknife(filename,'MCRG' + type_of_interaction+ ' S_alpha'+str(it)  )
                #jknf_sa_nm1=  LoadJackknife(filename,'MCRG' + type_of_interaction+ ' S_alpha'+str(it-1))
                #print(reduction_type) 
                #print(EV(mean_sasb_n, mean_sasb_nm1, mean_sa_n, mean_sa_nm1, debug1=False, debug2=False))
                lambda_ = EV(mean_sasb_n, mean_sasb_nm1, mean_sa_n, mean_sa_nm1, debug1=False, debug2=False)[0]
                lambda_ = RawSanitize(lambda_)
                counter=int(counter_matrix[int('e'==type_of_interaction),it-1, index_reduction_type(reduction_type), index_interaction_set(interaction_set)])