Example #1
import sys

import numpy as np
import pyalps


def compareEpsilon(testfiles, reffiles, tol_factor='auto', whatlist=None):
    """ Compare results from diagonalization applications 
    
    returns True if test succeeded"""

    if tol_factor == 'auto':
        tol_factor = 1.0

    testdata = pyalps.loadEigenstateMeasurements(testfiles)
    refdata = pyalps.loadEigenstateMeasurements(reffiles)
    if not testdata or not refdata:
        if not testdata:
            print(
                "loadEigenstateMeasurements of file %s returned an empty list"
                % testfiles)

        if not refdata:
            print(
                "loadEigenstateMeasurements of file %s returned an empty list"
                % reffiles)

        return False, []

    # File level
    compare_list = []
    for testtask, reftask in zip(testdata, refdata):
        try:
            # ALPS applications
            testfile = testtask[0][0].props['filename']
            reffile = reftask[0][0].props['filename']

        except AttributeError:
            # workaround for MAQUIS DMRG which doesn't have sectors
            testtask = [testtask]
            reftask = [reftask]
            testfile = testtask[0][0].props['filename']
            reffile = reftask[0][0].props['filename']

        # Ensure we compare equivalent tasks
        if len(testtask) != len(reftask):
            raise Exception(
                "Comparison Error: test and reference data have different "
                "number of sectors\n"
                "(Have both reference and test data been pyalps.evaluate'd?)")

        # Sector level
        #print("\ncomparing file " + testfile + " against file " + reffile)
        compare_sector = []
        for testsector, refsector in zip(testtask, reftask):

            # Observables

            # Select only observables from whatlist if specified
            if whatlist:
                notfoundtest = [
                    w for w in whatlist
                    if w not in [o.props['observable'] for o in testsector]
                ]
                if notfoundtest:
                    print(
                        "The following observables specified for comparison\n"
                        "have not been found in test results:")
                    print("File:", testfile)
                    print(notfoundtest)
                    sys.exit(1)

                notfoundref = [
                    w for w in whatlist
                    if w not in [o.props['observable'] for o in refsector]
                ]
                if notfoundref:
                    print(
                        "The following observables specified for comparison\n"
                        "have not been found in reference results:")
                    print("File:", reffile)
                    print(notfoundref)
                    sys.exit(1)

                testsector = [
                    o for o in testsector if o.props['observable'] in whatlist
                ]
                refsector = [
                    o for o in refsector if o.props['observable'] in whatlist
                ]

            for testobs, refobs in zip(testsector, refsector):

                # Scalar observables
                if pyalps.size(testobs.y[0]) == 1:
                    tol = max(10e-12,
                              np.abs(refobs.y[0]) * 10e-12) * tol_factor
                    diff = np.abs(testobs.y[0] - refobs.y[0])
                    compare_sector.append(obsdict(tol, diff, testobs.props))

                # Array valued observables
                else:
                    tol_list = []
                    diff_list = []
                    for (ty, ry) in zip(testobs.y[0], refobs.y[0]):
                        tol_list.append(max(10e-12, np.abs(ry) * 10e-12))
                        diff_list.append(np.abs(ty - ry))

                    maxdiff = max(diff_list)
                    tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                    compare_sector.append(obsdict(tol, maxdiff, testobs.props))

        compare_list.append(compare_sector)

    #writeTest2stdout(compare_list) # or a file, if that has been specified
    succeed_list = [
        obs['passed'] for obs_list in compare_list for obs in obs_list
    ]
    return False not in succeed_list, compare_list
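
The obsdict helper used by these compare functions is not shown in the examples. Below is a minimal sketch of what it might look like, inferred only from how it is called here (with a tolerance, a difference, and the observable's props) and from the 'passed' key read afterwards; the real helper may record other fields.

# Hypothetical sketch of the obsdict helper assumed above; it only reproduces
# what the callers rely on (a mapping with a 'passed' flag) and assumes that
# props behaves like a plain dict.
def obsdict(tol, diff, props):
    return {
        'observable': props.get('observable', ''),
        'tolerance': tol,
        'difference': diff,
        'passed': bool(diff <= tol),
    }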
Example #2
def compareMC(testfiles, reffiles, tol_factor='auto', whatlist=None):
    """ Compare results of Monte Carlo Simulations

    returns True if test succeeded"""

    if tol_factor == 'auto':
        tol_factor = 2.0

    testdata = pyalps.loadMeasurements(testfiles)
    refdata = pyalps.loadMeasurements(reffiles)

    if len(testdata) != len(refdata):
        raise Exception(
            "Comparison Error: test and reference data differ in number of tasks"
        )

    # File level
    compare_list = []
    for testtask, reftask in zip(testdata, refdata):
        testfile = testtask[0].props['filename']
        reffile = reftask[0].props['filename']
        # Ensure we compare equivalent tasks
        if len(testtask) != len(reftask):
            raise Exception(
                "Comparison Error: test and reference data have different "
                "number of observables\n"
                "(Have both reference and test data been evaluated?)")

        # Observables

        # Select only observables from whatlist if specified
        if whatlist:
            notfoundtest = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in testtask]
            ]
            if notfoundtest:
                print(
                    "The following observables specified for comparison\nhave not been found in test results:"
                )
                print("File:", testfile)
                print(notfoundtest)
                sys.exit(1)

            notfoundref = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in reftask]
            ]
            if notfoundref:
                print(
                    "The following observables specified for comparison\nhave not been found in reference results:"
                )
                print("File:", reffile)
                print(notfoundref)
                sys.exit(1)

            testtask = [
                o for o in testtask if o.props['observable'] in whatlist
            ]
            reftask = [o for o in reftask if o.props['observable'] in whatlist]

        #print("\ncomparing file " + testfile + " against file " + reffile)
        compare_obs = []
        for testobs, refobs in zip(testtask, reftask):

            # Scalar observables
            if pyalps.size(testobs.y[0]) == 1:
                testerr = testobs.y[0].error
                referr = refobs.y[0].error
                tol = np.sqrt(testerr**2 + referr**2) * tol_factor
                diff = np.abs(testobs.y[0].mean - refobs.y[0].mean)
                compare_obs.append(obsdict(tol, diff, testobs.props))

            # Array valued observables
            else:
                tol_list = []
                diff_list = []
                for (ty, ry) in zip(testobs.y[0], refobs.y[0]):
                    tol_list.append(np.sqrt(ty.error**2 + ry.error**2))
                    diff_list.append(np.abs(ty - ry))

                maxdiff = max(diff_list)
                tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                compare_obs.append(obsdict(tol, maxdiff, testobs.props))

        compare_list.append(compare_obs)

    #writeTest2stdout(compare_list) # or a file, if that has been specified
    succeed_list = [
        obs['passed'] for obs_list in compare_list for obs in obs_list
    ]
    return False not in succeed_list, compare_list
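
A possible way to call compareMC on a freshly produced test run and a stored reference run; the file prefixes and observable names below are placeholders, not part of the original example.

# Hypothetical usage of compareMC; 'mc_test', 'mc_ref', 'Energy' and
# 'Magnetization' are placeholder names.
test_files = pyalps.getResultFiles(prefix='mc_test')
ref_files = pyalps.getResultFiles(prefix='mc_ref')
passed, report = compareMC(test_files, ref_files,
                           whatlist=['Energy', 'Magnetization'])
print('test passed:', passed)
if not passed:
    for task in report:
        for obs in task:
            if not obs['passed']:
                print(obs)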
Example #3
def compareMixed(testfiles, reffiles, tol_factor='auto', whatlist=None):
    """ Compare results of QWL, DMRG (ALPS)

    returns True if test succeeded"""

    if tol_factor == 'auto':
        tol_factor = 2.0

    testdata = pyalps.loadMeasurements(testfiles)
    refdata = pyalps.loadMeasurements(reffiles)
    if len(testdata) != len(refdata):
        raise Exception(
            "Comparison Error: test and reference data differ in number of tasks"
        )

    # This is needed by the dmrg example
    try:
        testeig = pyalps.loadEigenstateMeasurements(testfiles)
        refeig = pyalps.loadEigenstateMeasurements(reffiles)
        for ttask, rtask, teig, reig in zip(testdata, refdata, testeig,
                                            refeig):
            ttask += teig
            rtask += reig
    except RuntimeError:
        pass

    # File level
    compare_list = []
    for testtask, reftask in zip(testdata, refdata):
        testfile = testtask[0].props['filename']
        reffile = reftask[0].props['filename']

        # Ensure we compare equivalent tasks
        if len(testtask) != len(reftask):
            raise Exception(
                "Comparison Error: test and reference data have different "
                "number of observables\n")

        # Observables

        # Select only observables from whatlist if specified
        if whatlist:
            notfoundtest = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in testtask]
            ]
            if notfoundtest:
                print(
                    "The following observables specified for comparison\nhave not been found in test results:"
                )
                print("File:", testfile)
                print(notfoundtest)
                sys.exit(1)

            notfoundref = [
                w for w in whatlist
                if w not in [o.props['observable'] for o in reftask]
            ]
            if notfoundref:
                print(
                    "The following observables specified for comparison\nhave not been found in reference results:"
                )
                print("File:", reffile)
                print(notfoundref)
                sys.exit(1)

            testtask = [
                o for o in testtask if o.props['observable'] in whatlist
            ]
            reftask = [o for o in reftask if o.props['observable'] in whatlist]

        #print("\ncomparing file " + testfile + " against file " + reffile)
        compare_obs = []
        for testobs, refobs in zip(testtask, reftask):

            # MC if it succeeds
            try:
                # Scalar observables
                if pyalps.size(testobs.y) == 1:
                    testerr = testobs.y[0].error
                    referr = refobs.y[0].error
                    tol = np.sqrt(testerr**2 + referr**2) * tol_factor
                    diff = np.abs(testobs.y[0].mean - refobs.y[0].mean)
                    compare_obs.append(obsdict(tol, diff, testobs.props))

                # Array valued observables
                else:
                    tol_list = []
                    diff_list = []
                    for (ty, ry) in zip(testobs.y, refobs.y):
                        tol_list.append(np.sqrt(ty.error**2 + ry.error**2))
                        diff_list.append(np.abs(ty - ry))

                    maxdiff = max(diff_list)
                    tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                    compare_obs.append(obsdict(tol, maxdiff, testobs.props))

            # Epsilon otherwise
            except AttributeError:
                # Scalar observables
                if pyalps.size(testobs.y) == 1:
                    tol = max(10e-12,
                              np.abs(refobs.y[0]) * 10e-12) * tol_factor
                    diff = np.abs(testobs.y[0] - refobs.y[0])
                    compare_obs.append(obsdict(tol, diff, testobs.props))

                # Array valued observables
                else:
                    tol_list = []
                    diff_list = []
                    for (ty, ry) in zip(testobs.y, refobs.y):
                        tol_list.append(max(10e-12, np.abs(ry) * 10e-12))
                        diff_list.append(np.abs(ty - ry))

                    maxdiff = max(diff_list)
                    tol = tol_list[diff_list.index(maxdiff)] * tol_factor
                    compare_obs.append(obsdict(tol, maxdiff, testobs.props))

        compare_list.append(compare_obs)

    #writeTest2stdout(compare_list) # or a file, if that has been specified
    succeed_list = [
        obs['passed'] for obs_list in compare_list for obs in obs_list
    ]
    return False not in succeed_list, compare_list
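
In the Monte Carlo branches above, the acceptance tolerance combines the statistical errors of test and reference in quadrature before applying tol_factor. A tiny worked example with made-up numbers:

# Worked example of the MC tolerance used above (numbers are made up).
import numpy as np
testerr, referr = 0.003, 0.004                        # statistical errors
tol_factor = 2.0                                      # default for MC comparisons
tol = np.sqrt(testerr**2 + referr**2) * tol_factor    # sqrt(2.5e-5) * 2 = 0.01
diff = abs(1.2345 - 1.2402)                           # |mean_test - mean_ref| = 0.0057
print('passed:', diff <= tol)                         # True, since 0.0057 <= 0.01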
Example #4
import pyalps

#prepare the input parameters
parms = [{
    'LATTICE': "chain lattice",
    'MODEL': "spin",
    'local_S': 1,
    'J': 1,
    'L': 4,
    'CONSERVED_QUANTUMNUMBERS': 'Sz',
    'MEASURE_STRUCTURE_FACTOR[Structure Factor S]': 'Sz',
    'MEASURE_CORRELATIONS[Diagonal spin correlations]': 'Sz',
    'MEASURE_CORRELATIONS[Offdiagonal spin correlations]': 'Splus:Sminus'
}]

#write the input file and run the simulation
input_file = pyalps.writeInputFiles('ed01a', parms)
res = pyalps.runApplication('sparsediag', input_file)

#load all measurements for all states
data = pyalps.loadEigenstateMeasurements(pyalps.getResultFiles(prefix='ed01a'))

# print properties of ground states in all sectors:
for sector in data[0]:
    print('\nSector with Sz =', sector[0].props['Sz'],
          'and k =', sector[0].props['TOTAL_MOMENTUM'])
    for s in sector:
        if pyalps.size(s.y[0]) == 1:
            print(s.props['observable'], ' : ', s.y[0])
        else:
            for (x, y) in zip(s.x, s.y[0]):
                print(s.props['observable'], '(', x, ') : ', y)