Code Example #1
File: mrptbccollect.py  Project: maxhgerlach/detqmc
def eval_bc_subdirs(tuple_list):
    """if possible call mrpt for subdirs in tuple_list, return output_directory if not skipped from evaluation
    
       tuple_list : [(bc, subdir), ...]. return output directory if mrptbc has been run for those """

    print tuple_list,
    
    if len(tuple_list) != 4:
        print "=> faulty group, skip "
        return ""
        
    four_subdirs = [subdir for (bc, subdir) in tuple_list]

    # already done earlier: removed directories not containing any time series files

    output_directory = tuple_list[0][1].replace("bc"+tuple_list[0][0], "bc"+"averaged")
    if evalOnlyNew and glob(output_directory + "/mrpt-*.values"):
        print "already evaluated: skip, but still take into account:", output_directory
        return output_directory
    mkdir_p(output_directory)

    print "=> evaluate"

    # generate info.dat file with common metadata
    bc_infodat_files = ["%s/info.dat" % subdir for subdir in four_subdirs]
    bc_infodat_meta  = [parseHeader(getHeader(f)) for f in bc_infodat_files]
    combined_metadata = getCommonDictionary(bc_infodat_meta)
    combined_metadata["bc"] = "averaged"
    writeMetadictToFile("%s/info.dat" % output_directory, combined_metadata)

    # run mrptbc
    
    commandline = "mrptbc " + options + " --outputDirectory " + output_directory

    for bc, sd in tuple_list:
        commandline += " --info-" + bc + " " + sd + "/info.dat"

    for bc, sd in tuple_list:
        commandline += " %s/p*/associatedEnergy.series %s/p*/normMeanPhi.series" % (sd, sd)
        
    commandline += " ; exit 0"
    print commandline
    print output_directory
    stdout_and_stderr = subprocess.check_output(commandline, shell=True,
                                                stderr=subprocess.STDOUT)
    print stdout_and_stderr
    return output_directory
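A minimal driver sketch for eval_bc_subdirs, assuming the surrounding script has already grouped the (bc, subdir) tuples by their remaining metadata; the boundary-condition labels and directory names below are hypothetical, not taken from the original project.

# Hypothetical driver: each group holds the four boundary conditions of one
# parameter set and is passed to eval_bc_subdirs as a whole.
groups = {
    ("L8", "r1.0"): [("pbc",     "bcpbc_L8_r1.0"),
                     ("apbc-x",  "bcapbc-x_L8_r1.0"),
                     ("apbc-y",  "bcapbc-y_L8_r1.0"),
                     ("apbc-xy", "bcapbc-xy_L8_r1.0")],
}
evaluated_directories = []
for tuple_list in groups.values():
    out = eval_bc_subdirs(tuple_list)
    if out != "":
        evaluated_directories.append(out)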
Code Example #2
File: mrptbccollect.py  Project: maxhgerlach/detqmc
    def collect_mrpt_file(filename, mrpt_prefix):
        # get observable name
        observable_name = parseHeader(getHeader(filename))["observable"]
        
        output_filename = prefix + mrpt_prefix + variable + "-" + observable_name + "_" + \
                          multivalString + ".values"

        with open(output_filename, 'w') as output_file:
            # prepend commonmetadata, add key = variable
            output_file.write(commonHeader)
            for k,v in zip(multivalKeys, tup):
                output_file.write("# %s = %s" % (k, v) + "\n")
            output_file.write("# key = " + variable + "\n")

            # copy rest of file contents
            with open(filename, "r") as input_file:
                for line in input_file:
                    output_file.write(line) 
Code Example #3
def filterOutDone(taskCommandlineDictionary, newSweeps=0):
    filteredDictionary = OrderedDict()
    for subdir, commandline in taskCommandlineDictionary.iteritems():
        print subdir,
        subdirInfo = subdir + "/info.dat"
        if os.path.exists(subdirInfo):
            info = parseConf(getHeader(subdirInfo))
            sweeps = int(info["sweeps"][0])  # use newSweeps if set
            if newSweeps > sweeps:
                sweeps = newSweeps
            sweepsDone = int(info["sweepsDone"][0])
            thermalization = int(info["thermalization"][0])
            sweepsDoneThermalization = int(info["sweepsDoneThermalization"][0])
            if thermalization == sweepsDoneThermalization and sweeps == sweepsDone:
                print "job finished"
            else:
                print "will be continued"
                filteredDictionary.update({subdir: commandline})
        else:
            print
            filteredDictionary.update({subdir: commandline})
    return filteredDictionary
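A small usage sketch for filterOutDone, assuming an OrderedDict that maps each subdirectory to the command line that would continue it; the directory names and command strings are made up for illustration, and filterOutDone with its helpers (parseConf, getHeader) is assumed to be in scope.

from collections import OrderedDict

# Hypothetical task list: subdirectory -> command line that would continue it.
tasks = OrderedDict([
    ("bcpbc_L8_r1.0", "some_simulation_executable --sweeps 100000"),
    ("bcpbc_L8_r1.2", "some_simulation_executable --sweeps 100000"),
])
# Keep only jobs that are unfinished (or have no info.dat yet); passing
# newSweeps also re-queues jobs whose target sweep count is being raised.
remaining = filterOutDone(tasks, newSweeps=200000)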
Code Example #4
        subdir_candidates.append(root)

subdirs = [
    f for f in subdir_candidates
    if (f.startswith(prefix) or f.startswith("./" + prefix)) and glob(
        f + "/*.series"
    )  # exclude subdirs without time series -- e.g. pt top directories
    and ("simindex" in f and not "simindexjoined" in f
         )  # we want to have the simindex written into the directory name
]

#collect info.dat contents, maybe add simindex = 0
##################################################
infodata = {}
for sd in subdirs:
    header = parseHeader(getHeader(sd + "/info.dat"))
    if not header is None:
        infodata[sd] = header
        if not 'simindex' in infodata[sd]:
            infodata[sd]['simindex'] = '0'  # default simindex: 0
infodata = {k: v for k, v in infodata.iteritems() if v is not None}

# We need to group the subdirectories in groups that differ only by the simindex
################################################################################
groupable_list = []
# keys in the following list are removed from the metadata before sorting by the metadata
exclude_from_metadata = [
    "simindex",  # in one group we want to have all simindex values
    # compilation related keys:
    "ARMA_VERSION",
    "BOOST_LIB_VERSION",
Code Example #5
File: detptsubmit.py  Project: maxhgerlach/detqmc
def process(jobconffname,
            runningJobs,
            executable=None,
            newSaveInterval=0,
            newSweeps=0,
            excludeJobs=None):
    if excludeJobs is None:
        excludeJobs = []

    build_host = get_build_host()

    jobconf = parseConf(open(jobconffname, 'r'))

    options = jobconf.keys()

    # options to be passed to simulation instances
    simoptions = [opt for opt in options if opt not in possibleJobOptions]
    # the rest, to be replaced in the jobTemplate
    joboptions = [opt for opt in options if opt in possibleJobOptions]

    # if rValues is not specified explicitly in the job file, check if
    # parameters rMin, rMax and rCount are given. Then we can compute
    # rValues here as a linspace.
    rValues_specified = False
    for opt in simoptions:
        if opt == "rValues":
            rValues_specified = True
            break
        else:
            # possibility of having rValues in an option pair
            m = re.match(r'(.+):(.+)', opt)
            if m:
                if m.group(1) == "rValues" or m.group(2) == "rValues":
                    rValues_specified = True
                    break
    if not rValues_specified:
        try:
            jobconf["rValues"] = []
            for rMin_str, rMax_str, rCount_str in zip(jobconf["rMin"],
                                                      jobconf["rMax"],
                                                      jobconf["rCount"]):
                rMin = float(rMin_str)
                rMax = float(rMax_str)
                rCount = int(rCount_str)
                rValues = np.linspace(rMin, rMax, rCount)
                jobconf["rValues"].append(" ".join(
                    ["%.3f" % f for f in rValues]))
            simoptions.append("rValues")
            simoptions.remove("rMin")
            simoptions.remove("rMax")
            simoptions.remove("rCount")
            del jobconf["rMin"]
            del jobconf["rMax"]
            del jobconf["rCount"]
        except KeyError:
            raise Exception("No rValues specified")

    # filter out option value list of the form "option, [empty]"
    # ...
    optionsForJobName = []
    for opt in simoptions:
        values = jobconf[opt]
        if values[-1] == "":
            # the last value is empty
            # -> use this opt for the jobname, remove the empty entry from jobconf
            optionsForJobName.append(opt)
            del values[-1]

    if 'maindir' in joboptions:
        maindir = jobconf["maindir"][0]
    else:
        ##TODO: write to scratch
        # maindir =  os.path.relpath(os.getcwd(),
        #                       '/home/mgerlac2/heisenberg-kitaev/sim')
        maindir = os.getcwd()
    if 'jobprefix' in joboptions:
        jobprefix = jobconf["jobprefix"][0] + "_"
    else:
        jobprefix = ""
    # if 'cores' in joboptions:
    #     cores = jobconf['cores'][0]
    # else:
    #     cores = "1"
    if 'nodes' in joboptions:
        nodes = jobconf['nodes'][0]
    else:
        nodes = "1"
    if 'ntasks' in joboptions:
        ntasks = jobconf['ntasks'][0]
    else:
        ntasks = "1"
    if 'constraint' in joboptions:
        constraint = jobconf['constraint'][0]
    else:
        constraint = '""'
    if 'partition' in joboptions and jobconf['partition'][0] == 'devel':
        partitionOption = "--partition=devel"
    else:
        partitionOption = ""
    if 'modules' in joboptions:
        modules = jobconf['modules'][0]
    else:
        modules = "intel/15.0 mkl/11.2 intelmpi/5.0.3"

    if not executable:
        if 'executable' in joboptions:
            executable = jobconf['executable'][0]
        else:
            raise Exception("No simulation executable specified!")

    if build_host == "jureca":
        if int(ntasks) % 24 != 0:
            raise Exception("Jureca: ntasks must be a multple of 24")
        if int(ntasks) != int(nodes) * 24:
            raise Exception("Jureca: ntasks must be 24 * nodes")

    # additionally: specify opt1:opt2 pairs with values val1_a:val2_a, val1_b:val2_b, ...
    pairoptions = [opt for opt in simoptions if re.match(r'.+:.+', opt)]
    optionpairs = [opt.split(':', 1) for opt in pairoptions]
    pairoptionvaluepairs = [[
        valpair.split(':', 1) for valpair in jobconf[opt]
    ] for opt in pairoptions]

    nonpairoptions = [opt for opt in simoptions if not opt in pairoptions]
    nonpairvalues = [jobconf[opt] for opt in nonpairoptions]

    # create jobs for all possible combinations of multivaloption values...
    multivaloptions = [opt for opt in nonpairoptions if len(jobconf[opt]) > 1]

    #allsimvalues = [jobconf[opt] for opt in simoptions]

    # do not allow paired options that also occur as single/multival options:
    for (opt1, opt2) in optionpairs:
        for opt in (opt1, opt2):
            if opt in nonpairoptions:
                raise Exception(
                    "Error: %s occurs both as paired and as non-paired option"
                    % opt)

    nonpairvalues_combinations = [
        vals for vals in itertools.product(*nonpairvalues)
    ]

    # list of option->value dictionaries
    optvals = []
    for values in nonpairvalues_combinations:
        optval_shell = OrderedDict([
            (opt, val) for (opt, val) in zip(nonpairoptions, values)
        ])
        if pairoptions == []:
            optvals.append(optval_shell)
        else:
            ## add all combinations of pair-option values

            optvals_to_be_added = [optval_shell]

            for (opt1, opt2), valpairs in zip(optionpairs,
                                              pairoptionvaluepairs):
                unfinished_optvals = optvals_to_be_added
                optvals_to_be_added = []

                for u_o in unfinished_optvals:
                    for (val1, val2) in valpairs:
                        optval = u_o.copy()
                        optval[opt1] = val1
                        optval[opt2] = val2
                        optvals_to_be_added.append(optval)

            optvals.extend(optvals_to_be_added)

    # multivaloptions is used to create the job identifier. To make things more consistent,
    # from now on also include the pairoptions in multivaloptions:
    multivaloptions += [opt for optpair in optionpairs for opt in optpair]
    # also include those options explicitly set to be used for the jobname
    multivaloptions += [opt for opt in optionsForJobName]
    # Make unique!
    multivaloptions = list(set(multivaloptions))

    # iterate over all possible combinations of simulation values.
    for optval in optvals:
        # potentially update save interval and sweeps
        if newSaveInterval != 0 and newSaveInterval != int(
                optval['saveInterval']):
            optval['saveInterval'] = str(newSaveInterval)
        if newSweeps > int(optval['sweeps']):
            optval['sweeps'] = str(newSweeps)

        commandline = executable + " " + " ".join(
            ["--%s %s" % (opt, val) for opt, val in optval.items()])
        subdir = jobprefix + "_".join(
            ["%s%s" % (opt, optval[opt]) for opt in multivaloptions])
        print subdir,

        if subdir in runningJobs:
            print "already running, skipping"
            continue
        if subdir in excludeJobs:
            print "excluding this jobname, skipping"
            continue

        subdirInfo = subdir + "/info.dat"
        if os.path.exists(subdirInfo):
            info = parseConf(getHeader(subdirInfo))
            sweeps = int(info["sweeps"][0])  # use newSweeps if set
            if newSweeps > sweeps:
                sweeps = newSweeps
            sweepsDone = int(info["sweepsDone"][0])
            thermalization = int(info["thermalization"][0])
            sweepsDoneThermalization = int(info["sweepsDoneThermalization"][0])
            if thermalization == sweepsDoneThermalization and sweeps == sweepsDone:
                print "job finished"
                continue
            else:
                print "will be continued",

        print

        print commandline

        if build_host == "cheops":
            # Add some variation to the requested wall times (add up to 30
            # extra minutes)
            extra_seconds = random.randint(0, 30 * 60)
        else:
            extra_seconds = 0
        walltime = secondsToTimeString(
            int(walltimeInSeconds(jobconf['walltime'][0])) + extra_seconds)
        if 'mintime' in joboptions:
            mintime = secondsToTimeString(
                int(walltimeInSeconds(jobconf['mintime'][0])) + extra_seconds)
        else:
            mintime = walltime

        if build_host == "cheops":
            jobTemplate = jobTemplate_cheops
        elif build_host == "jureca":
            jobTemplate = jobTemplate_jureca

        job = jobTemplate
        job = job.replace("%mintime", mintime)
        job = job.replace("%walltime", walltime)
        for jobopt in joboptions:
            job = job.replace("%" + jobopt, jobconf[jobopt][0])
        job = job.replace("%constraint", constraint)
        # job = job.replace("%cores", cores)
        job = job.replace("%nodes", nodes)
        job = job.replace("%ntasks", ntasks)
        job = job.replace("%maindir", maindir)
        job = job.replace("%subdir", subdir)
        job = job.replace("%commandline", commandline)
        job = job.replace("%modules", modules)
        # job = job.replace("%wtimeseconds", walltimeInSeconds(jobconf["walltime"][0]))

        jobname = subdir
        jobfilename = "%s/job.sh" % subdir
        outputfile = "%s/output.%%j.log" % subdir  # %j is replaced by the job allocation number
        errorfile = "%s/error.%%j.log" % subdir
        qsubcommand = "sbatch %s --job-name=%s --output=%s --error=%s %s" % (
            partitionOption, jobname, outputfile, errorfile, jobfilename)

        # resubmitting the same job from within the job if it exited gracefully
        if build_host == "cheops":
            loginNode = "cheops0"
        elif build_host == "jureca":
            loginNode = "jrl02"
        resubmit_outputfile = "$WORKDIR/output.%j.log"  # %j is replaced by the job allocation number
        resubmit_errorfile = "$WORKDIR/error.%j.log"
        qresubcommand = "ssh $USER@%s sbatch %s --job-name=%s --output=%s --error=%s $WORKDIR/job.sh" % (
            loginNode, partitionOption, jobname, resubmit_outputfile,
            resubmit_errorfile)
        job = job.replace("%qresubcommand", qresubcommand)

        if build_host == "cheops":
            # afterwards submit a job to copy to /projects.
            rsyncJob = rsyncJobTemplate.replace('%maindir', maindir).replace(
                "%subdir", subdir)
            rsyncJobname = "rsync-" + jobname
            rsyncJobfilename = "%s/rsyncjob.sh" % subdir
            rsync_outputfile = "$WORKDIR/rsync-output.%j.log"  # %j is replaced by the job allocation number
            rsync_errorfile = "$WORKDIR/rsync-error.%j.log"
            mkdir_p(subdir)
            with open(rsyncJobfilename, 'w') as rsyncJobfile:
                rsyncJobfile.write(rsyncJob)
            qrsyncsubcommand = "ssh $USER@cheops0 sbatch --job-name=%s --output=%s --error=%s $WORKDIR/rsyncjob.sh" % (
                rsyncJobname, rsync_outputfile, rsync_errorfile)
            job = job.replace("%qrsyncsubcommand", qrsyncsubcommand)

        # put master job script into a file
        mkdir_p(subdir)
        with open(jobfilename, 'w') as jobfile:
            jobfile.write(job)

        # submit job
        try:
            stdout_and_stderr = check_output(qsubcommand,
                                             shell=True,
                                             stderr=subprocess.STDOUT)
            print stdout_and_stderr
        except subprocess.CalledProcessError, e:
            print "Command:", e.cmd
            print "Error:", e.output
            raise
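A hedged sketch of how process might be invoked from the submit script. The job file name, the list of running jobs, and the executable name are placeholders, not values from the original project, and the call assumes the script's globals (possibleJobOptions, job templates, etc.) are set up.

# Hypothetical invocation: submit or extend all jobs described in "simulation.job",
# skipping jobs that are currently in the queue.
running = ["bcpbc_L8_r1.0"]          # e.g. parsed from the queue status
process("simulation.job", running,
        executable="my_detqmc_executable",   # placeholder executable name
        newSweeps=200000)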
Code Example #6
File: cpi_to_cp.py  Project: maxhgerlach/detqmc
#!/usr/bin/env python

import re
import sys
import numpy as np
from scripthelpers import getHeader, parseHeader

# Meta files for the replica exchange process contain one column with
# the control parameter index ("cpi") and one with the relevant data.
# This script replaces the cpi column with the actual control parameter values.



if __name__ == "__main__":
    assert len(sys.argv) == 3
    filename = sys.argv[1]
    outputfilename = sys.argv[2]

    header = getHeader(filename)
    
    data = np.loadtxt(filename)
    cpi = data[:,0]

    cpv = np.array([float(s) for s in parseHeader(header)["controlParameterValues"].split(" ")])
    assert len(cpv) == len(cpi)

    data[:,0] = cpv

    np.savetxt(outputfilename, data, header="".join(header).replace("control parameter index", "control parameter value"))
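The core of the script is a single column swap. Below is a self-contained sketch of that step with synthetic data; the getHeader/parseHeader handling is omitted and the output file name is a placeholder.

import numpy as np

# Synthetic stand-in for a meta file: column 0 is the control parameter index
# 0..3, column 1 is some observable; cpv plays the role of the header's
# "controlParameterValues" entry.
data = np.column_stack([np.arange(4, dtype=float),
                        np.array([0.11, 0.25, 0.42, 0.58])])
cpv = np.array([0.8, 0.9, 1.0, 1.1])
assert len(cpv) == data.shape[0]

data[:, 0] = cpv                      # index column -> actual parameter values
np.savetxt("out.dat", data)           # hypothetical output file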
Code Example #7
print("pruned subdir_candidates for prefix")
sys.stdout.flush()

# potentially append "_" for the output prefix
if prefix != "":
    if prefix[-1:] == "-":
        prefix = prefix[:-1] + "_"  # replace "-" in tail by "_"
    elif prefix[-1:] != "_":
        # append a "_" if it is not there already
        prefix = prefix + "_"

#collect info.dat contents (only common entries), potentially prune non simindexjoined
######################################################################################
infodata = {}
for sd in subdirs:
    header = parseHeader(getHeader(sd + "/info.dat"))
    if not header is None:
        if only_simindexjoined:
            if not "simindex" in header or header["simindex"] != "joined":
                # ignore individual simindex data
                continue
            infodata[sd] = header
        else:
            if "simindex" in header and header["simindex"] == "joined":
                # only take into account individual simindex data
                continue
            infodata[sd] = header
subdirs = infodata.keys()
# print subdirs

print("collected subdir info.dat contents")
Code Example #8
File: mrptcollect.py  Project: yogeshwar2609/detqmc
def is_simindexjoined_subdir(sd):
    header = parseHeader(getHeader(sd + "/info.dat"))
    if not header is None:
        if "simindex" in header and header["simindex"] == "joined":
            return True
    return False
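A sketch of how the predicate might be applied, assuming the subdirs list collected earlier in the script is in scope; the variable names introduced here are illustrative.

# Hypothetical split of the collected subdirectories into "simindex = joined"
# results and individual-simindex runs, using the predicate above.
joined_subdirs     = [sd for sd in subdirs if is_simindexjoined_subdir(sd)]
individual_subdirs = [sd for sd in subdirs if not is_simindexjoined_subdir(sd)]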
Code Example #9
File: mrptcollect.py  Project: yogeshwar2609/detqmc
    if sd != '':
        subdirs.remove(sd)



# collect mrpt results and metadata
###################################

# helper:
def addControlParameterCount(meta_dict):
    if "controlParameterValues" in meta_dict:
        meta_dict["controlParameterCount"] = str(len(meta_dict["controlParameterValues"].split()))
    return meta_dict

# map: subdirectory -> metadata dictionary [for replica exchange simulations: count controlParameterValues]
metadata = {sd: addControlParameterCount(parseHeader(getHeader(sd + "/info.dat"))) for sd in subdirs}
# prune subdirectories with empty metadata
metadata = {sd:meta for sd,meta in metadata.iteritems() if meta is not None}

# go over all the metadata dictionaries, each time take the keys of those dictionaries, then find all 
# the common ones (set intersection)
commonkeys = set.intersection(*(set(d.iterkeys()) for d in metadata.itervalues())) # * : reverse of zip
# map commonkeys -> metadata ; only if metadata also equal
commonmetadata = dict(set.intersection(*(set(d.iteritems()) for d in metadata.itervalues())))
try:
    del commonmetadata['jkBlocks'] # remove metadata that is no longer valid for the eval-results
except KeyError:
    pass

variable = commonmetadata["controlParameterName"]
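The two set.intersection calls above are the central idiom: dictionary keys and (key, value) items become sets, and intersecting them keeps only what is identical across all subdirectories. A stand-alone illustration with made-up metadata (Python 2, like the surrounding snippets):

meta_a = {"model": "sdw", "L": "8",  "rngSeed": "123"}
meta_b = {"model": "sdw", "L": "10", "rngSeed": "456"}
metadata = {"subdir_a": meta_a, "subdir_b": meta_b}

# keys present in every dictionary:
commonkeys = set.intersection(*(set(d.iterkeys()) for d in metadata.itervalues()))
# -> set(["model", "L", "rngSeed"])

# (key, value) pairs identical in every dictionary:
commonmetadata = dict(set.intersection(*(set(d.iteritems()) for d in metadata.itervalues())))
# -> {"model": "sdw"}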
Code Example #10
def find_intersection_for_subdirs(tuple_list):
    """if possible call mrpt-binderratio-intersect for subdirs in tuple_list,
       return output_directory if not skipped from evaluation
    
       tuple_list : [(L, bc, subdir), ...]. return output directory if successful """

    if len(tuple_list) % 4 != 0:
        print "=> number of subdirectories should be multiple of 4, skipping"
        return ""
    
    # map: L -> bc -> subdir
    map_L_bc_subdir = { }
    for L, bc, subdir in tuple_list:
        map_L_bc_subdir.setdefault(int(L), { })[bc] = subdir
    
    print map_L_bc_subdir

    for L in map_L_bc_subdir:
        if len(map_L_bc_subdir[L].keys()) != 4:
            print "Wrong number of bc subdirs for L =", L, "=> skipping"
            return ""

    my_subdirs = [subdir for (L, bc, subdir) in tuple_list]
    # already done earlier: removed directories not containing any time series files

    # Find control_file for our group of subdirectories, if we have one.
    # Otherwise, skip this directory.
    my_cf = None
    my_cf_meta = None
    for cf, meta in controlFilesMetaData.items():
        if dictContainedInDict(meta, infodata[my_subdirs[0]]):
            my_cf = cf
            my_cf_meta = meta
            break
    if my_cf is None:
        return ""
    print "control file:", my_cf
    for sd in my_subdirs[1:]:
        if not dictContainedInDict(my_cf_meta, infodata[sd]):
            print "Error: control file does not match subdirectory:", sd
            return ""

    # get information (4 columns) about where to look for L-pair
    # Binder cumulant intersections
    L1_L2_cpMin_cpMax = np.loadtxt(my_cf, ndmin=2)

    output_directory = prefix + "mrptbc-binderratio-intersect"
    for key, value in my_cf_meta.items():
        output_directory += "_" + key + value
    output_directory += "_bcaveraged"
    mkdir_p(output_directory)
    print output_directory

    print "=> evaluate"

    # generate info.dat file with common metadata
    L_infodat_files = ["%s/info.dat" % subdir for subdir in my_subdirs]
    L_infodat_meta  = [parseHeader(getHeader(f)) for f in L_infodat_files]
    combined_metadata = getCommonDictionary(L_infodat_meta)
    combined_metadata["bc"] = "averaged"
    writeMetadictToFile("%s/info.dat" % output_directory, combined_metadata)
    
    # make mrptbc-binderratio-intersect calls
    for L1, L2, cpMin, cpMax in L1_L2_cpMin_cpMax:
        L1 = int(L1)
        L2 = int(L2)
        if evalOnlyNew and glob(output_directory + "/mrptbc-binder-intersect-l%dl%d.dat" % (L1,L2)):
            print "already evaluated: skip, but still take into account: (%d, %d)" % (L1,L2)
            continue
        
        sd1_bc = {}
        sd2_bc = {}
        info1_bc = {}
        info2_bc = {}

        for bc in all_bc:
            sd1_bc[bc] = map_L_bc_subdir[L1][bc]
            sd2_bc[bc] = map_L_bc_subdir[L2][bc]          
            info1_bc[bc] = sd1_bc[bc] + "/info.dat"
            info2_bc[bc] = sd2_bc[bc] + "/info.dat"

        print "Finding Binder-ratio intersection for L1=", L1, ", L2=", L2

        commandline = "mrptbc-binderratio-intersect " + options \
                      + " --outputDirectory " + output_directory \
                      + " --cp-range %f %f" % (cpMin, cpMax)
        for bc in all_bc:
            commandline += " --info1-%s %s --info2-%s %s" % (bc, info1_bc[bc], bc, info2_bc[bc])

        for bc in all_bc:
            for sd in [sd1_bc[bc], sd2_bc[bc]]:
                commandline += " %s/p*/associatedEnergy.series %s/p*/normMeanPhi.series" % (sd, sd)
        
        commandline += " ; exit 0"

        print "commandline:", commandline

        stdout_and_stderr = subprocess.check_output(commandline, shell=True,
                                                    stderr=subprocess.STDOUT)
        print stdout_and_stderr

    return output_directory
Code Example #11
# potentially append "_" for the output prefix
if prefix != "":
    if prefix[-1:] == "-":
        prefix = prefix[:-1] + "_" # replace "-" in tail by "_"
    elif prefix[-1:] != "_":
        # append a "_" if it is not there already
        prefix = prefix + "_"



#collect info.dat contents
##########################
infodata = {}
for sd in subdirs:
    header = parseHeader(getHeader(sd + "/info.dat"))
    if not header is None:
        infodata[sd] = header


# collect controlFiles metadata
controlFilesMetaData = {}
for cf in controlFiles:
    header = parseHeader(getHeader(cf))
    if not header is None:
        controlFilesMetaData[cf] = header
    else:
        print "control file", cf, "does not contain metadata"
        

Code Example #12
print "pruned subdir_candidates for prefix"
sys.stdout.flush()

# potentially append "_" for the output prefix
if prefix != "":
    if prefix[-1:] == "-":
        prefix = prefix[:-1] + "_"  # replace "-" in tail by "_"
    elif prefix[-1:] != "_":
        # append a "_" if it is not there already
        prefix = prefix + "_"

#collect info.dat contents (only common entries), potentially prune non simindexjoined
######################################################################################
infodata = {}
for sd in subdirs:
    header = parseHeader(getHeader(sd + "/info.dat"))
    if not header is None:
        if only_simindexjoined:
            if not "simindex" in header or header["simindex"] != "joined":
                # ignore individual simindex data
                continue
            infodata[sd] = header
        else:
            if "simindex" in header and header["simindex"] == "joined":
                # only take into account individual simindex data
                continue
            infodata[sd] = header
subdirs = infodata.keys()
# print subdirs

print "collected subdir info.dat contents"
Code Example #13
                                             d)

    ## for each group find the p*_r* data directories; also read out
    ## the property "saveConfigurationStreamInterval", which we will
    ## use to adjust sweeps_discard
    data_directories = {}
    saveConfigurationStreamInterval = {}
    for key in groups.keys():
        example_source_dir = os.path.join(source_toplevel, groups[key][0])
        data_directories[key] = [
            d for d in os.listdir(example_source_dir) if d.startswith("p")
            and os.path.isdir(os.path.join(example_source_dir, d))
        ]
        try:
            meta = parseConf(
                getHeader(os.path.join(example_source_dir, "info.dat")))
            # scsi = int(meta['saveConfigurationStreamInterval'])
            scsi = int(meta['saveConfigurationStreamInterval'][0])
        except KeyError:
            scsi = 1
        saveConfigurationStreamInterval[key] = scsi

    ## handle key after key
    for key in groups.keys():
        print key,
        print "==> consider running sdwcorr"

        input_directories = [
            os.path.join(source_toplevel, d) for d in groups[key]
        ]
        output_directory = os.path.join(dest_toplevel, target_directories[key])
Code Example #14
#     print subdir
sys.stdout.flush()

# potentially append "_" for the output prefix
if prefix != "":
    if prefix[-1:] == "-":
        prefix = prefix[:-1] + "_"  # replace "-" in tail by "_"
    elif prefix[-1:] != "_":
        # append a "_" if it is not there already
        prefix = prefix + "_"

#collect info.dat contents (only common entries), potentially prune non simindexjoined
######################################################################################
infodata = {}
for sd in subdirs:
    header = parseHeader(getHeader(sd + "/info.dat"))
    if not header is None:
        if only_simindexjoined and "simindex" in header and header[
                "simindex"] != "joined":
            continue
        else:
            infodata[sd] = header
subdirs = infodata.keys()

# for subdir in infodata.keys():
#     print subdir

# collect controlFiles metadata
controlFilesMetaData = {}
for cf in controlFiles:
    header = parseHeader(getHeader(cf))
Code Example #15
File: jobsummary.py  Project: yogeshwar2609/detqmc

if __name__ == '__main__':
    subdir_jobs = getSubdirs()
    
    subdirs_with_info = [s for s in subdir_jobs if os.path.exists(s + "/info.dat")]

    interesting_fields = ['sweeps', 'sweepsDone', 'thermalization', 'sweepsDoneThermalization', 'totalWallTimeSecs']

    Jobinfo = namedtuple('Jobinfo', interesting_fields)
    subdir_jobinfo = {}
    subdir_replica_count = {}
    finished_jobs = []
    incomplete_jobs = []
    for s in subdirs_with_info:
        info = parseConf(getHeader(s + "/info.dat"))

        try:
            jinfo = Jobinfo(*[int(info[f][0]) for f in interesting_fields])
        except KeyError:
            # info without entries like sweepsDone, don't show
            # (probably is simindexjoined)
            continue

        subdir_jobinfo[s] = jinfo

        if jinfo.sweepsDone == jinfo.sweeps and jinfo.thermalization == jinfo.sweepsDoneThermalization:
            finished_jobs.append(s)
        else:
            incomplete_jobs.append(s)
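A hypothetical continuation that summarizes the two lists built above; the output format is an assumption, not part of the original script.

    # Hypothetical summary (Python 2 print statements, matching the project scripts).
    print "%d jobs finished, %d incomplete" % (len(finished_jobs), len(incomplete_jobs))
    for s in incomplete_jobs:
        ji = subdir_jobinfo[s]
        print s, "%d / %d sweeps done" % (ji.sweepsDone, ji.sweeps)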
Code Example #16
def find_intersection_for_subdirs(tuple_list):
    """if possible call mrpt-binderratio-intersect for subdirs in tuple_list,
       return output_directory if not skipped from evaluation
    
       tuple_list : [(L, subdir), ...]. return output directory if successful """

    print tuple_list

    if len(tuple_list) < 2:
        print "=> too few subdirectories, skip"
        return ""

    my_subdirs = [subdir for (L, subdir) in tuple_list]
    L_to_subdirs = {int(L): subdir for (L, subdir) in tuple_list}
    my_subdirs = L_to_subdirs.values()
    # already done earlier: removed directories not containing any time series files

    # Find control_file for our group of subdirectories, if we have one.
    # Otherwise, skip this directory.
    my_cf = None
    my_cf_meta = None
    for cf, meta in controlFilesMetaData.items():
        if dictContainedInDict(meta, infodata[my_subdirs[0]]):
            my_cf = cf
            my_cf_meta = meta
            break
    if my_cf is None:
        return ""
    print "control file:", my_cf
    for sd in my_subdirs[1:]:
        if not dictContainedInDict(my_cf_meta, infodata[sd]):
            print "Error: control file does not match subdirectory:", sd
            return ""

    # get information (4 columns) about where to look for L-pair
    # Binder cumulant intersections
    L1_L2_cpMin_cpMax = np.loadtxt(my_cf, ndmin=2)

    output_directory = prefix + "mrpt-binderratio-intersect"
    for key, value in my_cf_meta.items():
        output_directory += "_" + key + value
    mkdir_p(output_directory)
    print output_directory

    print "=> evaluate"

    # generate info.dat file with common metadata
    L_infodat_files = ["%s/info.dat" % subdir for subdir in my_subdirs]
    L_infodat_meta = [parseHeader(getHeader(f)) for f in L_infodat_files]
    combined_metadata = getCommonDictionary(L_infodat_meta)
    writeMetadictToFile("%s/info.dat" % output_directory, combined_metadata)

    # make mrpt-binderratio-intersect calls
    for L1, L2, cpMin, cpMax in L1_L2_cpMin_cpMax:
        L1 = int(L1)
        L2 = int(L2)
        if evalOnlyNew and glob(output_directory +
                                "/mrpt-binder-intersect-l%dl%d.dat" %
                                (L1, L2)):
            print "already evaluated: skip, but still take into account: (%d, %d)" % (
                L1, L2)
            continue

        sd1 = L_to_subdirs[L1]
        sd2 = L_to_subdirs[L2]
        info1 = sd1 + "/info.dat"
        info2 = sd2 + "/info.dat"

        print "Finding Binder-ratio intersection for L1=", L1, ", L2=", L2

        commandline = "mrpt-binderratio-intersect " + options \
                      + " --outputDirectory " + output_directory \
                      + " --info1 " + info1 + " --info2 " + info2 \
                      + " --cp-range %f %f" % (cpMin, cpMax)
        for sd in [sd1, sd2]:
            commandline += " %s/p*/associatedEnergy.series %s/p*/normMeanPhi.series" % (
                sd, sd)

        commandline += " ; exit 0"

        print "commandline:", commandline

        stdout_and_stderr = subprocess.check_output(commandline,
                                                    shell=True,
                                                    stderr=subprocess.STDOUT)
        print stdout_and_stderr

    return output_directory
Code Example #17
File: mrptbccollect.py  Project: maxhgerlach/detqmc
# potentially append "_" for the output prefix
if prefix != "":
    if prefix[-1:] == "-":
        prefix = prefix[:-1] + "_" # replace "-" in tail by "_"
    elif prefix[-1:] != "_":
        # append a "_" if it is not there already
        prefix = prefix + "_"



#collect info.dat contents
##########################
infodata = {}
for sd in subdirs:
    header = parseHeader(getHeader(sd + "/info.dat"))
    if not header is None:
        infodata[sd] = header
infodata = {k:v for k,v in infodata.iteritems() if v is not None}



# We need to group the subdirectories in groups that differ only by the boundary conditions
###########################################################################################
groupable_list = []
# keys in the following list are removed from the metadata before sorting by the metadata
exclude_from_metadata = ["bc", # in one group we want to have all 4 possible b.c.'s
                         # compilation related keys:
                         "ARMA_VERSION", "BOOST_LIB_VERSION", "buildDate", "buildHost", "buildTime", "cppflags", "cxxflags", "gitBranch", "gitRevisionHash",
                         # rng seed will be different:
                         "rngSeed",