import csv
import itertools
import os

from credo.modelrun import ModelRun, SimParams
import credo.modelsuite as msuite
from credo.modelsuite import ModelSuite, StgXMLVariant
import credo.jobrunner
import credo.analysis.modelplots as modelplots

# Ensure the base output directory for the resolution-scaling test exists.
outPathBase = os.path.join('output','resScalingTest')
if not os.path.exists(outPathBase):
    os.makedirs(outPathBase)

# Template model: the Rayleigh-Taylor benchmark, capped at 2 timesteps -
# enough to measure per-step cost without running the full overturn.
defParams = SimParams(nsteps=2)
mRun = ModelRun("RayTayOverturn", "RayleighTaylorBenchmark.xml",
    simParams=defParams)

mSuite = ModelSuite(outPathBase, templateMRun=mRun)
mSuite.resRange = [8, 16, 32, 64, 96, 128, 150, 180, 256]
# Vary element resolution in I and J together: both variants share resRange,
# and izip below pairs them index-by-index (square meshes, no cross-product).
for varName, paramName in (("elResI", "elementResI"), ("elResJ", "elementResJ")):
    mSuite.addVariant(varName, StgXMLVariant(paramName, mSuite.resRange))
# TODO: would be nice to have a simpler subpath modifier
mSuite.subOutputPathGenFunc = msuite.getSubdir_RunIndexAndText
mSuite.generateRuns(iterGen=itertools.izip)

#-----------------------------

def reporting(mSuite, mResults):
    timePerEls = modelplots.getTimePerEls(mSuite.runs, mResults)
    print "Time per els were: %s" % timePerEls
    csvName = os.path.join(outPathBase, "runPerfInfo.csv")
    csvFile = open(csvName, "wb")
    wtr = csv.writer(csvFile)
    wtr.writerow(["Run", "res(els)", "walltime (sec)", "max mem (MB)",