Code example #1
File: modelchooser.py Project: AndreCAndersen/nupic
  def __init__(self,  jobID, jobsDAO, logLevel = None):
    """TODO: Documentation """

    self._jobID = jobID
    self._cjDB = jobsDAO
    self._lastUpdateAttemptTime = 0
    initLogging(verbose = True)
    self.logger = logging.getLogger(".".join( ['com.numenta',
                       self.__class__.__module__, self.__class__.__name__]))
    if logLevel is not None:
      self.logger.setLevel(logLevel)

    self.logger.info("Created new ModelChooser for job %s" % str(jobID))
Code example #2
File: experiment_runner.py Project: Erichy94/nupic
def main():
  """ Module-level entry point.  Run according to options in sys.argv

  Usage: python -m nupic.frameworks.opf.experiment_runner

  """
  initLogging(verbose=True)

  # Initialize pseudo-random number generators (PRNGs)
  #
  # This will fix the seed that is used by numpy when generating 'random'
  # numbers. This allows for repeatability across experiments.
  initExperimentPrng()

  # Run it!
  runExperiment(sys.argv[1:])
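
A minimal sketch of driving the same entry points directly instead of via the command line. The import paths are assumptions (initLogging from nupic.support; runExperiment and initExperimentPrng from the experiment_runner module shown above), and the experiment path is a placeholder.

from nupic.support import initLogging                       # assumed import path
from nupic.frameworks.opf.experiment_runner import (
    runExperiment, initExperimentPrng)                       # assumed import path

initLogging(verbose=True)
initExperimentPrng()                    # fix the numpy seed for repeatability
runExperiment(["path/to/experiment"])   # placeholder experiment directory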
Code example #3
def main():
    """ Module-level entry point.  Run according to options in sys.argv

  Usage: python -m python -m nupic.frameworks.opf.experiment_runner

  """
    initLogging(verbose=True)

    # Initialize pseudo-random number generators (PRNGs)
    #
    # This will fix the seed that is used by numpy when generating 'random'
    # numbers. This allows for repeatability across experiments.
    initExperimentPrng()

    # Run it!
    runExperiment(sys.argv[1:])
Code example #4
def main():
  """Run according to options in sys.argv and diff classifiers."""
  initLogging(verbose=True)

  # Initialize PRNGs
  initExperimentPrng()

  # Mock out the creation of the CLAClassifier.
  @staticmethod
  def _mockCreate(*args, **kwargs):
    kwargs.pop('implementation', None)
    return CLAClassifierDiff(*args, **kwargs)
  CLAClassifierFactory.create = _mockCreate

  # Run it!
  runExperiment(sys.argv[1:])
Code example #5
def main():
  """Run according to options in sys.argv and diff classifiers."""
  initLogging(verbose=True)

  # Initialize PRNGs
  initExperimentPrng()

  # Mock out the creation of the SDRClassifier.
  @staticmethod
  def _mockCreate(*args, **kwargs):
    kwargs.pop('implementation', None)
    return SDRClassifierDiff(*args, **kwargs)
  SDRClassifierFactory.create = _mockCreate

  # Run it!
  runExperiment(sys.argv[1:])
Code example #6
def main():
  """Run according to options in sys.argv and diff classifiers."""
  # Init the NuPic logging configuration from the nupic-logging.conf
  # configuration file. This is found either in the NTA_CONF_DIR directory
  # (if defined) or in the 'conf' subdirectory of the NuPic install location.
  initLogging(verbose=True)

  # Initialize PRNGs
  initExperimentPrng()

  # Mock out the creation of the CLAClassifier.
  @staticmethod
  def _mockCreate(*args, **kwargs):
    kwargs.pop('implementation', None)
    return CLAClassifierDiff(*args, **kwargs)
  CLAClassifierFactory.create = _mockCreate

  # Run it!
  runExperiment(sys.argv[1:])
Code example #7
def main():
    """Run according to options in sys.argv and diff classifiers."""
    # Init the NuPic logging configuration from the nupic-logging.conf
    # configuration file. This is found either in the NTA_CONF_DIR directory
    # (if defined) or in the 'conf' subdirectory of the NuPic install location.
    initLogging(verbose=True)

    # Initialize PRNGs
    initExperimentPrng()

    # Mock out the creation of the CLAClassifier.
    @staticmethod
    def _mockCreate(*args, **kwargs):
        kwargs.pop('implementation', None)
        return CLAClassifierDiff(*args, **kwargs)

    CLAClassifierFactory.create = _mockCreate

    # Run it!
    runExperiment(sys.argv[1:])
Code example #8
def main(argv):
  """
  The main function of the HypersearchWorker script. This parses the command
  line arguments, instantiates a HypersearchWorker instance, and then
  runs it.

  Parameters:
  ----------------------------------------------------------------------
  retval:     jobID of the job we ran. This is used by unit test code
                when calling this worker using the --params command
                line option (which tells this worker to insert the job
                itself).
  """

  parser = OptionParser(helpString)

  parser.add_option("--jobID", action="store", type="int", default=None,
        help="jobID of the job within the dbTable [default: %default].")

  parser.add_option("--modelID", action="store", type="str", default=None,
        help=("Tell worker to re-run this model ID. When specified, jobID "
         "must also be specified [default: %default]."))

  parser.add_option("--workerID", action="store", type="str", default=None,
        help=("workerID of the scheduler's SlotAgent (GenericWorker) that "
          "hosts this SpecializedWorker [default: %default]."))

  parser.add_option("--params", action="store", default=None,
        help="Create and execute a new hypersearch request using this JSON " \
        "format params string. This is helpful for unit tests and debugging. " \
        "When specified jobID must NOT be specified. [default: %default].")

  parser.add_option("--clearModels", action="store_true", default=False,
        help="clear out the models table before starting [default: %default].")

  parser.add_option("--resetJobStatus", action="store_true", default=False,
        help="Reset the job status before starting  [default: %default].")

  parser.add_option("--logLevel", action="store", type="int", default=None,
        help="override default log level. Pass in an integer value that "
        "represents the desired logging level (10=logging.DEBUG, "
        "20=logging.INFO, etc.) [default: %default].")

  # Evaluate command line arguments
  (options, args) = parser.parse_args(argv[1:])
  if len(args) != 0:
    raise RuntimeError("Expected no command line arguments, but got: %s" % \
                        (args))

  if (options.jobID and options.params):
    raise RuntimeError("--jobID and --params can not be used at the same time")

  if (options.jobID is None and options.params is None):
    raise RuntimeError("Either --jobID or --params must be specified.")

  initLogging(verbose=True)

  # Instantiate the HypersearchWorker and run it
  hst = HypersearchWorker(options, argv[1:])

  # Normal use. This is one among a number of workers. If we encounter
  #  an exception at the outer loop here, we fail the entire job.
  if options.params is None:
    try:
      jobID = hst.run()

    except Exception, e:
      jobID = options.jobID
      msg = StringIO.StringIO()
      print >>msg, "%s: Exception occurred in Hypersearch Worker: %r" % \
         (ErrorCodes.hypersearchLogicErr, e)
      traceback.print_exc(None, msg)

      completionReason = ClientJobsDAO.CMPL_REASON_ERROR
      completionMsg = msg.getvalue()
      hst.logger.error(completionMsg)

      # If no other worker already marked the job as failed, do so now.
      jobsDAO = ClientJobsDAO.get()
      workerCmpReason = jobsDAO.jobGetFields(options.jobID,
          ['workerCompletionReason'])[0]
      if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
        jobsDAO.jobSetFields(options.jobID, fields=dict(
            cancel=True,
            workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR,
            workerCompletionMsg = completionMsg),
            useConnectionID=False,
            ignoreUnchanged=True)
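
Based on the option parser above, a hedged driving sketch (the argv values are illustrative, not from the source): either point the worker at an existing job, or pass a JSON params string so the worker inserts the job itself.

# Re-run an existing job from the jobs table (jobID is hypothetical):
main(["HypersearchWorker.py", "--jobID", "1234", "--logLevel", "20"])

# Or create and run a new hypersearch request from a JSON params string
# (paramsJSON is a hypothetical variable holding the JSON request):
main(["HypersearchWorker.py", "--params", paramsJSON])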
Code example #9
File: aggregation_test.py Project: AI-Cdrone/nupic
    tempFile = handle.name
    handle.close()    

    outputFile = generateDataset(ai, 'weighted_mean.csv', tempFile)

    result = []
    with FileRecordStream(outputFile) as f:
      print f.getFields()
      for r in f:
        result.append(r)

    self.assertEqual(result[0][0], 6.0)
    self.assertEqual(result[0][1], 1.0)
    self.assertEqual(result[1][0], 1.0)
    self.assertEqual(result[1][1], 52.0)
    self.assertEqual(result[2][0], 4.0)
    self.assertEqual(result[2][1], 0.0)
    self.assertEqual(result[3][0], None)
    self.assertEqual(result[3][1], 0.0)
    return


if __name__=='__main__':
  nupic_support.initLogging()

  # Add verbosity to unittest output (so it prints a header for each test)
  #sys.argv.append("--verbose")

  # Run the test
  unittest.TestProgram()
Code example #10
def main(argv):
    parser = OptionParser(helpString)

    parser.add_option("--jobID",
                      action="store",
                      type="int",
                      default=None,
                      help="jobID of the hypersearch job [default: %default].")

    # Evaluate command line arguments
    options, args = parser.parse_args(argv[1:])
    if len(args) != 0:
        raise RuntimeError("Expected no command line arguments but got: %s" %
                           args)

    initLogging(verbose=True)

    # Open up the database client
    cjDAO = ClientJobsDAO.get()

    # Read in the models for this job
    modelIDCtrList = cjDAO.modelsGetUpdateCounters(options.jobID)
    if len(modelIDCtrList) == 0:
        raise RuntimeError("No models found")
        return

    modelIDs = [x[0] for x in modelIDCtrList]
    modelInfos = cjDAO.modelsInfo(modelIDs)

    # See which variables are permuted over
    permuteVars = set()
    for modelInfo in modelInfos:
        data = modelInfo._asdict()
        params = json.loads(data['params'])
        varStates = params['particleState']['varStates']
        permuteVars = permuteVars.union(varStates.keys())

    # Prepare a csv file to hold the results
    modelsCSVFilename = 'job%d_models.tsv' % (options.jobID)
    modelsCSVFD = open(modelsCSVFilename, 'wb')
    modelsCSV = csv.writer(modelsCSVFD,
                           delimiter='\t',
                           quoting=csv.QUOTE_MINIMAL)

    # Include all the built-in fields of the models table
    fieldsToDump = list(modelInfos[0]._fields)

    # Re-order the columns slightly
    fieldsToDump.remove('engParamsHash')
    fieldsToDump.insert(2, 'engParamsHash')
    fieldsToDump.remove('optimizedMetric')
    fieldsToDump.insert(3, 'optimizedMetric')

    # Insert our generated fields
    generatedFields = [
        '_sprintIdx', '_swarmId', '_particleId', '_genIdx', '_particleVars',
        '_modelVars'
    ]
    generatedFields.extend(sorted(permuteVars))
    idx = 4
    for field in generatedFields:
        fieldsToDump.insert(idx, field)
        idx += 1

    # Write the header
    modelsCSV.writerow(fieldsToDump)

    # Write the data for each model
    scorePerSeconds = dict()
    for modelInfo in modelInfos:
        data = modelInfo._asdict()
        params = json.loads(data['params'])
        data['_swarmId'] = params['particleState']['swarmId']
        fields = data['_swarmId'].split('.')
        data['_sprintIdx'] = len(fields) - 1
        data['_particleId'] = params['particleState']['id']
        data['_genIdx'] = params['particleState']['genIdx']
        data['_particleVars'] = json.dumps(
            params['particleState']['varStates'])
        data['_modelVars'] = json.dumps(params['structuredParams'])

        varStates = params['particleState']['varStates']
        for varName in permuteVars:
            if varName in varStates:
                data[varName] = varStates[varName]['position']
            else:
                data[varName] = ' '

        # Convert hashes to hex
        data['engParamsHash'] = data['engParamsHash'].encode('hex')
        data['engParticleHash'] = data['engParticleHash'].encode('hex')

        # Write out the data
        rowData = []
        for field in fieldsToDump:
            rowData.append(data[field])
        modelsCSV.writerow(rowData)

        # Keep track of the best score over time
        if data['completionReason'] in ['eof', 'stopped']:
            errScore = data['optimizedMetric']
            endSeconds = time.mktime(data['endTime'].timetuple())
            if endSeconds in scorePerSeconds:
                if errScore < scorePerSeconds[endSeconds]:
                    scorePerSeconds[endSeconds] = errScore
            else:
                scorePerSeconds[endSeconds] = errScore

    # Close the models table
    modelsCSVFD.close()
    print "Generated output file %s" % (modelsCSVFilename)

    # Generate the score per seconds elapsed
    scoresFilename = 'job%d_scoreOverTime.csv' % (options.jobID)
    scoresFilenameFD = open(scoresFilename, 'wb')
    scoresFilenameCSV = csv.writer(scoresFilenameFD,
                                   delimiter=',',
                                   quoting=csv.QUOTE_MINIMAL)
    scoresFilenameCSV.writerow(['seconds', 'score', 'bestScore'])

    # Write out the best score over time
    scores = scorePerSeconds.items()
    scores.sort()  # Sort by time
    startTime = scores[0][0]
    bestScore = scores[0][1]
    for (secs, score) in scores:
        if score < bestScore:
            bestScore = score
        scoresFilenameCSV.writerow([secs - startTime, score, bestScore])
    scoresFilenameFD.close()
    print "Generated output file %s" % (scoresFilename)
Code example #11
        tempFile = handle.name
        handle.close()

        outputFile = generateDataset(ai, 'weighted_mean.csv', tempFile)

        result = []
        with FileRecordStream(outputFile) as f:
            print f.getFields()
            for r in f:
                result.append(r)

        self.assertEqual(result[0][0], 6.0)
        self.assertEqual(result[0][1], 1.0)
        self.assertEqual(result[1][0], 1.0)
        self.assertEqual(result[1][1], 52.0)
        self.assertEqual(result[2][0], 4.0)
        self.assertEqual(result[2][1], 0.0)
        self.assertEqual(result[3][0], None)
        self.assertEqual(result[3][1], 0.0)
        return


if __name__ == '__main__':
    nupic_support.initLogging()

    # Add verbosity to unittest output (so it prints a header for each test)
    #sys.argv.append("--verbose")

    # Run the test
    unittest.TestProgram()
Code example #12
File: opf_checkpoint_test.py Project: lejarx/nupic

    @unittest.skip("Currently Fails: NUP-1864")
    def test_TemporalAnomaly(self):
        """ Test that we get the same predictions out of a model that was
    saved and reloaded from a checkpoint as we do from one that runs
    continuously.
    """

        self._testSamePredictions(
            experiment="temporal_anomaly",
            predSteps=1,
            checkpointAt=250,
            predictionsFilename='DefaultTask.TemporalAnomaly.predictionLog.csv',
            additionalFields=['anomalyScore'])

    def test_BackwardsCompatibility(self):
        """ Test that we can load in a checkpoint saved by an earlier version of
    the OPF.
    """

        self._testBackwardsCompatibility(
            os.path.join('backwards_compatibility', 'a'),
            'savedmodels_2012-10-05')


if __name__ == "__main__":
    initLogging(verbose=True)

    unittest.main()
Code example #13
File: HypersearchV2Dump.py Project: jason-zhu/nupic
def main(argv):
    parser = OptionParser(helpString)

    parser.add_option(
        "--jobID", action="store", type="int", default=None, help="jobID of the hypersearch job [default: %default]."
    )

    # Evaluate command line arguments
    options, args = parser.parse_args(argv[1:])
    if len(args) != 0:
        raise RuntimeError("Expected no command line arguments but got: %s" % args)

    initLogging(verbose=True)

    # Open up the database client
    cjDAO = ClientJobsDAO.get()

    # Read in the models for this job
    modelIDCtrList = cjDAO.modelsGetUpdateCounters(options.jobID)
    if len(modelIDCtrList) == 0:
        raise RuntimeError("No models found")
        return

    modelIDs = [x[0] for x in modelIDCtrList]
    modelInfos = cjDAO.modelsInfo(modelIDs)

    # See which variables are permuted over
    permuteVars = set()
    for modelInfo in modelInfos:
        data = modelInfo._asdict()
        params = json.loads(data["params"])
        varStates = params["particleState"]["varStates"]
        permuteVars = permuteVars.union(varStates.keys())

    # Prepare a csv file to hold the results
    modelsCSVFilename = "job%d_models.tsv" % (options.jobID)
    modelsCSVFD = open(modelsCSVFilename, "wb")
    modelsCSV = csv.writer(modelsCSVFD, delimiter="\t", quoting=csv.QUOTE_MINIMAL)

    # Include all the built-in fields of the models table
    fieldsToDump = list(modelInfos[0]._fields)

    # Re-order the columns slightly
    fieldsToDump.remove("engParamsHash")
    fieldsToDump.insert(2, "engParamsHash")
    fieldsToDump.remove("optimizedMetric")
    fieldsToDump.insert(3, "optimizedMetric")

    # Insert our generated fields
    generatedFields = ["_sprintIdx", "_swarmId", "_particleId", "_genIdx", "_particleVars", "_modelVars"]
    generatedFields.extend(sorted(permuteVars))
    idx = 4
    for field in generatedFields:
        fieldsToDump.insert(idx, field)
        idx += 1

    # Write the header
    modelsCSV.writerow(fieldsToDump)

    # Write the data for each model
    scorePerSeconds = dict()
    for modelInfo in modelInfos:
        data = modelInfo._asdict()
        params = json.loads(data["params"])
        data["_swarmId"] = params["particleState"]["swarmId"]
        fields = data["_swarmId"].split(".")
        data["_sprintIdx"] = len(fields) - 1
        data["_particleId"] = params["particleState"]["id"]
        data["_genIdx"] = params["particleState"]["genIdx"]
        data["_particleVars"] = json.dumps(params["particleState"]["varStates"])
        data["_modelVars"] = json.dumps(params["structuredParams"])

        varStates = params["particleState"]["varStates"]
        for varName in permuteVars:
            if varName in varStates:
                data[varName] = varStates[varName]["position"]
            else:
                data[varName] = " "

        # Convert hashes to hex
        data["engParamsHash"] = data["engParamsHash"].encode("hex")
        data["engParticleHash"] = data["engParticleHash"].encode("hex")

        # Write out the data
        rowData = []
        for field in fieldsToDump:
            rowData.append(data[field])
        modelsCSV.writerow(rowData)

        # Keep track of the best score over time
        if data["completionReason"] in ["eof", "stopped"]:
            errScore = data["optimizedMetric"]
            endSeconds = time.mktime(data["endTime"].timetuple())
            if endSeconds in scorePerSeconds:
                if errScore < scorePerSeconds[endSeconds]:
                    scorePerSeconds[endSeconds] = errScore
            else:
                scorePerSeconds[endSeconds] = errScore

    # Close the models table
    modelsCSVFD.close()
    print "Generated output file %s" % (modelsCSVFilename)

    # Generate the score per seconds elapsed
    scoresFilename = "job%d_scoreOverTime.csv" % (options.jobID)
    scoresFilenameFD = open(scoresFilename, "wb")
    scoresFilenameCSV = csv.writer(scoresFilenameFD, delimiter=",", quoting=csv.QUOTE_MINIMAL)
    scoresFilenameCSV.writerow(["seconds", "score", "bestScore"])

    # Write out the best score over time
    scores = scorePerSeconds.items()
    scores.sort()  # Sort by time
    startTime = scores[0][0]
    bestScore = scores[0][1]
    for (secs, score) in scores:
        if score < bestScore:
            bestScore = score
        scoresFilenameCSV.writerow([secs - startTime, score, bestScore])
    scoresFilenameFD.close()
    print "Generated output file %s" % (scoresFilename)
Code example #14
def main(argv):
    """
  The main function of the HypersearchWorker script. This parses the command
  line arguments, instantiates a HypersearchWorker instance, and then
  runs it.

  Parameters:
  ----------------------------------------------------------------------
  retval:     jobID of the job we ran. This is used by unit test code
                when calling this worker using the --params command
                line option (which tells this worker to insert the job
                itself).
  """

    parser = OptionParser(helpString)

    parser.add_option(
        "--jobID",
        action="store",
        type="int",
        default=None,
        help="jobID of the job within the dbTable [default: %default].",
    )

    parser.add_option(
        "--modelID",
        action="store",
        type="str",
        default=None,
        help=(
            "Tell worker to re-run this model ID. When specified, jobID " "must also be specified [default: %default]."
        ),
    )

    parser.add_option(
        "--workerID",
        action="store",
        type="str",
        default=None,
        help=(
            "workerID of the scheduler's SlotAgent (GenericWorker) that "
            "hosts this SpecializedWorker [default: %default]."
        ),
    )

    parser.add_option(
        "--params",
        action="store",
        default=None,
        help="Create and execute a new hypersearch request using this JSON "
        "format params string. This is helpful for unit tests and debugging. "
        "When specified jobID must NOT be specified. [default: %default].",
    )

    parser.add_option(
        "--clearModels",
        action="store_true",
        default=False,
        help="clear out the models table before starting [default: %default].",
    )

    parser.add_option(
        "--resetJobStatus",
        action="store_true",
        default=False,
        help="Reset the job status before starting  [default: %default].",
    )

    parser.add_option(
        "--logLevel",
        action="store",
        type="int",
        default=None,
        help="override default log level. Pass in an integer value that "
        "represents the desired logging level (10=logging.DEBUG, "
        "20=logging.INFO, etc.) [default: %default].",
    )

    # Evaluate command line arguments
    (options, args) = parser.parse_args(argv[1:])
    if len(args) != 0:
        raise RuntimeError("Expected no command line arguments, but got: %s" % (args))

    if options.jobID and options.params:
        raise RuntimeError("--jobID and --params can not be used at the same time")

    if options.jobID is None and options.params is None:
        raise RuntimeError("Either --jobID or --params must be specified.")

    initLogging(verbose=True)

    # Instantiate the HypersearchWorker and run it
    hst = HypersearchWorker(options, argv[1:])

    # Normal use. This is one among a number of workers. If we encounter
    #  an exception at the outer loop here, we fail the entire job.
    if options.params is None:
        try:
            jobID = hst.run()

        except Exception, e:
            jobID = options.jobID
            msg = StringIO.StringIO()
            print >> msg, "%s: Exception occurred in Hypersearch Worker: %r" % (ErrorCodes.hypersearchLogicErr, e)
            traceback.print_exc(None, msg)

            completionReason = ClientJobsDAO.CMPL_REASON_ERROR
            completionMsg = msg.getvalue()
            hst.logger.error(completionMsg)

            # If no other worker already marked the job as failed, do so now.
            jobsDAO = ClientJobsDAO.get()
            workerCmpReason = jobsDAO.jobGetFields(options.jobID, ["workerCompletionReason"])[0]
            if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS:
                jobsDAO.jobSetFields(
                    options.jobID,
                    fields=dict(
                        cancel=True,
                        workerCompletionReason=ClientJobsDAO.CMPL_REASON_ERROR,
                        workerCompletionMsg=completionMsg,
                    ),
                    useConnectionID=False,
                    ignoreUnchanged=True,
                )
Code example #15
File: opf_checkpoint_test.py Project: Erichy94/nupic
  @unittest.skip("Currently Fails: NUP-1864")
  def test_TemporalAnomaly(self):
    """ Test that we get the same predictions out of a model that was
    saved and reloaded from a checkpoint as we do from one that runs
    continuously.
    """

    self._testSamePredictions(experiment="temporal_anomaly", predSteps=1,
      checkpointAt=250,
      predictionsFilename='DefaultTask.TemporalAnomaly.predictionLog.csv',
      additionalFields=['anomalyScore'])


  @unittest.skip("We aren't currently supporting serialization backward "
                 "compatibility")
  def test_BackwardsCompatibility(self):
    """ Test that we can load in a checkpoint saved by an earlier version of
    the OPF.
    """

    self._testBackwardsCompatibility(
          os.path.join('backwards_compatibility', 'a'),
          'savedmodels_2012-10-05')



if __name__ == "__main__":
  initLogging(verbose=True)

  unittest.main()
Code example #16
File: HypersearchV2Dump.py Project: DarkyMago/nupic
def main(argv):
  parser = OptionParser(helpString)

  parser.add_option("--jobID", action="store", type="int", default=None,
        help="jobID of the hypersearch job [default: %default].")

  # Evaluate command line arguments
  options, args = parser.parse_args(argv[1:])
  if len(args) != 0:
    raise RuntimeError("Expected no command line arguments but got: %s" % args)

  # Init the NuPic logging configuration from the nupic-logging.conf configuration
  # file. This is found either in the NTA_CONF_DIR directory (if defined) or
  # in the 'conf' subdirectory of the NuPic install location.
  initLogging(verbose=True)

  # Open up the database client
  cjDAO = ClientJobsDAO.get()

  # Read in the models for this job
  modelIDCtrList = cjDAO.modelsGetUpdateCounters(options.jobID)
  if len(modelIDCtrList) == 0:
    raise RuntimeError ("No models found")
    return

  modelIDs = [x[0] for x in modelIDCtrList]
  modelInfos = cjDAO.modelsInfo(modelIDs)

  # See which variables are permuted over
  permuteVars = set()
  for modelInfo in modelInfos:
    data = modelInfo._asdict()
    params = json.loads(data['params'])
    varStates = params['particleState']['varStates']
    permuteVars = permuteVars.union(varStates.keys())

  # Prepare a csv file to hold the results
  modelsCSVFilename = 'job%d_models.tsv' % (options.jobID)
  modelsCSVFD = open(modelsCSVFilename, 'wb')
  modelsCSV = csv.writer(modelsCSVFD, delimiter='\t',
                quoting=csv.QUOTE_MINIMAL)

  # Include all the built-in fields of the models table
  fieldsToDump = list(modelInfos[0]._fields)

  # Re-order the columns slightly
  fieldsToDump.remove('engParamsHash')
  fieldsToDump.insert(2, 'engParamsHash')
  fieldsToDump.remove('optimizedMetric')
  fieldsToDump.insert(3, 'optimizedMetric')

  # Insert our generated fields
  generatedFields = ['_sprintIdx', '_swarmId', '_particleId', '_genIdx',
                     '_particleVars', '_modelVars']
  generatedFields.extend(sorted(permuteVars))
  idx=4
  for field in generatedFields:
    fieldsToDump.insert(idx, field)
    idx += 1

  # Write the header
  modelsCSV.writerow(fieldsToDump)

  # Write the data for each model
  scorePerSeconds = dict()
  for modelInfo in modelInfos:
    data = modelInfo._asdict()
    params = json.loads(data['params'])
    data['_swarmId'] = params['particleState']['swarmId']
    fields = data['_swarmId'].split('.')
    data['_sprintIdx'] = len(fields)-1
    data['_particleId'] = params['particleState']['id']
    data['_genIdx'] = params['particleState']['genIdx']
    data['_particleVars'] = json.dumps(params['particleState']['varStates'])
    data['_modelVars'] = json.dumps(params['structuredParams'])

    varStates = params['particleState']['varStates']
    for varName in permuteVars:
      if varName in varStates:
        data[varName] = varStates[varName]['position']
      else:
        data[varName] = ' '

    # Convert hashes to hex
    data['engParamsHash'] = data['engParamsHash'].encode('hex')
    data['engParticleHash'] = data['engParticleHash'].encode('hex')

    # Write out the data
    rowData = []
    for field in fieldsToDump:
      rowData.append(data[field])
    modelsCSV.writerow(rowData)

    # Keep track of the best score over time
    if data['completionReason'] in ['eof', 'stopped']:
      errScore = data['optimizedMetric']
      endSeconds = time.mktime(data['endTime'].timetuple())
      if endSeconds in scorePerSeconds:
        if errScore < scorePerSeconds[endSeconds]:
          scorePerSeconds[endSeconds] = errScore
      else:
        scorePerSeconds[endSeconds] = errScore

  # Close the models table
  modelsCSVFD.close()
  print "Generated output file %s" % (modelsCSVFilename)

  # Generate the score per seconds elapsed
  scoresFilename = 'job%d_scoreOverTime.csv' % (options.jobID)
  scoresFilenameFD = open(scoresFilename, 'wb')
  scoresFilenameCSV = csv.writer(scoresFilenameFD, delimiter=',',
                quoting=csv.QUOTE_MINIMAL)
  scoresFilenameCSV.writerow(['seconds', 'score', 'bestScore'])

  # Write out the best score over time
  scores = scorePerSeconds.items()
  scores.sort()   # Sort by time
  startTime = scores[0][0]
  bestScore = scores[0][1]
  for (secs, score) in scores:
    if score < bestScore:
      bestScore = score
    scoresFilenameCSV.writerow([secs-startTime, score, bestScore])
  scoresFilenameFD.close()
  print "Generated output file %s" % (scoresFilename)