import os

# Helpers used below (readDataAndReshuffle, executeModelLifecycle, queryModel,
# resultsCheck, NLP_MODEL_TYPES, HTM_CONFIGS) are assumed to be imported from
# the surrounding test harness.
def run(args):
  """ Run the 'query' test.
  This method handles scenarios for running a single model or all of them.
  """
  (trainingData, labelRefs, _, documentTextMap) = readDataAndReshuffle(args)

  if args.modelName == "all":
    modelNames = NLP_MODEL_TYPES
    runningAllModels = True
  else:
    modelNames = [args.modelName]
    runningAllModels = False

  accuracies = {}
  for name in modelNames:
    # Setup args
    args.modelName = name
    args.modelDir = os.path.join(args.experimentDir, name)
    if name == "htm":
      if runningAllModels:
        # Need to specify network config for htm models
        try:
          htmModelInfo = HTM_CONFIGS.pop()
        except KeyError:
          print "Not enough HTM configs, so skipping the HTM model."
          continue
        name = htmModelInfo[0]
        args.networkConfigPath = htmModelInfo[1]
      else:
        # Get the specific model name from the config path
        for (modelName, configPath) in HTM_CONFIGS:
          if configPath == args.networkConfigPath:
            name = modelName

    # Create a model, train it, save it, reload it
    _, model = executeModelLifecycle(args, trainingData, labelRefs)

    # Now query the model using some example HR complaints about managers
    queryModel(model,
               "Begin by treating the employees of the department with the "
               "respect they deserve. Halt the unfair practices "
               "that they are aware of doing. There is no compassion "
               "or loyalty to its senior employees",
               documentTextMap)

    queryModel(model,
               "My manager is really incompetent. He has no clue how to "
               "properly supervise his employees and keep them motivated.",
               documentTextMap)

    queryModel(model,
               "I wish I had a lot more vacation and much more flexibility "
               "in how I manage my own time. I should be able to choose "
               "when I come in as long as I manage to get all my tasks done.",
               documentTextMap)

    if args.verbosity > 0:
      # Print profile information
      print()
      model.dumpProfile()

  resultsCheck(name)
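
For reference, a minimal sketch of how this query-test run() might be driven
from the command line. The flag names are assumptions inferred from the
attributes the function reads (modelName, experimentDir, networkConfigPath,
verbosity), not the harness's actual CLI:

if __name__ == "__main__":
  import argparse

  # Hypothetical driver; the real test harness defines its own parser.
  parser = argparse.ArgumentParser(description="Run the NLP query test.")
  parser.add_argument("--modelName", default="all",
                      help="A type from NLP_MODEL_TYPES, or 'all'.")
  parser.add_argument("--experimentDir", default="query_experiment",
                      help="Directory where per-model output is written.")
  parser.add_argument("--networkConfigPath", default=None,
                      help="Network config path, used by single htm runs.")
  parser.add_argument("--verbosity", type=int, default=1,
                      help="Set > 0 to dump profile information.")
  run(parser.parse_args())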
Example #2
import os

def run(args):
  """ Run the classification test.
  This method handles scenarios for running a single model or all of them.
  Also tests serialization by checking that a model's results match before and
  after saving/loading.
  """
  if args.hello:
    args = _setupHelloTest(args)

  (dataset, labelRefs, documentCategoryMap, _) = readDataAndReshuffle(args)

  if args.modelName == "all":
    modelNames = NLP_MODEL_TYPES
    runningAllModels = True
  else:
    modelNames = [args.modelName]
    runningAllModels = False

  accuracies = {}
  for name in modelNames:
    # Setup args
    args.modelName = name
    args.modelDir = os.path.join(args.experimentName, name)
    if name == "htm":
      if runningAllModels:
        # Need to specify network config for htm models
        try:
          htmModelInfo = HTM_CONFIGS.pop()
        except KeyError:
          print "Not enough HTM configs, so skipping the HTM model."
          continue
        name = htmModelInfo[0]
        args.networkConfigPath = htmModelInfo[1]
      else:
        # Get the specific model name from the config path
        for (modelName, configPath) in HTM_CONFIGS:
          if configPath == args.networkConfigPath:
            name = modelName

    # Split data for train/test (We still test on the training data!)
    if args.split:
      split = int(len(dataset) * args.split)
      trainingData = dataset[:split]
    else:
      trainingData = dataset

    # Create a model, train it, save it, reload it
    _, model = executeModelLifecycle(args, trainingData, labelRefs)

    # Test the model
    accuracies[name] = testModel(model,
                                 dataset,
                                 labelRefs,
                                 documentCategoryMap,
                                 args.verbosity)

    if args.verbosity > 0:
      # Print profile information
      print()
      model.dumpProfile()

  printSummary(args.experimentName, accuracies)

  if args.hello:
    assertResults("hello_classification", accuracies)
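
The serialization check mentioned in the docstring lives inside
executeModelLifecycle(), which is not shown in these examples. Below is a
minimal sketch of the save/reload round trip it implies; createModel() and
trainDocument() are hypothetical stand-ins for the harness's real API, the
(document, labels, docId) training-tuple shape is likewise assumed, and plain
pickle stands in for whatever serialization the real models use:

import os
import pickle

def executeModelLifecycleSketch(args, trainingData, labelRefs):
  # createModel() and trainDocument() are assumed names, not the real API.
  model = createModel(args.modelName, args, labelRefs)
  for (document, labels, docId) in trainingData:
    model.trainDocument(document, labels, docId)

  # Serialize, then reload, so callers can verify the round trip: results
  # from the reloaded model should match results from before the save.
  os.makedirs(args.modelDir, exist_ok=True)
  modelPath = os.path.join(args.modelDir, "model.pkl")
  with open(modelPath, "wb") as f:
    pickle.dump(model, f)
  with open(modelPath, "rb") as f:
    model = pickle.load(f)
  return args, model  # first tuple element is ignored by the callers above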