Example #1
  def load(self, modelID):
    """ Retrieve a model instance from checkpoint.

    param modelID: unique model ID

    retval: an OPF model instance

    raises:
      ModelNotFound if the model checkpoint hasn't been saved yet or if this
        model's entry doesn't exist in the checkpoint archive
    """
    startTime = time.time()

    checkpointStoreDirPath = self._getCurrentCheckpointRealPath(modelID)

    modelInstanceDirPath = os.path.join(checkpointStoreDirPath,
                                        self._CHECKPOINT_INSTANCE_DIR_NAME)

    model = ModelFactory.loadFromCheckpoint(modelInstanceDirPath)

    self._logger.info(
      "{TAG:MCKPT.LOAD} Loaded model=%s: duration=%ss; directory=%s",
      modelID, time.time() - startTime, checkpointStoreDirPath)

    return model
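
A minimal usage sketch for the load() API above. ModelCheckpointMgr and ModelNotFound are assumed names inferred from the docstring, and MODEL_PARAMS stands in for a real params dict; none of these are confirmed by the snippet itself.

from nupic.frameworks.opf.modelfactory import ModelFactory

# Sketch only: checkpointMgr exposes load() as documented above.
checkpointMgr = ModelCheckpointMgr()
try:
  model = checkpointMgr.load("model-42")
except ModelNotFound:
  # No checkpoint archived yet for this model ID; fall back to a new model.
  model = ModelFactory.create(MODEL_PARAMS)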
Example #2
 def load_model(self, modelSaveDir):
     try:
         print "Loading model from %s..." % modelSaveDir
         model = ModelFactory.loadFromCheckpoint(modelSaveDir)
         self._init_model(model)
     except:
         print "ERR", "load_model", sys.exc_info()
         traceback.print_exc()
         raise
Example #3
    def initModel(self):
        if os.path.exists(os.path.abspath(self.model_path)):
            self.model = ModelFactory.loadFromCheckpoint(os.path.relpath(self.model_path))
        else:
            self.model = ModelFactory.create(self.model_params)

        predicted_field = self.model_params["predictedField"]
        if predicted_field:
            self.model.enableInference({"predictedField": predicted_field})
Example #4
    def initModel(self):
        if os.path.exists(os.path.abspath(self.model_path)):
            self.model = ModelFactory.loadFromCheckpoint(
                os.path.relpath(self.model_path))
        else:
            self.model = ModelFactory.create(self.model_params)

        predicted_field = self.model_params['predictedField']
        if predicted_field:
            self.model.enableInference({'predictedField': predicted_field})
Example #5
    def __init__(self, name, admin_in, admin_out, sensor_spec, sensors_dir, sensor_in, store, swarm):
        threading.Thread.__init__(self)
        #self.config = config
        self.sensor_in = sensor_in
        self.store = store
        self.swarm = swarm
        self.name = name
        self.brain_available = False
        threading.Thread.__init__(self)
        Sensor.__init__(self, name=name, admin_in=admin_in, admin_out=admin_out, sensor_spec=sensor_spec, sensors_dir=sensors_dir)
        swarm_config_path = sensors_dir  + sensor_in +'/stores/' + store + '/swarms/' + swarm +'/'
        #store_path = sensors_dir  + sensor_in +'/stores/' + store + '/out.csv'
        #model = ModelFactory.loadFromCheckpoint('/home/hans/cortical_one_var/sensors/cpu/stores/store_3/swarms/swarm_1/model_save')

        print swarm_config_path

        #load original swarm config file
        with open(swarm_config_path + 'swarm_config.json')as json_file:
            self.swarm_config = json.load(json_file)
            print(self.swarm_config)

        self.swarm_config_ng = SwarmConfig(self.swarm_config)

        print self.swarm_config_ng.get_predicted_field()


        #if there is a 'brain', then take the existing brain
        self.possible_brain_path = str(swarm_config_path +  'model_save')
        if os.path.exists(self.possible_brain_path):
            possible_brain_2 = '/home/hans/cortical_one_var/sensors/cpu/stores/store_3/swarms/swarm_1/model_save'
            print "load existing brain..."
            print self.possible_brain_path
            #model = ModelFactory.loadFromCheckpoint(possible_brain_2)
            model = ModelFactory.loadFromCheckpoint(self.possible_brain_path)
            #use this case to add the availability of a 'brain' (???!!!) to your announcement

        else:

            #load model configuration
            model = ModelFactory.create(getModelParamsFromFileNG(swarm_config_path))

            #configure prediction
            model.enableInference({"predictedField": self.swarm_config_ng.get_predicted_field()})

        self.connection_sensor_in = stomp.Connection()
        self.connection_sensor_in.set_listener(name=self.name, lstnr=AbstractSensorListener(self.name,topic = '/topic/' +self.sensor_in,config=self.swarm_config_ng,model=model))
        self.connection_sensor_in.start()
        self.connection_sensor_in.connect(self.user, self.password, wait=True)
        #self.connection_sensor_in.connect('admin', 'password', wait=True)
        self.abstract_listener = self.connection_sensor_in.get_listener(name=self.name)
        self.connection_sensor_in.subscribe(destination='/topic/' +self.sensor_in, id=2, ack='auto')

        self.values = []

        self.self_announcement()
Example #6
def createModel(modelParams):
    #model = ModelFactory.create(modelParams)
    #model.save('/Users/manpreet.singh/Sandbox/codehub/github/datascience/workspace/cortical/harddrive_lifeguard/model/model0')
    model = ModelFactory.loadFromCheckpoint('/Users/manpreet.singh/Sandbox/codehub/github/datascience/workspace/cortical/harddrive_lifeguard/model/model0')
    model.enableInference({'predictedField': 'class'})

    #model.disableLearning()

    # model = ModelFactory.create(modelParams)
    # model.enableInference({'predictedField': 'class'})
    return model
Example #7
def runHospitalModel(inputFilePath):
    model = None
    if model_exists(mind_palaces+"weight"+"/model.pkl"):
        print "using existing model"
        model = ModelFactory.loadFromCheckpoint(mind_palaces+"weight")
    else:
        print "creating new model"
        model = createModel()
    runModel(model, inputFilePath)
    print "======> updating model"
    model.save(mind_palaces+"weight")
Example #8
def runHospitalModel(inputFilePath, run_count):
    model = None
    if model_exists(mind_palaces+"vaccination_present_fresh"+"/model.pkl"):
        print "using existing model"
        model = ModelFactory.loadFromCheckpoint(mind_palaces+"vaccination_present_fresh")
    else:
        print "creating new model"
        model = createModel()
    runModel(model, inputFilePath, run_count)
    print "======> updating model"
    model.save(mind_palaces+"vaccination_present_fresh")
Example #9
def runHospitalModel(inputFilePath, run_count):
    model = None
    if model_exists(mind_palaces + "vaccination_present_fresh" + "/model.pkl"):
        print "using existing model"
        model = ModelFactory.loadFromCheckpoint(mind_palaces +
                                                "vaccination_present_fresh")
    else:
        print "creating new model"
        model = createModel()
    runModel(model, inputFilePath, run_count)
    print "======> updating model"
    model.save(mind_palaces + "vaccination_present_fresh")
Example #10
def createModel(modelParams):
    #model = ModelFactory.create(modelParams)
    #model.save('/Users/manpreet.singh/Sandbox/codehub/github/datascience/workspace/cortical/harddrive_lifeguard/model/model0')
    model = ModelFactory.loadFromCheckpoint(
        '/Users/manpreet.singh/Sandbox/codehub/github/datascience/workspace/cortical/harddrive_lifeguard/model/model0'
    )
    model.enableInference({'predictedField': 'class'})

    #model.disableLearning()

    # model = ModelFactory.create(modelParams)
    # model.enableInference({'predictedField': 'class'})
    return model
Example #11
def run():
    if SMOOTH:
        inputDataDir = "../preprocessing/smoothed_data"
    else:
        inputDataDir = "../preprocessing/formatted_data"

    for channel in CHANNELS:
        for movement in MOTOR:

            outputName = "%s_%s" % (movement, channel)
            if PLOT:
                output = NuPICPlotOutput("output_%s" % outputName,
                                         show_anomaly_score=True)
            else:
                output = NuPICFileOutput("data_output_%s" % outputName,
                                         show_anomaly_score=True)

            # load model from checkpoint
            modelDir = "%s/%s_%s_%s" % (MODEL_PARENT_DIR, MODEL_PREFIX,
                                        movement, channel)
            model = ModelFactory.loadFromCheckpoint(modelDir)
            model.enableInference({"predictedField": "channel_value"})

            # disable learning
            model.disableLearning()

            # get input file
            inputFile = "%s/%s_%s.csv" % (inputDataDir, movement, channel)
            with open(inputFile, "rb") as input:
                csvReader = csv.reader(input)

                # skip header rows
                csvReader.next()
                csvReader.next()
                csvReader.next()

                # the real data
                timestamp = 0
                for row in csvReader:
                    channel_value = float(row[0])
                    result = model.run({"channel_value": channel_value})
                    output.write(timestamp,
                                 "channel_value",
                                 channel_value,
                                 result,
                                 prediction_step=1)
                    timestamp += 1

            output.close()
Example #12
def getModel(flag):

    modelDir = os.getcwd() + '/model/%s' % flag

    _LOGGER.info(modelDir)

    if os.path.exists(modelDir):
        _LOGGER.info('model exists')
        model = ModelFactory.loadFromCheckpoint(modelDir)
        return model
    else:
        _LOGGER.info('creating new model')
        model = ModelFactory.create(MODEL_PARAMS)
        model.save(modelDir)
        return model
Example #13
def new_abstract_sensor(base_sensor_name, admin_in, admin_out,store,swarm):
    print "create abstract sensor...."
    model = ModelFactory.loadFromCheckpoint('/home/hans/cortical_one_var/sensors/cpu/stores/store_3/swarms/swarm_1/model_save')
    abstract_sensor = AbstractSensor(
        name='%s_predict' % base_sensor_name,
        admin_in= admin_in,
        admin_out=admin_out,
        sensor_spec=["consolidate"], #not needed anyway
        sensors_dir=sensors_dir_ng,
        sensor_in=base_sensor_name,
        store = store,
        swarm= swarm
    )
    abstract_sensor.start()
    print "new abstract sensor created"
Example #14
def getModel(flag):

    modelDir = os.getcwd() + "/model/%s" % flag

    _LOGGER.info(modelDir)

    if os.path.exists(modelDir):
        _LOGGER.info("model exists")
        model = ModelFactory.loadFromCheckpoint(modelDir)
        return model
    else:
        _LOGGER.info("creating new model")
        model = ModelFactory.create(MODEL_PARAMS)
        model.save(modelDir)
        return model
Example #15
    def getModel(self):
        """
        Get the existing model from file or create
        from model params if not already existing
        :return:
        """
        # Check if the dir is empty
        if os.path.exists(self.savedModelsPath) and os.listdir(
                self.savedModelsPath):
            log.info("Loading model from checkpoint %s" % self.savedModelsPath)
            model = ModelFactory.loadFromCheckpoint(self.savedModelsPath)
        else:
            log.info("Creating model from %s..." % self.modelParamsPath)
            model = self.createModelFromParams(self.getModelParams())

        return model
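
Examples #3, #12, and #15 all repeat the same load-or-create idiom. A consolidated helper might look like the sketch below; checkpoint_dir, model_params, and predicted_field are illustrative parameter names, not taken from any of the projects above.

import os

from nupic.frameworks.opf.modelfactory import ModelFactory


def load_or_create_model(checkpoint_dir, model_params, predicted_field):
    # Reload the checkpoint when the directory exists and is non-empty,
    # as in Example #15; otherwise build a fresh model from the params.
    if os.path.isdir(checkpoint_dir) and os.listdir(checkpoint_dir):
        model = ModelFactory.loadFromCheckpoint(checkpoint_dir)
    else:
        model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": predicted_field})
    return model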
Example #16
def new_abstract_sensor(base_sensor_name, admin_in, admin_out, store, swarm):
    print "create abstract sensor...."
    model = ModelFactory.loadFromCheckpoint(
        '/home/hans/cortical_one_var/sensors/cpu/stores/store_3/swarms/swarm_1/model_save'
    )
    abstract_sensor = AbstractSensor(
        name='%s_predict' % base_sensor_name,
        admin_in=admin_in,
        admin_out=admin_out,
        sensor_spec=["consolidate"],  #not needed anyway
        sensors_dir=sensors_dir_ng,
        sensor_in=base_sensor_name,
        store=store,
        swarm=swarm)
    abstract_sensor.start()
    print "new abstract sensor created"
Example #17
def get_online(number_of_records=20):  # 0 means forever
    model = ModelFactory.loadFromCheckpoint(os.getcwd() + "/model_save")

    count = 0
    ser.flushInput()
    while (count < number_of_records) or (number_of_records == 0):
        count = count + 1
        text = ser.readline()
        if (len(text.split(",")) == 4):
            result = model.run({
                "s1": float(text.split(",")[0]),
                "s2": float(text.split(",")[1]),
                "s3": float(text.split(",")[2]),
                "s4": float(text.split(",")[3])
            })
            prediction = int(result.inferences['multiStepBestPredictions'][4])
            sys.stdout.write("\r"+ str(prediction))
            sys.stdout.write("\t"+ text)
            ser.write(str(prediction)+ '\n')
Example #18
def run():
    if SMOOTH:
        inputDataDir = "../preprocessing/smoothed_data"
    else:
        inputDataDir = "../preprocessing/formatted_data"


    for channel in CHANNELS:
        for movement in MOTOR:

            outputName = "%s_%s" % (movement, channel)
            if PLOT:
                output = NuPICPlotOutput("output_%s" % outputName, show_anomaly_score=True)
            else:
                output = NuPICFileOutput("data_output_%s" %outputName, show_anomaly_score=True)

            # load model from checkpoint
            modelDir = "%s/%s_%s_%s" % (MODEL_PARENT_DIR, MODEL_PREFIX, movement, channel)
            model = ModelFactory.loadFromCheckpoint(modelDir)
            model.enableInference({"predictedField": "channel_value"})

            # disable learning
            model.disableLearning()

            # get input file
            inputFile = "%s/%s_%s.csv" % (inputDataDir, movement, channel)
            with open(inputFile, "rb") as input:
                csvReader = csv.reader(input)

                # skip header rows
                csvReader.next()
                csvReader.next()
                csvReader.next()

                # the real data
                timestamp = 0
                for row in csvReader:
                    channel_value = float(row[0])
                    result = model.run({"channel_value": channel_value})
                    output.write(timestamp, "channel_value", channel_value, result, prediction_step=1)
                    timestamp +=1

            output.close()
Example #19
  def testCheckpoint(self):
    tmpDir = tempfile.mkdtemp()
    model = ModelFactory.create(MODEL_PARAMS)
    model.enableInference({'predictedField': 'consumption'})
    headers = ['timestamp', 'consumption']
  
    # Now do a bunch of small load/train/save batches
    for _ in range(20):  

      for _ in range(2):
        record = [datetime.datetime(2013, 12, 12), numpy.random.uniform(100)]
        modelInput = dict(zip(headers, record))
        model.run(modelInput)
  
      # Save and load a checkpoint after each batch. Clean up.
      tmpBundleName = os.path.join(tmpDir, "test_checkpoint")
      self.assertIs(model.save(tmpBundleName), None, "Save command failed.")
      model = ModelFactory.loadFromCheckpoint(tmpBundleName)
      shutil.rmtree(tmpBundleName)
Example #20
    def clone(self):
        """
        Returns a deep copy of this object.
        @return: a deep copy of this object. 
        """
        tmpDirLocation = os.path.expanduser("~/buffer-htm")
        # Make sure the directory exists and it is empty
        if os.path.exists(tmpDirLocation):
            shutil.rmtree(tmpDirLocation)
        #os.makedirs(tmpDirLocation)

        # Save the model in the tmp folder
        self.model.save(tmpDirLocation)

        # load from tmp location
        newModel = ModelFactory.loadFromCheckpoint(tmpDirLocation)
        result = HTMWrapper()
        result.model = newModel
        return result
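
The clone() above always round-trips through a fixed ~/buffer-htm directory, so two concurrent clones would clobber each other. Below is a variation of the same save/load trick using a unique temporary directory; it is a sketch, not part of the original HTMWrapper code.

import os
import shutil
import tempfile

from nupic.frameworks.opf.modelfactory import ModelFactory


def clone_model(model):
    # model.save() creates the checkpoint bundle itself, so point it at a
    # not-yet-existing path inside a throwaway temp directory.
    tmp_root = tempfile.mkdtemp(prefix="buffer-htm-")
    try:
        bundle_dir = os.path.join(tmp_root, "model_save")
        model.save(bundle_dir)
        return ModelFactory.loadFromCheckpoint(bundle_dir)
    finally:
        shutil.rmtree(tmp_root)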
Example #21
def createModel(intersection):
    modelDir = getModelDir(intersection)
    if CACHE_MODELS and os.path.isdir(modelDir):
        # Read in the cached model
        print "Loading cached model for {} from {}...".format(intersection, modelDir)
        return ModelFactory.loadFromCheckpoint(modelDir)
    else:
        start = time.time()
        # redo the modelParams to use the actual sensor names
        modelParams = getModelParamsFromName('3001')

        sensor_counts = get_most_used_sensors(intersection)
        # try:
        #     pField = getSwarmConfig(intersection)['inferenceArgs']['predictedField']
        # except:
        #     print "Determining most used sensor for ", intersection
        try:
            counts = sensor_counts.most_common(1)
            if counts[0][1] == 0:
                return None
            else:
                pField = counts[0][0]
        except:
            return None
        print "Using", pField, "as predictedField for", intersection
        doc = readings_collection.find_one({'site_no': intersection})

        for k in doc['readings']:
            # don't model unused sensors
            # could run into errors when the sensor
            # was damaged for more than the sample period though
            if sensor_counts[k] == 0 or k != pField:
                continue
            modelParams['modelParams']['sensorParams']['encoders'][k] = get_sensor_encoder(k)

        for i in get_time_encoders():
            modelParams['modelParams']['sensorParams']['encoders'][i['name']] = i


        model = ModelFactory.create(modelParams)
        model.enableInference({'predictedField': pField})
        print "Creating model for {}, in {}s".format(intersection, time.time() - start)
        return model
Example #22
 def clone(self):
     """
     Returns a deep copy of this object.
     @return: a deep copy of this object. 
     """
     tmpDirLocation = os.path.expanduser("~/buffer-htm")
     # Make sure the directory exists and it is empty
     if os.path.exists(tmpDirLocation):
         shutil.rmtree(tmpDirLocation)
     #os.makedirs(tmpDirLocation)
     
     # Save the model in the tmp folder
     self.model.save(tmpDirLocation)
     
     # load from tmp location
     newModel = ModelFactory.loadFromCheckpoint(tmpDirLocation)
     result = HTMWrapper()
     result.model = newModel
     return result
Example #23
def createModel(intersection):
    modelDir = getModelDir(intersection)
    if CACHE_MODELS and os.path.isdir(modelDir):
        # Read in the cached model
        print "Loading cached model for {} from {}...".format(intersection, modelDir)
        return ModelFactory.loadFromCheckpoint(modelDir)
    else:
        start = time.time()
        # redo the modelParams to use the actual sensor names
        modelParams = getModelParamsFromName("3001")

        sensor_counts = get_most_used_sensors(intersection)
        # try:
        #     pField = getSwarmConfig(intersection)['inferenceArgs']['predictedField']
        # except:
        #     print "Determining most used sensor for ", intersection
        try:
            counts = sensor_counts.most_common(1)
            if counts[0][1] == 0:
                return None
            else:
                pField = counts[0][0]
        except:
            return None
        print "Using", pField, "as predictedField for", intersection
        doc = readings_collection.find_one({"site_no": intersection})

        for k in doc["readings"]:
            # don't model unused sensors
            # could run into errors when the sensor
            # was damaged for more than the sample period though
            if sensor_counts[k] == 0 or k != pField:
                continue
            modelParams["modelParams"]["sensorParams"]["encoders"][k] = get_sensor_encoder(k)

        for i in get_time_encoders():
            modelParams["modelParams"]["sensorParams"]["encoders"][i["name"]] = i

        model = ModelFactory.create(modelParams)
        model.enableInference({"predictedField": pField})
        print "Creating model for {}, in {}s".format(intersection, time.time() - start)
        return model
Example #24
def createModel(intersection):
    modelDir = getModelDir(intersection)
    if CACHE_MODELS and os.path.isdir(modelDir):
        # Read in the cached model
        print "Loading cached model for {} from {}...".format(intersection, modelDir)
        return ModelFactory.loadFromCheckpoint(modelDir)
    else:
        start = time.time()
        # redo the modelParams to use the actual sensor names
        modelParams = getModelParamsFromName('3001')

        sensor_counts = get_most_used_sensors(intersection)
        # try:
        #     pField = getSwarmConfig(intersection)['inferenceArgs']['predictedField']
        # except:
        #     print "Determining most used sensor for ", intersection
        try:
            counts = sensor_counts.most_common(1)
            if counts[0][1] == 0:
                return None
            else:
                pField = counts[0][0]
        except:
            return None
        print "Using", pField, "as predictedField for", intersection
        location = locations_collection.find_one({'intersection_number': intersection})

        for k in location['sensors']:
            modelParams['modelParams']['sensorParams']['encoders'][k] = get_sensor_encoder(k)

        for i in get_time_encoders():
            modelParams['modelParams']['sensorParams']['encoders'][i['name']] = i


        model = ModelFactory.create(modelParams)
        model.enableInference({'predictedField': pField})
        print "Creating model for {}, in {}s".format(intersection, time.time() - start)
        return model
Example #25
    def testCheckpoint(self):
        tmpDir = tempfile.mkdtemp()
        model = ModelFactory.create(MODEL_PARAMS)
        model.enableInference({'predictedField': 'consumption'})
        headers = ['timestamp', 'consumption']

        # Now do a bunch of small load/train/save batches
        for _ in range(20):

            for _ in range(2):
                record = [
                    datetime.datetime(2013, 12, 12),
                    numpy.random.uniform(100)
                ]
                modelInput = dict(zip(headers, record))
                model.run(modelInput)

            # Save and load a checkpoint after each batch. Clean up.
            tmpBundleName = os.path.join(tmpDir, "test_checkpoint")
            self.assertIs(model.save(tmpBundleName), None,
                          "Save command failed.")
            model = ModelFactory.loadFromCheckpoint(tmpBundleName)
            shutil.rmtree(tmpBundleName)
Example #26
def runModel(gymName, plot=False, load=False):
    """
    Assumes the gymName corresponds to both a like-named model_params file in the
    model_params directory, and that the data exists in a like-named CSV file in
    the current directory.
    :param gymName: Important for finding model params and input CSV file
    :param plot: Plot in matplotlib? Don't use this unless matplotlib is
    installed.
    """

    if load:
        print "Loading model from %s..." % MODEL_DIR
        model = ModelFactory.loadFromCheckpoint(MODEL_DIR)
        model.disableLearning()

        f = open(DATA_DIR + "/learning_list.txt")
        filename = f.readline().strip()

        inputData = "%s/%s.csv" % (DATA_DIR, filename)
        runIoThroughNupic(inputData, model, filename, plot, load)

    else:
        print "Creating model from %s..." % gymName
        model = createModel(getModelParamsFromName(gymName))
        model.enableLearning()

        # read learning file list from learning_list.txt
        f = open(DATA_DIR + "/learning_list.txt")
        files = f.readlines()
        f.close()
        for file in files:
            model.resetSequenceStates()
            filename = file.strip()
            print(filename)
            inputData = "%s/%s.csv" % (DATA_DIR, filename)
            model = runIoThroughNupic(inputData, model, filename, plot, load)
Example #27
def runModel(gymName, plot=False, load=False):
    """
    Assumes the gymName corresponds to both a like-named model_params file in the
    model_params directory, and that the data exists in a like-named CSV file in
    the current directory.
    :param gymName: Important for finding model params and input CSV file
    :param plot: Plot in matplotlib? Don't use this unless matplotlib is
    installed.
    """

    if load:
        print "Loading model from %s..." % MODEL_DIR
        model = ModelFactory.loadFromCheckpoint(MODEL_DIR)
        model.disableLearning()

        f = open(DATA_DIR + "/learning_list.txt")
        filename = f.readline().strip()

        inputData = "%s/%s.csv" % (DATA_DIR, filename)
        runIoThroughNupic(inputData, model, filename, plot, load)

    else:
        print "Creating model from %s..." % gymName
        model = createModel(getModelParamsFromName(gymName))
        model.enableLearning()

        # read learning file list from learning_list.txt
        f = open(DATA_DIR + "/learning_list.txt")
        files = f.readlines()
        f.close()
        for file in files:
            model.resetSequenceStates()
            filename = file.strip()
            print(filename)
            inputData = "%s/%s.csv" % (DATA_DIR, filename)
            model = runIoThroughNupic(inputData, model, filename, plot, load)
Example #28
    inp.seek(0)
    next(inp)
    if CLUSTERING:
        print "Max. Long: %s ; Min. Long: %s\n" % str(maxLongitude) % str(
            minLongitude)
        print "Max. Lat: %s ; Min. Lat: %s\n" % str(maxLatitude) % str(
            minLatitude)

# FIFO
events = reversed(events)

if PREDICT: import PREDICTmodel_params as model_params

else: import model_params as model_params

if LOAD: model = ModelFactory.loadFromCheckpoint(MODELSTATE)
else: model = ModelFactory.create(model_params.MODEL_PARAMS)
if VISUALIZE: Patcher().patchCLAModel(model)
model.enableInference({"predictedField": "event"})
print "Model created!\n"

# Get the Model-Classes:
anomalyLikelihood = AnomalyLikelihood()
if PREDICT:
    from nupic.data.inference_shifter import InferenceShifter
    shifter = InferenceShifter()

if WINDOWSIZE is not None:
    AnomalyScores = deque(numpy.ones(WINDOWSIZE), maxlen=WINDOWSIZE)
else:
    AnomalyScores = deque()  # numpy.ones(len(events)), maxlen=len(events) ?
Example #29
def _runExperimentImpl(options, model=None):
    """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: reference to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
    jsonhelpers.validate(options.privateOptions, schemaDict=g_parsedPrivateCommandLineOptionsSchema)

    # Load the experiment's description.py module
    experimentDir = options.experimentDir
    descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(experimentDir)
    expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(descriptionPyModule)

    # Handle "list checkpoints" request
    if options.privateOptions["listAvailableCheckpoints"]:
        _printAvailableCheckpoints(experimentDir)
        return None

    # Load experiment tasks
    experimentTasks = expIface.getModelControl().get("tasks", [])

    # If the tasks list is empty, and this is a nupic environment description
    # file being run from the OPF, convert it to a simple OPF description file.
    if len(experimentTasks) == 0 and expIface.getModelControl()["environment"] == OpfEnvironment.Nupic:
        expIface.convertNupicEnvToOPF()
        experimentTasks = expIface.getModelControl().get("tasks", [])

    # Handle listTasks
    if options.privateOptions["listTasks"]:
        print "Available tasks:"

        for label in [t["taskLabel"] for t in experimentTasks]:
            print "\t", label

        return None

    # Construct the experiment instance
    if options.privateOptions["runCheckpointName"]:

        assert model is None

        checkpointName = options.privateOptions["runCheckpointName"]

        model = ModelFactory.loadFromCheckpoint(savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName))

    elif model is not None:
        print "Skipping creation of OPFExperiment instance: caller provided his own"
    else:
        modelDescription = expIface.getModelDescription()
        model = ModelFactory.create(modelDescription)

    # Handle "create model" request
    if options.privateOptions["createCheckpointName"]:
        checkpointName = options.privateOptions["createCheckpointName"]
        _saveModel(model=model, experimentDir=experimentDir, checkpointLabel=checkpointName)

        return model

    # Build the task list

    # Default task execution index list is in the natural list order of the tasks
    taskIndexList = range(len(experimentTasks))

    customTaskExecutionLabelsList = options.privateOptions["taskLabels"]
    if customTaskExecutionLabelsList:
        taskLabelsList = [t["taskLabel"] for t in experimentTasks]
        taskLabelsSet = set(taskLabelsList)

        customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

        assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), (
            "Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r."
        ) % (customTaskExecutionLabelsSet - taskLabelsSet, customTaskExecutionLabelsList)

        taskIndexList = [taskLabelsList.index(label) for label in customTaskExecutionLabelsList]

        print "#### Executing custom task list: %r" % [taskLabelsList[i] for i in taskIndexList]

    # Run all experiment tasks
    for taskIndex in taskIndexList:

        task = experimentTasks[taskIndex]

        # Create a task runner and run it!
        taskRunner = _TaskRunner(model=model, task=task, cmdOptions=options)
        taskRunner.run()
        del taskRunner

        if options.privateOptions["checkpointModel"]:
            _saveModel(model=model, experimentDir=experimentDir, checkpointLabel=task["taskLabel"])

    return model
Example #30
def _runExperimentImpl(options, model=None):
  """Creates and runs the experiment

  Args:
    options: namedtuple ParseCommandLineOptionsResult
    model: For testing: may pass in an existing OPF Model instance
        to use instead of creating a new one.

  Returns: reference to OPFExperiment instance that was constructed (this
      is provided to aid with debugging) or None, if none was
      created.
  """
  jsonhelpers.validate(options.privateOptions,
                       schemaDict=g_parsedPrivateCommandLineOptionsSchema)

  # Load the experiment's description.py module
  experimentDir = options.experimentDir
  descriptionPyModule = opfhelpers.loadExperimentDescriptionScriptFromDir(
      experimentDir)
  expIface = opfhelpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)

  # Handle "list checkpoints" request
  if options.privateOptions['listAvailableCheckpoints']:
    _printAvailableCheckpoints(experimentDir)
    return None

  # Load experiment tasks
  experimentTasks = expIface.getModelControl().get('tasks', [])

  # If the tasks list is empty, and this is a nupic environment description
  # file being run from the OPF, convert it to a simple OPF description file.
  if (len(experimentTasks) == 0 and
      expIface.getModelControl()['environment'] == OpfEnvironment.Nupic):
    expIface.convertNupicEnvToOPF()
    experimentTasks = expIface.getModelControl().get('tasks', [])

  # Ensures all the source locations are either absolute paths or relative to
  # the nupic.datafiles package_data location.
  expIface.normalizeStreamSources()

  # Handle listTasks
  if options.privateOptions['listTasks']:
    print "Available tasks:"

    for label in [t['taskLabel'] for t in experimentTasks]:
      print "\t", label

    return None

  # Construct the experiment instance
  if options.privateOptions['runCheckpointName']:

    assert model is None

    checkpointName = options.privateOptions['runCheckpointName']

    model = ModelFactory.loadFromCheckpoint(
          savedModelDir=_getModelCheckpointDir(experimentDir, checkpointName))

  elif model is not None:
    print "Skipping creation of OPFExperiment instance: caller provided his own"
  else:
    modelDescription = expIface.getModelDescription()
    model = ModelFactory.create(modelDescription)

  # Handle "create model" request
  if options.privateOptions['createCheckpointName']:
    checkpointName = options.privateOptions['createCheckpointName']
    _saveModel(model=model,
               experimentDir=experimentDir,
               checkpointLabel=checkpointName)

    return model

  # Build the task list

  # Default task execution index list is in the natural list order of the tasks
  taskIndexList = range(len(experimentTasks))

  customTaskExecutionLabelsList = options.privateOptions['taskLabels']
  if customTaskExecutionLabelsList:
    taskLabelsList = [t['taskLabel'] for t in experimentTasks]
    taskLabelsSet = set(taskLabelsList)

    customTaskExecutionLabelsSet = set(customTaskExecutionLabelsList)

    assert customTaskExecutionLabelsSet.issubset(taskLabelsSet), \
           ("Some custom-provided task execution labels don't correspond "
            "to actual task labels: mismatched labels: %r; actual task "
            "labels: %r.") % (customTaskExecutionLabelsSet - taskLabelsSet,
                              customTaskExecutionLabelsList)

    taskIndexList = [taskLabelsList.index(label) for label in
                     customTaskExecutionLabelsList]

    print "#### Executing custom task list: %r" % [taskLabelsList[i] for
                                                   i in taskIndexList]

  # Run all experiment tasks
  for taskIndex in taskIndexList:

    task = experimentTasks[taskIndex]

    # Create a task runner and run it!
    taskRunner = _TaskRunner(model=model,
                             task=task,
                             cmdOptions=options)
    taskRunner.run()
    del taskRunner

    if options.privateOptions['checkpointModel']:
      _saveModel(model=model,
                 experimentDir=experimentDir,
                 checkpointLabel=task['taskLabel'])

  return model
Example #31
def run():

    print "loading models ..."
    start = time.time()

    models = {}
    for motor in MOTOR:

        if motor not in models:
            models[motor] = {}

        for channel in CHANNELS:
            modelDir = "%s/%s_%s_%s" % (MODEL_PARENT_DIR, MODEL_PREFIX, motor, channel)
            model = ModelFactory.loadFromCheckpoint(modelDir)
            models[motor][channel] = model
            print "loaded model %s" % modelDir

    end = time.time()
    print "loaded model in % s" % (end - start)

    if SMOOTH:
        inputDataDir = "../preprocessing/smoothed_data"
    else:
        inputDataDir = "../preprocessing/formatted_data"


    # keeps track of the 3 different likelihoods per movement
    likelihoods = {}
    for motor in MOTOR:
        likelihoods[motor] = anomaly_likelihood.AnomalyLikelihood(0,1)


    for movementToClassify in MOTOR:
        for channel in CHANNELS:
            print ""
            print "Incoming EGG signal on channel %s (Patient is in the '%s' phase)" % (channel, movementToClassify)
            print "-> Let's classify this EEG signal!"

            # try with all 3 models
            averageAnomalyScores = []
            anomalyLikelihoods = []
            anomalyCount = 0
            for movement in MOTOR:

                # load model from checkpoint
                print "   * Trying with %s model : %s" % (channel, movement)
                model = models[movementToClassify][channel]
                model.enableInference({"predictedField": "channel_value"})

                # disable learning
                model.disableLearning()

                # keep track of anomaly scores
                anomalyScores = []

                # Stream data
                inputFile = "%s/%s_%s.csv" % (inputDataDir, movement, channel)
                with open(inputFile, "rb") as input:
                    csvReader = csv.reader(input)

                    # skip 3 header rows
                    for i in range (0,3):
                        csvReader.next()

                    # stream in the data
                    rowCount = 0
                    for row in csvReader:

                        if rowCount < 100:

                            channel_value = float(row[0])
                            result = model.run({"channel_value": channel_value})
                            anomalyScore = result.inferences['anomalyScore']
                            anomalyScores.append(anomalyScore)

                            # Compute the Anomaly Likelihood
                            timestamp = datetime.datetime.now()
                            likelihood = likelihoods[movement].anomalyProbability(channel_value, anomalyScore, timestamp)
                            print "likelihood: %s" % likelihood

                            # compute the log likelihood
                            logLikelihood = likelihoods[movement].computeLogLikelihood(likelihood)
                            anomalyLikelihoods.append(logLikelihood)
                            print "loglikelihood: %s" % logLikelihood

                            # count anomalies
                            if logLikelihood > 0.9999:
                                anomalyCount += 1
                                print "anomaly count : %s " % anomalyCount


                            # just stream a bit of points
                            rowCount += 1



                print "     ... streamed %s data points" % rowCount
                averageAnomalyScore = sum(anomalyScores) / len(anomalyScores)
                averageAnomalyScores.append(averageAnomalyScore)
                print "     ... anomaly scores : %s" % anomalyScores
                print "     ... Average anomaly score : %s" % averageAnomalyScore

            # find out who had the lowest anomaly score. This is the prediction.
            # TODO: I should do that with the anomaly likelihood instead
            minAnomalyScore = min(averageAnomalyScores)
            predictedMovement = MOTOR[averageAnomalyScores.index(minAnomalyScore)]
            print "   ==> Predicted movement : %s" % predictedMovement
Example #32
"""Utility script for checking the in-memory size of a model checkpoint."""

import resource
import sys
import time

from nupic.frameworks.opf.modelfactory import ModelFactory



if __name__ == "__main__":
  helpMessage = "Usage: check_model_mem_size.py <path/to/checkpointDir>"
  if len(sys.argv) != 2:
    print helpMessage
    sys.exit(-1)
  elif sys.argv[1].strip() == "--help":
    print helpMessage
    sys.exit()

  originalMemory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
  print "Original memory usage: ", originalMemory
  models = []
  for _ in xrange(10):
    s = time.time()
    models.append(ModelFactory.loadFromCheckpoint(sys.argv[1].strip()))
    print "Loaded model in %f seconds" % (time.time() - s)
    diff = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss - originalMemory
    average = float(diff) / len(models)
    print "Using %i bytes for %i models (%f average)." % (diff, len(models), average)
Example #33
  next(inp)

# LIFO - important for learning the correct sequences!
events = reversed(events)

# Cluster the events:
if CLUSTERING: 
  from clustering import cluster, scipy_cluster
  events, minLatitudes, maxLatitudes, minLongitudes, maxLongitudes = cluster(events)


if PREDICT: import PREDICTmodel_params as model_params

else: import TESTmodel_params as model_params

if LOAD: model = ModelFactory.loadFromCheckpoint(MODELSTATE)  
else: model = ModelFactory.create(model_params.MODEL_PARAMS)

if VISUALIZE: Patcher().patchCLAModel(model)

model.enableInference({"predictedField": "event"}) # Predict not only event but also scalar! TODO
# model.enableInference({"predictedField": "scalar"})
# model.enableInference({"predictedField": "timestamp"})
print "Model created!\n"

# Get the Model-Classes:
if PREDICT: 
  from nupic.data.inference_shifter import InferenceShifter
  shifter = InferenceShifter()

anomalyLikelihood = AnomalyLikelihood()
Example #34
# ----------------------------------------------------------------------
"""Utility script for checking the in-memory size of a model checkpoint."""

import resource
import sys
import time

from nupic.frameworks.opf.modelfactory import ModelFactory

if __name__ == "__main__":
    helpMessage = "Usage: check_model_mem_size.py <path/to/checkpointDir>"
    if len(sys.argv) != 2:
        print helpMessage
        sys.exit(-1)
    elif sys.argv[1].strip() == "--help":
        print helpMessage
        sys.exit()

    originalMemory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    print "Original memory usage: ", originalMemory
    models = []
    for _ in xrange(10):
        s = time.time()
        models.append(ModelFactory.loadFromCheckpoint(sys.argv[1].strip()))
        print "Loaded model in %f seconds" % (time.time() - s)
        diff = resource.getrusage(
            resource.RUSAGE_SELF).ru_maxrss - originalMemory
        average = float(diff) / len(models)
        print "Using %i bytes for %i models (%f average)." % (
            diff, len(models), average)
Example #35
def runGame():
  global games
  global yards
  global slopesize
  global slopewidth
  global slopewidthmin
  global slopewidthmax
  global variablewidth
  global savefile
  tree = "|"
  skier = "H"
  minpadding = 0
  maxpadding = slopesize - slopewidth
  #choicelist_drift = [-2,-1,0,1,2]
  #choicelist_width = [-2,0,2]
  choicelist_drift = [-1,0,1]
  choicelist_width = [-1,0,1]

  games = games + 1
  change = 0
  padding = 14
  skierposition = (padding + (slopewidth/2))

  # See if we have a saved model to load
  if os.path.exists(savefile):
    print "Loading game history from " + savefile
    model = ModelFactory.loadFromCheckpoint(savefile)
  else:
    print "Creating new game model"
    model = createModel()

  model.enableInference({'predictionSteps': [1], 'predictedField': 'pos', 'numRecords': 4000})
  inf_shift = InferenceShifter()

  # - Train on a perfect run
  print
  print "================================= Start Training ================================="
  print
  for i in xrange(slopesize - slopewidth):
    record = print_slopeline_perfect(i,tree,skier,slopewidth)
    result = inf_shift.shift(model.run(record))

  while i > 0:
    record = print_slopeline_perfect(i,tree,skier,slopewidth)
    result = inf_shift.shift(model.run(record))
    i = i - 1

  while i < padding:
    record = print_slopeline_perfect(i,tree,skier,slopewidth)
    result = inf_shift.shift(model.run(record))
    i = i + 1

  for i in xrange(_NUM_RECORDS):
    yards = yards + 1
    if (variablewidth):
        change = generate_random(choicelist_width)
        slopewidth = slopewidth + change
        if slopewidth > slopewidthmax:
            slopewidth = slopewidthmax
        if slopewidth < slopewidthmin:
            slopewidth = slopewidthmin

    drift = generate_random(choicelist_drift)
    padding = padding + drift
    if padding > maxpadding:
        padding = maxpadding
    if padding < minpadding:
        padding = minpadding

    padding = padding - (change/2)
    if padding < 0:
        padding = 0
    if padding + slopewidth > slopesize:
        padding = slopesize - slopewidth

    record = print_slopeline_perfect(padding,tree,skier,slopewidth)

    result = inf_shift.shift(model.run(record))

  # - Then set it free to run on its own
#  model.disableLearning()
  print
  print "=================================== Begin Game ==================================="
  print
  yards = 0
  padding = 14
  skierposition = (padding + (slopewidth/2))
  while True:
    yards = yards + 1
    if (variablewidth):
        change = generate_random(choicelist_width)
        slopewidth = slopewidth + change
        if slopewidth > slopewidthmax:
            slopewidth = slopewidthmax
        if slopewidth < slopewidthmin:
            slopewidth = slopewidthmin

    drift = generate_random(choicelist_drift)
    padding = padding + drift
    if padding > maxpadding:
        padding = maxpadding
    if padding < minpadding:
        padding = minpadding

    padding = padding - (change/2)
    if padding < 0:
        padding = 0
    if padding + slopewidth > slopesize:
        padding = slopesize - slopewidth

    record = print_slopeline(padding,tree,skier,slopewidth,skierposition)
    if ((skierposition - padding) < 1) or ((skierposition - padding) > slopewidth):
        break

    model.save(savefile)

    result = inf_shift.shift(model.run(record))
    #inferred = result.inferences['multiStepPredictions'][1]
    #predicted = sorted(inferred.items(), key=lambda x: x[1])[-1][0]
    predicted = 0.0
    total_probability = 0.0
    for key, value in result.inferences['multiStepPredictions'][1].iteritems():
        predicted += float(key) * float(value)
        total_probability += float(value)
    predicted = predicted / total_probability

    skierposition = calc_skier_position(skierposition, predicted)

    # reload if we made a bad prediction
    perfect = padding + slopewidth/2
    if abs(perfect - skierposition) > 2:
      model = ModelFactory.loadFromCheckpoint(savefile)
Example #36
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This script is main module of this repository.
# Predict next word's POS in the sentence.
#

import nltk
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.data.inference_shifter import InferenceShifter


model = ModelFactory.loadFromCheckpoint("model/")
model.disableLearning()
shifter = InferenceShifter()


def predictPOS(target_str):
    pos_list = nltk.pos_tag(nltk.word_tokenize(target_str))
    ret = []
    for row in pos_list:
        model_input = {"token": row[1]}
        result = shifter.shift(model.run(model_input))
        dic = result.inferences["multiStepPredictions"][1]
        if isinstance(dic, dict):
            if row[1] in dic:
                ret.append((row[0], row[1], dic[row[1]]))
            else:
                ret.append((row[0], row[1], 0.0))
        else:
            ret.append((row[0], row[1], 0.0))
    return ret
Example #37
def resurrect_model(saved_model):
  return ModelFactory.loadFromCheckpoint(saved_model)
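
Paired with model.save(), this one-liner completes a persistence round-trip. A sketch, assuming a trained model object; the path is illustrative and save() expects a fresh bundle directory.

model.save("/tmp/my_model_save")              # checkpoint a trained model
model = resurrect_model("/tmp/my_model_save")
model.disableLearning()                       # typical for inference-only replay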
Example #38
 def load_model(self, name):
     name = os.path.abspath(name)
     self.model = ModelFactory.loadFromCheckpoint(name)
Example #39
def resurrect_model(saved_model):
  return ModelFactory.loadFromCheckpoint(saved_model)
Example #40
import cmd
import logging
import os
from nupic.data.datasethelpers import findDataset
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
from urls import loadTlds
from urls import parseUrl
import datetime
import texttable


scriptDir = os.path.dirname(os.path.realpath(__file__))
_LOGGER = logging.getLogger(__name__)
_DATA_PATH = os.path.join(scriptDir, "..", "data", "sanitized.csv")
_MODEL_PATH = os.path.join(scriptDir, "..", "savedModel", "checkpoint")
model = ModelFactory.loadFromCheckpoint(_MODEL_PATH)
model.disableLearning()

tlds = loadTlds()


class UrlShell(cmd.Cmd):
    intro = 'Enter URL to predict next hostname.\n'
    prompt = 'url> '
    file = None

    def default(self, line):
        processOneUrl(line)


def processOneUrl(url):
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
Example #41
    def __init__(self, name, admin_in, admin_out, sensor_spec, sensors_dir,
                 sensor_in, store, swarm):
        threading.Thread.__init__(self)
        #self.config = config
        self.sensor_in = sensor_in
        self.store = store
        self.swarm = swarm
        self.name = name
        self.brain_available = False
        threading.Thread.__init__(self)
        Sensor.__init__(self,
                        name=name,
                        admin_in=admin_in,
                        admin_out=admin_out,
                        sensor_spec=sensor_spec,
                        sensors_dir=sensors_dir)
        swarm_config_path = sensors_dir + sensor_in + '/stores/' + store + '/swarms/' + swarm + '/'
        #store_path = sensors_dir  + sensor_in +'/stores/' + store + '/out.csv'
        #model = ModelFactory.loadFromCheckpoint('/home/hans/cortical_one_var/sensors/cpu/stores/store_3/swarms/swarm_1/model_save')

        print swarm_config_path

        #load original swarm config file
        with open(swarm_config_path + 'swarm_config.json') as json_file:
            self.swarm_config = json.load(json_file)
            print(self.swarm_config)

        self.swarm_config_ng = SwarmConfig(self.swarm_config)

        print self.swarm_config_ng.get_predicted_field()

        #if there is a 'brain', then take the existing brain
        self.possible_brain_path = str(swarm_config_path + 'model_save')
        if os.path.exists(self.possible_brain_path):
            possible_brain_2 = '/home/hans/cortical_one_var/sensors/cpu/stores/store_3/swarms/swarm_1/model_save'
            print "load existing brain..."
            print self.possible_brain_path
            #model = ModelFactory.loadFromCheckpoint(possible_brain_2)
            model = ModelFactory.loadFromCheckpoint(self.possible_brain_path)
            #use this case to add the availability of a 'brain' (???!!!) to your announcement

        else:

            #load model configuration
            model = ModelFactory.create(
                getModelParamsFromFileNG(swarm_config_path))

            #configure prediction
            model.enableInference(
                {"predictedField": self.swarm_config_ng.get_predicted_field()})

        self.connection_sensor_in = stomp.Connection()
        self.connection_sensor_in.set_listener(
            name=self.name,
            lstnr=AbstractSensorListener(self.name,
                                         topic='/topic/' + self.sensor_in,
                                         config=self.swarm_config_ng,
                                         model=model))
        self.connection_sensor_in.start()
        self.connection_sensor_in.connect(self.user, self.password, wait=True)
        #self.connection_sensor_in.connect('admin', 'password', wait=True)
        self.abstract_listener = self.connection_sensor_in.get_listener(
            name=self.name)
        self.connection_sensor_in.subscribe(destination='/topic/' +
                                            self.sensor_in,
                                            id=2,
                                            ack='auto')

        self.values = []

        self.self_announcement()
Example #42
def run():

    print "loading models ..."
    start = time.time()

    models = {}
    for motor in MOTOR:

        if motor not in models:
            models[motor] = {}

        for channel in CHANNELS:
            modelDir = "%s/%s_%s_%s" % (MODEL_PARENT_DIR, MODEL_PREFIX, motor,
                                        channel)
            model = ModelFactory.loadFromCheckpoint(modelDir)
            models[motor][channel] = model
            print "loaded model %s" % modelDir

    end = time.time()
    print "loaded model in % s" % (end - start)

    if SMOOTH:
        inputDataDir = "../preprocessing/smoothed_data"
    else:
        inputDataDir = "../preprocessing/formatted_data"

    # keeps track of the 3 different likelihoods per movement
    likelihoods = {}
    for motor in MOTOR:
        likelihoods[motor] = anomaly_likelihood.AnomalyLikelihood(0, 1)

    for movementToClassify in MOTOR:
        for channel in CHANNELS:
            print ""
            print "Incoming EGG signal on channel %s (Patient is in the '%s' phase)" % (
                channel, movementToClassify)
            print "-> Let's classify this EEG signal!"

            # try with all 3 models
            averageAnomalyScores = []
            anomalyLikelihoods = []
            anomalyCount = 0
            for movement in MOTOR:

                # load model from checkpoint
                print "   * Trying with %s model : %s" % (channel, movement)
                model = models[movementToClassify][channel]
                model.enableInference({"predictedField": "channel_value"})

                # disable learning
                model.disableLearning()

                # keep track of anomaly scores
                anomalyScores = []

                # Stream data
                inputFile = "%s/%s_%s.csv" % (inputDataDir, movement, channel)
                with open(inputFile, "rb") as input:
                    csvReader = csv.reader(input)

                    # skip 3 header rows
                    for i in range(0, 3):
                        csvReader.next()

                    # stream in the data
                    rowCount = 0
                    for row in csvReader:

                        if rowCount < 100:

                            channel_value = float(row[0])
                            result = model.run(
                                {"channel_value": channel_value})
                            anomalyScore = result.inferences['anomalyScore']
                            anomalyScores.append(anomalyScore)

                            # Compute the Anomaly Likelihood
                            timestamp = datetime.datetime.now()
                            likelihood = likelihoods[
                                movement].anomalyProbability(
                                    channel_value, anomalyScore, timestamp)
                            print "likelihood: %s" % likelihood

                            # compute the log likelihood
                            logLikelihood = likelihoods[
                                movement].computeLogLikelihood(likelihood)
                            anomalyLikelihoods.append(logLikelihood)
                            print "loglikelihood: %s" % logLikelihood

                            # count anomalies
                            if logLikelihood > 0.9999:
                                anomalyCount += 1
                                print "anomaly count : %s " % anomalyCount

                            # just stream a bit of points
                            rowCount += 1

                print "     ... streamed %s data points" % rowCount
                averageAnomalyScore = sum(anomalyScores) / len(anomalyScores)
                averageAnomalyScores.append(averageAnomalyScore)
                print "     ... anomaly scores : %s" % anomalyScores
                print "     ... Average anomaly score : %s" % averageAnomalyScore

            # find out who had the lowest anomaly score. This is the prediction.
            # TODO: I should do that with the anomaly likelihood instead
            minAnomalyScore = min(averageAnomalyScores)
            predictedMovement = MOTOR[averageAnomalyScores.index(
                minAnomalyScore)]
            print "   ==> Predicted movement : %s" % predictedMovement
Example #43
import sys
sys.path.append('model_0')
import model_params
from nupic.frameworks.opf.modelfactory import ModelFactory
from scrape_data import getPlayersOnline
import time
import datetime
from Queue import Queue
import csv


predictionSteps = 15
# load prev model
if len(sys.argv) >= 2 and sys.argv[1] != "None":
	print "loading old model"
	model = ModelFactory.loadFromCheckpoint(sys.argv[1])
else:
	model = ModelFactory.create(model_params.MODEL_PARAMS)
	model.enableInference({'predictedField': 'players'})
# load initialization data
if len(sys.argv) >= 3 and sys.argv[2] != "None":
	print "initializing model with data"
	f = open(sys.argv[2], 'r')
	csvReader = csv.reader(f)
	# skip header rows
	csvReader.next()
	csvReader.next()
	csvReader.next()
	count = 0
	for row in csvReader:
		if count % 100 == 0: