Example #1
def run_sine_experiment():
    input_file = "sine.csv"
    generate_data.run(input_file)
    model_params = swarm_over_data()
    if PLOT:
        output = NuPICPlotOutput("sine_output", show_anomaly_score=True)
    else:
        output = NuPICFileOutput("sine_output", show_anomaly_score=True)
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "sine"})

    with open(input_file, "rb") as sine_input:
        csv_reader = csv.reader(sine_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            angle = float(row[0])
            sine_value = float(row[1])
            result = model.run({"sine": sine_value})
            output.write(angle, sine_value, result, prediction_step=1)

    output.close()
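
All of the examples in this section lean on the same module-level scaffolding that is not shown: data generation, swarming, and output helpers from the NuPIC tutorial repositories. Below is a minimal sketch of that setup, assuming NuPIC on Python 2; generate_data, nupic_output, SWARM_CONFIG, PLOT, and DATE_FORMAT are local tutorial helpers and constants rather than NuPIC APIs, and the exact values and import paths vary between the repositories these examples come from.

import csv
import datetime

# NuPIC imports (Python 2); the ModelFactory module path differs slightly across NuPIC versions.
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.swarming import permutations_runner

# Local tutorial helpers assumed by the examples (not part of NuPIC itself).
import generate_data                                        # writes the input CSV
from nupic_output import NuPICFileOutput, NuPICPlotOutput   # CSV / matplotlib writers
from swarm_description import SWARM_CONFIG                  # swarm description dict

PLOT = False
DATE_FORMAT = "%m/%d/%y %H:%M"  # assumed; must match the timestamps in the generated CSV


def swarm_over_data(swarm_config=SWARM_CONFIG):
    # Run a swarm over the generated data and return the best model parameters.
    # Some of the examples below pass SWARM_CONFIG explicitly, hence the default argument.
    return permutations_runner.runWithConfig(
        swarm_config, {"maxWorkers": 4, "overwrite": True})
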
Example #2
def run_netio_experiment():
    input_file = "netio.csv"
    generate_data.run(input_file)
    model_params = swarm_over_data()
    if PLOT:
        output = NuPICPlotOutput("netio_output", show_anomaly_score=True)
    else:
        output = NuPICFileOutput("netio_output", show_anomaly_score=True)
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "bytes_sent"})

    with open(input_file, "rb") as netio_input:
        csv_reader = csv.reader(netio_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:

            timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
            bytes_sent = float(row[1])

            #netio = float(row[3])

            result = model.run({"bytes_sent": bytes_sent})
            output.write(timestamp, bytes_sent, result, prediction_step=1)

    output.close()
Example #3
def run_mem_experiment():
    input_file = "cpu.csv"
    generate_data.run(input_file)
    model_params = swarm_over_data(SWARM_CONFIG)
    if PLOT:
        output = NuPICPlotOutput("final_mem_output")
    else:
        output = NuPICFileOutput("final_mem_output")
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "mem"})

    with open(input_file, "rb") as mem_input:
        csv_reader = csv.reader(mem_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
            mem = float(row[1])
            result = model.run({"mem": mem})
            prediction = result.inferences["multiStepBestPredictions"][1]
            anomalyScore = result.inferences['anomalyScore']
            output.write(timestamp, mem, prediction, anomalyScore)

    output.close()
Example #4
def run_cpu_experiment():
  cur_dir = os.getcwd()
  input_file = os.path.join(cur_dir, 'cpu/cpu.csv')
  cpu_generate_data.run(input_file)
  model_params = swarm_over_data(SWARM_CONFIG)
  model = ModelFactory.create(model_params)
  model.enableInference({"predictedField": "cpu"})
  # To load a previously saved model with no swarming:
  # model = ModelFactory.create(model_params)

  if PLOT:
    output = NuPICPlotOutput("cpu/final_cpu_output")
  else:
    output = NuPICFileOutput("cpu/final_cpu_output")

  with open(input_file, "rb") as cpu_input:
    csv_reader = csv.reader(cpu_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()

    # the real data
    sumOfUtilityFitness=0.0
    sumOfWeaight = 0.0 
    for row in csv_reader:
      timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
      cpu = float(row[1])
      result = model.run({"cpu": cpu})
      prediction = result.inferences["multiStepBestPredictions"][1]
      anomalyScore = result.inferences['anomalyScore']
      anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(cpu, anomalyScore, timestamp)
      # uc is computed here but not used later in this example
      uc = (anomalyLikelihood * cpu + prediction * anomalyLikelihood) / (anomalyScore + anomalyLikelihood)
      sumOfUtilityFitness = sumOfUtilityFitness + (float(cpu) * float(anomalyLikelihood))
      sumOfWeaight = sumOfWeaight + float(anomalyLikelihood)
      output.write(timestamp, cpu, prediction, anomalyScore)

  output.close()

  print 'sumOfWeaight: ', sumOfWeaight, 'sumOfUtilityFitness: ', sumOfUtilityFitness
  result_output = 'cpu/final_cpu_output_out.csv'
  utilityOfCpu=0.0
  with open(result_output, "rb") as result_input:
    csv_reader = csv.reader(result_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()
    for row in csv_reader:
      anomalyLikelihood = float(row[3])
      utilityOfCpu= utilityOfCpu + (anomalyLikelihood * sumOfUtilityFitness)/sumOfWeaight
    print 'utilityOfCpu: ', utilityOfCpu

  move_model()
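
Several of the examples (#4, #6, #9, #11) call anomalyLikelihoodHelper.anomalyProbability(...) without showing where the helper comes from. A minimal sketch, assuming it is NuPIC's AnomalyLikelihood class created once at module level:

from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood

# One shared helper; it accumulates the raw anomaly scores seen so far and
# converts each new score into a likelihood in [0, 1] based on their recent distribution.
anomalyLikelihoodHelper = AnomalyLikelihood()

# Inside the row loop:
# anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
#     metric_value, anomalyScore, timestamp)
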
Example #5
def run_model(input_file, output_file, model, train=False, plot=True):
    if plot:
        output = NuPICPlotOutput(output_file, show_anomaly_score=True)
    else:
        output = NuPICFileOutput(output_file, show_anomaly_score=True)

    if train:
        print "Enable learning."
        model.enableLearning()
    else:
        print "Not for training. Disabling learning."
        model.disableLearning()

    with open(input_file, "rb") as eggs_input:
        csv_reader = csv.reader(eggs_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            twelvth_beat = float(row[0])
            pitch_value = float(row[1])
            result = model.run({"pitch": pitch_value})
            output.write(twelvth_beat, pitch_value, result, prediction_step=1)

    output.close()
Example #6
def run_cpu_experiment():
    input_file = "cpu.csv"
    #generate_data.run(input_file)
    #model_params = swarm_over_data()
    attempt = 0
    if PLOT:
        output = NuPICPlotOutput("final_cpu_output")
    else:
        output = NuPICFileOutput("final_cpu_output")
    #model = ModelFactory.create(model_params)
    model = ModelFactory.create(model_params.MODEL_PARAMS)
    model.enableInference({"predictedField": "cpu"})
    adapter = 0

    for row in range(1, 300):
        s = time.strftime(DATE_FORMAT)
        timestamp = datetime.datetime.strptime(s, DATE_FORMAT)
        #timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
        cpu1 = psutil.cpu_percent(interval=1)
        cpu = float(cpu1)
        result = model.run({"cpu": cpu})

        prediction = result.inferences["multiStepBestPredictions"][1]
        anomalyScore = result.inferences['anomalyScore']
        anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
            cpu, anomalyScore, timestamp)
        print 'anomalyLikelihood: ', anomalyLikelihood

        if anomalyScore > 0.75:
            print "anomalyScore is high: ", 'anomalyScore: ', str(
                anomalyScore
            ), 'anomalyLikelihood: ', anomalyLikelihood, " CPU@: ", cpu, " steps: ", str(
                adapter)
            adapter = adapter + 20
            if adapter >= 300:
                run_adaptation_strategy(attempt, cpu, anomalyLikelihood)
                attempt += 1
                adapter = 0
                print "reset timer for new adaptation action"
        else:
            print "anomalyScore is not high: ", 'anomalyScore: ', str(
                anomalyScore
            ), 'anomalyLikelihood: ', anomalyLikelihood, " CPU@: ", cpu, " steps: ", str(
                adapter)
            run_adaptation_strategy(attempt, cpu, anomalyLikelihood)
            attempt += 1

        #with open("/tmp/output.log", "w") as loutput:
        #subprocess.call("docker service scale web=1", shell=True, stdout=loutput, stderr=loutput)
        #output.write(timestamp, cpu, prediction, anomalyScore)
        try:
            plt.pause(SECONDS_PER_STEP)
        except Exception:
            pass
Example #7
def run_experiment():
    generate_data.run()
    swarm_over_data()
    import model_params
    model = ModelFactory.create(model_params.MODEL_PARAMS)
    model.enableInference({"predictedField": "sine"})
    output = NuPICPlotOutput("sine_out", show_anomaly_score=True)

    with open("sine.csv", "rb") as sine_input:
        csv_reader = csv.reader(sine_input)

        # Skip headers
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # Real Data
        for row in csv_reader:
            angle = float(row[0])
            sine_value = float(row[1])

            result = model.run({"sine": sine_value})
            output.write(angle, sine_value, result)

    output.close()
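
Unlike the other examples, Example #7 calls swarm_over_data() purely for its side effect and then imports model_params as a module. A sketch of that variant, under the same assumptions as the setup above (the swarm writes the winning parameters to model_params.py, which the experiment then imports):

import pprint
from nupic.swarming import permutations_runner
from swarm_description import SWARM_CONFIG  # local swarm description (assumed)


def swarm_over_data():
    # Swarm, then dump the winning parameters to model_params.py so the
    # experiment can `import model_params` afterwards.
    params = permutations_runner.runWithConfig(
        SWARM_CONFIG, {"maxWorkers": 4, "overwrite": True})
    with open("model_params.py", "w") as params_file:
        params_file.write("MODEL_PARAMS = \\\n%s\n" % pprint.pformat(params, indent=2))
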
Example #8
def run_sine_experiment():
  input_file = "sine.csv"
  generate_data.run(input_file)
  model_params = swarm_over_data()
  if PLOT:
    output = NuPICPlotOutput("sine_output", show_anomaly_score=True)
  else:
    output = NuPICFileOutput("sine_output", show_anomaly_score=True)
  model = ModelFactory.create(model_params)
  model.enableInference({"predictedField": "sine"})

  with open(input_file, "rb") as sine_input:
    csv_reader = csv.reader(sine_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()

    # the real data
    for row in csv_reader:
      angle = float(row[0])
      sine_value = float(row[1])
      result = model.run({"sine": sine_value})
      output.write(angle, sine_value, result, prediction_step=1)

  output.close()
Example #9
def run_mem_experiment():
    cur_dir = os.getcwd()
    input_file = os.path.join(cur_dir, 'mem/mem.csv')
    print 'input_file:', input_file
    #mem_generate_data.run(input_file)
    model_params = swarm_over_data()
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "mem"})
    out_file = 'mem/final_mem.csv'
    if PLOT:
        output = NuPICPlotOutput(out_file)
    else:
        output = NuPICFileOutput(out_file)
    with open(input_file, "rb") as mem_input:
        csv_reader = csv.reader(mem_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        sumOfUtilityFitness = 0.0
        sumOfWeaight = 0.0
        for row in csv_reader:
            timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
            mem = float(row[1])
            result = model.run({"mem": mem})
            prediction = result.inferences["multiStepBestPredictions"][1]
            anomalyScore = result.inferences['anomalyScore']
            anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
                mem, anomalyScore, timestamp)
            sumOfUtilityFitness = sumOfUtilityFitness + (
                float(mem) * float(anomalyLikelihood))
            sumOfWeaight = sumOfWeaight + float(anomalyLikelihood)
            output.write(timestamp, mem, prediction, anomalyScore)

    output.close()
    print 'sumOfWeaight: ', sumOfWeaight, 'sumOfUtilityFitness: ', sumOfUtilityFitness
    result_output = 'mem/final_mem_out.csv'
    utilityOfmem = 0.0
    with open(result_output, "rb") as result_input:
        csv_reader = csv.reader(result_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()
        for row in csv_reader:
            anomalyLikelihood = float(row[3])
            utilityOfmem = utilityOfmem + (anomalyLikelihood *
                                           sumOfUtilityFitness) / sumOfWeaight
        print 'utilityOfmem: ', utilityOfmem
    move_model()
Example #10
def run_model(input_file, output_file, model, train=False, plot=True):
  if plot:
    output = NuPICPlotOutput(output_file, show_anomaly_score=True)
  else:
    output = NuPICFileOutput(output_file, show_anomaly_score=True)

  if train:
    print "Enable learning."
    model.enableLearning()
  else:
    print "Not for training. Disabling learning."
    model.disableLearning()

  with open(input_file, "rb") as eggs_input:
    csv_reader = csv.reader(eggs_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()

    # the real data
    for row in csv_reader:
      twelvth_beat = float(row[0])
      pitch_value = float(row[1])
      result = model.run({"pitch": pitch_value})
      output.write(twelvth_beat, pitch_value, result, prediction_step=1)

  output.close()
Example #11
def run_disk_experiment():
    input_file = "disk/disk.csv"
    disk_generate_data.run(input_file)
    disk_params = swarm_over_data()
    model_disk = ModelFactory.create(disk_params)
    model_disk.enableInference({"predictedField": "disk"})
    if PLOT:
        output = NuPICPlotOutput("disk/final_disk_output")
    else:
        output = NuPICFileOutput("disk/final_disk_output")
    with open(input_file, "rb") as disk_input:
        csv_reader = csv.reader(disk_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        sumOfUtilityFitness = 0.0
        sumOfWeaight = 0.0
        for row in csv_reader:
            timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
            disk = float(row[1])
            result_disk = model_disk.run({"disk": disk})
            prediction = result_disk.inferences["multiStepBestPredictions"][1]
            anomalyScore = result_disk.inferences['anomalyScore']
            anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
                disk, anomalyScore, timestamp)
            sumOfUtilityFitness = sumOfUtilityFitness + (
                float(disk) * float(anomalyLikelihood))
            sumOfWeaight = sumOfWeaight + float(anomalyLikelihood)
            output.write(timestamp, disk, prediction, anomalyScore)

    output.close()
    print 'sumOfWeaight: ', sumOfWeaight, 'sumOfUtilityFitness: ', sumOfUtilityFitness
    result_output = 'disk/final_disk_output_out.csv'
    utilityOfDisk = 0.0
    with open(result_output, "rb") as result_input:
        csv_reader = csv.reader(result_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()
        for row in csv_reader:
            anomalyLikelihood = float(row[3])
            utilityOfDisk = utilityOfDisk + (
                anomalyLikelihood * sumOfUtilityFitness) / sumOfWeaight
        print 'utilityOfDisk: ', utilityOfDisk
    move_model()
Example #12
def run_mem_experiment():
  input_file = "mem.csv"
  #generate_data.run(input_file)
  mem_params = swarm_over_data(SWARM_CONFIG)
  model_mem = ModelFactory.create(mem_params)
  model_mem.enableInference({"predictedField": "mem"})


  if PLOT:
    output = NuPICPlotOutput("final_mem_output")
  else:
    output = NuPICFileOutput("final_mem_output")
  
  with open(input_file, "rb") as mem_input:
    csv_reader = csv.reader(mem_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()

    # the real data
    sumOfUtilityFitness = 0.0
    sumOfWeaight = 0.0
    for row in csv_reader:
      timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
      mem = float(row[1])
      result_mem = model_mem.run({"mem": mem})
      prediction = result_mem.inferences["multiStepBestPredictions"][1]
      anomalyScore = result_mem.inferences['anomalyScore']
      anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
          mem, anomalyScore, timestamp)
      output.write(timestamp, mem, prediction, anomalyScore)
      sumOfUtilityFitness = sumOfUtilityFitness + (float(mem) * float(anomalyLikelihood))
      sumOfWeaight = sumOfWeaight + float(anomalyLikelihood)

  output.close()
  print 'sumOfWeaight: ', sumOfWeaight, 'sumOfUtilityFitness: ', sumOfUtilityFitness
  result_output = 'final_mem_output_out.csv'  # assumed name of the CSV written by NuPICFileOutput above
  utilityOfmem = 0.0
  with open(result_output, "rb") as result_input:
    csv_reader = csv.reader(result_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()
    for row in csv_reader:
      anomalyLikelihood = float(row[3])
      utilityOfmem = utilityOfmem + (anomalyLikelihood * sumOfUtilityFitness)/sumOfWeaight
    print 'utilityOfmem: ', utilityOfmem