def run_netio_experiment():
    input_file = "netio.csv"
    generate_data.run(input_file)
    model_params = swarm_over_data()
    if PLOT:
        output = NuPICPlotOutput("netio_output", show_anomaly_score=True)
    else:
        output = NuPICFileOutput("netio_output", show_anomaly_score=True)
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "bytes_sent"})

    with open(input_file, "rb") as netio_input:
        csv_reader = csv.reader(netio_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
            bytes_sent = float(row[1])
            result = model.run({"bytes_sent": bytes_sent})
            output.write(timestamp, bytes_sent, result, prediction_step=1)

    output.close()
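These listings all assume a shared preamble that the excerpts omit. A plausible sketch (module paths vary across NuPIC releases, and every name here is an assumption rather than the repositories' actual header):

# Sketch of the omitted shared preamble (assumed, not from the sources).
import csv
import datetime

from nupic.frameworks.opf.model_factory import ModelFactory

import generate_data  # project-local data generator
from nupic_output import NuPICFileOutput, NuPICPlotOutput  # project-local writers

DATE_FORMAT = "%Y-%m-%d %H:%M:%S"  # assumed; must match the CSV timestamps
PLOT = False                       # toggles plot vs. file output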
Example #2
def run_sine_experiment():
    input_file = "sine.csv"
    generate_data.run(input_file)
    model_params = swarm_over_data()
    if PLOT:
        output = NuPICPlotOutput("sine_output", show_anomaly_score=True)
    else:
        output = NuPICFileOutput("sine_output", show_anomaly_score=True)
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "sine"})

    with open(input_file, "rb") as sine_input:
        csv_reader = csv.reader(sine_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            angle = float(row[0])
            sine_value = float(row[1])
            result = model.run({"sine": sine_value})
            output.write(angle, sine_value, result, prediction_step=1)

    output.close()
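swarm_over_data is called throughout these examples but never shown. A minimal sketch, assuming NuPIC's permutations_runner API and a module-level SWARM_CONFIG swarm description (some examples pass the config explicitly, others rely on the global):

# Minimal sketch of the omitted swarm_over_data helper (assumed).
from nupic.swarming import permutations_runner

def swarm_over_data(swarm_config=None):
    # runWithConfig swarms over the generated CSV and returns the best
    # model params it finds
    return permutations_runner.runWithConfig(
        swarm_config or SWARM_CONFIG,
        {"maxWorkers": 4, "overwrite": True})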
Example #3
def run_mem_experiment():
    input_file = "cpu.csv"
    generate_data.run(input_file)
    model_params = swarm_over_data(SWARM_CONFIG)
    if PLOT:
        output = NuPICPlotOutput("final_mem_output")
    else:
        output = NuPICFileOutput("final_mem_output")
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "mem"})

    with open(input_file, "rb") as sine_input:
        csv_reader = csv.reader(sine_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
            mem = float(row[1])
            result = model.run({"mem": mem})
            prediction = result.inferences["multiStepBestPredictions"][1]
            anomalyScore = result.inferences['anomalyScore']
            output.write(timestamp, mem, prediction, anomalyScore)

    output.close()
Example #4
def run_cpu_experiment():
  input_file = "cpu.csv"
  cur_dir = os.getcwd()
  input_file = os.path.join(cur_dir,'cpu/cpu.csv')
  cpu_generate_data.run(input_file)
  model_params = swarm_over_data(SWARM_CONFIG)
  model = ModelFactory.create(model_params)
  model.enableInference({"predictedField": "cpu"})
  if PLOT:
    output = NuPICPlotOutput("cpu/final_cpu_output")
  else:
    output = NuPICFileOutput("cpu/final_cpu_output")

  with open(input_file, "rb") as cpu_input:
    csv_reader = csv.reader(cpu_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()

    # the real data
    sumOfUtilityFitness = 0.0
    sumOfWeight = 0.0
    for row in csv_reader:
      timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
      cpu = float(row[1])
      result = model.run({"cpu": cpu})
      prediction = result.inferences["multiStepBestPredictions"][1]
      anomalyScore = result.inferences['anomalyScore']
      anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(cpu, anomalyScore, timestamp)
      # likelihood-weighted running sums used by the utility pass below
      sumOfUtilityFitness = sumOfUtilityFitness + (float(cpu) * float(anomalyLikelihood))
      sumOfWeight = sumOfWeight + float(anomalyLikelihood)
      output.write(timestamp, cpu, prediction, anomalyScore)

  output.close()

  print 'sumOfWeight: ', sumOfWeight, 'sumOfUtilityFitness: ', sumOfUtilityFitness
  result_output = 'cpu/final_cpu_output_out.csv'
  utilityOfCpu=0.0
  with open(result_output, "rb") as result_input:
    csv_reader = csv.reader(result_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()
    for row in csv_reader:
      anomalyLikelihood = float(row[3])
      utilityOfCpu = utilityOfCpu + (anomalyLikelihood * sumOfUtilityFitness)/sumOfWeight
    print 'utilityOfCpu: ', utilityOfCpu

  move_model()
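Example #4 and the mem/disk variants below call a module-level anomalyLikelihoodHelper that none of the listings construct. It is presumably an instance of NuPIC's AnomalyLikelihood class, whose anomalyProbability(value, anomalyScore, timestamp) method matches the calls above:

# Assumed construction of the shared helper; AnomalyLikelihood ships with NuPIC.
from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood

anomalyLikelihoodHelper = AnomalyLikelihood()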
Example #5
def run_model(input_file, output_file, model, train=False, plot=True):
    if plot:
        output = NuPICPlotOutput(output_file, show_anomaly_score=True)
    else:
        output = NuPICFileOutput(output_file, show_anomaly_score=True)

    if train:
        print "Enable learning."
        model.enableLearning()
    else:
        print "Not for training. Disabling learning."
        model.disableLearning()

    with open(input_file, "rb") as eggs_input:
        csv_reader = csv.reader(eggs_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            twelfth_beat = float(row[0])
            pitch_value = float(row[1])
            result = model.run({"pitch": pitch_value})
            output.write(twelfth_beat, pitch_value, result, prediction_step=1)

    output.close()
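Unlike the other examples, run_model takes an already-built model, so it can replay a previously saved one with learning switched off. A hypothetical call site (the checkpoint directory and file names are illustrative; loadFromCheckpoint is NuPIC's API for restoring a serialized OPF model):

# Hypothetical usage of run_model (paths made up for illustration).
model = ModelFactory.loadFromCheckpoint("model_save")
run_model("eggs.csv", "eggs_output", model, train=False, plot=False)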
Example #6
def run_mem_experiment():
    cur_dir = os.getcwd()
    input_file = os.path.join(cur_dir, 'mem/mem.csv')
    #mem_generate_data.run(input_file)
    print 'input_file:', input_file
    model_params = swarm_over_data()
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "mem"})
    out_file = 'mem/final_mem.csv'
    if PLOT:
        output = NuPICPlotOutput(out_file)
    else:
        output = NuPICFileOutput(out_file)
    with open(input_file, "rb") as sine_input:
        csv_reader = csv.reader(sine_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        sumOfUtilityFitness = 0.0
        sumOfWeight = 0.0
        for row in csv_reader:
            timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
            mem = float(row[1])
            result = model.run({"mem": mem})
            prediction = result.inferences["multiStepBestPredictions"][1]
            anomalyScore = result.inferences['anomalyScore']
            anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
                mem, anomalyScore, timestamp)
            sumOfUtilityFitness = sumOfUtilityFitness + (
                float(mem) * float(anomalyLikelihood))
            sumOfWeight = sumOfWeight + float(anomalyLikelihood)
            output.write(timestamp, mem, prediction, anomalyScore)

    output.close()
    print 'sumOfWeight: ', sumOfWeight, 'sumOfUtilityFitness: ', sumOfUtilityFitness
    result_output = 'mem/final_mem_out.csv'
    utilityOfmem = 0.0
    with open(result_output, "rb") as result_input:
        csv_reader = csv.reader(result_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()
        for row in csv_reader:
            anomalyLikelihood = float(row[3])
            utilityOfmem = utilityOfmem + (anomalyLikelihood *
                                           sumOfUtilityFitness) / sumOfWeight
        print 'utilityOfmem: ', utilityOfmem
    move_model()
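The two-pass utility computation in Examples #4 and #6 (and in #10 and #11 below) reduces to a closed form: the first pass accumulates S = sum(value_i * likelihood_i) and W = sum(likelihood_i), and the second pass adds likelihood_j * S / W for every row of the results file. A hypothetical restatement (names are illustrative, not the repositories'):

# Sketch of what the post-processing loop computes: each likelihood is
# scaled by S / W, the likelihood-weighted mean of the observed values.
def utility(likelihoods, sum_of_utility_fitness, sum_of_weight):
    scale = sum_of_utility_fitness / sum_of_weight
    return sum(l * scale for l in likelihoods)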
Example #7
def run_cpu_experiment():
    input_file = "cpu.csv"
    #generate_data.run(input_file)
    #model_params = swarm_over_data()
    attempt = 0
    if PLOT:
        output = NuPICPlotOutput("final_cpu_output")
    else:
        output = NuPICFileOutput("final_cpu_output")
    #model = ModelFactory.create(model_params)
    model = ModelFactory.create(model_params.MODEL_PARAMS)
    model.enableInference({"predictedField": "cpu"})
    adapter = 0

    for _ in range(1, 300):
        s = time.strftime(DATE_FORMAT)
        timestamp = datetime.datetime.strptime(s, DATE_FORMAT)
        # sample live CPU utilization rather than replaying a CSV
        cpu = float(psutil.cpu_percent(interval=1))
        result = model.run({"cpu": cpu})

        prediction = result.inferences["multiStepBestPredictions"][1]
        anomalyScore = result.inferences['anomalyScore']
        anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
            cpu, anomalyScore, timestamp)
        print 'anomalyLikelihood: ', anomalyLikelihood

        if anomalyScore > 0.75:
            print "anomalyScore is high: ", 'anomalyScore: ', str(
                anomalyScore
            ), 'anomalyLikelihood: ', anomalyLikelihood, " CPU@: ", cpu, " steps: ", str(
                adapter)
            adapter = adapter + 20
            if adapter >= 300:
                run_adaptation_strategy(attempt, cpu, anomalyLikelihood)
                attempt += 1
                adapter = 0
                print "reset timer for new adaptation action"
        else:
            print "anomalyScore is high: ", 'anomalyScore: ', str(
                anomalyScore
            ), 'anomalyLikelihood: ', anomalyLikelihood, " CPU@: ", cpu, " steps: ", str(
                adapter)
            run_adaptation_strategy(attempt, cpu, anomalyLikelihood)
            attempt += 1

        #with open("/tmp/output.log", "w") as loutput:
        #subprocess.call("docker service scale web=1", shell=True, stdout=loutput, stderr=loutput)
        #output.write(timestamp, cpu, prediction, anomalyScore)
        try:
            plt.pause(SECONDS_PER_STEP)
        except Exception:
            pass
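Example #7 samples the machine live instead of replaying a CSV, so it leans on several module-level names the listing does not show. A plausible setup (every value here is an assumption):

# Assumed module-level setup for the live-CPU loop (illustrative).
import datetime
import time

import psutil                    # psutil.cpu_percent(interval=1) samples live CPU
import matplotlib.pyplot as plt  # plt.pause() paces the loop

from model_0 import model_params  # pre-swarmed params, as in Example #12

DATE_FORMAT = "%Y-%m-%d %H:%M:%S"  # assumed; only round-trips time.strftime
SECONDS_PER_STEP = 1               # pause between samples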
Example #8
def run_seism_experiment():
    input_file = "eQnoise.csv"
    generate_data.run(input_file)
    print("time is :%f secs" % ((clk.time() - t0) / 60.))
    model_params = swarm_over_data()
    print("time is :%f secs" % ((clk.time() - t0) / 60.))
    print(model_params)
    # plotting is disabled for this experiment; always write to file
    output = NuPICFileOutput("eQnoise_output", show_anomaly_score=True)
    print("time is :%f min" % ((clk.time() - t0) / 60.))
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "acc"})

    with open(input_file, "rb") as data_input:
        csv_reader = csv.reader(data_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            time = float(row[0])
            acc_value = float(row[1])
            result = model.run({"acc": acc_value})
            output.write(time, acc_value, result, prediction_step=PSTEPS)

    output.close()
    print("time is :%f min" % ((clk.time() - t0) / 60.))
Example #9
def run(fname="verylargeseism_out"):
    print("reading %s" % input_file)
    print("using this model: ", model_params)
    model = ModelFactory.create(model_params)
    model.enableInference({"predictedField": "acc"})
    print("created nupic model in :%f min" % ((clk.time() - t0) / 60.))
    print("will output predictions and anomalies \n in this file: %s" % fname)

    output = NuPICFileOutput(fname, show_anomaly_score=True)
    with open(input_file, "rb") as data_input:
        csv_reader = csv.reader(data_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            time = float(row[0])
            acc_value = float(row[1])
            result = model.run({"acc": acc_value})
            output.write(time, acc_value, result, prediction_step=PSTEPS)

    output.close()
    print("time is :%f min" % ((clk.time() - t0) / 60.))
Example #10
def run_disk_experiment():
    input_file = "disk/disk.csv"
    disk_generate_data.run(input_file)
    disk_params = swarm_over_data()
    model_disk = ModelFactory.create(disk_params)
    model_disk.enableInference({"predictedField": "disk"})
    if PLOT:
        output = NuPICPlotOutput("disk/final_disk_output")
    else:
        output = NuPICFileOutput("disk/final_disk_output")
    with open(input_file, "rb") as sine_input:
        csv_reader = csv.reader(sine_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        sumOfUtilityFitness = 0.0
        sumOfWeight = 0.0
        for row in csv_reader:
            timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
            disk = float(row[1])
            result_disk = model_disk.run({"disk": disk})
            prediction = result_disk.inferences["multiStepBestPredictions"][1]
            anomalyScore = result_disk.inferences['anomalyScore']
            anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(
                disk, anomalyScore, timestamp)
            sumOfUtilityFitness = sumOfUtilityFitness + (
                float(disk) * float(anomalyLikelihood))
            sumOfWeight = sumOfWeight + float(anomalyLikelihood)
            output.write(timestamp, disk, prediction, anomalyScore)

    output.close()
    print 'sumOfWeight: ', sumOfWeight, 'sumOfUtilityFitness: ', sumOfUtilityFitness
    result_output = 'disk/final_disk_output_out.csv'
    utilityOfDisk = 0.0
    with open(result_output, "rb") as result_input:
        csv_reader = csv.reader(result_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()
        for row in csv_reader:
            anomalyLikelihood = float(row[3])
            utilityOfDisk = utilityOfDisk + (
                anomalyLikelihood * sumOfUtilityFitness) / sumOfWeight
        print 'utilityOfDisk: ', utilityOfDisk
    move_model()
Example #11
def run_mem_experiment():
  input_file = "mem.csv"
  #generate_data.run(input_file)
  mem_params = swarm_over_data(SWARM_CONFIG)
  model_mem = ModelFactory.create(mem_params)
  model_mem.enableInference({"predictedField": "mem"})


  if PLOT:
    output = NuPICPlotOutput("final_mem_output")
  else:
    output = NuPICFileOutput("final_mem_output")
  
  with open(input_file, "rb") as sine_input:
    csv_reader = csv.reader(sine_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()

    # the real data
    sumOfUtilityFitness = 0.0
    sumOfWeight = 0.0
    for row in csv_reader:
      timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
      mem = float(row[1])
      result_mem = model_mem.run({"mem": mem})
      prediction = result_mem.inferences["multiStepBestPredictions"][1]
      anomalyScore = result_mem.inferences['anomalyScore']
      anomalyLikelihood = anomalyLikelihoodHelper.anomalyProbability(mem, anomalyScore, timestamp)
      output.write(timestamp, mem, prediction, anomalyScore)
      sumOfUtilityFitness = sumOfUtilityFitness + (float(mem) * float(anomalyLikelihood))
      sumOfWeight = sumOfWeight + float(anomalyLikelihood)

  output.close()
  print 'sumOfWeight: ', sumOfWeight, 'sumOfUtilityFitness: ', sumOfUtilityFitness
  result_output = 'final_mem_output_out.csv'  # file written by NuPICFileOutput above
  utilityOfmem = 0.0
  with open(result_output, "rb") as result_input:
    csv_reader = csv.reader(result_input)

    # skip header rows
    csv_reader.next()
    csv_reader.next()
    csv_reader.next()
    for row in csv_reader:
      anomalyLikelihood = float(row[3])
      utilityOfmem = utilityOfmem + (anomalyLikelihood * sumOfUtilityFitness)/sumOfWeight
    print 'utilityOfmem: ', utilityOfmem
Example #12
def predict():
    from model_0 import model_params
    input_file = "sine.csv"
    model = ModelFactory.create(model_params.MODEL_PARAMS)
    model.enableInference({"predictedField": "sine"})

    output = NuPICFileOutput("sine_output", show_anomaly_score=True)

    with open(input_file, "rb") as sine_input:
        csv_reader = csv.reader(sine_input)

        # skip header rows
        csv_reader.next()
        csv_reader.next()
        csv_reader.next()

        # the real data
        for row in csv_reader:
            angle = float(row[0])
            sine_value = float(row[1])
            result = model.run({"sine": sine_value})
            output.write(angle, sine_value, result, prediction_step=1)
    output.close()
    clusterA = copy.deepcopy(
        clusterAss[np.nonzero(clusterAss[:, 0] == clu)[0]])  # select the assignment rows belonging to this cluster
    minV = min(clusterA[:, 1])  # smallest distance within the cluster
    index = list(clusterA[:, 1]).index(minV)  # which share series has that smallest distance

    data = copy.deepcopy(Data.iloc[:, index].values)
    data = data[np.nonzero(data != 0)[0]]  # the swarm only considers non-zero data
    print('Data.columns: ', Data.columns)
    #time.sleep(6)
    paras = swarm(data, number=index, col=Data.columns[index])  # run the swarm
    print('paras: ', paras)  #best params
    import csv
    model = ModelFactory.create(paras)
    model.enableInference({"predictedField": "value"})

    output = NuPICFileOutput("output" + str(clu), show_anomaly_score=True)

    for i in range(Data.shape[1]):
        input_file = '/opt/share_code_data/' + str(Data.columns[i]) + '.csv'
        with open(input_file, "rb") as sine_input:
            csv_reader = csv.reader(sine_input)
            # skip header rows
            csv_reader.next()
            csv_reader.next()
            csv_reader.next()

            # the real data
            for row in csv_reader:
                timeS = row[0]
                value = float(row[6])