# D-Stream II training objects.
# Configure the stream clusterer: one continuous input, continuous output,
# with a fixed density grid over [0, 25] at 0.25 resolution.
trainer = DStreamII(
    complexity=0,
    numInputs=1,
    discreteOutputs=0,
    discreteInputs=0,
    appFieldsDict={
        'gridSize': [0.25],
        'gridUpperRange': [25],
        'gridLowerRange': [0],
    },
)

# Train to determine the grid size.
# NOTE(review): rows are kept as lists of strings (str.split() does no numeric
# conversion) — presumably DStreamII converts internally; confirm.
with open("trace.txt", mode='r') as fp:
    trainInputData = [line.split() for line in fp]
with open("trace_obs.txt", mode='r') as fp:
    trainOutputData = [line.split() for line in fp]

trainer.addBatchObservations(trainInputData, trainOutputData)
trainer.train()

inputData = []

# Execute the D-Stream II clustering algorithm on the streamed samples.
with open("power_use.txt") as fp:
    for line in fp:
        trainer.execute(line.split())

trainer.printClusters()
# Time the batch training step.
firstTS = time.time()
dStreamIITest.train()
secondTS = time.time()
dStreamIITimestamps["train"] = secondTS - firstTS

# Stream the held-out samples one at a time, timing each execute() call and
# recording a per-sample timestamp and error delta.
runningTotal = 0
for executeSample in range(numExecuteSamples):
    inputRow = next(inputReader)
    outputRow = next(outputReader)
    if (len(inputRow) > 0):
        input1 = float(inputRow[0])
        output = float(outputRow[0])
        firstTS = time.time()
        # NOTE(review): input2 is not defined anywhere in this fragment —
        # presumably assigned earlier in the file; confirm.
        dStreamIITest.execute([input1, input2])
        theor = output
        secondTS = time.time()
        dStreamIITimestamps["test" + str(executeSample)] = secondTS - firstTS
        # NOTE(review): theor is assigned from output just above, so this
        # delta is always 0.0 — looks like theor should hold a predicted
        # value instead; confirm intent.
        dStreamIITimestamps["delta" + str(executeSample)] = abs(output - theor)
        runningTotal += output

# NOTE(review): the original formatting was collapsed, so the loop extent is
# ambiguous; printClusters/avgActual are placed after the loop to match the
# duplicate of this fragment elsewhere in the file — confirm.
dStreamIITest.printClusters()
avgActual = runningTotal / (1.0 * numExecuteSamples)

# Time the encryption of the input file.
firstTS = time.time()
encryptedFile = dStreamIITest.encrypt(inputFilePath)
secondTS = time.time()
dStreamIITimestamps["encrypt"] = secondTS - firstTS

# Start of the next timed section (its end lies beyond this fragment).
firstTS = time.time()
'gridLowerRange': [0] }) trainInputData = [] trainOutputData = [] # Train to determine the grid size with open("trace.txt", mode='r') as fp: for line in fp: dataInfo = line.split() trainInputData.append(dataInfo) with open("trace_obs.txt", mode='r') as fp: for line in fp: dataInfo = line.split() trainOutputData.append(dataInfo) trainer.addBatchObservations(trainInputData, trainOutputData) trainer.train() inputData = [] #Execute D-Stream II clustering algorithm with open("power_use.txt") as fp: for line in fp: dataInfo = line.split() trainer.execute(dataInfo) trainer.printClusters()
# Time the batch training step.
firstTS = time.time()
dStreamIITest.train()
secondTS = time.time()
dStreamIITimestamps["train"] = secondTS - firstTS

# Stream the held-out samples one at a time, timing each execute() call and
# recording a per-sample timestamp and error delta.
runningTotal = 0
for executeSample in range(numExecuteSamples):
    inputRow = next(inputReader)
    outputRow = next(outputReader)
    if (len(inputRow) > 0):
        input1 = float(inputRow[0])
        output = float(outputRow[0])
        firstTS = time.time()
        # NOTE(review): input2 is not defined anywhere in this fragment —
        # presumably assigned earlier in the file; confirm.
        dStreamIITest.execute([input1, input2])
        theor = output
        secondTS = time.time()
        dStreamIITimestamps["test" + str(executeSample)] = secondTS - firstTS
        # NOTE(review): theor is assigned from output just above, so this
        # delta is always 0.0 — looks like theor should hold a predicted
        # value instead; confirm intent.
        dStreamIITimestamps["delta" + str(executeSample)] = abs(output - theor)
        runningTotal += output

dStreamIITest.printClusters()
avgActual = runningTotal / (1.0 * numExecuteSamples)

# Sum the per-sample load times recorded earlier in the run under the
# "load<i>" keys.
netLoadingTime = sum(
    dStreamIITimestamps["load" + str(i)] for i in range(numTrainingSamples)
)

# Accumulators used by code beyond this fragment.
netExecuteTime = 0
runningMAE = 0.0