Example #1
import argparse
import os

import torch

import NetworkModel  # project module providing the models, datasets, and test()


def main():
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='Train on a given dataset to recognize pitch')
    parser.add_argument('-i', dest='inputData', type=str, help='Input data set file path', default='')
    parser.add_argument('-m', dest='modelPath', type=str, help='Set the model path', default='model.pth')
    parser.add_argument('-mt', dest='modelClass', type=str, help='Set the model class', default='Model1')
    parser.add_argument('-R', dest='loadDataToRam', action='store_true', help='Preload all data into RAM to speed up training')
    parser.add_argument('-C', dest='chunkDataToRam', action='store_true', help='Load chunked data of size -M (in MB) into RAM to speed up training')
    parser.add_argument('-p', dest='plotOnly', action='store_true', help='Only plot all layer outputs instead of evaluating precision in testing. Good for debugging')
    parser.add_argument('-M', dest='memoryLimit', type=int, help='Load chunked data of this size (in MB) into RAM to speed up training', default=2048)
    parser.add_argument('-r', dest='learningRate', type=float, help='Set the learning rate', default=0.001)
    parser.add_argument('-v', dest='validationSplit', type=float, help='Set the validation split', default=0.1)
    parser.add_argument('-e', dest='epochs', type=int, help='Set the number of epochs', default=20)
    parser.add_argument('-g', dest='gpu', action='store_true', help='Run training on the GPU')
    parser.add_argument('-w', dest='windowStep', type=int, help='Set the window step to use when performing the STFT', default=4000)
    parser.add_argument('-o', dest='output', type=str, help='Set the output path', default='model.pth')

    options = parser.parse_args()

    # Error check
    if options.inputData == '':
        print("No input given. BYE!\n")
        return 1
    elif not os.path.isdir(options.inputData):
        print (f"Given input path {options.inputData} does not exist!")
        return 2

    if options.gpu:
        device = torch.device('cuda')
        print("Using device", torch.cuda.get_device_name(0))
    else:
        device = torch.device('cpu')
        print("Using device CPU")

    if options.modelClass == 'Model1':
        modelClass = NetworkModel.Model1
    elif options.modelClass == 'Model2':
        modelClass = NetworkModel.Model2
    elif options.modelClass == 'Model4':
        modelClass = NetworkModel.Model4
    else:
        # Without this guard an unknown class name would leave modelClass
        # unbound and the NetworkModel.test() call below would raise NameError.
        print(f"Unknown model class {options.modelClass}!")
        return 3

    if options.loadDataToRam:
        datasetClass = NetworkModel.NSynthRamLoadedDataSet
    elif options.chunkDataToRam:
        datasetClass = NetworkModel.NSynthChunkedDataSet
    else:
        datasetClass = NetworkModel.NSynthDataSet

    NetworkModel.test(
        root_dir=options.inputData,
        model_class=modelClass,
        dataset_class=datasetClass,
        memoryLimitInMB=options.memoryLimit,
        device=device,
        plotOnly=options.plotOnly,
        load_path=options.modelPath,
        windowStep=options.windowStep
    )

    return 0
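For completeness, a minimal entry point for this script could look like the sketch below; the __main__ guard is an assumption, only main() itself comes from the example above.

import sys

if __name__ == '__main__':
    # Hypothetical entry point: propagate main()'s return code to the shell.
    sys.exit(main())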
Example #2
def initModels():
	global model1
	model1.dataSource = NetworkModel.getOrCreateDataSource()
	# Load the previously saved network from disk
	model1.network = Network("network1.nta")
	# Re-attach the data source to the record sensor region
	model1.network.regions[_RECORD_SENSOR].dataSource = model1.dataSource
Example #3
    def cross_valid_and_fit_model(self, df, model, epochs):
        nm = NetworkModel()
        n_splits = 2
        n_times = 5
        acc_sum = 0
        f1_sum = 0

        X = df.drop(columns=['value'])
        Y = df['value'].values
        # sklearn treats the np.random module as the global RandomState singleton
        kf = StratifiedKFold(n_splits=n_splits, random_state=np.random, shuffle=True)

        for i in range(n_times):
            for train_index, test_index in kf.split(X,Y):
                X_train = X.iloc[train_index]
                X_test = X.iloc[test_index]
                y_train, y_test = Y[train_index], Y[test_index]

                X_train, y_train = balance_dataset(X_train, y_train)

                # Note: the same model instance is re-fitted on every fold, so
                # weights carry over between folds instead of starting fresh.
                model_history = model.fit(X_train,
                                          y_train,
                                          epochs=epochs,
                                          validation_data=(X_test, y_test),
                                          verbose=1)
                acc_sum += model_history.history['val_acc'][-1]
                f1_sum += model_history.history['val_f1_score'][-1]
                filename = datetime.now().strftime('%H_%M_%S')
                nm.create_learning_plots(model_history, filename)
                nm.create_confusion_matrix(model, X_train, y_train, filename)

        return acc_sum / (n_splits * n_times), f1_sum / (n_splits * n_times)
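The balance_dataset helper is not shown in the excerpt; a minimal sketch using naive random oversampling (an assumption, the original may balance differently):

import numpy as np

def balance_dataset(X, y):
    # Hypothetical stand-in: oversample every class up to the majority count.
    classes, counts = np.unique(y, return_counts=True)
    target = counts.max()
    idx = np.concatenate([
        np.random.choice(np.where(y == c)[0], size=target, replace=True)
        for c in classes
    ])
    np.random.shuffle(idx)
    return X.iloc[idx], y[idx]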
Example #4
def runModel(jsonData):
    global model1
    global model2
    global model3

    print("RunMethod" + str(jsonData))
    cpuMetric = json.loads(jsonData)['cpu']
    memMetric = json.loads(jsonData)['mem']
    nothing = ['None', 'null', None]

    if (cpuMetric in nothing or memMetric in nothing):
        return

    cpuMetric = float(cpuMetric)
    memMetric = float(memMetric)

    start = datetime.datetime.now()
    actualVal, predictions, errorVal, anomalyScore = NetworkModel.runNetwork(
        model1.network, model1.dataSource, cpuMetric, False, False)
    end = datetime.datetime.now()
    elapsed = end - start
    # Note: .microseconds only counts the sub-second part of the delta,
    # so this timing assumes each call completes in under a second.
    model1.outputFile.write(
        str(actualVal) + "|" + str(predictions) + "|" + str(errorVal) + "|" +
        str(anomalyScore) + "|" + str(elapsed.microseconds / 1000) + "\n")
    model1.outputFile.flush()

    start = datetime.datetime.now()
    actualVal, predictions, errorVal, anomalyScore = MultiLevelNetworkModel.runNetwork(
        model2.network, model2.dataSource, cpuMetric, False)
    end = datetime.datetime.now()
    elapsed = end - start
    model2.outputFile.write(
        str(actualVal) + "|" + str(predictions) + "|" + str(errorVal) + "|" +
        str(anomalyScore) + "|" + str(elapsed.microseconds / 1000) + "\n")
    model2.outputFile.flush()

    start = datetime.datetime.now()
    actualVal, anomalyScore = MultiLevelNetworkAnomaly.runNetwork(
        model3.network, model3.dataSource, cpuMetric, memMetric, False)
    end = datetime.datetime.now()
    elapsed = end - start
    model3.outputFile.write(
        str(actualVal) + "|" + str(anomalyScore) + "|" +
        str(elapsed.microseconds / 1000) + "\n")
    model3.outputFile.flush()
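A hypothetical invocation of runModel(), assuming initModels() below has been called first; the metric values are made up, only the cpu and mem keys come from the code above:

import json

sample = json.dumps({'cpu': '42.5', 'mem': '63.1'})
runModel(sample)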
Example #5
def initModels():
    global model1
    model1.dataSource, model1.network = NetworkModel.BuildNetwork()
    model1.outputFile = open("model1.txt", "w+")
    model1.outputFile.write(
        "actualVal|predictions|avgError|anomalyScore|time(ms)\n")
    model1.outputFile.flush()

    global model2
    model2.dataSource, model2.network = MultiLevelNetworkModel.BuildNetwork()
    model2.outputFile = open("model2.txt", "w+")
    model2.outputFile.write(
        "actualVal|predictions|avgError|anomalyScore|time(ms)\n")
    model2.outputFile.flush()

    global model3
    model3.dataSource, model3.network = MultiLevelNetworkAnomaly.BuildNetwork()
    model3.outputFile = open("model3.txt", "w+")
    model3.outputFile.write("actualVal|anomalyScore|time(ms)\n")
    model3.outputFile.flush()
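The three output files are never closed in the excerpt; a matching hypothetical teardown could be:

def closeModels():
    # Hypothetical counterpart to initModels(): close the log files.
    for m in (model1, model2, model3):
        m.outputFile.close()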
Example #6
def analysis():
    # Presumably registered as a Flask route accepting POST in the original app.
    if request.method == 'POST':
        result = request.form
        val = NetworkModel.analyzeText(result.get('message'))
        # Join the predicted tags into a comma-separated string
        taglist = ", ".join(val['tag list'])
        percentile = ""
        if val['percents'][0] == 'abv75p':
            percentile = "top 25%"
        elif val['percents'][0] == 'abv25p':
            percentile = "bottom 50%"
        elif val['percents'][0] == 'abv50p':
            percentile = "top 50%"
        else:
            percentile = "lower 25%"
        ls = [
            'Tag_Finance', 'Tag_Analytics', 'Tag_Company', 'Tag_Hospitality',
            'Tag_National', 'Tag_Healthcare'
        ]
        images = []
        for i in ls:
            if i in val['tag list']:
                images.append(i.split('_')[1] + ".png")
        tagacc = str(round(val['tag acc'] * 100, 2)) + "%"
        peracc = str(round(val['percentile acc'] * 100, 2)) + "%"
        return render_template("pages/analysis.html",
                               images=images,
                               similars=val['similars'],
                               taglist=taglist,
                               perlist=percentile,
                               tagacc=tagacc,
                               peracc=peracc)
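The structure of NetworkModel.analyzeText()'s return value, as inferred from the keys accessed above (all concrete values here are made up):

# Hypothetical return value of NetworkModel.analyzeText(), for illustration.
val = {
    'tag list': ['Tag_Finance', 'Tag_Analytics'],  # predicted tags
    'percents': ['abv75p'],                        # percentile bucket code
    'similars': ['doc_17', 'doc_42'],              # similar items (made up)
    'tag acc': 0.87,                               # rendered as a percentage in the template
    'percentile acc': 0.74,                        # rendered as a percentage in the template
}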
Example #7
def runModel(jsonData, printflag):
	global model1
	global model2
	global model3
	global predictionList
	global rcount
	
	cpuSLOViolationPredictions = 0
	AnomalyScoreViolation = 0

	# Parse the JSON payload once and pull out all fields
	data = json.loads(jsonData)
	cpuMetric = data['cpu']
	memMetric = data['mem']
	violation = data['violations']
	avgresponse = data['mean']

	nothing = ['None', 'null', None]
	if cpuMetric in nothing or memMetric in nothing:
		return
	
	cpuMetric = float(cpuMetric)
	memMetric = float(memMetric)
	violation = int(violation)
	avgresponse = int(avgresponse)
	if avgresponse >= _SLO_RESPONSE_TIME:
		violation = 1
	
	rcount += 1
	anomalywindow = list()
	
	
	actualVal, predictions, errorVal, anomalyScore = NetworkModel.runNetwork(model1.network, model1.dataSource, cpuMetric, True, True)
	anomalywindow.append(anomalyScore)

	# Find the future anomaly scores with learning mode OFF
	for prediction in predictions:
		_, _, _, anomalyScore = NetworkModel.runNetwork(model1.network, model1.dataSource, cpuMetric, False, False)
		anomalywindow.append(anomalyScore)

	# Count how many scores in the window exceed the anomaly threshold
	for anomaly in anomalywindow:
		if anomaly > _ANOMALY_SCORE:
			AnomalyScoreViolation += 1

	# Raise an alarm only when the anomaly condition is satisfied
	if AnomalyScoreViolation > 0 and rcount > 1:
		if printflag:
			raiseAlarm(1, rcount)
		predictionList.append([-1, 'A', rcount, 'TP', 0])
	else:
		predictionList.append([-1, 'N', rcount, 'TN'])
		timenow = int(time.time())
	
	if violation > 0:
		processpredictionList('A', rcount)
		timenow = int(time.time())
		if printflag:
			print("SLO violation at " + str(timenow) + " for record number " + str(rcount))
	else:
		processpredictionList('N', rcount)
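raiseAlarm() and processpredictionList() are defined elsewhere in the module; a minimal hypothetical stand-in for raiseAlarm(), only so the control flow above can be exercised:

import time

def raiseAlarm(level, record):
    # Hypothetical stand-in; the original implementation is not shown.
    print("ALARM (level %d) at %d for record number %d" % (level, int(time.time()), record))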