def main():
    """Entry point: fine-tune a BERT token-classification model.

    Builds the label vocabulary, instantiates the pretrained model with a
    classifier head sized to that vocabulary, prepares the data loaders,
    and hands everything to the training loop.
    """
    # The slot→id mapping fixes the number of output labels.
    label_to_id = getSlot2Id()

    # Prefer GPU when available; fall back to CPU.
    run_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    classifier = BertForTokenClassification.from_pretrained(
        config.pretrained_model_name_or_path,
        num_labels=len(label_to_id),
    )
    classifier.to(run_device)

    # processData() presumably returns token sequences and aligned slot labels.
    features, labels = processData()
    loader_train, loader_val = getDataLoader(features, labels)

    train(
        classifier,
        run_device,
        loader_train,
        loader_val,
        config.epochs,
        config.max_grad_norm,
    )
def promethoes():
    """Handle a Prometheus Alertmanager webhook and forward each alert.

    NOTE(review): the name looks like a typo for "prometheus" — kept
    unchanged so existing routes/callers keep working.

    Reads the JSON payload from the (presumably Flask) global ``request``,
    normalizes it via ``process_data.processData``, and pushes one message
    per alert through ``utils(...)``. Returns the literal status string
    "告警成功" ("alert succeeded") when every alert has been sent.
    """
    # Sample Alertmanager payload (UTF-8 bytes) kept for offline testing;
    # re-enable the commented assignment below to replay it.
    test_data = b'{"receiver":"test","status":"firing","alerts":[{"status":"firing","labels":{"alertname":"\xe4\xb8\xbb\xe6\x9c\xba\xe8\xbf\x9c\xe7\xa8\x8b\xe8\xbf\x9e\xe6\x8e\xa5\xe5\xa4\xb1\xe8\xb4\xa5","host":"db-01","instance":"127.0.0.1:9066","job":"service_guard","service":"dbas"},"annotations":{"description":"db-01\xe4\xb8\xbb\xe6\x9c\xba\xe8\xbf\x9c\xe7\xa8\x8b\xe8\xbf\x9e\xe6\x8e\xa5\xe5\xa4\xb1\xe8\xb4\xa5.","summary":"db-01\xe4\xb8\xbb\xe6\x9c\xba\xe8\xbf\x9c\xe7\xa8\x8b\xe8\xbf\x9e\xe6\x8e\xa5\xe5\xa4\xb1\xe8\xb4\xa5."},"startsAt":"2019-04-03T16:22:20.137532326+08:00","endsAt":"0001-01-01T00:00:00Z","generatorURL":"http://mix-app-131-11:9090/graph?g0.expr=guard_remote_conn_up+%3D%3D+0\\u0026g0.tab=1"}],"groupLabels":{},"commonLabels":{"service":"dbas"},"commonAnnotations":{},"externalURL":"http://mix-app-131-11:9093","version":"4","groupKey":"{}:{}"}\n'
    print("========收到告警==========")
    # request.data = test_data
    processed_alerts = process_data.processData(request.data)
    for alert in processed_alerts:
        # Prefix depends on whether the alert fired or was resolved.
        status = alert["status"]
        if status == "resolved":
            alarm = "告警解除---"  # "alert resolved"
        else:
            alarm = "告警触发---"  # "alert fired"
        content_msg = alarm + alert["content"]
        key = alert["receiver"]
        alarmType = alarm + "自监控告警"  # "self-monitoring alert"
        print("告警消息为:" + content_msg)
        # `key` is passed twice — presumably (recipient, token); TODO confirm
        # against the utils() signature.
        utils(content_msg, key, key, "", alarmType)
        # if res:
        #     log_out.log_out("告警:"+" 时间: "+alert["time"]+" 内容: "+ alert["content"])
        time.sleep(2)  # throttle outgoing notifications
    print("========告警已发送=========")
    return "告警成功"
import process_data as pd  # NOTE(review): `pd` shadows the usual pandas alias — confirm no pandas use in this module.
import csv
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import GridSearchCV

# Input files and the percentage of the 2008 training data to use.
trainfile08 = 'train_2008.csv'
testfile08 = 'test_2008.csv'
perc = 75

# Module-level load: features, labels, and the held-out test features.
X, Y, Xtest = pd.processData(trainfile08, testfile08, perc)

# Empty grid: GridSearchCV just cross-validates the estimator's defaults.
parameters = {}


def classifyAll():
    """Cross-validate GaussianNB on (X, Y), dump cv_results_ to CSV, and return the fitted search.

    Returns:
        The fitted GridSearchCV instance.
    """
    model = GaussianNB()
    clf = GridSearchCV(model, parameters, verbose=20)
    clf = clf.fit(X, Y)
    # newline='' is required by the csv module; without it the text layer
    # translates '\n' to '\r\n' on Windows, producing blank/garbled rows.
    with open('NaiveBayes.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=',', lineterminator='\n')
        for key, value in clf.cv_results_.items():
            writer.writerow([key, value])
    print(clf.cv_results_)
    return clf


def classifyOne():
    """Fit a single GaussianNB model on (X, Y) and return it."""
    clf = GaussianNB()  # priors=None is the default
    clf = clf.fit(X, Y)
    return clf
import csv import numpy as np from sklearn.ensemble import AdaBoostClassifier from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeClassifier from sklearn.feature_selection import RFECV, RFE import process_data as pd trainfile08 = "train_2008.csv" testfile08 = "test_2008.csv" testfile12 = "test_2012.csv" perc = 100 X, Y, Xtest = pd.processData(trainfile08, testfile08, perc) _, _, Xtest2 = pd.processData(trainfile08, testfile12, perc) print("Data processed") def classifyRFE(): clf = AdaBoostClassifier( base_estimator=DecisionTreeClassifier( max_depth=1, max_features='log2'), n_estimators=750 ) print("Performing RFECV") selector = RFECV(clf, verbose=100, step=10) selector = selector.fit(X, Y) # Print results to file with open("logs\\adaBDT_rfe_results_3.txt", "w") as fle:
temp_train = cvModel.predict([X_train, X_train_angle]) y_train_pred_log += temp_train.reshape(temp_train.shape[0]) y_test_pred_log = y_test_pred_log / K y_train_pred_log = y_train_pred_log / K print('Train Log Loss Validation= ', log_loss(y_train, y_train_pred_log)) print('Valid Log Loss Validation= ', log_loss(y_train, y_valid_pred_log)) return y_test_pred_log ### Input Model Name ### ### See README for different models ### model = 'vgg16' X_train, X_train_angle, X_test, X_test_angle, y_train = processData( "train.json", "test.json", model) predictions = transferCV(X_train, X_train_angle, X_test, X_test_angle, y_train, model=model, finetune=False, finetune_layer=15) test = pd.read_json("test.json") predictions_df = test[['id']].copy() predictions_df['is_iceberg'] = predictions