def upload_page():
    if request.method == "GET":
        return render_template("index.html")

    uploaded_file = request.files['file']
    if uploaded_file.filename != '':  # bug fix: compare the filename, not the FileStorage object
        arq = os.path.join(app.config['UPLOAD_FOLDER'],
                           secure_filename(uploaded_file.filename))
        uploaded_file.save(arq)

    name = request.form["name"]
    page = int(request.form["page"])
    id = request.form["id"]

    # Collect the marked answers; unanswered questions become "."
    quest = ""
    for i in range(1, MAX_ITENS + 1):
        key = "customRadioInline" + str(i)
        try:
            quest += request.form[key]
        except KeyError:
            quest += "."

    output = os.path.join(app.config['UPLOAD_FOLDER'],
                          "Gabarito_" + "_".join(name.split()))
    split(arq, page, output + ".pdf")
    transform(id, quest, output, name, parameters)

    # bug fix: the original os.remove calls sat after `return` and never ran;
    # defer cleanup until the response has been sent (flask.after_this_request)
    @after_this_request
    def cleanup(response):
        os.remove(output + ".pdf")
        os.remove(arq)
        return response

    return send_file(output + ".pdf", as_attachment=True)
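# A hypothetical client for the route above, showing the expected form fields.
# The route path "/" and the port are assumptions -- match them to however
# upload_page() is actually registered with Flask.
import requests

form = {"name": "Ana Silva", "page": "1", "id": "123456",
        "customRadioInline1": "A", "customRadioInline2": "B"}  # remaining items default to "."
with open("prova.pdf", "rb") as fh:
    resp = requests.post("http://localhost:5000/", data=form,
                         files={"file": ("prova.pdf", fh, "application/pdf")})
with open("Gabarito.pdf", "wb") as out:
    out.write(resp.content)  # the generated answer sheet comes back as an attachment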
def p_detect(self, image: str):
    self.pnet.eval()
    pboxes = torch.Tensor([]).to(self.device)
    image = Image.open(image)
    scale = 1.0  # bug fix: initialize once; it was reset to 1 on every pyramid level
    while min(image.size) > 12:
        # forward pass on the current pyramid level
        data = functions.transform(image).unsqueeze(0).to(self.device)
        confi, offset, _ = self.pnet(data)
        confi, offset = confi.permute(0, 2, 3, 1), offset.permute(0, 2, 3, 1)  # (N, H, W, C)
        mask = confi[..., 0] > cfg.CONFI["pnet"]
        index = torch.nonzero(mask)

        # map each positive cell back to its receptive-field ROI in the scaled image;
        # after the permute, index[:, 1] is the row (y) and index[:, 2] the column (x)
        side, stride = cfg.NET["pnet"]
        y1, x1 = index[:, 1] * stride, index[:, 2] * stride
        x2, y2 = x1 + side, y1 + side  # bug fix: was y1 + stride
        roi = torch.stack([x1, y1, x2, y2], dim=-1)

        # apply the regression offsets and map the box back to the original image
        confi, offset = confi[mask], offset[mask]
        origin = (roi.float() + offset * side) / scale  # bug fix: the ROI must be rescaled too
        box = torch.cat((confi, origin), dim=-1)
        pboxes = torch.cat((pboxes, box), dim=0)

        # next pyramid level: 0.707 ~ 1/sqrt(2), so each level halves the image area
        scale *= 0.707
        image = image.resize((int(image.size[0] * 0.707),
                              int(image.size[1] * 0.707)))  # bug fix: was resized to itself
    return functions.nms(pboxes)
def simplexing(euler_angle, strain):
    Q = np.asarray(fn.transformation(euler_angle))
    s = fn.sixD(fn.transform(Q, strain))
    # negate each component so that a minimizer maximizes the strain components
    obj_fn = [-1 * j for j in s]
    return obj_fn
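# The negation turns a maximization into the minimization that LP solvers
# expect. A minimal sketch with scipy.optimize.linprog, assuming simplexing()
# feeds a linear program (as its name suggests); `angle`, `strain`, and the
# box bounds below are placeholders, not values from the original code.
from scipy.optimize import linprog

c = simplexing(angle, strain)                  # negated objective coefficients
res = linprog(c=c, bounds=[(0, 1)] * len(c))   # illustrative box constraints
print(res.x, -res.fun)                         # negate again to read off the maximum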
def predict(self):
    # turn the buffered samples into a 1-D waveform tensor
    waveform = torch.Tensor(self.list).flatten()
    with torch.no_grad():
        mel = transform(waveform).squeeze(0).t()
        pred = self.get_pred(mel)
    if pred == 1:
        self.success_num += 1
        beep(sound="coin")
        print(f"Success #{self.success_num}")
        time.sleep(0.1)  # skip 100 ms after a keyword detection
def run():
    cwd = os.getcwd()
    print("Selecting the most recent pdf ...")
    time.sleep(1.5)
    files = glob.glob(cwd + "/*.pdf")
    files.sort(key=os.path.getmtime, reverse=True)
    arq = files[0]
    print(f"{arq[len(cwd):]} selected!")
    time.sleep(.5)
    while True:
        try:
            page = int(input("Answer-sheet page: "))
            break
        except ValueError:
            print("Enter a number!!\n")
    while True:
        try:
            id = input("Student ID: ")
            int(id)
            break
        except ValueError:
            print("Digits only, please")
    while True:
        quest = input("Answers (no spaces): ")
        quest = quest.split()
        if len(quest) == 1:
            quest = quest[0]
            break
        else:
            print("Type your answers without spaces!!")
    name = input("First and last name: ")
    output = "Gabarito_" + "_".join(name.split())
    split(arq, page, output + ".pdf")
    transform(id, quest, output, name, parameters)
def __crop2square__(self, image, boxes, size):
    data = []
    image = Image.open(image)
    x1, y1, x2, y2 = boxes[:, 1], boxes[:, 2], boxes[:, 3], boxes[:, 4]
    # bug fix: the centre is the midpoint of the box, not half its side length
    cx, cy = x1 + (x2 - x1) / 2, y1 + (y2 - y1) / 2
    # bug fix: element-wise maximum of width and height (builtin max fails on tensors)
    mside = torch.max(x2 - x1, y2 - y1)
    # bug fix: `m` was undefined; the square has side mside, centred on (cx, cy)
    __x1, __y1 = (cx - mside / 2).int(), (cy - mside / 2).int()
    __x2, __y2 = __x1 + mside.int(), __y1 + mside.int()
    prior = torch.stack([__x1, __y1, __x2, __y2], dim=-1)  # already stacked, per row
    for i in range(boxes.size(0)):
        __data = image.crop(tuple(prior[i].tolist()))
        __data = __data.resize((size, size))
        data.append(functions.transform(__data).unsqueeze(0))
    return prior, torch.stack(data)
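# A quick sanity check of the fixed geometry above: a 40x20 box centred at
# (30, 30) should become a 40x40 square about the same centre. The numbers
# are illustrative only.
import torch

boxes = torch.tensor([[0.9, 10., 20., 50., 40.]])  # (confidence, x1, y1, x2, y2)
x1, y1, x2, y2 = boxes[:, 1], boxes[:, 2], boxes[:, 3], boxes[:, 4]
cx, cy = x1 + (x2 - x1) / 2, y1 + (y2 - y1) / 2
mside = torch.max(x2 - x1, y2 - y1)
print(cx - mside / 2, cy - mside / 2)  # tensor([10.]) tensor([10.]) -> square (10,10,50,50)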
def to_dataset(self, dataset, is_keyword):
    dataset = [transform(data).t() for data in dataset]
    dataset = [(data, torch.zeros(len(data), dtype=torch.long),
                1 if is_keyword else 0) for data in dataset]
    if is_keyword:
        # mark the last KEYWORD_MARKER_NUMBER frames of each utterance as the keyword
        for data in dataset:
            n = len(data[1])
            for i in range(max(0, n - constants.KEYWORD_MARKER_NUMBER), n):
                data[1][i] = 1
    return dataset
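# A quick check of the label layout: with 5 frames and a marker width of 2,
# only the last two frames carry the keyword label. The width 2 is
# illustrative; the real value is constants.KEYWORD_MARKER_NUMBER.
import torch

labels = torch.zeros(5, dtype=torch.long)
labels[max(0, 5 - 2):] = 1
print(labels)  # tensor([0, 0, 0, 1, 1])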
def main():
    models = ['RF']    # also supported: 'LSTM', 'NN', 'LR', 'DT', 'SVC'
    targets = ['ph']   # or ['DOcategory', 'pHcategory', 'ph', 'dissolved_oxygen']
    # ph TH: 24, 36, 48
    sondefilename = 'leavon_wo_2019-07-01-2020-01-15'
    n_job = -1

    for model_name in models:
        print(model_name)
        for target in targets:
            # classification targets contain 'category'; everything else is regression
            if target.find('category') > 0:
                cat = 1
                directory = 'Results/balance_data/output_Cat_' + \
                    model_name + '/oversampling_cv_models/'
                data = {
                    'target_names': 'target_names', 'method_names': 'method_names',
                    'window_nuggets': 'window_nuggets', 'temporalhorizons': 'temporalhorizons',
                    'CV': 'CV', 'file_names': 'file_names',
                    'std_test_score': 'std_test_score', 'mean_test_score': 'mean_test_score',
                    'params': 'params', 'bestscore': 'bestscore',
                    'F1_0': 'F1_0', 'F1_1': 'F1_1', 'P_0': 'P_0', 'P_1': 'P_1',
                    'R_0': 'R_0', 'R_1': 'R_1', 'acc0_1': 'acc0_1', 'F1_0_1': 'F1_0_1',
                    'F1_all': 'F1_all', 'fbeta': 'fbeta', 'imfeatures': 'imfeatures',
                    'best_thresh_0': 'best_thresh_0', 'best_thresh_1': 'best_thresh_1',
                    'best_thresh_2': 'best_thresh_2'
                }
            else:
                cat = 0
                directory = 'Results/balance_data/output_Reg_' + \
                    model_name + '/oversampling_cv_models/'
                data = {
                    'target_names': 'target_names', 'method_names': 'method_names',
                    'window_nuggets': 'window_nuggets', 'temporalhorizons': 'temporalhorizons',
                    'CV': 'CV', 'file_names': 'file_names',
                    'std_test_score': 'std_test_score', 'mean_test_score': 'mean_test_score',
                    'params': 'params', 'bestscore': 'bestscore',
                    'mape': 'mape', 'me': 'me', 'mae': 'mae', 'mpe': 'mpe',
                    'rmse': 'rmse', 'R2': 'R2', 'imfeatures': 'imfeatures'
                }

            if not os.path.exists(directory):
                os.makedirs(directory)

            # write the header row once per results file
            resultFileName = 'results_' + target + str(time.time()) + '.csv'
            dfheader = pd.DataFrame(data=data, index=[0])
            dfheader.to_csv(directory + resultFileName, index=False, header=False)

            # tree models use the raw data; the rest use the normalized data
            if model_name == 'DT' or model_name == 'RF':
                path = 'Sondes_data/train/train_data/'
                method = 'OrgData'
            else:
                method = 'StandardScaler'
                path = 'Sondes_data/train/train_data_normalized/' + \
                    method + '/' + target + '/'

            for n_steps in [1, 3, 6, 12]:
                for PrH_index in [1, 3, 6, 12, 24, 36, 48]:
                    files = [f for f in os.listdir(path)
                             if f.endswith('.csv') and f.startswith(sondefilename)]
                    file = files[0]
                    print('Window: ' + str(n_steps) + ' TH: ' + str(PrH_index) +
                          ' ' + method + ' ' + target)

                    dataset = pd.read_csv(path + file)
                    train_X_grid, train_y_grid, input_dim, features = func.preparedata(
                        dataset, PrH_index, n_steps, target, cat)

                    if cat == 1 and (model_name == 'LSTM' or model_name == 'NN'):
                        train_y_grid = to_categorical(train_y_grid, 3)
                    if model_name == 'LSTM' or model_name == 'NN':
                        n_job = 1

                    start_time = time.time()

                    model = func.algofind(model_name, input_dim, n_steps, cat)
                    pipeline = Pipeline(steps=[('model', model)])
                    custom_cv = func.custom_cv_2folds(train_X_grid, 5)
                    gs = RandomizedSearchCV(
                        estimator=pipeline,
                        param_distributions=func.param_grid[
                            'param_grid_' + model_name + str(cat)],
                        n_iter=10, cv=custom_cv, verbose=0,
                        random_state=42, n_jobs=n_job)

                    if cat == 1 and (model_name == 'LSTM' or model_name == 'NN'):
                        clf = gs.fit(train_X_grid, train_y_grid,
                                     model__class_weight={0: 1, 1: 50, 2: 100})
                    else:
                        clf = gs.fit(train_X_grid, train_y_grid)

                    test_Score = clf.cv_results_['mean_test_score'].mean()
                    test_std = clf.cv_results_['std_test_score'].mean()
                    print('Mean test scores: %.3f' % test_Score)

                    # bug fix: the fold counter was named `i` and was clobbered by
                    # the per-class ROC loop below, corrupting the 'CV' column
                    fold = 1
                    custom_cv = func.custom_cv_2folds(train_X_grid, 3)
                    for train_index, test_index in custom_cv:
                        test_X = train_X_grid[test_index]
                        test_y = train_y_grid[test_index]
                        predictions = clf.predict(test_X)

                        fpath = 'predictions_' + method + target + '_Window' + \
                            str(n_steps) + '_TH' + str(PrH_index) + \
                            '_CV' + str(fold) + file

                        if cat == 1:
                            # predicted probabilities against binarized ground truth
                            yhat = clf.predict_proba(test_X)
                            y = label_binarize(test_y, classes=[0, 1, 2])

                            # per-class ROC curves; pick each threshold by Youden's J
                            fpr, tpr, roc_auc, best_thresh = {}, {}, {}, {}
                            for c in range(3):
                                fpr[c], tpr[c], thresholds = roc_curve(y[:, c], yhat[:, c])
                                roc_auc[c] = auc(fpr[c], tpr[c])
                                J = tpr[c] - fpr[c]
                                ix = argmax(J)
                                best_thresh[c] = thresholds[ix]
                                print('Best Threshold=%f, roc_auc=%.3f' %
                                      (best_thresh[c], roc_auc[c]))

                            # micro-average ROC curve and area
                            fpr["micro"], tpr["micro"], _ = roc_curve(y.ravel(), yhat.ravel())
                            roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
                            plt.plot(fpr["micro"], tpr["micro"],
                                     label='micro-average ROC curve (area = {0:0.2f})'.format(
                                         roc_auc["micro"]),
                                     color='deeppink', linestyle=':', linewidth=4)

                            colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
                            for c, color in zip(range(3), colors):
                                plt.plot(fpr[c], tpr[c], color=color, lw=2,
                                         label='ROC curve of class {0} (area = {1:0.2f})'.format(
                                             c, roc_auc[c]))

                            # plot the no-skill diagonal for reference
                            plt.plot([0, 1], [0, 1], linestyle='--', label='No Skill')
                            plt.xlabel('False Positive Rate')
                            plt.ylabel('True Positive Rate')
                            plt.title('Multi-class (one-vs-rest) ROC curves')
                            plt.legend(loc="lower right")
                            plt.savefig(directory + fpath + 'ROC_curve.jpg')
                            plt.close()

                        if cat == 1 and (model_name == 'LSTM' or model_name == 'NN'):
                            test_y = argmax(test_y, axis=1)
                        if cat == 0:
                            predictions, test_y = func.transform(
                                predictions, test_y, method, target, file)

                        cm0 = func.forecast_accuracy(predictions, test_y, cat)

                        plt.scatter(np.arange(len(test_y)), test_y, s=1)
                        plt.scatter(np.arange(len(predictions)), predictions, s=1)
                        plt.legend(['actual', 'predictions'], loc='upper right')
                        plt.savefig(directory + fpath + '.jpg')
                        plt.close()

                        print(test_y.shape)
                        print(predictions.shape)

                        if cat == 1:
                            data = {
                                'target_names': target, 'method_names': method,
                                'window_nuggets': n_steps, 'temporalhorizons': PrH_index,
                                'CV': fold, 'file_names': fpath,
                                'std_test_score': [test_std],
                                'mean_test_score': [test_Score],
                                'params': [clf.best_params_],
                                'bestscore': [clf.best_score_],
                                'F1_0': cm0[0], 'F1_1': cm0[1],
                                'P_0': cm0[2], 'P_1': cm0[3],
                                'R_0': cm0[4], 'R_1': cm0[5],
                                'acc0_1': cm0[6], 'F1_0_1': cm0[7],
                                'F1_all': cm0[8], 'fbeta': [cm0[9]],
                                'imfeatures': [clf.best_estimator_],
                                'best_thresh_0': best_thresh[0],
                                'best_thresh_1': best_thresh[1],
                                'best_thresh_2': best_thresh[2]
                            }
                        elif cat == 0:
                            data = {
                                'target_names': target, 'method_names': method,
                                'window_nuggets': n_steps, 'temporalhorizons': PrH_index,
                                'CV': fold, 'file_names': fpath,
                                'std_test_score': [test_std],
                                'mean_test_score': [test_Score],
                                'params': [clf.best_params_],
                                'bestscore': [clf.best_score_],
                                'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2],
                                'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5],
                                'imfeatures': [clf.best_estimator_]
                            }

                        df = pd.DataFrame(data=data, index=[0])
                        df.to_csv(directory + resultFileName,
                                  index=False, mode='a', header=False)

                        elapsed_time = time.time() - start_time
                        print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
                        fold += 1

                    Kb.clear_session()
                    gc.collect()
                    del clf
import functions as app

if __name__ == '__main__':
    app.init()
    app.transform()
    app.end()
def main():
    # check if a GPU is available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # parameter settings
    EPOCH = 50
    BATCH_SIZE = 128
    ifnormalized = False  # set to True for a normalized confusion matrix

    # CIFAR-10 classes
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    # choose the algorithm to run
    algorithm = input(
        "Select your algorithm: ResNet,AlexNet,LeNet5,VGG11 (R/A/L/V): ").upper()
    if algorithm == 'R':
        transform_train, transform_test = transform()
        net = ResNet18().to(device)
    elif algorithm == 'A':
        transform_train, transform_test = transform()
        net = AlexNet().to(device)
    elif algorithm == 'L':
        transform_train, transform_test = transform()
        net = LeNet_5().to(device)
    elif algorithm == 'V':
        transform_train, transform_test = transform_vgg()
        net = VGG11().to(device)
    else:
        print("Please enter a correct algorithm")
        exit()

    # optimizer
    if algorithm == 'V':
        optimizer = optim.Adam(net.parameters(), lr=0.0001)
    else:
        optimizer = optim.SGD(net.parameters(), lr=0.01,
                              momentum=0.9, weight_decay=5e-4)

    # data loading
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                             shuffle=False, num_workers=1)
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                              shuffle=True, num_workers=1,
                                              pin_memory=True)

    # Training trains a new model; testing reuses the model saved by the last train
    choice = input("Start a new training or test the saved model? (A/B): ").upper()
    if choice == 'A':
        # starting a new run, so empty the data recorded from the last training
        empty_correspondingTXTfile(algorithm)
        # training() tests the network each epoch but does not compute the
        # F1-score, recall, and precision; those three metrics are calculated
        # once training has finished
        training(net, device, trainloader, testloader, optimizer, EPOCH,
                 algorithm, classes)
        # save the selected model
        save_correspondingMODEL(net, algorithm)
    else:
        # testing_savedModel() evaluates the saved model, then generates its
        # confusion matrix and the precision, recall, and F1-score values
        if algorithm == 'R':
            testing_savedModel(net, device, testloader, algorithm, classes,
                               ifnormalized, PATH="./result/cifar_ResNet.pth")
        elif algorithm == 'A':
            testing_savedModel(net, device, testloader, algorithm, classes,
                               ifnormalized, PATH="./result/cifar_AlexNet.pth")
        elif algorithm == 'L':
            testing_savedModel(net, device, testloader, algorithm, classes,
                               ifnormalized, PATH="./result/cifar_LeNet5.pth")
        elif algorithm == 'V':
            testing_savedModel(net, device, testloader, algorithm, classes,
                               ifnormalized, PATH="./result/cifar_VGG11.pth")

    # Training accuracy and loss are saved to a .txt file, so users can view
    # the learning curve without starting a new training: selecting the
    # testing mode pops up the curve from the last run
    LearningCurve_plot(algorithm)
output_length = 20

# load the saved dictionaries
save_dir = './model3'
input_encoding = json.load(open(save_dir + '/input_encoding.json'))
input_decoding = json.load(open(save_dir + '/input_decoding.json'))
input_decoding = {int(k): v for k, v in input_decoding.items()}
output_encoding = json.load(open(save_dir + '/output_encoding.json'))
output_decoding = json.load(open(save_dir + '/output_decoding.json'))
output_decoding = {int(k): v for k, v in output_decoding.items()}

input_dict_size = len(input_decoding) + 1
output_dict_size = len(output_decoding) + 1

# transform the data
encoded_training_input = transform(input_encoding, training_input, vector_size=20)
encoded_training_output = transform(output_encoding, training_output, vector_size=20)
encoded_val_input = transform(input_encoding, val_input, vector_size=20)
encoded_val_output = transform(output_encoding, val_output, vector_size=20)

# encoder input
training_encoder_input = encoded_training_input
val_encoder_input = encoded_val_input

# decoder input: shift the target right by one and pad with START_CHAR_CODE
training_decoder_input = np.zeros_like(encoded_training_output)
training_decoder_input[:, 1:] = encoded_training_output[:, :-1]
training_decoder_input[:, 0] = START_CHAR_CODE
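# A minimal check of the teacher-forcing shift above (token values are
# illustrative; START_CHAR_CODE = 1 is an assumption and must match the
# project's actual encoding):
import numpy as np

START_CHAR_CODE = 1
encoded = np.array([[5, 6, 7, 0]])      # one encoded output sequence
decoder_in = np.zeros_like(encoded)
decoder_in[:, 1:] = encoded[:, :-1]
decoder_in[:, 0] = START_CHAR_CODE
print(decoder_in)  # [[1 5 6 7]] -- each target token is preceded by its predecessor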
def iterate(self, n=1):
    # apply the keyed transform n times, tracking the total iteration count
    for i in range(n):
        self.iters += 1
        self.arr_ = transform(self.arr_, self.key_)
                  focus, show, contrast, checkBrightness)  # tail of an import truncated upstream

# Load the image and resize it
image = cv2.imread('flower.jpg')
image = resize(image)
imageOriginal = image.copy()

# Check the brightness of the image and adjust it; if the image is too blurry,
# improve contrast by equalizing the histogram channels
image = equalize(image)

# Find the item to enlarge. If it is not detected correctly,
# adjust k to an odd number between 0 and 9.
k = 5
cnts = contours(image, k)  # bug fix: don't shadow the imported contours() function
rect, drawing = findPage(cnts, imageOriginal)
name = 'Box Drawn'
show(drawing, name)

# Warp the boxed item to full screen. You can play with focusing/sharpening
# the image based on your needs.
warp = transform(imageOriginal, rect)
warp = checkBrightness(warp)

# Focus/sharpen the scanned photo; change alpha to adjust the level of focus
alpha = 4
warp = focus(warp, alpha)
warp = sharp(warp)
name = 'Scanned Photo'
show(warp, name)