# Flask view; assumes `from flask import request, redirect` plus project-local
# DigitClassifier and allowed_file helpers.
def classify():
    if request.method == 'POST':
        input_file = request.files['file']  # werkzeug.datastructures.FileStorage instance
        # If the user does not select a file, the browser submits an empty part without a filename.
        if input_file.filename == '':  # file name (without path)
            print("no selected file!", input_file, input_file.filename)
            # flash('No selected file')
            return redirect(request.url)
        elif input_file and allowed_file(input_file.filename):
            # filename = secure_filename(input_file.filename)
            # create_path_if_doesnt_exist(UPLOAD_FOLDER)
            # input_file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            dc = DigitClassifier("CNN")
            # Load the trained model
            model = dc.load_model()
            model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
            # Run the loaded model on the uploaded image
            # filename = 'data/test/4391.png'
            predicted_label = dc.predict_image(input_file, model)
            print("Predictions with loaded model for image:", input_file.filename,
                  type(input_file.filename), ":", predicted_label)
            return 'Predicted number for input image: %s' % predicted_label
            # return redirect(url_for('uploaded_file', filename=filename))
        else:
            return "file input format not allowed or was empty"
    else:
        return "ok"  # a view must return a response (here a plain string)
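The view above relies on a route decorator and an allowed_file helper that are not part of the snippet. A minimal sketch of how that wiring might look; the route path, the extension whitelist, and the app object are assumptions, not taken from the original project:

# Hypothetical wiring for the classify view above; the route path and
# ALLOWED_EXTENSIONS are assumptions.
from flask import Flask, request, redirect

app = Flask(__name__)
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}

def allowed_file(filename):
    # Accept only filenames with a whitelisted extension.
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

@app.route('/classify', methods=['GET', 'POST'])
def classify():
    ...  # body as in the snippet above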
# Flask view; assumes `from flask import request` and a project-local DigitClassifier.
def upload():
    image_path = request.files['file']  # FileStorage object, despite the variable name
    dc = DigitClassifier("CNN")
    # Load the trained model
    model = dc.load_model()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # Run the loaded model on the uploaded image
    # image_path = 'data/test/4391.png'
    predicted_label = dc.predict_image(image_path, model)
    print("Prediction with loaded model for image:", image_path, "is:",
          predicted_label, request.files['file'])
    return 'predict: %s' % predicted_label[0]
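Both views read the uploaded image from request.files['file'], so they can be exercised with a small client script. A sketch using the requests library; the host, port, route name, and sample file path are assumptions (the file path only echoes the commented example in the snippets):

# Hypothetical client for the endpoints above; URL, route, and file path are assumptions.
import requests

with open('data/test/4391.png', 'rb') as f:
    resp = requests.post('http://127.0.0.1:5000/classify',
                         files={'file': ('4391.png', f, 'image/png')})
print(resp.status_code, resp.text)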
import re
from os import listdir
from os.path import isfile, join

# kCross and DigitClassifier are project-local.
def main():
    my01path = "C:/DigitProject/DigitDetector/binaryFolder/digit_data/"
    files = [f for f in listdir(my01path) if isfile(join(my01path, f))]
    matches = [re.search(r"^input_[0-9]+_[0-9]+_[0-9]+\.json$", i) for i in files]
    jsonFiles = [i.group(0) for i in matches if i]
    partitions = kCross(10, jsonFiles)
    DigitClassifier(partitions, my01path, 'neural')
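kCross is not shown in these snippets; from the call kCross(10, jsonFiles) it appears to split the JSON file list into 10 cross-validation partitions. A minimal sketch of a helper with that call shape, purely as an assumption about its behaviour:

# Hypothetical k-fold partitioner matching the kCross(10, jsonFiles) call above;
# the real implementation is not shown in the snippet.
def kCross(k, items):
    # Distribute items round-robin into k roughly equal partitions.
    partitions = [[] for _ in range(k)]
    for index, item in enumerate(items):
        partitions[index % k].append(item)
    return partitions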
def __initData(self):
    self.__paintBoard = PaintBoard(self)
    self.__model = DigitClassifier()
# Qt GUI (PyQt5 assumed for the imports); PaintBoard and DigitClassifier are project-local.
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QLabel
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
from PIL import Image, ImageQt
import numpy as np


class MainWidget(QWidget):
    def __init__(self, Parent=None):
        super().__init__(Parent)
        self.__result = -1
        self.__initData()
        self.__initView()

    def __initData(self):
        self.__paintBoard = PaintBoard(self)
        self.__model = DigitClassifier()

    def __initView(self):
        self.setFixedSize(600, 400)
        self.setWindowTitle('Application')

        main_layout = QHBoxLayout(self)
        main_layout.setSpacing(10)
        main_layout.addWidget(self.__paintBoard)

        sub_layout = QVBoxLayout()
        sub_layout.setContentsMargins(10, 10, 10, 10)
        sub_layout.setSpacing(30)

        self.__btn_Clear = QPushButton('clear')
        self.__btn_Clear.setParent(self)
        self.__btn_Clear.clicked.connect(self.__paintBoard.clear)
        sub_layout.addWidget(self.__btn_Clear)

        self.__btn_Predict = QPushButton('predict')
        self.__btn_Predict.setParent(self)
        self.__btn_Predict.clicked.connect(self.predict)
        sub_layout.addWidget(self.__btn_Predict)

        self.__btn_Quit = QPushButton('quit')
        self.__btn_Quit.setParent(self)
        self.__btn_Quit.clicked.connect(self.quit)
        sub_layout.addWidget(self.__btn_Quit)

        self.__lb_Result_Tip = QLabel()
        font = QFont()
        font.setPointSize(24)
        self.__lb_Result_Tip.setFont(font)
        self.__lb_Result_Tip.setText('result')
        self.__lb_Result_Tip.setParent(self)
        sub_layout.addWidget(self.__lb_Result_Tip)

        self.__lb_Result = QLabel()
        font = QFont()
        font.setPointSize(30)
        self.__lb_Result.setFont(font)
        self.__lb_Result.setParent(self)
        self.__lb_Result.setAlignment(Qt.AlignHCenter)
        sub_layout.addWidget(self.__lb_Result)

        main_layout.addLayout(sub_layout)

    def quit(self):
        self.close()

    def predict(self):
        image = self.__paintBoard.getImage()
        pil_img = ImageQt.fromqimage(image)
        pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)  # use Image.LANCZOS on Pillow >= 10
        # pil_img.save('./images/test66.png')
        # pil_img.show()
        img_array = np.array(pil_img.convert('L')).reshape(784)
        img_array = np.hstack([img_array, [1.0]]).reshape((1, 785))  # 784 pixels + bias term
        # display image
        # plt.imshow(img_array.reshape(28, 28), cmap="binary")
        # plt.imshow(pil_img, cmap="binary")
        # plt.show()
        # fig = plt.figure(figsize=(6, 6))
        # fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
        # # Draw the digits: each image is 8*8 pixels
        # for i in range(64):
        #     ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
        #     ax.imshow(self.xtest[i].reshape(28, 28), cmap=plt.cm.binary, interpolation='nearest')
        #     # Label each image with its target value
        #     ax.text(0, 7, str(self.ytest[i]))
        # plt.show()
        self.__result = self.__model.predict(img_array)
        print("result: %d" % self.__result)
        self.__lb_Result.setText("%d" % self.__result)
}  # closes the info dict initialised above (not shown in this snippet)

# Command-line overrides; assumes `import sys` and the project-local
# PuzzleDetector, DigitClassifier and Solver classes.
# TODO: check and validate the game arguments
if len(sys.argv) > 1:
    info['game'] = sys.argv[1]
    if len(sys.argv) > 2:  # guard each index to avoid IndexError
        info['GRID_LEN'] = int(sys.argv[2])
    if len(sys.argv) > 3:
        info['SQUARE_LEN'] = int(sys.argv[3])

puzzle_detected = False
puzzle_analyzed = False
puzzle_solved = False

detector = PuzzleDetector(info)
classifier = DigitClassifier()
solver = Solver(info)

REAL_TIME = True

# 1. Board detection phase
# if REAL_TIME:
#     cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
#     counter = 0
#     while not puzzle_detected:
#         _, frame = cap.read()
#         detector.detectGameBoard(frame)
#         counter += 1
#         if counter % 10 == 0 and detector.grid_digit_images is not None:
# Variant of the MainWidget above that also displays the drawn digit with matplotlib
# before predicting. PyQt5 assumed; PaintBoard and DigitClassifier are project-local.
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QLabel
from PyQt5.QtGui import QFont
from PyQt5.QtCore import Qt
from PIL import Image, ImageQt
import matplotlib.pyplot as plt
import numpy as np


class MainWidget(QWidget):
    def __init__(self, Parent=None):
        super().__init__(Parent)
        self.__result = -1
        self.__initData()
        self.__initView()

    def __initData(self):
        self.__paintBoard = PaintBoard(self)
        self.__model = DigitClassifier()

    def __initView(self):
        self.setFixedSize(600, 400)
        self.setWindowTitle('Application')

        main_layout = QHBoxLayout(self)
        main_layout.setSpacing(10)
        main_layout.addWidget(self.__paintBoard)

        sub_layout = QVBoxLayout()
        sub_layout.setContentsMargins(10, 10, 10, 10)
        sub_layout.setSpacing(30)

        self.__btn_Clear = QPushButton('clear')
        self.__btn_Clear.setParent(self)
        self.__btn_Clear.clicked.connect(self.__paintBoard.clear)
        sub_layout.addWidget(self.__btn_Clear)

        self.__btn_Predict = QPushButton('predict')
        self.__btn_Predict.setParent(self)
        self.__btn_Predict.clicked.connect(self.predict)
        sub_layout.addWidget(self.__btn_Predict)

        self.__btn_Quit = QPushButton('quit')
        self.__btn_Quit.setParent(self)
        self.__btn_Quit.clicked.connect(self.quit)
        sub_layout.addWidget(self.__btn_Quit)

        self.__lb_Result_Tip = QLabel()
        font = QFont()
        font.setPointSize(24)
        self.__lb_Result_Tip.setFont(font)
        self.__lb_Result_Tip.setText('result')
        self.__lb_Result_Tip.setParent(self)
        sub_layout.addWidget(self.__lb_Result_Tip)

        self.__lb_Result = QLabel()
        font = QFont()
        font.setPointSize(30)
        self.__lb_Result.setFont(font)
        self.__lb_Result.setParent(self)
        self.__lb_Result.setAlignment(Qt.AlignHCenter)
        sub_layout.addWidget(self.__lb_Result)

        main_layout.addLayout(sub_layout)

    def quit(self):
        self.close()

    def predict(self):
        image = self.__paintBoard.getImage()
        pil_img = ImageQt.fromqimage(image)
        pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)  # use Image.LANCZOS on Pillow >= 10
        # pil_img.save('./images/test66.png')
        # pil_img.show()
        img_array = np.array(pil_img.convert('L')).reshape(784)
        # display image
        plt.imshow(img_array.reshape(28, 28), cmap="binary")
        # plt.imshow(pil_img, cmap="binary")
        plt.show()
        img_array = np.hstack([img_array, [1.0]]).reshape((1, 785))  # 784 pixels + bias term
        # img_array = np.hstack([img_array, [1.0]])
        # print(img_array.shape)  # (785,)
        # img_array = np.reshape(img_array, (img_array.shape[0], -1))
        # print(img_array.shape)  # (785, 1)
        self.__result = self.__model.predict(img_array)
        print("result: %d" % self.__result)
        self.__lb_Result.setText("%d" % self.__result)
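The predict methods above flatten a 28x28 grayscale image into 784 pixel values and append a constant 1.0 as a bias term, giving the (1, 785) row the model expects. The same preprocessing can be reproduced from an image file on disk; the helper name and input path here are placeholders, not part of the original code:

# Stand-alone version of the preprocessing used in predict(); image_to_row is hypothetical.
import numpy as np
from PIL import Image

def image_to_row(path):
    img = Image.open(path).convert('L').resize((28, 28))
    pixels = np.array(img).reshape(784)
    return np.hstack([pixels, [1.0]]).reshape((1, 785))  # 784 pixels + bias term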
from Downloader import Downloader
from DigitClassifier import DigitClassifier
import numpy as np
from TheBlueAllianceAPI import get_event_match_keys_with_vidoes, get_event_match_outcomes
from MatchProcessing import MatchProcessing
from DataBaseWorker import DataBaseWorker
from MatchProcessingWorker import MatchProcessingWorker
import time

score_classifier = DigitClassifier(
    'C:\\Users\\darkd\\Documents\\ScoreProject\\knntrain\\', (15, 20),
    np.array([200, 200, 200]), np.array([255, 255, 255]))
time_classifier = DigitClassifier(
    'C:\\Users\\darkd\\Documents\\ScoreProject\\knntraintime\\', (14, 20),
    np.array([0, 0, 0]), np.array([120, 120, 120]))

d = Downloader('https://www.youtube.com/watch?v=hbLME8QLdeU',
               'C:\\Users\\darkd\\Documents\\ScoreProject\\', 'testmilian')
d.download()

match = MatchProcessing(d.name, score_classifier, time_classifier)
states = match.process_match()
print(states)

# db = DataBaseWorker('C:\\Users\\darkd\\Documents\\ScoreProject\\timeseriesdb.db')
# db.start()
# event_name = ['2018mibel', '2018milan', '2018miwmi', '2018miesc', '2018migay', '2018migul', '2018milin', '2018mimid']
# thread_list = []
# for event in event_name:
#     matches_and_vidoes = get_event_match_keys_with_vidoes(event)
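Each DigitClassifier here is constructed from a training folder, a digit size, and a lower/upper colour bound; bounds such as [200, 200, 200] to [255, 255, 255] are typically used to mask out bright score glyphs before classification. A sketch of that masking step with OpenCV, offered as an assumption about how such bounds are applied (the classifier's internals are not shown in the snippet):

# Hypothetical use of the colour bounds above to isolate digit pixels;
# how DigitClassifier actually applies them is not shown.
import cv2
import numpy as np

frame = cv2.imread('scoreboard_frame.png')  # placeholder input image
lower, upper = np.array([200, 200, 200]), np.array([255, 255, 255])
mask = cv2.inRange(frame, lower, upper)     # white where pixels fall inside the bounds
digits_only = cv2.bitwise_and(frame, frame, mask=mask)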