def main():
    """Entry point: parse CLI options, prepare NN variables and run the selected work mode.

    Returns:
        bool: True when the work-function completed, False on bad options or failure.
    """
    Info.printTitle()
    # Get options from arguments; None signals invalid/missing arguments.
    dicOptions = Opts.getOptions()
    if dicOptions is None:  # identity check is the idiomatic None test
        Info.printHelp()
        return False
    workClass = dicOptions[PAR_MODE]
    # Get variables for the neural network.
    dicNNVariables = NNet.getNNVariables()
    # Set up parameters on the selected work class.
    workClass.setOptions(dicOptions)
    workClass.setNNVariables(dicNNVariables)
    # Run work-function; an explicit False return signals failure
    # (kept as '== False' so a None return keeps the original behavior).
    if workClass.run() == False:
        print("Fail.")
        return False
    print("Complete.")
    return True
def recognizeCharacters(self, image):
    """Recognize characters in *image* with the kNN model.

    Builds ``self.text`` (accepted characters, left to right) and refills
    ``self.rectangles`` with the bounding rectangle of each accepted char.

    Args:
        image: BGR input image as used by the cv2 pipeline.
    """
    # NOTE: these globals are only read here, never assigned; the
    # declaration is kept from the original but is not strictly needed.
    global RESIZED_IMAGE_WIDTH, RESIZED_IMAGE_HEIGHT, kNearest
    self.text = ""
    informations = []
    # Convert the image to gray and blur it before adaptive thresholding.
    grayBlurredImage = cv2.GaussianBlur(
        ImageProcessor.convertImageToGray(image), (5, 5), 0)
    thresholdedImage = cv2.adaptiveThreshold(
        grayBlurredImage, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY_INV, 11, 2)
    # OpenCV 3.x findContours returns (image, contours, hierarchy);
    # copy() because findContours modifies its input.
    image, contours, hierarchy = cv2.findContours(
        thresholdedImage.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        information = Information()
        information.contour = contour
        information.contourArea = cv2.contourArea(information.contour)
        if information.isContourValid():
            information.boundingRectangle = cv2.boundingRect(
                information.contour)
            information.calculateRectangle()
            informations.append(information)
    # Sort contours from left to right so the text reads in order.
    informations.sort(key=operator.attrgetter("rectangle.x"))
    del self.rectangles[:]  # clear the "rectangles" list in place
    for information in informations:
        # Crop the thresholded character (was rebound onto the 'image'
        # parameter in the original; renamed for clarity).
        roi = thresholdedImage[
            information.rectangle.y:information.rectangle.y +
            information.rectangle.height,
            information.rectangle.x:information.rectangle.x +
            information.rectangle.width]
        roi = cv2.resize(roi, (RESIZED_IMAGE_WIDTH, RESIZED_IMAGE_HEIGHT))
        # Flatten to one row and convert from int to float for kNN.
        numpyArray = numpy.float32(
            roi.reshape((1, RESIZED_IMAGE_WIDTH * RESIZED_IMAGE_HEIGHT)))
        # result = vector with prediction results for each input sample
        # (single-precision float vector with number_of_samples elements).
        returnValue, result, neighborResponse, distance = kNearest.findNearest(
            numpyArray, k=1)
        # Reject matches that are too far from any training sample.
        if distance > 4712875:
            continue
        self.text = self.text + str(chr(int(result[0][0])))
        self.rectangles.append(information.rectangle)
        # Parenthesized print works on both Python 2 and 3 (the original
        # print-statement form is a syntax error on Python 3).
        print("=" + str(distance) + "=")  # for debugging purpose
    if len(self.text.strip()) != 0:
        print(self.text + "\n")
class ObjectOrientedTitanic():
    """Orchestrates the Titanic workflow: preprocessing, visualisation, ML."""

    def __init__(self, train, test):
        """Store the raw frames, derive the combined data, build helpers.

        train: DataFrame containing the 'Survived' target column.
        test:  DataFrame containing a 'PassengerId' column.
        """
        print("ObjectOrientedTitanic object created")
        # Remember ids and target before the target column is dropped.
        self.testPassengerID = test['PassengerId']
        self.number_of_train = train.shape[0]
        self.y_train = train['Survived']
        self.train = train.drop('Survived', axis=1)
        self.test = test
        self.all_data = self._get_all_data()
        # Collaborator instances used by the public methods below.
        self._info = Information()
        self.preprocessStrategy = PreprocessStrategy()
        self.visualizer = Visualizer()
        self.gridSearchHelper = GridSearchHelper()

    def _get_all_data(self):
        """Return train and test stacked into a single frame."""
        frames = [self.train, self.test]
        return pd.concat(frames)

    def information(self):
        """Print/report summary information about the combined data."""
        self._info.info(self.all_data)

    def preprocessing(self, strategy_type):
        """Apply the named preprocessing strategy to the combined data."""
        self.strategy_type = strategy_type
        combined = self._get_all_data()
        self.all_data = self.preprocessStrategy.strategy(
            combined, strategy_type)

    def visualize(self, visualizer_type, number_of_features=None):
        """Refresh the train/test split, then dispatch to the plot type."""
        self._get_train_and_test()
        if visualizer_type != "RadViz":
            return
        self.visualizer.RandianViz(
            X=self.X_train,
            y=self.y_train,
            number_of_features=number_of_features)

    def machine_learning(self):
        """Refresh the split and run the grid-search fit/predict/save step."""
        self._get_train_and_test()
        self.gridSearchHelper.fit_predict_save(self.X_train,
                                               self.X_test,
                                               self.y_train,
                                               self.testPassengerID,
                                               self.strategy_type)

    def _get_train_and_test(self):
        """Split the combined frame back into X_train/X_test by row count."""
        split = self.number_of_train
        self.X_train = self.all_data[:split]
        self.X_test = self.all_data[split:]
def __init__(self, train, test):
    """Keep the raw frames, derive the combined data and create helpers.

    train: DataFrame with the 'Survived' target column.
    test:  DataFrame with a 'PassengerId' column.
    """
    print("ObjectOrientedTitanic object created")
    # Capture ids and the target before 'Survived' is dropped from train.
    self.testPassengerID = test['PassengerId']
    self.y_train = train['Survived']
    self.number_of_train = train.shape[0]
    self.train = train.drop('Survived', axis=1)
    self.test = test
    self.all_data = self._get_all_data()
    # Helper objects used by the rest of the class.
    self._info = Information()
    self.preprocessStrategy = PreprocessStrategy()
    self.visualizer = Visualizer()
    self.gridSearchHelper = GridSearchHelper()
def __init__(self):
    """Build position/choice lookup tables and the backing data scraper."""
    info = Information()
    # Squad slot indices grouped by broad position.
    self.positionMap = {
        'GoalKeeper': [0],
        'Defender': [1, 2, 3],
        'Midfielder': [4, 5, 6, 7, 8],
        'Forward': [9, 10, 11, 12],
    }
    # Menu index -> attribute collection taken from Information.
    self.choiceDict = {
        0: info.posDict,
        1: info.basicList,
        2: info.abilityList,
        3: info.posDetailedDict,
    }
    # Accumulated user feedback, keyed like choiceDict.
    self.knownPositives = {index: [] for index in range(4)}
    self.knownNegatives = {1: [], 2: []}
    self.knownNegativeNationalities = []
    self.knownNationalities = ''
    self.dataScraper = DataScraper(self.positionMap)
def __init__(self, positionMap):
    """Remember the position layout and prepare the scraping helpers."""
    # Listing page used as the starting point for every scrape.
    self.base_url = 'http://pesdb.net/pes2019/?feature=0'
    self.customInfo = Information()
    self.positionMap = positionMap
def __init__(self, citymap, passenger_list_, taxi_list_, mode_=None):
    """Set up pool/mode bookkeeping, then initialise the Information base."""
    self.pool, self.mode = [], mode_
    # Delegate the shared state setup to the Information base class.
    Information.__init__(self, citymap, passenger_list_, taxi_list_)
def recognizeCharacters(self, image):
    """Recognize characters in *image* using the global kNN model.

    Appends every accepted character to the module-level ``text`` string,
    records its bounding rectangle in ``self.rectangles``, and returns the
    input image.
    """
    global text, kNearest
    text = ""
    informations = []
    validInformations = []
    imgGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # get grayscale image
    imgBlurred = cv2.GaussianBlur(imgGray, (5, 5), 0)  # blur
    # Filter image from grayscale to black and white.
    imgThresh = cv2.adaptiveThreshold(
        imgBlurred,                      # input image
        255,                             # pixels passing the threshold become full white
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,  # gaussian rather than mean, seems to give better results
        cv2.THRESH_BINARY_INV,           # invert: foreground white, background black
        11,                              # pixel neighborhood size used to calculate threshold
        2)                               # constant subtracted from the weighted mean
    # Copy the thresholded image: findContours modifies its input.
    imgThreshCopy = imgThresh.copy()
    # OpenCV 3.x returns (image, contours, hierarchy).
    imgContours, contours, npaHierarchy = cv2.findContours(
        imgThreshCopy,
        cv2.RETR_EXTERNAL,        # retrieve the outermost contours only
        cv2.CHAIN_APPROX_SIMPLE)  # keep only contour segment end points
    for contour in contours:
        information = Information()
        information.contour = contour
        information.boundingRectangle = cv2.boundingRect(
            information.contour)
        information.calculateRectangle()
        information.contourArea = cv2.contourArea(information.contour)
        informations.append(information)
    # Keep only plausible character contours.
    for information in informations:
        if information.isContourValid():
            validInformations.append(information)
    # Sort contours from left to right so text reads in order.
    validInformations.sort(key=operator.attrgetter("rectangle.x"))
    for information in validInformations:
        # Crop the character out of the thresholded image.
        imgROI = imgThresh[
            information.rectangle.y:information.rectangle.y +
            information.rectangle.height,
            information.rectangle.x:information.rectangle.x +
            information.rectangle.width]
        # Resize for more consistent recognition and storage.
        imgROIResized = cv2.resize(
            imgROI, (self.RESIZED_IMAGE_WIDTH, self.RESIZED_IMAGE_HEIGHT))
        # Flatten into a 1d array, then convert from ints to floats.
        npaROIResized = imgROIResized.reshape(
            (1, self.RESIZED_IMAGE_WIDTH * self.RESIZED_IMAGE_HEIGHT))
        npaROIResized = np.float32(npaROIResized)
        retval, npaResults, neigh_resp, dists = kNearest.findNearest(
            npaROIResized, k=1)
        # Reject matches that are too far from any training sample.
        if dists > 4712875:
            continue
        self.rectangles.append(information.rectangle)
        # Parenthesized print works on both Python 2 and 3 (the original
        # print-statement form is a syntax error on Python 3).
        print("=" + str(dists) + "=")
        # Append current char to the full string.
        text = text + str(chr(int(npaResults[0][0])))
    if len(text.strip()) != 0:
        print(text + "\n")
    return image
def __init__(self, file, path, load=False):
    """Remember the work list and open the shared Information store."""
    self.tot = 0  # number of successfully processed files
    self.file_list = file
    self.Info = Information(path, load=load)
class MakeData:
    """Cuts page images into per-column text samples driven by their text files."""

    # Book codes whose pages are excluded from the data set
    # (same codes the original checked with chained .find() calls).
    _SKIP_CODES = ("KR0008", "KR0034", "KR0146", "KR0306", "KR0320",
                   "KR1320", "KR1350", "KR2853", "KR3213", "KR3578",
                   "KR3577", "KR4283", "KR4472")

    def __init__(self, file, path, load=False):
        """file: list of page basenames; path: Information storage location."""
        self.file_list = file
        self.Info = Information(path, load=load)
        self.tot = 0  # pages successfully processed

    def makeData(self):
        """Process every page sequentially; any failure skips that page.

        Narrowed the original bare ``except:`` clauses to ``Exception`` so
        KeyboardInterrupt/SystemExit still propagate; also dropped the
        unused ``re.findall`` result the inner handler computed.
        """
        for file in self.file_list:
            try:
                img_name = file + ".png"
                text_name = file + ".txt"
                try:
                    text_processor = TextProcessor(text_name)
                    text, double_lines, pic_words, ori_t = \
                        text_processor.getProcessText()
                    # Skip pages belonging to any excluded book.
                    if any(code in sent
                           for sent in ori_t
                           for code in self._SKIP_CODES):
                        continue
                    page_type = text_processor.getType()
                    col_cutter = V_ColumnCutter(img_name, page_type)
                    cols, img_with_line = col_cutter.getColumns()
                    if not self.isLegalCol(cols, page_type):
                        continue
                    ori, gray = col_cutter.getImg()
                    # Only pages with the expected height are usable.
                    if gray.shape[0] != 790:
                        continue
                    img_name = col_cutter.getImgName()
                    text_cutter = V_TextCutter(gray, ori, self.Info, cols,
                                               text, page_type, img_name,
                                               double_lines, pic_words,
                                               ori_t, img_with_line)
                    text_cutter.cutColumns()
                    self.tot += 1
                except Exception:
                    # Best-effort pipeline: a broken page is skipped, not fatal.
                    continue
            except Exception:
                print("AN ERROR OCCURS!")
                continue
        self.Info.save()

    def isLegalCol(self, cols, type):
        """Return True when every checked column is 60-80 px wide.

        Depending on the page type, one boundary column is excluded from
        the check: the last one for type 'b', otherwise the first one.
        (Parameter name 'type' shadows the builtin but is kept for
        interface compatibility.)
        """
        if type == 'b':
            l_bond, r_bond = 0, 1
        else:
            l_bond, r_bond = 1, 0
        for p in cols[l_bond:len(cols) - r_bond]:
            width = p[1] - p[0]
            if width < 60 or width > 80:
                return False
        return True

    def makeData_MT(self):
        """Multi-threaded variant: one producer feeds 32 consumers."""
        p1 = Producer(self.file_list, self.Info)
        p1.start()
        consumers = [Consumer() for i in range(32)]
        for c in consumers:
            c.start()
        for c in consumers:
            c.join()
        self.Info.save()
from ServerUDP import ServerUDP
from Information import Information

# Simple UDP request/response loop: upper-case the incoming message,
# echo it locally, and reply with the matching options from Information.
objUDP = ServerUDP('')
while True:
    udp = objUDP.getCon()
    msg, c = udp.recvfrom(2048)  # blocking receive, 2 KiB buffer
    modified = msg.upper()
    Info = Information()
    # Parenthesized print works on both Python 2 and 3 (the original
    # print-statement form is a syntax error on Python 3).
    print(modified)
    udp.sendto(Info.getOptions(modified), c)
    del Info  # drop the per-request Information instance