def processCard(cardId):
    """Fetch a stored business card, run the detection/OCR pipeline on it,
    and persist the results (plus any quality warnings) back to the DB."""
    card = BusinessCards.find_one({'_id': ObjectId(cardId)})
    cardPath = FOLDER_PATH + card['filename']
    sourceImg = utils.readImage(cardPath)

    # Locate/deskew the card, then extract and annotate text regions.
    goodTransform, cardImg = findCard.findCard(sourceImg)
    goodBlur = process.checkBlur(cardImg)
    regions, text = process.processCard(cardImg)
    annotated = process.drawRegions(cardImg, regions)
    suggestedFields = process.guessFields(regions, text)

    # Write the annotated image where the web frontend can serve it.
    outputImg = annotated[::, ::, ::]
    cv2.imwrite('server/static/processed/' + card['filename'], outputImg)

    card['processed'] = True
    card['text'] = text
    card['regions'] = regions
    card['suggested'] = suggestedFields

    # A bad transform replaces any existing warnings list outright; a blur
    # warning is appended to whatever warnings the card already carries.
    if not goodTransform:
        card['warnings'] = ['Detected a bad transformation']
    if not goodBlur:
        card.setdefault('warnings', []).append('Detected a blurry image')

    BusinessCards.save(card)
def __init__(self, threads=4, positivePath="positive", negativePath="negative", maxPositiveImages=-1, maxNegativeImages=-1):
    """Load positive/negative training images and precompute their integral images.

    Args:
        threads: number of threads to run in parallel (on some computations).
        positivePath: folder with the positive images.
        negativePath: folder with the negative images.
        maxPositiveImages: maximum number of positive images (-1: all possible).
        maxNegativeImages: maximum number of negative images (-1: all possible).

    Attributes set:
        trainingData: list with the integral image of each training image.
        imageClass: parallel list of labels, 1 for positive and 0 for negative.
        positiveNumber: number of positive images actually used.
        negativeNumber: number of negative images actually used.
    """
    self.threads = threads
    posImages = listdir(positivePath)
    negImages = listdir(negativePath)
    # -1 means "use everything"; any value >= 0 truncates the listing.
    if maxPositiveImages > -1:
        posImages = posImages[:maxPositiveImages]
    if maxNegativeImages > -1:
        negImages = negImages[:maxNegativeImages]
    self.positiveNumber = len(posImages)
    self.negativeNumber = len(negImages)
    self.trainingData = []
    self.imageClass = []
    # NOTE(review): paths are joined with a literal "/"; assumes this runs
    # where forward slashes are valid separators -- confirm.
    for im in posImages:
        self.trainingData.append(
            integralImage(readImage(positivePath + "/" + im)))
        self.imageClass.append(1)
    for im in negImages:
        self.trainingData.append(
            integralImage(readImage(negativePath + "/" + im)))
        self.imageClass.append(0)
def main():
    # Read and smooth the input image (source of the image comes from
    # utils.readImage with no arguments -- presumably a default path/camera).
    img = utils.readImage()
    img = smooth(img)
    # NOTE(review): both names alias the SAME image object -- no copy is
    # made, so both processImage() calls below operate on one shared image.
    # Presumably intentional so the final window shows both classifications
    # combined; if independent passes were meant, this needs img.copy() --
    # confirm against processImage's mutation behavior.
    img_red, img_blue = img, img
    processImage(img_red, 'Red')
    processImage(img_blue, 'Blue')
    utils.showImage(img, 'Final Classification')
def handlePicture(cardPath):
    """Run the card detection/OCR pipeline on one image file and display
    the original next to the annotated result.

    Args:
        cardPath: filesystem path of the image to process.
    """
    img = utils.readImage(cardPath)
    good, card = findCard.findCard(img)
    regions, text = process.processCard(card)
    processedCard = process.drawRegions(card, regions)
    cards = [('Original', img), (str(text), processedCard)]
    # Fixed: was the Python 2 statement `print text`; every other block in
    # this file uses the print() function form.
    print(text)
    utils.display(cards)
def petClone():
    """Read the pet's mystic/battle stats from the screen via OCR, derive
    the three clone stats, and type each one into its input field.

    Returns:
        '' (empty string), matching the other hotkey handlers.
    """
    # Input-field locations for physical / mystic / battle, top to bottom.
    coords = [(935, 590), (935, 660), (935, 730)]
    # SECURITY NOTE(review): eval() on OCR output will execute arbitrary
    # text if the screen read is ever garbage; a numeric parser (parseNum
    # is used elsewhere in this file) would be safer. Flagged only --
    # behavior kept as-is.
    petMystic = eval(readImage(takeImage(951, 921, 1142, 955)))
    petBattle = eval(readImage(takeImage(951, 957, 1142, 991)))
    cloneStats = [
        int(max(petBattle * 23 / 11000, 1)),  # clonePhysical
        int(petBattle * 20 // 33),            # cloneMystic
        int(petMystic * 1.1 // 2)             # cloneBattle
    ]
    # Idiom fix: iterate the pairs directly instead of range(len(...)).
    for coord, stat in zip(coords, cloneStats):
        clicky(*coord)
        hotkeyPress("ctrl", "a")  # select the existing value so it is replaced
        keyPress(stat)
    clicky(1400, 830)  # confirm button
    return ''
def getStat(stat):
    """OCR one stat value from its fixed screen region.

    Args:
        stat: key into the coordinate table (the module-level speed/power/
              hp/count/exp values are used as dict keys).

    Returns:
        The parsed numeric value. (Bug fix: the parsed value was previously
        computed but never returned, so the function returned None.)

    Raises:
        ValueError: re-raised when the OCR text cannot be parsed; the
        captured region is shown first to aid debugging.
    """
    imCoords = {
        speed: (335, 400, 605, 435),
        power: (335, 450, 605, 485),
        hp: (335, 500, 605, 535),
        count: (335, 550, 605, 585),
        exp: (335, 600, 605, 635)
    }
    # Capture outside the try so `im` is always bound when the handler
    # calls im.show() (previously a failure in takeImage would have made
    # the except block itself crash with NameError).
    im = takeImage(*imCoords[stat])
    try:
        return parseNum(readImage(im))
    except ValueError:
        print("This is area attempted to read")
        im.show()
        # Re-raise the original error (keeps its message and traceback)
        # instead of raising a fresh, message-less ValueError.
        raise
def petHunger(currentHunger=0):
    """Print time-to-threshold estimates for pet hunger and schedule an
    alarm for the nearest threshold still ahead.

    Args:
        currentHunger: hunger percentage; 0 (the default) means "read it
            from the screen via OCR".

    Returns:
        '' (empty string), matching the other hotkey handlers.
    """
    if currentHunger == 0:
        movey(970, 335)  # hover so the hunger value is visible on screen
        currentHunger = float(readImage(takeImage(540, 555, 595, 575)))
    hungerSecond = 100 / (12 * 60 * 60)  # hunger lost per second (100% per 12h)
    # Percentage points remaining until each threshold.
    to75 = max(currentHunger - 75, 0)
    to50 = max(currentHunger - 50, 0)
    to10 = max(currentHunger - 10, 0)
    # Bug fix: `tim` was unbound (NameError at soundOffIn) whenever hunger
    # was already at or below 10, i.e. when no threshold is still ahead.
    tim = 0
    for x in [to10, to50, to75]:
        if x > 0:
            print(formatTime(x / hungerSecond))
            tim = x
    # to75 is assigned last, so the alarm targets the nearest upcoming
    # threshold; skip the alarm entirely when every threshold has passed.
    if tim > 0:
        soundOffIn(tim / hungerSecond)
    return ''
def __init__(self):
    """Prompt for an image path, run the color-filter/threshold pipeline,
    and draw a red box around every detected region."""
    print("****** Start CV Part ******")
    img = utils.readImage(str(input("Please Input the ImgPath: ")))
    # Keep an untouched copy: the color filter needs it, and the result
    # boxes are drawn on (a copy of) it rather than on the filtered image.
    original = img.copy()
    img = utils.shift_demo(img, 10, 50)
    img = self.colorFilter(img, original)
    img = utils.threshold(img)
    canvas = original.copy()
    # Outline each region (x, y, w, h) returned by roi_solve.
    for idx, rect in enumerate(self.roi_solve(img)):
        left, right = rect[0], rect[0] + rect[2]
        top, bottom = rect[1], rect[1] + rect[3]
        corners = [[left, bottom], [left, top], [right, top], [right, bottom]]
        cv2.drawContours(canvas, np.array([corners]), 0, (0, 0, 255), 1)
        # self.saveImage(original, corners, idx)  # disabled: per-region crops
    utils.showImage(canvas, "Result")
ModelView = lookat(eye, center, up) ViewProjection = np.dot(np.dot(Viewport, Projection), ModelView) def map_to_screen(v): v = np.dot(ViewProjection, np.append(v, 1)) return v[:3] / v[3] if __name__ == "__main__": timestart = time.perf_counter() image = utils.createImage(width, height, 3, np.uint8) m = Model("obj/african_head.obj") texture = utils.readImage("obj/african_head_diffuse.tga") texture = np.flipud(texture) texture = texture.transpose(1, 0, 2) print("texture.shape = {}, dtype = {}".format(texture.shape, texture.dtype)) timeend = time.perf_counter() print("Read model :: ", (timeend - timestart), "s") light_dir = np.array([0, 0, -1]) timestart = time.perf_counter() z_buffer = np.zeros((width, height)) for f in m.faces: v = np.array([map_to_screen(m.vertices[f[i][0]]) for i in range(3)]) t = np.array([m.tex[f[i][1]] for i in range(3)]).T
def runAfky():
    #TODO: Refactor
    # NOTE(review): this function both reads and assigns the bare names
    # hp, count, speed and exp. Assigning them here without a `global`
    # declaration makes them locals, so the first read (`count**2 * 100`
    # below) should raise UnboundLocalError -- confirm how these are
    # actually supplied / fix upstream.
    # NOTE(review): `coords[0]`..`coords[4]` used further down is not
    # defined in this view either; clickCoords below is the only
    # coordinate table here and it has just 4 entries -- presumably
    # `coords` is a module-level list. Verify.
    clickCoords = {
        speed: (750, 415),
        power: (650, 520),
        hp: (650, 570),
        count: (1245, 470)
    }
    stats = {speed: 0, power: 0, hp: 0, count: 0, exp: 0}
    # Initial OCR read of every tracked stat.
    for key in stats:
        stats[key] = getStat(key)
    while True:
        if detectKeypress():  # user abort
            return ''
        stats['exp'] = getStat('exp')
        # Upgrade costs: next hp level vs next count level.
        hpExp = (stats['hp']**2 * 100) // 3
        countExp = count**2 * 100
        #hpExp + capExp if (hp better than count) else countExp + capExp
        # `temp` is a debugging leftover -- computed but never used.
        temp = (hpExp + (count * (hp * count * 2 + count) / 2)) if (
            hpExp / (count**0.9) < countExp / (hp**1.1)) else (
                countExp + (hp * (hp * count * 2 + hp) / 2))
        #print(hpExp / (count ** 0.9) < countExp / (hp ** 1.1), ' ', temp)
        #True if exp >= above else False
        #print(True if exp > temp else False)
        # Exp needed for the cheaper of the two upgrade paths (upgrade
        # cost plus the "cap" cost tied to the other stat).
        neededExp = (hpExp + (count * (hp * count * 2 + count) / 2)
                     ) if hpExp / (count**0.9) < countExp / (hp**1.1) else (
                         countExp + (hp * (hp * count * 2 + hp) / 2))
        print('Exp:', exp, 'hp:', hp, 'hp exp:', hpExp, 'count:', count,
              'count exp:', countExp, 'Needed Exp:', neededExp)
        # Poll exp via OCR once per second until the upgrade is affordable.
        while exp < neededExp:
            if detectKeypress():
                return ''
            exp = parseNum(readImage(takeImage(335, 600, 605, 635)))
            sleep(1)
        check = True if (exp >= neededExp) else False
        # Spend exp on upgrades until none is affordable any more.
        while check:
            if detectKeypress():
                return ''
            # Speed upgrade: requires hp > 40, speed under both 3800 and
            # the (hp + count) * 10 soft cap, and enough exp to cover the
            # arithmetic-series cost up to that cap.
            if hp > 40 and (hp + count) * 10 > speed < 3800 and exp >= (
                    hp + count) * 10 - speed * (
                        (hp + count) * 10 - speed + 1) / 2:
                clicky(*coords[0])
                exp = exp - ((hp + count) * 10 - speed *
                             ((hp + count) * 10 - speed + 1) / 2)
                speed += 10
            elif hpExp / (count**0.9) < countExp / (hp**1.1):
                # hp is the better buy: pay its cost plus the cap cost.
                if count == hp == 1:
                    capExp = 1
                else:
                    capExp = (count * (hp * count * 2 + count) / 2)
                if hpExp + capExp <= exp:
                    clicky(*coords[2])
                    clicky(*coords[4])
                    exp = exp - hpExp - capExp
                    hp += 1
                else:
                    break
            elif countExp / (hp**1.1) < hpExp / (count**0.9):
                # count is the better buy: pay its cost plus the cap cost.
                if count == hp == 1:
                    capExp = 1
                else:
                    capExp = (hp * (hp * count * 2 + hp) / 2)
                if countExp + capExp <= exp:
                    clicky(*coords[3])
                    clicky(*coords[4])
                    exp = exp - countExp - capExp
                    count += 1
                else:
                    break
            else:
                break
            # Refresh both costs and the affordability flag after a buy.
            hpExp = (hp**2 * 100) // 3
            countExp = count**2 * 100
            neededExp = (
                hpExp + (count * (hp * count * 2 + count) / 2)
            ) if hpExp / (count**0.9) < countExp / (hp**1.1) else (
                countExp + (hp * (hp * count * 2 + hp) / 2))
            check = True if (exp >= neededExp) else False
    # NOTE(review): unreachable -- the `while True` above only exits via
    # the `return ''` statements inside it.
    return ''
import model.mbllen.Network as Network
from model.common import resolve_single
from utils import cropAndResize, load_image, concatenateImage, showImage, readImage, denoise
import cv2

# Load model 1 -- Super resolution (SRGAN generator).
# NOTE(review): generator() is not imported in this chunk; presumably it is
# brought in by an import outside this view -- confirm.
sr_gen = generator()
sr_gen.load_weights('weight/srgan/gan_generator.h5')

# Load model 2 -- Illumination improvement (MBLLEN, 32x32x3 input patches).
mbllen_gen = Network.build_mbllen((32, 32, 3))
mbllen_gen.load_weights('weight/mbllen/LOL_img_lowlight.h5')

# Load test image (hard-coded local test asset).
img = readImage(r'C:\Users\ywqqq\Documents\PRS_prj\maskdetection\test.jpg')

# If noise in image, denoise. Flags are toggled manually for experiments.
gaussian_noise = False
salt_and_pepper_noise = False
if gaussian_noise:
    img = denoise(img, 'gaussian')
if salt_and_pepper_noise:
    img = denoise(img, 'salt-and-pepper')

# Get the luminance of the image.
# If luminance < 70, then apply illumination improvement.
# NOTE(review): COLOR_RGB2HSV implies readImage returns RGB order (not
# OpenCV's default BGR) -- confirm against utils.readImage.
imgHSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
H, S, V = cv2.split(imgHSV)
def dicom2nrrd(dcm_path, nrrd_path):
    """Convert DICOM data to an NRRD file.

    Args:
        dcm_path: path of the source DICOM data.
        nrrd_path: destination path for the NRRD output.

    Returns:
        The loaded SimpleITK image, so callers can reuse it without
        re-reading the DICOM data.
    """
    print("Dicom to nrrd...")
    image = readImage(dcm_path)
    # Removed an os.path.basename() result that was computed but never used.
    sitk.WriteImage(image, nrrd_path)
    return image