def main(tme):
    """Drive one read/lookup cycle and ask the user whether to continue.

    tme -- cycle counter; 1 on the very first call, which triggers the
    one-time page calibration (page_setup / rotate / trep_matr) and caches
    the transforms in module globals M and M5.

    Returns 2 when the user says "bye" (quit entirely) and 1 when the user
    says "no" (go back to sleep); keeps cycling while the user says
    "yes"/"yeah".
    """
    if tme == 1:
        # First call: calibrate the page once and cache the transforms.
        page_setup()
        global M
        M = rotate(c)  # NOTE(review): `c` is a module global -- confirm page_setup sets it
        global M5
        M5 = trep_matr()
        tme = 2
    crop_img = crop(M, M5, tme / 2)
    # crop() returns an image array for a word lookup, 0 after saving a
    # photo, and may fall through with None on "done".  The original
    # `crop_img is not 0` relied on small-int identity and also sent None
    # into ocr(); test the type instead.
    if isinstance(crop_img, np.ndarray):
        s = ocr(crop_img)
        dictionary(s)
    ad.tts('do you want to continue reading?')
    while 1:
        cmmd = ad.stt()
        if cmmd is None:
            continue
        elif ad.find(cmmd, 'bye'):
            return 2
        elif ad.find(cmmd, 'no'):
            return 1
        elif ad.find(cmmd, 'yes') or ad.find(cmmd, 'yeah'):
            tme = tme + 1
            break
    # Keep the counter even so crop() receives an increasing picture number.
    if not tme % 2 == 0:
        tme = tme + 1
    # Was a bare `main(tme)`: the recursive result was discarded, so a
    # "bye"/"no" spoken in a later cycle never reached the caller.
    return main(tme)
def main(tme):
    """Variant read/lookup cycle: "bye" or "no" ends the reading session.

    tme -- cycle counter; 1 on the very first call, which triggers the
    one-time page calibration and caches transforms in globals M and M5.
    Returns 1 when the user declines to continue.
    """
    if tme == 1:
        # First call: calibrate the page once and cache the transforms.
        page_setup()
        global M
        M = rotate(c)  # NOTE(review): `c` is a module global -- confirm page_setup sets it
        global M5
        M5 = trep_matr()
        tme = 2
    crop_img = crop(M, M5, tme / 2)
    # crop() returns an image array for a word lookup, 0 after saving a
    # photo, and may return None on "done"; the original identity test
    # against the literal 0 was unreliable and let None through to ocr().
    if isinstance(crop_img, np.ndarray):
        s = ocr(crop_img)
        dictionary(s)
    ad.tts('do you want to continue reading?')
    while 1:
        cmmd = ad.stt()
        if cmmd is None:
            continue
        elif ad.find(cmmd, 'bye') or ad.find(cmmd, 'no'):
            # Was `break`, which fell through into the recursion below, so
            # declining never actually ended the session.  Hand control back.
            return 1
        elif ad.find(cmmd, 'yes') or ad.find(cmmd, 'yeah'):
            tme = tme + 1
            break
    # Keep the counter even so crop() receives an increasing picture number.
    if not tme % 2 == 0:
        tme = tme + 1
    # Return the recursive result (it was previously discarded).
    return main(tme)
def which_mode():
    """Ask which mode the user wants (read / sketch / note) and acknowledge.

    Keeps listening until a recognized keyword is heard.  Mode handlers are
    still commented out in this version; the function only speaks the
    acknowledgement and returns.
    """
    #ad.tts(str(name)+"What do you intend to do?")
    # Loop instead of the original tail recursion: unrecognized speech used
    # to recurse without bound, and ad.stt() returning None crashed ad.find.
    while 1:
        wh = ad.stt()
        if wh is None:
            continue
        if ad.find(wh, "read"):
            ad.tts("ok, I am ready to assist you in reading.")
            #read()
            return
        elif ad.find(wh, "sketch") or ad.find(wh, "draw"):
            ad.tts("ok, I am ready to assist you in sketching.")
            #sketch()
            return
        elif ad.find(wh, "note") or ad.find(wh, "write"):
            ad.tts("ok, I am ready to assist you in taking notes.")
            #note()
            return
        else:
            ad.tts("Sorry, I didn't get you. Are you reading or sketching or taking notes?")
def start(): hour= int(time.strftime("%H")) print hour if (hour>5 and hour <12): ad.tts("Good morning") elif (hour>12 and hour <16): ad.tts("Good afternoon") else: ad.tts("Good evening") ad.tts("May i know your good name?") global name name=str(ad.stt()) print name ad.tts("Hello "+str(name)+", I am Amina. your helper.") return
def which_mode():
    """Listen for the desired mode and dispatch it.

    "read" runs the reading feature and returns its result (1 = sleep,
    2 = quit, as produced by feature.main); "sketch"/"draw" and
    "note"/"write" are acknowledged but their handlers are still stubs.
    Retries until something is recognized.
    """
    while 1:
        wh = ad.stt()
        if wh is None:
            ad.tts("try again")
            continue
        if ad.find(wh, "read"):
            ad.tts("ok, I am ready to assist you in reading.")
            a = feature.main(1)
            return a
        elif ad.find(wh, "sketch") or ad.find(wh, "draw"):
            ad.tts("ok, I am ready to assist you in sketching.")
            #sketch()
            return
        elif ad.find(wh, "note") or ad.find(wh, "write"):
            ad.tts("ok, I am ready to assist you in taking notes.")
            #note()
            return
        else:
            ad.tts("Sorry, I didn't get you. Are you reading or sketching or taking notes?")
            # Was a recursive which_mode() whose return value was discarded,
            # stacking nested listen loops; the surrounding while already
            # retries, so just continue.
            continue
def dictionary(word): if word == "": ad.tts("I Didn't get the word") return; d = enchant.Dict("en_GB") if not d.check(word): word = d.suggest(word)[0] if word[-1] == '.': word= word[0:-1] i=0 print word dictionary=PyDictionary() dict=dictionary.meaning(word) while (1): c=0 if dict is not None: ad.tts("your word is " + word) if ( dict.has_key('Adjective')) : s= dict['Adjective'] if len(s)>i : print s[i] ad.tts("adjective, " + s[i]) c=1 if dict.has_key('Noun') : s= dict['Noun'] if len(s)>i : print s[i] ad.tts("Noun, " + s[i]) c=1 if dict.has_key('Verb') : s= dict['Verb'] if len(s)>i : print s[i] ad.tts("Verb, " + s[i]) c=1 if dict.has_key('Adverb') : s= dict['Adverb'] if len(s)>i : print s[i] ad.tts("Adverb, " + s[i]) c=1 if dict.has_key('Preposition') : s= dict['Preposition'] if len(s)>=i : print s[i] ad.tts("Preposition, " + s[i]) c=1 i=i+1 if c==0: ad.tts("sorry, no more meanings available") break else: ad.tts("sorry, meaning is not available") break ad.tts("Do you want an alternate meaning?" ) while (1): cmmd=ad.stt() if cmmd == None: continue elif ad.find(cmmd, "yes") or ad.find(cmmd, "yeah"): break elif ad.find(cmmd, "no"): return; return;
def ocr(img): gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) cv2.imwrite('test.jpg', gray) str1 = pytesseract.image_to_string(Image.open('test.jpg')) os.remove('test.jpg') str1 = str1.strip() #to remove any leading blank spaces, if any n = str1.find('\n') if n!=-1: str1 = str1[0:n] #first line stored str1 = str1.strip() #to remove any leading or trailing blank spaces, if any l= len(str1) str1 = str1 + ' ' if (str1.count(' ')) <=3 : #at most 3 words n=str1.count(' ') else: n=3 if l==0: n=0 a=list(); print l i=0 word='' c=0 while c<n : if str1[i] == ' ' : a.append(word) word = '' c= c+1 else: word = word + str1[i] i=i+1 print n i=0 d = enchant.Dict("en_GB") while n>i: print a[i] if not d.check(a[i]): if len(d.suggest(a[i])) >0: a[i] = d.suggest(a[i])[0] print a[i] ad.tts("is your word " + a[i]) cmmd = "" while (1): cmmd=ad.stt() if cmmd is None: ad.tts("Try again") continue elif (ad.find(cmmd, "yes")): return a[i]; elif (ad.find(cmmd, "no")): break i=i+1 return "";
def crop (M2,M5,picno):
    """Track a colored pointer over the warped page and crop what it marks.

    The camera watches for a blob whose HSV color lies in [low1, high1]
    (module globals).  The blob's first centroid (a, b) is remembered; once
    it has moved and then slows down (or 'q' is pressed), its final centroid
    (c, d) closes a selection:
      * tall selection (|d-b| > 35): treated as a photo region, saved under
        photos<...>/<picno>.jpg, returns 0;
      * flat selection: treated as an underline below one word, returns the
        cropped word image (after cntour()) for OCR.

    picno -- file name stem for saved photos.
    NOTE(review): M2 is never used; warping uses M5 and globals p1, p2 --
    confirm against the rest of the file.
    NOTE(review): state variable l encodes the tracking phase:
    0 = waiting, -1 = reference frame grabbed, -2 = start point latched,
    -3 = pointer has moved (selection in progress).
    """
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="path to the (optional) video file")
    ap.add_argument("-b", "--buffer", type=int, default=32, help="max buffer size")
    args = vars(ap.parse_args())
    # initialize the list of tracked points and the coordinate deltas
    pts = deque(maxlen=args["buffer"])
    (dX, dY) = (0 , 0)
    l=0
    camera = cv2.VideoCapture(0)
    while True:
        ad.tts("when you need me, say okay.") #NEW*
        cmmd=ad.stt()
        if cmmd is None:
            continue
        if ad.find(cmmd, "ok"):
            l=0
            while True:
                if l==0:
                    # grab and warp a clean reference frame to crop from later
                    ad.tts("I am ready.")
                    ret,frame0 = camera.read()
                    frame0 = cv2.warpPerspective(frame0,M5,(p1,p2))
                    l=-1
                # grab the current frame
                (grabbed, frame) = camera.read()
                # if we are viewing a video and we did not grab a frame,
                # then we have reached the end of the video
                if args.get("video") and not grabbed:
                    break
                # resize the frame, crop it (into a quadrilateral), blur it,
                # and convert it to the HSV color space
                frame = cv2.warpPerspective(frame,M5,(p1,p2))
                blurred = cv2.GaussianBlur(frame, (11, 11), 0)
                hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
                # construct a mask for a color, then perform a series of
                # dilations and erosions to remove any small blobs left in the mask
                mask = cv2.inRange(hsv, low1, high1)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cv2.imshow('mask', mask)
                # find contours in the mask and initialize the current
                # (x, y) center of the ball
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                # only proceed if at least one contour was found
                if len(cnts) > 0:
                    # find the largest contour in the mask, then use it to compute
                    # the minimum enclosing circle and centroid
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    if l==-1 : #NEW*
                        # latch the selection's starting corner (a, b)
                        a = int(M["m10"] / M["m00"])
                        b = int(M["m01"] / M["m00"])
                        l=-2
                    #NEW*
                    # only proceed if the radius meets a minimum size
                    if radius > 5:
                        # update the list of tracked points
                        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                        pts.appendleft(center)
                if len(pts)>10 :
                    # check to see if enough points have been accumulated in
                    # the buffer
                    if pts[-10] is not None:
                        # compute the difference between the x and y
                        # coordinates (motion estimate over the last frames)
                        dX = pts[-10][0] - pts[1][0]
                        dY = pts[-10][1] - pts[1][1]
                # show the frame to our screen and increment the frame counter
                cv2.imshow("frame", frame)
                key = cv2.waitKey(1) & 0xFF
                #counter += 1
                if (l==-2 and (np.abs(dX) > 15 or np.abs(dY) > 15)): #NEW*
                    l=-3 #NEW*
                # if the 'q' key is pressed, or if the object slows down,
                # loop is exited and the cropped part is displayed
                if (l==-3 and key == ord('q')) or (l==-3 and len(pts)>24 and np.abs(dX) < 10 and np.abs(dY) < 10):
                    # selection's ending corner (c, d) -- note this rebinds
                    # the contour variable c
                    c = int(M["m10"] / M["m00"])
                    d = int(M["m01"] / M["m00"])
                    if (np.abs(d-b)>35):
                        # tall selection -> save as a photo; requires the end
                        # corner to be above-left of the start corner
                        if b<d or c<a :
                            ad.tts("sorry, i could not get it, please try again")
                            break
                        ad.tts("image is found and being cropped")
                        crop_img = frame0[d:b,a:c]
                        camera.release()
                        # NOTE(review): str(time) stringifies the time MODULE,
                        # not a timestamp -- presumably str(time.time()) was
                        # intended; confirm before relying on these paths
                        newpath = '/home/sam/Desktop/itsp/photos'+str(time)
                        if not os.path.exists(newpath):
                            os.makedirs(newpath)
                        cv2.imwrite(newpath + '/' + str(picno)+'.jpg',crop_img)
                        cv2.imshow('cropped', crop_img)
                        cv2.waitKey(5000)
                        cv2.destroyAllWindows()
                        return 0;
                    #else treat it as an underline of a word
                    else:
                        if c<a :
                            ad.tts("sorry, i could not get it, please try again")
                            break
                        ad.tts("I am looking for the meaning")
                        # crop a strip just above the underline
                        crop_img = frame0[b-50:b-15,a:c]
                        cv2.imwrite('Image0.jpg',crop_img)
                        crop_img = cntour(crop_img)
                        cv2.imwrite('Image.jpg',crop_img)
                        z=2  # NOTE(review): unused leftover
                        # cleanup the camera and close any open windows
                        camera.release()
                        cv2.destroyAllWindows()
                        return crop_img;
        elif ad.find(cmmd, "done"):
            break
        else:
            ad.tts("I don't get you. If you want my assistance than speak okay.")
            continue
def dictionary(word): if word == "": ad.tts("Didn't get the word") return; d = enchant.Dict("en_GB") if not d.check(word): word = d.suggest(word)[0] if word[-1] == '.': word= word[0:-1] i=0 print word dictionary=PyDictionary() dict=dictionary.meaning(word) while (1): c=0 if dict is not None: ad.tts("your word is " + word) if ( dict.has_key('Adjective')) : s= dict['Adjective'] if len(s)>i : print s[i] ad.tts("adjective, " + s[i]) c=1 if dict.has_key('Noun') : s= dict['Noun'] if len(s)>i : print s[i] ad.tts("Noun, " + s[i]) c=1 if dict.has_key('Verb') : s= dict['Verb'] if len(s)>i : print s[i] ad.tts("Verb, " + s[i]) c=1 if dict.has_key('Adverb') : s= dict['Adverb'] if len(s)>i : print s[i] ad.tts("Adverb, " + s[i]) c=1 if dict.has_key('Preposition') : s= dict['Preposition'] if len(s)>=i : print s[i] ad.tts("Preposition, " + s[i]) c=1 i=i+1 if c==0: ad.tts("sorry, no more meanings available") break else: ad.tts("sorry, the meaning is not available") break ad.tts("Do you want an alternate meaning?" ) while (1): cmmd=ad.stt() if cmmd == None: continue elif ad.find(cmmd, "yes") or ad.find(cmmd, "yeah"): break elif ad.find(cmmd, "no"): return; return;
def crop (M2,M5,picno):
    """Track a colored pointer over the warped page and crop what it marks.

    Variant of the crop routine with shorter prompts.  A blob in the HSV
    range [low1, high1] (module globals) is tracked; its first centroid
    (a, b) and final centroid (c, d) close a selection:
      * tall (|d-b| > 35): saved as a photo <picno>.jpg, returns 0;
      * flat: treated as an underline, returns the word image for OCR.

    NOTE(review): M2 is never used; warping uses M5 and globals p1, p2.
    NOTE(review): l encodes the phase: 0 waiting, -1 reference grabbed,
    -2 start latched, -3 pointer moved.
    """
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="path to the (optional) video file")
    ap.add_argument("-b", "--buffer", type=int, default=32, help="max buffer size")
    args = vars(ap.parse_args())
    # initialize the list of tracked points and the coordinate deltas
    pts = deque(maxlen=args["buffer"])
    (dX, dY) = (0 , 0)
    l=0
    camera = cv2.VideoCapture(0)
    while True:
        ad.tts("to start, say okay") #NEW*
        cmmd=ad.stt()
        if cmmd is None:
            continue
        if ad.find(cmmd, "ok"):
            l=0
            while True:
                if l==0:
                    # grab and warp a clean reference frame to crop from later
                    ad.tts("I am ready.")
                    ret,frame0 = camera.read()
                    frame0 = cv2.warpPerspective(frame0,M5,(p1,p2))
                    l=-1
                # grab the current frame
                (grabbed, frame) = camera.read()
                # if we are viewing a video and we did not grab a frame,
                # then we have reached the end of the video
                if args.get("video") and not grabbed:
                    break
                # resize the frame, crop it (into a quadrilateral), blur it,
                # and convert it to the HSV color space
                frame = cv2.warpPerspective(frame,M5,(p1,p2))
                blurred = cv2.GaussianBlur(frame, (11, 11), 0)
                hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
                # construct a mask for a color, then perform a series of
                # dilations and erosions to remove any small blobs left in the mask
                mask = cv2.inRange(hsv, low1, high1)
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cv2.imshow('mask', mask)
                # find contours in the mask and initialize the current
                # (x, y) center of the ball
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                # only proceed if at least one contour was found
                if len(cnts) > 0:
                    # find the largest contour in the mask, then use it to compute
                    # the minimum enclosing circle and centroid
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    if l==-1 : #NEW*
                        # latch the selection's starting corner (a, b)
                        a = int(M["m10"] / M["m00"])
                        b = int(M["m01"] / M["m00"])
                        l=-2
                    #NEW*
                    # only proceed if the radius meets a minimum size
                    if radius > 5:
                        # update the list of tracked points
                        cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                        pts.appendleft(center)
                if len(pts)>10 :
                    # check to see if enough points have been accumulated in
                    # the buffer
                    if pts[-10] is not None:
                        # compute the difference between the x and y
                        # coordinates (motion estimate over the last frames)
                        dX = pts[-10][0] - pts[1][0]
                        dY = pts[-10][1] - pts[1][1]
                # show the frame to our screen and increment the frame counter
                cv2.imshow("frame", frame)
                key = cv2.waitKey(1) & 0xFF
                #counter += 1
                if (l==-2 and (np.abs(dX) > 15 or np.abs(dY) > 15)): #NEW*
                    l=-3 #NEW*
                # if the 'q' key is pressed, or if the object slows down,
                # loop is exited and the cropped part is displayed
                if (l==-3 and key == ord('q')) or (l==-3 and len(pts)>24 and np.abs(dX) < 10 and np.abs(dY) < 10):
                    # selection's ending corner (c, d) -- note this rebinds
                    # the contour variable c
                    c = int(M["m10"] / M["m00"])
                    d = int(M["m01"] / M["m00"])
                    if (np.abs(d-b)>35):
                        # tall selection -> save as a photo; requires the end
                        # corner to be above-left of the start corner
                        if b<d or c<a :
                            ad.tts("sorry, invalid, please try again")
                            break
                        ad.tts("image is found and being cropped")
                        crop_img = frame0[d:b,a:c]
                        camera.release()
                        # NOTE(review): str(time) stringifies the time MODULE,
                        # not a timestamp -- presumably str(time.time()) was
                        # intended; confirm before relying on these paths
                        newpath = '/home/sam/Desktop/itsp/photos'+str(time)
                        if not os.path.exists(newpath):
                            os.makedirs(newpath)
                        cv2.imwrite(newpath + '/' + str(picno)+'.jpg',crop_img)
                        cv2.imshow('cropped', crop_img)
                        cv2.waitKey(5000)
                        cv2.destroyAllWindows()
                        return 0;
                    #else treat it as an underline of a word
                    else:
                        if c<a :
                            ad.tts("sorry, invalid, please try again")
                            break
                        ad.tts("I am looking for the meaning")
                        # crop a strip just above the underline
                        crop_img = frame0[b-50:b-15,a:c]
                        cv2.imwrite('Image0.jpg',crop_img)
                        crop_img = cntour(crop_img)
                        cv2.imwrite('Image.jpg',crop_img)
                        z=2  # NOTE(review): unused leftover
                        # cleanup the camera and close any open windows
                        camera.release()
                        cv2.destroyAllWindows()
                        return crop_img;
        elif ad.find(cmmd, "done"):
            break
        else:
            ad.tts("I don't get you. If you want my assistance than speak okay.")
            continue
import interface as intf
import audio_fn as ad

# Entry point: sleep until the user says "wake", then greet and dispatch to
# the chosen mode.  which_mode() returns 1 (go back to sleep) or 2 (quit).
if __name__ == "__main__":
    while True:
        cmmd = ad.stt()
        if cmmd is None:  # was `== None`
            continue
        elif ad.find(cmmd, "wake"):
            intf.start()
            ad.tts(str(intf.name) + ", What do you intend to do?")
            b = intf.which_mode()
            if b == 1:
                ad.tts("Ok, I am going to sleep. say wake up when you want my assistance again. Good bye.")
                continue
            elif b == 2:
                ad.tts("Ok, It was great to assist you.")
                break
            else:
                break
        else:
            # was "Did you just said something?" (grammar fix in the prompt)
            ad.tts("Did you just say something?")
            continue
    # dict_crop()
    ad.remove_files()
import interface as intf
import audio_fn as ad

# Entry point (duplicate variant): sleep until the user says "wake", then
# greet and dispatch.  which_mode() returns 1 (sleep again) or 2 (quit).
if __name__ == "__main__":
    while True:
        cmmd = ad.stt()
        if cmmd is None:  # was `== None`
            continue
        elif ad.find(cmmd, "wake"):
            intf.start()
            ad.tts(str(intf.name) + ", What do you intend to do?")
            b = intf.which_mode()
            if b == 1:
                ad.tts(
                    "Ok, I am going to sleep. say wake up when you want my assistance again. Good bye."
                )
                continue
            elif b == 2:
                ad.tts("Ok, It was great to assist you.")
                break
            else:
                break
        else:
            # was "Did you just said something?" (grammar fix in the prompt)
            ad.tts("Did you just say something?")
            continue
    # dict_crop()
    ad.remove_files()