def word_check(name):
    """Spell-check *name* against the personal word list in words.txt.

    Returns *name* unchanged when it is already in the list (or when no
    acceptable suggestion exists); otherwise returns the top suggestion,
    provided it is close enough by edit distance and length.
    """
    dicti = enchant.PyPWL("words.txt")
    if dicti.check(name):
        print("Already there!!")
        return name

    # Word is unknown: ask the dictionary for candidates.
    suggest = dicti.suggest(name)
    print("input: ", name)
    if not suggest:
        # No candidates at all — keep the original spelling.
        return name

    best = suggest[0]
    print("suggested: ", best)

    # Levenshtein distance between the top suggestion and the input,
    # via the classic DP table (rows = suggestion prefix, cols = input prefix).
    rows = len(best) + 1
    cols = len(name) + 1
    a = np.zeros([rows, cols])
    a[0] = np.arange(cols)
    a[:, 0] = np.arange(rows)
    for i in range(1, rows):
        for j in range(1, cols):
            if best[i - 1] == name[j - 1]:
                a[i, j] = a[i - 1, j - 1]
            else:
                a[i, j] = min(a[i - 1, j], a[i, j - 1], a[i - 1, j - 1]) + 1
    n = float(a[-1, -1])

    if n <= len(name) / 2:
        print("lol")
        # Accept only suggestions of length len(name) or len(name) + 1.
        # NOTE(review): this window is asymmetric — suggestions shorter than
        # the input are always rejected; confirm that is intentional.
        if len(best) not in range(len(name), len(name) + 2):
            print("Suggested won't be implemented")
            return name
        print("Change accepted")
        return best

    print("change rejected")
    return name
df.rename(columns={'Unnamed: 3': 'Total'}, inplace=True) #check the data df.head() df.info() df.describe() #spell-checker import enchant #write the name of all zillas in a text file and prepare the custom dictionary allZilla = '\n'.join(zillas for zillas in list(myDF.iloc[:, 2])) with open("zillaNames.txt", "w") as outfile: outfile.write(allZilla) zillaList = enchant.PyPWL("zillaNames.txt") #create the dictionary of total cases df.set_index('District/City', inplace=True) caseDict = df.to_dict(orient='dict') caseDict = caseDict['Total'] #the name of B. Baria is too tough to map to Brahamanbaria caseDict['Brahamanbaria'] = caseDict['B. Baria'] del caseDict['B. Baria'] #correct the zilla name in the data according to the administrative name #I have no clue why "Chapainawabganj" is not in the list of administrative map! #and I don't know - "ami ekhon oke niye ki korbo? -_-" for zilla, cases in caseDict.items(): if (zillaList.check(zilla)):
# --- tail of a function whose definition starts before this chunk ---
# (appears to finish building an inverted index keyed by word; `med`, `word`,
#  `words1`, `cnt`, and `words_dict` are defined above this view — TODO confirm)
        words2 = str.lower(med).split("_")
        if word in words1 or word in words2:
            # record the query index and mark its presence for this word
            words_dict[word][0].append(cnt)
            words_dict[word][1][cnt] = 1
    return words_dict


def save_hash(total_words: "HASH") -> None:
    """Persist the word hash to HASH_DUMP_FILE_NAME, one word per line,
    replacing any previous dump."""
    # NOTE(review): remove-then-open-for-append is equivalent to opening the
    # file with mode "w"; kept as-is in this comments-only update.
    if os.path.exists(HASH_DUMP_FILE_NAME):
        os.remove(HASH_DUMP_FILE_NAME)
    with open(HASH_DUMP_FILE_NAME, "a") as f:
        f.write("\n".join(list(total_words)))


# Reload the dumped word list as an enchant personal-word-list dictionary.
load_hash = lambda: enchant.PyPWL(HASH_DUMP_FILE_NAME)


def save_data(query_list: "List of queries") -> tuple:
    """Build the word hashes for *query_list*, persist them, and return
    (query_list, total_words, words_dict, tot_words_dict)."""
    total_words = hash_build(query_list)
    words_dict = inv_hash_build(query_list, total_words)
    save_hash(total_words)
    tot_words_dict = load_hash()
    # Pickle everything for later sessions.
    # NOTE(review): the file handle from open() is never explicitly closed.
    pickle.dump(
        (query_list, total_words, words_dict, tot_words_dict),
        open(DATA_STORAGE_FILENAME, "wb"),
    )
    return query_list, total_words, words_dict, tot_words_dict
#spell-check an ingredient name given on the command line
import enchant
import sys

#accept the ingredient name from command line arguments
input_word = sys.argv[1]

#load the personal word list dictionary once — the original loaded the same
#file into two identical PyPWL objects; keep both names bound for
#compatibility, but parse the file only once
my_dict = enchant.PyPWL("ingredientslist.txt")
ingredients_dict = my_dict

#get suggestions for the input word (computed once and reused below)
suggestions = my_dict.suggest(input_word)

#check if the word exists in the dictionary
word_exists = ingredients_dict.check(input_word)
print("word exists: ", word_exists)
print ("input:", input_word)
print("suggestions:", suggestions)

if not word_exists:
    #reprint the (already computed) suggestions for the missing-word case,
    #preserving the original script's output
    print ("input:", input_word)
    print("suggestions:", suggestions)
#spell-checker.py — suggest corrections for a star name given on the CLI
import enchant
import sys

#the name to check is the first command-line argument
input_movie_name = sys.argv[1]

#build a personal-word-list dictionary from the known star names
movies_dict = enchant.PyPWL("Settings/star_name.txt")

#report whether the name is already in the list
word_exists = movies_dict.check(input_movie_name)
print("word exists: ", word_exists)

#unknown name: offer the closest matches from the word list
if not word_exists:
    suggestions = movies_dict.suggest(input_movie_name)
    print("input:", input_movie_name)
    print("suggestions:", suggestions)
def run(self, dispatcher: CollectingDispatcher, tracker: Tracker,
        domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
    """Rasa custom action: look up COVID-19 statistics for the state named
    in the user's last message and send them back as a pie-chart payload.

    The state name is spell-corrected against the personal word list in
    states.txt before matching the covid19india.org API response.
    Always returns an empty event list.
    """
    # Full per-state dataset from the public API.
    response = requests.get(
        "https://api.covid19india.org/data.json").json()
    # The raw user text is taken as the state name; lower-case it for the
    # spell-checker.
    state_name = str((tracker.latest_message)['text'])
    state_name = state_name.lower()
    print(state_name)
    states_dict = enchant.PyPWL("states.txt")
    word_exists = states_dict.check(state_name)
    print("word exists: ", word_exists)
    if not word_exists:
        #get suggestions for the input word
        suggestions = states_dict.suggest(state_name)
        print("input:", state_name)
        print("suggestions:", suggestions)
        if (len(suggestions) != 0):
            # Use the top suggestion as the corrected state name.
            state_name = suggestions[0]
    #state = None
    # Entities are only printed for debugging; matching uses the message text.
    entities = tracker.latest_message['entities']
    print(entities)
    #for e in entities:
    #    if(e['entity'] == "state"):
    #        state = e['value']
    #message = "Enter proper state name"
    for data in response["statewise"]:
        # NOTE(review): .title() may not match API names containing
        # lower-cased words (e.g. "Jammu and Kashmir") — confirm.
        if data["state"] == state_name.title():
            print("Active = " + data["active"] + " Confirmed = " +
                  data["confirmed"] + " Deaths = " + data["deaths"] +
                  " Recovered = " + data["recovered"])
            # `message` is only consumed by the commented-out utter_message
            # call below.
            message = "Active = " + data["active"] + " Confirmed = " + data[
                "confirmed"] + " Deaths = " + data[
                    "deaths"] + " Recovered = " + data["recovered"]
            active = int(data["active"])
            confirmed = int(data["confirmed"])
            deaths = int(data["deaths"])
            recovered = int(data["recovered"])
            print(active)
            labels = ['Active', 'Confirmed', 'Deaths', 'Recovered']
            backgroundColor = [
                "#36a2eb", "#ffcd56", "#ff6384", "#009688", "#c45850"
            ]
            #chartsData = [555,242,145,23]
            chartsData = []
            chartsData.extend((active, confirmed, deaths, recovered))
            # NOTE(review): the loop variable `data` is rebound here to the
            # chart payload — harmless for iteration, but confusing.
            data = {
                "title": state_name.upper(),
                "labels": labels,
                "backgroundColor": backgroundColor,
                "chartsData": chartsData,
                "chartType": "pie",
                "displayLegend": "true"
            }
            #dispatcher.utter_message(state_name)
            #dispatcher.utter_message(message)
            dispatcher.utter_custom_json({"payload": "chart", "data": data})
    return []
#df.rename( columns={'Unnamed: 0':'Total'}, inplace=True ) #check the data df.head() df.info() df.describe() #spell-checker import enchant #write the name of all zillas in a text file and prepare the custom dictionary allZilla = '\n'.join(zillas for zillas in list(myDF['zilla'])) with open("zillaNames_mine.txt", "w") as outfile: outfile.write(allZilla) zillaList = enchant.PyPWL("zillaNames_mine.txt") #create the dictionary of total cases df.set_index('District/City', inplace=True) caseDict = df.to_dict(orient='dict') caseDict = caseDict['Total'] #the name of B. Baria is too tough to map to Brahamanbaria caseDict['Brahamanbaria'] = caseDict['B. Baria'] del caseDict['B. Baria'] #correct the zilla name in the data according to the administrative name for zilla, cases in caseDict.items(): if (zillaList.check(zilla)): print(zilla, cases) else:
#look up a movie title in the personal word list and suggest fixes
import enchant
import sys

#the movie name is the first command-line argument
input_movie_name = sys.argv[1]

#personal word list built from movies.txt
movies_dict = enchant.PyPWL("movies.txt")

#report whether the title is already known
word_exists = movies_dict.check(input_movie_name)
print("word exists: ", word_exists)

#offer the closest titles only when the exact name is missing
if not word_exists:
    suggestions = movies_dict.suggest(input_movie_name)
    print("input:", input_movie_name)
    print("suggestions:", suggestions)
def execute(img):
    """Run Azure handwriting OCR on a prescription image and split the
    recognised words into medicines, lab tests and free-form notes.

    The page is divided at the midpoint `mp`: words right of the vertical
    midline are medicines (upper half) or lab tests (lower half); everything
    left of it is treated as notes.  Medicine and test words are then
    spell-corrected against per-specialization word lists via enchant.

    Returns a dict with keys "name", "medicine", "tests", "notes".
    """
    List1 = []  # corrected medicine lines
    List2 = []  # corrected lab-test lines
    endpoint = 'https://handwrittingrecognition.cognitiveservices.azure.com/'
    text_recognition_url = endpoint + "vision/v2.1/read/core/asyncBatchAnalyze"
    # Set image_path to the local path of an image that you want to analyze.
    # image = img
    # img2 = image.copy()
    # # detecting red color
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # lower = np.array([155, 25, 0])
    # upper = np.array([179, 255, 255])
    # mask = cv2.inRange(image, lower, upper)
    # thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]
    # # apply canny edge detection
    # edges = cv2.Canny(thresh, 100, 200)
    # # get hough line segments
    # threshold = 100
    # minLineLength = 50
    # maxLineGap = 20
    # lines = cv2.HoughLinesP(thresh, 1, np.pi / 360, threshold, minLineLength, maxLineGap)
    # # draw lines
    # linear = np.zeros_like(thresh)
    # for [line] in lines:
    #     x1 = line[0]
    #     y1 = line[1]
    #     x2 = line[2]
    #     y2 = line[3]
    #     cv2.line(linear, (x1, y1), (x2, y2), (255), 1)
    # # get bounds of white pixels
    # white = np.where(linear == 255)
    # xmin, ymin, xmax, ymax = np.min(white[1]), np.min(white[0]), np.max(white[1]), np.max(white[0])
    # # draw bounding box on input
    # bounds = img2.copy()
    # cv2.rectangle(bounds, (xmin, ymin), (xmax, ymax), (0, 0, 255))
    # crop the image at the bounds
    #crop = img2[ymin:ymax, xmin:xmax]
    crop = img
    shp = img.shape
    # cv2.imwrite("crop.jpg", crop)
    #mp = ((xmax - xmin) / 1.714, (ymax - ymin) / 2)
    # Page midpoint used to route words into regions.
    # NOTE(review): 1.714 looks like an empirical page ratio — confirm.
    mp = (shp[0] / 1.714, shp[1] / 2)
    # image_path = "crop.jpg"
    # image_data = open(image_path, "rb").read()
    # crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
    # image_data = Image.fromarray(crop)
    # Encode the image as JPEG bytes for the HTTP body.
    is_success, im_buf_arr = cv2.imencode(".jpg", crop)
    image_data = im_buf_arr.tobytes()
    headers = {'Ocp-Apim-Subscription-Key': subscription_key,
               'Content-Type': 'application/octet-stream'}
    response = requests.post(
        text_recognition_url, headers=headers, data=image_data)
    response.raise_for_status()
    # Extracting text requires two API calls: One call to submit the
    # image for processing, the other to retrieve the text found in the image.
    # Holds the URI used to retrieve the recognized text.
    # NOTE(review): operation_url is assigned but never used below.
    operation_url = response.headers["Operation-Location"]
    # The recognized text isn't immediately available, so poll to wait for
    # completion.
    analysis = {}
    poll = True
    notes_text = ""
    specialization = ""
    medicine_text = ""
    m = 0  # flag: any medicine word seen on this page
    l = 0  # flag: any lab-test word seen on this page
    lab_text = ""
    while (poll):
        response_final = requests.get(
            response.headers["Operation-Location"], headers=headers)
        analysis = response_final.json()
        # Route every detected word by its bounding-box midpoint.
        if analysis["status"] == TextOperationStatusCodes.succeeded:
            for text_result in analysis["recognitionResults"]:
                for line in text_result["lines"]:
                    if ("Specialization" in line["text"]):
                        # Drop the "Specialization" label prefix (17 chars).
                        specialization = line["text"][17:]
                    elif ("Dr. Name" not in line["text"]):
                        for word in line["words"]:
                            if (word["boundingBox"][0] + word["boundingBox"][2]) / 2 > mp[1]:
                                if (word["boundingBox"][1] + word["boundingBox"][7]) / 2 < mp[0]:
                                    medicine_text = medicine_text + word["text"] + " "
                                    m = 1
                                else:
                                    lab_text = lab_text + word["text"] + " "
                                    l = 1
                            else:
                                notes_text = notes_text + word["text"] + " "
                # Separate pages with commas so they split into lines below.
                if (m == 1):
                    medicine_text = medicine_text + ","
                if (l == 1):
                    lab_text = lab_text + ","
        time.sleep(1)
        if ("recognitionResults" in analysis):
            poll = False
        if ("status" in analysis and analysis['status'] == 'Failed'):
            poll = False
    # Word Suggestions #
    #print(os.path.curdir, "medicine/", specialization, ".txt")
    # Spell-correct each medicine word against the specialization word list.
    medicine_lines = medicine_text.split(',')
    medicine_dict = enchant.PyPWL(os.path.join(scriptdir, specialization + ".txt"))
    for medicine_line in medicine_lines:
        words = medicine_line.split()
        value = " "
        for input_medicine_name in words:
            suggestions = medicine_dict.suggest(input_medicine_name)
            if (len(suggestions) != 0):
                value = value + suggestions[0] + " "
            else:
                value = value + input_medicine_name
        # NOTE(review): List1 != ' ' compares a list to a string and is
        # always true; the parallel test loop below uses value != ' ' —
        # likely the intended condition here too.
        if (List1 != ' '):
            List1.append(value)
    # Same correction pass for lab tests, against tests.txt.
    test_lines = lab_text.split(',')
    test_dict = enchant.PyPWL(os.path.join(scriptdir, "tests.txt"))
    for test_line in test_lines:
        words = test_line.split()
        value = " "
        for input_test_name in words:
            suggestions = test_dict.suggest(input_test_name)
            if (len(suggestions) != 0):
                value = value + suggestions[0] + " "
            else:
                value = value + input_test_name
        if (value != ' '):
            List2.append(value)
    # Drop the placeholder entries that are just a single space.
    List1 = remove_values_from_list(List1, ' ')
    List2 = remove_values_from_list(List2, ' ')
    # NOTE(review): "name" is hard-coded — presumably a placeholder.
    dictionary = {"name": "Hamza", "medicine": List1,
                  "tests": List2, "notes": notes_text}
    print(dictionary)
    return dictionary
def __init__(self, wordlist):
    """Initialise the checker: delegate to SpChecker, keep a general
    en_US checker, and seed a personal word list with every word in
    ``self.wordlist``."""
    SpChecker.__init__(self, wordlist)
    # Empty personal word list, filled from the parent-managed word list.
    self.spdict = enchant.PyPWL()
    self.chkr = EnchantSpCheck("en_US")
    for entry in self.wordlist:
        self.spdict.add(entry)
def __init__(self, word_list_path, no_suggestions=5):
    """Wrap a personal word list as a spell-check dictionary.

    word_list_path: path to the plain-text word list file.
    no_suggestions: number of suggestions callers should request (default 5).
    """
    self.word_list_path = word_list_path
    self.no_suggestions = no_suggestions
    # Build the enchant personal-word-list dictionary from the file.
    self.medical_dict = enchant.PyPWL(self.word_list_path)