# Flask view: accept an uploaded floor-plan image, outline the large contours,
# OCR the annotated image with Tesseract, and pull the numeric dimensions out of the text.
# MyForm and images (presumably a Flask-Uploads UploadSet) are defined elsewhere in the app.
import cv2
import numpy as np
import pytesseract
import nums_from_string
from flask import render_template


def upload_image():
    form = MyForm()
    if form.validate_on_submit():
        filename = images.save(form.image.data)
        # return f'Filename: {filename}'
        img = cv2.imread(r'D:\HTML\Flask Tutorial\static\Image\Uploads\\' + filename)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, not RGB
        _, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
        # cv2.imshow('', thresh)
        # The structuring element must be an array; the original passed a bare (100, 100) tuple.
        kernel = np.ones((100, 100), np.uint8)
        mor_img = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
        # I adapted this part of the code. This is how my version works (2.4.16),
        # but it could be different for OpenCV 3.
        contours, hierarchy = cv2.findContours(
            mor_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
        # Skip the largest contour (the outer outline) and keep only sizeable regions.
        for c in sorted_contours[1:]:
            area = cv2.contourArea(c)
            if area > 6000:
                print(area)
                cv2.drawContours(img, [c], -1, (0, 0, 200), 3)
                x, y, w, h = cv2.boundingRect(c)
                # The lines below get the approximate centre of the rooms.
                cx = x + w / 2
                cy = y + h / 2
                # cv2.putText(img, str(area), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 0, 0), 1, cv2.CV_AA)
        # cv2.imshow('', mor_img)
        # cv2.imshow('', img)
        # cv2.waitKey(0)
        text = pytesseract.image_to_string(img, config="--psm 11")
        output = nums_from_string.get_nums(text)
        print(output)
        return render_template(
            "UploadImage.html",
            form=form,
            detected_dimensions='Dimensions are {}'.format(output))
    else:
        return render_template("UploadImage.html", form=form)
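# A minimal sketch (not part of the app above) of what the OCR + number-extraction step
# returns: pytesseract gives back free-form text, and nums_from_string.get_nums collects
# every numeric token in it as ints/floats. The sample text below is made up.
import nums_from_string

sample_ocr_text = "Bedroom 3.2 x 4.5\nKitchen 2.8 x 3.0"
dims = nums_from_string.get_nums(sample_ocr_text)
print(dims)  # expected: [3.2, 4.5, 2.8, 3.0]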
# Scrape one product page (the selectors match books.toscrape.com-style markup) and
# return its fields as a list. get_soup and urlRoot are defined elsewhere in the script.
import nums_from_string


def get_book_informations(url):
    soup = get_soup(url)
    books = [url]
    product_info = {}
    for row in soup.find(class_="table table-striped").find_all('tr'):
        product_info[row.th.text] = row.td.text
    books.append(product_info['UPC'])
    books.append(soup.find(class_="product_main").h1.text)
    # strip('Â') drops the encoding artefact that precedes the '£' sign
    books.append(product_info['Price (incl. tax)'].strip('Â'))
    books.append(product_info['Price (excl. tax)'].strip('Â'))
    # Availability reads e.g. "In stock (22 available)"; keep the first number found.
    books.append(nums_from_string.get_nums(product_info['Availability'])[0])
    if soup.find("div", id='product_description'):
        books.append(
            soup.find("div", id='product_description').find_next_sibling("p").text)
    books.append(
        soup.find('li', class_='active').find_previous_sibling('li').a.text)
    books.append(soup.find('p', class_='star-rating')['class'][1])
    books.append(
        urlRoot + soup.find('div', class_="item active").img['src'].replace('../', ""))
    return books
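# Hedged illustration of the Availability parsing above: on a books.toscrape.com-style
# page the field reads something like "In stock (22 available)", so the first number
# nums_from_string finds is the stock count. The sample string here is made up.
import nums_from_string

availability = "In stock (22 available)"
stock = nums_from_string.get_nums(availability)[0]
print(stock)  # 22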
# Walk four directory levels below inDir, pick the DICOM slices whose index (the fifth
# number in the file name) lies between 30 and 70, flip them vertically, and save them
# as PNGs. inDir and outDir are defined elsewhere; outDir is expected to end with a
# path separator.
import glob
import os

import dicom  # old pydicom API; with current pydicom use pydicom.dcmread instead
import imageio
import numpy as np
import nums_from_string

for folder in os.listdir(inDir):
    PATH = os.path.join(inDir, folder)
    for folder_2 in os.listdir(PATH):
        PATH_2 = os.path.join(PATH, folder_2)
        for folder_3 in os.listdir(PATH_2):
            PATH_3 = os.path.join(PATH_2, folder_3)
            for folder_4 in os.listdir(PATH_3):
                PATH_4 = os.path.join(PATH_3, folder_4)
                images = [os.path.basename(x) for x in glob.glob(PATH_4 + '/*.dcm')]
                for f in images:
                    nums = nums_from_string.get_nums(f)
                    idx = int(nums[4])
                    if 30 < idx < 70:
                        # split = int(f.split('_')[13])
                        # if split > 30 and split < 70:
                        ds = dicom.read_file(PATH_4 + '/' + f)  # read the DICOM file
                        img = ds.pixel_array                    # get the image array
                        img = np.flip(img, axis=0)              # flip vertically
                        imageio.imwrite(outDir + f.replace('.dcm', '.png'), img)
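# Hedged sketch of the two index-extraction approaches in the loop above: the commented
# split() variant depends on the file name having a fixed underscore layout, whereas
# nums_from_string simply collects every number it sees. The file name is hypothetical,
# so the position of the slice index (nums[4] in the real data) differs here.
import nums_from_string

f = "patient_012_series_3_axial_slice_45_of_120.dcm"
print(nums_from_string.get_nums(f))  # roughly [12, 3, 45, 120]
print(int(f.split('_')[6]))          # 45, but only if the layout never changes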
# Standalone (non-Flask) version of the contour + OCR step: show the intermediate
# images interactively, then OCR the annotated image and print the numbers found.
# img and thresh are assumed to come from the same imread/threshold steps as in the
# first snippet.
import cv2
import numpy as np
import pytesseract
import nums_from_string

cv2.imshow('', thresh)
# The structuring element must be an array; the original passed a bare (100, 100) tuple.
kernel = np.ones((100, 100), np.uint8)
mor_img = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
# I adapted this part of the code. This is how my version works (2.4.16),
# but it could be different for OpenCV 3.
contours, hierarchy = cv2.findContours(
    mor_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
for c in sorted_contours[1:]:
    area = cv2.contourArea(c)
    if area > 6000:
        print(area)
        cv2.drawContours(img, [c], -1, (0, 0, 200), 3)
        x, y, w, h = cv2.boundingRect(c)
        # The lines below get the approximate centre of the rooms.
        cx = x + w / 2
        cy = y + h / 2
        # cv2.putText(img, str(area), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 0, 0), 1, cv2.CV_AA)
cv2.imshow('', mor_img)
# cv2.imshow('', img)
cv2.waitKey(0)
text = pytesseract.image_to_string(img, config="--psm 11")
print(nums_from_string.get_nums(text))
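# A self-contained sketch (synthetic image, not the uploaded plan) of the contour
# filtering used above: sort contours by area, skip the largest one (the outer page
# outline), and keep only regions above the area threshold. All sizes are arbitrary.
import cv2
import numpy as np

canvas = np.zeros((300, 300), np.uint8)
cv2.rectangle(canvas, (2, 2), (297, 297), 255, -1)   # filled "page" -> largest contour
cv2.rectangle(canvas, (40, 40), (140, 160), 0, -1)   # a "room" cut out of the page
cv2.rectangle(canvas, (180, 60), (260, 260), 0, -1)  # another "room"

found = cv2.findContours(canvas, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = found[0] if len(found) == 2 else found[1]  # OpenCV 2/4 vs 3 return values
for c in sorted(contours, key=cv2.contourArea, reverse=True)[1:]:
    if cv2.contourArea(c) > 6000:
        print(cv2.contourArea(c), cv2.boundingRect(c))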