Example #1
import time
import urllib.request

import cv2
import numpy as np


def upload_pic(path, layout):
    filet = path
    fl = path.split(".")[1]
    new_img = '/sdcard/' + fl + '_new.png'
    #pic=droid.cameraInteractiveCapturePicture(filet)
    # accept both remote URLs and local file paths
    res = urllib.request.urlopen(filet if "://" in filet else "file://" + filet)
    image = np.asarray(bytearray(res.read()), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    # perform the actual resizing of the image according to scaling_factor
    height, width = image.shape[:2]
    resized = cv2.resize(image, (width // 3, height // 3),
                         interpolation=cv2.INTER_AREA)
    cv2.imwrite(new_img, resized)
    # Return grayscale image
    #gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    print(resized)
    layout.views.result.text = "Processing ..."
    layout.views.preview.src = "file://" + new_img
    time.sleep(3)
    args = [
        'tesseract', '--tessdata-dir', '/data/data/com.termux/files/usr/share',
        new_img, 'stdout', '-l', 'eng', '--psm', '3', '--oem', '2'
    ]
    return run(args)
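The function ends with run(args), a helper that is not part of the snippet; a minimal sketch, assuming it simply executes the tesseract command line via subprocess and returns the decoded OCR output:

import subprocess


def run(args):
    # hypothetical helper: run the command and return its stdout as text
    return subprocess.check_output(args).decode("utf-8")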
Example #2
import math
import sys

import cv2 as cv
import numpy as np


def main(argv):
    default_file = 'images/board.JPEG'
    filename = argv[0] if len(argv) > 0 else default_file
    # Loads an image
    src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE)
    # Check if image is loaded fine
    if src is None:
        print('Error opening image!')
        print('Usage: hough_lines.py [image_name -- default ' + default_file +
              '] \n')
        return -1

    dst = cv.Canny(src, 50, 200, None, 3)

    # Copy edges to the images that will display the results in BGR
    cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)
    cdstP = np.copy(cdst)

    lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)

    if lines is not None:
        for i in range(0, len(lines)):
            rho = lines[i][0][0]
            theta = lines[i][0][1]
            a = math.cos(theta)
            b = math.sin(theta)
            x0 = a * rho
            y0 = b * rho
            pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
            pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
            cv.line(cdst, pt1, pt2, (0, 0, 255), 3, cv.LINE_AA)

    linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)

    if linesP is not None:
        for i in range(0, len(linesP)):
            l = linesP[i][0]
            cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0, 0, 255), 3,
                    cv.LINE_AA)

    y = int(round(1536 / 3))
    x = int(round(2048 / 3))
    src = cv.resize(src, (x, y))  # Resize image
    cdst = cv.resize(cdst, (x, y))  # Resize image
    cdstP = cv.resize(cdstP, (x, y))  # Resize image

    cv.imshow("Source", src)
    cv.imshow("Detected Lines (in red) - Standard Hough Line Transform", cdst)
    cv.imshow("Detected Lines (in red) - Probabilistic Line Transform", cdstP)

    cv.imwrite("./houghLines.png", cdstP)

    cv.waitKey()
    return 0
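main() above follows the OpenCV hough_lines.py tutorial; a minimal entry point for running it as a script (assuming the imports shown at the top of the example):

if __name__ == "__main__":
    main(sys.argv[1:])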
Example #3
def detect_face(path, layout):
    image = cv2.imread(path)
    height, width = image.shape[:2]
    resized = cv2.resize(image, (width // 5, height // 5))
    faces = faceCascade.detectMultiScale(resized,
                                         scaleFactor=1.2,
                                         minNeighbors=5,
                                         minSize=(5, 5),
                                         flags=cv2.CASCADE_SCALE_IMAGE)
    if len(faces) == 0:
        return False
    rect = faces[0]
    draw_rectangle(resized, rect)
    draw_text(resized, "Face", rect[0], rect[1] - 5)
    cv2.imwrite("/sdcard/face.jpg", resized)
    layout.views.preview.src = "file:///sdcard/face.jpg"
    return True
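detect_face() relies on draw_rectangle() and draw_text() helpers defined elsewhere in the original project; a minimal sketch of plausible implementations built on cv2.rectangle and cv2.putText:

def draw_rectangle(img, rect):
    # draw the bounding box of a detection in green
    (x, y, w, h) = rect
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)


def draw_text(img, text, x, y):
    # label the detection just above its box
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)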
Example #4
def segment_box(image_Edge, contours, type_):
    image_with_MinRect = draw_Min_Rect(image_Edge, contours)
    contours_Ang, box_ang = Angle_filter(contours)
    contours_Rect, box_rect = Rect_filter(contours_Ang)

    # save the candidate crops; image, name and args come from the
    # enclosing script (see Example #9)
    for box in box_rect:
        cut = CutImage(image, box)
        image_Unisize = Unisize(cut)
        path_tmp = args["tmp_path"] + name
        cv.imwrite(path_tmp, image_Unisize)
    # SVM classification
    svm = SVM_(type_)
    for box in box_rect:
        cut = CutImage(image, box)
        image_Unisize = Unisize(cut)
        result = svm.predict(image_Unisize)
        if result == Has:
            image_located = image_Unisize
            break
    return image_located  # only one image should be output
Example #5
import cv2 as cv
from androidhelper import Android

droid = Android()
path = "/sdcard/qpython/photos/test.jpg"
droid.cameraCapturePicture(path)
#droid.cameraInteractiveCapturePicture(path)
img = cv.imread(path)
cv.imwrite('/sdcard/qpython/photos/cvImage.jpg', img)
Example #6
import cv2 as cv

#to use relative path, note that the working directory is the main qpython folder
#to use absolute path, just copy and paste the path from your file manager 
srcPath = 'great.jpg'
desPath = 'call.jpg'
img = cv.imread(srcPath, 0)
cv.imwrite(desPath, img)
Example #7
import cv2 as cv

RED = (0, 0, 255)  # BGR


def track_x(x):
    # redraw the crosshair when the x slider moves
    cv.line(img, (x, 0), (x, h), RED, d)
    cv.line(img, (0, y), (w, y), RED, d)
    cv.imshow('window', img)


def track_y(y):
    # redraw the crosshair when the y slider moves
    cv.line(img, (x, 0), (x, h), RED, d)
    cv.line(img, (0, y), (w, y), RED, d)
    cv.imshow('window', img)


file = 'messi.jpg'

# img = cv.imread(file, cv.IMREAD_GRAYSCALE)
img0 = cv.imread(file, cv.IMREAD_COLOR)
img = img0.copy()  # draw on a copy of the loaded image
cv.imshow('window', img)

w, h = 800, 600
x, y = 100, 100
d = 1

cv.createTrackbar('x', 'window', x, w, track_x)
cv.createTrackbar('y', 'window', y, h, track_y)
cv.line(img, (x, 0), (x, h), RED, d)
cv.line(img, (0, y), (w, y), RED, d)

cv.imshow('window', img)

k = cv.waitKey(0)

print('key', k)

cv.imwrite('messigray.png', img)
cv.destroyAllWindows()
Example #8
import cv2 as cv
import numpy as np


def normalize(img):
    minimum = np.amin(img)
    return (img - minimum) * (255 / (np.amax(img) - minimum))


out_path = "result/4.1.2/"
baboon = cv.imread('image/Baboon.bmp', cv.IMREAD_GRAYSCALE)
f16 = cv.imread('image/F16.bmp', 0)
lena = cv.imread('image/Lena.bmp', 0)

cv.imwrite(out_path + 'baboon_gray.jpg', baboon)
cv.imwrite(out_path + 'f16_gray.jpg', f16)
cv.imwrite(out_path + 'lena_gray.jpg', lena)

# compute DFT of images
baboon_f = np.abs(np.fft.fft2(baboon))
f16_f = np.abs(np.fft.fft2(f16))
lena_f = np.abs(np.fft.fft2(lena))

# shift
baboon_f_shift = np.fft.fftshift(baboon_f)
f16_f_shift = np.fft.fftshift(f16_f)
lena_f_shift = np.fft.fftshift(lena_f)

# log of the non-shifted spectra
baboon_f_log = np.log(baboon_f + 1)
f16_f_log = np.log(f16_f + 1)
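The snippet breaks off before the log spectra are written out; a minimal sketch of how the remaining steps might look, reusing the normalize() helper defined above (the output file names are assumptions):

lena_f_log = np.log(lena_f + 1)

# scale the log spectra to 0..255 so they can be saved as images
cv.imwrite(out_path + 'baboon_spectrum.jpg', normalize(baboon_f_log))
cv.imwrite(out_path + 'f16_spectrum.jpg', normalize(f16_f_log))
cv.imwrite(out_path + 'lena_spectrum.jpg', normalize(lena_f_log))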
Example #9
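The loop below relies on a pre() helper, a locate_mechanism() function and a few imports that the snippet does not show; a minimal sketch of the missing setup, assuming pre() parses the folder arguments with argparse (the flag names are guesses) and that paths comes from imutils:

import argparse
import os

import cv2 as cv
from imutils import paths  # assumed source of paths.list_images


def pre():
    # hypothetical helper: parse the folder arguments used by the script
    ap = argparse.ArgumentParser()
    ap.add_argument("--path", required=True, help="folder with input images")
    ap.add_argument("--tmp_path", required=True, help="folder for temporary crops")
    ap.add_argument("--output_path", required=True, help="folder for located results")
    args = vars(ap.parse_args())
    conf = {}  # placeholder for any extra configuration
    return args, conf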
if __name__ == '__main__':

    args, conf = pre()

    # grab the image paths in sorted order
    image_paths = sorted(list(paths.list_images(args['path'])))

    # begin processing
    for i in image_paths:  # i is the path of a single image
        image = cv.imread(i)
        name = os.path.split(i)[1]  # os.path.split returns (directory, filename)

        image_located = locate_mechanism(image, name)

        # save the result
        path_out = args["output_path"] + name
        cv.imwrite(path_out, image_located)
Example #10
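The snippet below starts from a PIL image img, a reduced_percent scale factor, a droid Android helper and sleep, all created earlier in the original script; a minimal sketch of that missing setup (the capture path and the 0.25 factor are assumptions):

from time import sleep

import cv2
from PIL import Image
from androidhelper import Android

droid = Android()
capturePath = "/sdcard/raw.jpg"          # assumed capture location
droid.cameraCapturePicture(capturePath)  # take a photo with the device camera
img = Image.open(capturePath)            # open it with PIL for downscaling
reduced_percent = 0.25                   # assumed scale factor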
out = img.resize([int(reduced_percent * s) for s in img.size])
imagePath = "/sdcard/image.jpg"
out.save(imagePath)
sleep(3)
# Set the haarcascade file path
cascPath = "/sdcard/haarcascade_frontalface_default.xml"

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)

# Read the image
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect faces in the image
faces = faceCascade.detectMultiScale(gray,
                                     scaleFactor=1.2,
                                     minNeighbors=5,
                                     minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)

print("Found {0} faces!".format(len(faces)))

# Draw a rectangle around the faces
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
#save the image
cv2.imwrite('/sdcard/out.jpg', image)
#display the image
droid.view("file:///sdcard/out.jpg", "image/*")
Example #11
    return droid, uuid

path = '/storage/7E9B-5A00/Picture/'

# open the serial connection
droid, uuid = serialStart()


cnt = 0
while True:
    # take a photo
    droid.cameraCapturePicture(path + str(cnt) + 'image.png')
    # open it with OpenCV
    img = cv.imread(path + str(cnt) + 'image.png')
    mask = red_detect(img)  # detect red regions
    cv.imwrite(path + str(cnt) + 'Redmask.png', mask)  # save the mask

    # compute the red area
    pix_area, per = calc_area(mask)
    # compute the centroid
    x, y = calc_center(mask)
    print("G({},{})".format(x, y))

    # compare with the previous frame
    if cnt >= 1:
        # recompute for the previous image
        img0 = cv.imread(path + str(cnt - 1) + 'image.png')
        mask0 = red_detect(img0)
        _, per0 = calc_area(mask0)
        x0, y0 = calc_center(mask0)
        print("G0({},{})".format(x0, y0))
Example #12
import numpy as np
import cv2 as cv

# Create a black image
# img = np.zeros((512,512,3), np.uint8)
img = cv.imread('great.jpg', 1)
# Draw a diagonal blue line with thickness of 5 px
img = cv.line(img, (0, 0), (511, 511), (255, 0, 0), 5)
cv.imwrite('blessed.jpg', img)
Example #13
import sys

import cv2
from androidhelper import Android

droid = Android()

# Get user supplied values
imagePath = '/sdcard/cc.png'
cascPath = "/sdcard/haarcascade_frontalface_default.xml"

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)

# Read the image
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect faces in the image
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.2,
    minNeighbors=5,
    minSize=(30, 30),
    flags=cv2.CASCADE_SCALE_IMAGE
)

print("Found {0} faces!".format(len(faces)))

# Draw a rectangle around the faces
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imwrite('/sdcard/capture_face_detect.jpg', image)
#display the image
droid.view("file:///sdcard/capture_face_detect.jpg","image/*")
Example #14
                                        (new_height, new_width))
    # shift
    img_filtering = np.fft.ifftshift(img_filtering)
    # IDFT and real
    img_ifft = np.real(np.fft.ifft2(img_filtering))
    # resize image
    final_img = np.zeros((height, width))
    for i in range(height):
        for j in range(width):
            final_img[i][j] = img_ifft[i][j]
    return final_img


out_path = "result/4.1.1/"
barbara_gray = cv.imread('image/Barbara.bmp', cv.IMREAD_GRAYSCALE)
cv.imwrite(out_path + 'barbara_gray.jpg', barbara_gray)

# a filter
a = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])
a = a / 16

# separate a
a_separate_c = np.array([1, 2, 1])
a_separate_c = a_separate_c / 4
a_separate_r = np.array([[1, 2, 1]])
a_separate_r = a_separate_r / 4
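
The point of splitting the a filter into a row and a column kernel is that the 2-D convolution can be done as two cheaper 1-D passes; a minimal check of that equivalence on barbara_gray (the use of cv.filter2D and cv.sepFilter2D here is an illustration, not part of the original snippet):

full = cv.filter2D(barbara_gray.astype(np.float64), -1, a)
separable = cv.sepFilter2D(barbara_gray.astype(np.float64), -1,
                           a_separate_r, a_separate_c)
print(np.allclose(full, separable))  # should print True: the two 1-D passes match the 2-D filter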

# b filter
b = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])

# c filter