Example #1
import cv2

def import_video(video_mp4):
    # Open the video file
    cap = cv2.VideoCapture(video_mp4)
    # A video is just a series of images (frames)
    while True:
        success, img = cap.read()
        if not success:
            # Stop when the video ends or a frame cannot be read
            break
        cv2.imshow("Video", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # If q is pressed, break out
            break
    cap.release()
    cv2.destroyAllWindows()
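A minimal usage sketch; the file name is a hypothetical placeholder:

import_video("sample.mp4")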
Example #2
    def run(self):
        """Runs the worm tracking algorithm indefinitely"""

        while True:
            # Grab an image and display it
            ret, img = self.camera.read()
            cv2.imshow('Preview', img)
            cv2.waitKey(1)  # give HighGUI a chance to draw the window

            # Convert to grayscale, then threshold and compute contours
            # (findContours needs a single-channel image)
            img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, img_thresh = cv2.threshold(img_gray, self.threshold, 255, cv2.THRESH_BINARY_INV)
            contours, hierarchy = cv2.findContours(img_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

            # Find the biggest contour
            worm = None
            worm_area = 0
            for contour in contours:
                area = cv2.contourArea(contour)
                if area > worm_area:
                    worm = contour
                    worm_area = area

            # Nothing detected in this frame; try the next one
            if worm is None:
                continue

            #Compute the centroid of the worm contour
            moments = cv2.moments(worm)
            cx = int(moments['m10']/moments['m00'])
            cy = int(moments['m01']/moments['m00'])

            #If centroid within margin of image edge, move stage
            # img.shape is (rows, cols), i.e. (height, width)
            height = img.shape[0]
            width = img.shape[1]

            if cx < self.margin:
                self.microscope.move('x', -1*self.step_size)

            if cx > (width - self.margin):
                self.microscope.move('x', self.step_size)

            if cy < self.margin:
                self.microscope.move('y', -1*self.step_size)

            if cy > (height - self.margin):
                self.microscope.move('y', self.step_size)
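run() assumes a surrounding class that owns the camera, the stage, and the tuning parameters. A minimal scaffolding sketch: the attribute names come from run() above, while the constructor arguments, defaults, and the microscope object's interface are assumptions:

import cv2

class WormTracker:
    def __init__(self, camera_index, microscope, threshold=100, margin=50, step_size=10):
        # Default values and the microscope interface are assumptions
        self.camera = cv2.VideoCapture(camera_index)  # must provide read()
        self.microscope = microscope                  # must provide move(axis, amount)
        self.threshold = threshold                    # binarization cutoff used in run()
        self.margin = margin                          # edge margin that triggers a stage move
        self.step_size = step_size                    # stage step per correction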
Example #3
import cv2
import sys

cascPath = 'haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascPath)

video_capture = cv2.VideoCapture(0)

while True:

    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30))

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow('image', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
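If the cascade XML is not sitting next to the script, opencv-python bundles it; the path can be built from cv2.data instead (a sketch):

cascPath = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'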
Example #4
      output_dict['detection_scores'] = output_dict['detection_scores'][0]
      if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
  return output_dict



while True:
  ret, image_np = cap.read()

  # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
  image_np_expanded = np.expand_dims(image_np, axis=0)
  # Actual detection.
  output_dict = run_inference_for_single_image(image_np, detection_graph)
  # Visualization of the results of a detection.
  vis_util.visualize_boxes_and_labels_on_image_array(
      image_np,
      output_dict['detection_boxes'],
      output_dict['detection_classes'],
      output_dict['detection_scores'],
      category_index,
      instance_masks=output_dict.get('detection_masks'),
      use_normalized_coordinates=True,
      line_thickness=8)
  cv2.imshow('Obj detection', cv2.resize(image_np, (800, 400)))
  if cv2.waitKey(25) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
    cap.release()
    break
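This fragment follows the TensorFlow Object Detection API tutorial notebook: run_inference_for_single_image, detection_graph, category_index, and vis_util are defined in earlier cells that are not shown. The one missing local piece is the capture source; a sketch, assuming a webcam:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)  # assumed source for the cap read in the loop above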
Example #5
def callback(self, data):
    # Convert the ROS image message to an OpenCV BGR image
    img_rgb = self.cvb.imgmsg_to_cv2(data, "bgr8")
    cv2.namedWindow('Camera_Feed')
    cv2.imshow('Camera_Feed', img_rgb)
    cv2.waitKey(1)  # needed so the HighGUI window actually refreshes
    cv2.imwrite('weapon_snapshot_colmustard.png', img_rgb)
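A callback like this normally lives in a ROS node that owns a CvBridge and an image subscriber. A minimal scaffolding sketch; the node and topic names are assumptions, and only cvb and callback come from the snippet above:

import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2

class SnapshotNode:
    def __init__(self):
        self.cvb = CvBridge()
        rospy.init_node('snapshot_node')  # node name is an assumption
        rospy.Subscriber('/camera/image_raw', Image, self.callback)  # topic name is an assumption

    def callback(self, data):
        ...  # the callback body shown above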
Example #6
# Activate the webcam so live objects can be matched against the image database
cap = cv2.VideoCapture(0)
while True:
    success, img2 = cap.read()
    # copy img2 to new variable called imgOriginal
    imgOriginal = img2.copy()
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    id = findID(img2, desList)

    if id != -1:
        cv2.putText(imgOriginal, classNames[id], (50, 50),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)

    cv2.imgshow("IMAGE TO BE TRAIN / DETECTED", imgOriginal)
    cv2.waitKey(1)

# # see how many good matches the software got
# # if the number is big then it is a good match (the img and the imgTrain)
# print(len(good))
#
# img3 = cv2.drawMatchesKnn(img, kpimg, imgTrain, kpimgTrain, good, None, flags=2)
#
# imgKpOri = cv2.drawKeypoints(img, kpimg, None)
# imgKpTrain = cv2.drawKeypoints(imgTrain, kpimgTrain, None)
#
# # show the Keypoints that the orb algorithm found to be useful for matching process
# # cv2.imshow("imgKpOri", imgKpOri)
# # cv2.imshow("imgKpTrain", imgKpTrain)
#
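findID, desList, and classNames are defined elsewhere in this project: desList holds ORB descriptors of the reference images and classNames their labels. A sketch of what findID might look like, assuming an ORB detector, a Hamming-distance brute-force matcher, and a ratio-test threshold of 0.75 (all assumptions):

import cv2

orb = cv2.ORB_create(nfeatures=1000)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)

def findID(img, desList, min_good=15):
    # min_good is an assumed acceptance threshold
    kp2, des2 = orb.detectAndCompute(img, None)
    if des2 is None:
        return -1
    match_counts = []
    for des in desList:
        good = 0
        for pair in bf.knnMatch(des, des2, k=2):
            # Lowe's ratio test keeps only distinctive matches
            if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
                good += 1
        match_counts.append(good)
    if match_counts and max(match_counts) > min_good:
        return match_counts.index(max(match_counts))
    return -1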
Example #7
import cv2
import numpy as np

filename = 'dog.jpg'
img = cv2.imread(filename)  # read in color; the red markers below need 3 channels
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

gray = np.float32(gray)
# blockSize=2 (neighborhood), ksize=3 (Sobel aperture), k=0.04 (Harris free parameter)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)

# Dilate to make the corner responses easier to see
dst = cv2.dilate(dst, None)

# Mark strong corners in red
img[dst > 0.01 * dst.max()] = [0, 0, 255]

cv2.imshow('dst', img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
Example #8
import cv2
import numpy as np

img = cv2.imread("image.jpg")

# Dönüştürülecek resim boyutları
width = 600
height = 700

# Çapraz bir resmin köşe pikselleri tespit edilir.
points_1 = np.float32([[230, 1], [1, 472], [540, 150], [338, 617]])
points_2 = np.float32([[0, 0], [0, height], [width, 0], [width, height]])

# Perspeftif alma
matrix = cv2.getPerspectiveTransform(points_1, points_2)

# Dönüştürülmüş resim
img_output = cv2.warpPerspective(img, matrix, (width, height))
cv2.imgshow("Yeni Resim", img_output)
Example #9
import cv2
import time

# Import the video (video_name is the path to the input file, defined elsewhere)
cap = cv2.VideoCapture(video_name)

print("Width:", cap.get(3))   # video width
print("Height:", cap.get(4))  # video height

# If the video failed to open or is empty
if not cap.isOpened():
    print("Error")

# Loop so the video can be read continuously
while True:
    # frame = each individual image that plays inside the video
    # ret   = whether the read succeeded (True/False)
    ret, frame = cap.read()

    if ret:
        # The video plays back too fast, so slow it down
        time.sleep(0.01)
        cv2.imshow("Video:", frame)
    else:
        break

    # Exit the video when the q key is pressed
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# Release the video capture
cap.release()
cv2.destroyAllWindows()
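The magic numbers 3 and 4 passed to cap.get() are property IDs; the named constants say the same thing more readably:

print("Width:", cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print("Height:", cap.get(cv2.CAP_PROP_FRAME_HEIGHT))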
Example #10
import cv2

cap = cv2.VideoCapture(0)

while cap.isOpened():

    ret, frame = cap.read()
    if not ret:
        break

    cv2.imshow('frame', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()

cv2.destroyAllWindows()
Example #11
import cv2

# Write the file path
img = cv2.imread("image.jpg")

# Pass 0 to load the image in black and white (grayscale)
img = cv2.imread("image.jpg", 0)

# Display
cv2.imshow("Image Name", img)

# Pressing the Esc key closes the image.
# Pressing the s key saves the image and then closes it.
close_key = cv2.waitKey(0) & 0xFF
if close_key == 27:
    cv2.destroyAllWindows()
elif close_key == ord('s'):
    cv2.imwrite("image_gray.png", img)
    cv2.destroyAllWindows()
Example #12
import cv2
import numpy as np

# path = r'C:\Users\Rebec\Projetos\Ficha 03 - PDI\Dark_Moon.jpg'
# imagem = cv2.imread('Dark_Moon.jpg')  # select the desired image
imagem = cv2.imread('Dark_Moon.jpg')
cv2.imshow("Original", imagem)

y = 0
x = 0

# cv2_imshow(im)  # show the image in the program

height = int(input("enter the height value: "))
width = int(input("enter the width value: "))
# dimensoes = (height, width)

# this part resizes the image, but that is not what the exercise asks for
# image_resize = cv2.resize(imagem, dimensoes, interpolation = cv2.INTER_AREA)
# cv2_imshow(image_resize)

# Crop by slicing rows (y) first, then columns (x)
crop = imagem[y:y + height, x:x + width]
cv2.imshow('image', crop)
cv2.waitKey(0)
Example #13

# Distance calculation:
# d = abs(Red - RedColor) + abs(Green - GreenColor) + abs(Blue - BlueColor)
def getColorName(R, G, B):
    minimum = 10000
    for i in range(len(csv)):
        d = abs(R - int(csv.loc[i, "R"])) + abs(G - int(csv.loc[i, "G"])) + abs(B - int(csv.loc[i, "B"]))
        if d <= minimum:
            minimum = d
            cname = csv.loc[i, "color_name"]
    # Return only after every row has been checked
    return cname


# Display the image in a window for the user to interact with
while True:
    cv2.imshow("image", img)
    if clicked:
        # cv2.rectangle(image, startpoint, endpoint, color, thickness); thickness -1 fills the rectangle entirely
        cv2.rectangle(img, (20, 20), (750, 60), (b, g, r), -1)
        # Build the text string to display (color name and RGB values)
        text = getColorName(r, g, b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)
        # cv2.putText(img, text, start, font(0-7), fontScale, color, thickness, lineType)
        cv2.putText(img, text, (50, 50), 2, 0.8, (255, 255, 255), 2, cv2.LINE_AA)
        # For very light colors, show the text in black instead
        if r + g + b >= 600:
            cv2.putText(img, text, (50, 50), 2, 0.8, (0, 0, 0), 2, cv2.LINE_AA)
        clicked = False
    # Break the loop when the user hits the Esc key
    if cv2.waitKey(20) & 0xFF == 27:
        break
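The loop above relies on globals (img, csv, clicked, r, g, b) that this fragment does not define. A minimal setup sketch; the file names and the CSV column layout are assumptions:

import cv2
import pandas as pd

img = cv2.imread('colorpic.jpg')  # image file name is an assumption
csv = pd.read_csv('colors.csv', names=["color", "color_name", "hex", "R", "G", "B"], header=None)

clicked = False
r = g = b = 0

def draw_function(event, x, y, flags, param):
    # A double-click reads the BGR pixel under the cursor
    global b, g, r, clicked
    if event == cv2.EVENT_LBUTTONDBLCLK:
        b, g, r = (int(v) for v in img[y, x])
        clicked = True

cv2.namedWindow("image")
cv2.setMouseCallback("image", draw_function)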
Example #14
import cv2

# pip install opencv-python
# https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

img = cv2.imread(r'C:\Users\Dell\Desktop\profile.jpg')
img = cv2.resize(img, (500, 500))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)

# Draw rectangle around the faces
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv2.imshow('image', img)
cv2.waitKey()
cv2.destroyAllWindows()