def create_img(path, num):
    '''
    Flip, rotate, or affine-transform each image at random to enlarge
    the sample set. Only the flips are implemented below; see the
    sketch after this function for a rotation/affine branch.

    Parameters:
    path: absolute path of the directory holding the images to load
    num:  number of sample images to generate per input image, e.g.
          num = 5 generates five extra sample images per input image
    '''
    filenames = os.listdir(path)
    for i, name in enumerate(filenames, start=1):
        imgpath = os.path.join(path, name)
        img = cv2.imread(imgpath)
        h, w, c = img.shape
        for j in range(1, num + 1):
            a = random.randint(1, 3)
            if a == 1:
                #  flip around the x axis
                out = cv2.flip(img, 0)
            elif a == 2:
                #  flip around the y axis
                out = cv2.flip(img, 1)
            else:
                #  flip around both axes
                out = cv2.flip(img, -1)
            # flip the original each time so augmentations do not compound
            cv2.imwrite(os.path.join(path, str(i) + '_' + str(j) + '.jpg'), out)
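# Note: the docstring above also mentions rotation and affine transforms,
# which create_img never applies. A minimal hedged sketch of such a branch,
# assuming numpy is imported as np; the function name, angle range and
# jitter magnitude are illustrative assumptions, not the project's code.
def random_rotate_or_affine(img):
    h, w = img.shape[:2]
    if random.random() < 0.5:
        # Rotate around the image centre by a random angle
        angle = random.uniform(-30, 30)
        m = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
    else:
        # Small random affine warp: jitter three anchor points
        src = np.float32([[0, 0], [w - 1, 0], [0, h - 1]])
        jitter = np.float32(np.random.uniform(-0.05, 0.05, (3, 2))) * np.float32([w, h])
        m = cv2.getAffineTransform(src, src + jitter)
    return cv2.warpAffine(img, m, (w, h))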
def data_augment(xb, yb):
    if np.random.random() < 0.25:
        xb, yb = rotate(xb, yb, 90)

    if np.random.random() < 0.25:
        xb, yb = rotate(xb, yb, 180)

    if np.random.random() < 0.25:
        xb, yb = rotate(xb, yb, 270)

    if np.random.random() < 0.25:
        xb = cv2.flip(xb, 1)  # flipCode > 0: flip around the y axis

        yb = cv2.flip(yb, 1)

    if np.random.random() < 0.25:
        xb = random_gamma_transform(xb, 1.0)

    if np.random.random() < 0.25:
        xb = blur(xb)

    # Bilateral filter
    if np.random.random() < 0.25:
        xb = cv2.bilateralFilter(xb, 9, 75, 75)

    #  Gaussian blur
    if np.random.random() < 0.25:
        xb = cv2.GaussianBlur(xb, (5, 5), 1.5)

    if np.random.random() < 0.2:
        xb = add_noise(xb)

    return xb, yb
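# Note: data_augment relies on four helpers that this snippet does not show.
# Minimal sketches consistent with how they are called above; the bodies are
# assumptions, not the original implementations.
def rotate(xb, yb, angle):
    # Rotate the image and its label mask by the same angle
    h, w = xb.shape[:2]
    m = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
    xb = cv2.warpAffine(xb, m, (w, h))
    # Nearest-neighbour keeps label values discrete
    yb = cv2.warpAffine(yb, m, (w, h), flags=cv2.INTER_NEAREST)
    return xb, yb

def random_gamma_transform(xb, gamma_vari):
    # Random gamma drawn log-uniformly around 1.0, applied via a lookup table
    log_gamma = np.random.uniform(-np.log(1 + gamma_vari), np.log(1 + gamma_vari))
    table = ((np.arange(256) / 255.0) ** np.exp(log_gamma) * 255).astype(np.uint8)
    return cv2.LUT(xb, table)

def blur(xb):
    # Simple box blur
    return cv2.blur(xb, (3, 3))

def add_noise(xb):
    # Salt noise: whiten a handful of random pixels
    out = xb.copy()
    for _ in range(200):
        i = np.random.randint(0, out.shape[0])
        j = np.random.randint(0, out.shape[1])
        out[i, j] = 255
    return out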
Example n. 3
def take_snapshot(cam, bucket, config):
    if cam is not None and cam.isOpened():
        _, frame = cam.read()
        for transform in config['transformations']:
            if transform['action'] == 'flip':
                if transform['axis'] == 'x':
                    frame = cv2.flip(frame, 0)
                elif transform['axis'] == 'y':
                    frame = cv2.flip(frame, 1)
                elif transform['axis'] == 'xy':
                    frame = cv2.flip(frame, -1)
            elif transform['action'] == 'rotate':
                if transform['direction'] == 'CW90':
                    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
                elif transform['direction'] == 'CCW90':
                    frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
                elif transform['direction'] == '180':
                    frame = cv2.rotate(frame, cv2.ROTATE_180)
        cv2.imwrite('./{0}.png'.format(bucket), frame)
        upload_coc('./{0}.png'.format(bucket), bucket)
        dt = datetime.now()
        # Outside active hours, fall back to a 30-minute polling interval
        if dt.hour < dt_range[0] or dt.hour > dt_range[1]:
            ticker = Timer(30.0 * 60.0,
                           take_snapshot,
                           args=[cam, bucket, config])
        else:
            ticker = Timer(config['interval'],
                           take_snapshot,
                           args=[cam, bucket, config])
        tickers[config['index']] = ticker
        ticker.start()
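# Note: take_snapshot reads the module-level globals dt_range, tickers and
# upload_coc, which are defined elsewhere. A hypothetical config dict matching
# the keys the handler reads; every value here is an assumption.
config = {
    'index': 0,                    # slot in the global tickers dict
    'interval': 5.0 * 60.0,        # seconds between snapshots during active hours
    'transformations': [
        {'action': 'flip', 'axis': 'y'},            # mirror horizontally
        {'action': 'rotate', 'direction': 'CW90'},  # then rotate 90 degrees clockwise
    ],
}
dt_range = (7, 19)  # active hours; outside them the 30-minute fallback applies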
def save_webcam(outPath, fps, mirror=False):
    # Capturing video from webcam:
    cap = cv.VideoCapture(0)
    currentFrame = 0
    # Get current width of frame
    width = cap.get(cv.CAP_PROP_FRAME_WIDTH)  # float
    # Get current height of frame
    height = cap.get(cv.CAP_PROP_FRAME_HEIGHT)  # float
    # Define the codec and create VideoWriter object
    fourcc = cv.VideoWriter_fourcc(*"XVID")
    out = cv.VideoWriter(outPath, fourcc, fps, (int(width), int(height)))

    prev_ret, prev_frame = cap.read()
    if prev_ret:
        prev_frame = cv.flip(prev_frame, 1)

    while cap.isOpened():
        # Capture frame-by-frame
        ret, frame = cap.read()

        if ret:
            if mirror:
                # Mirror the output video frame
                frame = cv.flip(frame, 1)
            # Saves for video
            out.write(frame)

            # Mean squared difference between consecutive frames
            diff = np.sum(
                (prev_frame.astype("float") - frame.astype("float")) ** 2)
            diff = diff / (frame.shape[0] * frame.shape[1])

            if diff > threshold:
                print(r"/!\ Warning /!\ Someone has entered the matrix! " +
                      str(diff))

            prev_frame = frame

            # Display the resulting frame
            cv.imshow('frame', frame)
        else:
            break

        if cv.waitKey(1) & 0xFF == ord('q'):  # if 'q' is pressed then quit
            break

        # Frame counter
        currentFrame += 1

    # When everything done, release the capture
    cap.release()
    out.release()
    cv.destroyAllWindows()
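# Note: the loop above compares diff against a module-level 'threshold' that
# this snippet does not define. A plausible setting and invocation; both
# values are assumptions.
threshold = 500.0  # mean squared per-pixel difference treated as motion

if __name__ == '__main__':
    save_webcam('output.avi', fps=30.0, mirror=True)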
Example n. 5
    def _cv2_trim(self) -> bool:
        """
        Remove black borders from a cv2 image array.

        Most sources are already properly cropped; this method exists for
        the few poorly cropped WEB sources.
        """
        logger.info("Trying to remove black borders with cv2")
        og_w, og_h = self._cv2.shape[1], self._cv2.shape[0]
        logger.debug("Original dimensions: %dx%d", og_w, og_h)
        og_quotient = og_w / og_h

        first_img = _remove_lateral_cv2(self._cv2)

        # Rotate 90 degrees (transpose + horizontal flip) so the same
        # lateral trim removes the top and bottom borders
        tmp_img = cv2.transpose(first_img)
        tmp_img = cv2.flip(tmp_img, flipCode=1)

        if tmp_img is None:
            raise exceptions.InvalidRequest("Possible all-black image found")

        final = _remove_lateral_cv2(tmp_img)

        # Rotate back to the original orientation
        out = cv2.transpose(final)

        final_img = cv2.flip(out, flipCode=0)
        if final_img is None:
            raise exceptions.InvalidRequest("Possible all-black image found")

        new_w, new_h = final_img.shape[1], final_img.shape[0]

        logger.debug("New dimensions: %dx%d", new_w, new_h)
        new_quotient = new_w / new_h

        if abs(new_quotient - og_quotient) > 0.9:
            logger.info("Possible bad quotient found: %s -> %s", og_quotient,
                        new_quotient)
            return False

        width_percent = (100 / og_w) * new_w
        height_percent = (100 / og_h) * new_h

        if any(percent <= 65 for percent in (width_percent, height_percent)):
            logger.info("Possible bad trim found: %s -> %s", width_percent,
                        height_percent)
            return False

        self._cv2 = final_img
        return True
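# Note: _remove_lateral_cv2 is defined elsewhere in the project. A minimal
# sketch assuming it trims near-black columns from both sides and signals an
# all-black image with None (which would explain the callers' None checks);
# the tolerance and heuristic are assumptions.
def _remove_lateral_cv2(img, tolerance=16):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    col_means = gray.mean(axis=0)
    keep = np.where(col_means > tolerance)[0]
    if keep.size == 0:
        return None  # all-black image
    return img[:, keep[0]:keep[-1] + 1]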
Example n. 6
def process():
    image = request.data
    params = request.args

    # The action list arrives JSON-encoded as the first query-string key
    actions = [param for param in params]
    actions = json.loads(actions[0])

    # convert raw request bytes to a uint8 array
    nparr = np.frombuffer(image, np.uint8)
    # decode image
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    for action in actions:

        if action['action'] == 'flip_H':
            img = cv2.flip(img, 0)  # flip around the x axis
            continue

        elif action['action'] == 'flip_V':
            img = cv2.flip(img, 1)  # flip around the y axis
            continue

        elif action['action'] == 'rotate_R':
            img = rotateImage(img, -90)
            continue

        elif action['action'] == 'rotate_L':
            img = rotateImage(img, 90)
            continue

        elif action['action'] == 'rotate':
            img = rotateImage(img, int(action['param']))
            continue

        elif action['action'] == 'resize':
            img = resizeImage(int(action['param']), img)
            continue

        elif action['action'] == 'thumbnail':
            img = resizeImage(10, img)
            continue

        elif action['action'] == 'grayscale':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            continue

    _, img_encoded = cv2.imencode('.jpeg', img)

    return img_encoded.tobytes(), 200
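# Note: a hypothetical client for the handler above, assuming it is mounted
# at /process on a local Flask server. The JSON action list travels as the
# first query-string key, matching how the handler iterates request.args.
import json
import requests

with open('input.jpg', 'rb') as fh:
    payload = fh.read()

actions = json.dumps([{'action': 'flip_V'}, {'action': 'rotate', 'param': 45}])
resp = requests.post('http://localhost:5000/process',
                     params={actions: ''},  # the JSON rides in the key itself
                     data=payload)

with open('output.jpeg', 'wb') as fh:
    fh.write(resp.content)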
def capture_frames(source: Union[str, int], frame_q: Queue, model_class):
    try:
        cap = cv.VideoCapture(source)
        while cap.isOpened():
            # Capture frame-by-frame
            retval, frame = cap.read()
            if retval:
                flipped = cv.flip(frame, 1)  # mirror once and reuse below
                frame_q.put(flipped)

                yield (b'--frame\r\n' + b'Content-Type: image/jpeg\r\n\r\n' +
                       cv.imencode('.jpg', flipped)[1].tobytes() +
                       b'\r\n')
            cv.waitKey(1)
    finally:
        cap.release()
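# Note: a minimal sketch of serving capture_frames as a Flask MJPEG route.
# The route name and app wiring are assumptions; whatever consumes frame_q
# on behalf of model_class lives elsewhere.
from queue import Queue
from flask import Flask, Response

app = Flask(__name__)
frames = Queue()  # drained by the model-side consumer

@app.route('/video_feed')
def video_feed():
    return Response(capture_frames(0, frames, None),
                    mimetype='multipart/x-mixed-replace; boundary=frame')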
Example n. 8
def dataset_augmentation(images_array):
    dataset = []
    for image in images_array:
        horizontal_flipped_img = cv2.flip(image, 1)
        dataset.append(horizontal_flipped_img)

        vertical_flipped_img = cv2.flip(image, 0)
        dataset.append(vertical_flipped_img)

        angles = [10, 15, 20]
        height, width = image.shape[:2]
        for angle in angles:
            matrix = cv2.getRotationMatrix2D((int(width / 2), int(height / 2)), angle, 1)
            rotated_img = cv2.warpAffine(image, matrix, (width, height))
            dataset.append(rotated_img)
    return np.array(dataset)
Example n. 9
def detectAndDisplay(frame):
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_gray = cv2.equalizeHist(frame_gray)
    # Detect faces
    # Divides the image into several rectangles and returns a list of them
    faces = face_cascade.detectMultiScale(frame_gray)
    for (x, y, w, h) in faces:
        center = (x + w // 2, y + h // 2)
        # Pink ellipses
        frame = cv2.ellipse(frame, center, (w // 2, h // 2), 0, 0, 360,
                            (255, 0, 255), 4)
        faceROI = frame_gray[y:y + h, x:x + w]

        # For each face, detect eyes
        eyes = eyes_cascade.detectMultiScale(faceROI)
        for (x2, y2, w2, h2) in eyes:
            eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
            radius = int(round((w2 + h2) * 0.25))
            # Blue circles
            frame = cv2.circle(frame, eye_center, radius, (255, 0, 0), 4)

    # Frame flipped horizontally for aesthetics =)
    frame = cv2.flip(frame, 1)
    cv2.imshow('Deteccao de Face e Olho', frame)
    return frame
def process_image(img):
    img = cv2.flip(img, 1)
    img = cv2.resize(img, (64, 64))
    img = np.reshape(img, (-1, 64, 64, 3))
    img = img.astype('float32') / 255.
    return img
Example n. 11
    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None
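# Note: letterbox is imported from elsewhere in this project. A minimal
# sketch of the usual YOLO-style implementation; padding colour and
# interpolation are assumptions. It returns the padded image first, since
# __next__ only uses element [0].
def letterbox(img, new_shape=640, color=(114, 114, 114)):
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    h, w = img.shape[:2]
    r = min(new_shape[0] / h, new_shape[1] / w)   # scale to fit, keep aspect ratio
    new_unpad = (int(round(w * r)), int(round(h * r)))
    img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    dw = new_shape[1] - new_unpad[0]              # total horizontal padding
    dh = new_shape[0] - new_unpad[1]              # total vertical padding
    top, bottom = dh // 2, dh - dh // 2
    left, right = dw // 2, dw - dw // 2
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=color)
    return img, r, (dw / 2, dh / 2)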
Example n. 12
    def _start_stream(self):
        """Actual stream and it's logic"""

        while self._ret:
            self._ret, frame = self._cap.read()

            frame = cv2.flip(frame, 1)

            # Convert image to HSV
            frame_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # Apply Color Filter
            lr, ur = self._apply_color_filter()

            # Get Mask
            mask = self._create_mask(frame_HSV, lr, ur)

            # Draw bounding boxes
            self._draw_boxes(mask, frame)

            # Draw Center of Area/Mass
            com_center = self._draw_com(frame)

            # Compute Distance from COM to Center of Frame
            self._get_distance(com_center)

            # Show Original Frame
            cv2.imshow("Original Video", frame)

            # Show Masked Frame
            cv2.imshow("Masked Video", mask)

            # Close stream if 'q' is pressed
            if cv2.waitKey(1) & 0xFF == ord('q'):
                return 0
def DynamicHist(src,
                winname,
                section=[0, 256],
                removeInZero=False,
                displaySize=(480, 640)):
    hist = cv2.calcHist([src], [0], None, [section[1] - section[0]], section)
    hist = hist[:, 0]
    if removeInZero:
        hist[0] = 0

    binWidth = int(displaySize[1] / hist.shape[0])
    k = np.max(hist) / displaySize[0]
    hist = hist / k
    black = np.zeros((displaySize[0], hist.shape[0] * binWidth), np.uint8)

    i = 0
    j = 0
    for x in np.nditer(hist):
        j += binWidth
        black[:int(x), i:j] = 200
        i += binWidth

    p = int(displaySize[0] / 20)
    scale = np.arange(0, black.shape[1], 50 * binWidth)
    black[-p:, scale] = 0XFF

    black = cv2.flip(black, 0)  # flip vertically so bars grow up from the bottom
    cv2.imshow(winname, black)
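# Note: a minimal usage sketch for DynamicHist, assuming cv2 is imported and
# a webcam is available.
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    DynamicHist(gray, "hist", removeInZero=True)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()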
Example n. 14
    def preprocess(self, image):
        # initialise the list of crops
        crops = []

        height, width = image.shape[:2]
        coords = [[0, 0, self.width, self.height],
                  [width - self.width, 0, width, self.height],
                  [width - self.width, height - self.height, width, height],
                  [0, height - self.height, self.width, height]]

        # also compute the center crop
        dw = int(0.5 * (width - self.width))
        dh = int(0.5 * (height - self.height))
        coords.append(([dw, dh, width - dw, height - dh]))

        # loop over the coords and extract crops
        for startx, starty, endx, endy in coords:
            crop = image[starty:endy, startx:endx]
            crop = cv2.resize(crop,
                              dsize=(self.width, self.height),
                              interpolation=self.inter)
            crops.append(crop)

        # check to see if horiz is True
        if self.horiz:
            mirrors = [cv2.flip(c, 1) for c in crops]
            crops.extend(mirrors)

        return np.array(crops)
    def flip(self, image, direction=None):
        # Flip the image; with no direction given, pick one at random
        # (-1 = both axes, 0 = x axis, 1 = y axis)
        if direction is None:
            direction = random.randint(-1, 1)

        flipped = cv2.flip(image, direction)
        return flipped
Example n. 16
def readFrame(frame_count_up, frame_count_down):
    ret, frame = camera.read()

    if ret:
        frame = imutils.resize(frame, FRAME_WIDTH)
        #(H, W) = frame.shape[:2]

        (corners, ids,
         _rejected) = cv2.aruco.detectMarkers(frame,
                                              arucoDict,
                                              parameters=arucoParams)
        cv2.aruco.drawDetectedMarkers(frame, corners, ids, (0, 255, 0))

        if ids is not None:
            frame_count_up += detect(UP_MARKER, ids)
            frame_count_down += detect(DOWN_MARKER, ids)
            """
            for (markerCorner, _markerID) in zip(corners, ids):
                corners = markerCorner.reshape((4,2))
                (topLeft, _topRight, bottomRight, _bottomLeft) = corners
                cX = int((topLeft[0] + bottomRight[0]) / 2.0)
                cY = int((topLeft[1] + bottomRight[1]) / 2.0)

                if cX < (W // 3) and cY < (H // 3):
                    paddle_a_up
                if cX < (W // 3) and cY > (H // 3):
                    paddle_a_down
            """

        frame = cv2.flip(frame, 1)
        #cv2.rectangle(frame, (0, 0), (W // 3, H // 3), (255, 0, 0), 3)
        #cv2.rectangle(frame, (0, (H // 3) * 2), (W//3, H), (0, 0, 255), 3)
        cv2.imshow('Frame', frame)

    return frame_count_up, frame_count_down
Example n. 17
def faceRecAndVid():

    FRAME_THICKNESS = 2
    FONT_THICKNESS = 1
    MODEL = 'hog'  # Model for recognition cnn/hog
    TOLERANCE = 0.5  # Similarity
    COLOUR = [77, 77, 200]  # shade of red

    # Load face encodings stored in a .dat file; the with block closes the file
    with open('FaceRecog/Student_Encodings.dat', 'rb') as f:
        Loaded_face_encodings = pickle.load(f)

    face_names = list(Loaded_face_encodings.keys())
    face_encodings = np.array(list(Loaded_face_encodings.values()))

    Student_Names, Student_Face_Encodings = face_names, face_encodings

    video = VideoCapture(0)

    if not video.isOpened():
        popUp('Error Getting Video')
    else:
        while True:
            ret, image = video.read()

            if ret:
                image = resize(image, (0, 0), None, .75, .75, INTER_AREA)
                image = cv2.flip(image, 1)  # mirror horizontally; flipCode -1 would flip both axes
                locations = face_recognition.face_locations(image, model=MODEL)
                encodings = face_recognition.face_encodings(image, locations)

                for face_encoding, face_locations in zip(encodings, locations):
                    results = compare_faces(Student_Face_Encodings,
                                            face_encoding, TOLERANCE)
                    match = None

                    if True in results:
                        # match is the name of the identity found
                        match = Student_Names[results.index(True)]
                        Attendance(match)
                        drawBox(image, FRAME_THICKNESS, FONT_THICKNESS, match,
                                face_locations)
                        alertSFX()
                        imshow('Test', image)
                        waitKey(500)

                imshow('Test', image)
                if waitKey(1) & 0xFF == ord("q"):
                    video.release()
                    cv2.destroyWindow('Test')
                    popUp('Done')
                    break
            else:
                video.release()
                popUp('Error getting Video')
                break
Example n. 18
def bayer_aug(img, mode):
    if mode == 'hor':
        return cv2.flip(img, 1)[:, 1:-1]
    elif mode == 'ver':
        return img[::-1][1:-1, :]
    elif mode == 'trans':
        return img.T
    return img
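# Note: bayer_aug crops one pixel from each edge after flipping because a raw
# Bayer mosaic is 2-periodic: flipping an even-sized image moves every sample
# onto the opposite colour site, and dropping the first and last column (or
# row) shifts the grid back by one pixel, restoring the original CFA phase.
# A toy check, assuming numpy as np; the RGGB encoding is an assumption.
pattern = np.array([[0, 1], [1, 2]])      # R G / G B as channel indices
mosaic = np.tile(pattern, (4, 4))         # 8x8 even-sized mosaic
aug = np.flip(mosaic, axis=1)[:, 1:-1]    # same as cv2.flip(img, 1)[:, 1:-1]
assert (aug[:2, :2] == pattern).all()     # CFA phase preserved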
def preprocess_image(x, flip=False):
    # x=cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
    if flip:
        x = cv2.flip(x, 1)
    x = x[50:150, 0:320].copy()
    x = cv2.resize(x, (160, 50))
    x = x.reshape((50, 160, 3))
    return x / 255.0
Example n. 20
    def showImage(self, img, flip=False):
        img = cv2.resize(img, (240, 240))
        if flip:
            img = cv2.flip(img, 1)
        # RGB2BGR and BGR2RGB are the same channel swap; this converts the
        # BGR frame to RGB for QImage
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        toshow = QImage(img.data, img.shape[1], img.shape[0],
                        QImage.Format_RGB888)
        self.lb_video.setPixmap(QPixmap.fromImage(toshow))
Example n. 21
    def _cv2_trim(self) -> bool:
        """
        Remove black borders from a cv2 image array.

        :param cv2_image: cv2 image array
        """
        logger.info("Trying to remove black borders with cv2")
        og_w, og_h = self.cv2.shape[1], self.cv2.shape[0]
        logger.debug("Original dimensions: %dx%d", og_w, og_h)
        og_quotient = og_w / og_h

        first_img = _remove_lateral_cv2(self.cv2)

        # Rotate 90 degrees (transpose + horizontal flip) so the same
        # lateral trim removes the top and bottom borders
        tmp_img = cv2.transpose(first_img)
        tmp_img = cv2.flip(tmp_img, flipCode=1)

        if tmp_img is None:
            raise exceptions.NothingFound("Possible all-black image found")

        final = _remove_lateral_cv2(tmp_img)

        # Rotate back to the original orientation
        out = cv2.transpose(final)

        final_img = cv2.flip(out, flipCode=0)

        new_w, new_h = final_img.shape[1], final_img.shape[0]

        logger.debug("New dimensions: %dx%d", new_w, new_h)
        new_quotient = new_w / new_h

        if abs(new_quotient - og_quotient) > 0.9:
            logger.info("Possible bad quotient found: %s -> %s", og_quotient,
                        new_quotient)
            return False

        width_percent = (100 / og_w) * new_w
        height_percent = (100 / og_h) * new_h

        if any(percent <= 65 for percent in (width_percent, height_percent)):
            logger.info("Possible bad trim found: %s -> %s", width_percent,
                        height_percent)
            return False

        self.cv2 = final_img
        return True
Example n. 22
    def _imflip_(img, direction='horizontal'):
        """Inplace flip an image horizontally or vertically.

        Args:
            img (ndarray): Image to be flipped.
            direction (str): The flip direction, either "horizontal" or
                "vertical" or "diagonal".

        Returns:
            ndarray: The flipped image (inplace).
        """
        assert direction in ['horizontal', 'vertical', 'diagonal']
        if direction == 'horizontal':
            return cv2.flip(img, 1, img)
        elif direction == 'vertical':
            return cv2.flip(img, 0, img)
        else:
            return cv2.flip(img, -1, img)
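# Note: passing the source array as the third argument (dst) makes cv2.flip
# write into the same buffer, so every reference to img sees the flipped
# pixels; returning it keeps a functional style on top of the in-place
# operation. A quick check:
import numpy as np
import cv2

img = np.arange(6, dtype=np.uint8).reshape(2, 3)
expected = img[:, ::-1].copy()
cv2.flip(img, 1, img)  # dst is the source: flips in img's own buffer
assert (img == expected).all()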
Example n. 23
def video_demo():
    capture = cv.VideoCapture(0)
    while True:
        ret, frame = capture.read()  # read() returns (ret, frame); unpack both
        frame = cv.flip(frame, 1)  # mirror flip
        cv.imshow("video", frame)
        c = cv.waitKey(50)
        if c == 27:
            break
Example n. 24
def video_demo():
    capture = cv.VideoCapture(0)  # the argument may also be a video file path
    while True:
        ret, frame = capture.read()
        frame = cv.flip(frame, 1)  # mirror the webcam image
        cv.imshow("video", frame)
        c = cv.waitKey(50)
        if c == 27:
            break
Example n. 25
def flip_kernel(kernel):
    """Flip a kernel (probably to prep for convolution).

    :param kernel: the kernel
    :type kernel: matrix
    """
    k = np.copy(kernel)

    return cv2.flip(k, -1)
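# Note: cv2.filter2D computes correlation, not convolution, so flipping the
# kernel about both axes first (as flip_kernel does) turns it into a true
# convolution for odd-sized kernels. A short usage sketch, assuming numpy
# as np and cv2 are imported:
kernel = np.array([[1., 2., 0.],
                   [0., 1., 0.],
                   [0., 0., 3.]], dtype=np.float32)
img = np.random.rand(16, 16).astype(np.float32)
convolved = cv2.filter2D(img, -1, flip_kernel(kernel))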
Example n. 26
def flip(pic_path, save_path, min_flipValue, max_flipValue):
    files = os.listdir(pic_path)
    for name in files:
        if (".jpg" in name) or (".png" in name) or (".JPG" in name) or (".jpeg" in name):
            image = cv2.imread(pic_path + name)
            # Assumed intent: draw a random flip code in
            # [min_flipValue, max_flipValue] (cv2.flip expects -1, 0 or 1)
            flipValue = random.randint(min_flipValue, max_flipValue)
            xImg = cv2.flip(image, flipValue, dst=None)
            cv2.imwrite(save_path + name[:-4] + "_" + str(flipValue) + "_flip.jpg", xImg)
        else:
            continue
Example n. 27
    def bayer_aug(self, img, mode):
        mode_dict = {0: 'trans', 1: 'hor', 2: 'ver', 3: ''}
        mode = mode_dict[mode]
        if mode == 'hor':
            return cv2.flip(img, 1)[:, 1:-1]
        elif mode == 'ver':
            return img[::-1][1:-1, :]
        elif mode == 'trans':
            return img.T
        return img
Example n. 28
def show_webcam(mirror=False):
    cam = cv2.VideoCapture(0)
    while True:
        _, img = cam.read()
        if mirror: 
            img = cv2.flip(img, 1)
        cv2.imshow('my webcam', img)
        if cv2.waitKey(1) == 27: 
            break  # esc to quit
    cv2.destroyAllWindows()
Example n. 29
def Flip_img(img_path):
    """
    Flip demo:
    flip(img, 1)   # 1 flips horizontally (around the y axis)
    flip(img, 0)   # 0 flips vertically (around the x axis)
    flip(img, -1)  # -1 flips both vertically and horizontally
    """
    pic = cv.imread(img_path)  # load the image

    h_pic = cv.flip(pic, 1)  # horizontal flip
    cv.imshow("overturn-h", h_pic)

    v_pic = cv.flip(pic, 0)  # vertical flip
    cv.imshow("overturn-v", v_pic)

    hv_pic = cv.flip(pic, -1)  # horizontal + vertical flip
    cv.imshow("overturn-hv", hv_pic)
    cv.waitKey(0)
    cv.destroyAllWindows()
Example n. 30
def Perspective_transform(orig, screenCnt, ratio):  # 2D transform of the detected sheet of paper
	warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)  # helper that flattens the image
	warped = cv2.flip(warped, 1)  # mirror the flattened image around the y axis

	height, width, channels = warped.shape  # dimensions of the new image

	# If the width exceeds the height, the image is lying on its side
	# and must be rotated back by 90 degrees
	if width > height:
		warped = imutils.rotate_bound(warped, -90)

	Shadow_removal(warped)  # remove shadows from the image
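# Note: four_point_transform is imported from a companion module. A minimal
# sketch of the classic implementation it is usually paired with; the
# project's own copy may differ.
def four_point_transform(image, pts):
    # Order the points: top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]   # top-left has the smallest sum
    rect[2] = pts[np.argmax(s)]   # bottom-right has the largest sum
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]   # top-right has the smallest difference
    rect[3] = pts[np.argmax(d)]   # bottom-left has the largest difference
    (tl, tr, br, bl) = rect

    # Size of the destination rectangle
    maxW = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    maxH = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))

    dst = np.array([[0, 0], [maxW - 1, 0],
                    [maxW - 1, maxH - 1], [0, maxH - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, M, (maxW, maxH))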