Example #1
def rotation(original):
    global h_f, width_f

    img = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)  # gray
    img = cv2.blur(img, (3, 3))

    kernel = np.ones((5, 5), np.uint8)
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)

    img = cv2.Canny(img, 50, 200)

    img = cv2.dilate(img, np.ones((5, 5)))
    t, img = cv2.threshold(img, 160, 255, cv2.THRESH_BINARY)

    pos_list = find_squares(img)
    pos_list = positions(pos_list)
    print(len(pos_list), "  ->before")

    print(len(pos_list), "  ->after")

    pos_list = duplicate(pos_list)
    pos_list = sort(pos_list)

    max_pos = []
    next = []
    max_len = 0

    while (True):
        next = extra(pos_list)
        if (len(next) > max_len):
            max_len = len(next)
            max_pos = next

        if (len(pos_list) == 0):
            break
    print(len(max_pos), "\t", max_pos)
    one = []
    two = []
    three = []

    print(max_pos[0], max_pos[len(max_pos) - 1])
    if (len(max_pos) > 1):
        one.append(max_pos[0][0])
        one.append(max_pos[0][3])
        two.append(max_pos[2][1])
        two.append(max_pos[2][3])

        three.append(two[0])
        three.append(one[1])

    ypotinousa = math.sqrt(
        pow((two[0] - one[0]), 2) + pow((two[1] - one[1]), 2))
    platos = math.sqrt(
        pow((three[0] - one[0]), 2) + pow((three[1] - one[1]), 2))
    ipsos = math.sqrt(
        pow((two[0] - three[0]), 2) + pow((two[1] - three[1]), 2))

    num = int(angle(ypotinousa, platos, ipsos) + 1)

    print(num, " This is a num")
    rotated = original

    if (one[1] > two[1]):
        rotated = imutils.rotate_bound(original, num)
    else:
        rotated = imutils.rotate_bound(original, -num)

        # h,width,n = rotated.shape
        # print(rotated.shape)

    h_f, width_f, n = rotated.shape

    return rotated
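The angle() helper called above is not part of this snippet. Given how it is used (the three triangle sides ypotinousa = hypotenuse, platos = width, ipsos = height go in, an angle in degrees comes out), a minimal sketch using the law of cosines might look like this; the name and exact convention are assumptions:

import math

def angle(hypotenuse, width, height):
    # Hypothetical helper: angle (in degrees) opposite the `height` side,
    # recovered from the three side lengths via the law of cosines.
    cos_a = (hypotenuse ** 2 + width ** 2 - height ** 2) / (2 * hypotenuse * width)
    return math.degrees(math.acos(max(-1.0, min(1.0, cos_a))))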
Example #2
def detectRectangle(image):
    resized = imutils.resize(image, width=300)
    ratio = image.shape[0] / float(resized.shape[0])

    # convert the resized image to grayscale, blur it slightly,
    # and threshold it
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh1 = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)[1]

    thresh = cv2.threshold(blurred, 160, 220, cv2.THRESH_BINARY)[1]
    cv2.imshow("thresh", thresh)
    cv2.imshow("thresh1", thresh1)

    # find contours in the thresholded image and initialize the
    # shape detector
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_TREE,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    sd = RectangleDetector()

    maxArea = 0
    # loop over the contours
    for c in cnts:
        # compute the center of the contour, then detect the name of the
        # shape using only the contour
        M = cv2.moments(c)
        cX = int((M["m10"] / M["m00"]) * ratio)
        cY = int((M["m01"] / M["m00"]) * ratio)
        shape = sd.detect(c)

        # multiply the contour (x, y)-coordinates by the resize ratio,
        # then draw the contours and the name of the shape on the image
        c = c.astype("float")
        c *= ratio
        c = c.astype("int")
        area = cv2.contourArea(c)
        if (area > maxArea and shape == "rectangle"):
            maxArea = area
            # boundingRect gives the top-left point plus width and height; the other three corners follow from those
            (x, y, w, h) = cv2.boundingRect(c)

            topLeft = [y, x]
            topRight = [y, x + w]
            botRight = [y + h, x + w]
            botLeft = [y + h, x]

            # rotated everything forward one because
            array = [topLeft, topRight, botRight, botLeft]
            print(array)

            # don't use both at the same time
            rotatedArray = [botLeft, topLeft, topRight, botRight]
            rotatedImage = imutils.rotate_bound(image, 90)

            # master runner takes topLeft, topRight, botRight, botLeft

            #
            UIN = master_runner(image, botLeft, topLeft, topRight, botRight)

            print('================')
            print('UIN', UIN)
            print('================')

            cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
            # cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
            cv2.putText(image, 'rect', (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 2)
            # show the output image
            cv2.imshow("Rectangle bounded", image)
Example #3
 def rotate_frame(self, image):
     if self.angle is not None:
         return imutils.rotate_bound(image, self.angle)
     else:
         return image
Example #4
#what does this do
cv2.imshow("rotated", rotated)
cv2.waitKey(0)

# In[14]:

#same image rotation is possible in one line using imutils
rotated = imutils.rotate(image, -45)  # obviously more convenient, but what do we gain?
cv2.imshow("rotated imutils", rotated)
cv2.waitKey(0)
#imutils wraps cv2; with plain rotate the image still gets clipped. rotate_bound is a handy function that keeps
#the whole image with no clipping by resizing the output canvas (a simplified sketch of how follows the next cell)

# In[15]:

rotated = imutils.rotate_bound(image, 45)
#here positive angles rotate clockwise
cv2.imshow("disp", rotated)
cv2.waitKey(0)
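# rotate_bound avoids clipping by growing the output canvas to fit the rotated
# corners. A simplified sketch of what it does internally (assuming it wraps
# cv2.getRotationMatrix2D / cv2.warpAffine):
import cv2

def rotate_bound_sketch(image, angle):
    (h, w) = image.shape[:2]
    (cX, cY) = (w / 2.0, h / 2.0)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)  # negative: clockwise positive
    cos, sin = abs(M[0, 0]), abs(M[0, 1])
    nW = int(h * sin + w * cos)   # new canvas width
    nH = int(h * cos + w * sin)   # new canvas height
    M[0, 2] += (nW / 2.0) - cX    # re-centre the rotated image on the new canvas
    M[1, 2] += (nH / 2.0) - cY
    return cv2.warpAffine(image, M, (nW, nH))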

# In[4]:

#good practice to blur images before doing further image processing
#blurring reduces the number of high-frequency components
#using Gaussian blur
blur = cv2.GaussianBlur(image, (11, 11), 0)
#(11, 11) is the kernel size here; the larger the kernel, the stronger the blur (a wider range of values is averaged)
cv2.imshow("blur", blur)
cv2.waitKey(0)

# In[5]:
Example #5
# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskNet = load_model(args["model"])

# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = FileVideoStream("./videos/RGB.mp4")
time.sleep(2.0)

# loop over the frames from the video stream
while True:
	# grab the frame from the threaded video stream and resize it
	# to have a maximum width of 400 pixels
	frame = vs.read()
	frame = imutils.resize(imutils.rotate_bound(frame, angle=90), width=400)
    
	# detect faces in the frame and determine if they are wearing a
	# face mask or not
	(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)

	# loop over the detected face locations and their corresponding
	# locations
	for (box, pred) in zip(locs, preds):
		# unpack the bounding box and predictions
		(startX, startY, endX, endY) = box
		(mask, withoutMask) = pred

		# determine the class label and color we'll use to draw
		# the bounding box and text
		label = "Mask" if mask > withoutMask else "No Mask"
Example #6
File: image.py Project: samo1petar/SSD
def rotate_90(image: np.ndarray) -> np.ndarray:
    return imutils.rotate_bound(image, -90)
Example #7
    # classification
    orig = cv2.imread(imagePath)

    # load the input image using the Keras helper utility while
    # ensuring the image is resized to 224x224 pixels
    image = load_img(imagePath, target_size=(224, 224))
    image = img_to_array(image)

    # preprocess the image by (1) expanding the dimensions and (2)
    # subtracting the mean RGB pixel intensity from the ImageNet
    # dataset
    image = np.expand_dims(image, axis=0)
    image = imagenet_utils.preprocess_input(image)

    # pass the image through the network to obtain the feature vector
    features = vgg.predict(image)
    features = features.reshape((features.shape[0], 512 * 7 * 7))

    # now that we have the CNN features, pass these through our
    # classifier to obtain the orientation predictions
    angle = model.predict(features)
    angle = labelNames[angle[0]]

    # now that we have the predicted orientation of the image we can
    # correct for it
    rotated = imutils.rotate_bound(orig, 360 - angle)

    # display the original and corrected images
    cv2.imshow("Original", orig)
    cv2.imshow("Corrected", rotated)
    cv2.waitKey(0)
Example #8
def path_plot_config(configini):
    pd.options.mode.chained_assignment = None
    config = ConfigParser()
    configFile = str(configini)
    config.read(configFile)
    noAnimals = config.getint('Path plot settings', 'no_animal_pathplot')
    projectPath = config.get('General settings', 'project_path')
    frames_dir_out = os.path.join(projectPath, 'frames', 'output',
                                  'path_plots')
    if not os.path.exists(frames_dir_out):
        os.makedirs(frames_dir_out)
    try:
        maxDequeLines = config.getint('Path plot settings', 'deque_points')
    except ValueError:
        print('ERROR: "Max lines" not set.')
    csv_dir_in = os.path.join(projectPath, 'csv', 'machine_results')
    severityBool = config.get('Path plot settings', 'plot_severity')
    severityTarget = config.get('Path plot settings', 'severity_target')
    trackedBodyPart1 = config.get('Path plot settings', 'animal_1_bp')
    trackedBodyPart2 = config.get('Path plot settings', 'animal_2_bp')
    try:
        severity_brackets = config.getint('Path plot settings',
                                          'severity_brackets')
    except ValueError:
        print('"Severity scale" not set.')
        severity_brackets = 1
    vidInfPath = os.path.join(projectPath, 'logs', 'video_info.csv')
    vidinfDf = pd.read_csv(vidInfPath)
    severityGrades = list(np.arange(0, 1.0, ((10 / severity_brackets) / 10)))
    severityGrades.append(10)
    severityColourRGB, severityColour = [], []
    clrs = sns.color_palette('Reds', n_colors=severity_brackets)
    for color in clrs:
        for value in color:
            value *= 255
            value = int(value)
            severityColourRGB.append(value)
    severityColourList = [
        severityColourRGB[i:i + 3] for i in range(0, len(severityColourRGB), 3)
    ]
    for color in severityColourList:
        r, g, b = color[0], color[1], color[2]
        severityColour.append((b, g, r))
    filesFound = glob.glob(csv_dir_in + "/*.csv")
    print('Generating path plots for ' + str(len(filesFound)) + ' video(s)...')
    fileCounter = 0

    for currentFile in filesFound:
        fileCounter += 1
        loop = 0
        listPaths_mouse1, listPaths_mouse2 = deque(
            maxlen=maxDequeLines), deque(maxlen=maxDequeLines)
        severityCircles = []
        csv_df = pd.read_csv(currentFile, index_col=[0])
        CurrentVideoName = os.path.basename(currentFile)
        videoSettings = vidinfDf.loc[vidinfDf['Video'] == str(
            CurrentVideoName.replace('.csv', ''))]
        try:
            resWidth = int(videoSettings['Resolution_width'])
            resHeight = int(videoSettings['Resolution_height'])
        except TypeError:
            print(
                'Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file'
            )
        if noAnimals == 1:
            trackedBodyPartHeadings = [
                trackedBodyPart1 + '_x', trackedBodyPart1 + '_y'
            ]
        elif noAnimals == 2:
            trackedBodyPartHeadings = [
                trackedBodyPart1 + '_x', trackedBodyPart1 + '_y',
                trackedBodyPart2 + '_x', trackedBodyPart2 + '_y'
            ]

        filter_col = csv_df[trackedBodyPartHeadings]
        shifted_headings = [x + '_shifted' for x in filter_col]
        csv_df_shifted = filter_col.shift(periods=1)
        csv_df_shifted.columns = shifted_headings
        csv_df_combined = pd.concat([filter_col, csv_df_shifted],
                                    axis=1,
                                    join='inner')
        csv_df_combined = csv_df_combined.fillna(0)
        columnNames = list(csv_df_combined)
        maxImageSizeColumn_x, maxImageSizeColumn_y = resWidth, resHeight
        savePath = os.path.join(frames_dir_out,
                                CurrentVideoName.replace('.csv', ''))
        if not os.path.exists(savePath):
            os.makedirs(savePath)
        img_size = (maxImageSizeColumn_y, maxImageSizeColumn_x, 3)
        if severityBool == 'yes':
            csv_df_combined[severityTarget] = csv_df[severityTarget].values
            csv_df_combined['Scaled_movement_M1_M2'] = csv_df[
                'Scaled_movement_M1_M2'].values
            columnNames = list(csv_df_combined)

        for index, row in csv_df_combined.iterrows():
            img = np.ones(img_size) * 255
            overlay = img.copy()
            if noAnimals == 1:
                m1tuple = (int(row[columnNames[0]]), int(row[columnNames[1]]),
                           int(row[columnNames[2]]),
                           (int(row[columnNames[3]])))
            if noAnimals == 2:
                m1tuple = (int(row[columnNames[0]]), int(row[columnNames[1]]),
                           int(row[columnNames[4]]),
                           (int(row[columnNames[5]])))
                m2tuple = (int(row[columnNames[2]]), int(row[columnNames[3]]),
                           int(row[columnNames[6]]),
                           (int(row[columnNames[7]])))
            if index == 0:
                m1tuple, m2tuple = (0, 0, 0, 0), (0, 0, 0, 0)
            listPaths_mouse1.appendleft(m1tuple)
            if noAnimals == 2:
                listPaths_mouse2.appendleft(m2tuple)
            for i in range(len(listPaths_mouse1)):
                tupleM1 = listPaths_mouse1[i]
                cv2.line(img, (tupleM1[2], tupleM1[3]),
                         (tupleM1[0], tupleM1[1]), (255, 191, 0), 2)
                if noAnimals == 2:
                    tupleM2 = listPaths_mouse2[i]
                    cv2.line(img, (tupleM2[2], tupleM2[3]),
                             (tupleM2[0], tupleM2[1]), (0, 255, 0), 2)

            if severityBool == 'yes':
                attackPrediction = int(row[columnNames[8]])
                severityScore = float(row[columnNames[9]])
                if attackPrediction == 1:
                    midpoints = (list(
                        zip(np.linspace(m1tuple[0], m2tuple[0], 3),
                            np.linspace(m1tuple[1], m2tuple[1], 3))))
                    locationEventX, locationEventY = midpoints[1]
                    for i in range(severity_brackets):
                        lowerBound = severityGrades[i]
                        upperBound = severityGrades[i + 1]
                        if (severityScore > lowerBound) and (severityScore <=
                                                             upperBound):
                            severityCircles.append(
                                (locationEventX, locationEventY,
                                 severityColour[i]))
                for y in range(len(severityCircles)):
                    currEventX, currEventY, colour = severityCircles[y]
                    cv2.circle(overlay, (int(currEventX), int(currEventY)), 20,
                               colour, -1)
            print(len(severityCircles))
            image_new = cv2.addWeighted(overlay, 0.2, img, 1 - 0.2, 0)
            m1tuple = (int(row[columnNames[0]]), int(row[columnNames[1]]))
            cv2.circle(image_new, (m1tuple[0], m1tuple[1]), 20, (255, 0, 0),
                       -1)
            if noAnimals == 2:
                m2tuple = (int(row[columnNames[2]]), int(row[columnNames[3]]))
                cv2.circle(image_new, (m2tuple[0], m2tuple[1]), 20,
                           (0, 128, 0), -1)
            imageSaveName = os.path.join(savePath, str(loop) + '.png')
            image_new = imutils.resize(image_new, width=400)
            if img_size[0] < img_size[1]:
                image_new = imutils.rotate_bound(image_new, 270.0000)
            cv2.imwrite(imageSaveName, image_new)
            loop += 1
            print('Path plot ' + str(loop) + '/' + str(len(csv_df_combined)) +
                  ' for video ' + str(fileCounter) + '/' +
                  str(len(filesFound)))
    print(
        'Finished generating path plots. Plots are saved @ project_folder/frames/output/path_plots'
    )
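The two loops above that turn the seaborn palette into BGR tuples (via severityColourRGB and severityColourList) can be collapsed into a single comprehension; an equivalent sketch:

    severityColour = [tuple(int(c * 255) for c in (b, g, r)) for (r, g, b) in clrs]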
Example #9
def easy_tello_query(drone):

    previous_frame = None

    while True:
        # battery = drone.get_battery()
        # speed = drone.get_speed()
        # time = drone.get_time()
        # height = drone.get_height()
        # temp = drone.get_temp()
        pitch, roll, yaw = drone.get_attitude()
        # baro = drone.get_baro()
        # acceleration = drone.get_acceleration()
        # tof = drone.get_tof()
        # wifi = drone.get_wifi()

        # print(f"Roll: {roll}, Pitch: {pitch}, Yaw: {yaw}")
        # m = np.array([[cos(radians(roll)), -sin(radians(roll)), 0],
        #               [sin(radians(roll)), cos(radians(roll)), 0]])

        frame = drone.get_frame()
        if frame is not None:
            rows, columns, _ = frame.shape

            # print(f"Roll: {roll}")
            # print(f"sin: {sin(radians(roll))}, cos: {cos(radians(roll))}")
            # print(f"Width: {int(rows*cos(radians(roll))+columns*sin(radians(roll)))}, "
            #       f"Height: {int(rows*sin(radians(roll))+columns*cos(radians(roll)))}")

            # rot_mat = cv2.getRotationMatrix2D((frame.shape[1]/2, frame.shape[0]/2), -roll, 1)
            #
            # print(rot_mat)
            # displacement_top_left = rot_mat[:, 2].copy()
            # rot_mat[:,2] = rot_mat[:,2]+displacement_top_left
            # print(rot_mat)
            #
            # frame_rotated = cv2.warpAffine(frame, rot_mat,
            #                                (int(rows*fabs(sin(radians(roll)))+columns*fabs(cos(radians(roll)))),
            #                                 int(rows*fabs(cos(radians(roll)))+columns*fabs(sin(radians(roll))))))

            frame_rotated = imutils.rotate_bound(frame, roll)

            if previous_frame is not None:
                image1 = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
                image2 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                image_points1 = cv2.goodFeaturesToTrack(image1, maxCorners=300, qualityLevel=0.01, minDistance=10)

                image_points2, status, err = cv2.calcOpticalFlowPyrLK(image1, image2, image_points1, None)

                idx = np.where(status == 1)[0]
                image_points1 = image_points1[idx]
                image_points2 = image_points2[idx]

                # Estimate affine transformation
                # Produces a transformation matrix :
                # [[cos(theta).s, -sin(theta).s, tx],
                # [sin(theta).s, cos(theta).s, ty]]
                # where theta is rotation, s is scaling and tx,ty are translation
                m = cv2.estimateAffinePartial2D(image_points1, image_points2)[0]

                # Extract translation
                dx = m[0, 2]
                dy = m[1, 2]
                # print(f"dx:{dx}, dy=:{dy}")

            previous_frame = frame

            cv2.imshow('frame', frame_rotated)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cv2.destroyAllWindows()
    drone.streamoff()
Example #10
            noisy = np.clip((img_n + noise*0.5),0,1)
            out_img = save_path + img_name + '_noisy.png'
            plt.imsave(out_img, noisy, vmin=0, vmax=255)
            image_ids.append(out_img)
            class_res.append(0)

        if count % 9 == 4:
            img2 = img_n*2
            n2 = np.clip(np.where(img2 <= 1, (img2*(1 + noise*0.2)), (1-img2+1)*(1 + noise*0.2)*-1 + 2)/2, 0,1)
            out_img = save_path + img_name + '_noisy2.png'
            plt.imsave(out_img, n2, vmin=0, vmax=255)
            image_ids.append(out_img)
            class_res.append(0)

        if count % 9 == 5:
            rotated_90 = imutils.rotate_bound(img, 90)
            out_img = save_path + img_name + '_rotated90.png'
            plt.imsave(out_img, rotated_90, vmin=0, vmax=255)
            image_ids.append(out_img)
            class_res.append(0)

        #rotated_180 = imutils.rotate_bound(img, 180)
        #out_img = save_path + 'rotated180/' + img_name + '_rotated180.png'
        #plt.imsave(out_img, rotated_180, vmin=0, vmax=255)

        if count % 9 == 6:
            rotated_270 = imutils.rotate_bound(img, 270)
            out_img = save_path + img_name + '_rotated270.png'
            plt.imsave(out_img, rotated_270, vmin=0, vmax=255)
            image_ids.append(out_img)
            class_res.append(0)
Example #11
def make_video_caption(dir, videoname, format, csv_file_path, image_path, fpm,
                       angle):
    # Print init text
    print("Making video: " + str(videoname))
    print("File path: " + str(image_path))

    # set process to work
    work = True

    # sort all directory files in alphabetical order
    dirfiles = []
    for root, dirs, files in os.walk(image_path):
        dirs.sort()
        for filename in files:
            if filename.endswith(".jpg"):
                dirfiles.append((os.path.join(root, filename)))
            if filename.endswith(".tif"):
                dirfiles.append((os.path.join(root, filename)))
            if filename.endswith(".JPG"):
                dirfiles.append((os.path.join(root, filename)))

    image = dirfiles[0]
    dirfiles = sorted(dirfiles)

    # set video file name and path
    videooutput = dir + videoname + format
    print(videooutput)

    # check if the video file with same name already exists
    my_file = Path(videooutput)
    if my_file.is_file():
        print(
            "Video file name is already taken! Please chose a different file name..."
        )
        work = False  # if yes set work to false and abort
        cv2.destroyAllWindows()

    # set dir for refined image output
    ref_img_path = image_path
    ref_img_dir = ref_img_path + str("/Refined Images/")

    try:
        os.makedirs(ref_img_dir)
    except:
        shutil.rmtree(ref_img_dir)
        os.makedirs(ref_img_dir)

    frame = cv2.imread(dirfiles[1])
    coordinates = []

    def Set_image_bounds(
            event, x, y, flags,
            param):  # method that splits the screen into areas for analysis
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (x + 10, y)
        fontScale = 0.4
        fontColor = (255, 255, 255)
        lineType = 1

        if event == cv2.EVENT_LBUTTONDBLCLK:
            cv2.circle(frame, (x, y), 10, (0, 0, 255), -1)
            cv2.putText(frame,
                        str(x) + ", " + str(y), bottomLeftCornerOfText, font,
                        fontScale, fontColor, lineType)
            coordinates.append([int(x), int(y)])

    frame = cv2.resize(frame, None, fx=1, fy=1)
    cv2.namedWindow('Set Up The Feature Areas', cv2.WINDOW_NORMAL)
    cv2.imshow('Set Up The Feature Areas', frame)
    cv2.resizeWindow('Set Up The Feature Areas', 600, 600)
    cv2.setMouseCallback('Set Up The Feature Areas',
                         Set_image_bounds)  # Set bounds for video

    while (1):
        cv2.imshow('Set Up The Feature Areas', frame)  # show frame
        if cv2.waitKey(20) in [ord('p'), ord('P')]:  # if P is pressed close
            cv2.imshow('Set Up The Feature Areas', frame)
            cv2.destroyAllWindows()
            break

    x_list = []
    y_list = []
    for i in coordinates:
        x_list.append(i[0])
        y_list.append(i[1])

    y1 = np.min(y_list)
    y2 = np.max(y_list)
    x1 = np.min(x_list)
    x2 = np.max(x_list)

    coordinates = y1, y2, x1, x2
    print("Coordinates: " + str(coordinates))

    # Process files
    if work == True:
        print("Processing, please wait...")
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        video = cv2.VideoWriter(videooutput, fourcc, 16, (x2 - x1, y2 - y1))
        sec = 0
        min = 0
        hr = 0

        counter = 0
        ref_img_count = 1
        pbar_increment = 1
        pbar = tqdm(total=len(dirfiles))

        for image in dirfiles:
            y_top, y_bottom, x_left, x_right = coordinates
            # count frames for time-stamp in video - convert to hh:mm:ss

            sec = sec + fpm * 60

            if sec >= 60:
                min = min + 1
                sec = 0

            # min = min + fpm
            if min == 60:
                hr = hr + 1
                min = 0

            printtime = "{:02d}:{:02d}:{:02d}".format(
                hr, int(min), int(sec))  # generate time string to print

            text = "Time [hh:mm:ss] : " + printtime

            img = cv2.imread(image)  # get image
            img = img[y_top:y_bottom, x_left:x_right, :].copy()
            if angle != 0:
                img = imutils.rotate_bound(img, angle)
                cv2.imshow("Rotated", img)

            # set font of test overlay
            x_max = x_right - x_left
            y_max = y_bottom - y_top

            font = cv2.FONT_HERSHEY_SIMPLEX
            bottomLeftCornerOfText = (x_max - int((x_max * 0.9)), y_max - 25)
            topLeftCornerOfText = (x_max - int((x_max * 0.9)), y_max - int(
                (y_max * 0.85)))
            fontScale = x_max / 600
            fontColor = (255, 255, 255)
            lineType = 3

            # write time-string to image
            cv2.putText(img, text, bottomLeftCornerOfText, font, fontScale,
                        fontColor, lineType)
            counter = counter + 1

            video.write(img)  # write image to video

            ref_img_name = "None"
            if ref_img_count > 999:
                ref_img_name = "Time-" + str(
                    ref_img_count) + "-min.jpg"  # refined image name
            elif ref_img_count > 99:
                ref_img_name = "Time-0" + str(
                    ref_img_count) + "-min.jpg"  # refined image name
            elif ref_img_count < 10:
                ref_img_name = "Time-000" + str(
                    ref_img_count) + "-min.jpg"  # refined image name
            else:
                ref_img_name = "Time-00" + str(
                    ref_img_count) + "-min.jpg"  # refined image name

            cv2.imwrite(ref_img_dir + ref_img_name, img)  # write refined image
            ref_img_count = ref_img_count + 1  # advance refined image counter
            pbar.update(pbar_increment)  # update progress bar

        print("Complete!")
        pbar.close()
        cv2.destroyAllWindows()
        video.release()
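The four-way if/elif above that zero-pads ref_img_name is equivalent to a single format call, e.g.:

            ref_img_name = "Time-{:04d}-min.jpg".format(ref_img_count)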
Example #12
				face_mean = np.mean(roi_color[int(fh*0.25):int(fh*0.75),int(fw*0.25):int(fw*0.75),:], axis=(0,1))
				face_mean_rgb = np.ones((1,1,3)) * face_mean
				face_mean_hsv = cv2.cvtColor(np.array(face_mean_rgb, dtype=np.uint8), cv2.COLOR_BGR2HSV)
				face_mean_hsv = face_mean_hsv.astype('float32')
				value_diff = face_mean_hsv[0,0,2] - hand_mean_hsv[2]

				for y in range(0, hh):
					for x in range(0, hw):
						hand_hsv[y,x,0] = face_mean_hsv[0,0,0]
						hand_hsv[y,x,2] = max(0,min(hand_hsv[y,x,2] + value_diff,255))

				hand_bgr = cv2.cvtColor(hand_hsv, cv2.COLOR_HSV2BGR)
				b, g, r, a = cv2.split(hands[i])
				hand_bgra = cv2.merge((hand_bgr,a))
				hand_bgra = cv2.resize(hand_bgra, tuple(image.shape[:2]))
				hand_bgra = imutils.rotate_bound(hand_bgra, randint(0, 360))
				hand_center = (iw * (0.25 + 0.5 * random()), ih * (0.25 + 0.5 * random()))
				objectOverlay(image, hand_bgra, left_eye[0] - middle_eye[0], hand_center, labels, "background")

			# Stripe occluder
			else:

				occluded = True
				stripe_top_left = (int(iw * (0.0  + 0.5 * random())), int(ih * (0.0  + 0.5 * random())))
				stripe_size = (int(iw * (0.25 + 0.33 * random())), int(ih * (0.25 + 0.33 * random())))
				stripe_bottom_left = (stripe_size[0] + stripe_top_left[0], stripe_size[1] + stripe_top_left[1])
				stripe_color  = (255 * random(), 255 * random(), 255 * random()) 
				cv2.rectangle(image, stripe_top_left, stripe_bottom_left, stripe_color, -1)
				cv2.rectangle(labels, stripe_top_left, stripe_bottom_left, label_colors[0], -1)

		# Save output
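The nested per-pixel loop above that overwrites the hue channel and clamps the value channel can be written with numpy broadcasting; an equivalent sketch (same clamping, assuming hand_hsv is a float array):

				hand_hsv[:, :, 0] = face_mean_hsv[0, 0, 0]
				hand_hsv[:, :, 2] = np.clip(hand_hsv[:, :, 2] + value_diff, 0, 255)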
Example #13
def run_video_stream():
    # q = None
    rotation = 0
    xy = [0, 0]
    # old_xy = xy
    # state = 0
    # count = 0
    rd = 0
    # queue_message = [0, '']
    # extra_text = ''
    mono = False
    blur = [False, 0]
    okay_to_send = 0
    crosshair = 0
    no_circle = 0

    detector = create_detector()
    video_stream = cv2.VideoCapture(ThreadCommunication.camera)

    while True:
        # Process queue messages before frames
        if not ThreadCommunication.txq.empty():
            queue_message = ThreadCommunication.txq.get()
            if queue_message[0] == ThreadCommunication.exit:
                return 0
            elif queue_message[0] == ThreadCommunication.stop:
                okay_to_send = 0
            elif queue_message[0] == ThreadCommunication.ready:
                okay_to_send = 1
            elif queue_message[0] == ThreadCommunication.crosshair:
                crosshair = queue_message[1]
            elif queue_message[0] == ThreadCommunication.extra_text:
                extra_text = queue_message[1]
            elif queue_message[0] == ThreadCommunication.rotate:
                rotation = (rotation + 90) % 360  # add 90 degrees and wrap around to stay within 0-360
            elif queue_message[0] == ThreadCommunication.reset_rotation:
                rotation = 0
            elif queue_message[0] == ThreadCommunication.message_command:
                try:
                    if 'mono' in queue_message[1]:
                        mono = not mono
                    if 'blur' in queue_message[1]:
                        blur = [not blur[0], int(queue_message[1].split()[1])]
                    if 'thresh' in queue_message[1]:
                        detector = create_detector(t1=int(queue_message[1].split()[1]), t2=int(queue_message[1].split()[2]))
                    if 'all' in queue_message[1]:
                        detector = create_detector(all=float(queue_message[1].split()[1]))
                    if 'area' in queue_message[1]:
                        detector = create_detector(area=int(queue_message[1].split()[1]))

                # Should handle possible exceptions individually, Exception is too broad
                except Exception as e:
                    print('Bad command or argument')

        # Process frames
        (success, cap_frame) = video_stream.read()
        frame = imutils.rotate_bound(cap_frame, rotation)
        target = [int(np.around(frame.shape[1] / 2)), int(np.around(frame.shape[0] / 2))]

        if mono:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if blur[0]:
            frame = cv2.medianBlur(frame, blur[1])

        keypoints = detector.detect(frame)

        # Draw timestamp on frame after circle detection to prevent finding circles in text
        frame = put_text(frame, 'timestamp', offsety=99)
        frame = put_text(frame, 'Q', offsetx=99, offsety=-99)
        if not okay_to_send:
            frame = put_text(frame, '-', offsetx=99, offsety=-99)

        if crosshair:
            frame = cv2.line(frame, (target[0], target[1] - 25), (target[0], target[1] + 25), (0, 255, 0), 1)
            frame = cv2.line(frame, (target[0] - 25, target[1]), (target[0] + 25, target[1]), (0, 255, 0), 1)
            cv2.imshow('Nozzle', frame)
            key = cv2.waitKey(1)
            continue

        if no_circle > 25:
            show_blobs(cap_frame)
            no_circle = 0

        number_of_circles = len(keypoints)
        # Found no circles
        if number_of_circles == 0:
            if 25 < int(round(time.time() * 1000)) - rd:
                no_circle += 1
                frame = put_text(frame, 'No circles found', offsety=3)
                cv2.imshow('Nozzle', frame)
                cv2.waitKey(1)
            continue

        # Found too many circles (>1)
        elif number_of_circles > 1:
            if 25 < int(round(time.time() * 1000)) - rd:
                frame = put_text(frame, f'Too many circles found {number_of_circles}', offsety=3, color=(255, 255, 255))
                frame = cv2.drawKeypoints(frame, keypoints, np.array([]), (255, 255, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
                cv2.imshow("Nozzle", frame)
                cv2.waitKey(1)
            continue

        # Found 1 circle
        no_circle = 0
        center_position = np.uint16(np.around(keypoints[0].pt))
        center_radius = np.around(keypoints[0].size / 2)
        frame = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        frame = put_text(frame, f'X{center_position[0]} Y{center_position[1]} R{center_radius}', offsety=2, color=(0, 255, 0), stroke=2)

        cv2.imshow('Nozzle', frame)
        cv2.waitKey(1)

        rd = int(round(time.time() * 1000))

        if okay_to_send:
            ThreadCommunication.rxq.put([ThreadCommunication.frame_data, center_position, target])
Example #14
def apply_sprite(image, path2sprite,w,x,y, angle, ontop = True):
    sprite = cv2.imread(path2sprite,-1)
    #print sprite.shape
    sprite = rotate_bound(sprite, angle)
    (sprite, y_final) = adjust_sprite2head(sprite, w, y, ontop)
    image = draw_sprite(image,sprite,x, y_final)
Example #15
import numpy as np
import argparse
import cv2
import imutils

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

# load the image, resize, rotate, clone it for output, and then convert it to grayscale
image_orig = cv2.imread(args["image"])
image_resize = imutils.resize(image_orig, width=900)
image_rotate = imutils.rotate_bound(image_resize, 90)

output = image_rotate.copy()
gray = cv2.cvtColor(image_rotate, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)

# perform edge detection, then perform a dilation + erosion to
# close gaps in between object edges
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]

# the indexes of the contours that qualify the condition will be in new_cnts
new_cnts = np.ones(len(cnts))
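Note: the cnts[0] / cnts[1] selection above only covers the OpenCV 2 vs 3 return formats; imutils.grab_contours (used in the detectRectangle example earlier) handles the unpacking across OpenCV versions in one call:

cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)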
Example #16
args = vars(parser.parse_args())

# These are the parameters used to extract the separation zone. The
# modifications are applied in the following order:
# 1. rotation by rotangle
# 2. cropping to rectangle (inlet_x - width/2, inlet_y - height, inlet_x + width/2, inlet_y)
# 3. mirror rectangle content if -m is given
# 4. convert to HSL (Hue, Saturation, Luminance)
# 5. increase saturation by satinc
# 6. convert to gray picture
# 7. remap values to levels (from, to) to cut out background and emphasize streams

# Open file and begin image processing here
image = cv2.imread(args['infile'])
rotated = imutils.rotate_bound(image, args['rot'])
imagecut = rotated[int(args['inlet'][1] -
                       args['zone'][1]):int(args['inlet'][1]),
                   int(args['inlet'][0] -
                       args['zone'][0] / 2):int(args['inlet'][0] +
                                                args['zone'][0] / 2)]

if args['mirror']:
    imagecut = cv2.flip(imagecut, 1)

hls = cv2.cvtColor(imagecut, cv2.COLOR_BGR2HLS)
zonesat = cv2.split(hls)[2]
zonesat = cv2.add(zonesat, args['sat'])
hls[:, :, 2] = zonesat
gray = cv2.cvtColor(cv2.cvtColor(hls, cv2.COLOR_HLS2BGR), cv2.COLOR_BGR2GRAY)
gray = cv2.subtract(gray, args['levels'][0])
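The snippet ends part-way through step 7; a hedged sketch of how the remap to (from, to) levels might be completed, assuming args['levels'] holds the two cut-off values:

span = max(1, args['levels'][1] - args['levels'][0])
gray = cv2.convertScaleAbs(gray, alpha=255.0 / span)  # stretch the remaining range to 0-255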
Example #17
    endY = int(endY * rH)

    # in order to obtain a better OCR of the text we can potentially
    # apply a bit of padding surrounding the bounding box -- here we
    # are computing the deltas in both the x and y directions
    dX = int((endX - startX) * args["padding"])
    dY = int((endY - startY) * args["padding"])

    # apply padding to each side of the bounding box, respectively
    startX = max(0, startX - dX)
    startY = max(0, startY - dY)
    endX = min(origW, endX + (dX * 2))
    endY = min(origH, endY + (dY * 2))

    # extract the actual padded ROI
    roi = rotate_bound(orig[startY:endY, startX:endX], angle)

    # in order to apply Tesseract v4 to OCR text we must supply
    # (1) a language, (2) an OEM flag of 1, indicating that we
    # wish to use the LSTM neural net model for OCR, and finally
    # (3) a PSM value (taken from args["psm"]); PSM 7 treats the
    # ROI as a single line of text
    config = ("-l spa --oem 1 --psm " + str(args["psm"]))
    text = pytesseract.image_to_string(roi, config=config)

    # strip out punctuation and symbols, then convert to upper case
    pattern = re.compile('([^\s\w]|_)+', re.UNICODE)
    text = pattern.sub('', text).upper()
    #Ignore all empty ocr
    if not text:
        continue
Example #18
            print('Error opening video file')

        w, h = (int(cap.get(3)), int(cap.get(4)))
        #w, h = (720, 1280)
        if w > h:
            transpose = True
        else:
            transpose = False

        frame = 0

        # Store first frame's prediction
        st, im = cap.read()
        if im is not None:
            if transpose:
                im = rotate_bound(im, 90)
            im = cv2.resize(im, (90, 160))
            hist = compute_3_hists(im, hist_zero).reshape(1, 768)
            im_projected = np.dot(eigen_imgs.T,
                                  (hist - mean_vector).T).reshape(
                                      1, num_components)
            dist1, ind1 = classif1.kneighbors(X=im_projected,
                                              n_neighbors=1,
                                              return_distance=True)
            class1 = classes[ind1[0][0]]
            dist2, ind2 = classif2.kneighbors(X=im_projected,
                                              n_neighbors=1,
                                              return_distance=True)
            class2 = classes[ind2[0][0]]
            info = [
                frame, class1,
Example #19
    def recognizeFaces(self):
        # grab the frame from the threaded video stream and resize it
        # to 500px (to speedup processing)
        frame = self.vs.read()
        frame = imutils.rotate_bound(frame, -90)
        frame = imutils.resize(frame, width=500)

        # convert the input frame from (1) BGR to grayscale (for face
        # detection) and (2) from BGR to RGB (for face recognition)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # detect faces in the grayscale frame
        rects = self.detector.detectMultiScale(gray, scaleFactor=1.1,
            minNeighbors=5, minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)

        # OpenCV returns bounding box coordinates in (x, y, w, h) order
        # but we need them in (top, right, bottom, left) order, so we
        # need to do a bit of reordering
        boxes = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

        # compute the facial embeddings for each face bounding box
        encodings = face_recognition.face_encodings(rgb, boxes)
        names = []
        faceFound = 0
        # loop over the facial embeddings
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(self.data["encodings"],
                encoding)
            #name = "Unknown"
            #faceFound = 0

            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                faceFound = 1
                # loop over the matched indexes and maintain a count for
                # each recognized face
                for i in matchedIdxs:
                    name = self.data["names"][i]
                    counts[name] = counts.get(name, 0) + 1

                # determine the recognized face with the largest number
                # of votes (note: in the event of an unlikely tie Python
                # will select first entry in the dictionary)
                name = max(counts, key=counts.get)
                names.append(name)
            #else :
               # faceFound = 0

            # update the list of names


        # loop over the recognized faces
        for ((top, right, bottom, left), name) in zip(boxes, names):
            # draw the predicted face name on the image
            cv2.rectangle(frame, (left, top), (right, bottom),
                (0, 255, 0), 2)
            y = top - 15 if top - 15 > 15 else top + 15
            cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
                0.75, (0, 255, 0), 2)

        # display the image to our screen
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        self.fps.update()
        return names, faceFound
Example #20
def readImage_thread():
    global handle, running, Width, Height, save_flag, acfg, color_mode, save_raw
    global COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, COLOR_BayerGR2BGR, COLOR_BayerBG2BGR
    count = 0
    totalFrame = 0
    time0 = time.time()
    time1 = time.time()
    data = {}
    # cv2.namedWindow("ArduCam Demo", 1)
    counter = 0

    # clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(4, 4))

    frame_h = cfg['frame_height']
    frame_w = cfg['frame_width']
    out = None
    t = time.perf_counter()
    fps = 0
    while running:

        if ArducamSDK.Py_ArduCam_availableImage(handle) > 0:
            rtn_val, data, rtn_cfg = ArducamSDK.Py_ArduCam_readImage(handle)
            datasize = rtn_cfg['u32Size']

            if counter % 10 == 0:
                t2 = time.perf_counter()
                fps = round(10 / (t2 - t), 2)
                t = t2
                reprint(fps)
            if rtn_val != 0:
                print("read data fail!")
                continue

            if datasize == 0:
                continue

            image = convert_image(data, rtn_cfg, color_mode)
            image = imutils.rotate_bound(image, cfg["rotation_angle"])
            kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
            image = cv2.medianBlur(image, 3)
            # image = cv2.filter2D(image, -1, kernel)

            # image = cv2.resize(image, (frame_w, frame_h), interpolation=cv2.INTER_AREA)

            #            digits_area = image[int(image.shape[0] * 0.965):int((1 - 0) * image.shape[0]), int(image.shape[1] * 0):int((1 - 0.5) * image.shape[1]),:]

            # Defines height
            # From XXX to image.shape[1]
            # a1 = [0, int(image.shape[0] * 0.93)]  # 0,896
            # a2 = [0, int((1 - 0) * image.shape[0])]  # 0,964
            #
            # # Defines width
            # # From XXX to image.shape[1]
            # a3 = [int(image.shape[1] * 0.4), int((1 - 0) * image.shape[0])]  # 512,964
            # a4 = [int(image.shape[1] * 0.4), int(image.shape[0] * 0.93)]  # 512,896
            #
            # digits_area = np.array([[a1, a2, a3, a4]], dtype=np.int32)

            # image shape: [H,W]
            # digits area: [W,H]

            #            digits_area = np.array([[[512,964], [0,964], [0,896], [512,896]]], dtype=np.int32)

            #            print(digits_area)

            # 930
            # 964
            # 0
            # 640

            #            cv2.fillConvexPoly(image, np.array(a1, a2, a3, a4, 'int32'), 255)

            #  cv2.fillPoly(image, digits_area, (0, 0, 0))

            if counter == 0:
                filename = datetime.datetime.now().strftime(
                    "%Y-%m-%d_%H-%M-%S") + "_front_top.avi"
                # out = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 8,
                #                       (cfg['output_frame_width'], cfg['output_frame_height']))
                out = cv2.VideoWriter(
                    filename, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 22,
                    (1280, 964))

                # out = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), 8, (640, 480))
                #                out = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc('M', 'J', '2', 'C'), 8, (1280, 964)) #Lossless
                #                out = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc('H', 'F', 'Y', 'U'), 8, (1280, 964)) #Lossless
                reprint("Creating file " + str(filename))

            cv2.putText(image, str(fps), (10, image.shape[0] - 10),
                        cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1,
                        cv2.LINE_AA)
            # ardu = ("Time: " + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12644))[1])) + " ISO: " + str(
            #     (ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12586))[1])) + " lum: " + str(
            #     (ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12626))[1])) + "/" + str(
            #     (ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12546))[1])))
            # cv2.putText(image, ardu, (10, image.shape[0] - 40), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 1,
            #             cv2.LINE_AA)

            # try:
            #     colorconversion = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # except:
            #     colorconversion = image
            #     pass

            cv2.imshow("stream", image)
            cv2.waitKey(5)
            # cv2.resize(image, (640, 480))

            if out is not None:
                out.write(cv2.resize(image, (1280, 964)))

            # out.write(image)

            #            regAddr = int(12644)
            #            val = hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, regAddr)[1])
            #            print("Integration time\t" + str(hex(12644)) + "\t" + str(hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12644))[1])))
            #            print("Gains\t" + str(hex(12586)) + "\t" + str(hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12586))[1])))
            #            print("Mean gain\t" + str(hex(12626)) + "\t" + str(hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12626))[1])))
            #            print("Dark current\t" + str(hex(12680)) + "\t" + str(hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12680))[1])))
            #            print("Frame exposure\t" + "\t" + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12460))[1])))

            #            logger.write(str(datetime.datetime.now().strftime("%Y-%m-%d: %H:%M:%S")) + "\t")
            #            logger.write(str(hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12644))[1])) + "\t")
            #            logger.write(str(hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12586))[1])) + "\t")
            #            logger.write(str(hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12626))[1])) + "\t")
            #            logger.write(str(hex(ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12680))[1])) + "\n")
            #            logger.flush()

            # try:
            #     colorconversion = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            # except:
            #     colorconversion = image
            #     pass
            # for i in range(2):
            #   colorconversion = clahe.apply(colorconversion)

            #            image = image[:,:,0]
            #            print(image.shape)
            #            image = cv2.cvtColor(colorconversion, cv2.COLOR_GRAY2BGR)
            #            print(image.shape)
            #            image = cv2.GaussianBlur(image, (3, 3), 0)

            #            for i in range(image.shape[2]):
            #                image[:,:,i] = colorconversion

            # fh.post_image(colorconversion)
            counter += 1

            if counter == 500:
                out.release()
            #     reprint("Sending file " + str(filename))
            #     threading.Thread(target=fh.post_files, args=[filename]).start()
            #    counter = 0
            #            print("Exposure: " + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12460))[1])) + "\tAcq time: " + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12644))[1])) + "\tGain: " + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12586))[1])) + " lum: " + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12626))[1])) + "/" + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12546))[1]))  + " DC: " + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12680))[1])) + "/" + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12580))[1])))

            #            print("Noise correction\t" + "\t" + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12500))[1])))

            # print(str(regAddr) + "\t" + str(val))
            # ["0x3012","0x0032"] = 12306	50
            # 3012 (hex) = 12306 (dec)
            # 0032 (hex) = 50 (dec)

            #            print(str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12644))[1])) + "\t" + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12586))[1])) + "\t" + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12626))[1])) + "\t" + str((ArducamSDK.Py_ArduCam_readSensorReg(handle, int(12680))[1])))

            # ["0x3012","0x0032"] = 12306	50
            # 3012 (hex) = 12306 (dec)
            # 0032 (hex) = 50 (dec)

            #            if counter == 5:
            #                cimage = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
            #                cv2.imwrite(os.path.join(local_dir, "frame.jpg"), cv2.resize(cimage,(512,384)))
            #                counter = 0
            #            cv2.imwrite(os.path.join(local_dir, "Desktop", "images", str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f") + ".jpg")), image)
            #            counter += 1

            #            cv2.imshow("ArduCam Demo",image)
            #            cv2.waitKey(10)
            ArducamSDK.Py_ArduCam_del(handle)
        else:
            time.sleep(0.001)
Example #21
def getCrop():
    
    cap = cv2.VideoCapture(1)
    #cap.set(3,1024)
    #cap.set(4,768)

    #cv2.namedWindow('image')
    ret, resized = cap.read(1)
    
    resized=cv2.imread("arena.jpg")

    cv2.namedWindow('image')
    cv2.createTrackbar('x1','image',0,resized.shape[1],nothing)
    cv2.createTrackbar('y1','image',0,resized.shape[0],nothing)
    cv2.createTrackbar('x2','image',1,resized.shape[1],nothing)
    cv2.createTrackbar('y2','image',1,resized.shape[0],nothing)

    x1,y1=0,0
    (x2,y2)=resized.shape[:2]
    rot=0

    while(True):
        # Capture frame-by-frame
        ret, resized = cap.read(1)
        resized=cv2.imread("arena.jpg")

        #to know which part we are extracting from the original image, let's draw a rectangle
        black = (0, 0, 0)

        x1 = cv2.getTrackbarPos('x1','image')
        y1 = cv2.getTrackbarPos('y1','image')
        x2= cv2.getTrackbarPos('x2','image')        
        y2 = cv2.getTrackbarPos('y2','image')

        cv2.rectangle(resized,  (x1,y1), (x2, y2), black,2)
        cv2.imshow('mask',resized)
        black = (0, 0, 0)
        cv2.rectangle(resized,  (x1,y1), (x2, y2), black,2)
        if x2>x1 and y2>y1:
            cropped = resized[y1:y2 , x1:x2]
            cv2.imshow('cropped',cropped)
        p=open( "crop.txt","w")
        p.write(str(y1)+','+str(y2)+','+str(x1)+','+str(x2)+','+str(rot))
        p.close()
        #here we extract a rectangular region of the image using the trackbar coordinates
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("crop done")
            cv2.destroyAllWindows()
            break
    cv2.namedWindow('img')
    cv2.createTrackbar('rot','img',0,360,nothing)
    while(True):
        # Capture frame-by-frame
        ret, resized = cap.read(1)
        resized=cv2.imread("arena.jpg")
        cropped = resized[y1:y2 , x1:x2]
        rot = cv2.getTrackbarPos('rot','img')
        resized=imutils.rotate_bound(cropped,rot)
        cv2.imshow('cropped',resized)
        p=open( "crop.txt","w")
        p.write(str(y1)+','+str(y2)+','+str(x1)+','+str(x2)+','+str(rot))
        p.close()
        #here we extract a rectangular region of the image using the trackbar coordinates
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("rotate done")
            break


        #cv2.imwrite('image_test.png',resized)
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    return y1,y2,x1,x2,rot
Example #22
kp_surf, desc_surf = surf.detectAndCompute(image1_gray, None)
kp_surf_mat = create_kp_mat(kp_surf)

print(F"Keypoints found SURF: {len(kp_surf)}")
print(F"Keypoints found SIFT: {len(kp_sift)}")

# %%
angles = np.arange(0, 360 + 1, 15)

# Save all counts in list
count_sift = []
count_surf = []

for angle in tqdm.tqdm(angles, leave=False):
    # Rotate image by angle degrees and find kps
    rot_img = imutils.rotate_bound(image1_gray, angle)

    rot_kp_sift, _ = sift.detectAndCompute(rot_img, None)
    rot_kp_surf, _ = surf.detectAndCompute(rot_img, None)

    rot_kp_mat_sift = create_kp_mat(rot_kp_sift)
    rot_kp_mat_surf = create_kp_mat(rot_kp_surf)

    # Rotate found keypoints in unrotated image by same angle
    org_rot_kp_mat_sift = rotatedPoints(kp_sift, image1_gray, -angle)
    org_rot_kp_mat_surf = rotatedPoints(kp_surf, image1_gray, -angle)

    # Check how many keypoints have matches within 2 pixels distance
    counter_sift, min_vals_sift = check_matches(rot_kp_mat_sift,
                                                org_rot_kp_mat_sift, 2.)
    counter_surf, min_vals_surf = check_matches(rot_kp_mat_surf,
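The helpers create_kp_mat, rotatedPoints and check_matches are not shown. A sketch of what rotatedPoints might do, assuming it maps keypoint coordinates through the same canvas-expanding transform that imutils.rotate_bound applies (the sign convention of the angle argument is an assumption and may need flipping to match the call above):

import cv2
import numpy as np

def rotatedPoints(kps, image, angle):
    # Build the same rotation matrix rotate_bound would use for `angle` ...
    (h, w) = image.shape[:2]
    (cX, cY) = (w / 2.0, h / 2.0)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos, sin = abs(M[0, 0]), abs(M[0, 1])
    nW, nH = int(h * sin + w * cos), int(h * cos + w * sin)
    M[0, 2] += (nW / 2.0) - cX
    M[1, 2] += (nH / 2.0) - cY
    # ... and push every keypoint position through it (homogeneous coordinates).
    pts = np.array([kp.pt for kp in kps])
    pts = np.hstack([pts, np.ones((len(pts), 1))])
    return (M @ pts.T).T  # (N, 2) matrix of rotated point coordinates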
Example #23
 def rotate(self, degrees=0):
     self.image = imutils.rotate_bound(self.image, degrees)
Example #24
import cv2
import imutils
import numpy as np

image = cv2.imread(r'D:\Projects\Tremor\Markers\new_board.bmp')
print(image)
for angle in np.arange(0, 360, 15):
    rotated = imutils.rotate_bound(image, angle)
    cv2.imwrite(
        r"D:\Projects\Tremor\Markers\Board roation\Angle %03.2f.bmp" % angle,
        rotated)
Example #25
def run(filename,const):
	kernel = np.ones((2,2),np.uint8)
	# kernel to dialate thresh
	r=0
	g=0
	b=0

	ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
	np.array([[0, 0, 1, 0, 0],
	          [0, 1, 1, 1, 0],
	          [1, 1, 1, 1, 1],
	          [0, 1, 1, 1, 0],
	          [0, 0, 1, 0, 0]], dtype=np.uint8)
	# kernel for closing

	img = cv2.imread(filename)
	#img = cv2.imread("Images/detailed.png")
	#img = cv2.imread("Images/ISIC_0000016.png")
	#cv2.imshow("original",img)

	closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, ellipse)

	bb = abs(img[:,:,0]-closing[:,:,0])
	gg = abs(img[:,:,1]-closing[:,:,1])
	rr = abs(img[:,:,2]-closing[:,:,2])
	# original vs closing

	slices = np.bitwise_and(bb,np.bitwise_and(gg, rr, dtype=np.uint8), dtype = np.uint8)*255
	slices = cv2.dilate(slices,kernel,iterations = 2)
	# combines channels and then dialates them

	groups = []
	for y in range(len(slices)):
		for x in range(len(slices[0])):
			if(slices[y][x] > 20):
				img[y][x] = closing[y][x]
	# replaces hair pixels with closing image pixels

	for y in range(len(img)):
		for x in range(len(img[0])):
			b+=img[y][x][0]
			g+=img[y][x][1]
			r+=img[y][x][2]
	pix = len(img)*len(img[0])
	r = r/(len(img)*len(img[0])*const)
	g = g/(len(img)*len(img[0])*const)
	b = b/(len(img)*len(img[0])*const)
	#print(b,g,r)
	size = 10
	if(pix < 240 * 240/9*16):
		size = 4
	elif(pix < 480 * 480/9*16):
		size = 6
	elif(pix < 720 * 720/9*16):
		size = 8
	size = 10

	imgbw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
	maxcoordx = 0
	currentx = 255
	currenty = 255
	maxcoordy =0
	for y in range(5,len(img)-5):
		average = 0;
		for x in range(5,len(img[0])-5):
			average += imgbw[y][x]
			#print average
			average=float(average)/float(len(img[0]))
			#print average
			if average < currenty:
				currenty = average
				maxcoordy = y
	
	for x in range(5,len(img[0])-5):
		average = 0
		for y in range(5,len(img)-5):
			average += imgbw[y][x]
		average=float(average)/float(len(img[0]))
		if average < currentx:
			currentx = average
			maxcoordx = x
	#print(maxcoordx,maxcoordy)
	run = True
	current = maxcoordx
	newImg=copy.deepcopy(img)
	while(True):
		if(img[maxcoordy][current][0]<=b and img[maxcoordy][current][1]<=g and img[maxcoordy][current][2]<=r):
			newImg[maxcoordy][current][0] =0
		elif(img[maxcoordy+1][current][0]<=b and img[maxcoordy+1][current][1]<=g and img[maxcoordy+1][current][2]<=r):
                	newImg[maxcoordy+1][current][0] =0
		elif(img[maxcoordy-1][current][0]<=b and img[maxcoordy-1][current][1]<=g and img[maxcoordy-1][current][2]<=r):
			newImg[maxcoordy-1][current][0] = 0
		else:
			break
		#print((img[maxcoordy][current][0],img[maxcoordy][current][1],img[maxcoordy][current][2]))
		#if current ==0 :
		#	break
		current = current - 1
	i = 1000000
	prev = [1,0]
	coords = [current, maxcoordy]
	#print coords
	#[[-1,0],[-1,1],[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1]]
	order = [[-1,0],[-1,-1],[0,-1],[1,-1],[1,0],[1,1],[0,1],[-1,1]]
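	# the 8-connected neighbourhood in a fixed circular order; each step below
	# resumes scanning just past the direction pointing back at the previous
	# pixel (a Moore-style boundary trace)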
	#visited1 =[coords]
	#visited2 = []
	contour =[[current, maxcoordy]]
	approx = 15
	exit = False
	while(i>=0):
		index = order.index(prev)
		for x in range((index+1), (index+9)):
			#print order[x]
			if(abs(coords[0]-current) <2 and abs(coords[1]-maxcoordy)<2 and approx < 0 ):
				exit = True
			point = img[coords[1]+order[x%8][1]][coords[0]+order[x%8][0]]
			#print point
			#tobreak = False
			#coords = [coords[0]+order[x%8][0],coords[1]+order[x%8][1]]
			if(point[0]<=b and point[1]<=g and point[2]<=r):
				#if(False==(coords in visited1)):
                             	#print "hi"
                             	#print "this:", point
			     	newImg[coords[1]+order[x%8][1]][coords[0]+order[x%8][0]][1] = 0
			     
			     	prev[0] = order[x%8][0]
			     	prev[1] = order[x%8][1]
			     	prev[0]=prev[0]*-1
			     	prev[1]=prev[1]*-1
			     	#print order[x%8]
			     	coords = [coords[0]+order[x%8][0],coords[1]+order[x%8][1]]
			     	contour.append([coords[0]+order[x%8][0],coords[1]+order[x%8][1]])
			     	#print coords
			     	#if(coords in visited1):
                             	#    visited2.append(coords)
                             	#else:
                             	#   visited1.append(coords)
			     	#visited1.append(coords)
			     
			     	break
                             	#print point
		
		i = i-1
		approx = approx-1
		if(exit == True):
			break
	#print len(contour)
	ctr = np.array(contour).reshape((-1,1,2)).astype(np.int32)
	cv2.drawContours(img,[ctr],0,(255,255,255),1)
	#print coords
	#below finds center spot via checking of dark sq


	#cv2.imshow("imageedit", newImg)
	
	#if(len(ctr)<100):
	#	run(filename, const+0.1)
	
	(x,y),(MA,ma),angle = cv2.fitEllipse(ctr)
	origangle = angle
	angle = angle/180*math.pi  # fitEllipse returns degrees; convert to radians
	#x = int(x)
	#y = int(y)
	#cv2.ellipse(img, (x,y),(MA,ma),angle)
	#print angle
	tupleEnd = (int(round(x+MA*math.cos(angle)/2)),int(round(y-MA*math.sin(angle)/2)))
	tupleStart = (int(round(x-MA*math.cos(angle)/2)),int(round(y+MA*math.sin(angle)/2)))
	m = round(math.sin(angle)/math.cos(angle))
	b = int(round(y-m*x))
	m= int(m)
	xcenter =x
	ycenter = y
	cv2.line(img, tupleStart,tupleEnd,(255,255,255))
	cv2.line(newImg, tupleStart,tupleEnd,(255,255,255))
	#cv2.imshow("imageedit", newImg)
	#image at this point has contour and major axis
	def retxy(x,y,m,b):
		if(y<x*m+b):
			return 1 
		if(y>x*m+b):
			return 2
		if(y==x*m+b):
			return 3

	asym = np.zeros((len(img),len(img[0]),3), np.uint8)
	asym[:,:,:] = (255,255,255)    
	cv2.drawContours(asym, ctr, -1, (0,0,0), 2)

	mask = np.zeros((len(img)+2,len(img[0])+2,3), np.uint8)
	mask[:,:,:] = (255,255,255)
	cv2.drawContours(asym, ctr, -1, (0,0,0), 2)
	mask = np.zeros(mask.shape[:2], np.uint8)

	cv2.floodFill(asym,mask,(maxcoordx,maxcoordy),(0,0,0))
	perim= cv2.arcLength(ctr,True)
	area = cv2.contourArea(ctr)
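	# isoperimetric (circularity) index below: perimeter^2 / (4*pi*area),
	# equal to 1 for a perfect circle and growing with border irregularity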
	irreg=float(perim)*perim/4/math.pi/area

	#M = cv2.getRotationMatrix2D((len(img[0]),len(img)),origangle,1)
	#dst = cv2.warpAffine(asym,M,(len(img[0]),len(img)))
	dst = imutils.rotate_bound(asym, origangle)

	maskRotate = np.zeros((len(dst)+2,len(dst[0])+2,3), np.uint8)
	maskRotate[:,:,:] = (255,255,255)
	cv2.drawContours(maskRotate, ctr, -1, (0,0,0), 2)
	maskRotate = np.zeros(maskRotate.shape[:2], np.uint8)

	cv2.floodFill(dst,maskRotate,(0,0),(255,255,255))
	cv2.floodFill(dst,maskRotate,(0,len(dst)-1),(255,255,255))
	cv2.floodFill(dst,maskRotate,(len(dst[0])-1,0),(255,255,255))
	cv2.floodFill(dst,maskRotate,(len(dst[0])-1,len(dst)-1),(255,255,255))

	for y in range(len(dst)):
        	for x in range(len(dst[0])):
			remove = True
			if(dst[y][x][0] != 255):
				for j in range(5):
					for i in range(5):
						if(y+j-2>=0 and y+j-2<len(dst) and x+i-2 >=0 and x+i-2<len(dst[0]) and dst[y+j-2][x+i-2][0]==0):
							remove = False
			if(remove == True):
				dst[y][x][0] = 255
				dst[y][x][1] = 255
				dst[y][x][2] = 255

	graydst = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)
	ret,thresh = cv2.threshold(graydst,127,255,cv2.THRESH_BINARY)
	graydst, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
	max = 0
	index=0
	for x in range(len(contours)):
		if(cv2.contourArea(contours[x])>max and cv2.contourArea(contours[x])<len(graydst[0])*len(graydst[0])*0.9):
			max = (cv2.contourArea(contours[x]))
			index = x
	#print index
	#print contours
	#cv2.fitEllipse(contours[0]) 
	#max1 = 
	#for i in range(len(contours)):
	
	(xDst,yDst),(MADst,maDst),angleDst = cv2.fitEllipse(contours[index])
	angleDst = angleDst/180*math.pi  # fitEllipse returns degrees; convert before cos/sin
	graydst = cv2.cvtColor(graydst,cv2.COLOR_GRAY2BGR)

	tupleEndDst = (int(round(xDst+MADst*math.cos(angleDst)/2)),int(round(yDst-MADst*math.sin(angleDst)/2)))
	tupleStartDst = (int(round(xDst-MADst*math.cos(angleDst)/2)),int(round(yDst+MADst*math.sin(angleDst)/2)))
	#leftmost = tuple(contours[index][contours[index][:,:,0].argmin()][0])
	#rightmost = tuple(contours[index][contours[index][:,:,0].argmax()][0])
	topmost = tuple(contours[index][contours[index][:,:,1].argmin()][0])
	bottommost = tuple(contours[index][contours[index][:,:,1].argmax()][0])
	yavg = (bottommost[1]+topmost[1])/2
	cv2.line(graydst, (0,yavg),(len(graydst[0]),yavg),(255,255,255))
	#print cv2.contourArea(contours[0])
	#cv2.drawContours(graydst, contours, 2, (0,255,0), 3)
	currentArea = cv2.contourArea(contours[index])

	top = np.zeros((len(dst),len(dst[0]),3), np.uint8)
	top[:,:,:] = (255,255,255)

	bottom = np.zeros((len(dst),len(dst[0]),3), np.uint8)
	bottom[:,:,:] = (255,255,255)

	comb = np.zeros((len(dst),len(dst[0]),3), np.uint8)
	comb[:,:,:] = (255,255,255)

	#print asym[0][0]
	for y in range(len(graydst)):
		for x in range(len(graydst[0])):
			if(y<yavg):
				bottom[y][x] = graydst[y][x]
			if(y>yavg):
				top[y][x] = graydst[y][x]
			if(y==yavg):
				bottom[y][x] = graydst[y][x]
				top[y][x] = graydst[y][x]
	bottom =cv2.flip(bottom,0)
	bottom = cv2.cvtColor(bottom,cv2.COLOR_BGR2GRAY)
	ret,thresh = cv2.threshold(bottom,127,255,cv2.THRESH_BINARY_INV)
	bottom, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
	bottom = cv2.cvtColor(bottom,cv2.COLOR_GRAY2BGR)
	max = 0
	index=0
	for x in range(len(contours)):
        	if(cv2.contourArea(contours[x])>max and cv2.contourArea(contours[x])<len(graydst[0])*len(graydst[0])*0.9):
                	max = (cv2.contourArea(contours[x]))
                	index = x
	topmost = tuple(contours[index][contours[index][:,:,1].argmin()][0])
	x = index
	while(bottom[topmost[1]][x][0]>0):
		x = x-1
	topLeftBot = [x,topmost[1]]

	#M = np.float32([[1,0,-topLeftBot[0]+200],[0,1,-topLeftBot[1]]])
	#bottom = cv2.warpAffine(bottom,M,(len(bottom[0]),len(bottom)))

	top = cv2.cvtColor(top,cv2.COLOR_BGR2GRAY)
	ret,thresh = cv2.threshold(top,127,255,cv2.THRESH_BINARY_INV)
	top, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
	top = cv2.cvtColor(top,cv2.COLOR_GRAY2BGR)
	max = 0
	index=0
	for x in range(len(contours)):
        	if(cv2.contourArea(contours[x])>max and cv2.contourArea(contours[x])<len(graydst[0])*len(graydst[0])*0.9):
                	max = (cv2.contourArea(contours[x]))
                	index = x
	topmost = tuple(contours[index][contours[index][:,:,1].argmin()][0])
	x = index
	while(top[topmost[1]][x][0]>0):
        	x = x-1
	topLeftTop = [x,topmost[1]]



	M = np.float32([[1,0,-100],[0,1,-topLeftBot[1]+5]])
	bottom = cv2.warpAffine(bottom,M,(len(bottom[0]),len(bottom)))

	M = np.float32([[1,0,-100],[0,1,-topLeftTop[1]+5]])
	top = cv2.warpAffine(top,M,(len(bottom[0]),len(bottom)))

	comb = copy.deepcopy(top)
	for y in range(len(comb)):
        	for x in range(len(comb[0])):
			if(bottom[y][x][0] != 0 and comb[y][x][0] != 0):
				comb[y][x][1] = 0
			if(bottom[y][x][0] != 0 and comb[y][x][0] == 0):
				comb[y][x][0] =255
			if(bottom[y][x][0] == 0 and comb[y][x][0] != 0):
				comb[y][x][1] = 0
				comb[y][x][0] = 0
	finalArea=copy.deepcopy(comb)
	for y in range(len(finalArea)):
        	for x in range(len(finalArea[0])):
			if(finalArea[y][x][0] == 0 and finalArea[y][x][2] == 255 ):
				finalArea[y][x][2]=0
			
			if(finalArea[y][x][0] == 255 and finalArea[y][x][2] == 0 ):
				finalArea[y][x][0]=0
			if(finalArea[y][x][0] == 255 and finalArea[y][x][2] == 255 ):
                        	finalArea[y][x][1]=255

	finalArea = cv2.cvtColor(finalArea,cv2.COLOR_BGR2GRAY)
	ret,thresh = cv2.threshold(finalArea,127,255,cv2.THRESH_BINARY)
	finalArea, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
	finalArea = cv2.cvtColor(finalArea,cv2.COLOR_GRAY2BGR)
	max = 0
	index=0
	for x in range(len(contours)):
        	if(cv2.contourArea(contours[x])>max and cv2.contourArea(contours[x])<len(graydst[0])*len(graydst[0])*0.9):
                	max = (cv2.contourArea(contours[x]))
                	index = x
	x = index

	symArea = cv2.contourArea(contours[index])
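	# symmetry score below: area of the contour extracted above expressed as a
	# percentage of the lesion area found earlier (currentArea)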
	final=float(symArea)/currentArea*100
	#cv2.imwrite(os.path.join(os.path.expanduser('~'),"ScienceFair2018","examples","symmetry",filename+"sym.jpg"),finalArea)
	#print("Border Irregularity: ")
	return [irreg,final]
Example #26
def apply_sprite(image, path2sprite, w, x, y, angle, ontop=True):
    sprite = cv2.imread(path2sprite, -1)
    sprite = rotate_bound(sprite, angle)
    (sprite, y_final) = adjust_sprite2head(sprite, w, y, ontop)
    image = draw_sprite(image, sprite, x, y_final)
Example #27
                    # Reset Click Position Back To Negative
                    click_x = -1
                    click_y = -1

                    # Initialize CSRT Tracker
                    trkCSRT.init(frame, (d_xa, d_ya, d_xb - d_xa, d_yb - d_ya))

                    # Compute SIFT Features For 4 Different Orientations (Separated By 90 Deg.)
                    roi = cv2.cvtColor(frame[d_ya:d_yb, d_xa:d_xb],
                                       cv2.COLOR_BGR2GRAY)
                    #cv2.imshow("0", roi)
                    kptSIFT_src[0], dscSIFT_src[0] = sift.detectAndCompute(
                        roi, None)
                    dims_src[0] = roi.shape[:2]
                    roi = imutils.rotate_bound(roi, 90)
                    #cv2.imshow("90", roi)
                    kptSIFT_src[1], dscSIFT_src[1] = sift.detectAndCompute(
                        roi, None)
                    dims_src[1] = roi.shape[:2]
                    roi = imutils.rotate_bound(roi, 90)
                    #cv2.imshow("180", roi)
                    kptSIFT_src[2], dscSIFT_src[2] = sift.detectAndCompute(
                        roi, None)
                    dims_src[2] = roi.shape[:2]
                    roi = imutils.rotate_bound(roi, 90)
                    #cv2.imshow("270", roi)
                    kptSIFT_src[3], dscSIFT_src[3] = sift.detectAndCompute(
                        roi, None)
                    dims_src[3] = roi.shape[:2]
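
The four nearly identical blocks above precompute SIFT keypoints at 0, 90, 180 and 270 degrees; an equivalent loop-based sketch, assuming list containers rather than the pre-allocated ones used above and the same sift, frame and d_* variables:

kptSIFT_src, dscSIFT_src, dims_src = [], [], []
roi = cv2.cvtColor(frame[d_ya:d_yb, d_xa:d_xb], cv2.COLOR_BGR2GRAY)
for _ in range(4):
    kpt, dsc = sift.detectAndCompute(roi, None)
    kptSIFT_src.append(kpt)
    dscSIFT_src.append(dsc)
    dims_src.append(roi.shape[:2])
    roi = imutils.rotate_bound(roi, 90)   # advance to the next 90-degree orientation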
Example #28
         width = int(rect[1][0])
         height = int(rect[1][1])
         x = np.array([width,height])
         wh = np.append(wh,x)
         
         src_pts = box.astype("float32")
         dst_pts = np.array([[0, height-1],
                             [0, 0],
                             [width-1, 0],
                             [width-1, height-1]], dtype="float32")
         M = cv2.getPerspectiveTransform(src_pts, dst_pts)
         warped = cv2.warpPerspective(img, M, (width, height))
         width  = warped.shape[1]
         height = warped.shape[0]
         if width  >  height :
             warped = imutils.rotate_bound(warped, 90)
             t      =  width
             width  = height
             height = t 
                                    
         m =1    
         subimagename =  imageleterspath + name[0] +'_'+ cats[j]+'_'+ difs[j]+"_"+ str(j)+'.png'
         if np.max(warped) > 0:
             cv2.imwrite( subimagename, warped)
         with open('somefile.txt', 'a') as the_file:
             the_file.write(subimagename + "\t" +str(width) + "\t" + str(height) + "\n" )
             
         
 
 
 for j in range( len(boxes)):
Example #29
def rotationImage(self):
    if self.tmp is not None:
        rotated = imutils.rotate_bound(self.tmp, 15)
        self.setPhoto(rotated)
Example #30
def make_train_set(tileimg):
    src_org = os.path.join('crater_data', 'slices', 'org', tileimg)
    src_mask = os.path.join('crater_data', 'slices', 'mask', tileimg)
    dst = os.path.join('crater_data', 'slices', 'train')
    counter = 0

    # create new directories if necessary
    for imgtype in ['org', 'mask']:
        tgdir = os.path.join(dst, imgtype)
        if not os.path.isdir(tgdir):
            os.makedirs(tgdir)

    # add original samples to train folder.
    for src_filename in glob.glob(os.path.join(src_org, '*.jpg')):

        # read the original image and get size info
        src_img = cv2.imread(src_filename)
        pathinfo = src_filename.split(os.path.sep)
        img_type = pathinfo[-2]  # org or mask
        filename = pathinfo[-1]  # the actual name of the jpg

        #print("img_type: " + img_type);

        rotated90 = imutils.rotate_bound(src_img, 90)
        rotated180 = imutils.rotate_bound(src_img, 180)
        rotated270 = imutils.rotate_bound(src_img, 270)

        dst_filename = os.path.join(dst, 'org', '0_' + filename)
        dst_filename90 = os.path.join(dst, 'org', '90_' + filename)
        dst_filename180 = os.path.join(dst, 'org', '180_' + filename)
        dst_filename270 = os.path.join(dst, 'org', '270_' + filename)

        # normalizing the image?? No. The model requires a 3 channel picture.
        cv2.imwrite(dst_filename, src_img)
        cv2.imwrite(dst_filename90, rotated90)
        cv2.imwrite(dst_filename180, rotated180)
        cv2.imwrite(dst_filename270, rotated270)

        counter += 4

    # add mask samples to train folder.
    for src_filename in glob.glob(os.path.join(src_mask, '*.jpg')):
        # read the original image and get size info
        src_img = cv2.imread(src_filename)
        pathinfo = src_filename.split(os.path.sep)
        img_type = pathinfo[-2]  # org or mask
        filename = pathinfo[-1]  # the actual name of the jpg

        rotated90 = imutils.rotate_bound(src_img, 90)
        rotated180 = imutils.rotate_bound(src_img, 180)
        rotated270 = imutils.rotate_bound(src_img, 270)

        dst_filename = os.path.join(dst, 'mask', '0_' + filename)
        dst_filename90 = os.path.join(dst, 'mask', '90_' + filename)
        dst_filename180 = os.path.join(dst, 'mask', '180_' + filename)
        dst_filename270 = os.path.join(dst, 'mask', '270_' + filename)

        # normalizing the image??
        cv2.imwrite(dst_filename, src_img)
        cv2.imwrite(dst_filename90, rotated90)
        cv2.imwrite(dst_filename180, rotated180)
        cv2.imwrite(dst_filename270, rotated270)

        counter += 4

    for imgtype in ['org', 'mask']:
        tgdir = os.path.join(dst, imgtype, '.DS_Store')
        if os.path.exists(tgdir):
            os.remove(tgdir)

    print("Adding " + str(counter) + " org and mask samples of " + tileimg +
          " to trainining set.")
Example #31
cv2.destroyAllWindows()

print(f"Number of frames = {len(raw_frames)}")

edged: List[imageType] = [get_outlined_image(frame) for frame in raw_frames]
SKEW_list: List[float] = [get_image_skew(edged_frame) for edged_frame in edged]
"""
fixed_frames = []
for num, frame in enumerate(edged):
    skew = SKEW_list[num]
    fixed_frame = imutils.rotate_bound(frame, -skew)
    fixed_frames.append(fixed_frame)
print(len(fixed_frames))
"""
fixed_frames = [
    imutils.rotate_bound(frame, -SKEW_list[num])
    for num, frame in enumerate(edged)
]
"""
CA_list: List[float] = []
for edged_frame in fixed_frames:
    (x, y, w, h) = get_contour_lims(edged_frame)
    ang = calc_contact_angle(w, h)
    CA_list.append(ang)
"""
contour_lims = [get_contour_lims(frame)[2:] for frame in fixed_frames]
CA_list = [calc_contact_angle(w, h) for (w, h) in contour_lims]
widths = [w for (w, h) in contour_lims]
heights = [h for (w, h) in contour_lims]

vid_times = [frame_no / fps for frame_no in range(len(raw_frames))]
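
calc_contact_angle is not shown here; if the drop is treated as a spherical cap, a common estimate from the contour's bounding-box width and height is theta = 2*arctan(2h/w), sketched below under that assumption:

import math

def calc_contact_angle(w: float, h: float) -> float:
    # spherical-cap model: tan(theta/2) = h / (w/2)
    return math.degrees(2.0 * math.atan2(2.0 * h, w))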
Example #32
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
leftEyePts = shape[lStart:lEnd]
rightEyePts = shape[rStart:rEnd]

# compute the center of mass for each eye
leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
rightEyeCenter = rightEyePts.mean(axis=0).astype("int")

# compute the angle between the eye centroids
dY = rightEyeCenter[1] - leftEyeCenter[1]
dX = rightEyeCenter[0] - leftEyeCenter[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180

# rotate the sunglasses image by our computed angle, ensuring the
# sunglasses will align with how the head is tilted
sg = imutils.rotate_bound(sg, angle)

# the sunglasses shouldn't be the *entire* width of the face and
# ideally should just cover the eyes -- here we'll do a quick
# approximation and use 90% of the face width for the sunglasses
# width
sgW = int((endX - startX) * 0.9)
sg = imutils.resize(sg, width=sgW)

# our sunglasses contain transparency (the bottom parts, underneath
# the lenses and nose) so in order to achieve that transparency in
# the output image we need a mask which we'll use in conjunction with
# alpha blending to obtain the desired result -- here we're binarizing
# our mask and performing the same image processing operations as
# above
sgMask = cv2.cvtColor(sgMask, cv2.COLOR_BGR2GRAY)
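
The mask-based alpha blending the comments above describe could look roughly like this; output, y1 and x1 are hypothetical names for the target image and the sprite's top-left placement, and sgMask is assumed to go through the same rotate/resize steps as sg:

sgMask = cv2.threshold(sgMask, 0, 255, cv2.THRESH_BINARY)[1]
sgMask = imutils.rotate_bound(sgMask, angle)
sgMask = imutils.resize(sgMask, width=sgW, inter=cv2.INTER_NEAREST)
alpha = sgMask.astype("float")[:, :, None] / 255.0                  # H x W x 1
roi = output[y1:y1 + sg.shape[0], x1:x1 + sg.shape[1]].astype("float")
blended = alpha * sg.astype("float") + (1.0 - alpha) * roi
output[y1:y1 + sg.shape[0], x1:x1 + sg.shape[1]] = blended.astype("uint8")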
Example #33
        # convert the facial landmark (x, y)-coordinates to a NumPy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        for i in range(1,7):
            (x,y,w,h) = get_face_boundbox(shape, i)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1)

        incl = calculate_inclination(shape[17], shape[26])

        img = cv2.imread("./sprites/doggy_ears.png")
        rows,cols = img.shape[0], img.shape[1]
        M = cv2.getRotationMatrix2D((cols/2,rows/2),incl,1)
        dst = cv2.warpAffine(img,M,(cols,rows))
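        # note: dst above is immediately recomputed with rotate_bound, which
        # enlarges the canvas so the rotated sprite is not clipped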
        dst = rotate_bound(img, incl)
        cv2.imshow('sprite',dst)

        print "Pixels distance points in mouth: ", shape[66][1] - shape[62][1]

        x,y, w, h = rect.left(), rect.top(), rect.width(), rect.height()

        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)

        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
        for (x, y) in shape:
            cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)

    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF