Example #1
def getColour(image, knn, top=False):
    """[Takes in an image and attemps to classify its top or bottom colour by using the pretrained KNN model. it then returns the colour back to the calling function]
    
    Arguments:
        image {[Object]} -- [The input image to be colour classified]
        knn {[Object]} -- [the pretrained KNN model used to classify colour]
    
    Keyword Arguments:
        top {bool} -- [Flag for checking the top colour or the bottom colour] (default: {False})
    
    Returns:
        [String] -- [the colour classified by the KNN]
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    shape = rgb.shape
    if top:
        points = [
            int(shape[1] * 0.75),
            int(shape[0] * 0.365),
            int(shape[1] * 0.755),
            int(shape[0] * 0.37)
        ]
    else:
        points = [
            int(shape[1] * 0.77),
            int(shape[0] * 0.62),
            int(shape[1] * 0.775),
            int(shape[0] * 0.625)
        ]
    cropped = im.cropImage(rgb, points)
    average = np.array([cv2.mean(cropped)[0:3]]).astype(np.float32)
    ret, results, neighbours, dist = knn.findNearest(average, 5)

    return colours[str(int(results[0][0]))]
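Example #1 relies on a pretrained KNN model, an `im.cropImage` helper, and a `colours` lookup defined elsewhere. As a hedged sketch (not the project's actual training code), a cv2.ml.KNearest model of the kind assumed here could be built from mean-colour samples like this, with hypothetical training data:

import cv2
import numpy as np

# hypothetical mean-RGB samples and their class indices
samples = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.float32)
labels = np.array([[0], [1], [2]], dtype=np.float32)

knn = cv2.ml.KNearest_create()
knn.train(samples, cv2.ml.ROW_SAMPLE, labels)
ret, results, neighbours, dist = knn.findNearest(samples[:1], 3)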
Example #2
def whiteBalance(img):
    # gray-world white balance: scale each channel so the channel means become equal
    b, g, r = cv2.split(img)  # cv2.split returns channels in BGR order
    r_avg = cv2.mean(r)[0]
    g_avg = cv2.mean(g)[0]
    b_avg = cv2.mean(b)[0]

    k = (r_avg + g_avg + b_avg) / 3
    kr = k / r_avg
    kg = k / g_avg
    kb = k / b_avg

    r = cv2.addWeighted(src1=r, alpha=kr, src2=0, beta=0, gamma=0)
    g = cv2.addWeighted(src1=g, alpha=kg, src2=0, beta=0, gamma=0)
    b = cv2.addWeighted(src1=b, alpha=kb, src2=0, beta=0, gamma=0)

    balanceImg = cv2.merge([b, g, r])
    return balanceImg
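A hedged usage sketch of the gray-world white balance above, assuming a hypothetical input path and that the image is read in OpenCV's usual BGR order:

import cv2

img = cv2.imread("input.jpg")        # hypothetical path
balanced = whiteBalance(img)
cv2.imwrite("balanced.jpg", balanced)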
Example #3
def others_demo(image):
    mean = cv.mean(image)  # cv.mean(): per-channel mean
    meanStdDev = cv.meanStdDev(
        image)  # cv.meanStdDev(): returns two arrays, the per-channel mean and the per-channel standard deviation
    #dst1 = cv.subtract(image,mean)
    print("mean:")
    print(mean)
    print("meanStdDev:")
    print(meanStdDev)
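For reference, cv.meanStdDev returns the mean and the standard deviation as two separate arrays, so they can be unpacked directly; a small sketch reusing the same `image` argument:

mean, stddev = cv.meanStdDev(image)   # each is a column array with one entry per channel
print(mean.ravel(), stddev.ravel())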
Example #4
def avg_pixel(img: np.ndarray):
    """
    average pixel intensity across the entire image

    Args:
        img: image as numpy array

    Returns:
        tuple including the avg for each color space / dimension
    """
    return cv2.mean(img)
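Note that cv2.mean always returns a 4-element tuple (one value per channel, zero-padded for missing channels), so callers usually slice off what they need; a minimal sketch with a dummy image:

import cv2
import numpy as np

img = np.zeros((10, 10, 3), dtype=np.uint8)   # dummy 3-channel (BGR) image
b, g, r = avg_pixel(img)[:3]                  # drop the unused fourth element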
Example #5
def play_algs():
    for k in range(num_stacks):
        alg_table = [[0 for i in range(num_imgs[k])] for j in range(num_algs)]
        norm_data_table = [[0. for i in range(num_imgs[k])] for j in range(num_algs)]
        time_table = [[0. for i in range(num_imgs[k])] for j in range(num_algs)]

        # Creates a .txt document for each stack with the contrast results of all the algorithms (data<>.txt)
        f2 = open(path_res + "data" + str(k) + ".txt", 'a')
        # Creates a .txt document for each stack with the criteria evaluation (crit_table<>.txt)
        f = open(path_res + "crit_table" + str(k) + ".txt", 'a')
        f.write('Algorithm;Accuracy;Range;False maxima;FWHW;Time\n')

        for i in range(num_imgs[k]):
            # Opens an image
            cadena = path + "S" + str(k + 1) + "/" + str(i + 1) + ".tif"
            img = cv2.imread(cadena, -1)
            p = np.int64(img)
            mean = cv2.mean(img)[0]

            # Calculates the contrast value for all the algorithms, defining the contrast functions
            for j in range(num_algs):
                point1 = (time.time() * 1000)
                alg_table[j][i] = algorithms[j](m, n, thres[j], l, sigma, mean, p, img, hist_range)
                point2 = (time.time() * 1000)
				
                # The execution time for each algorithm is measured, but will not be included in the analysis
                time_table[j][i] = point2 - point1

            print("Done img ", i)
        print("Done stack ", k)

        # Evaluates the resulting contrast functions with the criteria and writes the results in crit_table<>.txt
        for j in range(num_algs):
            crit_table[k][j][:] = np.array((analysis(alg_table[j][:], real_maxs[k], k)))
            f.write(names[j])
            for c in range(num_criteria):
                f.write(';' + str(crit_table[k][j][c]))
            mean_t = np.mean(time_table[j])
            f.write(';' + str(mean_t) + '\n')
        f.close()

        # Normalizes the values of the contrast functions for a better graphic representation, and writes them in data<>.txt
        for j in range(num_algs):
            f2.write(names[j])
            mean2 = np.mean(alg_table[j])
            std2 = np.std(alg_table[j])
            if std2 > 0:
                norm_data_table[j] = (alg_table[j] - mean2) / std2
            for i in range(num_imgs[k]):
                f2.write(';' + str(norm_data_table[j][i]))
            f2.write('\n')
        f2.close()
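The `algorithms` list, the `analysis` helper, and the various globals used above are defined elsewhere in the project. Purely as a hedged illustration of the kind of contrast/focus measure being benchmarked (not the project's actual function signature), the variance of the Laplacian is a common stand-in:

import cv2

def laplacian_focus(img):
    # a sharper (better focused) image generally has a larger Laplacian variance
    return cv2.Laplacian(img, cv2.CV_64F).var()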
Example #6
def pre_dispose(self):  # returns the average HSV/RGB colour of one face
    img = cv2.GaussianBlur(self.frame, (7, 7), 0)
    b, g, r = cv2.split(img)
    avgb = cv2.mean(b)[0]
    avgg = cv2.mean(g)[0]
    avgr = cv2.mean(r)[0]
    # gray-world white balance: scale each channel towards the common mean
    k = (avgb + avgg + avgr) / 3
    kb = k / avgb
    kg = k / avgg
    kr = k / avgr
    b = cv2.addWeighted(src1=b, alpha=kb, src2=0, beta=0, gamma=0)
    g = cv2.addWeighted(src1=g, alpha=kg, src2=0, beta=0, gamma=0)
    r = cv2.addWeighted(src1=r, alpha=kr, src2=0, beta=0, gamma=0)
    img = cv2.merge([b, g, r])
    # equalise the V channel in HSV to normalise brightness
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(img_hsv)
    v = cv2.equalizeHist(v)
    img_hsv = cv2.merge([h, s, v])
    self.img_hsv = img_hsv
    img = cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR)
    self.img_bgr = img
    self.frame = img
Example #7
def gamma_correct(imgs_direc,processed_imgs):
    # 'imgs_direc': directory containing the uncorrected photos
    # 'processed_imgs': directory where the corrected photos will be saved

    i=0

    if not os.path.exists(processed_imgs):
        os.mkdir(processed_imgs)

    acum = 0

    # photos are assumed to be named 0.jpg, 1.jpg, ..., so the counter i is used instead of `file`
    for file in os.listdir(imgs_direc):
        filename = f"{imgs_direc}/{i}.jpg"
        image = cv2.imread(filename)

        # Convert the photo from BGR to HSV and compute the average V (brightness) value,
        # then apply the gamma correction that this brightness range calls for

        hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        v_avg = cv2.mean(hsvImage)
        nameProcessed = f"{processed_imgs}/{i}.jpg"

        if round(v_avg[2]) >= 150:
            gamma_corr_img = np.array(255 * (image / 255)**1.1, dtype='uint8')
            cv2.imwrite(nameProcessed, gamma_corr_img)
        elif round(v_avg[2]) < 150 and round(v_avg[2]) >= 90:
            gamma_corr_img = image
            cv2.imwrite(nameProcessed, gamma_corr_img)
        elif round(v_avg[2]) < 90 and round(v_avg[2]) >= 85:
            gamma_corr_img = np.array(255 * (image / 255)**0.95, dtype='uint8')
            cv2.imwrite(nameProcessed, gamma_corr_img)
        elif round(v_avg[2]) < 85 and round(v_avg[2]) >= 70:
            gamma_corr_img = np.array(255 * (image / 255)**0.9, dtype='uint8')
            cv2.imwrite(nameProcessed, gamma_corr_img)
        elif round(v_avg[2]) < 70 and round(v_avg[2]) >= 60:
            gamma_corr_img = np.array(255 * (image / 255)**0.85, dtype='uint8')
            cv2.imwrite(nameProcessed, gamma_corr_img)
        elif round(v_avg[2]) < 60 and round(v_avg[2]) >= 50:
            gamma_corr_img = np.array(255 * (image / 255)**0.8, dtype='uint8')
            cv2.imwrite(nameProcessed, gamma_corr_img)
        elif round(v_avg[2]) < 50 and round(v_avg[2]) >= 40:  # lower bound assumed for this brightness band
            gamma_corr_img = np.array(255 * (image / 255)**0.75, dtype='uint8')
            cv2.imwrite(nameProcessed, gamma_corr_img)
        else:
            gamma_corr_img = np.array(255 * (image / 255)**0.7, dtype='uint8')
            cv2.imwrite(nameProcessed, gamma_corr_img)
        print('Brightness: ', round(v_avg[2]), 'Photo:', i)
        acum = acum + round(v_avg[2])
        i = i + 1
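A hedged alternative sketch: the per-pixel power operation used above can also be done with a lookup table via cv2.LUT, which avoids the float round-trip for 8-bit images (gamma values as in the branches above):

import cv2
import numpy as np

def apply_gamma(image, gamma):
    # 256-entry lookup table mapping each 8-bit value through the gamma curve
    table = np.array([255 * (v / 255.0) ** gamma for v in range(256)], dtype=np.uint8)
    return cv2.LUT(image, table)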
Example #8
def get_color_knn(image, mask, knn, lookup, nn):
    """get the color of an image using knn method.

    :param image: input image
    :type image: cv2 image
    :param mask: mask to extract color from
    :type mask: cv2 image
    :param train: training data
    :type train: csv filename
    :param nn: k value (nearest neighbours)
    :type nn: int
    :return: color
    :rtype: str
    """

    test = cv2.mean(convert.bgr_to_rgb(image), mask)[:3]
    test = np.float32(np.asarray(test).reshape(1, 3))
    prediction = find_knn(test, knn, nn)
    prediction = lookup[int(prediction[0][0])]
    return prediction
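`find_knn` and `convert.bgr_to_rgb` are project helpers not shown here; a minimal sketch of what `find_knn` presumably wraps, assuming a cv2.ml.KNearest model:

def find_knn(test, knn, nn):
    # return the predicted labels for the test samples using the nn nearest neighbours
    ret, results, neighbours, dist = knn.findNearest(test, nn)
    return results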
Example #9
def get_color(image, mask, color_names, color_values):
    """determine the color from a list of pre-set color names and values.

    :param image: input image
    :type image: cv2 image
    :param mask: mask to extract the mean from
    :type mask: cv2 image
    :param color_names: color names
    :type color_names: list
    :param color_values: corresponding color values
    :type color_values: list
    :return: color
    :rtype: str
    """
    # calculate the mean in LAB colorspace
    mean = cv2.mean(convert.bgr_to_lab(image), mask)[:3]

    # change to np array for distance calculation
    mean = np.asarray(mean).reshape(-1, 1, 3)

    # go through our colors and check distance, shortest distance is the color
    min_dist = (None, np.inf)

    # convert the BGR color values to LAB
    color_values = convert.bgr_to_lab(color_values)

    # loop over the color values
    for i, row in enumerate(color_values):

        # get the distance from this mean to this LAB value
        d = utils.distance(row[0], mean[0][0])

        # if the distance is smaller, then update the smallest
        if d < min_dist[1]:
            min_dist = (i, d)

    # index of the color name was saved in [0]
    color = color_names[min_dist[0]]
    return color
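`utils.distance` and `convert.bgr_to_lab` are helpers from the same project; the distance is presumably a plain Euclidean distance between LAB triplets, e.g. this hedged sketch:

import numpy as np

def distance(a, b):
    # Euclidean distance between two colour triplets
    return np.linalg.norm(np.asarray(a, dtype=float) - np.asarray(b, dtype=float))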
Example #10
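# Hedged sketch of the setup this fragment assumes (the standard OpenCV contour-properties
# recipe): a grayscale image `img` and a contour `cnt` from cv2.findContours, plus:
area = cv2.contourArea(cnt)
hull = cv2.convexHull(cnt)
hull_area = cv2.contourArea(hull)
solidity = float(area) / hull_area
x, y, w, h = cv2.boundingRect(cnt)
aspect_ratio = float(w) / h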
print('Solidity:', solidity)
# Solidity is the ratio of contour area to its convex hull area.

equi_diameter = np.sqrt(4 * area / np.pi)
print('Equivalent_diameter:', equi_diameter)
# Equivalent Diameter is the diameter of the circle whose area is same as the contour area.

(x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
print('x:', x, 'y:', y, '\n', 'MA:', MA, 'ma:', ma, '\n', 'angle:', angle)
# Orientation is the angle at which object is directed. Above method also gives the Major Axis and Minor Axis lengths.

mask = np.zeros(img.shape, np.uint8)
cv2.drawContours(mask, [cnt], 0, 255, -1)
pixelpoints = np.transpose(np.nonzero(mask))  # using Numpy functions
# pixelpoints = cv2.findNonZero(mask) # using OpenCV function
# Numpy gives coordinates in (row, column) format, while OpenCV gives coordinates in (x, y) format, so the answers are effectively swapped: row = y and column = x.

min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(img, mask=mask)
print(min_val, max_val, min_loc, max_loc)
# Maximum Value, Minimum Value and their locations

mean_val = cv2.mean(img, mask=mask)
# Find the average color of an object. Or it can be average intensity of the object in grayscale mode. Use the same mask to do it.

leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])
rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])
topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
bottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])
print(leftmost, rightmost, topmost, bottommost)
# Extreme Points means topmost, bottommost, rightmost and leftmost points of the object.
Example #11
skiprate = skipcount
frameindexs = []
for x in range(1, framecountvideo, skiprate):
    frameindexs.append(x)

print("Frame index array now has " + str(len(frameindexs)))

#get the average color of each frame
#avgs array contains all the averages for each frame
avgs = []
indexpos = 0
i = 0
for i in frameindexs:
    cap.set(1, i)
    ret, frame = cap.read()
    avg = cv2.mean(frame)[:3]
    print("current frame index : " + str(i))
    avgs.append(avg)

#confirming the dimensions for barcode image

barcode_image = "colorcandy.png"
#output image

barcode_width = 1
# width of one colour column in pixels; usually 1, but it can be increased for a wider sample

barcode_height = 300
# height of the barcode image in pixels

barcode = numpy.zeros((barcode_height, len(avgs) * barcode_width, 3),
                      dtype="uint8")
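# Hedged sketch of how the barcode could be filled and saved: one barcode_width-wide
# column per frame average.
for idx, avg in enumerate(avgs):
    barcode[:, idx * barcode_width:(idx + 1) * barcode_width] = avg
cv2.imwrite(barcode_image, barcode)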
Example #12
def main():
    parser = argparse.ArgumentParser(
        description="combine two images with an arrow in between. Spacing and color is automatically decided and is meant to be nice"
    )

    parser.add_argument(
        "image_file_1", action="store", type=str, help="the image file in the left"
    )
    parser.add_argument(
        "image_file_2", action="store", type=str, help="the image file in the right"
    )
    parser.add_argument(
        "--output",
        "-o",
        action="store",
        required=False,
        type=str,
        help="the output image file. (e.g. out.jpg out.png) If omitted, <img1>-<img2>.png will be generated under current directory",
    )

    parser.add_argument(
        "--scale",
        "-s",
        required=False,
        default=1.0,
        action="store",
        type=float,
        help="the scale of the generated image, 1 for no scaling. 0.5 for half the size, etc",
    )

    argv = parser.parse_args()

    img1 = cv2.imread(argv.image_file_1, 1)
    img2 = cv2.imread(argv.image_file_2, 1)

    s_height = min(img1.shape[0], img2.shape[0])
    b_height = max(img1.shape[0], img2.shape[0])

    s_width = min(img1.shape[1], img2.shape[1])
    b_width = max(img1.shape[1], img2.shape[1])

    frame = np.full((b_height, 2 * s_width + b_width, 3), 255, dtype=np.uint8)

    place_on_top(img1, frame, [(b_height - img1.shape[0]) // 2, 0])
    place_on_top(
        img2, frame, [(b_height - img2.shape[0]) // 2, s_width + img1.shape[1]]
    )

    m1 = cv2.mean(img1)
    m2 = cv2.mean(img2)

    mean_color = []
    for i in range(3):
        mean_color.append(int(m1[i] + m2[i]) // 2)

    cv2.arrowedLine(
        frame,
        (img1.shape[1] + s_width // 5, b_height // 2),
        (img1.shape[1] + s_width - s_width // 5, b_height // 2),
        mean_color,
        8,
        tipLength=0.6,
    )

    assert argv.scale > 0, "scale has to be a positive float"
    frame = cv2.resize(
        frame, (int(frame.shape[1] * argv.scale), int(frame.shape[0] * argv.scale))
    )

    outname = argv.output
    if argv.output is not None:
        try:
            cv2.imwrite(argv.output, frame)
        except cv2.error as e:
            print(e, file=stderr)
            print("Failed to save the image")
            print("Did you forget to specify image format to the output file?")
    else:
        default_name = (
            path.splitext(path.basename(argv.image_file_1))[0]
            + "-"
            + path.splitext(path.basename(argv.image_file_2))[0]
            + ".png"
        )
        outname = default_name
        cv2.imwrite(default_name, frame)

    cv2.imshow(outname, frame)
    while cv2.getWindowProperty(outname, cv2.WND_PROP_VISIBLE) == 1:
        if cv2.waitKey(50) != -1:
            break

    cv2.destroyAllWindows()
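`place_on_top` is a helper defined elsewhere in this script; a minimal sketch of what it presumably does (copying an image into the white frame at a given top-left offset):

def place_on_top(img, frame, offset):
    # offset is [row, col] of the top-left corner inside frame
    y, x = offset
    frame[y:y + img.shape[0], x:x + img.shape[1]] = img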
Example #13
# For a color image, each pixel is represented by several values rather than a single one,
# so the number of features grows quickly as the image size increases.
image = cv2.imread('../img/black.jpg', cv2.IMREAD_GRAYSCALE)
image_10x10 = cv2.resize(image, (10, 10))  # resize the image to 10x10 pixels
image_10x10.flatten()  # flatten the image data into a 1-D vector

plt.imshow(image_10x10, cmap="gray"), plt.axis("off")
plt.show()

image_10x10.shape
image_10x10.flatten().shape

## Encoding the mean color as a feature
# Each pixel is a combination of several color channels (RGB); computing the per-channel mean
# yields three column features that describe the image's average color.
image_bgr = cv2.imread("../img/black.jpg", cv2.IMREAD_COLOR)  # load the color image
channels = cv2.mean(image_bgr)  # compute the mean of each channel

# swap blue and red (turn BGR into RGB)
observation = np.array([(channels[2], channels[1], channels[0])])
observation  # inspect the channel means
plt.imshow(observation), plt.axis("off")  # display the image
plt.show()

## Encoding a color histogram as features
image_bgr = cv2.imread("../img/black.jpg", cv2.IMREAD_COLOR)
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)  # convert to RGB
features = []  # list that will hold the feature values
colors = ("r", "g", "b")  # compute a histogram for each color channel

# loop over each channel, compute its histogram, and append it to the feature list
for i, channel in enumerate(colors):
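    # Hedged completion of the truncated loop: the standard per-channel histogram
    # (256 bins over [0, 256]) appended to the feature list.
    histogram = cv2.calcHist([image_rgb], [i], None, [256], [0, 256])
    features.extend(histogram)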
Example #14
    return cv2.merge([l, a, b])


red = np.loadtxt('red.csv')
red = red.astype(np.uint8)
orange = np.loadtxt('orange.csv')
orange = orange.astype(np.uint8)

# for opencv
red = red.reshape(-1, 1, 3)
orange = orange.reshape(-1, 1, 3)

# red = cv2.cvtColor(red, cv2.COLOR_RGB2LAB)
# orange = cv2.cvtColor(orange, cv2.COLOR_RGB2LAB)

mean_r = cv2.mean(red)[:3]
mean_o = cv2.mean(orange)[:3]
mean_r = np.asarray(mean_r).reshape(3, 1, -1)
mean_o = np.asarray(mean_o).reshape(3, 1, -1)
mean_r = mean_r.astype(np.uint8)
mean_o = mean_o.astype(np.uint8)

m, n, o = mean_r
p, q, r = mean_o
print(m, n, o)
print(p, q, r)

red = fix_l(red)
orange = fix_l(orange)

red = red.reshape(3, 1, -1)
Example #15
from PIL import Image as im

barcode_width = 10
barcode_height = 250

avgs = json.loads(open("outputfile").read())
avgs = numpy.array(avgs, dtype="int")

for i, (x, y, z) in enumerate(avgs):
    avgs[i] = (int(x), int(y), int(z))

for i in avgs:
    print(i)

sampleimage = cv2.imread("red.jpg", 1)
cv2.imshow("SampleImage", sampleimage)
cv2.waitKey(0)

# Main problem: converting the array list to tuples.
# Convert the array list to tuples and loop over the rectangle dimensions.

avg = cv2.mean(sampleimage)[:3]
cv2.rectangle(sampleimage, (50, 50), (150, 150), avg, -1)
cv2.imshow("SampleImage", sampleimage)
cv2.waitKey(0)

barcode = numpy.zeros((barcode_height, len(avgs) * barcode_width, 3),
                      dtype="uint8")
cv2.imshow("Barcode", barcode)
cv2.waitKey(0)
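A hedged sketch of the step the comments above describe, drawing one filled rectangle per average colour into the barcode (mirroring the cv2.rectangle call used on the sample image):

for idx, avg in enumerate(avgs):
    color = tuple(int(c) for c in avg)  # cv2.rectangle expects a plain tuple, not a numpy row
    cv2.rectangle(barcode, (idx * barcode_width, 0),
                  ((idx + 1) * barcode_width, barcode_height), color, -1)

cv2.imshow("Barcode", barcode)
cv2.waitKey(0)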