Example #1
0
 def __load__(self, crop_x=(0, -1), crop_y=(0, -1)):
     self.__IMG_input__ = cv2.imread(self.__filename__)
     # Read a grayscale copy and crop it to the requested window
     self.__IMG_gray__ = cv2.imread(self.__filename__, 0)[crop_y[0]:crop_y[1], crop_x[0]:crop_x[1]]
     if len(self.__IMG_input__.shape) != 3:
         # Source was already single-channel; keep only the grayscale copy
         self.__IMG_input__ = None
Example #2
0
def make_sets():
    training_data = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    for emotion in emotions:
        training, prediction = get_files(emotion)
        # Append data to training and prediction list, and generate labels 0-7
        for item in training:
            image = cv2.imread(item)  # open image
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
            clahe_image = clahe.apply(gray)
            landmarks_vectorised = get_landmarks(clahe_image)
            if landmarks_vectorised != "error":
                training_data.append(landmarks_vectorised)  # append image array to training data list
                training_labels.append(emotions.index(emotion))

        for item in prediction:
            image = cv2.imread(item)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            clahe_image = clahe.apply(gray)
            landmarks_vectorised = get_landmarks(clahe_image)
            if landmarks_vectorised != "error":
                prediction_data.append(landmarks_vectorised)
                prediction_labels.append(emotions.index(emotion))

    return training_data, training_labels, prediction_data, prediction_labels
Example #3
0
def template_matching():
    img = cv2.imread('messi.jpg',0)
    img2 = img.copy()
    template = cv2.imread('face.png',0)
    w, h = template.shape[::-1]

    # All the 6 methods for comparison in a list
    methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
            'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']

    for meth in methods:
        img = img2.copy()
        method = eval(meth)

        # Apply template Matching
        res = cv2.matchTemplate(img,template,method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)

        cv2.rectangle(img,top_left, bottom_right, 255, 2)

        plt.subplot(121),plt.imshow(res,cmap = 'gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(122),plt.imshow(img,cmap = 'gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.suptitle(meth)

        plt.show()
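A side note on the eval(meth) line above: it works because each string names an attribute of cv2, but looking the attribute up with getattr avoids evaluating arbitrary strings. A minimal, behavior-equivalent sketch:

        # inside the loop, instead of method = eval(meth):
        method = getattr(cv2, meth.split('.')[-1])  # 'cv2.TM_CCOEFF' -> cv2.TM_CCOEFF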
Example #4
0
def main():
    im1 = cv2.imread('/Users/grub/Desktop/Yale/BabyBlendr/python/image.jpg')
    im2 = cv2.imread('/Users/grub/Desktop/Yale/BabyBlendr/python/image3.jpg')

    avgR1, avgG1, avgB1 = getRGB(im1)
    avgR2, avgG2, avgB2 = getRGB(im2)

    avgR = (avgR1+avgR2)/2
    avgG = (avgG1+avgG2)/2
    avgB = (avgB1+avgB2)/2
    result = np.array([avgR, avgG, avgB])

    light = np.array([255, 218, 190])
    medium = np.array([180, 138, 120])
    dark = np.array([60, 46, 40])
    
    light_diff = np.linalg.norm(result-light)
    medium_diff = np.linalg.norm(result-medium)
    dark_diff = np.linalg.norm(result-dark)

    result_diff = [light_diff, medium_diff, dark_diff]

    if min(result_diff) == light_diff:
        print("Light skin tone")
    elif min(result_diff) == medium_diff:
        print("Medium skin tone")
    elif min(result_diff) == dark_diff:
        print("Dark skin tone")
Example #5
0
def coolBlack():
    IMAGE_WEIGHT = 0.5

    image = cv2.imread("G:/Filters/wasim.jpg",0)
    black = cv2.imread("G:/Filters/black5.jpg",0)
    black = cv2.resize(black, image.shape[::-1])

    res1 = cv2.addWeighted(image, IMAGE_WEIGHT, black, 1 - IMAGE_WEIGHT, 1)


    #NORMALIZE IMAGES
    image = np.float32(image)
    black = np.float32(black)

    image /= 255
    black /= 200

    res = image * black

    cv2.imshow("RES", res)
    cv2.waitKey(0)

    fname = "G:/Filters/temp.jpg"
    # res is float in [0, 1]; rescale before writing, or the saved JPEG is black
    cv2.imwrite(fname, (res * 255).astype(np.uint8))
    res = cv2.imread(fname, 0)

    cv2.imshow("BLACK", res)
    cv2.waitKey(0)
Example #6
0
    def test_n_largest_area_contours_images__with_invert(self):
        # given
        image = cv2.imread("./images/SnipNLargestAreaContours/"
                           "test_n_largest_area_contours_image__with_invert__input_image.png",
                           cv2.IMREAD_GRAYSCALE)
        n = 2
        invert_flag = True
        snip_n_largest_area_contours = SnipNLargestAreaContours(image, n, invert_flag)

        expected_largest_contour_image_1 = cv2.imread(
            "./images/SnipNLargestAreaContours/test_n_largest_area_contours_images__with_invert__snipped_image_1.png",
            flags=cv2.IMREAD_GRAYSCALE)
        expected_largest_contour_image_2 = cv2.imread(
            "./images/SnipNLargestAreaContours/test_n_largest_area_contours_images__with_invert__snipped_image_2.png",
            flags=cv2.IMREAD_GRAYSCALE)

        expected_n_largest_area_contours_images = [expected_largest_contour_image_1, expected_largest_contour_image_2]

        # when
        actual_n_largest_area_contours_images = snip_n_largest_area_contours.n_largest_area_contours_images

        # then
        self.assertEqual(np.array_equal(actual_n_largest_area_contours_images[0],
                                        expected_n_largest_area_contours_images[0]), True)
        self.assertEqual(np.array_equal(actual_n_largest_area_contours_images[1],
                                        expected_n_largest_area_contours_images[1]), True)
Example #7
0
def Color_Features_Extract(img_folder):
    print("Color_Features_Extract Start")
    starttime = datetime.datetime.now()

    back = np.array([255,128,128])

    # Count images in the folder being processed (indexes Image_Color_Features below)
    image_num = len(os.listdir(img_folder))

    Color_Features = []

    for index, image_name in enumerate(os.listdir(img_folder)):
        image_path = img_folder + "/" + image_name
        image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2LAB)
        rows, columns, lab = image.shape

        # Make densely-sampling color features
        pixel_index = 0

        for x in range(rows):
            for y in range(columns):
                if pixel_index % 9 == 0 and not np.array_equal(image[x][y], back):
                    Color_Features.append(image[x][y].tolist())
                pixel_index += 1

    # Get CodeBook of Color_Features
    Color_Features = np.float32(Color_Features)

    # Define criteria = ( type, max_iter = 10 , epsilon = 1.0 )
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)

    # Set flags (Just to avoid line break in the code)
    flags = cv2.KMEANS_RANDOM_CENTERS

    # Apply KMeans
    compactness,labels,centers = cv2.kmeans(Color_Features,800,None,criteria,10,flags)

    Image_Color_Features = [[0 for x in range(800)] for y in range(image_num)]

    color_index = 0

    for image_index, image_name in enumerate(os.listdir(img_folder)):
        image_path = img_folder + "/" + image_name
        image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2LAB)
        rows, columns, lab = image.shape

        pixel_index = 0

        for x in range(rows):
            for y in range(columns):
                if pixel_index % 9 == 0 and not np.array_equal(image[x][y], back):
                    Image_Color_Features[image_index][int(labels[color_index])] += 1
                    color_index += 1
                pixel_index += 1
        print(image_name)

    endtime = datetime.datetime.now()
    print "Time: " + str((endtime - starttime).seconds) + "s"
    print "Color_Features_Extract End"

    return Image_Color_Features
Example #8
0
File: cjpeg.py Project: Ziul/custom-jpeg
    def __init__(self, filename):
        super(CustomJpeg, self).__init__()
        self.filename = filename
        # 0 is to read as grayscale
        self.figure = cv2.imread(self.filename, 0)
        # convert image BGR -> YCrCb
        self.figure_ycbcr = cv2.cvtColor(
            cv2.imread(self.filename), cv2.COLOR_BGR2YCR_CB)
        # channel order for COLOR_BGR2YCR_CB is Y, Cr, Cb
        self.y, self.cr, self.cb = self._split_channel_(self.figure_ycbcr)

        if not _options.output:
            self.output_filename = self.filename.replace(
                self.filename.split('.')[-1], 'cjpeg')
        else:
            self.output_filename = _options.output
        if self.figure is None:
            raise FileNotFound(self.filename)
        self.shape = self.figure.shape
        self.pixs = _options.size
        self.scrambled = np.array([])

        self.bitarray = Bitset()
        self.bitarray.name = self.output_filename
        self.bitarray.verbose = False
Example #9
0
def realisticTexturemap(H_G_M, scale):
    map_img = cv2.imread('Images/ITUMap.bmp')
    point = getMousePointsForImageWithParameter(map_img, 1)[0]

    texture = cv2.imread('Images/ITULogo.jpg')
    #texture = cv2.cvtColor(texture,cv2.COLOR_BGR2GRAY)
    H_T_M = np.zeros(9).reshape(3,3)
    H_T_M[0][0] = scale
    H_T_M[1][1] = scale

    H_T_M[0][2] = point[0]
    H_T_M[1][2] = point[1]

    H_T_M[2][2] = 1

    H_M_G = np.linalg.inv(H_G_M)

    H_T_G = np.dot(H_M_G, H_T_M)

    fn = "GroundFloorData/sunclipds.avi"
    cap = cv2.VideoCapture(fn)
    #load Tracking data
    running, frame = cap.read()
    while running:
        h, w, d = frame.shape

        warped_texture = cv2.warpPerspective(texture, H_T_G, (w, h))

        result = cv2.addWeighted(frame, .6, warped_texture, .4, 50)

        cv2.imshow("Result", result)
        cv2.waitKey(0)
        # Read the next frame; when the video ends, running is False and frame is None
        running, frame = cap.read()
Example #10
0
def camactivate ():

	with picamera.PiCamera() as camera:
		camera.resolution = (512,512)
		time.sleep(2)
		camera.capture('im1.jpg')
		time.sleep(2)
		camera.capture('im2.jpg')
		time.sleep(2)
		camera.capture('im3.jpg')
		time.sleep(2)
		camera.capture('im4.jpg')

	im1=cv2.imread('im1.jpg',1)
	im2=cv2.imread('im2.jpg',1)
	im3=cv2.imread('im3.jpg',1)
	im4=cv2.imread('im4.jpg',1)

	cv2.putText(im1,'Cam1',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im2,'Cam2',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im3,'Cam3',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)
	cv2.putText(im4,'Cam4',(10,20),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),2)


	cv2.namedWindow('Concatenated Images',cv2.WINDOW_NORMAL)
	concat=np.zeros((1024,1024,3),np.uint8)
	concat[0:512,0:512,:]=im1
	concat[0:512,512:1024,:]=im2
	concat[512:1024,0:512,:]=im3
	concat[512:1024,512:1024,:]=im4

	cv2.imshow('Concatenated Images',concat)
	cv2.imwrite('concat.jpg',concat)
	cv2.waitKey(0)
Example #11
0
    def __init__(self, load_models=True):

        self.api_key = self.__read_api_key()
        self.__load_models = load_models
        if not load_models:
            return

        print("... start building the models")
        t1 = time.perf_counter()

        self.__batch_size = 50
        self.__img_dim_80 = 80
        self.__img_dim_28 = 28

        # some images needed for visualization
        img_sc_prohib = cv2.imread("D:\\_Dataset\\UK\\sc_prohibitory.png", cv2.IMREAD_UNCHANGED)
        img_sc_mandat = cv2.imread("D:\\_Dataset\\UK\\sc_mandatory.png", cv2.IMREAD_UNCHANGED)
        img_sc_warning = cv2.imread("D:\\_Dataset\\UK\\sc_warning.png", cv2.IMREAD_UNCHANGED)
        self.__sc_imgs = (img_sc_warning, img_sc_prohib, img_sc_mandat)

        # load the models once and for all
        prohib_recog_model_path = "D:\\_Dataset\\GTSRB\\cnn_model_p_80.pkl"
        mandat_recog_model_path = "D:\\_Dataset\\GTSRB\\cnn_model_m_80.pkl"
        prohib_detec_model_path = "D:\\_Dataset\\GTSDB\\las_model_p_80_binary.pkl"
        mandat_detec_model_path = "D:\\_Dataset\\GTSDB\\las_model_m_80_binary.pkl"
        superclass_recognition_model_path = "D:\\_Dataset\\SuperClass\\cnn_model_28_lasagne.pkl"

        # build the model once and for all
        self.__detect_net_p = self.__build_detector(prohib_recog_model_path, prohib_detec_model_path, self.__batch_size)
        self.__detect_net_m = self.__build_detector(mandat_recog_model_path, mandat_detec_model_path, self.__batch_size)
        self.__recog_superclass_cnn = self.__build_classifier(superclass_recognition_model_path)

        t2 = time.perf_counter()
        duration = t2 - t1
        print("... finish building the models, time(sec.): %f" % duration)
Example #12
0
def listener():
    global fnobj
    rospy.init_node('reconocimiento', anonymous=True)
    rospy.Subscriber("chatter", coordenadas, callback)

    rate = rospy.Rate(50)  # hz
    cap = cv2.VideoCapture(0)
    fnobj = 'logo.png'
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)
    feature_name = opts.get('--feature', 'sift')

    while not rospy.is_shutdown():

        ret, frame = cap.read()
        # x, y, w, h are globals set by the "chatter" callback
        crop_img = frame[y:h, x:w]
        cv2.imwrite("full.png", crop_img)

        img1 = cv2.imread('full.png', 0)
        img2 = cv2.imread(fnobj, 0)
        detector, matcher = init_feature(feature_name)
        if detector is not None:
            print('using', feature_name)
        else:
            print('unknown feature:', feature_name)
            sys.exit(1)

        kp1, desc1 = detector.detectAndCompute(img1, None)
        kp2, desc2 = detector.detectAndCompute(img2, None)
        print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

        match_and_draw('analisis', matcher, desc1, desc2, kp1, kp2, img1, img2)
        cv2.waitKey(1)
Example #13
0
def test():
    # 3 cards on flat table
    cards_3 = cv2.imread('images/set-3-texture.jpg')

    # 5 cards at an angle
    cards_5 = cv2.imread('images/set-5-random.jpg')

    thresh_3 = s.get_binary(cards_3)
    contours = s.find_contours(thresh_3, 3)

    assert len(s.transform_cards(cards_3, contours, 3)) == 3

    res5 = s.detect_cards(cards_5)
    assert res5 is not None and len(res5) == 5

    res3 = s.detect_cards(cards_3)
    assert res3 is not None and len(res3) == 3

    for i in range(len(res5)):
        c = res5[i]
        # util.show(c, 'card')
        cv2.imwrite('images/cards/card-5-%d.jpg' % i, c)

    for i in range(len(res3)):
        c = res3[i]
        # util.show(c, 'card')
        cv2.imwrite('images/cards/card-3-%d.jpg' % i, c)

    # for cards detected, get properties
    for link in os.listdir('images/cards'):
        img = cv2.imread('images/cards/%s' % link)
        test_props(img)
    print('tests pass')
Example #14
0
def get_diffs( file_img1, file_img2, min_pts ):
    # CHECK: file existence
    if not (os.path.isfile(file_img1) and os.path.isfile(file_img2)):
        return -1

    # read in images
    img1 = cv2.imread( file_img1 )
    img2 = cv2.imread( file_img2 )

    # CHECK: img size equality
    if img1.shape != img2.shape:
        return -2

    # get threshold of the difference between the images
    ret, im_diff = cv2.threshold( (img1 - img2), 0,255, cv2.THRESH_BINARY)

    # collapse image into a single channel (the third positional argument of
    # cv2.add is dst, so the original call silently overwrote channel 2)
    b, g, r = cv2.split(im_diff)
    im_diff = cv2.add(cv2.add(b, g), r)

    # find groups of contiguous points where images are different
    cont, _ = cv2.findContours( im_diff, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE )

    # put all contours with more than min_pts points into JSON 
    d = {'diff':[]}
    for c in cont:
        if len(c) > min_pts:
            momnt = cv2.moments(c)
            d['diff'].append([momnt['m10']/momnt['m00'],momnt['m01']/momnt['m00']])

    # dump JSON to stdout
    json.dump(d,sys.stdout)
Example #15
0
def normalize_cv(image1,image2,compare_dir):
    #img = Image.open(image1)
    #height < width
    width_heights = []
    width_heights.append(["long_dir/",(100,150)]) 
    #width < height
    width_heights.append(["wide_dir/",(150,100)])
    #equal
    width_heights.append(["equal_dir/",(150,150)])
    #= img.size
    #resize comparison image
    for width_height in width_heights:
        if not os.path.exists(width_height[0]):
            os.mkdir(width_height[0])
        os.chdir(width_height[0])
        img = cv2.imread(image1)
        resized_image1 = cv2.resize(img, width_height[1])
        new_path1 = path_append(image1,width_height[0])
        cv2.imwrite(new_path1,resized_image1)
        img2 = cv2.imread(image2)
        resized_image2 = cv2.resize(img2,width_height[1])
        new_path2 = path_append(image2,width_height[0])
        cv2.imwrite(new_path2,resized_image2)
        os.chdir("../")
        #resize test directory
        for pic in glob(compare_dir+"*"):
            if os.path.isfile(pic):
                full_pic = os.path.abspath(pic)
                im = cv2.imread(full_pic)
                resized_image = cv2.resize(im, width_height[1])
                new_path = path_append(pic,width_height[0])
                cv2.imwrite(new_path,resized_image)
Example #16
0
File: img.py Project: ftyszyx/tools
    def getMultiTemplePos(self,srcPicPath,templePicPath):
        print("srcpath",srcPicPath,"temppath",templePicPath)
        img_src=cv2.imread(srcPicPath)
        img_src_gray=cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
        srcw,srch=img_src_gray.shape[::-1]
        print("get pic:",srcw,srch)
        img_temple=cv2.imread(templePicPath)
        img_temple_gray=cv2.cvtColor(img_temple, cv2.COLOR_BGR2GRAY)
        templew,templeh=img_temple_gray.shape[::-1]
        res = cv2.matchTemplate(img_src_gray,img_temple_gray,cv2.TM_CCOEFF_NORMED) 
        # print("get temple",res)
        # cv2.imshow('src',img_src_gray)
        # cv2.imshow('temple',img_temple_gray)
        # cv2.waitKey(0)

        threshold = 0.7 
        loc = np.where( res >= threshold)
        print(loc)
        # zipres=zip(*loc[::-1])
        # print("zipres",zipres)
        # if len(zipres)==0:
        #     return False,None,None,None
        # else:
        #     return True,zipres[0],templew,templeh
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_src, pt, (pt[0] + templew, pt[1] + templeh),(7,249,151), 2)   
        cv2.imshow('Detected',img_src)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #17
0
File: get_data.py Project: Suluo/Kaggle
def pro_progess(filepath="../data"):
    height = 299
    train_files = os.listdir(filepath + '/train')
    train = np.zeros((len(train_files), height, height, 3), dtype=np.uint8)
    # one label per training file: 1 for 'dog' files, 0 otherwise
    labels = [1 if fname[:3] == 'dog' else 0 for fname in train_files]

    test_files = os.listdir(filepath + '/test')
    test = np.zeros((len(test_files), height, height, 3), dtype=np.uint8)

    for i in tqdm(range(len(train_files))):
        filename = filepath + '/train/' + train_files[i]
        img = cv2.imread(filename)
        img = cv2.resize(img, (height, height))
        train[i] = img[:, :, ::-1]

    for i in tqdm(range(len(test_files))):
        filename = filepath + '/test/' + test_files[i]
        img = cv2.imread(filename)
        img = cv2.resize(img, (height, height))
        test[i] = img[:, :, ::-1]

    print('Training Data Size = %.2f GB' % (sys.getsizeof(train) / 1024 ** 3))
    print('Testing Data Size = %.2f GB' % (sys.getsizeof(test) / 1024 ** 3))
    X_train, X_val, y_train, y_val = train_test_split(
        train, labels, shuffle=True, test_size=0.2, random_state=42)
    return X_train, X_val, y_train, y_val
Example #18
0
def register_image(file_path, reference_form_path, output_path, result_writer, config_file):
    reference = cv2.imread(reference_form_path, 0)
    logging.info("read reference %s", reference_form_path)
    orb = cv2.SIFT()  # despite the variable name, this creates a SIFT detector
    kp2, des2 = orb.detectAndCompute(reference, None)

    image = cv2.imread(file_path, 0)
    logging.info("read uploaded image %s", file_path)
    kp1, des1 = orb.detectAndCompute(image, None)
    logging.info("detected orb")
    bf = cv2.BFMatcher(cv2.NORM_L2)
    raw_matches = bf.knnMatch(des1, trainDescriptors=des2, k=2)
    logging.info("knn matched")
    matches = filter_matches(kp1, kp2, raw_matches)
    mkp1, mkp2 = zip(*matches)
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    logging.info("starting RANSAC")
    homography_transform, mask = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
    logging.info("RANSAC finished")
    homography, transform = check_homography(homography_transform)

    good_enough_match = check_match(homography, transform)

    h, w = reference.shape
    image_transformed = cv2.warpPerspective(image, homography_transform, (w, h))
    logging.info("transformed image")
    head, file_name = os.path.split(file_path)

    transformed_image = write_transformed_image(image_transformed, homography, transform, good_enough_match, file_name,
                                                output_path, result_writer)
    logging.info("transformed %s", transformed_image)
    return create_response(transformed_image, good_enough_match, config_file)
Example #19
0
File: img.py Project: ftyszyx/tools
 def getOneTemplePos(self,srcPicPath,templePicPath):
     # print(srcPicPath,templePicPath)
     img_src=cv2.imread(srcPicPath)
     img_src_gray=cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
     srcw,srch=img_src_gray.shape[::-1]
     print("img_src gray",srcw,srch)
     img_temple=cv2.imread(templePicPath)
     img_temple_gray=cv2.cvtColor(img_temple, cv2.COLOR_BGR2GRAY)
     templew,templeh=img_temple_gray.shape[::-1]
     print("temple gray",templew,templeh)
     # cv2.imshow('rgb',img_src)
     # cv2.imshow('gray',img_src_gray)
     # cv2.imshow('template',img_temple_gray)
     # cv2.waitKey(0)
     # cv2.destroyAllWindows()
     res = cv2.matchTemplate(img_src_gray,img_temple_gray,method) 
     min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
     print(min_val, max_val, min_loc, max_loc)
     # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
     if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
         top_left = min_loc
     else:
         top_left = max_loc
     bottom_right = (top_left[0] + templew, top_left[1] + templeh)
     cv2.rectangle(img_src,top_left, bottom_right, 255, 2)
     print(top_left, bottom_right)
Example #20
0
def find_hostiles(screen):
    img_rgb = cv2.imread(screen)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    neut_img = cv2.imread('neut.png', 0)
    red_img = cv2.imread('red.png', 0)
    neut2_img = cv2.imread('neut2.png', 0)

    w, h = red_img.shape[::-1]

    neut_match = cv2.matchTemplate(img_gray, neut_img, cv2.TM_CCOEFF_NORMED)
    neut2_match = cv2.matchTemplate(img_gray, neut2_img, cv2.TM_CCOEFF_NORMED)
    red_match = cv2.matchTemplate(img_gray, red_img, cv2.TM_CCOEFF_NORMED)

    threshold = 0.99

    loc_neut = numpy.where(neut_match >= threshold)
    loc_neut2 = numpy.where(neut2_match >= threshold)
    loc_red = numpy.where(red_match >= threshold)

    total_match = len(loc_neut[0]) + len(loc_red[0]) + len(loc_neut2[0])

    if total_match > 0:
        for pt in zip(*loc_red[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        for pt in zip(*loc_neut[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        for pt in zip(*loc_neut2[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)

        cv2.imwrite('res.png', img_rgb)

    return total_match
Example #21
0
def read_image(img_path, image_dims=None, mean=None):
    """
    Reads an image from file path or URL, optionally resizing to given image dimensions and
    subtracting mean.
    :param img_path: path to file, or url to download
    :param image_dims: image dimensions to resize to, or None
    :param mean: mean file to subtract, or None
    :return: loaded image, in RGB format
    """

    from urllib.request import urlretrieve

    filename = img_path.split("/")[-1]
    if img_path.startswith('http'):
        urlretrieve(img_path, filename)
        img = cv2.imread(filename)
    else:
        img = cv2.imread(img_path)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    if image_dims is not None:
        img = cv2.resize(img, image_dims)  # resize to image_dims to fit model
    img = np.rollaxis(img, 2) # change to (c, h, w) order
    img = img[np.newaxis, :]  # extend to (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]  # extend to (n, c, 1, 1)
        img = img.astype(np.float32) - mean # subtract mean

    return img
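A hedged usage sketch for read_image; the file name and mean values below are illustrative placeholders, not from the original:

img = read_image("test.jpg", image_dims=(224, 224), mean=[104.0, 117.0, 123.0])
print(img.shape)  # (1, 3, 224, 224): batch, channels, height, width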
Example #22
0
def procesamiento_imagen():
	## Convert to grayscale
	img = Image.open(rostro).convert('LA')
	img.save('greyscale.png')

	## Resize

	foo = Image.open("greyscale.png")
	foo = foo.resize((256,256),Image.ANTIALIAS)
	foo.save("greyscale.png",optimize=True,quality=95)	


	## Remove noise
	img = cv2.imread('greyscale.png')
	dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)

	## Canny detector
	img = cv2.imread('greyscale.png',0)
	edges = cv2.Canny(img,256,256)

	plt.subplot(121),plt.imshow(img,cmap = 'gray')
	plt.title('Original Image'), plt.xticks([]), plt.yticks([])
	plt.subplot(122),plt.imshow(edges,cmap = 'gray')
	plt.title('Edge Image'), plt.xticks([]), plt.yticks([])

	plt.show()
Example #23
0
def tratamento_img(img):

	kernel = np.ones((7,4),np.uint8)

	img = cv2.imread(img,0)

	ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
	titles = ['BINARY']
	images = [thresh1]
	img = plt.imshow(images[0],'gray')
	plt.xticks([]),plt.yticks([])
	plt.savefig("pretobranco", dpi=100)
	#plt.show()

	img = cv2.imread("binaria.png",0)

	dilate = cv2.dilate(img,kernel,iterations = 1)
	cv2.imwrite('dilate.png',dilate)

	img = cv2.imread("dilate.png",0)

	erosion = cv2.erode(img,kernel,iterations = 1)
	cv2.imwrite('erosion.png',erosion)
	
	img = "dilate.png"
	
	os.system("convert erosion.png erosion.pgm")
	os.system("convert dilate.png dilate.pgm")
	
	k = cv2.waitKey(0)
	
	return True
Example #24
0
def baxter_sleep():
    """
    Send the sleep-animation frames to the head display on Baxter.
    """
    pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)
    frames = ["sleep1.png", "sleep2.png", "sleep3.png", "sleep4.png"]
    # Sleep after each publish to allow the image to be displayed.
    delays = [0.5, 0.17, 0.17, 0.17]
    for frame, delay in zip(frames, delays):
        img = cv2.imread("$YOUR_BAXTER_WORKSPACE/src/my_baxter/BaxterFace/" + frame)
        msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
        pub.publish(msg)
        rospy.sleep(delay)
Example #25
0
def preprocess_training(image_dict, size=(40,100), norm_width=40):
  processed = defaultdict(list)
  widths = defaultdict(list)
  avg_widths = defaultdict(list)
  avg_width_list = []
  for bond_type in image_dict.keys():
    imgs = image_dict[bond_type]
    for img in imgs:
      im = cv2.imread(img,0)
      widths[bond_type].append(im.shape[1])
  for key in widths:
    avg_width_list.append(np.mean(widths[key]))
    avg_widths[key] = np.mean(widths[key])
  max_width = max(avg_width_list)
  for bond_type in image_dict.keys():
    imgs = image_dict[bond_type]
    for img in imgs:
      im = cv2.imread(img,0)
      ret, im = cv2.threshold(im, THRESH_VAL, 255, cv2.THRESH_BINARY_INV)
      border = max(int((max_width-im.shape[1])/2),0)
      im = cv2.copyMakeBorder(im,0,0,border,border,cv2.BORDER_CONSTANT,0)
      im = cv2.resize(im,size)
      im = cv2.GaussianBlur(im,(5,5),5)
      #plt.imshow(im, cmap="Greys_r")
      #plt.show()
      center = im[20:80,:]
      processed[bond_type].append(center)
  return processed
Example #26
0
 def __init__(self,filename,imageL='tmp/inputL.jpg',imageR='tmp/inputR.jpg'):
     print('model created')
     self.filename = filename
     try: #make sure OpenCV can actually process the images
         self.testL = cv2.pyrDown(cv2.imread(imageL))
         self.testR = cv2.pyrDown(cv2.imread(imageR))
         assert (self.testR.size == self.testL.size)
         self.width,self.height = self.testL.shape[:2] #scales down
         
         
         #convert image to smaller size
         loadedImageL = cv.LoadImage(imageL,cv.CV_LOAD_IMAGE_COLOR)
         destL = cv.CreateImage((self.height,self.width), 8, 3)
         # 8 = bits, 3 = color
         cv.Resize(loadedImageL,destL,interpolation=cv.CV_INTER_AREA)
         cv.SaveImage(imageL, destL)
         
         #convert image to smaller size
         loadedImageR = cv.LoadImage(imageR,cv.CV_LOAD_IMAGE_COLOR)
         destR = cv.CreateImage((self.height,self.width), 8, 3)
         cv.Resize(loadedImageR,destR,interpolation=cv.CV_INTER_AREA)
         cv.SaveImage(imageR, destR)
         
         self.imageL = cv2.pyrDown(cv2.imread(imageL))
         self.imageR = cv2.pyrDown(cv2.imread(imageR))
     except Exception as e:
         print(e)
         quit()
Example #27
0
 def run(self, args):
     if not args:
         print('Invalid option.')
     elif args[0] in ['attack', 'spell', 'tokugi']:
         # Field mode for "attack", "spell", or "tokugi"
         self.field_mode(args[0])
     elif args[0] == 'slot':
         # Slot game mode
         self.slot_mode()
     elif args[0] == 'debug':
         # Read every .png under the debug subdirectory
         print('DEBUG: Reading all files')
         for filename in [f for f in os.listdir('debug') if f.endswith('.png')]:
             print('DEBUG:', filename, end=' ')
             img_orig = cv2.imread(os.path.join('debug', filename))
             img_proc = self.transform(img_orig)
             self.debug_mode(img_proc)
     elif args[0].endswith('.png'):
         # Read the specified file (and write debug output)
         self.debug = True
         print('DEBUG:', sys.argv[1])
         img_orig = cv2.imread(sys.argv[1])
         img_proc = self.transform(img_orig)
         print('')
         self.debug_mode(img_proc)
Example #28
0
    def process(self, batch=16, objectName='unknown', times=4):
        gnum=0
        lnum=0

        maxnum=len(self.imagelist)*times
        while gnum < maxnum :

            images=[]
            for lnum in range(0, batch):
                if gnum + lnum < maxnum:
                    images.append(cv2.imread(os.path.join(self.imageFolder, self.imagelist[gnum+lnum])))
                else:
                    break

            images_aug=[]
            for i in range(0,times):
                images_aug_i = self.seq.augment_images(images)
                images_aug.extend(images_aug_i)

            for i,image in enumerate(images_aug):
                bgnum=random.randint(0,len(self.backgroundlist)-1)
                bgfile=os.path.join(self.backgroundFolder,self.backgroundlist[bgnum])
                bgimg=cv2.imread(bgfile)

                xml='%d.xml'%(gnum+i+1)
                jpeg='%d.jpg'%(gnum+i+1)

                print('%d/%d: save image to %s, save xml to %s'%(gnum+i+1,maxnum,jpeg,xml))
                classify2detect.process(image=image,
                                     background=bgimg,
                                     objectName=objectName,
                                     xml=xml,
                                     jpeg=jpeg)

            gnum = gnum + len(images_aug)
Example #29
0
def makeMorph(p1_filename, p2_filename, output_filename, points1, points2, tri, alpha):
    base_points = [[0, 0], [0, 400], [0, 799], [300, 799], [599, 799], [599, 400], [599, 0], [300, 0]]
    # points1 += base_points
    # points2 += base_points
    img1 = cv2.imread(p1_filename)
    img2 = cv2.imread(p2_filename)
    # alpha = 0.5
    img1 = np.float32(img1)
    img2 = np.float32(img2)
    points = []
    for i in range(0, len(points1)):
        x = (1 - alpha) * points1[i][0] + alpha * points2[i][0]
        y = (1 - alpha) * points1[i][1] + alpha * points2[i][1]
        points.append((x, y))
    imgMorph = np.zeros(img1.shape, dtype=img1.dtype)
    for t_tri in tri:
        x, y, z = t_tri

        x = int(x)
        y = int(y)
        z = int(z)

        t1 = [points1[x], points1[y], points1[z]]
        t2 = [points2[x], points2[y], points2[z]]
        t = [points[x], points[y], points[z]]

        # Morph one triangle at a time.
        morphTriangle(img1, img2, imgMorph, t1, t2, t, alpha)
    cv2.imwrite(output_filename, np.uint8(imgMorph))
Example #30
0
def mean_squared_error(image1, image2):
	list_of_files = glob.glob('/home/pi/Desktop/camera/*.jpg')
	latest_file = max(list_of_files, key=os.path.getctime)  # get latest file in our directory

	text = "Possible motion detected"
	subject = "Pi security camera alert"
	send_to = "test"
	send_from = "test"

	# Load images
	original = cv2.imread("control.jpg", 0)  # control file of what the house looks like
	questionable = cv2.imread(latest_file, 0)

	image1 = image1.astype(float)
	image2 = image2.astype(float)
	error_rank = np.sum((image1 - image2) ** 2)
	error_rank /= float(image1.shape[0] * image1.shape[1])
	test = calculated(error_rank)
	if test:
		send_mail(send_from, send_to, subject, text, latest_file)
	else:
		time1 = time.strftime("%d/%m/%Y %H:%M:%S")
		time1 = re.sub('[^A-Za-z0-9]+', '', time1)  # strip separators so the string is filename-safe
		f = open(str(error_rank) + "-" + time1 + ".txt", 'w+')
		f.close()
Example #31
0
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
cv2.imshow("original", image)

flipped = cv2.flip(image, 1)
cv2.imshow("flipped horizontally", flipped)


flipped = cv2.flip(image, 0)
cv2.imshow("flipped vertically", flipped)

flipped = cv2.flip(image, -1)
cv2.imshow("flipped horizontally and vertically", flipped)

cv2.waitKey(0)
Example #32
0
import os
import cv2
import shutil

def mkdir(path):
	folder = os.path.exists(path)
	if not folder:
		os.makedirs(path)

num=0
right=0

img_width  = 512
img_height = 128
img_channel= 3
jvzhen = img_width * img_height * img_channel

for line in open("./1.txt"):
    img_pa = line.strip('\n')
    if ".png" in img_pa:
        num = num + 1
        print("all_img: " + str(num))
        img = cv2.imread(img_pa)
        if img.size != jvzhen:
            with open('./error.txt', 'a') as f:
                f.writelines(line)
        else:
            right = right + 1
            print("right  : " + str(right))
        print("-----------------------------")
Example #33
0
# grab the list of images that we'll be describing
print("[INFO] handling images...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialize the raw pixel intensities matrix, the features matrix,
# and labels list
rawImages = []
features = []
labels = []

# loop over the input images
for (i, imagePath) in enumerate(imagePaths):
    # load the image and extract the class label
    # our images were named as labels.image_number.format
    image = cv2.imread(imagePath)
    # get the label from the image name by extracting the string before "."
    label = imagePath.split(os.path.sep)[-1].split(".")[0]

    # extract raw pixel intensity "features",
    # followed by a color histogram to characterize the color distribution
    # of the pixels in the image
    pixels = image_to_feature_vector(image)
    hist = extract_color_histogram(image)

    # add what we got to the raw images, features, and labels matrices
    rawImages.append(pixels)
    features.append(hist)
    labels.append(label)

    # show an update every 200 images until the last image
Example #34
0
	cv2.setMouseCallback('choose corresponding points', draw_circle)
	while count < 8:	# we select 8 pairs of points; choosing more would be more accurate
		if isleft == 1:
			cv2.imshow('choose corresponding points', image_l)
		elif isleft == 0:
			cv2.imshow('choose corresponding points', image_r)
		if cv2.waitKey(20) & 0xFF == 27:
			break


if len(sys.argv) < 2:
	print('''
Instruction: Run this program with arguments: python as5.py left.jpg right.jpg
	''')
else:
	image_l = cv2.imread(sys.argv[1])
	image_r = cv2.imread(sys.argv[2])
	cv2.imshow('left image', image_l)
	cv2.imshow('right image', image_r)
	print('Press any key to choose corresponding points')
	key = cv2.waitKey(0)
	if key == 27:
		cv2.destroyAllWindows()

	print('Please choose the 8 pairs of corresponding points on the images\n')

	image_l = cv2.imread(sys.argv[1])
	image_r = cv2.imread(sys.argv[2])


	leftpoints = []
Example #35
0
import cv2
import numpy as np
from pyzbar.pyzbar import decode

img = cv2.imread('')  # placeholder; img is overwritten by the capture loop below
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)

with open('datas.txt') as f:
    datas_list = f.read().splitlines()

print(datas_list)

while True:

    success, img = cap.read()
    for barcode in decode(img):
        mydata = barcode.data.decode('utf-8')
        print(mydata)

        if mydata in datas_list:
            # print("Successful Authenticated")
            detect_output = 'Authorized'
            color = (0, 255, 0)
        else:
            # print("Failed to authenticate!!!")
            detect_output = 'Un-authorized'
            color = (0, 0, 255,)
        # build a shape around the detected barcode.
        pts = np.array([barcode.polygon], np.int32)
Example #36
0
# https://segmentfault.com/a/1190000015663722
import cv2
import numpy as np

img = cv2.imread('D:\code\opencv_learning\opencv_learning\last\DATA1.png')
cv2.imshow('src', img)

imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[1]

epsilon = 0.1 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
cv2.polylines(img, [approx], True, (0, 0, 255), 2)
cv2.imshow('show', img)
cv2.waitKey()
Example #37
0
import cv2
import numpy as np
import math
import random
import matplotlib.pyplot as plt

if  __name__ == "__main__":
    img = cv2.imread("C:\\Users\\ani49\\OneDrive\\Documents\\GitHub\\homework-3-ani4991\\Lenna.png",0)

    #mean = 4
    #variance = 10
    prob_noise = 0.01
    thres = 1 - prob_noise

    for i in range(0, img.shape[0]):
        for j in range(0, img.shape[1]):
            rdn = random.random()
            if rdn < prob_noise:
                img[i][j] = 0    # pepper noise
            elif rdn > thres:
                img[i][j] = 255  # salt noise

    #plt.hist(img, bins='auto')
    #plt.show()
    #print("got noise matrix and waiting for hist")

    cv2.imwrite("img_with_salt&pepper_noise.png",np.uint8(img))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #38
0
import cv2
import pandas as pd
from tqdm import tqdm

train = pd.read_csv('Christof/assets/train.csv')
test = pd.read_csv('Christof/assets/sample_submission.csv')

path_to_train = 'Christof/assets/train_rgb_512/'
path_to_test = 'Christof/assets/test_rgb_512/'

fns = [path_to_test + f + '.png' for f in test['Id']]

import numpy as np
channel_avg = np.zeros(3)
channel_std = np.zeros(3)
#images = np.zeros((len(fns),512,512,3))
for i, fn in tqdm(enumerate(fns)):
    image = cv2.imread(fn, cv2.IMREAD_UNCHANGED)
    channel_avg += np.mean(np.reshape(image, (-1, 3)), axis=0)
    channel_std += np.std(np.reshape(image, (-1, 3)), axis=0)

channel_avg /= len(fns)
channel_std /= len(fns)

print(channel_avg / 255)
print(channel_std / 255)
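A sketch (an assumption, not part of the original script) of how such per-channel statistics are typically applied afterwards, standardizing an image before training:

mean = channel_avg / 255
std = channel_std / 255
sample = cv2.imread(fns[0], cv2.IMREAD_UNCHANGED).astype(np.float32) / 255
sample = (sample - mean) / std  # zero-mean, unit-variance per channel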
Example #39
0
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read image
img = cv2.imread("imori.jpg").astype(np.float32)
H, W, C = img.shape

# Otsu binary
## Grayscale
out = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]
out = out.astype(np.uint8)

## Determine threshold of Otsu's binarization
max_sigma = 0
max_t = 0

for _t in range(1, 255):
    v0 = out[np.where(out < _t)]
    m0 = np.mean(v0) if len(v0) > 0 else 0.
    w0 = len(v0) / (H * W)
    v1 = out[np.where(out >= _t)]
    m1 = np.mean(v1) if len(v1) > 0 else 0.
    w1 = len(v1) / (H * W)
    sigma = w0 * w1 * ((m0 - m1) ** 2)
    if sigma > max_sigma:
        max_sigma = sigma
        max_t = _t

## Binarization
#print("threshold >>", max_t)
Example #40
0
def read():
    """
    Demos the largest_rotated_rect function
    """
    for i in range(1, 1001):
        img_name = 'img_' + str(i) + '.jpg'
        gt_name = 'gt_img_' + str(i) + '.txt'
        print(img_name)
        img_path = os.path.join(file_path, img_name)
        # utf-8-sig strips a leading BOM if present
        with open(os.path.join(gt_path, gt_name), 'r', encoding='utf-8-sig') as f:
            gt_lines = f.readlines()
        gt_strs = [g.strip().split(',')[-1] for g in gt_lines]
        gt_coors = [g.strip().split(',')[0:8] for g in gt_lines]
        #gt_coors = [int(g) for g in gt_coors]
        for ii, g in enumerate(gt_coors):
            gt_coors[ii] = [int(a) for a in g]
        #gt_coors = np.array(gt_coor,dtype=np.int32)
        #print 'gt_coors',gt_coors
        image = cv2.imread(img_path)
        image_height, image_width = image.shape[0:2]
        angles = [-90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90]

        for j in angles:
            print('angle', j)
            image_orig = np.copy(image)
            [image_rotated, boxes_rotated] = rotate_image(image, gt_coors, j)
            image_rotated_cropped, boxes_rotated_cropped = crop_around_center(
                image_rotated, boxes_rotated,
                *rotatedRectWithMaxArea(image_width, image_height,
                                        math.radians(j)))
            new_img_name = 'img_' + str(i) + '_' + str(j) + '.jpg'
            plt.clf()
            fig, ax = plt.subplots(3, 5, figsize=(40, 30))
            fig.tight_layout()
            plt.subplot(1, 3, 1)
            plt.imshow(image)
            currentAxis = plt.gca()
            for index, gt in enumerate(gt_coors):
                gt = np.array(gt).reshape(4, 2)
                currentAxis.add_patch(
                    plt.Polygon(gt, fill=None, edgecolor='r', linewidth=2))
                currentAxis.add_patch(plt.Circle(gt[0], 5))
                currentAxis.add_patch(plt.Circle(gt[1], 5, color='r'))
            plt.subplot(1, 3, 2)
            plt.imshow(image_rotated)
            currentAxis = plt.gca()
            for index in range(boxes_rotated.shape[0]):
                #print 'plot boxes_rotated ',boxes_rotated[index]
                gt_rotated = boxes_rotated[index].reshape(4, 2)
                currentAxis.add_patch(
                    plt.Polygon(gt_rotated,
                                fill=None,
                                edgecolor='b',
                                linewidth=2))
                currentAxis.add_patch(plt.Circle(gt_rotated[0], 5))
                currentAxis.add_patch(plt.Circle(gt_rotated[1], 5, color='r'))
            plt.subplot(1, 3, 3)
            plt.imshow(image_rotated_cropped)
            currentAxis = plt.gca()
            result_lines = []
            for index in range(len(boxes_rotated_cropped)):
                #print 'plot boxes_rotated ',boxes_rotated[index]
                gt_rotated_cropped = boxes_rotated_cropped[index]
                currentAxis.add_patch(
                    plt.Polygon(gt_rotated_cropped,
                                fill=None,
                                edgecolor='g',
                                linewidth=2))
                currentAxis.add_patch(plt.Circle(gt_rotated_cropped[0], 5))
                currentAxis.add_patch(
                    plt.Circle(gt_rotated_cropped[1], 5, color='r'))
                currentAxis.add_patch(
                    plt.Circle(gt_rotated_cropped[2], 6, color='y'))
                currentAxis.add_patch(
                    plt.Circle(gt_rotated_cropped[3], 6, color='black'))
                '''
        result_line = []
        for p in range(4):
          for q in range(2):
            result_line.append(gt_rotated_cropped[p][q])
        print 'result_line',result_line
        result_line = [str(a) for a in result_line]        
        result_lines.append(','.join(result_line))
        '''
            #with open(os.path.join(new_gt_path,'gt_img_'+str(j)+'.txt'),'w') as f:
            #f.write('\r\n'.join(result_lines))
            #cv2.imwrite(os.path.join(save_rotate_path,new_img_name),image_rotated)
            #cv2.imwrite(os.path.join(save_crop_path,new_img_name), image_rotated_cropped)
            plt.savefig(os.path.join(vis_save_path, new_img_name), dpi=200)
            plt.close(fig)
        print "Done"
Example #41
0
def performBatchDetect(thresh= 0.25, configPath = "./cfg/yolov4.cfg", weightPath = "yolov4.weights", metaPath= "./cfg/coco.data", hier_thresh=.5, nms=.45, batch_size=3):
    import cv2
    import numpy as np
    import os
    from ctypes import POINTER, c_float  # IMAGE, load_net_custom, etc. come from the darknet bindings
    # NB! Image sizes should be the same
    # You can change the images, yet, be sure that they have the same width and height
    img_samples = ['data/person.jpg', 'data/person.jpg', 'data/person.jpg']
    image_list = [cv2.imread(k) for k in img_samples]

    net = load_net_custom(configPath.encode('utf-8'), weightPath.encode('utf-8'), 0, batch_size)
    meta = load_meta(metaPath.encode('utf-8'))
    pred_height, pred_width, c = image_list[0].shape
    net_width, net_height = (network_width(net), network_height(net))
    img_list = []
    for custom_image_bgr in image_list:
        custom_image = cv2.cvtColor(custom_image_bgr, cv2.COLOR_BGR2RGB)
        custom_image = cv2.resize(
            custom_image, (net_width, net_height), interpolation=cv2.INTER_NEAREST)
        custom_image = custom_image.transpose(2, 0, 1)
        img_list.append(custom_image)

    arr = np.concatenate(img_list, axis=0)
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    im = IMAGE(net_width, net_height, c, data)

    batch_dets = network_predict_batch(net, im, batch_size, pred_width,
                                                pred_height, thresh, hier_thresh, None, 0, 0)
    batch_boxes = []
    batch_scores = []
    batch_classes = []
    for b in range(batch_size):
        num = batch_dets[b].num
        dets = batch_dets[b].dets
        if nms:
            do_nms_obj(dets, num, meta.classes, nms)
        boxes = []
        scores = []
        classes = []
        for i in range(num):
            det = dets[i]
            score = -1
            label = None
            for c in range(det.classes):
                p = det.prob[c]
                if p > score:
                    score = p
                    label = c
            if score > thresh:
                box = det.bbox
                left, top, right, bottom = map(int,(box.x - box.w / 2, box.y - box.h / 2,
                                            box.x + box.w / 2, box.y + box.h / 2))
                boxes.append((top, left, bottom, right))
                scores.append(score)
                classes.append(label)
                boxColor = (int(255 * (1 - (score ** 2))), int(255 * (score ** 2)), 0)
                cv2.rectangle(image_list[b], (left, top),
                          (right, bottom), boxColor, 2)
        cv2.imwrite(os.path.basename(img_samples[b]),image_list[b])

        batch_boxes.append(boxes)
        batch_scores.append(scores)
        batch_classes.append(classes)
    free_batch_detections(batch_dets, batch_size)
    return batch_boxes, batch_scores, batch_classes    
Example #42
0
# import the necessary packages
import argparse
import cv2
import numpy as np
import imutils

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--source", required=True, help="Path to the source image")
ap.add_argument("-t", "--template", required=True, help="Path to the template image")
args = vars(ap.parse_args())

# load the source and template image
source = cv2.imread(args["source"])
template = cv2.imread(args["template"])
(tempH, tempW) = template.shape[:2]

# find the template in the source image
# The cv2.matchTemplate  method requires three parameters: the source  image, the template , and the template matching method.
# We’ll pass in cv2.TM_CCOEFF  to indicate we want to use the correlation coefficient method.
result = cv2.matchTemplate(source, template, cv2.TM_CCOEFF)
 
# Now that we have computed the result  matrix, we can apply the cv2.minMaxLoc  function to find the (x, y) coordinates of the best match.
# We pass in our result, and in return we receive a 4-tuple consisting of the minimum value in the result,
# the maximum value in result, the (x, y) coordinates of the minimum value, and the (x, y) coordinates of the maximum value, respectively
 
(minVal, maxVal, minLoc, (x, y)) = cv2.minMaxLoc(result)
print("minVal:{}, maxVal: {}, minLoc: {}, (x: {}, y: {})".format(minVal, maxVal, minLoc, x, y))
 
Example #43
0
                image, model, landmarks, bb, resized_crop)

            percent_wearing_masks = num_wearing_masks / len(resized_crop) * 100
            image = cv2.putText(
                image, (str(percent_wearing_masks) + "% wearing masks"),
                (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1,
                cv2.LINE_AA)

        cv2.imshow('Input', image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        print()
        func = 0
    elif func == 2:
        image = cv2.imread(
            input("Please enter the complete image file path: ").strip('"'))
        image = image[:, :, ::-1]
        landmarks, bb, cropped_faces, resized_crop = find_faces(image, model2)

        if (type(resized_crop) == int and resized_crop == 0):
            image = cv2.putText(image, "No faces detected", (30, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1,
                                cv2.LINE_AA)
        else:
            image, num_wearing_masks = di.convert_image(
                image, model, landmarks, bb, resized_crop)

            percent_wearing_masks = num_wearing_masks / len(resized_crop) * 100
            image = cv2.putText(
                image, (str(percent_wearing_masks) + "% wearing masks"),
                (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1,
Example #44
0
# importing cv2
import cv2
import numpy as np

# path
path = r'C:\Users\admin\Desktop\MDT425\Week5 - CV2_ImageDetection\img\sampleImage.png'
pathHSV = r'C:\Users\admin\Desktop\MDT425\Week5 - CV2_ImageDetection\img\HSV.png'

# Using cv2.imread() method
img = cv2.imread(path)
hsvChart = cv2.imread(pathHSV)

# rows, cols (shape[0:2] is height first)
imgHeight, imgWidth = img.shape[0:2]
chartHeight, chartWidth = hsvChart.shape[0:2]

hsvImg = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsvChartImg = cv2.cvtColor(hsvChart, cv2.COLOR_BGR2HSV)

lower = np.array([100, 200, 200])
upper = np.array([135, 255, 255])

marking = cv2.inRange(hsvImg, lower, upper)
markingChart = cv2.inRange(hsvChartImg, lower, upper)

# Displaying the image
cv2.imshow('Image', img)
cv2.imshow('HSV', marking)
cv2.imshow('Mark Image HSV', markingChart)

cv2.waitKey()
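A common follow-up (an assumption, not in the original) is to apply the in-range mask back onto the source so only the matching pixels remain visible:

masked = cv2.bitwise_and(img, img, mask=marking)
cv2.imshow('Masked Image', masked)
cv2.waitKey()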
Example #45
0
    def _grab_board(self):
        time.sleep(3)

        samples = np.float32(np.loadtxt('vectors.data'))
        responses = np.float32(np.loadtxt('samples.data'))

        model = cv2.KNearest()
        model.train(samples, responses)

        window = gtk.gdk.get_default_root_window()
        x, y, width, height, _ = window.get_geometry()
        ss = gtk.gdk.Pixbuf.get_from_drawable(gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, width, height),
                                              gtk.gdk.get_default_root_window(),
                                              gtk.gdk.colormap_get_system(),
                                              0, 0, x, y, width, height)
        ss.save(TMP_FILE, 'png')

        raw = cv2.imread(TMP_FILE)
        gray = cv2.cvtColor(raw, cv2.COLOR_BGR2GRAY)
        threshold = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 15)
        cache = threshold.copy()
        contours, _ = cv2.findContours(threshold,
                                       cv2.RETR_LIST,
                                       cv2.CHAIN_APPROX_SIMPLE)

        squares = []
        for c in contours:
            c = cv2.approxPolyDP(c, 4, True)
            if len(c) != 4:
                continue
            if not cv2.isContourConvex(c):
                continue
            squares.append(c)

        board_size = max(cv2.contourArea(s) for s in squares)
        board = [s for s in squares if cv2.contourArea(s) == board_size][0]

        min_x = min(s[0][0] for s in board)
        max_x = max(s[0][0] for s in board)
        min_y = min(s[0][1] for s in board)
        max_y = max(s[0][1] for s in board)

        step_x = (max_x - min_x) / 9.0
        step_y = (max_y - min_y) / 9.0

        values = {}
        centroids = {}
        for y in range(9):
            values[y] = {}
            for x in range(9):
                local_min_y = int(min_y + (y * step_y) + 5)
                local_max_y = int(min_y + ((y+1) * step_y) - 5)
                local_min_x = int(min_x + (x * step_x) + 5)
                local_max_x = int(min_x + ((x+1) * step_x) - 5)

                roi = cache[
                    local_min_y:local_max_y,
                    local_min_x:local_max_x]

                centroids[(y, x)] = (
                    int((local_min_y + local_max_y) / 2.0),
                    int((local_min_x + local_max_x) / 2.0))

                cache = cache.copy()
                roi_cache = roi.copy()

                contours, _ = cv2.findContours(roi,
                                               cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_SIMPLE)

                if not contours:
                    values[y][x] = (0, 0)
                    continue

                item = max(contours, key=lambda c: cv2.contourArea(c))
                _x, _y, _w, _h = cv2.boundingRect(item)

                digit = roi_cache[_y:_y+_h, _x:_x+_w]
                small_digit = cv2.resize(digit, (10, 10))
                vector = small_digit.reshape((1, 100)).astype(np.float32)
                _, results, _, err = model.find_nearest(vector, k=1)

                value = int(results.ravel()[0])
                values[y][x] = (value, err[0][0])

        errs = [values[y][x][1] for y in range(9) for x in range(9)]
        err_threshold = np.percentile(errs, 90)

        # "TODO": relearn 1 :(
        for y in range(9):
            for x in range(9):
                val, err = values[y][x]
                if err > err_threshold and val == 7:
                    values[y][x] = 1
                else:
                    values[y][x] = val

        return values, centroids
Example #46
0
def XShadow(path):
    img  = cv2.imread(path)       
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    height,width = img.shape[:2]
   # print(height,width)
    #blur = cv2.GaussianBlur(gray,(5,5),0)
    
    blur = cv2.blur(gray,(8,8))
    thresh = cv2.adaptiveThreshold(blur,255,1,1,11,2) 
    
    if(width > 500):
        kernel = np.ones((4, 4),np.uint8) # structuring element
    else:
        kernel = np.ones((2, 2),np.uint8) # structuring element
    dilation = cv2.dilate(thresh,kernel,iterations = 1) # dilate so each character becomes a solid blob of foreground pixels
    
    '''
    cv2.imshow('image',thresh)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    '''
    
    perPixelValue = 1 # value of the current pixel
    projectValArry = np.zeros(width, np.int32) # count of text pixels in each column (int32 so tall images do not overflow)


    for i in range(0,width):
        for j in range(0,height):
            perPixelValue = dilation[j,i]
            if (perPixelValue == 255): # text pixel (the adaptive threshold inverted the image, so text is 255)
                projectValArry[i] += 1
       # print(projectValArry[i])
            
    canvas = np.zeros((height,width), dtype="uint8")
    
    for i in range(0,width):
        for j in range(0,height):
            perPixelValue = 255 # white background
            canvas[j, i] = perPixelValue
   

    for i in range(0,width):
        for j in range(0,projectValArry[i]):
            perPixelValue = 0 # black projection histogram
            canvas[height-j-1, i] = perPixelValue
    '''
    cv2.imshow('canvas',canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    '''
    
    boxes = []
    startIndex = 0 # index where a character region starts
    endIndex = 0 # index where a blank region starts
    inBlock = 0 # whether we are currently inside a character region


    for i in range(width):
        if (inBlock == 0 and projectValArry[i] != 0):
            inBlock = 1  
            startIndex = i
        elif (inBlock == 1 and projectValArry[i] == 0):
            endIndex = i
            inBlock = 0
            #subImg = gray[0:height, startIndex:endIndex+1] #endIndex+1
            #print(startIndex,endIndex+1)
            boxes.append([startIndex, 0, endIndex-startIndex-1, height])
    #print(len(boxes))
    return boxes
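A hedged usage sketch: XShadow returns (x, y, w, h) boxes from the column projection, which can be drawn back onto the source image (the path below is a placeholder):

img = cv2.imread('sample.png')
for (x, y, w, h) in XShadow('sample.png'):
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 1)
cv2.imshow('columns', img)
cv2.waitKey(0)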
Example #47
0
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('noisy_leaf.jpg', 0)

# ret1, th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

ret, imgf = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# blur = cv2.GaussianBlur(img, (5, 5), 0)
# ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

plt.subplot(3, 1, 1), plt.imshow(img, cmap='gray')
plt.title('Original Noisy Image'), plt.xticks([]), plt.yticks([])
plt.subplot(3, 1, 2), plt.hist(img.ravel(), 256)
plt.axvline(x=ret, color='r', linestyle='dashed', linewidth=2)  # Otsu's chosen threshold
plt.title('Histogram'), plt.xticks([]), plt.yticks([])
plt.subplot(3, 1, 3), plt.imshow(imgf, cmap='gray')
plt.title('Otsu thresholding'), plt.xticks([]), plt.yticks([])
plt.show()
Example #48
0
import os

import cv2
import numpy as np
from PIL import Image, ImageChops


def removeBackground(image_file, out_folder):
    img = cv2.imread(image_file)

    # Crop white borders from the image so the edge detector
    # doesn't pick up the border as an edge
    im = Image.fromarray(img)
    bg = Image.new(im.mode, im.size, (255, 255, 255))
    diff = ImageChops.difference(im, bg)
    diff = ImageChops.add(diff, diff, 2.0, -100)
    bbox = diff.getbbox()
    if bbox:
        im = im.crop(bbox)
        img = np.array(im)

    height, width, _ = img.shape

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    # Calculate otsu threshold for edge detection
    ret, threshed_img = cv2.threshold(gray, 0, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    high = ret
    low = high * 0.5

    # Edge detection
    edges = cv2.Canny(gray, low, high)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)

    # Find all contours and keep the one with the biggest area
    # (OpenCV 3.x findContours returns three values)
    _, contours, _ = cv2.findContours(edges, cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)
    max_contour = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(max_contour)

    # Keep the box at least 5 px inside the image borders,
    # since grabCut needs a rect strictly inside the frame
    x1 = x if x > 5 else 5
    x2 = x + w if abs(width - (x + w)) > 5 else (x + w) - 5
    y1 = y if y > 5 else 5
    y2 = y + h if abs(height - (y + h)) > 5 else (y + h) - 5

    # grabCut expects the rect as (x, y, width, height),
    # so convert the corner pair accordingly
    rect = (x1, y1, x2 - x1, y2 - y1)

    mask = np.zeros(img.shape[:2], dtype=np.uint8)

    # Scratch model arrays required by grabCut's API
    bgdmodel = np.zeros((1, 65), np.float64)
    fgdmodel = np.zeros((1, 65), np.float64)

    # Iteratively extract the foreground object: first from the rect,
    # then refine using the resulting mask
    cv2.grabCut(img, mask, rect, bgdmodel, fgdmodel, 10, cv2.GC_INIT_WITH_RECT)
    cv2.grabCut(img, mask, rect, bgdmodel, fgdmodel, 10, cv2.GC_INIT_WITH_MASK)

    # Remove background: keep definite (GC_FGD == 1) and
    # probable (GC_PR_FGD == 3) foreground pixels
    mask2 = np.where((mask == 1) + (mask == 3), 255, 0).astype('uint8')
    output = cv2.bitwise_and(img, img, mask=mask2)

    # Convert black background to white
    output[np.where((output == [0, 0, 0]).all(axis=2))] = [255, 255, 255]
    cv2.imwrite(os.path.join(out_folder, os.path.basename(image_file)), output)
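A hypothetical driver for removeBackground(), stripping the background from every JPEG in an input folder; the folder names are placeholders, not part of the original snippet.

import glob

for path in glob.glob('input/*.jpg'):
    removeBackground(path, 'output')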
Example #49
0
import cv2
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")  # build a cascade classifier
img = cv2.imread("venv/penguin.jpg")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert the image to grayscale
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.05, minNeighbors=5)  # find faces
print(type(faces))
print(faces)
for x, y, w, h in faces:
    img = cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
resized_img = cv2.resize(img, (int(img.shape[1] / 2), int(img.shape[0] / 2)))
cv2.imshow("Faces", resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
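The two detectMultiScale knobs above trade recall against false positives: pushing scaleFactor towards 1.0 scans more image scales (slower but more detections), while raising minNeighbors keeps only detections confirmed by many overlapping windows. A stricter pass on the same grayscale image might look like this sketch:

strict_faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=8)
print(len(strict_faces), "faces kept by the stricter pass")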
Example #50
0
    return vis


if __name__ == '__main__':
    print(__doc__)

    import sys, getopt
    opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
    opts = dict(opts)
    feature_name = opts.get('--feature', 'sift')
    try:
        fn1, fn2 = args
    except ValueError:
        fn1 = '../c/box.png'
        fn2 = '../c/box_in_scene.png'

    img1 = cv2.imread(fn1, 0)
    img2 = cv2.imread(fn2, 0)
    detector, matcher = init_feature(feature_name)
    if detector is not None:
        print('using', feature_name)
    else:
        print('unknown feature:', feature_name)
        sys.exit(1)

    kp1, desc1 = detector.detectAndCompute(img1, None)
    kp2, desc2 = detector.detectAndCompute(img2, None)
    print('img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)))

    def match_and_draw(win):
        print('matching...')
Example #51
0
def binarize(path):
    char_img = cv2.imread(path, cv2.IMREAD_ANYDEPTH)  # flag 2 == IMREAD_ANYDEPTH
    resized = cv2.resize(char_img, (250, 250))
    ret, thr_image = cv2.threshold(resized.copy(), 0, 255, cv2.THRESH_BINARY)
    # We don't use the returned threshold value; keep only the thresholded image
    return thr_image
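binarize() thresholds at 0 with plain THRESH_BINARY, which marks every nonzero pixel white; if a data-driven cut-off is wanted instead, Otsu's method (as in Example #47) is a drop-in change. A sketch, not part of the original snippet; 'char.png' is a hypothetical input.

import cv2

img = cv2.imread('char.png', cv2.IMREAD_GRAYSCALE)
ret, thr = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)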
Example #52
0

def func(x):
    # no-op trackbar callback; values are polled in the main loop instead
    pass


a = int(input('Enter 1 for VideoCam else 0 '))
if a == 1:
    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        ret, img = cap.read()
    else:
        ret = False
else:
    img = cv2.imread('a.jpg')
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.createTrackbar('nfeatures', 'image', 500, 10000, func)
cv2.createTrackbar('table_number', 'image', 6, 100, func)
cv2.createTrackbar('key size', 'image', 12, 31, func)
cv2.createTrackbar('multi', 'image', 1, 100, func)
cv2.createTrackbar('flag', 'image', 0, 2, func)
cv2.createTrackbar('check', 'image', 100, 500, func)
cv2.createTrackbar('algo', 'image', 1, 1, func)
cv2.createTrackbar('min_match', 'image', 10, 1000, func)
cv2.createTrackbar('aise', 'image', 1, 10, func)
while True:
    if a == 1:
        ret, img = cap.read()
        img = cv2.flip(img, 1)
    else:
        br_list = []
        object_list = []



Example #53
0
import os

import cv2
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector

# image_folder, line_select_callback and onkeypress are defined elsewhere
# in the original example; only this tail survives here.


def toggle_selector(event):
    toggle_selector.RS.set_active(True)


if __name__ == '__main__':
    for n, image_file in enumerate(os.scandir(image_folder)):
        img = image_file
        fig, ax = plt.subplots(1, figsize=(10.5, 8))
        mngr = plt.get_current_fig_manager()
        mngr.window.setGeometry(250, 40, 800, 600)  # Qt backend specific
        image = cv2.imread(image_file.path)
        print(image_file.path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        ax.imshow(image)

        toggle_selector.RS = RectangleSelector(
            ax, line_select_callback,
            drawtype='box', useblit=True,
            button=[1], minspanx=5, minspany=5,
            spancoords='pixels', interactive=True,
        )
        bbox = plt.connect('key_press_event', toggle_selector)
        key = plt.connect('key_press_event', onkeypress)
        plt.tight_layout()
        plt.show()
        plt.close(fig)
Example #54
0
import cv2

image = cv2.imread('/home/congdanh/Desktop/abc.jpg')

# (x,y,w,h) = cv2.selectROI(image)

x = 0
y = 0
h = 1000
w = 1000
# ROI = image[y:y+h, x:x+w]
#
# cv2.imshow("ROI", ROI)
# cv2.imwrite("ROI.png", ROI)
# cv2.waitKey(0)

crop_img = image[y:y + h, x:x + w]
cv2.imshow("cropped", crop_img)
# cv2.waitKey(0)
cv2.imwrite("crop.png", crop_img)
Example #55
0
def main():

    # Set this to "s" to save the cells as images and build a dataset

    user = "******"

    path = r"images"

    # Output directory in which to create the dataset
    output_path = r"Dataset"
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    files = os.listdir(path)

    print(files)
    file_number = 5
    img_path = os.path.join(path, files[file_number - 1])
    img = cv2.imread(img_path)
    print(img_path)

    # input_img = resizing(img)

    # output = input_img.copy()
    input_img, img = resize(img)
    sudoku_squares, intersections, bin_img, edges, output = preprocessing(
        input_img, ret=True)

    cv2.imshow("output", output)
    cv2.imshow("bin_image", bin_img)
    cv2.imshow("edges", edges)
    #
    # cv2.waitKey(0)

    # print(sudoku_squares)
    if user == "s":
        i = 0
        for cubes in sudoku_squares:

            image_name = os.path.join(
                output_path, r"sudoku_{}_{}.jpg".format(file_number, i))
            # print(image_name)
            i += 1

            cv2.imwrite(image_name, cubes)
        exit()

    # sudoku_squares, intersections = preprocessing(input_img)
    # cv2.imshow("bin_img", bin_img)
    # cv2.imshow("input_img", edges)
    cv2.imshow("Sudoku", img)
    # cv2.imshow("input_img", input_img)

    # intersections = intersections.reshape(9, 9)
    sudoku = np.ones(81, dtype=int)  # np.int was removed from recent NumPy

    blank_indices = predictions(sudoku_squares, sudoku)
    sudoku = sudoku.reshape(9, 9)
    print(sudoku)

    sudoku_obj = Sudoku_Solver(sudoku)
    print("Sudoku to Solve :")
    sudoku_obj.print()
    print("Number of Unknowns : {}".format(sudoku_obj.unknowns))
    input("Press enter to solve")
    sudoku_obj.solve()
    print("Solved Sudoku :")
    sudoku_obj.print()

    img = print_predictions(img, intersections, blank_indices,
                            sudoku_obj.solved_array)

    cv2.imshow("solved", img)

    cv2.waitKey(0)
Example #56
0
	is_train = tf.placeholder( tf.bool )

	bn1, bn2, reconstruction_ori = csgan.build_reconstruction(cs_meas, is_train)

	summary = tf.merge_all_summaries()
	saver = tf.train.Saver()
	sess = tf.Session()

	saver.restore(sess, checkpointPath)

	psnr = np.array([])


	for imName in imList:
		# Read image
		im = cv2.imread(inputDir + imName,0)
		im = cv2.normalize(im.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
		[height, width] = im.shape

		# Determine the size of zero pad
		rowPad = blockSize - (height % blockSize)
		colPad = blockSize - (width % blockSize)

		# Do zero padding
		imPad = np.concatenate((im, np.zeros([rowPad, width])), axis=0)
		imPad = np.concatenate((imPad, np.zeros([height+rowPad, colPad])),axis=1)
		print(imPad.shape)

		numBlocksRow = (height + rowPad) // blockSize  # // keeps these integers under Python 3
		numBlocksCol = (width + colPad) // blockSize
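		# The snippet cuts off here; presumably the padded image is next split
		# into blockSize x blockSize patches for the reconstruction network.
		# A hedged sketch of that blocking step (not original code):
		blocks = [imPad[r * blockSize:(r + 1) * blockSize,
		                c * blockSize:(c + 1) * blockSize]
		          for r in range(numBlocksRow) for c in range(numBlocksCol)]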
Example #57
0
        """ Traffic light classification """
        with self.sess_classification.as_default(
        ), self.graph_classification.as_default(), Timer('classification'):
            sfmax = list(
                self.sess_classification.run(
                    tf.nn.softmax(
                        self.out_graph.eval(
                            feed_dict={self.in_graph: [image]}))))
            sf_ind = sfmax.index(max(sfmax))

            ## add a colored bbox and publish traffic light if needed
            ## rosparam set /tl_detector/publish_traffic_light true
            if rospy.get_param('~publish_traffic_light', False):
                cv2.rectangle(image, (0, 0), (31, 31),
                              self.index2color[sf_ind], 1)
                self.traffic_light_pub.publish(
                    self.bridge.cv2_to_imgmsg(image, "rgb8"))

        return self.index2msg[sf_ind]


if __name__ == "__main__":
    classifier = TLClassifier(model_dir='model')
    classifier.publish_traffic_light = False
    for im in glob.glob('images/*.jpg'):
        img = cv2.imread(im)
        if img is not None:
            color = classifier.get_classification(img)
            print("--------------", color, "-------------------")
            print(im, 'detected as', classifier.tl2str(color))
Example #58
0
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @File    : template_match.py
# @Time    : 18-7-18 11:43 AM
# @Author  : Jee
# @Email   : [email protected]

import cv2
import numpy as np
from matplotlib import pyplot as plt
img_rgb = cv2.imread('imgs/21.jpg')
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread('imgs/tmp.png',0)

w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray,template,cv2.TM_CCOEFF_NORMED)

threshold = 0.75

loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):  # np.where returns (rows, cols); reverse to (x, y) points
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
cv2.imshow("tmp",img_rgb)
cv2.waitKey(0)
# cv2.imwrite('res.png',img_rgb)
Example #59
0
    for control in controls:
        name = control['name']
        transformation = control['attr']

        min_v = control.get('min', 0)
        max_v = control.get('max', 255)
        print(control, name, transformation, min_v, max_v)
        
        cv.createTrackbar(name, WINDOW_NAME, min_v, max_v, partial(control_callback, image, transformation))

    update_image(sample_image, g_Transformations)

    
sample_image = sys.argv[1]
sample_image = cv.imread(sample_image)

cv.namedWindow(WINDOW_NAME, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO)


controls = [
    {'name': 'blur', 'min': 1, 'max': 33, 'attr': 'blur'},
    {'name': 'eq hist', 'max': 1, 'attr': 'norm_histogram'},
    {'name': 'binarize', 'min': 1, 'attr': 'binarize'},

    # Morphology controls
    {'name': 'Morphology kernel size', 'min': 1, 'max': 10, 'attr': 'morphology/kernel'},
    {'name': 'Number of iterations', 'min': 1, 'max': 20, 'attr': 'morphology/iterations'},

    # Hough controls
    {'name': 'circles scale', 'min': 1, 'max': 10, 'attr': 'circles/scale'},
Example #60
0
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('f.jpg', 0)
edges = cv2.Canny(img, 100, 200)
img_gaussian = cv2.GaussianBlur(img, (3, 3), 0)

kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
img_prewittx = cv2.filter2D(img_gaussian, -1, kernelx)
img_prewitty = cv2.filter2D(img_gaussian, -1, kernely)

cv2.imshow("Prewitt X", img_prewittx)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
cv2.imshow("Prewitt Y", img_prewitty)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
cv2.imshow("Prewitt", img_prewittx + img_prewitty)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
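The saturating sum above clips strong edges at 255; an alternative that preserves relative edge strength is the L2 gradient magnitude. A sketch, not in the original snippet:

magnitude = np.hypot(img_prewittx.astype(np.float64),
                     img_prewitty.astype(np.float64))
magnitude = np.uint8(np.clip(magnitude, 0, 255))
cv2.imshow("Prewitt magnitude", magnitude)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()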