def main():
    """Feature-detection driver: corner-score and Harris response maps."""
    img = read_img('./grace_hopper.png')

    # All outputs for this task go under ./feature_detection.
    out_dir = "./feature_detection"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Task 5: corner score for offset (u, v) = (5, 0) over a 5x5 window.
    offset_u, offset_v, window = 5, 0, (5, 5)
    score = corner_score(img, offset_u, offset_v, window)
    save_img(score, "./feature_detection/corner_score.png")

    # Task 6: Harris corner detector response.
    response = harris_detector(img)
    save_img(response, "./feature_detection/harris_response.png")
def p2(p1, p2, savename):
    """Stitch the image at path p1 (left) with the image at path p2 (right)
    and write the panorama to ./<savename>.jpg."""
    left = read_img(p1)
    right = read_img(p2)

    # Build the panorama, then persist it.
    panorama = stitchimage(left, right)
    save_img(panorama, './{}.jpg'.format(savename))
def LoginCodeVerificatin(driver):
    # NOTE(review): function name is misspelled ("Verificatin") — kept as-is
    # so existing callers do not break.
    """Solve the Taobao login captcha (Python 2 code).

    Downloads the captcha image shown on the current page, sends it to a
    third-party recognition service, types the recognized code into the
    login form and submits it. Any failure is caught and only printed, so
    a bad attempt does not abort the caller.
    """
    try:
        detailCode = pq(driver.page_source)
        imageURL = detailCode.find('#J_CheckCodeImg1').attr('src')
        save_img(imageURL, 'picDic/TaoBaoPicDic.png')
        driver.find_element_by_xpath('//*[@id="J_CodeInput"]').clear()
        # Set a browser-like request header so the server does not take us
        # for a robot.
        headers = {
            'UserAgent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
        }
        f = open('picDic/TaoBaoPicDic.png', 'rb')  # open the captcha image in binary mode
        ls_f = base64.b64encode(f.read())  # read the file contents and base64-encode them
        f.close()
        # Payload for the captcha-recognition service (credentials masked).
        values = {
            "softwareId": 7616,
            "softwareSecret": "p2AXUYMaTDcV72UoULYQQt7ubVPwTUXXlXIw7A3S",
            "username": "******",
            "password": "******",
            "captchaData": ls_f,
            "captchaType": 1017,
            "captchaMinLength": 4,
            "captchaMaxLength": 8
        }
        # @ZHOUZEnan1993
        data = json.dumps(values)
        # Send the HTTP request to the recognition service.
        # NOTE(review): `url` is not defined in this function — presumably a
        # module-level constant; verify before relying on this code.
        request = urllib2.Request(url=url, headers=headers, data=data)
        # Read back the service's answer.
        response = urllib2.urlopen(request)
        # HACK: eval() on an external HTTP response can execute arbitrary
        # expressions — json.loads would be the safe parser here.
        datas = eval(response.read())
        print 'data---%s---%s--%s--%s' % (datas['code'], datas['message'],
                                          datas['data']['recognition'],
                                          datas['data']['captchaId'])
        # Strip every kind of whitespace from the recognized code before
        # typing it into the input box.
        code = str(datas['data']['recognition']).replace('\r\n', '').replace(
            ' ', '').replace('\n', '').replace('\t', '')
        driver.find_element_by_xpath('//*[@id="J_CodeInput"]').send_keys(code)
        # Random pauses between actions to appear more human-like.
        time.sleep(random.uniform(5, 8))
        driver.find_element_by_xpath('//*[@id="J_submit"]').click()
        time.sleep(random.uniform(5, 8))
        print '测试结束************'
        driver.switch_to.default_content()
    except Exception as e:
        print '验证码登录错了子卡-------%s' % e
def main():
    """Compute and save corner-score and Harris-response maps for the
    Grace Hopper test image."""
    img = read_img('./grace_hopper.png')

    # Make sure the output directory for feature detection exists.
    if not os.path.exists("./feature_detection"):
        os.makedirs("./feature_detection")

    # Corner score for offset (u, v) = (0, 2) with a 5x5 window.
    u, v, window = 0, 2, (5, 5)
    save_img(corner_score(img, u, v, window),
             "./feature_detection/corner_score.png")

    # Harris corner response map.
    save_img(harris_detector(img),
             "./feature_detection/harris_response.png")
def draw_matches(img1, img2, kp1, kp2, matches):
    '''
    Creates an output image where the two source images stacked vertically
    connecting matching keypoints with a line.

    Input - img1: Input image 1 of shape (H1,W1,3)
            img2: Input image 2 of shape (H2,W2,3)
            kp1: Keypoint matrix for image 1 of shape (N,4)
            kp2: Keypoint matrix for image 2 of shape (M,4)
            matches: List of matching pairs indices between the
                     2 sets of keypoints (K,2)

    Output - Image where 2 input images stacked vertically with green lines
             joining the matched keypoints. Also saved to
             'draw_matches1.png' as a side effect.
    '''
    # (x1, y1, x2, y2) per match; cv2.line requires integer pixel coords.
    match_points = common.get_match_points(kp1, kp2, matches).astype(np.int32)

    output = np.vstack((img1, img2))
    color = (0, 255, 0)  # green
    thickness = 1
    # img2 sits below img1 in the stack, so its keypoints shift down by H1.
    y_offset = img1.shape[0]

    for x1, y1, x2, y2 in match_points:
        output = cv2.line(output, (x1, y1), (x2, y2 + y_offset),
                          color, thickness)

    save_img(output, 'draw_matches1.png')
    return output
def main():
    """Gaussian-filter pipeline: smooth the Grace Hopper image with a 3x3
    Gaussian kernel, then compare edge maps of the original and the
    smoothed image."""
    img = read_img('./grace_hopper.png')

    # Ensure both output directories exist.  The ./gaussian_filter guard was
    # commented out in the original, so the save_img calls below crashed on
    # a fresh checkout — restored here.
    if not os.path.exists("./image_patches"):
        os.makedirs("./image_patches")
    if not os.path.exists("./gaussian_filter"):
        os.makedirs("./gaussian_filter")

    # 3x3 Gaussian kernel with variance va = 1 / (2 ln 2), as derived in the
    # assignment question (tolerance is allowed for the kernel values).
    va = 1 / (2 * np.log(2))
    k = np.zeros((3, 3))
    for i in range(3):
        for j in range(3):
            k[i, j] = 1 / (2 * np.pi * va) * np.exp(
                -((i - 1) ** 2 + (j - 1) ** 2) / (2 * va))

    filtered_gaussian = convolve(img, k)
    save_img(filtered_gaussian, "./gaussian_filter/q2_gaussian.png")

    # Edge maps before and after smoothing.
    edge_detect, _, _ = edge_detection(img)
    save_img(edge_detect, "./gaussian_filter/q3_edge.png")
    edge_with_gaussian, _, _ = edge_detection(filtered_gaussian)
    save_img(edge_with_gaussian, "./gaussian_filter/q3_edge_gaussian.png")

    print("Gaussian Filter is done. ")
#breakpoint() output = cv2.warpPerspective(img, H, (output_w, output_h), flags=cv2.INTER_LINEAR) #breakpoint() return output if __name__ == "__main__": # Task 5 case_name = "threebody" I = read_img(os.path.join("task5", case_name, "book.jpg")) corners = np.load(os.path.join("task5", case_name, "corners.npy")) size = np.load(os.path.join("task5", case_name, "size.npy")) result = make_synthetic_view(I, corners, size) save_img(result, case_name + "_frontoparallel.jpg") case_name = "palmer" I = read_img(os.path.join("task5", case_name, "book.jpg")) corners = np.load(os.path.join("task5", case_name, "corners.npy")) size = np.load(os.path.join("task5", case_name, "size.npy")) result = make_synthetic_view(I, corners, size) save_img(result, case_name + "_frontoparallel.jpg")
#breakpoint() draw_matches(img1, img2, keypoints1, keypoints2, matches) test = keypoints1[matches[:, [0]], [0]] XY = np.hstack((keypoints1[matches[:, [0]], [0]], keypoints1[matches[:, [0]], [1]])) XY = np.hstack((XY, keypoints2[matches[:, [1]], [0]])) XY = np.hstack((XY, keypoints2[matches[:, [1]], [1]])) H = RANSAC_fit_homography(XY) #breakpoint() stitched = warp_and_combine(img1, img2, H, XY) return stitched if __name__ == "__main__": #Possible starter code; you might want to loop over the task 6 images #to_stitch = 'eynsham' #to_stitch = 'mertoncourtyard' to_stitch_list = [ 'eynsham', 'florence2', 'florence3', 'florence3_alt', 'lowetag', 'mertonchapel', 'mertoncourtyard', 'vgg' ] #to_stitch_list = ['eynsham', 'mertoncourtyard'] for to_stitch in to_stitch_list: I1 = read_img(os.path.join('task6', to_stitch, 'p1.jpg')) I2 = read_img(os.path.join('task6', to_stitch, 'p2.jpg')) res = make_warped(I1, I2) save_img(res, "result_" + to_stitch + ".jpg") print("finished:", to_stitch)
def main():
    """HW1 filtering pipeline: image patches, Gaussian smoothing, edge
    detection, the Sobel operator, and LoG/DoG filtering on the Grace
    Hopper test image."""
    img = read_img('./grace_hopper.png')

    # ---- Task 1: image patches ----
    if not os.path.exists("./image_patches"):
        os.makedirs("./image_patches")
    patches = image_patches(img)
    # Stack the first three patches vertically into a single figure.
    patch_strip = np.vstack((patches[0], patches[1], patches[2]))
    save_img(patch_strip, "./image_patches/q1_patch.png")

    # ---- Task 2: convolution and Gaussian filter ----
    if not os.path.exists("./gaussian_filter"):
        os.makedirs("./gaussian_filter")
    sigma = 0.572
    blurred = convolve(img, gaussian_kernel_generator(sigma))
    save_img(blurred, "./gaussian_filter/q2_gaussian.png")

    # Edge maps of the original vs. the smoothed image.
    _, _, edge_plain = edge_detection(img)
    save_img(edge_plain, "./gaussian_filter/q3_edge.png")
    _, _, edge_smoothed = edge_detection(blurred)
    save_img(edge_smoothed, "./gaussian_filter/q3_edge_gaussian.png")
    print("Gaussian Filter is done. ")

    # ---- Task 3: Sobel operator ----
    if not os.path.exists("./sobel_operator"):
        os.makedirs("./sobel_operator")
    Gx, Gy, edge_sobel = sobel_operator(img)
    save_img(Gx, "./sobel_operator/q2_Gx.png")
    save_img(Gy, "./sobel_operator/q2_Gy.png")
    save_img(edge_sobel, "./sobel_operator/q2_edge_sobel.png")
    print("Sobel Operator is done. ")

    # ---- Task 4: LoG filter ----
    if not os.path.exists("./log_filter"):
        os.makedirs("./log_filter")
    # 3x3 and 9x9 Laplacian-of-Gaussian kernels given in the assignment.
    kernel_LoG1 = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
    kernel_LoG2 = np.array(
        [[0, 0, 3, 2, 2, 2, 3, 0, 0],
         [0, 2, 3, 5, 5, 5, 3, 2, 0],
         [3, 3, 5, 3, 0, 3, 5, 3, 3],
         [2, 5, 3, -12, -23, -12, 3, 5, 2],
         [2, 5, 0, -23, -40, -23, 0, 5, 2],
         [2, 5, 3, -12, -23, -12, 3, 5, 2],
         [3, 3, 5, 3, 0, 3, 5, 3, 3],
         [0, 2, 3, 5, 5, 5, 3, 2, 0],
         [0, 0, 3, 2, 2, 2, 3, 0, 0]])
    save_img(convolve(img, kernel_LoG1), "./log_filter/q1_LoG1.png")
    save_img(convolve(img, kernel_LoG2), "./log_filter/q1_LoG2.png")

    # Approximate the 1-D LoG with a difference of Gaussians and plot both
    # curves for comparison (data supplied with the assignment).
    data = np.load('log1d.npz')
    LoG_50 = data['log50']
    DoG = data['gauss53'] - data['gauss50']
    x = np.arange(-250, 251)
    plt.plot(x, LoG_50, label='Laplacian')
    plt.plot(x, DoG, label='DoG')
    plt.legend()
    plt.show()
    print("LoG Filter is done. ")