model = Network(det, des, cfg.LOSS.SCORE, cfg.LOSS.PAIR, cfg.PATCH.SIZE, cfg.TRAIN.TOPK)

print(f"{gct()} : to device")
device = torch.device("cuda")
model = model.to(device)

resume = args.resume
print(f"{gct()} : in {resume}")
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint["state_dict"])

###############################################################################
# detect and compute
###############################################################################
img1_path, img2_path = args.imgpath.split("@")
kp1, des1, img1, _, _ = model.detectAndCompute(img1_path, device, (600, 460))
kp2, des2, img2, _, _ = model.detectAndCompute(img2_path, device, (460, 600))
# kp2, des2, img2, _, _ = model.detectAndCompute(img2_path, device, (600, 460))

predict_label, nn_kp2 = nearest_neighbor_distance_ratio_match(des1, des2, kp2, 0.9)
idx = predict_label.nonzero().view(-1)
mkp1 = kp1.index_select(dim=0, index=idx.long())  # predicted match keypoints in I1
mkp2 = nn_kp2.index_select(dim=0, index=idx.long())  # predicted match keypoints in I2


def to_cv2_kp(kp):
    # kp is like [batch_idx, y, x, channel]; cast to float for OpenCV
    return cv2.KeyPoint(float(kp[2]), float(kp[1]), 0)
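# --- Hedged sketch (not part of the original script) -------------------------
# One way to visualize the matches computed above. It assumes img1/img2 have
# already been converted to uint8 arrays (e.g., with a reverse_img-style helper
# like the one in the keypoint-drawing script below); draw_predicted_matches is
# a hypothetical helper name.
def draw_predicted_matches(img1, img2, mkp1, mkp2, out_path="matches.jpg"):
    keypoints1 = list(map(to_cv2_kp, mkp1))
    keypoints2 = list(map(to_cv2_kp, mkp2))
    # after index_select, row i of mkp1 corresponds to row i of mkp2
    dmatches = [cv2.DMatch(i, i, 0) for i in range(len(keypoints1))]
    vis = cv2.drawMatches(img1, keypoints1, img2, keypoints2, dmatches, None)
    cv2.imwrite(out_path, vis)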
model = Network(det, des, cfg.LOSS.SCORE, cfg.LOSS.PAIR, cfg.PATCH.SIZE, cfg.TRAIN.TOPK)

print(f"{gct()} : to device")
device = torch.device("cuda")
model = model.to(device)

resume = args.resume
print(f"{gct()} : in {resume}")
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint["state_dict"])

###############################################################################
# detect and compute
###############################################################################
img1_path, img2_path = args.imgpath.split("@")
kp1, des1, img1 = model.detectAndCompute(img1_path, device, (600, 460))
kp2, des2, img2 = model.detectAndCompute(img2_path, device, (600, 460))

predict_label, nn_kp2 = nearest_neighbor_distance_ratio_match(des1, des2, kp2, 0.8)
idx = predict_label.nonzero().view(-1)
mkp1 = kp1.index_select(dim=0, index=idx.long())  # predicted match keypoints in I1
mkp2 = nn_kp2.index_select(dim=0, index=idx.long())  # predicted match keypoints in I2


def to_cv2_kp(kp):
    # kp is like [batch_idx, y, x, channel]; cast to float for OpenCV
    return cv2.KeyPoint(float(kp[2]), float(kp[1]), 0)


def to_cv2_dmatch(m):
    return cv2.DMatch(m, m, m, m)
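# --- Hedged reference (assumption, not the repo's implementation) ------------
# nearest_neighbor_distance_ratio_match above presumably applies Lowe's ratio
# test: accept a putative match when the nearest descriptor in des2 is closer
# than `ratio` times the second nearest. A minimal torch sketch of that
# criterion (ratio_match_sketch is a hypothetical name):
def ratio_match_sketch(des1, des2, kp2, ratio):
    dist = torch.cdist(des1, des2)            # pairwise L2 distances
    nn2 = dist.topk(2, dim=1, largest=False)  # two nearest neighbors per row
    keep = nn2.values[:, 0] < ratio * nn2.values[:, 1]
    return keep, kp2.index_select(0, nn2.indices[:, 0])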
device = torch.device("cuda")
model = model.to(device)

resume = '/home/wang/workspace/Faster-net2/runs/03/model/e082_NN_0.165_NNT_0.397_NNDR_0.673_MeanMS_0.412.pth.tar'
print(f"{gct()} : in {resume}")
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint["state_dict"])


def to_cv2_kp(kp, scale, angle):
    # kp is like [batch_idx, y, x, channel]; cast to float for OpenCV
    return cv2.KeyPoint(float(kp[2]), float(kp[1]), float(scale), float(angle))


def to_cv2_dmatch(m):
    return cv2.DMatch(m, m, m, m)


def reverse_img(img):
    """
    Convert an image from tensor to OpenCV format.
    :param img: tensor of shape (1, C, H, W) with values in [0, 1]
    :return: RGB image
    """
    img = img.permute(0, 2, 3, 1)[0].cpu().detach().numpy()
    img = (img * 255).astype(np.uint8)  # change to opencv format
    # img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # gray to rgb
    return img


kp1, des1, img1, scale1, angle1 = model.detectAndCompute(
    '/home/wang/workspace/Faster-net2/ScalableNet_Net0.3/material/img3.png', device, (460, 600))
angle = np.degrees(np.arctan((angle1[:, 1] / angle1[:, 0]).detach().cpu().numpy()))
img1 = reverse_img(img1)
keypoints1 = list(map(to_cv2_kp, kp1, scale1, angle))
img = cv2.drawKeypoints(img1, keypoints1, img1, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('rfnet_keypoints_text.jpg', img)
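# --- Hedged alternative (assumes angle1 columns are (cos, sin) components) ---
# np.arctan2 recovers the full [-180, 180] degree range and tolerates a zero
# first component, while the arctan(sin / cos) form above loses the quadrant
# and can divide by zero; angle_full is a hypothetical variable name.
a = angle1.detach().cpu().numpy()
angle_full = np.degrees(np.arctan2(a[:, 1], a[:, 0]))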
model = Network(det, des, cfg.LOSS.SCORE, cfg.LOSS.PAIR, cfg.PATCH.SIZE, cfg.TRAIN.TOPK)

print(f"{gct()} : to device")
device = torch.device("cuda")
model = model.to(device)

resume = args.resume
print(f"{gct()} : in {resume}")
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint["state_dict"])

###############################################################################
# detect and compute
###############################################################################
img1_path, img2_path = args.imgpath.split("@")
kp1, des1, img1 = model.detectAndCompute(img1_path, device, (240, 320))
kp2, des2, img2 = model.detectAndCompute(img2_path, device, (240, 320))

predict_label, nn_kp2 = nearest_neighbor_distance_ratio_match(des1, des2, kp2, 0.7)
idx = predict_label.nonzero().view(-1)
mkp1 = kp1.index_select(dim=0, index=idx.long())  # predicted match keypoints in I1
mkp2 = nn_kp2.index_select(dim=0, index=idx.long())  # predicted match keypoints in I2


def to_cv2_kp(kp):
    # kp is like [batch_idx, y, x, channel]; cast to float for OpenCV
    return cv2.KeyPoint(float(kp[2]), float(kp[1]), 0)


def to_cv2_dmatch(m):
    return cv2.DMatch(m, m, m, m)
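# --- Hedged sketch (not part of the original script) -------------------------
# A common downstream check of match quality: fit a homography to the matched
# keypoints with RANSAC. Keypoint rows are [batch_idx, y, x, channel], so the
# columns are flipped to (x, y) for OpenCV.
pts1 = mkp1[:, [2, 1]].cpu().detach().numpy().astype(np.float32)
pts2 = mkp2[:, [2, 1]].cpu().detach().numpy().astype(np.float32)
if len(pts1) >= 4:  # findHomography needs at least four correspondences
    H, inlier_mask = cv2.findHomography(pts1, pts2, cv2.RANSAC, 5.0)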
    cfg.MODEL.padding,
    cfg.MODEL.dilation,
    cfg.MODEL.scale_list,
)
des = HardNetNeiMask(cfg.HARDNET.MARGIN, cfg.MODEL.COO_THRSH)
model = Network(det, des, cfg.LOSS.SCORE, cfg.LOSS.PAIR, cfg.PATCH.SIZE, 512)
model = model.to(device=device)
checkpoint = torch.load(model_file)
model.load_state_dict(checkpoint["state_dict"])

img1_path = "./material/4_1.png"
img2_path = "./material/4_2.png"
img1 = cv2.imread(img1_path)
img2 = cv2.imread(img2_path)
width = img1.shape[1]

kp1, des1, _, _, _, _ = model.detectAndCompute(img1_path, device, (img1.shape[0], img1.shape[1]))
kp2, des2, _, _, _, _ = model.detectAndCompute(img2_path, device, (img2.shape[0], img2.shape[1]))


def to_cv2_kp(kp):
    # kp is like [batch_idx, y, x, channel]; cast to float for OpenCV
    return cv2.KeyPoint(float(kp[2]), float(kp[1]), 0)


kp1 = list(map(to_cv2_kp, kp1))
kp2 = list(map(to_cv2_kp, kp2))

bf = cv2.BFMatcher(cv2.NORM_L2)
matches = bf.knnMatch(des1.cpu().detach().numpy(),
                      trainDescriptors=des2.cpu().detach().numpy(), k=2)
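# --- Hedged continuation (not part of the original script) -------------------
# Filter the k=2 candidates with Lowe's ratio test (0.8 is an assumed
# threshold, consistent with the other scripts here) and draw the surviving
# matches side by side.
good = [[m] for m, n in matches if m.distance < 0.8 * n.distance]
vis = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                         flags=cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS)
cv2.imwrite("rfnet_matches.jpg", vis)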