Code example #1
    def save_network(self, dcn, optimizer, iteration, logging_dict=None):
        """
        Saves network parameters to logging directory
        :return:
        :rtype: None
        """

        network_param_file = os.path.join(
            self._logging_dir,
            utils.getPaddedString(iteration, width=6) + ".pth")
        optimizer_param_file = network_param_file + ".opt"
        torch.save(dcn.state_dict(), network_param_file)
        torch.save(optimizer.state_dict(), optimizer_param_file)

        # also save loss history stuff
        if logging_dict is not None:
            log_history_file = os.path.join(
                self._logging_dir,
                utils.getPaddedString(iteration, width=6) +
                "_log_history.yaml")
            utils.saveToYaml(logging_dict, log_history_file)

            current_loss_file = os.path.join(self._logging_dir, 'loss.yaml')
            current_loss_data = self._get_current_loss(logging_dict)

            utils.saveToYaml(current_loss_data, current_loss_file)
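All of these examples lean on a couple of small helpers from `utils` whose definitions are not shown. A minimal sketch of what they presumably do, assuming `getPaddedString` simply zero-pads an integer and `saveToYaml` is a thin wrapper around PyYAML (both bodies below are guesses, not the project's actual code):

    import yaml

    def getPaddedString(value, width=6):
        # assumed behavior: 1500 -> "001500"
        return str(value).zfill(width)

    def saveToYaml(data, filename):
        # assumed behavior: serialize a dict to a YAML file on disk
        with open(filename, 'w') as f:
            yaml.dump(data, f, default_flow_style=False)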
Code example #2
    def find_best_match(self, event, u, v, flags, param):
        """
        For each network, find the best match in the target image to point highlighted
        with reticle in the source image. Displays the result
        :return:
        :rtype:
        """

        img_1_with_reticle = np.copy(self.img1)
        draw_reticle(img_1_with_reticle, u, v, self._reticle_color)
        cv2.imshow("source", img_1_with_reticle)

        alpha = self._config["blend_weight_original_image"]
        beta = 1 - alpha

        img_2_with_reticle = np.copy(self.img2)

        print("\n\n")

        self._res_uv = dict()

        # self._res_a_uv = dict()
        # self._res_b_uv = dict()

        for network_name in self._dcn_dict:
            res_a = self._res_a[network_name]
            res_b = self._res_b[network_name]
            best_match_uv, best_match_diff, norm_diffs = \
                DenseCorrespondenceNetwork.find_best_match((u, v), res_a, res_b)
            print("\n\n")
            print("network_name:", network_name)
            self._res_uv[network_name] = dict()
            self._res_uv[network_name]['source'] = res_a[v, u, :].tolist()
            self._res_uv[network_name]['target'] = res_b[v, u, :].tolist()

            # print "res_a[v, u, :]:", res_a[v, u, :]
            # print "res_b[v, u, :]:", res_b[v, u, :]

            print("%s best match diff: %.3f" % (network_name, best_match_diff))

            threshold = self._config["norm_diff_threshold"]
            if network_name in self._config["norm_diff_threshold_dict"]:
                threshold = self._config["norm_diff_threshold_dict"][
                    network_name]

            heatmap = self.scale_norm_diffs_to_make_heatmap(
                norm_diffs, threshold)

            reticle_color = self._network_reticle_color[network_name]
            draw_reticle(heatmap, best_match_uv[0], best_match_uv[1],
                         reticle_color)
            draw_reticle(img_2_with_reticle, best_match_uv[0],
                         best_match_uv[1], reticle_color)
            blended = cv2.addWeighted(self.img2_gray, alpha, heatmap, beta, 0)
            cv2.imshow(network_name, blended)

        cv2.imshow("target", img_2_with_reticle)
        if event == cv2.EVENT_LBUTTONDOWN:
            utils.saveToYaml(self._res_uv, 'clicked_point.yaml')
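The `(event, u, v, flags, param)` signature matches OpenCV's mouse-callback interface, so this method is presumably registered on the source window. A rough sketch of that wiring, assuming a visualizer instance named `vis` that exposes the method above:

    import cv2

    # hypothetical setup; window names follow the imshow calls above
    cv2.namedWindow("source")
    cv2.namedWindow("target")
    cv2.setMouseCallback("source", vis.find_best_match)

    while True:
        k = cv2.waitKey(20) & 0xFF
        if k == 27:  # ESC closes the tool
            break
    cv2.destroyAllWindows()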
Code example #3
    def find_best_match(self, event, u, v, flags, param):
        """
        For each network, find the best match in the target image to point highlighted
        with reticle in the source image. Displays the result
        :return:
        :rtype:
        """

        img_1_with_reticle = np.copy(self.img1)
        draw_reticle(img_1_with_reticle, u, v, self._reticle_color)
        cv2.imshow("source", img_1_with_reticle)

        alpha = self._config["blend_weight_original_image"]
        beta = 1 - alpha

        img_2_with_reticle = np.copy(self.img2)


        print("\n\n")

        self._res_uv = dict()

        # self._res_a_uv = dict()
        # self._res_b_uv = dict()

        for network_name in self._dcn_dict:
            res_a = self._res_a[network_name]
            res_b = self._res_b[network_name]
            best_match_uv, best_match_diff, norm_diffs = \
                DenseCorrespondenceNetwork.find_best_match((u, v), res_a, res_b)
            print("\n\n")
            print("network_name:", network_name)
            self._res_uv[network_name] = dict()
            self._res_uv[network_name]['source'] = res_a[v, u, :].tolist()
            self._res_uv[network_name]['target'] = res_b[v, u, :].tolist()

            # print "res_a[v, u, :]:", res_a[v, u, :]
            # print "res_b[v, u, :]:", res_b[v, u, :]

            print("%s best match diff: %.3f" % (network_name, best_match_diff))

            threshold = self._config["norm_diff_threshold"]
            if network_name in self._config["norm_diff_threshold_dict"]:
                threshold = self._config["norm_diff_threshold_dict"][network_name]

            heatmap = self.scale_norm_diffs_to_make_heatmap(norm_diffs, threshold)

            reticle_color = self._network_reticle_color[network_name]
            draw_reticle(heatmap, best_match_uv[0], best_match_uv[1], reticle_color)
            draw_reticle(img_2_with_reticle, best_match_uv[0], best_match_uv[1], reticle_color)
            blended = cv2.addWeighted(self.img2_gray, alpha, heatmap, beta, 0)
            cv2.imshow(network_name, blended)

        cv2.imshow("target", img_2_with_reticle)
        if event == cv2.EVENT_LBUTTONDOWN:
            utils.saveToYaml(self._res_uv, 'clicked_point.yaml')
Code example #4
    def save_configs(self):
        """
        Saves config files to the logging directory
        :return:
        :rtype: None
        """
        training_params_file = os.path.join(self._logging_dir, 'training.yaml')
        utils.saveToYaml(self._config, training_params_file)

        dataset_params_file = os.path.join(self._logging_dir, 'dataset.yaml')
        utils.saveToYaml(self._dataset.config, dataset_params_file)        
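The two files written here are plain YAML, so they can be read back directly with PyYAML for inspection; a quick sketch (the logging directory path is illustrative):

    import os
    import yaml

    logging_dir = "trained_models/example_run"  # illustrative path
    with open(os.path.join(logging_dir, "training.yaml")) as f:
        training_config = yaml.safe_load(f)
    with open(os.path.join(logging_dir, "dataset.yaml")) as f:
        dataset_config = yaml.safe_load(f)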
Code example #5
    def save_configs(self):
        """
        Saves config files to the logging directory
        :return:
        :rtype: None
        """
        training_params_file = os.path.join(self._logging_dir, 'training.yaml')
        utils.saveToYaml(self._config, training_params_file)

        dataset_params_file = os.path.join(self._logging_dir, 'dataset.yaml')
        utils.saveToYaml(self._dataset.config, dataset_params_file)        
Code example #6
    def save_network(self, dcn, optimizer, iteration, logging_dict=None):
        """
        Saves network parameters to logging directory
        :return:
        :rtype: None
        """

        network_param_file = os.path.join(self._logging_dir, utils.getPaddedString(iteration, width=6) + ".pth")
        optimizer_param_file = network_param_file + ".opt"
        torch.save(dcn.state_dict(), network_param_file)
        torch.save(optimizer.state_dict(), optimizer_param_file)

        # also save loss history stuff
        if logging_dict is not None:
            log_history_file = os.path.join(self._logging_dir, utils.getPaddedString(iteration, width=6) + "_log_history.yaml")
            utils.saveToYaml(logging_dict, log_history_file)

            current_loss_file = os.path.join(self._logging_dir, 'loss.yaml')
            current_loss_data = self._get_current_loss(logging_dict)

            utils.saveToYaml(current_loss_data, current_loss_file)
Code example #7
    def save_configs(self):
        """
        Saves config files to the logging directory
        :return:
        :rtype: None
        """
        training_params_file = os.path.join(self._logging_dir, 'training.yaml')
        utils.saveToYaml(self._config, training_params_file)

        dataset_params_file = os.path.join(self._logging_dir, 'dataset.yaml')
        utils.saveToYaml(self._dataset.config, dataset_params_file)

        # make unique identifier
        identifier_file = os.path.join(self._logging_dir, 'identifier.yaml')
        identifier_dict = dict()
        identifier_dict['id'] = utils.get_unique_string()
        utils.saveToYaml(identifier_dict, identifier_file)
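`utils.get_unique_string` is not shown in these snippets. A plausible stand-in, assuming all it needs to produce is a collision-resistant run identifier, could combine a timestamp with a short random suffix (this is an assumption, not the project's actual helper):

    import time
    import uuid

    def get_unique_string():
        # assumed behavior: unique, human-sortable identifier for a run
        return time.strftime("%Y%m%d-%H%M%S") + "-" + uuid.uuid4().hex[:8]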
Code example #8
    next_image_pair()

    cv2.namedWindow('image1')
    cv2.setMouseCallback('image1',draw_circle1)

    cv2.namedWindow('image2')
    cv2.setMouseCallback('image2',draw_circle2)

    while True:
        cv2.imshow('image1',img1)
        cv2.imshow('image2',img2)
        k = cv2.waitKey(20) & 0xFF
        if k == 27:
            break
        elif k == ord('a'):
            print(ix, iy)
        elif k == ord('s'):
            if not check_same_length(img1_points_picked, img2_points_picked):
                print("can't save when not same length")
                print("try choosing a new image pair")
                print("or picking more points on the one with less points")
            else:
                print("saving")
                new_dict = make_savable_correspondence_pairs()
                annotated_data.append(new_dict)
                utils.saveToYaml(annotated_data, "new_annotated_pairs.yaml")
        elif k == ord('n'):
            next_image_pair()
            
    cv2.destroyAllWindows()
Code example #9
    next_image_pair()

    cv2.namedWindow('image1')
    cv2.setMouseCallback('image1', draw_circle1)

    cv2.namedWindow('image2')
    cv2.setMouseCallback('image2', draw_circle2)

    while True:
        cv2.imshow('image1', img1)
        cv2.imshow('image2', img2)
        k = cv2.waitKey(20) & 0xFF
        if k == 27:
            break
        elif k == ord('a'):
            print(ix, iy)
        elif k == ord('s'):
            if not check_same_length(img1_points_picked, img2_points_picked):
                print("can't save when not same length")
                print("try choosing a new image pair")
                print("or picking more points on the one with less points")
            else:
                print("saving")
                new_dict = make_savable_correspondence_pairs()
                annotated_data.append(new_dict)
                utils.saveToYaml(annotated_data, "new_annotated_pairs.yaml")
        elif k == ord('n'):
            next_image_pair()

    cv2.destroyAllWindows()
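`draw_circle1` and `draw_circle2` are referenced but not defined in this snippet. A minimal sketch of what such a callback might do, assuming it records the clicked pixel and marks it on the image (the dict keys and globals here are illustrative):

    # hypothetical callback; OpenCV passes (event, x, y, flags, param)
    def draw_circle1(event, x, y, flags, param):
        global ix, iy
        if event == cv2.EVENT_LBUTTONDOWN:
            ix, iy = x, y
            img1_points_picked.append({'u': x, 'v': y})
            cv2.circle(img1, (x, y), 4, (0, 255, 0), -1)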
Code example #10
    def find_best_match(self, event, u, v, flags, param):
        """
        For each network, find the best match in the target image to point highlighted
        with reticle in the source image. Displays the result
        :return:
        :rtype:
        """

        if self._paused:
            return

        img_1_with_reticle = np.copy(self.img1)
        draw_reticle(img_1_with_reticle, u, v, self._reticle_color)
        cv2.imshow("source", img_1_with_reticle)

        alpha = self._config["blend_weight_original_image"]
        beta = 1 - alpha

        img_2_with_reticle = np.copy(self.img2)

        print("\n\n")

        self._res_uv = dict()

        # self._res_a_uv = dict()
        # self._res_b_uv = dict()

        for network_name in self._dcn_dict:
            res_a = self._res_a[network_name]
            res_b = self._res_b[network_name]
            best_match_uv, best_match_diff, norm_diffs = \
                DenseCorrespondenceNetwork.find_best_match((u, v), res_a, res_b)
            print("\n\n")
            print("network_name:", network_name)
            print("scene_name_1", self._scene_name_1)
            print("image_1_idx", self._image_1_idx)
            print("scene_name_2", self._scene_name_2)
            print("image_2_idx", self._image_2_idx)

            d = dict()
            d['scene_name'] = self._scene_name_1
            d['image_idx'] = self._image_1_idx
            d['descriptor'] = res_a[v, u, :].tolist()
            d['u'] = u
            d['v'] = v

            print("\n-------keypoint info\n", d)
            print("\n--------\n")

            self._res_uv[network_name] = dict()
            self._res_uv[network_name]['source'] = res_a[v, u, :].tolist()
            self._res_uv[network_name]['target'] = res_b[v, u, :].tolist()

            print("res_a[v, u, :]:", res_a[v, u, :])
            print("res_b[v, u, :]:", res_b[best_match_uv[1],
                                           best_match_uv[0], :])

            print("%s best match diff: %.3f" % (network_name, best_match_diff))
            print("res_a", self._res_uv[network_name]['source'])
            print("res_b", self._res_uv[network_name]['target'])

            threshold = self._config["norm_diff_threshold"]
            if network_name in self._config["norm_diff_threshold_dict"]:
                threshold = self._config["norm_diff_threshold_dict"][
                    network_name]

            heatmap_color = vis_utils.compute_gaussian_kernel_heatmap_from_norm_diffs(
                norm_diffs, self._config['kernel_variance'])

            reticle_color = self._network_reticle_color[network_name]

            draw_reticle(heatmap_color, best_match_uv[0], best_match_uv[1],
                         reticle_color)
            draw_reticle(img_2_with_reticle, best_match_uv[0],
                         best_match_uv[1], reticle_color)
            blended = cv2.addWeighted(self.img2, alpha, heatmap_color, beta, 0)
            cv2.imshow(network_name, blended)

        cv2.imshow("target", img_2_with_reticle)
        if event == cv2.EVENT_LBUTTONDOWN:
            utils.saveToYaml(self._res_uv, 'clicked_point.yaml')
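`vis_utils.compute_gaussian_kernel_heatmap_from_norm_diffs` is used here without its definition. The idea is to turn per-pixel descriptor distances into a color heatmap; a minimal sketch, assuming a Gaussian kernel followed by an OpenCV colormap (the project's exact scaling and colormap may differ):

    import cv2
    import numpy as np

    def compute_gaussian_kernel_heatmap_from_norm_diffs(norm_diffs, variance):
        # norm_diffs: HxW distances from each target descriptor to the queried source descriptor
        heatmap = np.exp(-np.square(norm_diffs) / variance)  # 1.0 at the best match, falls off with distance
        heatmap_uint8 = (heatmap * 255).astype(np.uint8)
        return cv2.applyColorMap(heatmap_uint8, cv2.COLORMAP_JET)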
Code example #11
    def save_config(self, config_path=None):
        if config_path is None:
            utils.saveToYaml(self.config, self.config_path)
        else:
            utils.saveToYaml(self.config, config_path)
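Usage is a one-liner either way: with no argument the config is written back to the path it was loaded from, otherwise to the given path (the instance and target file below are illustrative):

    dataset.save_config()                                   # overwrite in place
    dataset.save_config(config_path="dataset_backup.yaml")  # write a copy elsewhere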