# Exemplo n.º 1 (Example no. 1 — scraped sample header; vote count: 0)
 def update_hash_templateL0(self, data):
     """Merge the clusters of *data* into the stored etalon template.

     On the first call (empty etalon) the incoming clusters are adopted
     wholesale.  Afterwards each etalon cluster is cross-matched against
     the corresponding data cluster and only descriptors with a symmetric
     kNN match (forward and backward matches agree) are kept.

     :param data: dict with a 'clusters' list parallel to self._etalon
               -- assumed; confirm against caller.
     """
     knn = 3
     if len(self._etalon) == 0:
         self._etalon = data['clusters']
     else:
         matcher = Matcher(matcherForDetector(self.kodsettings.detector_type))
         for index, et_cluster in enumerate(self._etalon):
             dt_cluster = data['clusters'][index]
             # NOTE: len == 0 is subsumed by len < knn, so one check suffices.
             if et_cluster is None or len(et_cluster) < knn:
                 # Etalon cluster too small to cross-match -- adopt the
                 # incoming cluster.  (BUG FIX: the original assigned
                 # et_cluster back to itself here, a no-op.)
                 self._etalon[index] = dt_cluster
             elif dt_cluster is None or len(dt_cluster) < knn:
                 # Incoming cluster unusable -- keep the current etalon.
                 self._etalon[index] = et_cluster
             else:
                 dtype = dtypeForDetector(self.kodsettings.detector_type)
                 # Convert once and reuse for both match directions.
                 et_np = listToNumpy_ndarray(et_cluster, dtype)
                 dt_np = listToNumpy_ndarray(dt_cluster, dtype)
                 matches1 = matcher.knnMatch(et_np, dt_np, k=knn)
                 matches2 = matcher.knnMatch(dt_np, et_np, k=knn)
                 # Keep both endpoint descriptors of every pair whose
                 # forward (m: et->dt) and backward (n: dt->et) matches
                 # point at each other.
                 good = []
                 for m, n in itertools.product(itertools.chain(*matches1),
                                               itertools.chain(*matches2)):
                     if m.queryIdx == n.trainIdx and m.trainIdx == n.queryIdx:
                         good.append(et_cluster[m.queryIdx])
                         good.append(dt_cluster[m.trainIdx])
                 self._etalon[index] = listToNumpy_ndarray(good)
    def verify_template_L1(self, data):
        """Verify *data* against the weighted etalon template.

        Each etalon entry is a cluster of (descriptor, weight) pairs.  For
        every cluster the positively-weighted descriptors are cross-matched
        with the incoming cluster; the weights of the symmetrically-matched
        descriptors are summed and expressed as a percentage of the
        cluster's total weight.  The mean percentage over all usable
        clusters is returned.

        :param data: dict with 'path' and a 'clusters' list
        :return: average match probability (percent), 0 if no cluster usable
        """
        knn = 2
        matcher = Matcher(matcherForDetector(self.kodsettings.detector_type))
        count = 0
        prob = 0
        logger.algo_logger.debug("Image: " + data['path'])
        logger.algo_logger.debug("Template size: ")

        for index, et_weight_cluster in enumerate(self._etalon):
            # Keep only positively-weighted entries; zero weight = no evidence.
            weighted = [(d, c) for d, c in et_weight_cluster if c > 0]
            if not weighted:
                # ROBUSTNESS FIX: the original did izip(*ifilter(...)) which
                # raises on an all-zero cluster; treat it as unusable instead.
                continue
            et_cluster = [d for d, _ in weighted]
            cluster_weight = sum(c for _, c in weighted)
            dt_cluster = data['clusters'][index]

            if dt_cluster is None or len(
                    et_cluster) < knn or len(dt_cluster) < knn:
                continue

            if len(et_cluster) > 0 and len(dt_cluster) > 0:
                dtype = dtypeForDetector(self.kodsettings.detector_type)
                et_np = listToNumpy_ndarray(et_cluster, dtype)
                dt_np = listToNumpy_ndarray(dt_cluster, dtype)
                matches1 = matcher.knnMatch(et_np, dt_np, k=2)
                matches2 = matcher.knnMatch(dt_np, et_np, k=2)

                # Etalon descriptors whose forward (et->dt) and backward
                # (dt->et) matches agree.
                ms = [et_cluster[m.queryIdx]
                      for m, n in itertools.product(
                          itertools.chain(*matches1),
                          itertools.chain(*matches2))
                      if m.queryIdx == n.trainIdx and m.trainIdx == n.queryIdx]
                # Sum the weights of the matched etalon descriptors.
                # BUG FIX: the original called sum(lambda, iterable), which
                # raises TypeError -- sum() takes (iterable, start).
                c_val = sum(c for m in ms
                            for d, c in et_weight_cluster
                            if numpy.array_equal(m, d))
                count += 1
                val = (c_val / (1.0 * cluster_weight)) * 100
                logger.algo_logger.debug("Cluster #" + str(index + 1) + ": " +
                                         str(cluster_weight) + " Positive: " +
                                         str(c_val) + " Probability: " +
                                         str(val))
                prob += val
            else:
                logger.algo_logger.debug("Cluster #" + str(index + 1) + ": " +
                                         str(cluster_weight) + " Invalid.")
        if count == 0:
            # ROBUSTNESS FIX: the original divided by zero here when no
            # cluster was usable.
            logger.algo_logger.debug("Probability: 0")
            return 0
        logger.algo_logger.debug("Probability: " + str((prob / (1.0 * count))))
        return prob / (1.0 * count)
# Exemplo n.º 3 (Example no. 3 — scraped sample header; vote count: 0)
 def verify_template_L0(self, data):
     """Score *data* against the unweighted etalon template.

     Each cluster contributes the percentage of its descriptors that have
     a symmetric (mutual) kNN match with the corresponding data cluster,
     weighted by the cluster's share of the whole template.

     :param data: dict with 'path' and a 'clusters' list
     :return: weighted match probability (percent)
     """
     knn = 2
     matcher = Matcher(matcherForDetector(self.kodsettings.detector_type))
     prob = 0
     logger.algo_logger.debug("Image: " + data['path'])
     logger.algo_logger.debug("Template size: ")
     # Total descriptor count of the template: denominator of each weight.
     summ = sum(len(cl) if cl is not None else 0 for cl in self._etalon)
     for idx, template_cluster in enumerate(self._etalon):
         sample_cluster = data['clusters'][idx]
         if template_cluster is None or len(template_cluster) < knn:
             logger.algo_logger.debug("Cluster #" + str(idx + 1) + ": " + str(-1)
                                      + " Invalid. (Weight: 0)")
             continue
         if sample_cluster is None or len(sample_cluster) < knn:
             logger.algo_logger.debug("Cluster #" + str(idx + 1) + ": " + str(len(self._etalon[idx]))
                                      + " Positive: 0 Probability: 0 (Weight: " +
                                      str(len(template_cluster) / (1.0 * summ)) + ")")
             continue
         if len(template_cluster) > 0 and len(sample_cluster) > 0:
             dtype = dtypeForDetector(self.kodsettings.detector_type)
             forward = matcher.knnMatch(listToNumpy_ndarray(template_cluster, dtype),
                                        listToNumpy_ndarray(sample_cluster, dtype), k=knn)
             backward = matcher.knnMatch(listToNumpy_ndarray(sample_cluster, dtype),
                                         listToNumpy_ndarray(template_cluster, dtype), k=knn)
             # A forward match survives only when some backward match points
             # straight back at it.
             back_matches = list(itertools.chain(*backward))
             mutual = []
             for m in itertools.chain(*forward):
                 for n in back_matches:
                     if m.queryIdx == n.trainIdx and m.trainIdx == n.queryIdx:
                         mutual.append(m)
             val = (len(mutual) / (1.0 * len(template_cluster))) * 100
             logger.algo_logger.debug("Cluster #" + str(idx + 1) + ": " + str(len(template_cluster))
                                      + " Positive: " + str(len(mutual)) + " Probability: " + str(val) +
                                      " (Weight: " + str(len(template_cluster) / (1.0 * summ)) + ")")
             prob += (len(template_cluster) / (1.0 * summ)) * val
         else:
             logger.algo_logger.debug("Cluster #" + str(idx + 1) + ": " + str(len(template_cluster))
                                      + " Invalid.")
     logger.algo_logger.debug("Probability: " + str(prob))
     return prob
    def update_hash_templateL1(self, data):
        """
        Merge *data* into the weighted etalon template.

        Each etalon cluster holds (descriptor, weight) pairs.  The new
        image's clusters are kNN-matched against every other image already
        in the database; for each match group the best candidate has the
        weights of both endpoint descriptors incremented in the etalon
        (descriptors not yet present are added with weight 1).

        max_weight = se*sum(i=1,k-1: 1+2*i) + k*(n-k)*se,
        where
            n - count of images,
            k - count of identical matches, k <= n,
            se - single estimate, I used se=1

        :param data: dict with 'path' and a 'clusters' list
        :return: None (self._etalon is updated in place)
        """
        knn = 5

        # First image in the database: seed every cluster with weight 1.
        if len(self._database) == 1:
            self._etalon = [[] if cluster is None else [(desc, 1)
                                                        for desc in cluster]
                            for cluster in data['clusters']]
        else:
            matcher = Matcher(
                matcherForDetector(self.kodsettings.detector_type))

            for index, et_cluster in enumerate(self._etalon):
                dt_cluster = data['clusters'][index]
                # Skip new-image clusters too small to yield knn matches.
                # (The len == 0 test is subsumed by len < knn.)
                if dt_cluster is None or len(
                        dt_cluster) == 0 or len(dt_cluster) < knn:
                    continue

                for obj in self._database:
                    # Never match the new image against itself.
                    if data['path'] == obj['path']:
                        continue

                    ob_cluster = obj['clusters'][index]
                    if ob_cluster is None or len(
                            ob_cluster) == 0 or len(ob_cluster) < knn:
                        continue

                    dtype = dtypeForDetector(self.kodsettings.detector_type)
                    # One-way kNN match: new image's cluster -> stored one.
                    matches1 = matcher.knnMatch(
                        listToNumpy_ndarray(dt_cluster, dtype),
                        listToNumpy_ndarray(ob_cluster, dtype),
                        k=knn)

                    for v in matches1:
                        if len(v) >= 1:
                            if v[0].distance == 0:
                                # Exact match: take it directly and avoid
                                # dividing by a zero distance below.
                                best = v[0]
                            else:
                                # Otherwise prefer the candidate whose target
                                # descriptor already carries the highest
                                # weight-per-distance in the etalon (-1 when
                                # the descriptor is not present yet).
                                best = max(
                                    v,
                                    key=(lambda m: next(
                                        (c / (1.0 * m.distance)
                                         for (d, c) in et_cluster
                                         if numpy.array_equal(
                                             d, ob_cluster[m.trainIdx])), -1)))
                            ob_is = False
                            dt_is = False
                            new_cluster = []
                            # Bump the weight of both endpoints of the best
                            # match, remembering whether each was found.
                            for d, c in et_cluster:
                                if numpy.array_equal(
                                        d, ob_cluster[best.trainIdx]):
                                    c += 1
                                    ob_is = True
                                if numpy.array_equal(
                                        d, dt_cluster[best.queryIdx]):
                                    c += 1
                                    dt_is = True
                                new_cluster.append((d, c))
                            # Descriptors not yet in the cluster enter with
                            # weight 1.
                            if not ob_is:
                                new_cluster.append(
                                    (ob_cluster[best.trainIdx], 1))
                            if not dt_is:
                                new_cluster.append(
                                    (dt_cluster[best.queryIdx], 1))
                            # Rebind so the next match group sees the
                            # updated weights.
                            et_cluster = new_cluster
                    self._etalon[index] = et_cluster