Example #1
def final_helper(temp_image_path, probe_id, error, callback_code, result,
                 ai_response_type, try_type, ai_code):
    shutil.rmtree(temp_image_path)
    if error is not None:
        retries_count = AlgorithmsDataStore.instance().decrement_int_value(
            REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        if retries_count == 0:
            AlgorithmsDataStore.instance().delete_data(
                key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            logger.debug('Maximum training attempts reached...')
            result = False
            ai_response_type.update(
                dict(status=TRAINING_MAX_RETRIES_STATUS,
                     message=TRAINING_MAX_RETRIES_MESSAGE))
            # _tell_ai_training_results(result, ai_response_type, try_type, ai_code)
        else:
            AlgorithmsDataStore.instance().store_data(
                key=REDIS_UPDATE_TRAINING_KEY % probe_id, error=error)
            result = dict(result=False, error=error)
        AlgorithmsDataStore.instance().store_data(key=REDIS_PROBE_RESULT_KEY %
                                                  callback_code,
                                                  result=result)
        logger.info('Job finished with internal algorithm error %s' % error)
    else:
        AlgorithmsDataStore.instance().delete_data(
            key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
        AlgorithmsDataStore.instance().store_data(key=REDIS_PROBE_RESULT_KEY %
                                                  callback_code,
                                                  result=result)
    tell_ai_training_results(result, ai_response_type, try_type, ai_code)
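The retry gating above hinges on an atomic Redis decrement, so concurrent workers cannot double-count attempts. A minimal standalone sketch of the same pattern with the redis-py client (the key format and limit are illustrative, not taken from the original):

import redis

r = redis.StrictRedis()
RETRIES_KEY = 'training:retries:%s'  # hypothetical key format
MAX_RETRIES = 3

def init_retries(probe_id):
    # Seed the counter once, before the first training attempt.
    r.set(RETRIES_KEY % probe_id, MAX_RETRIES)

def attempt_failed(probe_id):
    # DECR is atomic; the worker that sees 0 performs the cleanup,
    # mirroring the TRAINING_MAX_RETRIES branch above.
    remaining = r.decr(RETRIES_KEY % probe_id)
    if remaining <= 0:
        r.delete(RETRIES_KEY % probe_id)
        return False  # give up
    return True  # another retry is allowed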
Example #2
def importSources(self, source):
    logger.debug("Database loading started...")
    self._etalon = ClusterL0Estimation.importDatabase(
        source.get('data', dict()))
    # self._etalon = SelfGraphEstimation.importDatabase(source.get('data', dict()))
    self._prob = source.get('threshold', 100)
    logger.info('Dynamic threshold: %f' % self._prob)
    logger.debug("Database loading finished.")
Example #3
def verify_template_L0(self, data):
    estimation = ClusterL0Estimation(self.kodsettings.detector_type,
                                     knn=2,
                                     max_distance=0.9,
                                     mode=DEFAULT_MODE)
    # estimation = SelfGraphEstimation(self.kodsettings.detector_type, knn=2)
    logger.debug("Image: " + data['path'])
    return estimation.estimate_verification(data['clusters'], self._etalon)
Example #4
    def store_vectors(self, hash_data_list, data, data_id=None, callback=None):
        user_hash_data = []
        local_buckets_list = []
        hash_keys_data = {}
        ext_data_key = str(data)
        if data_id is not None:
            ext_data_key += ':' + str(data_id)
        for hash_name, hash_buckets in hash_data_list:
            for key, value in hash_buckets:
                bucket_key = HASH_BUCKET_KEY_FORMAT % (hash_name, key)
                if bucket_key not in local_buckets_list:
                    local_buckets_list.append(bucket_key)
                    user_hash_data.append((ext_data_key, str(bucket_key)))
                values = hash_keys_data.get(str(bucket_key), [])
                contains = False
                for v in values:
                    if isEqual(v[0], numpy_ndarrayToList(value)) and str(
                            v[1]) == str(data):
                        contains = True
                        break
                if not contains:
                    values.append((numpy_ndarrayToList(value), str(data)))
                hash_keys_data[str(bucket_key)] = values

        user_records = select_records_by_ids(self._user_hash_table_name,
                                             [ext_data_key], True)
        if len(user_records['records']) > 0:
            loaded_buckets = []
            for record in user_records['records']:
                if str(record['bucket_key']) not in loaded_buckets:
                    loaded_buckets.append(str(record['bucket_key']))
            hash_buckets = select_records_by_ids(self._hash_data_table_name,
                                                 loaded_buckets)
            delete_data(self._user_hash_table_name, [ext_data_key])
            for key, value in hash_buckets.iteritems():
                hash_data = deserialize(value['hash_data'])
                logger.debug(hash_data)
                hash_buckets[key] = [v for v in hash_data if v[1] != str(data)]
            for key, value in hash_keys_data.iteritems():
                values = hash_buckets.get(key, [])
                for v in value:
                    values.append(v)
                hash_buckets[key] = values
            if len(hash_buckets.keys()) > 0:
                remove_keys_list = []
                hash_buckets_list = []
                for key, value in hash_buckets.iteritems():
                    if len(value) > 0:
                        hash_buckets_list.append((str(key), serialize(value)))
                    else:
                        remove_keys_list.append(str(key))
                if len(remove_keys_list) > 0:
                    delete_data(self._hash_data_table_name, remove_keys_list)
                if len(hash_buckets_list) > 0:
                    create_records(self._hash_data_table_name,
                                   tuple(hash_buckets_list), True)
        if len(user_hash_data) > 0:
            create_records(self._user_hash_table_name, tuple(user_hash_data))
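HASH_BUCKET_KEY_FORMAT is not defined in these snippets; based on its two-argument use here and in get_bucket below, it is presumably a simple composite-key template along these lines:

# Assumed format -- the real constant is not shown in these examples.
HASH_BUCKET_KEY_FORMAT = '%s:%s'

bucket_key = HASH_BUCKET_KEY_FORMAT % ('lsh_hash_0', '101101')
# -> 'lsh_hash_0:101101'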
Example #5
def add_cascade(self, path):
    self._relative_cl.append(path)
    abs_path = os.path.join(APP_ROOT, "../../", path)
    logger.debug("####### %s" % abs_path)
    if os.path.exists(abs_path):
        self.__cascades.append(cv2.CascadeClassifier(abs_path))
        self._cascades_list.append(abs_path)
    else:
        logger.debug("Cascade file %s does not exist." % abs_path)
Example #6
def get_bucket(self, hash_name, bucket_key):
    redis_key = HASH_BUCKET_KEY_FORMAT % (hash_name, bucket_key)
    logger.debug(redis_key)
    data = self._ihr_redis.get_data(redis_key)
    logger.debug(data)
    if data is not None:
        data = ast.literal_eval(data)
        return data['data']
    else:
        return []
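get_bucket parses the raw Redis value with ast.literal_eval, so the stored payload must be a Python-literal string wrapping the bucket contents under a 'data' key. A quick self-contained check of that assumed payload shape (the values are illustrative):

import ast

raw = "{'data': [([0.1, 0.2], 'user-42')]}"
parsed = ast.literal_eval(raw)
assert parsed['data'][0][1] == 'user-42'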
Example #7
    def detect(self, data):
        new_images = []
        for obj in data:
            img, rects = self._detector.detect(obj['data'])
            if len(rects) <= 0:
                logger.debug(
                    "optimalROIDetection: face was not found on image %s" %
                    obj['name'])
                continue
            obj['data'] = img
            obj['roi_rect'] = rects
            new_images.append(obj)
        images = new_images
        for obj in images:
            image = obj['data']
            temp = image[self._d:image.shape[0] - self._d,
                         self._d:image.shape[1] - self._d]
            res = cv2.minMaxLoc(
                cv2.matchTemplate(images[0]['data'], temp,
                                  cv2.cv.CV_TM_CCORR_NORMED))
            obj['displacement'] = (self._d - res[3][0], self._d - res[3][1])

        rects = []
        for image in images:
            di = image['displacement']
            for rec in image['roi_rect']:
                if len(rec) == 4:
                    rects.append(
                        [rec[0] - di[0], rec[1] - di[1], rec[2], rec[3]])
        optimal_rect = mergeRectangles(rects)
        if len(optimal_rect) == 4:
            if 1.3 * optimal_rect[2] > optimal_rect[3]:
                diff = 1.3 * optimal_rect[2] - optimal_rect[3]
                optimal_rect[1] -= int(0.3 * diff)
                optimal_rect[3] += int(0.7 * diff)
            for image in images:
                di = image['displacement']
                res_roi = [
                    optimal_rect[0] + di[0], optimal_rect[1] + di[1],
                    optimal_rect[2], optimal_rect[3]
                ]
                image['data'] = getROIImage(image['data'], res_roi)
        else:
            new_images = []
            for image in images:
                rects = self._face_classifier.detectAndJoin(
                    image['data'], False, RectsFiltering)
                if len(rects) <= 0:
                    continue
                image['data'] = getROIImage(image['data'], rects)
                new_images.append(image)
            images = new_images
        return images
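The displacement step in the middle of detect() is plain normalized cross-correlation: the inner region of each frame is matched against the first frame, and the correlation peak gives the inter-frame shift. A self-contained sketch of just that step on synthetic data (using the modern cv2.TM_CCORR_NORMED name rather than the legacy cv2.cv constant):

import numpy as np
import cv2

first = np.random.randint(0, 255, (120, 120)).astype(np.uint8)
second = np.roll(first, 5, axis=1)  # shift the second frame 5 px right
d = 10
temp = second[d:second.shape[0] - d, d:second.shape[1] - d]
res = cv2.minMaxLoc(cv2.matchTemplate(first, temp, cv2.TM_CCORR_NORMED))
max_loc = res[3]  # (x, y) of the best match in the first frame
displacement = (d - max_loc[0], d - max_loc[1])  # (5, 0) for this shift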
Example #8
def update_database(self):
    try:
        self._prob = 100
        for data in self._database:
            temp_prob = self.verify_template_L0(data)
            if temp_prob < self._prob:
                self._prob = temp_prob
        logger.debug('Database threshold: %s' % self._prob)
    except Exception as e:
        logger.exception(e)
        self._prob = sys.maxint
    return self._prob > self.kodsettings.probability
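update_database keeps the minimum self-match score over the whole enrolment set as the dynamic threshold, so training only succeeds if even the weakest template clears the configured probability. In other words (values illustrative):

scores = [87.5, 92.0, 79.3]   # verify_template_L0 over self._database
threshold = min(scores)       # 79.3, kept as self._prob
ok = threshold > 75.0         # 75.0 stands in for kodsettings.probability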
Example #9
    def apply(self, data):
        """

        :param data: dictionary = {
                'temp_image_path': path to the temporary image storage,
                'probe_id': probe identifier,
                'error': error message,
                'callback_code': code of callback function,
                'result': status of result,
                'ai_response_type': type of AI response,
                'try_type': type of try,
                'ai_code': AI response code
            }
        :return:
        """
        temp_image_path = data['temp_image_path']
        error = data['error']
        probe_id = data['probe_id']
        ai_response_type = data['ai_response_type']
        callback_code = data['callback_code']
        result = data['result']
        try_type = data['try_type']
        ai_code = data['ai_code']

        shutil.rmtree(temp_image_path)
        if error is not None:
            retries_count = AlgorithmsDataStore.instance().decrement_int_value(
                REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            if retries_count == 0:
                AlgorithmsDataStore.instance().delete_data(
                    key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
                logger.debug('Maximum training attempts reached...')
                result = False
                ai_response_type.update({
                    'status': TRAINING_MAX_RETRIES_STATUS,
                    'message': TRAINING_MAX_RETRIES_MESSAGE
                })
            else:
                AlgorithmsDataStore.instance().store_data(
                    key=REDIS_UPDATE_TRAINING_KEY % probe_id, error=error)
                result = dict(result=False, error=error)
            AlgorithmsDataStore.instance().store_data(
                key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
            logger.info('Job finished with internal algorithm error %s' %
                        error)
        else:
            AlgorithmsDataStore.instance().delete_data(
                key=REDiS_TRAINING_RETRIES_COUNT_KEY % probe_id)
            AlgorithmsDataStore.instance().store_data(
                key=REDIS_PROBE_RESULT_KEY % callback_code, result=result)
        tell_ai_training_results(result, ai_response_type, try_type, ai_code)
        return None
Example #10
def result_handling(result,
                    probe_id,
                    temp_image_path,
                    settings,
                    callback_code,
                    error=None):
    if error is not None or RedisStorage.persistence_instance().exists(
            key=REDIS_JOB_RESULTS_ERROR % callback_code):
        if not RedisStorage.persistence_instance().exists(
                key=REDIS_JOB_RESULTS_ERROR % callback_code):
            result = dict(verified=False, error=error)
            RedisStorage.persistence_instance().store_data(
                key=REDIS_JOB_RESULTS_ERROR % callback_code,
                ex=300,
                result=result)
            store_verification_results(result=result,
                                       callback_code=callback_code,
                                       probe_id=probe_id)
        if error is not None:
            logger.info('Job finished with internal algorithm error %s' %
                        error)
        else:
            logger.info('Job result was not stored because a '
                        'job_results_error key already exists.')
    else:
        RedisStorage.persistence_instance().append_value_to_list(
            key=REDIS_PARTIAL_RESULTS_KEY % callback_code, value=result)
        results_counter = RedisStorage.persistence_instance().decrement_int_value(
            REDIS_RESULTS_COUNTER_KEY % callback_code)
        if results_counter <= 0:
            gathered_results = RedisStorage.persistence_instance().get_stored_list(
                REDIS_PARTIAL_RESULTS_KEY % callback_code)
            logger.debug('All gathered results for verification job - %s' %
                         gathered_results)
            if results_counter < 0:
                logger.exception(
                    'Results count is less than 0, check worker jobs consistency!'
                )
                result = dict(verified=False)
            else:
                true_count = float(gathered_results.count('True'))
                result = dict(verified=(
                    (true_count / len(gathered_results)) * 100) >= 50)
            store_verification_results(result=result,
                                       callback_code=callback_code,
                                       probe_id=probe_id)
    shutil.rmtree(temp_image_path)
    logger.info('Verification finished for user - %s, with result - %s' %
                (settings.get('userID'), result))
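The final decision is a simple majority vote over the gathered partial results, and because the comparison is >= 50, an exact tie counts as verified:

gathered_results = ['True', 'False', 'True', 'False']
true_count = float(gathered_results.count('True'))
verified = ((true_count / len(gathered_results)) * 100) >= 50
# 50.0 >= 50 -> True: the threshold is inclusive on a tie.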
Example #11
def select_records_by_ids(table_class_name, object_ids, flat_result=False):
    logger.info('Getting records for table class - %s, with object_ids - %s' %
                (table_class_name, object_ids))
    try:
        records = MySQLDataStoreInterface.select_data_by_ids(
            table_name=table_class_name,
            object_ids=object_ids,
            flat_result=flat_result)
        logger.debug('Data: %s' % records)
    except Exception as e:
        logger.exception(e)
        records = dict(error=str(e))
    logger.info('Got records for table class - %s, with object_ids - %s' %
                (table_class_name, object_ids))
    return records
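Note that on failure select_records_by_ids returns dict(error=...) instead of raising, so callers must check for the 'error' key before touching 'records' (the table name and id below are illustrative):

records = select_records_by_ids('UserHashTable', ['user-42:probe'], True)
if 'error' in records:
    raise RuntimeError(records['error'])  # no 'records' key on failure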
Example #12
    def verify_template_L1(self, data):
        knn = 2
        matcher = Matcher(matcherForDetector(self.kodsettings.detector_type))
        count = 0
        prob = 0
        logger.debug("Image: " + data['path'])
        logger.debug("Template size: ")
        for index, et_weight_cluster in enumerate(self._etalon):
            d, c = itertools.izip(*itertools.ifilter(lambda (_, c): c > 0, et_weight_cluster))
            et_cluster = list(d)
            cluster_weight = sum(c)
            dt_cluster = data['clusters'][index]

            if et_cluster is None or dt_cluster is None or len(et_cluster) < knn or len(dt_cluster) < knn:
                continue

            if len(et_cluster) > 0 and len(dt_cluster) > 0:
                dtype = dtypeForDetector(self.kodsettings.detector_type)
                matches1 = matcher.knnMatch(listToNumpy_ndarray(et_cluster, dtype),
                                            listToNumpy_ndarray(dt_cluster, dtype), k=2)
                matches2 = matcher.knnMatch(listToNumpy_ndarray(dt_cluster, dtype),
                                            listToNumpy_ndarray(et_cluster, dtype), k=2)

                ms = [et_cluster[x.queryIdx] for x in itertools.ifilter(
                        lambda (m, n): m.queryIdx == n.trainIdx and m.trainIdx == n.queryIdx, itertools.product(
                            itertools.chain(*matches1), itertools.chain(*matches2)
                        )
                )]
                # sum() takes an iterable, not a mapping function, so the
                # matched weights are extracted with imap before summing.
                c_val = sum(itertools.imap(
                    lambda (m, n): n[1], itertools.ifilter(
                        lambda (m, n): numpy.array_equal(m, n[0]), itertools.product(
                            iter(ms), iter(et_weight_cluster)
                        )
                    )
                ))
                count += 1
                val = (c_val / (1.0 * cluster_weight)) * 100
                logger.debug("Cluster #" + str(index + 1) + ": " + str(cluster_weight)
                             + " Positive: " + str(c_val) + " Probability: " + str(val))
                prob += val
            else:
                logger.debug("Cluster #" + str(index + 1) + ": " + str(cluster_weight) + " Invalid.")
        logger.debug("Probability: " + str((prob / (1.0 * count))))
        return prob / (1.0 * count)
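Each cluster contributes the percentage of its descriptor weight recovered by the cross-checked matches, and the returned score is the unweighted mean over the clusters that were actually compared. For example:

# Cluster 1: 8 of 10 weight matched; cluster 2: 3 of 10 matched.
vals = [(8 / (1.0 * 10)) * 100, (3 / (1.0 * 10)) * 100]
prob = sum(vals) / (1.0 * len(vals))  # 55.0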
Example #13
def store_verification_results(result, callback_code, probe_id):
    if 'error' in result:
        retries_count = AlgorithmsDataStore.instance().decrement_int_value(
            REDIS_VERIFICATION_RETIES_COUNT_KEY % probe_id)
        if retries_count == 0:
            AlgorithmsDataStore.instance().delete_data(
                key=REDIS_VERIFICATION_RETIES_COUNT_KEY % probe_id)
            logger.debug('Max number of verification attempts reached...')
            del result['error']
            result.update({'max_retries': True})
    else:
        AlgorithmsDataStore.instance().delete_data(
            key=REDIS_VERIFICATION_RETIES_COUNT_KEY % probe_id)
    AlgorithmsDataStore.instance().delete_data(key=REDIS_RESULTS_COUNTER_KEY %
                                               callback_code)
    AlgorithmsDataStore.instance().delete_data(key=REDIS_PARTIAL_RESULTS_KEY %
                                               callback_code)
    AlgorithmsDataStore.instance().store_data(key=REDIS_PROBE_RESULT_KEY %
                                              callback_code,
                                              result=result)
Example #14
    def load_data(self,
                  user_ids=None,
                  data_id=None,
                  user_group_id=None,
                  include_only_from=None):
        if user_ids is None:
            user_ids = select_records_by_ids("", [user_group_id], True)
        if len(user_ids) > 0:
            data_user_ids = []
            for user_id in user_ids:
                user_data_id = str(user_id)
                if data_id is not None:
                    user_data_id += ":" + str(data_id)
                data_user_ids.append(user_data_id)
            user_records = select_records_by_ids(self._user_hash_table_name,
                                                 data_user_ids, True)
            logger.debug(user_records)
            if len(user_records['records']) > 0:
                loaded_buckets = []
                for record in user_records['records']:
                    bucket_key = str(record['bucket_key'])
                    if bucket_key in loaded_buckets:
                        continue
                    if (include_only_from is None or
                            self._buckets_hash(bucket_key)
                            in include_only_from):
                        loaded_buckets.append(bucket_key)

                hash_buckets = select_records_by_ids(
                    self._hash_data_table_name, loaded_buckets)
                logger.debug(hash_buckets)
                for key, value in hash_buckets.iteritems():
                    hash_data = deserialize(value['hash_data'])
                    if self._ihr_redis.exists(str(key)):
                        self._ihr_redis.delete_data(str(key))
                    self._ihr_redis.store_data(key=str(key),
                                               **{'data': hash_data})
Example #15
def detect(self, img, as_list=False):
    rects = list()
    gray = grayscale(img)
    if len(self.__cascades) == 0:
        logger.debug("Detection impossible: no cascades loaded.")
        return rects
    for cascade in self.__cascades:
        lrects = cascade.detectMultiScale(
            gray,
            scaleFactor=self.classifierSettings.scaleFactor,
            minNeighbors=self.classifierSettings.minNeighbors,
            minSize=self.classifierSettings.minSize,
            maxSize=self.classifierSettings.maxSize,
            flags=self.classifierSettings.flags)
        logger.debug(lrects)
        if as_list:
            rects += [r for r in lrects]
        else:
            rects.append(lrects)
    logger.debug(rects)
    return rects
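detectMultiScale is the standard OpenCV cascade call; outside the classifierSettings wrapper it can be exercised directly like this (the cascade file path and parameter values are illustrative):

import cv2

cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
img = cv2.imread('face.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5,
                                 minSize=(30, 30))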
Example #16
def error_handler(message):
    logger.debug(message.get('message', None))
    append_error_handle_format(message)
    print_error_handle_output()
Example #17
def importSources(self, source):
    self._etalon = []
    logger.debug("Database loading started...")
    self.importSources_Database(source.get('data', dict()))
    self._prob = source.get('threshold', 100)
    logger.algo_logger.debug("Database loading finished.")
Example #18
def dump(self):
    logger.debug('Cascade Classifier Settings')
    logger.debug('Scale Factor: %f' % self.scaleFactor)
    logger.debug('Minimum Neighbors: %d' % self.minNeighbors)
    logger.debug('Minimum Size: %s' % str(self.minSize))
    logger.debug('Maximum Size: %s' % str(self.maxSize))
Example #19
def detectAndJoin(self, image, as_list=False, algorithm=RectsUnion):
    rects = self.detect(image, as_list)
    if len(rects) == 0:
        logger.debug("ROI was not found for the image")
    return self.joinRectangles(rects, algorithm)
Example #20
def printKeyPoint(keypoint):
    if keypoint is not None:
        logger.debug(keypoint)
        logger.debug("angle=%s" % str(keypoint.angle))
        logger.debug("class_id=%s" % str(keypoint.class_id))
        logger.debug("octave=%s" % str(keypoint.octave))
        logger.debug("point=%s" % str(keypoint.pt))
        logger.debug("response=%s" % str(keypoint.response))
        logger.debug("size=%s" % str(keypoint.size))