Example #1
    def search_dataset(self, query_point, number_of_results, preset_name):
        preset_name = str(preset_name)
        query_point = str(query_point)
        logger.debug("NN search for point with name %s (preset = %s)" % (query_point, preset_name))
        size = self.original_dataset.size()
        if size < SIMILARITY_MINIMUM_POINTS:
            msg = "Not enough datapoints in the dataset (%s < %s)." % (size, SIMILARITY_MINIMUM_POINTS)
            logger.debug(msg)
            return {"error": True, "result": msg}
            # raise Exception('Not enough datapoints in the dataset (%s < %s).' % (size, SIMILARITY_MINIMUM_POINTS))

        if query_point.endswith(".yaml"):
            # The point doesn't exist in the dataset, so load it as a
            # temporary point, apply the dataset's transformation history
            # to it, and search with the mapped point
            p = Point()
            p.load(query_point)
            p1 = self.original_dataset.history().mapPoint(p)
            similar_sounds = self.view.nnSearch(p1, self.metrics[preset_name]).get(int(number_of_results))
        else:
            if not self.original_dataset.contains(query_point):
                msg = "Sound with id %s doesn't exist in the dataset." % query_point
                logger.debug(msg)
                return {"error": True, "result": msg}
                # raise Exception("Sound with id %s doesn't exist in the dataset." % query_point)

            similar_sounds = self.view.nnSearch(query_point, self.metrics[preset_name]).get(int(number_of_results))

        return {"error": False, "result": similar_sounds}
Example #2
    def add_point(self, point_location, point_name):
        if self.original_dataset.contains(str(point_name)):
            self.original_dataset.removePoint(str(point_name))
        try:
            p = Point()
            p.load(str(point_location))
            p.setName(str(point_name))
            self.original_dataset.addPoint(p)
            size = self.original_dataset.size()
            logger.debug("Added point with name %s. Index has now %i points." % (str(point_name), size))
        except Exception as e:
            # Recompute the size here: if the exception was raised before the
            # try block assigned it, referencing it would raise a NameError
            size = self.original_dataset.size()
            msg = "Point with name %s could NOT be added (%s). Index now has %i points." % (str(point_name), e, size)
            logger.debug(msg)
            return {"error": True, "result": msg}

        # If adding this point brings the dataset up to the minimum number of points needed
        # for similarity, prepare and normalize the dataset, save the index, and create the
        # view and distance metrics. This should almost never happen: only the first time
        # the similarity server starts, when no index exists yet and the first 2000 points
        # are added.
        if size == SIMILARITY_MINIMUM_POINTS:
            self.__prepare_original_dataset()
            self.__normalize_original_dataset()
            self.save_index(msg="(reaching 2000 points)")

            # build metrics for the different similarity presets
            self.__build_metrics()
            # create view
            self.view = View(self.original_dataset)

        return {"error": False, "result": True}
Example #3
import json

import yaml
from gaia2 import Point  # Gaia's Python bindings


def load_point(signame):
    """Load point data from a JSON file into a Gaia Point."""
    point = Point()
    with open(signame, 'r') as sig:
        jsonsig = json.load(sig)
        # Drop the tags metadata before converting
        if jsonsig.get('metadata', {}).get('tags'):
            del jsonsig['metadata']['tags']
        # Point.loadFromString() expects YAML, so re-serialize the JSON data
        yamlsig = yaml.dump(jsonsig)
    point.loadFromString(yamlsig)
    return point
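Usage is straightforward once the imports above are in place (the file name is a placeholder for a JSON analysis file):

point = load_point("123456.json")
print(point.layout().descriptorNames())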
Example #4
File: pca.py Project: DomT4/gaia
def PCA(x):
    # Wrap each input vector in a Point with a single real-valued descriptor 'x'
    points = []
    layout = PointLayout()
    layout.add('x', RealType)

    for i, l in enumerate(x):
        p = Point()
        p.setName('p%d' % i)
        p.setLayout(layout)
        p['x'] = l
        points.append(p)

    ds = DataSet()
    ds.addPoints(points)

    # fixlength marks the descriptors as fixed-size, which the pca transform requires
    ds = transform(ds, 'fixlength')
    ds = transform(ds, 'pca', {'dimension': len(x[0]), 'resultName': 'pca'})

    # Collect the projected coordinates of every point
    result = []
    for p in ds.points():
        result.append(p['pca'])

    return result
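A quick sketch with toy data; because the 'dimension' parameter is len(x[0]), the projection keeps all dimensions (a rotation onto the principal axes), and every input vector must have the same length:

data = [
    [1.0, 2.0, 3.0],
    [2.0, 4.1, 5.9],
    [0.5, 1.1, 1.4],
]
for projected in PCA(data):
    print(projected)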
Example #5
    def query_dataset(self, query_parameters, number_of_results):

        size = self.original_dataset.size()
        if size < SIMILARITY_MINIMUM_POINTS:
            msg = "Not enough datapoints in the dataset (%s < %s)." % (size, SIMILARITY_MINIMUM_POINTS)
            logger.debug(msg)
            return {"error": True, "result": msg}
            # raise Exception('Not enough datapoints in the dataset (%s < %s).' % (size, SIMILARITY_MINIMUM_POINTS))

        trans_hist = self.original_dataset.history().toPython()
        layout = self.original_dataset.layout()

        # Get the normalization coefficients needed to transform the input data
        # (taken from the last transformation in the history that was a normalization)
        coeffs = None
        for i in range(0, len(trans_hist)):
            if trans_hist[-(i + 1)]["Analyzer name"] == "normalize":
                coeffs = trans_hist[-(i + 1)]["Applier parameters"]["coeffs"]
                break

        ##############
        # PARSE TARGET
        ##############

        # Transform the input params to the normalized feature space and add them
        # to a query point. If no params are specified in the target, the point is
        # left empty (and probably random sounds are returned).
        q = Point()
        q.setLayout(layout)
        feature_names = []
        for param in query_parameters["target"].keys():
            # Only add numerical parameters; non-numerical ones (like key) are
            # only used as filters. Also guard against coeffs being None, which
            # happens when no normalize step was found in the history.
            if coeffs and param in coeffs:
                feature_names.append(str(param))
                value = query_parameters["target"][param]
                a = coeffs[param]["a"]
                b = coeffs[param]["b"]
                if len(a) == 1:
                    norm_value = a[0] * value + b[0]
                else:
                    norm_value = [a[i] * value[i] + b[i] for i in range(len(a))]
                q.setValue(str(param), norm_value)

        ##############
        # PARSE FILTER
        ##############

        filter = ""
        # If some filter has been specified...
        if query_parameters["filter"]:
            if type(query_parameters["filter"][0:5]) == str:
                filter = query_parameters["filter"]
            else:
                filter = self.parse_filter_list(query_parameters["filter"], coeffs)

        #############
        # DO QUERY!!!
        #############

        logger.debug(
            "Content based search with target: " + str(query_parameters["target"]) + " and filter: " + str(filter)
        )
        metric = DistanceFunctionFactory.create("euclidean", layout, {"descriptorNames": feature_names})
        # Depending on the Gaia version, the filter argument must go before or after
        # the metric. For the Gaia version currently (Sep 2012) in Freesound it is:
        # results = self.view.nnSearch(q, str(filter), metric).get(int(number_of_results))
        results = self.view.nnSearch(q, metric, str(filter)).get(int(number_of_results))

        return {"error": False, "result": results}