Example #1
0
def heat_kernel(samples, kernel_width=.5, **kwargs):
    """
  Uses a heat (Gaussian) kernel for computing similarities in the whole array

    - samples: array with one sample per row
    - kernel_width: bandwidth of the heat kernel
    - kwargs: ignored, accepted for interface uniformity

  Returns exp(-||x_i - x_j||^2 / kernel_width) for every pair of samples.
  """
    from tools import dist2hd
    distances = dist2hd(samples, samples)**2

    # Bug fix: the original divided by an undefined name `parameter`
    # (NameError at runtime); the bandwidth is the `kernel_width` argument.
    return numpy.exp(-distances / kernel_width)
Example #2
0
 def __call__(self, parameters):
     """
 Cost of a flattened parameter vector: sum of squared differences between
 the current pairwise distances and the target distances, restricted to
 pairs whose target distance is below self.max_dist.
 """
     points = parameters.reshape((self.len, -1))
     actual = dist2hd(points, points)
     # Only pairs with a small enough *target* distance contribute.
     close_enough = self.distances < self.max_dist
     residuals = (actual - self.distances)**2
     return numpy.sum(residuals * close_enough)
Example #3
0
def reduct(reduction, function, samples, nb_coords, **kwargs):
    """
  Runs a reduction algorithm on the pairwise Euclidean distances:
    - reduction: the reduction algorithm to apply
    - function: the cost function the algorithm optimizes
    - samples: array with the samples to compress
    - nb_coords: number of coordinates that must be retained
    - kwargs: forwarded to the reduction algorithm
  """
    pairwise = dist2hd(samples, samples)
    return reduction(pairwise, function, nb_coords, **kwargs)
def kneigh(samples, neighbors, **kwargs):
    """
  Creates a list of the nearest neighbors in a K-neighborhood

  For each sample, returns the indices of the `neighbors` closest samples.
  A sample is at distance 0 from itself and is therefore included in its
  own neighborhood.
  """
    pairwise = dist2hd(samples, samples)
    return [numpy.argsort(row)[:neighbors] for row in pairwise]
def parzen(samples, window_size, **kwargs):
    """
  Creates a list of the nearest neighbors in a Parzen window

    - samples: array with one sample per row
    - window_size: radius of the Parzen window
    - kwargs: ignored, accepted for interface uniformity

  Returns, for each sample, the indices of the samples closer than
  window_size (a sample is at distance 0 from itself and is included).
  """
    l = []

    d = dist2hd(samples, samples)

    for dist in d:
        # Bug fix: the original tested `dist < neighbors`, but `neighbors`
        # is undefined in this function (NameError at runtime); the window
        # radius is the `window_size` argument.
        wi = numpy.where(dist < window_size)[0]
        l.append(wi)

    return l
Example #6
0
    def gradient(self, parameters):
        """
    Gradient of this cost function with respect to every coordinate
    """
        points = parameters.reshape((self.len, -1))
        actual = dist2hd(points, points)
        # NOTE(review): this mask is on the *actual* distances
        # (actual < max_dist) while __call__ masks on the target
        # distances -- confirm the asymmetry is intended.
        close_enough = actual < self.max_dist

        grad = numpy.zeros(points.shape)
        rows = itertools.izip(grad, points, actual, self.distances,
                              close_enough)
        for (g, x, d_a, d_r, d_ok) in rows:
            contrib = (d_a - d_r) / d_a * (x - points).T * d_ok
            # The diagonal term yields 0/0 -> nan; those pairs contribute
            # nothing to the gradient.
            contrib[numpy.where(numpy.isnan(contrib))] = 0
            g[:] = numpy.sum(contrib, axis=1)
        return grad.ravel()
Example #7
0
    def gradient1(self, parameters):
        """
    Stochastic gradient of this cost function: contribution of a single,
    randomly drawn sample
    """
        points = parameters.reshape((self.len, -1))
        actual = dist2hd(points, points)
        close_enough = actual < self.max_dist
        picked = numpy.random.randint(0, self.len)

        x = points[picked]
        d_a = actual[picked]
        d_r = self.distances[picked]
        d_ok = close_enough[picked]

        contrib = (d_r - d_a) / d_a * (x - points).T * d_ok
        # The picked sample itself yields 0/0 -> nan; zero it out.
        contrib[numpy.where(numpy.isnan(contrib))] = 0

        # NOTE(review): contrib has shape (dim, len) whereas parameters are
        # laid out (len, dim); verify that .ravel() rather than .T.ravel()
        # matches the ordering the optimizer expects.
        return contrib.ravel()