def test_gaussian_transfer():
    """Gaussian transfer matches known values for default and custom variance."""
    points = numpy.array([-1.0, 0.0, 0.5, 1.0])

    # Default variance (1.0)
    expected_default = [0.367879, 1.0, 0.778801, 0.367879]
    assert helpers.approx_equal(calculate.gaussian(points), expected_default)

    # Narrower bell with variance=0.5
    expected_narrow = [0.135335, 1.0, 0.606531, 0.135335]
    assert helpers.approx_equal(
        calculate.gaussian(points, variance=0.5), expected_narrow)
def test_dgaussian_matrix():
    """Numerically verify dgaussian against gaussian on a random 2-D matrix."""
    shape = (random.randint(1, 10), random.randint(1, 10))
    helpers.check_gradient(
        calculate.gaussian,
        lambda X: calculate.dgaussian(X, calculate.gaussian(X)),
        f_arg_tensor=numpy.random.random(shape),
        f_shape='lin')
def activate(self, inputs):
    """Return the model outputs for given inputs."""
    # Similarity to each cluster center: gaussian of the SOM distances
    self._similarities = calculate.gaussian(
        self._som.activate(inputs), self._variance)

    # Linear readout: similarities weighted by the weight matrix
    output = numpy.dot(self._similarities, self._weight_matrix)

    if not self._scale_by_similarity:
        return output

    # Normalize by total similarity so the readout is a weighted average
    self._total_similarity = numpy.sum(self._similarities)
    output /= self._total_similarity
    return output
def _move_neurons(self, input_vec):
    """Pull the competition winner and its neighborhood toward input_vec."""
    # Competition: find the neuron closest to the input
    winner = self._get_closest()

    # Winner and neighbors all move; the move rate decays with distance
    # from the winner via a gaussian of the neighbor offset.
    for idx in range(winner - self.neighborhood,
                     winner + self.neighborhood + 1):
        if not (0 <= idx < self._size[0]):
            continue  # neighbor index falls outside the neuron array

        offset = float(abs(idx - winner))
        modifier = calculate.gaussian(offset, self.neighbor_move_rate)
        rate = modifier * self.move_rate
        self._weights[idx] += rate * (input_vec - self._weights[idx])
def activate(self, input_tensor):
    """Return the model outputs for given input_tensor."""
    # Similarity to each cluster center: gaussian of clustering distances
    self._similarity_tensor = calculate.gaussian(
        self._clustering_model.activate(input_tensor), self._variance)

    if self._scale_by_similarity:
        totals = numpy.sum(self._similarity_tensor, axis=-1, keepdims=True)
        self._similarity_tensor /= totals

        # Rows with zero total similarity became 0/0 (nan);
        # replace them with a uniform vector.
        nan_mask = numpy.isnan(self._similarity_tensor)
        self._similarity_tensor[nan_mask] = (
            1.0 / self._similarity_tensor.shape[-1])

    # Linear readout of the similarities, plus bias
    output = numpy.dot(
        self._similarity_tensor, self._weight_matrix) + self._bias_vec
    return output
def activate(self, inputs):
    """Return the model outputs for given inputs."""
    # Similarity of the input to every stored input
    # (gaussian of each stored distance)
    similarities = calculate.gaussian(
        _distances(inputs, self._input_matrix), self._variance)

    # Sum the stored targets, each row scaled by its similarity
    output_vec = _weighted_sum_rows(self._target_matrix, similarities)

    if self._scale_by_similarity:
        output_vec /= numpy.sum(similarities)

    if self._scale_by_class:
        # Dividing by per-class totals (sum of targets) counteracts
        # unbalanced classes; protvecdiv yields 0 where a total is 0
        output_vec[:] = calculate.protvecdiv(
            output_vec, self._target_totals)

    # Normalize into probabilities before returning
    output_vec /= sum(output_vec)
    return output_vec
def test_dgaussian():
    """Numerically verify dgaussian against the gaussian transfer."""
    def analytic_gradient(pt):
        # dgaussian takes the point and the gaussian value at that point
        return calculate.dgaussian(pt, calculate.gaussian(pt))

    helpers.check_gradient(
        calculate.gaussian, analytic_gradient, f_shape='lin')
def __call__(self, input_vec):
    """Apply the gaussian transfer to input_vec with the configured variance."""
    return calculate.gaussian(input_vec, variance=self._variance)