def _generate_candidate(self, inputs, targets):
    """Train a single candidate hidden node with PSO and return the winner.

    The candidate's input weights are optimised to maximise the correlation
    between its activation and the network's current residual errors, with
    an L1 penalty on the activations as a regulariser.
    """
    residuals = self._get_errors(inputs, targets)

    # Snapshot the non-output activations per sample once up front: they do
    # not change while the candidate's own parameters are being searched.
    frozen_activations = []
    for sample in inputs:
        self._feed_forward_hidden_nodes(sample)
        frozen_activations.append(
            numpy.copy(self._activations[:self._non_output_nodes()]))

    l1_coef = 0.01 / self._non_output_nodes()

    def candidate_outputs(params):
        # Candidate node's activation for every training sample.
        return [self._get_output_node_activation_from_activations(params, act)
                for act in frozen_activations]

    def cost_func(parameters):
        outputs = candidate_outputs(parameters)
        # Negated total correlation (PSO minimises) plus the L1 penalty.
        return (-sum(CascadeNet._real_correlations(outputs, residuals))
                + l1_coef * numpy.sum(numpy.abs(outputs)))

    parameters, error = particle_swarm_optimize(
        cost_func, self._non_output_nodes(), self.train_candidates_max_epochs,
        stopping_error=-sys.float_info.max,
        max_iterations_without_improvement=60,
        parameter_init=self.weight_initialization_func)
    print("best candidate had score %s" % (-error / (len(self._output_connections))))

    winner = _CandidateNode(parameters)
    winner.activations = candidate_outputs(parameters)
    winner.score = -error
    return winner
def test_f6(self):
    """PSO should drive Schaffer's F6 function close to its global minimum."""
    def f6(parameters):
        # Schaffer F6, shifted so the global minimum value is 0 at the origin.
        px, py = parameters[0], parameters[1]
        square_sum = (px * px) + (py * py)
        sin_part = sin(sqrt(square_sum))
        numerator = sin_part * sin_part - 0.5
        damping = 1.0 + 0.001 * square_sum
        denominator = damping * damping
        return 1 - (0.5 - (numerator / denominator))

    result, best = particle_swarm_optimize(f6, 2, 100)
    self.assertLess(result[0], 0.1)
def test_f6(self):
    """The optimizer should locate the minimum of Schaffer's F6 benchmark."""
    def f6(parameters):
        a, b = parameters[0], parameters[1]
        r2 = (a * a) + (b * b)
        # Oscillating term squared, minus the 0.5 offset of the F6 formula.
        oscillation = sin(sqrt(r2)) * sin(sqrt(r2)) - 0.5
        # Quadratic damping envelope.
        envelope = (1.0 + 0.001 * r2) * (1.0 + 0.001 * r2)
        value = 0.5 - (oscillation / envelope)
        return 1 - value

    result, best = particle_swarm_optimize(f6, 2, 100)
    self.assertLess(result[0], 0.1)
def _generate_candidate(self, inputs, targets):
    """Search (via particle swarm) for the best new candidate hidden node.

    Maximises the correlation between the candidate's activation and the
    current output errors, regularised by an L1 term on the activations.
    Returns the winning _CandidateNode with its activations and score set.
    """
    errors = self._get_errors(inputs, targets)

    # Cache each sample's pre-output activations; the existing network is
    # frozen during candidate training, so one forward pass per sample is enough.
    cached = []
    for sample in inputs:
        self._feed_forward_hidden_nodes(sample)
        cached.append(numpy.copy(self._activations[:self._non_output_nodes()]))

    regularizer_coef = 0.01 / self._non_output_nodes()

    def outputs_for(params):
        # One candidate activation per training sample.
        return [self._get_output_node_activation_from_activations(params, a)
                for a in cached]

    def cost_func(parameters):
        candidate_activations = outputs_for(parameters)
        correlation = sum(CascadeNet._real_correlations(candidate_activations, errors))
        penalty = regularizer_coef * numpy.sum(numpy.abs(candidate_activations))
        # Negate correlation: the swarm minimises, we want it maximised.
        return -correlation + penalty

    parameters, error = particle_swarm_optimize(
        cost_func, self._non_output_nodes(), self.train_candidates_max_epochs,
        stopping_error=-sys.float_info.max,
        max_iterations_without_improvement=60,
        parameter_init=self.weight_initialization_func)
    print("best candidate had score %s" % (-error / (len(self._output_connections))))

    winner = _CandidateNode(parameters)
    winner.activations = outputs_for(parameters)
    winner.score = -error
    return winner
size = 20 omega = 0.7298 phip = 1.1959 phin = 1.7959 clamp = 0.5 it = None af = 0.5 cluster = (1.0, 0.0) topology = None constraint = None # optimize try: index = 1 for best, F in pso.particle_swarm_optimize(fitness, 4 * len(cameras), bounds, size, omega, phip, phin, clamp, it, af, cluster, topology_type=topology, constraint_type=constraint): print index, '%.4f' % F i = 0 for camera in cameras: x, y, rho, eta = best[4 * i: 4 * (i + 1)] ex.model[camera].pose = pose_from_dp(x, y, z, rho, eta) if args.vis: ex.model[camera].update_visualization() i += 1 index += 1 except KeyboardInterrupt: pass for cam in cameras: try:
def test_simplest(self):
    """A one-dimensional cost equal to the parameter itself should be driven small."""
    def identity_cost(candidate):
        return candidate[0]

    result, best = particle_swarm_optimize(identity_cost, 1, 100)
    self.assertLess(result[0], 0.1)
# create transport context
transport = RangeModel.LinearTargetTransport(ex.model)

# define fitness function
def fitness(particle):
    """Score a camera layout; -inf marks an infeasible (out-of-bounds) layout.

    Each camera consumes four consecutive particle components (x, h, d, beta).
    """
    for idx, cam in enumerate(cameras):
        x, h, d, beta = particle[4 * idx: 4 * (idx + 1)]
        # Reject layouts whose distance falls outside this camera's LUT range.
        if d < lut[idx].bounds[0] or d > lut[idx].bounds[1]:
            return -float('inf')
        modify_camera(ex.model, cam[0], lut[idx], x, h, d, beta)
    coverage = ex.model.range_coverage(ex.tasks['scan'], transport)
    return ex.model.performance(ex.tasks['scan'], coverage=coverage)

# load visualization
ex.start()

# optimize
try:
    swarm = pso.particle_swarm_optimize(
        fitness, 4 * len(cameras), bounds, args.size, args.omega, args.phip,
        args.phin, args.clamp, args.it, args.af, args.cluster,
        topology_type=args.topology, constraint_type=args.constraint)
    for best, F in swarm:
        print('%g' % F)
        # Apply the incumbent best layout and refresh the display.
        for c, camera in enumerate(cameras):
            modify_camera(ex.model, camera[0], lut[c], *best[4 * c: 4 * (c + 1)])
            ex.model[camera[0]].update_visualization()
except KeyboardInterrupt:
    pass