class Controller:
    def __init__(self, problem):
        self.load_params()
        self._swarm = Swarm(self._num_particles, problem)

    def iteration(self, problem):
        best_particle = self._swarm.get_best_particle(problem)
        for particle in self._swarm:
            # best_particle = self._swarm.get_best_neighbour(particle, problem)
            particle.update(best_particle, self._w, self._c1, self._c2)
            particle.evaluate(problem)

    def run(self, problem):
        for i in range(self._num_iterations):
            self.iteration(problem)

    def best_particle(self, problem):
        return self._swarm.get_best_particle(problem)

    def load_params(self):
        # read the PSO parameters, one value per line
        with open("params.in") as f:
            self._num_particles = int(f.readline())
            self._num_iterations = int(f.readline())
            self._w = float(f.readline())
            self._c1 = float(f.readline())
            self._c2 = float(f.readline())
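
For reference, the update call above is parameterized by the inertia weight w and the acceleration coefficients c1 and c2; below is a minimal, self-contained sketch of the canonical PSO velocity/position rule those parameters usually drive (an illustration only, not the Particle class used above):

import random

def pso_update(position, velocity, personal_best, global_best, w, c1, c2):
    """Canonical PSO step: the new velocity blends inertia with pulls toward
    the particle's own best position and the swarm's best position."""
    new_velocity, new_position = [], []
    for x, v, p, g in zip(position, velocity, personal_best, global_best):
        r1, r2 = random.random(), random.random()
        nv = w * v + c1 * r1 * (p - x) + c2 * r2 * (g - x)
        new_velocity.append(nv)
        new_position.append(x + nv)
    return new_position, new_velocity
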
Example #2
    def __init__(self):
        super(MainWindow, self).__init__()
        self.swarm = Swarm()

        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.ui.agregar_final_pushButton.clicked.connect(self.click_agregar)
        self.ui.agregar_inicio_pushButton.clicked.connect(
            self.click_agregar_inicio)
        self.ui.mostrar_pushButton.clicked.connect(self.click_mostrar)
        self.ui.actionAbrir.triggered.connect(self.action_abrir_archivo)
        self.ui.actionGuardar.triggered.connect(self.action_guardar_archivo)

        self.ui.ver_grafos_action.triggered.connect(self.grafos)
        self.ui.actionBusqueda.triggered.connect(self.busqueda_grafos)

        self.ui.actionId.triggered.connect(self.action_ordenar_id)
        self.ui.actionDistancia.triggered.connect(
            self.action_ordenar_distancia)
        self.ui.actionVelocidad.triggered.connect(
            self.action_ordenar_velocidad)

        self.ui.mostrar_tabla_pushButton.clicked.connect(self.mostrar_tabla)
        self.ui.buscar_pushButton.clicked.connect(self.buscar_iid)

        self.ui.dibujar_pushButton.clicked.connect(self.dibujar)
        self.ui.limpiar_pushButton.clicked.connect(self.limpiar)
        self.scene = QGraphicsScene()
        self.ui.graphicsView.setScene(self.scene)
def fitness(particle_position):
    """
    Use particle position as parameters for PSO settings, return similarity of
    resultant model to Rohde et al. experimental data. Expects 6-dimensional
    search space.
    """
    global fit_count
    # Constants:
    dimensions = 3
    group_size = 2
    no_groups = 100

    iterations, particle_settings = get_parameters(particle_position)
    particle_settings['respect_boundaries'] = True

    scores = []
    # run psos with these params
    for i, experiment in enumerate(ROHDE_EXPERIMENTS):  # TODO: parallelize
        print ' ', fit_count, i, '\b' * 100,
        swarm = Swarm(dimensions, group_size, no_groups, **particle_settings)
        for groups in swarm.step_until(experiment.game, max_iterations=iterations, return_groups=True):
            pass
        scores.append(experiment.difference(get_coordination_results(groups)))
    fit_count += 1
    
    return -1 * sum(scores) / float(len(scores))
 def load_data(self, filename):
     # do a read from file
     self.c1 = 1
     self.c2 = 2
     self.population_size = 40
     self.max_iterations = 40
     self.population = Swarm(self.population_size, self.intersection)
Example #5
    def add_peer_to_swarm(self, peer, resource_id, swarm=None):
        """
        TODO: implement this method
        Based on the resource_id provided, iterate over the
        swarms list, and when resource_id matchs, add the
        new peer to the swarm.
        :param peer: (peer_ip, peer_port)
        :param resource_id:
        :return: VOID
        """
        print(f"Adding peer to {resource_id} swarm")
        swarm = swarm or self._get_swarm_object(resource_id)

        if not swarm:
            swarm = Swarm(resource_id)
            self.add_swarm(swarm)

        # avoid dups
        if not self.peer_in_swarm(peer, swarm):
            if swarm.size() < self.MAX_PEERS:
                swarm.add_peer(peer)
            else:
                # too many peers,
                print(f"Too many peers in {swarm.resource_id}")
                return None
        # self.make_persistent()
        return True
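
The method above relies on two helpers that are not shown in this snippet; here is a hedged sketch of what they might look like, assuming the tracker keeps its swarms in a list attribute self.swarms and that Swarm exposes a peers() accessor (both names are assumptions, not part of the source):

    def _get_swarm_object(self, resource_id):
        # hypothetical helper: return the swarm whose resource_id matches, else None
        for swarm in self.swarms:
            if swarm.resource_id == resource_id:
                return swarm
        return None

    def peer_in_swarm(self, peer, swarm):
        # hypothetical helper: True if (peer_ip, peer_port) is already registered
        return peer in swarm.peers()
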
Example #6
def test_get_closest_particle():
    positions = ([
        [-6,0,0],
        [-4,0,0],
        [-2,0,0],
        [3,0,0],
    ])

    velocities = ([
        [3,0,0],
        [2,0,0],
        [1,0,0],
        [-1,0,0],
    ])

    accelerations = ([
        [0,0,0],
        [0,0,0],
        [0,0,0],
        [0,0,0],
    ])

    s = Swarm(positions, velocities, accelerations)

    s.run(10)

    assert s.getRemainingParticleCount() == 1
def swarm(t, x, e, swarm_size=1000, omega=0.5):
    swarm = Swarm(e, swarm_size)
    omega, fip, fig = 0.5, 0.5, 0.5

    start = time.time()
    while check_time(start, t):
        swarm.update_particles_velocity(e, omega, fip, fig)
def swarm(t, x, e, swarm_size=500, omega=0.5, fip=0.9, fig=0.9):
    swarm = Swarm(e, swarm_size)

    start = time.time()
    while check_time(start,t):
        swarm.update_particles_velocity(e, omega, fip, fig)
        print(swarm.best_swarm_x, swarm.best_swarm_val)
Example #9
    def run(self, flip, max_chance, bees_number, maxIterations, locIterations):
        total_time = 0

        for itr in range(1, self.nb_exec + 1):
            print("Execution {0}".format(str(itr)))
            self.fsd = FsProblem(self.typeOfAlgo, self.df, self.ql)
            swarm = Swarm(self.fsd, flip, max_chance, bees_number,
                          maxIterations, locIterations)
            t1 = time.time()
            best = swarm.bso(self.typeOfAlgo, flip)
            t2 = time.time()
            total_time += t2 - t1
            print("Time elapsed for execution {0} : {1:.2f} s\n".format(
                itr, t2 - t1))
            self.worksheet.write(itr, 0, itr)
            self.worksheet.write(itr, 1, "{0:.2f}".format(best[0]))
            self.worksheet.write(itr, 2, best[1])
            self.worksheet.write(itr, 3, "{0:.3f}".format(t2 - t1))
            self.worksheet.write(
                itr, 4, "{0}".format(
                    str([j[0] for j in [i for i in swarm.best_features()]])))
            self.worksheet.write(itr, 5, len(Solution.solutions))

        print(
            "Total execution time of {0} executions \nfor dataset \"{1}\" is {2:.2f} s"
            .format(self.nb_exec, self.dataset_name, total_time))
        self.workbook.close()
Example #10
def metafitness(position,
                size=20,
                dims=10,
                iters=5,
                epochs=100,
                task_names=None):
    # unpack position vector
    (
        w,
        C,
        S,
        swapping,
        velocities,
        decrease_velocity,
        #add_particle,
        replace_particle,
    ) = position

    task_names = task_names if task_names is not None else [
        "rastrigin", "ackley", "sphere"
    ]

    task_errors = []
    for itask, task_name in enumerate(task_names):
        task = fit.string_to_func[task_name]
        task_bounds = fit.bounds[task_name]
        task_best_loc = fit.actual_minimum(task_name, dims)
        task_best_val = task(task_best_loc)

        iterations_errors = []
        for iter_ in range(iters):
            s = Swarm(size,
                      dims,
                      fitness=task_name,
                      bounds=task_bounds,
                      w=w,
                      C=C,
                      S=S,
                      swapping=swapping,
                      velocities=velocities,
                      decrease_velocity=decrease_velocity,
                      add_particle=0.0,
                      replace_particle=replace_particle)
            result = s.Run(epochs=epochs)
            swarm_best_key = sorted(list(result["global_bests"].keys()))[-1]
            swarm_best_loc = result["global_bests"][swarm_best_key]
            swarm_best_val = task(swarm_best_loc)

            error = (task_best_val - swarm_best_val)**2
            iterations_errors.append(error)
        avg_iterations_error = sum(iterations_errors) / len(iterations_errors)
        task_errors.append(avg_iterations_error)

    # now we have a list (per-task) of average errors.
    # combine *those* with an average (TODO: maybe some consistent scaling?)

    # this is our final "meta"-fitness we are trying to optimize.
    avg_task_error = sum(task_errors) / len(task_errors)
    return avg_task_error
def swarm(t, x, e, swarm_size=100, omega=0.8, fip=1.4, fig=1.3):
    swarm = Swarm(e, swarm_size)

    start = time.time()
    while check_time(start, t):
        swarm.update_particles_velocity(e, omega, fip, fig)
        print(swarm.best_swarm_x, swarm.best_swarm_val)
    sys.stdout.write(str(swarm.best_swarm_val) + " " + str(swarm.best_swarm_x))
Example #12
    def reset(self):
        """
        resets the map and the robots for iterative testing/benchmarking
        """

        self.grid = GridGraph(self.grid_size)
        self.swarm = Swarm(self.num_robots)
        self.swarm.startup_sequence(self.grid.list_of_vertices[0])
 def __init__(self, intersection):
     self.c1 = 1
     self.c2 = 2
     self.intersection = ""
     self.max_iterations = 0
     self.population_size = 0
     self.load_data("mock file name")
     self.population = Swarm(self.c1, self.c2, self.population_size,
                             intersection)
Example #14
 def runAlg(self):
     count = 0
     s = Swarm(40, 5, 5)
     while s.bestGlobal > 0.000000001:
         count += 1
         for particle in s.particles:
             particle.calFitness()
         s.getBestParticle()
         for particle in s.particles:
             particle.evaluate(s.bestGlobalX, s.bestGlobalY, s.c1, s.c2)
     print("Fount in " + str(count) + " moves")
Example #15
def test_GUI():
    swarm = [[0, 10, 6, 5, 11, 4], [0, 3, 9, 1, 8, 7], [0, 0, 0, 0, 2, 0],
             [0, 0, 0, 0, 0, 0]]
    swarm = np.array(swarm)
    shape = [[0, 0, 1, 1, 1], [0, 0, 1, 0, 0], [0, 0, 1, 1, 1],
             [0, 0, 1, 0, 0], [0, 0, 1, 1, 1]]
    shape = np.array(shape)

    S = Swarm(11, swarm)
    S.main_sequence(shape, 100, 100, 50)
    print(S.moves_sequence)
    return instructions_to_GUI(6, S, S.moves_sequence)
Example #16
class MainGame:
    def __init__(self):
        config = pyglet.gl.Config(major_version=2, minor_version=1)
        self.window = pyglet.window.Window(config=config, width=WIDTH, height=HEIGHT, resizable=False, vsync=True)
        #print(self.window.context.get_info().get_renderer())
        #print(self.window.context.get_info().get_vendor())
        #print('OpenGL Version {}'.format(self.window.context.get_info().get_version()))

        # start the background music
        #music = pyglet.resource.media('music.mp3')
        #music.play()

        # manage keys
        self.keys = key.KeyStateHandler()
        self.window.push_handlers(self.keys)

        # init objects
        self.camera = Camera(WIDTH, HEIGHT)

        self.player = Player()
        self.player.Translate(WIDTH/2 - (BLOCK_SIZE * 10)/2, HEIGHT - (BLOCK_SIZE * 10), 0.0)

        #self.enemy = Enemy()
        #self.enemy.Translate(WIDTH/2 - (BLOCK_SIZE * 10)/2, HEIGHT/2 - (BLOCK_SIZE * 10), 0.0)

        self.swarm = Swarm()

        # setup function calls
        self.window.on_draw = self.on_draw
        self.window.on_mouse_press = self.on_mouse_press
        self.window.on_mouse_motion = self.on_mouse_motion
        pyglet.clock.schedule_interval(self.update, 1/60.0) # update at 60Hz

    def on_draw(self):
        glViewport(0, 0, WIDTH, HEIGHT)
        glClearColor(0.114, 0.114, 0.114, 1.0) #1d1d1d
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.player.render()
        self.swarm.render()

    def update(self, dt):
        self.player.update(dt)
        self.swarm.update(dt)

    def on_mouse_press(self, x, y, button, modifiers):
        if button == mouse.LEFT:
            self.player.fire()

    def on_mouse_motion(self, x, y, dx, dy):
        # their x and y is from bottom left
        # my x and y is from top left
        increment_x = x - self.player.position.x
        self.player.translate(increment_x, 0.0, 0.0)
Example #17
 def swarm_find_tracker(self, finfo):
     peer = self.peer.closest_to_peer(finfo)
     range_from_me = self.peer.get_range(finfo)
     range_from_peer = Utils.get_range(peer, finfo)
     if range_from_me < range_from_peer:
         s = Swarm(self.peer.id)
         s.create_tracker(s.files[finfo].fstruct)
     else:
         packet = {
             "datatype": "swarm",
             "action": "find_tracker",
             "finfo": finfo
         }
         self.peer.client.SendToPeer(peer, packet)
Example #18
 def swarm_find_tracker_red(self, finfo, source):
     peer = self.peer.closest_to_peer(finfo)
     range_from_me = self.peer.get_range(finfo)
     range_from_peer = Utils.get_range(peer, finfo)
     if range_from_me < range_from_peer:
         packet = {"datatype": "swarm", "action": "found_tracker"}
         self.peer.client.SendToPeer(source, packet)
         s = Swarm(self.peer.id)
         s.create_tracker(None)
     else:
         packet = {
             "datatype": "swarm",
             "action": "find_tracker_red",
             "source": source
         }
Example #19
    def __init__(self):
        config = pyglet.gl.Config(major_version=2, minor_version=1)
        self.window = pyglet.window.Window(config=config, width=WIDTH, height=HEIGHT, resizable=False, vsync=True)
        #print(self.window.context.get_info().get_renderer())
        #print(self.window.context.get_info().get_vendor())
        #print('OpenGL Version {}'.format(self.window.context.get_info().get_version()))

        # start the background music
        #music = pyglet.resource.media('music.mp3')
        #music.play()

        # manage keys
        self.keys = key.KeyStateHandler()
        self.window.push_handlers(self.keys)

        # init objects
        self.camera = Camera(WIDTH, HEIGHT)

        self.player = Player()
        self.player.Translate(WIDTH/2 - (BLOCK_SIZE * 10)/2, HEIGHT - (BLOCK_SIZE * 10), 0.0)

        #self.enemy = Enemy()
        #self.enemy.Translate(WIDTH/2 - (BLOCK_SIZE * 10)/2, HEIGHT/2 - (BLOCK_SIZE * 10), 0.0)

        self.swarm = Swarm()

        # setup function calls
        self.window.on_draw = self.on_draw
        self.window.on_mouse_press = self.on_mouse_press
        self.window.on_mouse_motion = self.on_mouse_motion
        pyglet.clock.schedule_interval(self.update, 1/60.0) # update at 60Hz
class Controller:
    def __init__(self, intersection):
        self.c1 = 1
        self.c2 = 2
        self.intersection = ""
        self.max_iterations = 0
        self.population_size = 0
        self.load_data("mock file name")
        self.population = Swarm(self.c1, self.c2, self.population_size,
                                intersection)

    def load_data(self, filename):
        # do a read from file
        self.c1 = 1
        self.c2 = 2
        self.population_size = 40
        self.max_iterations = 40
        self.population = Swarm(self.population_size, self.intersection)

    def iteration(self):
        for particle in self.population.particles:
            particle.update(self.population.get_best_particle())
            particle.evaluate()

    def run_alg(self):
        for i in range(0, self.max_iterations):
            self.iteration()
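
A brief usage sketch for this Controller (the intersection object is a placeholder for whatever the Swarm and Particle classes expect):

# intersection = load_intersection(...)   # hypothetical domain object
# controller = Controller(intersection)
# controller.run_alg()
# best = controller.population.get_best_particle()
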
def swarm(t, x, e, swarm_size=1000):
    swarm = Swarm(e, swarm_size)

    start = time.time()
    while check_time(start,t):
        for particle in swarm:
            
Example #22
    def handle_state_announce(self):
        assert self.conn_id
        if self.conn_id_expires < time.time():
            print timestamp(), self, "Connection ID expired"
            self.goto_state_conn_id()
            return

        if self.current_swarm not in [x for x in Swarm.list() if not x.closed]:
            self.goto_state_swarm()
            return

        if time.time() < self.announce_next_attempt:
            return

        retry_text = ''
        if self.announce_retry > 0:
            retry_text = ', retry %d' % self.announce_retry

        print timestamp(), self, "Sending announce for %s%s" % (self.current_swarm, retry_text)

        buf = ''
        buf += struct.pack('!QLL', self.conn_id, 1, self.transaction_id)
        buf += self.current_swarm.sha.decode('hex')
        buf += hashlib.sha1(Peer.my_peerid).digest()
        buf += struct.pack('!QQQLLLlH', 0, 0, 0, 1, 0, self.key, -1, self.listen_port)
        self.send(buf)

        self.announce_next_attempt = time.time() + config.tracker_retry_time * 2**min(self.announce_retry, 4)
        self.announce_retry += 1
Example #23
def main():

    swarm = Swarm(81, n, 5, -5, 'mesh')
    pso = PSO(swarm, P.Function)
    pso.Optmize()

    x = swarm.particle.bestPosition
Example #24
 def from_swarm_dicts(cls,
                      swarm_dicts,
                      inference_model,
                      scoring_functions=None,
                      x_min=-1.,
                      x_max=1.,
                      inertia_weight=0.9,
                      phi1=2.,
                      phi2=2.,
                      phi3=2.,
                      **kwargs):
     """
     Classmethod to create a PSO instance from a list of dictionaries each defining an
     individual swarm.
     :param swarm_dicts: A list of dictionaries each defining an individual swarm.
         See Swarm.from_dict for more info.
     :param inference_model: An inference model instance that is used for encoding and decoding
         SMILES to and from the CDDD space.
     :param scoring_functions: List of functions that are used to evaluate a generated molecule.
         These either take an RDKit mol object or a point in the CDDD space as input.
     :param kwargs: additional parameters for the PSO class
     :return: A PSOptimizer instance.
     """
     swarms = [
         Swarm.from_dict(dictionary=swarm_dict,
                         x_min=x_min,
                         x_max=x_max,
                         inertia_weight=inertia_weight,
                         phi1=phi1,
                         phi2=phi2,
                         phi3=phi3) for swarm_dict in swarm_dicts
     ]
     return cls(swarms, inference_model, scoring_functions, **kwargs)
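
A hedged usage sketch for the classmethod above; the file name, the JSON layout and the optimizer class name (PSOptimizer, taken from the docstring's return description) are assumptions rather than part of the source:

# import json
# with open("swarm_state.json") as fh:          # hypothetical file of saved swarm dicts
#     swarm_dicts = json.load(fh)
# optimizer = PSOptimizer.from_swarm_dicts(swarm_dicts,
#                                          inference_model=inference_model,
#                                          scoring_functions=scoring_functions)
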
Example #25
def run_task(t: Task):
    try:
        strategy, iterations, num_good_clients, num_free_riders, peer_size, max_up, max_down = t
        swarm = Swarm()
        all_agents = chain(
            (Client(
                strat=strategy,
                up=Points(max_up),
                down=Points(max_down),
                peer_size=peer_size,
                swarm=swarm,
                iterations=iterations
            ) for _ in range(num_good_clients)),
            (Client(
                strat=strategy,
                up=no_points,
                down=Points(max_down),
                peer_size=peer_size,
                swarm=swarm,
                iterations=iterations
            ) for _ in range(num_free_riders))
        )

        [swarm.join(x) for x in all_agents]

        OUTPUT_FILE = f"./results/{strategy.__name__}/{iterations}_{num_good_clients}_{num_free_riders}_{peer_size}_{max_up}_{max_down}.json"
        if not os.path.isdir(os.path.dirname(OUTPUT_FILE)):
            os.makedirs(os.path.dirname(OUTPUT_FILE))
        with open(OUTPUT_FILE, 'w') as f:
            f.write(dumps(
                {
                    'metadata': {
                        'strategy': strategy.__name__,
                        'iterations': iterations,
                        'max_up': max_up,
                        'max_down': max_down,
                        'starting_good_clients': num_good_clients,
                        'starting_bad_clients': num_free_riders,
                        'peer_size': peer_size
                    },
                    'data': tuple(
                        chain.from_iterable(
                            (x.to_json(iteration) for x in y) for iteration, y in enumerate(Model.run(swarm, iterations))))
                }
            ))
    except Exception as e:
        print("got exception ", e)
Example #26
    def from_query_list(cls,
                        init_smiles,
                        num_part,
                        num_swarms,
                        inference_model,
                        scoring_functions=None,
                        phi1=2.,
                        phi2=2.,
                        phi3=2.,
                        x_min=-1.,
                        x_max=1.,
                        v_min=-0.6,
                        v_max=0.6,
                        **kwargs):
        """
        Classmethod to create a PSO instance with (possibly) multiple swarms whose particles are
        initialized at the position of the embedded input SMILES. Each swarm is initialized at
        the position defined by a different SMILES in the input list.
        :param init_smiles: A list of SMILES strings, each defining the molecule that acts as the
            starting point of one swarm in the optimization.
        :param num_part: Number of particles in each swarm.
        :param num_swarms: Number of individual swarm to be optimized.
        :param inference_model: An inference model instance that is used for encoding and decoding
            SMILES to and from the CDDD space.
        :param scoring_functions: List of functions that are used to evaluate a generated molecule.
            These either take an RDKit mol object or a point in the CDDD space as input.
        :param phi1: PSO hyperparameter.
        :param phi2: PSO hyperparameter.
        :param phi3: PSO hyperparameter.
        :param x_min: min bound of the optimization space (should be set to -1, since CDDD
            embeddings take values between -1 and 1 by default).
        :param x_max: max bound of the optimization space (should be set to 1, since CDDD
            embeddings take values between -1 and 1 by default).
        :param v_min: minimal velocity component of a particle. Also used as lower bound for the
            uniform distribution used to sample the initial velocity.
        :param v_max: maximal velocity component of a particle. Also used as upper bound for the
            uniform distribution used to sample the initial velocity.
        :param kwargs: additional parameters for the PSO class
        :return: A PSOptimizer instance.
        """
        assert isinstance(init_smiles, list)
        assert len(init_smiles) == num_swarms
        embedding = inference_model.seq_to_emb(init_smiles)
        swarms = []
        for i, sml in enumerate(init_smiles):
            swarms.append(
                Swarm.from_query(init_sml=sml,
                                 init_emb=embedding[i],
                                 num_part=num_part,
                                 v_min=v_min,
                                 v_max=v_max,
                                 x_min=x_min,
                                 x_max=x_max,
                                 phi1=phi1,
                                 phi2=phi2,
                                 phi3=phi3))

        return cls(swarms, inference_model, scoring_functions, **kwargs)
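
Similarly, a hedged sketch of calling the classmethod above with a list of seed SMILES; the SMILES strings, the model instance and the optimizer class name are placeholders, not part of the source:

# seeds = ["CCO", "c1ccccc1"]                    # placeholder SMILES
# optimizer = PSOptimizer.from_query_list(seeds,
#                                         num_part=100,
#                                         num_swarms=len(seeds),
#                                         inference_model=inference_model,
#                                         scoring_functions=scoring_functions)
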
Example #27
    def __init__(self, function_to_be_minimized, constants):
        self.constants = constants
        self.precision = constants.PRECISION
        self.inertia = constants.INERTIA_WEIGHT
        self.cognitive = constants.COGNITIVE_WEIGHT
        self.social = constants.SOCIAL_WEIGHT
        self.max_velocity_allowed = constants.MAX_VELOCITY_ALLOWED

        self.pop_size = constants.POP_SIZE
        self.generations_no = constants.GENERATIONS_NO
        self.dimensions_no = constants.DIMENSIONS_OF_THE_FUNCTION

        self.fitness = function_to_be_minimized
        self.a = constants.INTERVALS_OF_DEFINITION[self.fitness.__name__][0]
        self.b = constants.INTERVALS_OF_DEFINITION[self.fitness.__name__][1]

        self.population = []
        self.swarm = Swarm(self.fitness, self.precision)
Example #28
    def on_heartbeat(self):
        # Kludge for now.  Just have every tracker announce every swarm
        for x in Swarm.list():
            self.add_swarm(x)

        # Cleanup old swarms
        for x in [x for
                  x in self.swarms.keys()
                  if x.closed or x not in Swarm.list()]:
            print timestamp(), self, "Swarm seems to have gone away, removing:", x
            del self.swarms[x]

        if   self.state == STATE_SOCK:     self.handle_state_sock()
        elif self.state == STATE_SWARM:    self.handle_state_swarm()
        elif self.state == STATE_CONN_ID:  self.handle_state_conn_id()
        elif self.state == STATE_ANNOUNCE: self.handle_state_announce()
        elif self.state == STATE_MUTE:     self.handle_state_mute()
        else:                              raise Exception("State machine broken?")
Example #29
def main():

    options, files = getopt.getopt(sys.argv[1:], 'ivt:', ['info', 'verbose', 'threads:'])

    for flag, value in options:
        if flag == '-v' or flag == '--verbose':
            configuration['verbose'] = True
        elif flag == '-t' or flag == '--threads':
            try:
                configuration['threads'] = int(value)
            except:
                usage()
        elif flag == '-i' or flag == '--info':
            configuration['info'] = True
        else:
            usage()

    if len(files) != 1:
        usage()

    try:
        torrent = Torrent(files[0])
    except:
        print 'Impossible to read torrent file/magnet link:', files[0]
        exit(1)

    if configuration['info'] == True:
        torrent.show()
        exit(0)

    torrent.start()

    generate_clientid()

    tracker = Tracker(torrent.data['announce'])
    tracker.update(torrent)

    swarm = Swarm()
    swarm.update_peers(tracker)

    threads = configuration['threads']
Example #30
def test_get_closest_particle():
    positions = ([
        [3, 0, 0],
        [4, 0, 0],
    ])

    velocities = ([
        [2, 0, 0],
        [0, 0, 0],
    ])

    accelerations = ([
        [-1, 0, 0],
        [-2, 0, 0],
    ])

    s = Swarm(positions, velocities, accelerations)

    s.run(10)

    assert s.getClosestParticleIndex() == 0
Example #31
def main():
    positions = []
    velocities = []
    accelerations = []

    line = f.readline()
    while line:
        m = re.search('p=<(.*)>, v=<(.*)>, a=<(.*)>', line.rstrip('\n'))
        pStrings = m.group(1).split(',')
        vStrings = m.group(2).split(',')
        aStrings = m.group(3).split(',')

        positions.append([int(pStrings[0]), int(pStrings[1]), int(pStrings[2])])
        velocities.append([int(vStrings[0]), int(vStrings[1]), int(vStrings[2])])
        accelerations.append([int(aStrings[0]), int(aStrings[1]), int(aStrings[2])])

        line = f.readline()

    s = Swarm(positions, velocities, accelerations)

    s.run(1000)

    print(s.getRemainingParticleCount())
    def pso(self, w=0.45, c1=1.70, c2=2.49):
        time_to_end = time.time() + self.t
        # c1: weight of the particle's personal best value
        # c2: weight of the swarm's (social) best value
        # w: inertia weight applied to the velocity vector
        swarm = Swarm(self.SWARM_SIZE, self.VECTOR_SIZE, self.params, w, c1,
                      c2)

        while time.time() < time_to_end:
            swarm.update_swarm()
            swarm.set_best_particle()
            #print(swarm.best_particle)

        #print("#BEST", swarm.best_particle)
        return swarm.best_particle
Example #33
def main():
    metaswarm = Swarm(size=25,
                      dims=len(meta_params),
                      fitness="sphere",
                      bounds=(0.0, 1.0))
    metaswarm.fitnessFunc = metafitness
    metaswarm._fitnessString = "metafitness"
    metaswarm.CheckConvergence = _dummy_false
    results = metaswarm.Run(100)
    results.save_json("metaswarm_output.json")

    best_settings_key = sorted(list(results["global_bests"].keys()))[-1]
    best_settings = results["global_bests"][best_settings_key]
    for setting, value in zip(meta_params, best_settings):
        print(f"{setting}\t{value}")
Example #34
class Main(object):
    """
    The main runner class, used to run the algorithm for accuracy checks and to
    benchmark its efficiency. visualization.py was used for the actual visualization.
    """
    def __init__(self, grid_size, num_robots):
        self.grid_size, self.num_robots = grid_size, num_robots
        self.grid = GridGraph(self.grid_size)
        self.swarm = Swarm(self.num_robots)
        self.swarm.startup_sequence(self.grid.list_of_vertices[0])

    def reset(self):
        """
        resets the map and the robots for iterative testing/benchmarking
        """

        self.grid = GridGraph(self.grid_size)
        self.swarm = Swarm(self.num_robots)
        self.swarm.startup_sequence(self.grid.list_of_vertices[0])

    def run(self):
        """
        runs the algorithms until complete, and keeps track of the number of times it calls update,
        which is considered our benchmark
        """

        profile = 0
        while not (all(robot.state == "standby" for robot in self.swarm.swarm)
                   and not any(area.state == "red"
                               for area in self.swarm.unknown_territory)):
            self.swarm.update()
            profile += 1

        # make sure the map we built matches the environment's map
        actual_graph = {
            vertex.name: {neighbor.name
                          for neighbor in vertex.neighbors}
            for vertex in self.grid.list_of_vertices
        }
        if actual_graph == self.swarm.map:
            return profile
        else:
            raise ValueError("You done g00fed")
Example #35
  def startSwarm(self, size = None):
    '''
    Runs permutations on model parameters to find the optimal model
    characteristics for the given data.

    * size - A value from grokpy.SwarmSize: small, medium, or large. The
            default is medium. Small is only good for testing, whereas
            large can take a very long time.
    '''
    if self.swarm and self.swarm.getState() in ['Starting', 'Running']:
      raise GrokError('This model is already swarming.')

    url = self.swarmsUrl
    requestDef = {}
    if size:
      requestDef.update({'options': {"size": size}})

    result = self.c.request('POST', url, requestDef)

    # Save the swarm object
    self.swarm = Swarm(self, result['swarm'])

    return result
    def __init__(self):
        #Start Pygame
        pygame.init()
        pygame.display.set_caption('Graph Exploration')

        #Create Pygame variables
        self.clock = pygame.time.Clock()
        self.sizex = 700
        self.sizey = 700
        self.imgCounter = 0
        self.font = pygame.font.SysFont('Roboto', 25)
        self.background_color = (227, 232, 239)
        self.robot_color = (83, 87, 94)
        self.edge_color = (83, 87, 94)
        self.state_to_color_mapping = {
            'red': pygame.Color('red'),
            'yellow': pygame.Color('yellow'),
            'green': pygame.Color('green')
        }
        self.screen = pygame.display.set_mode((self.sizex, self.sizey))
        self.background = pygame.Surface((self.sizex, self.sizey))

        #Create Grid for robots to walk on
        self.grid = TripleGraph()

        #Create swarm of robots
        self.swarm = Swarm(2)
        self.swarm.startup_sequence(self.grid.list_of_vertices[0])
        self.old_vertices = None
        swarm = Swarm(3)
        swarm.startup_sequence(self.grid.list_of_vertices[0])

        #Set Main loop to running
        self._running = True

        #Draw graph into window
        self.background.fill(self.background_color)
        self.space_out_vertices(self.grid)
        self.draw_grid()
        pygame.draw.circle(self.background, self.robot_color,
                           self.swarm.hive.coords, 15, 3)
        self.screen.blit(self.background, (0, 0))
        pygame.display.update()
        sleep(1)
    def run(swarm: Swarm, iterations: int) -> Iterator[Iterator[Result]]:
        all_agents = list(swarm.all_clients())
        [x.init_peers() for x in all_agents]

        for c in range(iterations):
            print("Iteration", c)

            do_assertions(all_agents)

            remaining_agents = set(all_agents)
            while remaining_agents:
                # filter out the ones that don't want content
                remaining_agents -= {x for x in all_agents if not x.wants_content()}
                # Iterate in random order
                for agent in random_iteration(remaining_agents):
                    # Iterate all peers of that agent in random order
                    for peer in random_iteration(agent.peers):
                        if peer.ask_for_content(agent):  # If they gave us content
                            agent.give_content(peer)
                            break
                    else:
                        # None of the peers gave them something
                        remaining_agents.remove(agent)
            yield (x.get_state() for x in all_agents)

            [x.reset_values() for x in all_agents]

            do_assertions(all_agents)
            # Find new peers
            print("--before--")
            [x.before_reset() for x in random_iteration(all_agents)]
            print("--reset--")
            [x.reset(c) for x in random_iteration(all_agents)]
            print("--after--")
            [x.after_reset(c) for x in random_iteration(all_agents)]
            do_assertions(all_agents)
        "respect_boundaries"
    ] = True  # Actually seems to track more closely without this, just at much lower (1/10th) rates
    # particle_settings['initial_inertia'] = 0.00019464380975982382  # Brennan & Clark 1996
    graph = False

    if graph:
        from matplotlib import pyplot as pl
        from matplotlib import animation
        from mpl_toolkits.mplot3d import Axes3D

        for i, experiment in enumerate(parameter_estimation.ROHDE_EXPERIMENTS):
            fig = pl.figure()
            ax = fig.add_subplot(111, projection="3d")
            # pl.axis([-0.05, 1.05,] * dimensions)

            swarm = Swarm(dimensions, group_size, no_groups, **particle_settings)
            # plot = pl.scatter(*zip(*[particle.position for particle in swarm.step(experiment.game)]), alpha=0.2)
            plot = pl.scatter(*zip(*[particle.position for particle in swarm.step(experiment.game)]))

            anim = animation.FuncAnimation(
                fig, update_plot, frames=xrange(iterations), fargs=(swarm, plot, experiment.game)
            )

            pl.show()

    else:
        # new_experiments = [
        #    Experiment(  # free ambiguous form
        #        Experiment.Settings(
        #            reference_costs=(-60., -120., -280.),
        #            ambiguous_reference_cost=-0.,

if __name__ == '__main__':
    output_location = r'I:\Users\Chase Stevens\Dropbox\Dissertation\swarm_state.json'

    iterations = 10000
    dimensions = 6
    group_size = 100
    no_groups = 1
    save = True

    graph = False
    
    if os.path.exists(output_location):
        with open(output_location, 'r') as f:
            swarm = Swarm.from_dict(json.load(f))
            print "Loaded swarm - resuming from iteration", swarm.particles[0]._time
            print "(Loaded", len(swarm.particles), "particles into", len(swarm.particle_groups), "groups)"
    else:
        swarm = Swarm(
            dimensions,
            group_size,
            no_groups,
            particle_distribution=parameter_estimation.mitchell_sampling_factory(dimensions),
        )
    fitness_func = parameter_estimation.fitness

    if graph:
        from matplotlib import pyplot as pl
        from matplotlib import animation
Example #40
class Model(object):
  '''
  Object representing a Grok Model.

  * parent - **Either** a `Client` or `Project`.
  * modelDef - A dict, usually returned from a model creation or get action.
    Usually includes:

    * id
    * name
    * streamId
    * swarmsUrl
    * url
  '''

  def __init__(self, parent, modelDef):

    # Give streams access to the parent client/project and its connection
    self.parent = parent
    self.c = self.parent.c

    # Take everything we're passed and make it an instance property.
    self.__dict__.update(modelDef)

    # Prepare to have a swarm associated with the model
    self.swarm = None

  def _runCommand(self, command, **params):
    url = self.commandsUrl
    commandObject = {'command': command}

    if params:
      commandObject['params'] = params

    result = self.c.request('POST', url, commandObject)
    return result

  def delete(self):
    '''
    Permanently deletes the model

    .. warning:: There is currently no way to recover from this operation.
    '''
    self.c.request('DELETE', self.url)

  def clone(self, params = None):
    '''
    Clones this model
    '''
    if params:
      result = self.c.request('POST', self.cloneUrl, {'model': params})
    else:
      result = self.c.request('POST', self.cloneUrl)

    return Model(self.parent, result['model'])

  #############################################################################
  # Model Configuration

  def setName(self, newName):
    '''
    Renames the model.

    * newName - String
    '''

    # Get the current model state
    modelDef = self.getState()

    # Update the definition
    modelDef['name'] = newName

    # Update remote state
    self.c.request('POST', self.url, {'model': modelDef})

    # Update local state
    self.name = newName

  def setNote(self, newNote):
    '''
    Adds or updates a note for this model.

    * newNote - A String describing this model.
    '''

    # Get the current model state
    modelDef = self.getState()

    # Update the definition
    modelDef['note'] = newNote

    # Update remote state
    self.c.request('POST', self.url, {'model': modelDef})

    # Update local state
    self.note = newNote

  #############################################################################
  # Model states

  def getState(self):
    '''
    Returns the full state of the model from the API server

    TODO: Remove popping of Nones once API is updated
    '''

    modelDef = self.c.request('GET', self.url)['model']

    # The API doesn't accept None values, so remove them.
    for key, value in modelDef.items():
      if value is None:
        del modelDef[key]

    return modelDef

  def promote(self, **params):
    '''
    Puts the model into a production ready mode.

    .. note:: This may take several seconds.
    '''

    # Check how many records are in the swarm model output cache
    headers, data, meta = self.getModelOutput()
    swarmOutputCacheLen = len(data)

    # Promotion command
    self._runCommand('promote', **params)

    ##### WARNING: Ugly hack that will go away #####

    # Check if the state of the model changes
    timeoutCounter = 0
    while True:
      modelDef = self.c.request('GET', self.url)['model']
      status = modelDef['status']
      if timeoutCounter >= 20:
        raise GrokError('The production model did not start in a reasonable '
                        'amount of time. Please try again or contact support.')
      elif status == 'running':
        # The production model has turned on
        break
      else:
        print 'Waiting for the production model to become ready ...'
        time.sleep(.5)
        timeoutCounter += 1

    # Check that the production model has at least caught up to where we were
    # at the end of swarm.
    timeoutCounter = 0
    while True:
      # Production Model Output Cache Length
      try:
        headers, data, meta = self.getModelOutput()
        pmocLen = len(data)
      except requests.exceptions.HTTPError:
        print 'Whoops 500'
        time.sleep(.5)
        timeoutCounter += 1
        continue

      if timeoutCounter >= 80:
        raise GrokError("The production model did not catch up to the swarm "
                        "in a reasonable amount of time. Please try again or "
                        "contact support.")
      elif pmocLen >= swarmOutputCacheLen:
        break
      else:
        print 'Waiting for the production model to catch up with the data ...'
        time.sleep(1)
        timeoutCounter += 1

    ##### END UGLY HACK

  def start(self, **params):
    '''
    Starts up a model, readying it to receive new data from a stream
    '''
    return self._runCommand('start', **params)

  def disableLearning(self, **params):
    '''
    Puts the model into a predictions only state where it will not learn from
    new data.

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('disableLearning', **params)

  def enableLearning(self, **params):
    '''
    New records will be integrated into the models future predictions.

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('enableLearning', **params)


  def setAnomalyAutoDetectThreshold(self, autoDetectThreshold):
    '''
    Sets the autoDetectThreshold of the model.  Model must be TemporalAnomaly
    for this to succeed.

    * autoDetectThreshold - value to set the auto detect threshold

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('setAutoDetectThreshold', \
      autoDetectThreshold=autoDetectThreshold)


  def getAnomalyAutoDetectWaitRecords(self):
    '''
    Gets the autoDetectWaitRecords of the model.  Model must be TemporalAnomaly
    for this to succeed.

    Response on success::

      {
        'autoDetectWaitRecords': integer
      }

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('getAutoDetectWaitRecords')


  def setAnomalyAutoDetectWaitRecords(self, autoDetectWaitRecords):
    '''
    Sets the autoDetectWaitRecords of the model.  Model must be TemporalAnomaly
    for this to succeed.

    * autoDetectWaitRecords - value to set the auto detect wait records

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('setAutoDetectWaitRecords', \
      autoDetectWaitRecords=autoDetectWaitRecords)


  def getAnomalyAutoDetectThreshold(self):
    '''
    Gets the autoDetectThreshold of the model.  Model must be TemporalAnomaly
    for this to succeed.

    Response on success::

      {
        'autoDetectThreshold': float
      }

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('getAutoDetectThreshold')


  def getLabels(self, startRecordID=None, endRecordID=None):
    '''
    Returns a list of labels for a given range of records. Each record has an
    associated list of labels; a record may have no labels.

    * startRecordID - ROWID of the first prediction of these label results
    * endRecordID - ROWID of the last prediction record of these label results.
                    (Not inclusive.)

    Response on success::

      {
        'isProcessing': boolean,
        'recordLabels': [
          {
            'ROWID': integer,
            'labels': [str, ...]
          },...
        ]
      }

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('getLabels', startRecordID=startRecordID,
      endRecordID=endRecordID)


  def addLabel(self, startRecordID, endRecordID, labelName):
    '''
    Adds a label to a given range of records from startRecordID to endRecordID,
    not inclusive of endRecordID.

    * startRecordID - ROWID of the first prediction to add this label
    * endRecordID - ROWID of the last prediction record to add this label.
                    (Not inclusive.)
    * labelName - string indicating name of the label to add to the given range

    Response on success::

      {
        'status': 'success'
      }

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('addLabel', startRecordID=startRecordID, \
      endRecordID=endRecordID, labelName=labelName)


  def removeLabels(self, startRecordID=None, endRecordID=None, labelFilter=None):
    '''
    Removes a label or all labels from a given range of records from
    startRecordID to endRecordID, not inclusive of endRecordID. If labelFilter
    is set, only labels of type labelFilter will be removed, otherwise all
    labels will be removed from the given range.

    * startRecordID - ROWID of the first prediction to remove labels
    * endRecordID - ROWID of the last prediction record to remove labels.
                    (Not inclusive.)
    * labelFilter - string. If not None, only labels equal to this will be
                    removed. Otherwise all labels will be removed from given
                    range.

    Response on success::

      {
        'status': 'success'
      }

    .. note:: This method is intended for use with RUNNING models that have
              been promoted. The API server will return an error in other cases.
    '''
    return self._runCommand('removeLabels', startRecordID=startRecordID, \
      endRecordID=endRecordID, labelFilter=labelFilter)

  #############################################################################
  # Stream

  def getStream(self):
    '''
    Returns the Stream that this model is associated with.
    '''
    return self.parent.getStream(self.streamId)

  #############################################################################
  # Swarms

  def listSwarms(self):
    '''
    Returns a list of Swarm objects for this model
    '''

    # Where to make our request
    url = self.swarmsUrl

    swarmDefs = self.c.request('GET', url)['swarms']

    swarms = []
    for swarmDef in swarmDefs:
      swarms.append(Swarm(self, swarmDef))

    return swarms

  def startSwarm(self, size = None):
    '''
    Runs permutations on model parameters to find the optimal model
    characteristics for the given data.

    * size - A value from grokpy.SwarmSize: small, medium, or large. The
            default is medium. Small is only good for testing, whereas
            large can take a very long time.
    '''
    if self.swarm and self.swarm.getState() in ['Starting', 'Running']:
      raise GrokError('This model is already swarming.')

    url = self.swarmsUrl
    requestDef = {}
    if size:
      requestDef.update({'options': {"size": size}})

    result = self.c.request('POST', url, requestDef)

    # Save the swarm object
    self.swarm = Swarm(self, result['swarm'])

    return result

  def stop(self):
    '''
    Stops (and checkpoints) a running model. If a swarm is in progress it will
    gracefully stop the swarm progress and make the model available for
    promotion (this can take a few seconds).
    '''
    self._runCommand('stop')

  def getSwarmState(self):
    '''
    Returns the current state of a swarm.
    '''

    if not self.swarm:
      # See if the API knows about a swarm for this model
      swarms = self.listSwarms()
      if not swarms:
        raise GrokError('There is no swarm associated with this model.')
      else:
        # Find the latest swarm
        # Use the string time to sort with the latest time first
        # If details hasn't been populated yet, assume this is the latest
        swarms = sorted(swarms,
                        key = lambda swarm: swarm.details.get('startTime', 0),
                        reverse=True)


        # Set that as *the* swarm for this model
        self.swarm = swarms[0]

    return self.swarm.getState()

  #############################################################################
  # Checkpoints

  def listCheckpoints(self):
    '''
    Returns a list of checkpoint dicts for this model
    '''
    # Where to make our request
    url = self.checkpointsUrl
    checkpoints = self.c.request('GET', url)['checkpoints']
    return checkpoints

  def createCheckpoint(self):
    '''
    Creates a new checkpoint object and returns it as a dict
    '''
    # Where to make our request
    url = self.checkpointsUrl
    checkpoint = self.c.request('POST', url)['checkpoint']
    return checkpoint

  #############################################################################
  # Model data

  def getModelOutput(self, limit=None, offset = None,
                           startAt = None, shift = True):
    '''
    Returns the data in the output cache of the best model found during Swarm.

    * limit - The maximum number of rows to get from the model
    * offset - The number of rows back from the last row at which to begin
               returning data.

      For example::

        If you have 1000 records in the model output cache and set offset to
        100, you will get records with row ID 900 to 999. If you set offset
        above the maximum row ID that exists in the model's output cache you will
        get no records.

    * startAt - The start row ID to begin returning data from.

      For example::

        If you have 1000 records in the model output cache and set startAt to
        100, you will get records with row ID 100 to 1000. If you set startAt
        above the maximum row ID that exists in the model's output cache you will
        get no records.

    * shift - This shifts the records returned so that all predictions are
              aligned with actual values. Note: Set this value to False if you
              are working with realtime data.
    '''

    params = {}
    if limit is not None:
      params['limit'] = limit
    if offset is not None:
      params['offset'] = offset
    if startAt is not None:
      params['startAt'] = startAt
    if shift is not None:
      params['shift'] = shift

    result = self.c.request('GET', self.dataUrl, params = params)['output']

    headers = result['names']
    data = result['data']

    try:
      meta = result['meta']
    except KeyError:
      meta = None

    return headers, data, meta
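
A hedged usage sketch of the output accessor above, assuming model is an already-started Model instance (the variable name and values are placeholders):

# headers, data, meta = model.getModelOutput(limit=100)    # at most the last 100 cached rows
# headers, data, meta = model.getModelOutput(offset=100)   # e.g. rows 900-999 of a 1000-row cache
# headers, data, meta = model.getModelOutput(startAt=100)  # rows from ROWID 100 onwards
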
    scale = numpy.array([5, 10])
    plot.set_offsets(zip([particle.position * scale  for particle in swarm.step(fitness_func)]))
    
    return plot,


if __name__ == '__main__':
    iterations = 600
    dimensions = 2
    group_size = 50
    no_groups = 50
    save = True

    graph = True

    swarm = Swarm(dimensions, group_size, no_groups, respect_boundaries=False, velocity_dampening=0.2)
    fitness_func = lambda (x, y): -abs((y * 10) - (x * 5) ** 2)

    if graph:
        from matplotlib import pyplot as pl
        from matplotlib import animation

        fig = pl.figure()
        #ax = pl.axis([0, 10,] * dimensions)
        ax = pl.axis([0, 5, 0, 10])

        plot = pl.scatter(*zip(*[particle.position for particle in swarm.step(fitness_func)]))

        anim = animation.FuncAnimation(fig, update_plot, frames=iterations, fargs=(swarm, plot, fitness_func))

        anim.save('squares.mp4', fps=10, extra_args=['-vcodec', 'libx264'])
Example #42
def main():

    # Setup some basic logging
    logging.basicConfig()
    formatter = logging.Formatter('%(name)-24s: %(asctime)s : %(levelname)-8s %(message)s')
    logger = logging.getLogger()
    logger.handlers[0].setFormatter(formatter)

    # Callback for showing statistics
    def log_stats(data):
        sideband = 'USB'
        auto_amps = {}
        for baseline in data.baselines:
            if baseline.is_valid():
                chunk = baseline.left._chk
                interleaved = array(list(p for p in data[baseline][chunk][sideband] if not isnan(p)))
                complex_data = interleaved[0::2] + 1j * interleaved[1::2]
                if baseline.is_auto():
                    auto_amps[baseline] = abs(complex_data).mean()
                    norm = auto_amps[baseline]
                else:
                    norm_left = auto_amps[SwarmBaseline(baseline.left, baseline.left)]
                    norm_right = auto_amps[SwarmBaseline(baseline.right, baseline.right)]
                    norm = sqrt(norm_left * norm_right)
                logger.info(
                    '{baseline!s}[chunk={chunk}].{sideband} : Amp={amp:.4e}, Phase={pha:+.2e}, Corr.={corr:5.2f}%'.format(
                        baseline=baseline, chunk=chunk, sideband=sideband, 
                        corr=100.0*abs(complex_data).mean()/norm,
                        pha=angle(complex_data).mean(),
                        amp=abs(complex_data).mean()
                        )
                    )

    # Callback for saving raw data
    def save_rawdata(rawdata):
        for fid, datas in enumerate(rawdata):
            filename = 'fid%d.dat' % fid
            save_bin(filename, datas)
            logger.info('Data for FID #%d saved to %r' % (fid, filename))

    # Callback for checking ramp
    def check_ramp(rawdata):
        ramp = empty(SWARM_VISIBS_ACC_SIZE)
        for fid, datas in enumerate(rawdata):
            raw = array(unpack('>%dI'%SWARM_VISIBS_ACC_SIZE, datas))
            ramp[0::2] = raw[1::2]
            ramp[1::2] = raw[0::2]
            errors_ind = list(i for i, p in enumerate(ramp) if p !=i)
            logger.info('Ramp errors for FID #%d: %d' % (fid, len(errors_ind)))

    # Parse the user's command line arguments
    parser = argparse.ArgumentParser(description='Catch and process visibility data from a set of SWARM ROACH2s')
    parser.add_argument('-v', dest='verbose', action='store_true', help='display debugging logs')
    parser.add_argument('-m', '--swarm-mapping', dest='swarm_mapping', metavar='SWARM_MAPPING', type=str, default=SWARM_MAPPING,
                        help='Use file SWARM_MAPPING to determine the SWARM input to IF mapping (default="%s")' % SWARM_MAPPING)
    parser.add_argument('-i', '--interface', dest='interface', metavar='INTERFACE', type=str, default='eth2',
                        help='listen for UDP data on INTERFACE (default="eth2")')
    parser.add_argument('-b', '--bitcode', dest='bitcode', metavar='BITCODE', type=str, default=DEFAULT_BITCODE,
                        help='program ROACH2s with BITCODE (default="%s")' % DEFAULT_BITCODE)
    parser.add_argument('-t', '--integrate-for', dest='itime', metavar='INTEGRATION-TIME', type=float, default=30.0,
                        help='integrate for approximately INTEGRATION-TIME seconds (default=30)')
    parser.add_argument('--setup-only', dest='setup_only', action='store_true',
                        help='only program and setup the board; do not wait for data')
    parser.add_argument('--listen-only', dest='listen_only', action='store_true',
                        help='do NOT setup the board; only wait for data')
    parser.add_argument('--visibs-test', dest='visibs_test', action='store_true',
                        help='enable the DDR3 visibility ramp test')
    parser.add_argument('--save-raw-data', dest='save_rawdata', action='store_true',
                        help='Save raw data from each FID to file')
    parser.add_argument('--log-stats', dest='log_stats', action='store_true',
                        help='Print out some baselines statistics (NOTE: very slow!)')
    args = parser.parse_args()

    # Set logging level given verbosity
    if args.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # Silence katcp INFO messages
    katcp_logger = logging.getLogger('katcp')
    katcp_logger.setLevel(logging.WARNING)

    # Setup the listener class
    listener = SwarmListener(args.interface)

    # Create our SWARM instance
    swarm = Swarm()

    if not args.listen_only:

        # Setup using the Swarm class and our parameters
        swarm.setup(args.bitcode, args.itime, listener)

    if args.visibs_test:

        # Enable the visibs test
        swarm.members_do(lambda fid, member: member.visibs_delay(delay_test=True))

    else:

        # Disable the visibs test
        swarm.members_do(lambda fid, member: member.visibs_delay(delay_test=False))

    if not args.setup_only:

        # Create the data handler 
        swarm_handler = SwarmDataHandler(swarm, listener)

        if args.log_stats:

            # Use a callback to show visibility stats
            swarm_handler.add_callback(log_stats)

        if args.save_rawdata:

            # Give a rawback that saves the raw data
            swarm_handler.add_rawback(save_rawdata)

        if args.visibs_test:

            # Give a rawback that checks for ramp errors
            swarm_handler.add_rawback(check_ramp)

        # Start the main loop
        swarm_handler.loop()