import math
import statistics

import matplotlib.pyplot as plt
from tqdm import trange

# BasicMap, Scenario, QuadrantPartitioner and the agent classes
# (RandomWalkLostPerson, RandomWalkSearcher, StationarySearcher,
# VerticalSweepSearcher) are assumed to be provided by this project's own
# modules.


def create_latency_vs_percent_found(max_latency, number_samples, max_timestep):
    iterations = []

    for i in trange(max_latency):
        num_yes = 0
        for j in range(number_samples):

            m = BasicMap(15, 15)

            middle = (7, 7)

            # Add some lost persons to the map
            lp00 = RandomWalkLostPerson(m)
            lp00.init(middle)

            # Add some searchers to the map
            searcher00 = RandomWalkSearcher(m)
            searcher00.init(middle)

            s = Scenario(m, [lp00], [searcher00], i)
            count = s.simulate(max_timestep)  # Simulate for N time steps
            if s.num_rescued > 0:
                num_yes += 1
        perc_found = num_yes / number_samples
        iterations.append(perc_found * 100)

    plt.plot(iterations)
    plt.xlabel('Latency')
    plt.ylabel('% lost persons found')
    plt.savefig('latency_vs_percent_found.pdf', bbox_inches='tight')
    plt.close()
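# Example usage of create_latency_vs_percent_found (a minimal sketch; the
# argument values below are illustrative, not taken from the original runs):
#
#     create_latency_vs_percent_found(max_latency=20, number_samples=100,
#                                     max_timestep=200)
#
# This sweeps the latency from 0 to 19, estimates the percentage of runs in
# which the lost person is found within 200 time steps, and writes the plot
# to latency_vs_percent_found.pdf.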
def create_searchers_vs_time(max_searchers, latency, number_samples,
                             num_time_steps):
    iterations = []

    for num_searchers in trange(1, max_searchers + 1):
        total = 0
        for i in range(number_samples):
            m = BasicMap(15, 15)
            middle = (7, 7)

            # Add some lost persons to the map
            lp00 = RandomWalkLostPerson(m)
            lp00.init(middle)

            searchers = []

            for j in range(num_searchers):
                # Add some searchers to the map
                searcher = RandomWalkSearcher(m)
                searcher.init(middle)
                searchers.append(searcher)

            s = Scenario(m, [lp00], searchers, latency=latency)
            count = s.simulate(num_time_steps)  # Simulate for N time steps
            total += count
        avg = total / number_samples

        iterations.append(avg)

    plt.plot(list(range(1, max_searchers + 1)), iterations)
    plt.xlabel('Number of searchers')
    plt.ylabel('Average number of search time steps')
    plt.savefig('searchers_vs_time.pdf', bbox_inches='tight')
    plt.close()
def get_quadrantPartitionPerformance(num_searchers=15,
                                     max_timestep=100,
                                     latency=15,
                                     number_samples=1000):
    iterations = []

    for i in range(0, num_searchers + 1):
        total = 0
        for j in range(number_samples):

            m = BasicMap(15, 15)

            partitioner = QuadrantPartitioner()
            [rows_midpoint, cols_midpoint] = partitioner.partition(m)
            quad01_rows = (0, rows_midpoint)
            quad01_cols = (0, cols_midpoint)
            quad02_rows = (0, rows_midpoint)
            quad02_cols = (cols_midpoint, m.numColumns() - 1)
            quad03_rows = (rows_midpoint, m.numRows() - 1)
            quad03_cols = (0, cols_midpoint - 1)
            quad04_rows = (rows_midpoint, m.numRows() - 1)
            quad04_cols = (cols_midpoint, m.numColumns() - 1)

            middle = (7, 7)

            # Add some lost persons to the map
            lp00 = RandomWalkLostPerson(m)
            lp00.init(middle)

            # Add some searchers to the map
            searchers = []
            for k in range(0, i):
                if k % 4 == 0:
                    # Assign to quadrant 1
                    searcher = RandomWalkSearcher(m, quad01_rows, quad01_cols)
                elif k % 4 == 1:
                    # Assign to quadrant 2
                    searcher = RandomWalkSearcher(m, quad02_rows, quad02_cols)
                elif k % 4 == 2:
                    # Assign to quadrant 3
                    searcher = RandomWalkSearcher(m, quad03_rows, quad03_cols)
                else:
                    # Assign to quadrant 4
                    searcher = RandomWalkSearcher(m, quad04_rows, quad04_cols)
                searcher.init(middle)
                searchers.append(searcher)

            s = Scenario(m, [lp00], searchers, latency)
            count = s.simulate(max_timestep)  # Simulate for N time steps
            total += count
        avg = total / number_samples
        iterations.append(avg)

    plt.title("searchers vs time step")
    plt.plot(list(range(len(iterations))), iterations)
    plt.xlabel('searchers')
    plt.ylabel('time step')
    plt.show()
    print(iterations)
    return iterations
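# Example usage of get_quadrantPartitionPerformance (a sketch; only the sample
# count is overridden here for a quicker run, the remaining defaults are the
# ones defined above):
#
#     averages = get_quadrantPartitionPerformance(number_samples=100)
#
# The function displays the plot interactively and returns the list of average
# search time steps, indexed by the number of searchers (0 through
# num_searchers).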
def create_latency_vs_timestep(max_latency, number_samples, max_timestep):
    iterations = []

    for i in trange(max_latency):
        total = 0
        for j in range(number_samples):

            m = BasicMap(15, 15)

            middle = (7, 7)

            # Add some lost persons to the map
            lp00 = RandomWalkLostPerson(m)
            lp00.init(middle)

            # Add some searchers to the map
            searcher00 = RandomWalkSearcher(m)
            searcher00.init(middle)

            s = Scenario(m, [lp00], [searcher00], i)
            count = s.simulate(max_timestep)  # Simulate for N time steps
            total += count
        avg = total / number_samples
        iterations.append(avg)

    plt.plot(iterations)
    plt.xlabel('Latency')
    plt.ylabel('Average number of search time steps')
    plt.savefig('latency_vs_time_steps.pdf', bbox_inches='tight')
    plt.close()
Example #5
    def test_scenario_one_multiple_searcher(self):
        m = BasicMap(15, 15)

        middle = (7, 7)

        # Add some lost persons to the map
        lp00 = RandomWalkLostPerson(m)
        lp00.init(middle)

        searchers = []


        for i in range(10):
            # Add some searchers to the map
            searcher = RandomWalkSearcher(m)
            searcher.init(middle)
            searchers.append(searcher)


        s = Scenario(m, [lp00], searchers)
        s.simulate(100)  # Simulate for N time steps
        print("lost person history: \n")
        print(lp00.get_history())
        print("\n")
        for i in range(10):
            print("searcher history: " , i)
            print(searchers[i].get_history())
            print("\n")
def get_lanePartitionPerformance(num_searchers=15,
                                 max_timestep=100,
                                 max_latency=15,
                                 number_samples=100):
    # TODO - Finish this.
    iterations = []

    num_rows = 15
    num_cols = 15

    for i in range(1, num_searchers + 1):
        # Compute the lane boundaries for this number of searchers
        lanes = []
        lane_size = (num_cols - 1) / i
        for j in range(0, i):
            lower = j * lane_size
            upper = (j + 1) * lane_size
            lanes.append((math.floor(lower) + 1, math.floor(upper)))
        # Widen the first lane so coverage starts at column 0
        lanes[0] = (0, lanes[0][1])
        print(lanes)

        total = 0
        for j in range(number_samples):

            m = BasicMap(15, 15)

            middle = (7, 7)

            # Add some lost persons to the map
            lp00 = RandomWalkLostPerson(m)
            lp00.init(middle)

            # Add some searchers to the map
            searchers = []
            for k in range(0, len(lanes)):
                searcher = VerticalSweepSearcher(m, lanes[k])
                searcher.init((0, math.floor((lanes[k][0] + lanes[k][1]) / 2)))
                searchers.append(searcher)

            s = Scenario(m, [lp00], searchers, max_latency)
            count = s.simulate(max_timestep)  # Simulate for N time steps
            total += count
        avg = total / number_samples
        iterations.append(avg)

    plt.title("latency vs time step")
    plt.plot(list(range(len(iterations))), iterations)
    plt.xlabel('latency')
    plt.ylabel('time step')
    plt.show()
    print(iterations)
    return iterations
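# Worked example of get_lanePartitionPerformance's lane computation
# (illustrative): with i = 3 searchers on the 15-column map, lane_size is
# 14 / 3, the raw lanes come out as (1, 4), (5, 9), (10, 14), and the first
# lane is then widened to (0, 4), so the final lanes [(0, 4), (5, 9), (10, 14)]
# cover every column exactly once.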
def getNoPartitionStats(num_searchers, number_samples, max_timestep, latency):
    num_yes = 0
    avg_found = []
    perc_found = 0
    num_timesteps_successful = []
    num_timesteps_overall = []
    for i in range(number_samples):

        m = BasicMap(101, 101)

        middle = (50, 50)

        # Add some lost persons to the map
        lp00 = RandomWalkLostPerson(m)
        lp00.init(middle)

        # Add some searchers to the map
        searchers = []
        for j in range(0, num_searchers):
            searcher = RandomWalkSearcher(m)
            searcher.init(middle)
            searchers.append(searcher)

        s = Scenario(m, [lp00], searchers, latency=latency)
        count = s.simulate(max_timestep)  # Simulate for N time steps
        if count < max_timestep:
            num_yes += 1
            num_timesteps_successful.append(count)
        num_timesteps_overall.append(count)
        perc_found = num_yes / (i + 1)
        perc_found = perc_found * 100
        avg_found.append(perc_found)

    # Graph convergence
    plt.title(
        "Convergence to expected probability of success, no partitioning")
    plt.plot(list(range(number_samples)), avg_found)
    plt.xlabel('Number of Samples')
    plt.ylabel('Probability of Success (%)')
    plt.savefig('convergence_psuccess.png')
    plt.close()

    print("Result: Percent Lost Person Discovered = " + str(perc_found))
    print("Average Number of Time Steps (when successful): " +
          str(sum(num_timesteps_successful) / len(num_timesteps_successful)))
    print("Standard deviation: " +
          str(statistics.stdev(num_timesteps_successful)))
    print("Average Number of Time Steps (overall): " +
          str(sum(num_timesteps_overall) / len(num_timesteps_overall)))
    print("Standard deviation: " +
          str(statistics.stdev(num_timesteps_overall)))
Example #8
    def test_scenario_randomWalker(self):
        m = BasicMap(10, 10)

        # Add some lost persons to the map
        lp00 = RandomWalkLostPerson(m)
        lp00.init((2, 5))

        # Add some searchers to the map
        searcher00 = StationarySearcher(m)
        searcher00.init((4, 8))

        s = Scenario(m, [lp00], [searcher00])
        s.simulate(10)  # Simulate for N time steps
        print(lp00.get_history())
        print(searcher00.get_history())
Example #9
    def test_quadrant_partitioner(self):
        m = BasicMap(25, 25)

        middle = (12, 12)

        # Add some lost persons to the map
        lp00 = RandomWalkLostPerson(m)
        lp00.init(middle)

        # Partition map into quadrants
        searchers = []

        partitioner = QuadrantPartitioner()
        [rows_midpoint, cols_midpoint] = partitioner.partition(m)
        quad01_rows = (0, rows_midpoint)
        quad01_cols = (0, cols_midpoint)
        s00 = RandomWalkSearcher(m, quad01_rows, quad01_cols)
        s00.init(middle)
        searchers.append(s00)

        quad02_rows = (0, rows_midpoint)
        quad02_cols = (cols_midpoint, m.numColumns()-1)
        s01 = RandomWalkSearcher(m, quad02_rows, quad02_cols)
        s01.init(middle)
        searchers.append(s01)

        quad03_rows = (rows_midpoint, m.numRows()-1)
        quad03_cols = (0, cols_midpoint-1)
        s02 = RandomWalkSearcher(m, quad03_rows, quad03_cols)
        s02.init(middle)
        searchers.append(s02)

        quad04_rows = (rows_midpoint, m.numRows()-1)
        quad04_cols = (cols_midpoint, m.numColumns()-1)
        s03 = RandomWalkSearcher(m, quad04_rows, quad04_cols)
        s03.init(middle)
        searchers.append(s03)

        scenario = Scenario(m, [lp00], searchers, latency=50)
        scenario.simulate(100)

        print("lost person history: \n")
        print(lp00.get_history())
        print("\n")
        for i in range(4):
            print("searcher history: " , i)
            print(searchers[i].get_history())
            print("\n")
Example #10
    def test_scenario_one_random_searcher(self):
        m = BasicMap(15, 15)

        middle = (7, 7)

        # Add some lost persons to the map
        lp00 = RandomWalkLostPerson(m)
        lp00.init(middle)

        # Add some searchers to the map
        searcher00 = RandomWalkSearcher(m)
        searcher00.init(middle)

        s = Scenario(m, [lp00], [searcher00])
        s.simulate(100)  # Simulate for N time steps
        print("lost person history: \n")
        print(lp00.get_history())
        print("\n")
        print("searcher history: ")
        print(searcher00.get_history())
Example #11
    def test_vertical_sweep_searcher(self):
        m = BasicMap(101, 101)
        # m.print()
        # Searchers sweep only columns in the interval [0, 29]

        middle = (50, 50)

        # Add some lost persons to the map
        lp00 = RandomWalkLostPerson(m)
        lp00.init(middle)

        num_searchers = 10
        lane_size = int(30 / num_searchers)
        lanes = []
        lower = 0
        upper = lane_size - 1
        lanes.append((lower, upper))
        while upper < 29:
            lower = upper + 1
            upper = upper + lane_size
            lanes.append((lower, upper))

        searchers = []
        for i in range(0, len(lanes)):
            searcher = VerticalSweepSearcher(m, lanes[i])
            searcher.init((0, math.floor((lanes[i][0] + lanes[i][1])/2)))
            searchers.append(searcher)

        self.assertTrue(len(searchers) == num_searchers)
        scenario = Scenario(m, [lp00], searchers)
        scenario.simulate(100)

        print("lost person history: \n")
        print(lp00.get_history())
        print("\n")
        for i in range(num_searchers):
            print("searcher history: " , i)
            print(searchers[i].get_history())
            print(len(searchers[i].get_history()))
            print("\n")
Example #12
def getQuadrantPartitionStats(num_searchers, number_samples, max_timestep,
                              latency):
    num_yes = 0
    avg_found = []
    perc_found = 0
    num_timesteps_successful = []
    num_timesteps_overall = []

    for i in range(number_samples):

        m = BasicMap(101, 101)

        middle = (50, 50)

        partitioner = QuadrantPartitioner()
        [rows_midpoint, cols_midpoint] = partitioner.partition(m)
        quad01_rows = (0, rows_midpoint)
        quad01_cols = (0, cols_midpoint)
        quad02_rows = (0, rows_midpoint)
        quad02_cols = (cols_midpoint, m.numColumns() - 1)
        quad03_rows = (rows_midpoint, m.numRows() - 1)
        quad03_cols = (0, cols_midpoint - 1)
        quad04_rows = (rows_midpoint, m.numRows() - 1)
        quad04_cols = (cols_midpoint, m.numColumns() - 1)

        # Add some lost persons to the map
        lp00 = RandomWalkLostPerson(m)
        lp00.init(middle)

        # Add some searchers to the map
        searchers = []
        for j in range(0, num_searchers):
            if j % 4 == 0:
                # Assign to quadrant 1
                searcher = RandomWalkSearcher(m, quad01_rows, quad01_cols)
            elif j % 4 == 1:
                # Assign to quadrant 2
                searcher = RandomWalkSearcher(m, quad02_rows, quad02_cols)
            elif j % 4 == 2:
                # Assign to quadrant 3
                searcher = RandomWalkSearcher(m, quad03_rows, quad03_cols)
            else:
                # Assign to quadrant 4
                searcher = RandomWalkSearcher(m, quad04_rows, quad04_cols)
            searcher.init(middle)
            searchers.append(searcher)

        s = Scenario(m, [lp00], searchers, latency=latency)
        count = s.simulate(max_timestep)  # Simulate for N time steps
        if count < max_timestep:
            num_yes += 1
            num_timesteps_successful.append(count)
        num_timesteps_overall.append(count)
        perc_found = num_yes / (i + 1)
        perc_found = perc_found * 100
        avg_found.append(perc_found)

    # Graph convergence
    plt.title(
        "Convergence to expected probability of success, quadrant partitioning"
    )
    plt.plot(list(range(number_samples)), avg_found)
    plt.xlabel('Number of Samples')
    plt.ylabel('Probability of Success (%)')
    plt.savefig('convergence_psuccess_quadrant.png')
    plt.close()

    print("Result: Percent Lost Person Discovered = " + str(perc_found))
    print("Average Number of Time Steps (when successful): " +
          str(sum(num_timesteps_successful) / len(num_timesteps_successful)))
    print("Standard deviation: " +
          str(statistics.stdev(num_timesteps_successful)))
    print("Average Number of Time Steps (overall): " +
          str(sum(num_timesteps_overall) / len(num_timesteps_overall)))
    print("Standard deviation: " +
          str(statistics.stdev(num_timesteps_overall)))
Example #13
def getLanePartitionStats(num_searchers, number_samples, max_timestep, latency,
                          lane_size):
    num_yes = 0
    avg_found = []
    perc_found = 0
    num_timesteps_successful = []
    num_timesteps_overall = []
    # Final lane is just a bit wider than the others...
    # lanes = [(0, 9), (10, 19), (20, 29), (30, 39), (40, 49),
    #          (50, 59), (60, 69), (70, 79), (80, 89), (90, 100)]
    lanes = []
    lower = 0
    upper = lane_size - 1
    lanes.append((lower, upper))
    for i in range(0, num_searchers - 1):
        # Each searcher gets a dedicated lane
        lower = upper + 1
        upper += lane_size
        lanes.append((lower, upper))
    # Widen the final lane so the lanes span the full 101-column map, as in
    # the example lane list above
    lanes[-1] = (lanes[-1][0], max(lanes[-1][1], 100))
    print(lanes)

    for i in range(number_samples):
        m = BasicMap(101, 101)
        middle = (50, 50)

        # Add some lost persons to the map
        lp00 = RandomWalkLostPerson(m)
        lp00.init(middle)

        # Add some searchers to the map
        searchers = []
        for j in range(0, len(lanes)):
            searcher = VerticalSweepSearcher(m, lanes[j])
            searcher.init((0, math.floor((lanes[j][0] + lanes[j][1]) / 2)))
            searchers.append(searcher)

        s = Scenario(m, [lp00], searchers, latency=latency)
        count = s.simulate(max_timestep)  # Simulate for N time steps
        if count < max_timestep:
            num_yes += 1
            num_timesteps_successful.append(count)
        num_timesteps_overall.append(count)
        perc_found = num_yes / (i + 1)
        perc_found = perc_found * 100
        avg_found.append(perc_found)

    # Graph convergence
    plt.title(
        "Convergence to expected probability of success, lanes partitioning")
    plt.plot(list(range(number_samples)), avg_found)
    plt.xlabel('Number of Samples')
    plt.ylabel('Probability of Success (%)')
    plt.savefig('convergence_psuccess_lanes.png')
    plt.close()

    print("Result: Percent Lost Person Discovered = " + str(perc_found))
    print("Average Number of Time Steps (when successful): " +
          str(sum(num_timesteps_successful) / len(num_timesteps_successful)))
    print("Standard deviation: " +
          str(statistics.stdev(num_timesteps_successful)))
    print("Average Number of Time Steps (overall): " +
          str(sum(num_timesteps_overall) / len(num_timesteps_overall)))
    print("Standard deviation: " +
          str(statistics.stdev(num_timesteps_overall)))
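# Example usage of getLanePartitionStats (a minimal sketch; the argument
# values are illustrative). On the 101-column map used above, lane_size times
# num_searchers should roughly span the columns, e.g.
#
#     getLanePartitionStats(num_searchers=10, number_samples=100,
#                           max_timestep=1000, latency=15, lane_size=10)
#
# which assigns each of the ten searchers its own ten-column vertical lane.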