Example #1
def main(dim, num_bombs):
    # Initialize the game
    # Set up the mask
    # loop
    # print the grid
    # prompt for input()
    # x, y, operation
    # if not board:
    # initialize the board
    # update the grid
    board = grid.create_grid(dim)
    mask = masking_grid.generate_mask(dim)
    initialized = False

    while True:
        print_current_board(board, mask)
        inp = input('enter command (x, y, ["o", "f"]) or "x" to quit:\n')
        if inp == "x":
            break
        if not initialized:
            grid.place_bombs(board, num_bombs)
            initialized = True
        split_inp = inp.split()
        x, y = [int(i) for i in split_inp[0:2]]
        operator = split_inp[2]
        masking_grid.set_true(mask, x, y)
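
The loop above stops right after revealing the chosen cell and never uses the parsed operation. A minimal, self-contained sketch of the missing dispatch step follows; the set-based mask state and the helper name are illustrative assumptions, not part of the original grid/masking_grid modules.

# Hypothetical sketch of the "o"/"f" dispatch the snippet stops short of.
# revealed and flagged stand in for the mask used in the original code.
def apply_command(revealed, flagged, x, y, operation):
    """Apply one parsed command to the set-based mask state."""
    if operation == "o":
        revealed.add((x, y))        # open (reveal) the cell
        flagged.discard((x, y))     # an opened cell cannot stay flagged
    elif operation == "f":
        if (x, y) not in revealed:  # only hidden cells can be flagged
            flagged.symmetric_difference_update({(x, y)})  # toggle the flag
    return revealed, flagged


if __name__ == "__main__":
    revealed, flagged = set(), set()
    apply_command(revealed, flagged, 1, 2, "o")
    apply_command(revealed, flagged, 3, 4, "f")
    print(revealed, flagged)  # {(1, 2)} {(3, 4)}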
Example #2
    def __init__(self, map_file):
        data = np.loadtxt(map_file, delimiter=',', dtype='Float64', skiprows=2)

        # altitude, minimum distance to stay away from obstacle
        altitude, safe_distance = 5, 3
        self.grid, self.edges = create_grid(data, altitude, safe_distance)
        self.graph = nx.Graph()
Example #3
def main():
    bounds = Polygon([(-35.9948, 25.0052), (-35.9948, 84.9926),
                      (41.7718, 84.9926), (41.7718, 25.0052)])

    session = models.Session()

    g = grid.create_grid('Emodnet', from_shape(bounds, srid=4326), 0.5)
    grid.generate_grid_cells(g)

    # dl = decision_layer.generate_decision_layer(session, g)
    # decision_layer.generate_decision_layer_cells(session, dl)

    # decision_layer.generate_decision_layer_cell_depth(session, '/Users/alexnunes/Desktop/osl_bathymetry/bathymetry/', g)
    # decision_layer.generate_decision_layer_cell_seabed(session,g)

    # wind_files = layers.download_wind_files()
    decision_layer.generate_decision_layer_cell_wind(session, g)

    session.close()
    return
Example #4
def find_random_centroids(filename, number):
    random.seed(0)
    hash_list = []
    centroid_list = []
    dim, lat, lon, data = create_grid(filename, number)
    with open(filename, 'r') as f:
        reader = csv.reader(f)
        reader = list(reader)
        for i in range(number):
            random_block = random.sample(reader, 1)
            hm_tuple = hash_map_index(dim, lat, lon, random_block)
            if hm_tuple not in hash_list:
                hash_list.append(hm_tuple)
                # keep the sampled row so it can be converted to floats below
                centroid_list.append(random_block[0])
                print(hm_tuple)
        centroids = []
        for c in centroid_list:
            formatted_c = []
            for d in c:
                formatted_c.append(float(d))
            centroids.append(formatted_c)

    return centroids
Example #5
    # 2) write a method "can_connect()" that:
        # casts two points as a shapely LineString() object
        # tests for collision with a shapely Polygon() object
        # returns True if connection is possible, False otherwise
    # 3) write a method "create_graph()" that:
        # defines a networkx graph as g = Graph()
        # defines a tree = KDTree(nodes)
        # tests for connectivity between each node and
            # k of its nearest neighbors
        # if nodes are connectable, add an edge to graph
    # Iterate through all candidate nodes!
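
A compact, self-contained sketch of the two methods described in the plan above (the node format and the list of (polygon, height) pairs are assumptions; the later planning example in this collection uses the same pattern):

import networkx as nx
from shapely.geometry import LineString
from sklearn.neighbors import KDTree


def can_connect(n1, n2, polygons):
    """Return True if the straight segment n1-n2 misses every obstacle."""
    line = LineString([n1, n2])
    for poly, height in polygons:
        if poly.crosses(line) and height >= min(n1[2], n2[2]):
            return False
    return True


def create_graph(nodes, k, polygons):
    """Connect each node to up to k nearest neighbours when the edge is free."""
    g = nx.Graph()
    tree = KDTree(nodes)  # nodes: list of (north, east, alt) tuples
    for n1 in nodes:
        idxs = tree.query([n1], k=k, return_distance=False)[0]
        for idx in idxs:
            n2 = nodes[int(idx)]
            if n2 == n1:
                continue
            if can_connect(n1, n2, polygons):
                g.add_edge(n1, n2, weight=1)
    return g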

# Create a grid map of the world
from grid import create_grid
# This will create a grid map at 1 m above ground level
grid = create_grid(data, 1, 1)

fig = plt.figure()

plt.imshow(grid, cmap='Greys', origin='lower')

nmin = np.min(data[:, 0])
emin = np.min(data[:, 1])

# If you have a graph called "g" these plots should work
# Draw edges
#for (n1, n2) in g.edges:
#    plt.plot([n1[1] - emin, n2[1] - emin], [n1[0] - nmin, n2[0] - nmin], 'black' , alpha=0.5)

# Draw all nodes connected or not in blue
#for n1 in nodes:
Example #6
    # Iterate through all candidate nodes!
    #1
    g = nx.Graph()
    for node in nodes:
        g.add_node((node[0], node[1]))
    #2,3
    tree2 = KDTree(nodes)
    for node in nodes:
        #po = [node[0],node[1]]
        index = tree2.query([node], k=3, return_distance=False)
        possible = can_connect(nodes[index[0][1]], nodes[index[0][2]],
                               polygons)
        if possible:
            g.add_edge(tuple(nodes[index[0][1]]), tuple(nodes[index[0][2]]))

    grid = create_grid(data, zvalsMax, 1)

    fig = plt.figure()

    plt.imshow(grid, cmap='Greys', origin='lower')

    nmin = np.min(data[:, 0])
    emin = np.min(data[:, 1])

    # If you have a graph called "g" these plots should work
    # Draw edges
    for (n1, n2) in g.edges:
        plt.plot([n1[1] - emin, n2[1] - emin], [n1[0] - nmin, n2[0] - nmin],
                 'black',
                 alpha=0.5)
Example #7
                continue

            if can_connect(n1, n2):
                g.add_edge(n1, n2, weight=1)
    return g


import time
t0 = time.time()
g = create_graph(nodes, 10)
print('graph took {0} seconds to build'.format(time.time() - t0))
print("Number of edges", len(g.edges))

# In[44]:

grid = create_grid(data, sampler._zmax, safety_distance)

fig = plt.figure()

# plt.imshow(grid, cmap='Greys', origin='lower')

# nmin = np.min(data[:, 0])
# emin = np.min(data[:, 1])

# # draw edges
# for (n1, n2) in g.edges:
#     plt.plot([n1[1] - emin, n2[1] - emin], [n1[0] - nmin, n2[0] - nmin], 'yellow' , alpha=0.5)

# # draw all nodes
# for n1 in nodes:
#     plt.scatter(n1[1] - emin, n1[0] - nmin, c='blue')
Example #8
    nmin = np.min(data[:, 0])
    emin = np.min(data[:, 1])

    # draw points
    all_pts = np.array(to_keep)
    north_vals = all_pts[:, 0]
    east_vals = all_pts[:, 1]
    plt.scatter(east_vals - emin, north_vals - nmin, c='red')

    plt.ylabel('NORTH')
    plt.xlabel('EAST')

    plt.show()


if __name__ == '__main__':
    filename = 'colliders.csv'
    data = np.loadtxt(filename, delimiter=',', dtype='Float64', skiprows=2)
    print(data)

    polygons = extract_polygons(data)
    print(len(polygons))
    num_samples = 100
    zmin = 0
    zmax = 10
    to_keep = random_sampling(data, polygons, num_samples, zmin, zmax) 
    print(len(to_keep))

    grid = create_grid(data, zmax, 1)
    plot(grid, to_keep) 
Example #9
    """
    Create and save bar charts with best fitness and average fitness for all generations
    """
    x = np.arange(len(best_fitness))
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 5), dpi=100)
    fig.suptitle('Fitness Function Evolution', fontsize=20)
    axes[0].bar(x, best_fitness, color='blue')
    axes[0].set_xlabel('Generation')
    axes[0].set_ylabel('Best fitness')
    axes[0].set_title('Best Fitness Evolution')
    axes[0].set_xticks(x)

    axes[1].bar(x, average_fitness, color='blue')
    axes[1].set_xlabel('Generation')
    axes[1].set_ylabel('Average fitness')
    axes[1].set_title('Average Fitness Evolution')
    axes[1].set_xticks(x)

    fig.savefig('best_fitness.png', dpi=100)


if __name__ == "__main__":
    """
    Create new grid and perform evolution over the population
    """
    this_grid = grid.create_grid(GRID_SIZE, WIDTH, HEIGHT)
    #WALLS.extend([[[280, 124], [282, 281]], [[282, 281], [431, 290]], [[433, 124], [426, 286]], [[522, 295], [525, 453]], [[520, 295], [679, 297]], [[679, 297], [676, 105]], [[97, 361], [75, 535]], [[75, 535], [317, 550]], [[850, 362], [844, 545]], [[844, 545], [733, 549]], [[1112, 79], [1121, 280]], [[1121, 280], [980, 286]], [[1116, 77], [930, 92]], [[973, 522], [963, 366]], [[963, 366], [1107, 344]], [[1107, 344], [1117, 440]], [[774, 249], [768, 104]], [[62, 50], [174, 54]], [[108, 173], [106, 263]]])
    e = Evolution(this_grid)
    e.evolve()
    save_graph(e.best_fitness, e.average_fitness)
Example #10
# ## Load Data

# In[6]:


# This is the same obstacle data from the previous lesson.
filename = 'receding_horizon/colliders.csv'
data = np.loadtxt(filename, delimiter=',', dtype='Float64', skiprows=2)
print(data)


# In[8]:
flight_altitude = 3
safety_distance = 3
grid = create_grid(data, flight_altitude, safety_distance)


# In[6]:
fig = plt.figure()

plt.imshow(grid, cmap='Greys', origin='lower')

plt.xlabel('NORTH')
plt.ylabel('EAST')

plt.show()

###################################
sampler = Sampler(data)
polygons = sampler.polygons
Example #11
# Library imports
import sys
import numpy as np

# User defined library imports
from file_read import read_shp, get_values
from grid import create_grid
from a_star import search
from prompt_read import read_search_prompt, read_grid_prompt

# Locate file paths
path = "shape/crime_dt"
# Read the data into a data frame
df, bbox = read_shp(path)
# Convert it into numpy array to work with
values = get_values(df, np.float32)
# Get grid configuration from user input
grid_size, threshold = read_grid_prompt()
# Create grid with crime points
grid, fig, ax = create_grid(values,
                            bbox,
                            threshold=threshold,
                            grid_size=grid_size,
                            plot=True)
# Get search start end points from user input
start, end = read_search_prompt(grid)
# Define costs
cost = [1, 1.3, 1.5]
# Search for optimal path
path = search(grid, cost, start, end, [fig, ax])
Example #12
    def plan_path(self):
        self.flight_state = States.PLANNING
        print("Searching for a path ...")
        TARGET_ALTITUDE = 5
        SAFETY_DISTANCE = 5

        self.target_position[2] = TARGET_ALTITUDE

        # DONE: read lat0, lon0 from colliders into floating point values
        # the max_rows keyword of np.loadtxt requires numpy >= 1.16; also make sure scikit-image is up to date

        # data_pos = np.loadtxt('colliders.csv',dtype='str', max_rows=1)
        # (lat0,lon0) = [float(data_pos[1][:-1]),float(data_pos[3][:-1])]

        # for numpy vers <1.16
        with open('colliders.csv') as f:
            latLonStrArr = f.readline().rstrip().replace('lat0', '').replace(
                'lon0 ', '').split(',')
            lat0 = float(latLonStrArr[0])
            lon0 = float(latLonStrArr[1])

        # DONE: set home position to (lon0, lat0, 0)
        self.set_home_position(lon0, lat0, 0)
        # DONE: retrieve current global position
        global_position = [self._longitude, self._latitude, self._altitude]
        # DONE: convert to current local position using global_to_local()
        current_local_pos = global_to_local(global_position, self.global_home)
        print('global home {0}, position {1}, local position {2}'.format(
            self.global_home, self.global_position, self.local_position))

        # Read in obstacle map
        data = np.loadtxt('colliders.csv',
                          delimiter=',',
                          dtype='Float64',
                          skiprows=2)

        grid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE,
                                                      SAFETY_DISTANCE)
        print("North offset = {0}, east offset = {1}".format(
            north_offset, east_offset))

        skeleton = medial_axis(invert(grid))

        # DONE: convert start position to current position rather than map center
        # Define starting point on the grid (this is just grid center)
        start_ne = (int(self.local_position[0] - north_offset),
                    int(self.local_position[1] - east_offset))

        # Set goal as some arbitrary position on the grid
        # arb_goal = (750, 370, 0)

        # Set random goal on map in local coordinate system
        found = False
        while not found:
            goal_ne = (randrange(0, len(grid[:, 1]) - 1),
                       randrange(0, len(grid[1, :]) - 1))
            if grid[goal_ne] == 0:
                found = True

        # DONE: adapt to set goal as latitude / longitude position and convert (can be)
        # global_goal = local_to_global(arb_goal, self.global_home)
        # local_goal to show expected transformation
        # goal_ne = global_to_local(global_goal, self.global_home)
        print(
            "Drone is starting from {0} and the goal was randomly set to {1}".
            format(start_ne, goal_ne))
        skel_start, skel_goal = find_start_goal(skeleton, start_ne, goal_ne)

        # Run A* to find a path from start to goal
        # DONE: add diagonal motions with a cost of sqrt(2) to your A* implementation
        # or move to a different search space such as a graph (not done here)
        path_, cost = a_star(
            invert(skeleton).astype(int), tuple(skel_start),
            tuple(skel_goal))
        print("Path length = {0}, path cost = {1}".format(len(path_), cost))
        # DONE: prune path to minimize number of waypoints
        path = collinearity(path_)

        # TODO (if you're feeling ambitious): Try a different approach altogether!

        # Convert path to waypoints
        waypoints = [[
            int(p[0]) + north_offset,
            int(p[1]) + east_offset, TARGET_ALTITUDE
        ] for p in path]

        # get heading angle for next point
        theta = heading(path)
        for i in range(len(waypoints)):
            waypoints[i].append(theta[i])

        print(waypoints)
        # Set self.waypoints
        self.waypoints = waypoints
        # send waypoints to sim
        self.send_waypoints()
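
The collinearity() call used above for waypoint pruning is not shown in this excerpt. A minimal sketch of the usual collinearity test (the function names and epsilon are assumptions; the repository's own version may differ) could look like this:

def collinear(p1, p2, p3, eps=1e-6):
    # twice the signed area of the triangle spanned by the three 2D points
    area = (p1[0] * (p2[1] - p3[1]) + p2[0] * (p3[1] - p1[1])
            + p3[0] * (p1[1] - p2[1]))
    return abs(area) < eps


def prune_path(path, eps=1e-6):
    """Drop intermediate waypoints that lie on the line through their neighbours."""
    pruned = list(path)
    i = 0
    while i < len(pruned) - 2:
        if collinear(pruned[i], pruned[i + 1], pruned[i + 2], eps):
            del pruned[i + 1]  # middle point adds no information
        else:
            i += 1
    return pruned

Applied to the A* output, pruned = prune_path(path_) keeps only the waypoints where the heading actually changes.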
Example #13
def main(win, player):
    locked_pos = {}
    grid = create_grid(locked_pos)
    change_piece = False
    current_piece = get_shape()
    next_piece = get_shape()
    clock = pygame.time.Clock()
    fall_time = 0
    hardcore_time = 0
    time_elapsed = 0
    board = Board(WIDTH, HEIGHT, BLOCK_SIZE, BOX_WIDTH, BOX_HEIGHT)

    player.restart_stats()

    run = True
    while run:
        grid = create_grid(locked_pos)
        fall_time += clock.get_rawtime()
        time_elapsed += clock.get_rawtime()
        if mode == 2:
            hardcore_time += clock.get_rawtime()
        clock.tick()

        if time_elapsed / 1000 > 1:
            time_elapsed = 0
            player.timer += 1

        if hardcore_time / 1000 > 5:
            hardcore_time = 0
            if player.fall_speed > 0.1:
                player.fall_speed -= 0.005
                player.speed_level += 1

        if fall_time / 1000 > player.fall_speed:
            fall_time = 0
            current_piece.y += 1
            if not (valid_space(current_piece, grid,
                                convert_shape_format)) and current_piece.y > 0:
                current_piece.y -= 1
                change_piece = True

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                run = False
                sys.exit()

            # Key handling
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    current_piece.x -= 1
                    if not (valid_space(current_piece, grid,
                                        convert_shape_format)):
                        current_piece.x += 1
                elif event.key == pygame.K_RIGHT:
                    current_piece.x += 1
                    if not (valid_space(current_piece, grid,
                                        convert_shape_format)):
                        current_piece.x -= 1
                elif event.key == pygame.K_DOWN:
                    current_piece.y += 1
                    if not (valid_space(current_piece, grid,
                                        convert_shape_format)):
                        current_piece.y -= 1
                elif event.key == pygame.K_UP:
                    current_piece.rotation += 1
                    if not (valid_space(current_piece, grid,
                                        convert_shape_format)):
                        current_piece.rotation -= 1
                elif event.key == pygame.K_RETURN:
                    for i in range(20):
                        current_piece.y += 1
                        if not (valid_space(current_piece, grid,
                                            convert_shape_format)):
                            current_piece.y -= 1
                elif event.key == pygame.K_ESCAPE:
                    pause(win, active, WIDTH, HEIGHT, player.restart_stats,
                          main, main_menu, get_leaderboard, player)

        shape_pos = convert_shape_format(current_piece)

        # draw square within the block
        for i in range(len(shape_pos)):
            x, y = shape_pos[i]
            if y > -1:
                grid[y][x] = current_piece.color

        if change_piece:
            for pos in shape_pos:
                p = (pos[0], pos[1])
                locked_pos[p] = current_piece.color
            current_piece = next_piece
            next_piece = get_shape()
            change_piece = False
            player.score += clear_rows(grid, locked_pos,
                                       player) * player.get_score_factor()

        board.draw_window(win, grid, draw_grid)
        board.draw_next_shape(next_piece, win, player.score,
                              player.get_max_score, player.format_timer,
                              player.speed_level, player.combo,
                              player.max_combo)
        pygame.display.update()

        if check_lost(locked_pos):
            draw_name(win, player)

    pygame.display.quit()

Example #14
import time
t0 = time.time()
g = create_graph(nodes, 10)
print('graph took {0} seconds to build'.format(time.time() - t0))
print("Number of edges", len(g.edges))

# ## Step 4 - Visualize Graph

# In[72]:

# Create a grid map of the world
from grid import create_grid
# This will create a grid map at the sampler's maximum altitude
grid = create_grid(data, sampler._zmax, 1)

fig = plt.figure()

plt.imshow(grid, cmap='Greys', origin='lower')

nmin = np.min(data[:, 0] - data[:, 3])
emin = np.min(data[:, 1] - data[:, 4])

# If you have a graph called "g" these plots should work
# Draw edges
for (n1, n2) in g.edges:
    plt.plot([n1[1] - emin, n2[1] - emin], [n1[0] - nmin, n2[0] - nmin],
             'black',
             alpha=0.5)
Example #15
    def plan_path(self):
        self.flight_state = States.PLANNING
        print("Searching for a path ...")
        TARGET_ALTITUDE = 5
        SAFETY_DISTANCE = 5

        self.target_position[2] = TARGET_ALTITUDE

        def can_connect(n1, n2, polygons):
            l = LineString([n1, n2])
            for p in polygons:
                if p.crosses(l) and p.height >= min(n1[2], n2[2]):
                    return False
            return True

        def create_graph(nodes, k, polygons):
            g = nx.Graph()
            tree = KDTree(nodes)
            for n1 in nodes:
                # for each node connect try to connect to k nearest nodes
                idxs = tree.query([n1], k, return_distance=False)[0]

                for idx in idxs:
                    n2 = nodes[idx]
                    if n2 == n1:
                        continue

                    if can_connect(n1, n2, polygons):
                        g.add_edge(n1, n2, weight=1)
            return g

        # TODO: read lat0, lon0 from colliders into floating point values
        data_pos = np.loadtxt('colliders.csv', dtype='str', max_rows=1)
        (lat0, lon0) = [float(data_pos[1][:-1]), float(data_pos[3][:-1])]
        # TODO: set home position to (lon0, lat0, 0)
        (east_home, north_home, _, _) = utm.from_latlon(lat0, lon0)

        # TODO: retrieve current global position
        (east, north, _, _) = utm.from_latlon(self.global_position[1],
                                              self.global_position[0])
        # TODO: convert to current local position using global_to_local()
        local_position = np.array([
            north - north_home, east - east_home,
            -(self.global_position[2] - self.global_home[2])
        ])
        print('global home {0}, position {1}, local position {2}'.format(
            self.global_home, self.global_position, self.local_position))
        # Read in obstacle map
        data = np.loadtxt('colliders.csv',
                          delimiter=',',
                          dtype='Float64',
                          skiprows=2)

        # Sample points
        sampler = Sampler(data)
        polygons = sampler._polygons
        nodes = sampler.sample(150)
        print(len(nodes))

        # Connect nodes to graph
        g = create_graph(nodes, 10, polygons)
        #grid = create_grid(data, sampler._zmax, 1)

        # define start as first point from graph, goal point is randomized
        start = list(g.nodes)[0]
        k = np.random.randint(len(g.nodes))
        print("Amount of found nodes:{0} {1}".format(k, len(g.nodes)))
        goal = list(g.nodes)[k]

        # Run A* to find path
        path, cost = a_star(g, heuristic, start, goal)
        path_pairs = zip(path[:-1], path[1:])
        for (n1, n2) in path_pairs:
            print(n1, n2)

        # Define a grid for a particular altitude and safety margin around obstacles
        grid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE,
                                                      SAFETY_DISTANCE)
        print("North offset = {0}, east offset = {1}".format(
            north_offset, east_offset))
        '''
        # Define starting point on the grid (this is just grid center)
        grid_start = (int(local_position[0]-north_offset), int(local_position[1]-east_offset))
        # TODO: convert start position to current position rather than map center
        
        # Set goal as some arbitrary position on the grid
        grid_goal = (int(local_position[0]-north_offset), int(local_position[1]-east_offset))
        # TODO: adapt to set goal as latitude / longitude position and convert

        # Run A* to find a path from start to goal
        # TODO: add diagonal motions with a cost of sqrt(2) to your A* implementation
        # or move to a different search space such as a graph (not done here)
        print('Local Start and Goal: ', grid_start, grid_goal)
        path_, _ = a_star(grid, heuristic, grid_start, grid_goal)
        # TODO: prune path to minimize number of waypoints
        path = collinearity(path_)
        # TODO (if you're feeling ambitious): Try a different approach altogether!
        '''
        # Convert path to waypoints
        waypoints = [[
            p[0] + north_offset, p[1] + east_offset, TARGET_ALTITUDE, 0
        ] for p in path]
        print(waypoints)
        # print(grid_start, grid_goal)  # only defined inside the commented-out grid A* block above
        # Set self.waypoints
        self.waypoints = waypoints
        # TODO: send waypoints to sim (this is just for visualization of waypoints)
        self.send_waypoints()
Example #16
import time
t0 = time.time()
g = create_graph(nodes, 10)
print('graph took {0} seconds to build'.format(time.time() - t0))
print("Number of edges", len(g.edges))

# ## Step 4 - Visualize Graph

# In[9]:

start = list(g.nodes)[0]
k = np.random.randint(len(g.nodes))
print(k, len(g.nodes))
goal = list(g.nodes)[k]

grid = create_grid(data, flight_altitude, safety_distance)

# In[10]:

fig = plt.figure()

plt.imshow(grid, cmap='copper', origin='lower', alpha=0.7)

nmin = np.min(data[:, 0] - data[:, 3])
emin = np.min(data[:, 1] - data[:, 4])

for (n1, n2) in g.edges:
    plt.plot([n1[1] - emin, n2[1] - emin], [n1[0] - nmin, n2[0] - nmin],
             'orange',
             alpha=0.5)
Example #17
    '''

    drow, dcolumn = action.move_value()
    return current[0] + drow, current[1] + dcolumn


plt.rcParams['figure.figsize'] = 12, 12

data = np.loadtxt('colliders.csv', delimiter=',', dtype='Float64', skiprows=2)
print(data)

altitude = 5
# minimum distance to stay away from obstacle
safe_distance = 3

grid = create_grid(data, altitude, safe_distance)
print(grid)

# plt.imshow(grid, origin='lower')
# plt.xlabel('EAST')
# plt.ylabel('NORTH')
# plt.show()

start_ne = (25, 100)
goal_ne = (750., 370.)

traveller = Traveller(grid)
found, paths = traveller.travel(start_ne, goal_ne)

print('found = ', found)
Example #18
def process_shakemaps(shakemaps=None, session=None, scenario=False):
    '''
    Process or reprocess the shakemaps passed into the function
    
    Args:
        shakemaps (list): List of ShakeMap objects to process
        session (Session()): SQLAlchemy session
        scenario (boolean): True for manually triggered events
    
    Returns:
        dict: a dictionary that contains information about the function run
        ::
            data = {'status': either 'finished' or 'failed',
                    'message': message to be returned to the UI,
                    'log': message to be added to ShakeCast log
                           and should contain info on error}
    '''
    clock = Clock()
    sc = SC()
    for shakemap in shakemaps:
        # check if we should wait until daytime to process
        if (clock.nighttime()) is True and scenario is False:
            if shakemap.event.magnitude < sc.night_eq_mag_cutoff:
                continue
            
        shakemap.status = 'processing_started'

        # open the grid.xml file and find groups affected by event
        grid = create_grid(shakemap)
        if scenario is True:
            in_region = (session.query(Group)
                                    .filter(Group.in_grid(grid))
                                    .all())
            groups_affected = [group for group in in_region
                                    if group.gets_notification('damage', scenario=True)]
        else:
            in_region = (session.query(Group)
                                        .filter(Group.in_grid(grid))
                                        .all())
            groups_affected = [group for group in in_region
                                    if group.gets_notification('damage')]
        
        if not groups_affected:
            shakemap.status = 'processed - no groups'
            session.commit()
            continue
        
        # send out new events and create inspection notifications
        for group in groups_affected:
                    
            notification = Notification(group=group,
                                        shakemap=shakemap,
                                        event=shakemap.event,
                                        notification_type='DAMAGE',
                                        status='created')
            
            session.add(notification)
        session.commit()
        
        notifications = (session.query(Notification)
                    .filter(Notification.shakemap == shakemap)
                    .filter(Notification.notification_type == 'DAMAGE')
                    .filter(Notification.status != 'sent')
                    .all())
        
        # get a set of all affected facilities
        affected_facilities = set(itertools
                                    .chain
                                    .from_iterable(
                                        [(session.query(Facility)
                                            .filter(Facility.in_grid(grid))
                                            .filter(Facility.groups
                                                        .any(Group.shakecast_id == group.shakecast_id))
                                            .all())
                                         for group in
                                         groups_affected]))

        geoJSON = {'type': 'FeatureCollection',
                    'features': [None] * len(affected_facilities),
                    'properties': {}}
        if affected_facilities:
            fac_shaking_lst = [None] * len(affected_facilities)
            f_count = 0
            for facility in affected_facilities:
                fac_shaking = make_inspection_priority(facility=facility,
                                                    shakemap=shakemap,
                                                    grid=grid)
                if fac_shaking is False:
                    continue
                
                fac_shaking_lst[f_count] = FacilityShaking(**fac_shaking)

                geoJSON['features'][f_count] = makeImpactGeoJSONDict(facility,
                                                                fac_shaking)

                f_count += 1

            # Remove all old shaking and add all fac_shaking_lst
            shakemap.facility_shaking = []
            session.commit()

            session.bulk_save_objects(fac_shaking_lst)
            session.commit()

            geoJSON['properties']['impact-summary'] = get_event_impact(shakemap)

            saveImpactGeoJson(shakemap, geoJSON)

            # get and attach pdf
            pdf.generate_impact_pdf(shakemap, save=True)
    
            shakemap.status = 'processed'
        else:
            shakemap.status = 'processed - no facs'
        
        if scenario is True:
            shakemap.status = 'scenario'

        if notifications:
            # send inspection notifications for the shaking levels we
            # just computed
            for n in notifications:
                inspection_notification(notification=n,
                                        scenario=scenario,
                                        session=session)
        
        session.commit()
Example #19
def integrate(xmax, xmin, ymax, ymin, delta, H, tmax, rotation, forcing,
              boundary, radiation_type, D, gamma, frecplot):

    print('----------------------------------------')
    print("Running Shallow Water Model using C-grid")
    print(" ")
    print("Parameters: ")
    print('tmax = ' + str(tmax))
    print('rotation = ' + str(rotation))
    print('forcing type = ' + str(forcing))
    print('----------------------------------------')

    # ---------------------------------------------
    # Data to be exported at the end of the model run

    start_time = time.time()  # store initial time
    result_u = []
    result_v = []
    result_h = []
    result_csi = []
    # for checking conservation of mass and energy
    Vol = []  # volume
    Ep = []  # potential energy
    Ek = []  # kinetic energy
    ape = []  # absolute potential enstrophy
    # data from one grid point to be stored at all time steps
    histgrid1 = []
    histgrid2 = []
    histgrid3 = []
    histgrid4 = []

    # ---------------------------------------------
    # Create grids

    dx, dy = delta, delta

    tmp = create_grid(xmax, xmin, ymax, ymin, delta)

    lonsu, latsu = tmp[0]['lonsu'], tmp[0]['latsu']
    lonsv, latsv = tmp[0]['lonsv'], tmp[0]['latsv']
    lonsz, latsz = tmp[0]['lonsz'], tmp[0]['latsz']
    nx, ny = tmp[1], tmp[2]
    dt = tmp[3]

    fu = coriolis(latsu, rotation)
    fv = coriolis(latsv, rotation)
    fz = coriolis(latsz, rotation)

    g = 9.8

    # ---------------------------------------------
    # Define forcing

    if forcing == 2:
        a, cx, cy, nrx, nry, dx = 1, 0, 0, 10, 5, delta
    if forcing == 3:
        a, cx, cy, nrx, nry, dx = .1, 0, 0, 15, 15, delta

    gauss = gauss_space(xmin, xmax, ymin, ymax, nx, ny, a, cx, cy, nrx, nry,
                        dx, dy)

    if forcing == 1:
        alpha = 0.8
    if forcing == 2 or forcing == 3:
        alpha = 0.02

    # ---------------------------------------------
    # Initial conditions

    u_next = np.zeros((ny, nx + 1))
    v_next = np.zeros((ny + 1, nx))
    h_next = np.zeros((ny, nx)) + H
    U_next = fluxU(u_next, h_next, nx, ny)
    V_next = fluxV(v_next, h_next, nx, ny)

    # Update prognostic matrices

    u_prev, v_prev, h_prev = u_next * np.nan, v_next * np.nan, h_next * np.nan
    u_curr, v_curr, h_curr = u_next, v_next, h_next
    u_next, v_next, h_next = u_curr * np.nan, v_curr * np.nan, h_curr * np.nan

    U_prev, V_prev = u_next * np.nan, v_next * np.nan
    U_curr, V_curr = U_next, V_next
    U_next, V_next = u_curr * np.nan, v_curr * np.nan

    # ---------------------------------------------
    # Time-loop:
    for t in range(1, tmax):

        frc = gauss * decay(alpha, t, H)

        # Calculate diagnostic matrices
        U_next = fluxU(u_curr, h_curr, nx, ny)
        V_next = fluxV(v_curr, h_curr, nx, ny)
        B_next = Bournelli(u_curr, v_curr, h_curr, nx, ny)
        csi_next = csi(u_curr, v_curr, h_curr, fz, nx, ny, dx, dy)

        if t > 1:
            if boundary == 'radiation':
                if radiation_type == 'constant':
                    U_next = RB_cte.west(U_next, U_curr, V_curr, fv, H, dx, dt)
                    V_next = RB_cte.north(V_next, V_curr, U_curr, fu, H, dy,
                                          dt)
                    V_next = RB_cte.south(V_next, V_curr, U_curr, fu, H, dy,
                                          dt)
                elif radiation_type == 'estimate':
                    U_next = RB_est.west(U_next, U_curr, U_prev, V_curr, fv, H,
                                         dx, dt)
                    V_next = RB_est.north(V_next, V_curr, V_prev, U_curr, fu,
                                          H, dy, dt)
                    V_next = RB_est.south(V_next, V_curr, V_prev, U_curr, fu,
                                          H, dy, dt)
                elif radiation_type == 'Pedro':
                    U_next[:, -1] = 0
                    U_next = RB_p.west(U_next, U_curr, U_prev, V_curr, fv, H,
                                       dx, dt)
                    V_next = RB_p.north(V_next, V_curr, V_prev, U_curr, fu, H,
                                        dy, dt)
                    V_next = RB_p.south(V_next, V_curr, V_prev, U_curr, fu, H,
                                        dy, dt)

        # First time-step:
        # discretize using Euler forward in time and centered in space:
        if t == 1:

            # calculate u field
            V_mean = .5 * ((csi_next[:-1,1:-1] * \
                            .5 * (V_next[:-1,:-1] + V_next[:-1,1:])) +
                           (csi_next[1:,1:-1] * \
                            .5 * (V_next[1:,:-1] + V_next[1:,1:])))
            delta_Bu = (B_next[:, 1:] - B_next[:, :-1]) / delta
            u_next[:, 1:-1] = u_curr[:, 1:-1] + dt * (V_mean - delta_Bu)

            # calculate v field
            U_mean = .5 * ((csi_next[1:-1,1:] * \
                            .5 * (U_next[1:,1:] + U_next[:-1,1:])) +
                           (csi_next[1:-1,:-1] * \
                            .5 * (U_next[1:,:-1] + U_next[:-1,:-1])))
            delta_Bv = (B_next[:-1] - B_next[1:]) / delta
            v_next[1:-1] = v_curr[1:-1] + dt * (-U_mean - delta_Bv)

            # calculate h field
            delta_U = (U_next[:, 1:] - U_next[:, :-1]) / delta
            delta_V = (V_next[:-1, :] - V_next[1:, :]) / delta
            h_next = h_curr + dt * (-delta_U - delta_V)

        # Other time-steps
        # discretize using Leapfrog scheme:

        else:

            # calculate u field
            V_mean = .5 * ((csi_next[:-1,1:-1] * \
                            .5 * (V_next[:-1,:-1] + V_next[:-1,1:])) +
                           (csi_next[1:,1:-1] * \
                            .5 * (V_next[1:,:-1] + V_next[1:,1:])))
            delta_Bu = (B_next[:, 1:] - B_next[:, :-1]) / delta
            u_next[:, 1:-1] = u_prev[:, 1:-1] + 2 * dt * (V_mean - delta_Bu)

            # calculate v field
            U_mean = .5 * ((csi_next[1:-1,1:] * \
                            .5 * (U_next[1:,1:] + U_next[:-1,1:])) +
                           (csi_next[1:-1,:-1] * \
                            .5 * (U_next[1:,:-1] + U_next[:-1,:-1])))
            delta_Bv = (B_next[:-1] - B_next[1:]) / delta
            v_next[1:-1] = v_prev[1:-1] + 2 * dt * (-U_mean - delta_Bv)

            # calculate h field
            delta_U = (U_next[:, 1:] - U_next[:, :-1]) / delta
            delta_V = (V_next[:-1, :] - V_next[1:, :]) / delta
            h_next = h_prev + 2 * dt * (-delta_U - delta_V)

        # ---------------------------------------------
        # Add forcing
        if forcing == 2:  # add forcing
            u_next[1:-1, 1:-1] = u_next[1:-1, 1:-1] + frc[1:-1, 1:-1]

        if forcing == 3:  # add forcing
            h_next[1:-1, 1:-1] = h_next[1:-1, 1:-1] + .5 * (frc[1:-1, 2:-1] +
                                                            frc[1:-1, 1:-2])

        # ---------------------------------------------
        # Boundaries

        # west is always fixed
        if boundary == 'fixed':
            u_next[:, -1], u_next[:, 0], v_next[0], v_next[-1] = 0, 0, 0, 0

        if boundary == 'radiation':

            if radiation_type == 'constant':
                u_next[:, -1] = 0
                v_next = RB_cte.north(v_next, v_curr, u_curr, fu, H, dy, dt)
                v_next = RB_cte.south(v_next, v_curr, u_curr, fu, H, dy, dt)
                u_next = RB_cte.west(u_next, u_curr, v_curr, fv, H, dx, dt)

            elif radiation_type == 'estimate':
                u_next[:, -1] = 0
                u_next = RB_est.west(u_next, u_curr, u_prev, v_curr, fv, H, dx,
                                     dt)
                v_next = RB_est.north(v_next, v_curr, v_prev, u_curr, fu, H,
                                      dy, dt)
                v_next = RB_est.south(v_next, v_curr, v_prev, u_curr, fu, H,
                                      dy, dt)
            elif radiation_type == 'Pedro':
                u_next[:, -1] = 0
                u_next = RB_p.west(u_next, u_curr, u_prev, v_curr, fv, H, dx,
                                   dt)
                v_next = RB_p.north(v_next, v_curr, v_prev, u_curr, fu, H, dy,
                                    dt)
                v_next = RB_p.south(v_next, v_curr, v_prev, u_curr, fu, H, dy,
                                    dt)

        # ---------------------------------------------
        # Apply filters
        if t > 3:

            if D > 0:

                # Diffusion in x
                u_curr = difusion_x(u_curr, u_prev, D, dx, dt)
                v_curr = difusion_x(v_curr, v_prev, D, dx, dt)
                h_curr = difusion_x(h_curr, h_prev, D, dx, dt)

#                # Difusion in y
#                u_curr  = difusion_y(u_curr,u_prev,D,dx,dt)
#                v_curr  = difusion_y(v_curr,v_prev,D,dx,dt)
#                h_curr  = difusion_y(h_curr,h_prev,D,dx,dt)

            if gamma > 0:

                # Robert-Asselin in time
                u_curr = RA_filter_t(u_next, u_curr, u_prev, gamma)
                v_curr = RA_filter_t(v_next, v_curr, v_prev, gamma)
                h_curr = RA_filter_t(h_next, h_curr, h_prev, gamma)

                # Robert-Asselin-Williams
                u_next = RAW_filter(u_next, u_curr, u_prev, gamma, alpha)[0]
                v_next = RAW_filter(v_next, v_curr, v_prev, gamma, alpha)[0]
                h_next = RAW_filter(h_next, h_curr, h_prev, gamma, alpha)[0]

                u_curr = RAW_filter(u_next, u_curr, u_prev, gamma, alpha)[1]
                v_curr = RAW_filter(v_next, v_curr, v_prev, gamma, alpha)[1]
                h_curr = RAW_filter(h_next, h_curr, h_prev, gamma, alpha)[1]

                # Time-filter
                u_curr = time_filter(u_next, u_curr, u_prev)
                v_curr = time_filter(v_next, v_curr, v_prev)
                h_curr = time_filter(h_next, h_curr, h_prev)

                # Robert-Asselin in x-direction
                u_curr = RA_filter_x(u_curr, gamma)
                v_curr = RA_filter_x(v_curr, gamma)
                h_curr = RA_filter_x(h_curr, gamma)

#                # Robert-Asselin in y-direction
#                u_curr  = RA_filter_y(u_curr,gamma)
#                v_curr  = RA_filter_y(v_curr,gamma)
#                h_curr  = RA_filter_y(h_curr,gamma)

        # ---------------------------------------------
        # Update prognostic matrices
        u_prev, v_prev, h_prev = u_curr, v_curr, h_curr
        u_curr, v_curr, h_curr = u_next, v_next, h_next
        u_next, v_next, h_next = u_curr * np.nan, v_curr * np.nan, h_curr * np.nan

        # Update diagnostic matrices
        U_prev, V_prev = U_curr, V_curr
        U_curr, V_curr, csi_curr = U_next, V_next, csi_next
        U_next, V_next, csi_next = U_curr * np.nan, V_curr * np.nan, csi_curr * np.nan

        # ---------------------------------------------
        # Compute conservative properties

        # total volume
        Vol.append(((h_curr) * delta**2).sum())

        # potential energy
        Ep.append(((h_curr)**2 * delta**2).sum() * g / 2)

        # kinetic energy
        EKu = (u_curr[:, :-1] + u_curr[:, 1:]) / 2
        EKv = (v_curr[:-1] + v_curr[1:]) / 2
        h_meanxy = h_curr.mean()
        Ek.append((((EKu**2 + EKv**2) * h_meanxy / 2 * (delta**2)).sum()))

        # enstrophy
        h_meanx = (h_curr[:, :-1] + h_curr[:, 1:]) / 2
        h_meanxy = (h_meanx[:-1] + h_meanx[1:]) / 2
        tmp = csi_curr * np.nan
        tmp[1:-1, 1:-1] = (csi_curr[1:-1, 1:-1]**2) * h_meanxy
        tmp[0, :-1] = (csi_curr[0, :-1]**2) * h_curr[0]
        tmp[-1, 1:] = (csi_curr[-1, 1:]**2) * h_curr[-1]
        tmp[1:, 0] = (csi_curr[1:, 0]**2) * h_curr[:, 0]
        tmp[:-1, -1] = (csi_curr[:-1, -1]**2) * h_curr[:, -1]
        ape.append((tmp * (delta**2)).sum() / 2)

        # ---------------------------------------------
        # Store middle grid points
        histgrid1.append(h_curr[40, 40])
        histgrid2.append(u_curr[40, 41])
        histgrid3.append(v_curr[41, 40])
        histgrid4.append(csi_curr[41, 41])

        # ---------------------------------------------
        # Track the maximum CFL number
        cmax = np.sqrt(np.amax(h_curr) * g)
        cflmax = cmax * dt / dx
        print('timestep = ' + str(t) + ', cfl max: ' + str(cflmax))

        # ---------------------------------------------
        # Store results at some time steps
        if t % frecplot == 0:
            print('')
            print('Storing results, timestep = '+str(t)+\
                  ' (time = '+str(round(t*dt/60/60/24,2))+'d, '+ \
                  str(round(t*dt/60/60,2))+'h)...')
            result_u.append(u_curr)
            result_v.append(v_curr)
            result_h.append(h_curr)
            result_csi.append(csi_curr)

            mx1, mx2, mx3, mx4 = round(np.amax(h_curr)), round(
                np.amax(u_curr)), round(np.amax(v_curr)), round(
                    np.amax(csi_curr[1:-1, 1:-1])),
            m1, m2, m3, m4 = round(np.mean(h_curr)), round(
                np.mean(u_curr)), round(np.mean(v_curr)), round(
                    np.mean(csi_curr[1:-1, 1:-1]))
            mn1, mn2, mn3, mn4 = round(np.amin(h_curr)), round(
                np.amin(u_curr)), round(np.amin(v_curr)), round(
                    np.amin(csi_curr[1:-1, 1:-1]))

            print('max values of h, u, v and csi: ' + str(mx1), str(mx2),
                  str(mx3), str(mx4))
            print('mean values of h, u, v and csi: ' + str(m1), str(m2),
                  str(m3), str(m4))
            print('min values of h, u, v and csi: ' + str(mn1), str(mn2),
                  str(mn3), str(mn4))
            print('')

    # ---------------------------------------------
    # After finishing model integration:
    #   plot h, u and v at the center of each grid
    tx = np.arange(0, len(histgrid1) * dt / 60 / 60 / 24, dt / 60 / 60 / 24)
    fig, axs = plt.subplots(4, figsize=(10, 10), constrained_layout=True)
    axs[0].plot(tx, histgrid1, linewidth=6, color='b')
    axs[0].set_title('h at point 40,40', color='b', fontsize=22)
    axs[0].tick_params(axis='both', which='major', labelsize=16)
    axs[1].plot(tx, histgrid2, linewidth=6, color='g')
    axs[1].set_title('u at point 40,41', color='g', fontsize=22)
    axs[1].tick_params(axis='both', which='major', labelsize=16)
    axs[2].plot(tx, histgrid3, linewidth=6, color='r')
    axs[2].set_title('v at point 41,40', color='r', fontsize=22)
    axs[2].tick_params(axis='both', which='major', labelsize=16)
    axs[2].set_xlabel('time (days)', fontsize=18)
    axs[3].plot(tx, histgrid4, linewidth=6, color='y')
    axs[3].set_title('csi at point 41,41', color='y', fontsize=22)
    axs[3].tick_params(axis='both', which='major', labelsize=16)
    axs[3].set_xlabel('time (days)', fontsize=18)
    pl.savefig('histgrid.png')

    # plot energy and mass
    fig, axs = plt.subplots(4, figsize=(10, 10), constrained_layout=True)
    axs[0].plot(tx, Vol, linewidth=6, color='b')
    axs[0].set_title('Vol', color='b', fontsize=22)
    axs[0].tick_params(axis='both', which='major', labelsize=16)
    axs[1].plot(tx, Ep, linewidth=6, color='g')
    axs[1].set_title('Ep', color='g', fontsize=22)
    axs[1].tick_params(axis='both', which='major', labelsize=16)
    axs[2].plot(tx, Ek, linewidth=6, color='r')
    axs[2].set_title('Ek', color='r', fontsize=22)
    axs[2].tick_params(axis='both', which='major', labelsize=16)
    axs[3].plot(tx, ape, linewidth=6, color='y')
    axs[3].set_title('Abs. P. Enstrophy', color='y', fontsize=22)
    axs[3].tick_params(axis='both', which='major', labelsize=16)
    pl.savefig('properties.png')

    endtime = (time.time() - start_time)
    print(" ")
    print('Finished!')
    print('Total time elapsed = ' + str(endtime) + ' seconds')
    print(" ")

    return result_u, result_v, result_h, result_csi, [lonsu,lonsz,lonsv], [latsu,latsz,latsv],\
            endtime, Vol, Ep, Ek, ape, histgrid1, histgrid2, histgrid3, dt
Example #20
import numpy as np
import matplotlib.pyplot as plt
from grid import create_grid
from skimage.morphology import medial_axis
from skimage.util import invert
from planning import a_star
plt.rcParams['figure.figsize'] = 12,12

filename = 'colliders.csv'
data = np.loadtxt(filename,delimiter=',',dtype='Float64',skiprows=2)
print(data)
start_ne = (25,100)
goal_ne = (650,500)
drone_altitude = 5
safety_distance = 2
grid = create_grid(data,drone_altitude,safety_distance)
# Medial axis algorithm
skeleton = medial_axis(invert(grid))

# Find the skeleton cells nearest to the start and goal
def find_start_goal(skel,start,goal):
    # Indices of the non-zero (skeleton) cells, transposed to (row, col) pairs
    skel_cells = np.transpose(skel.nonzero())
    # Row-wise 2-norm distance to every skeleton cell; take the index of the minimum
    start_min_dist = np.linalg.norm(np.array(start)-np.array(skel_cells),axis=1).argmin()
    near_start = skel_cells[start_min_dist]
    goal_min_dist = np.linalg.norm(np.array(goal)-np.array(skel_cells),axis=1).argmin()
    near_goal = skel_cells[goal_min_dist]
    return near_start,near_goal

skel_start, skel_goal = find_start_goal(skeleton, start_ne, goal_ne)
Example #21
def process_shakemaps(shakemaps=None, session=None, scenario=False):
    '''
    Process or reprocess the shakemaps passed into the function
    
    Args:
        shakemaps (list): List of ShakeMap objects to process
        session (Session()): SQLAlchemy session
        scenario (boolean): True for manually triggered events
    
    Returns:
        dict: a dictionary that contains information about the function run
        ::
            data = {'status': either 'finished' or 'failed',
                    'message': message to be returned to the UI,
                    'log': message to be added to ShakeCast log
                           and should contain info on error}
    '''
    for shakemap in shakemaps:
        if can_process_event(shakemap.event, scenario) is False:
            continue
        shakemap.mark_processing_start()

        # open the grid.xml file and find groups affected by event
        grid = create_grid(shakemap)
        groups_affected = get_inspection_groups(grid, scenario, session)

        if not groups_affected:
            shakemap.mark_processing_finished()
            session.commit()
            continue

        # send out new events and create inspection notifications
        new_notifications = create_inspection_notifications(
            groups_affected, shakemap, scenario)

        session.add_all(new_notifications)
        session.commit()

        # get a set of all affected facilities
        affected_facilities = (session.query(Facility).filter(
            Facility.in_grid(grid)).all())

        if affected_facilities:
            impact = compute_event_impact(affected_facilities, shakemap, grid)

            # Remove all old shaking and add all fac_shaking_lst
            shakemap.facility_shaking = []
            session.commit()

            session.bulk_save_objects(impact.facility_shaking)
            session.commit()

            # save impact geo_json
            impact.save_impact_geo_json(shakemap.local_products_dir)

        else:
            shakemap.mark_processing_finished()
            shakemap.status = 'processed - no facs'

            session.commit()
            continue

        # grab new notifications, and any that might have failed to send
        notifications = (session.query(Notification).filter(
            Notification.shakemap == shakemap).filter(
                Notification.notification_type == 'DAMAGE').filter(
                    Notification.status != 'sent').all())

        if notifications:
            # send inspection notifications for the shaking levels we
            # just computed
            for n in notifications:
                # generate pdf for specific group
                pdf_name = '{}_impact.pdf'.format(n.group.name)
                pdf.generate_impact_pdf(n.shakemap,
                                        save=True,
                                        pdf_name=pdf_name,
                                        template_name=n.group.template)
                inspection_notification(notification=n,
                                        scenario=scenario,
                                        session=session)

        shakemap.mark_processing_finished()
        if scenario is True:
            shakemap.status = 'scenario'
        session.commit()