Example #1
def _repel_bounce(free_nodes, vert, smp, rho, fix_nodes, itr, n, delta, max_bounces, bound_force):
    """ 
  Nodes are repelled by each other and bounce off the boundaries.
  """
    free_nodes = np.array(free_nodes, dtype=float, copy=True)

    # if bound_force then use the domain boundary as the force
    # boundary
    if bound_force:
        bound_vert = vert
        bound_smp = smp
    else:
        bound_vert = np.zeros((0, vert.shape[1]), dtype=float)
        bound_smp = np.zeros((0, vert.shape[1]), dtype=int)

    # this is used for the lengthscale of the domain
    scale = vert.ptp()

    # ensure that the number of nodes used to determine repulsion force
    # is less than or equal to the total number of nodes
    n = min(n, free_nodes.shape[0] + fix_nodes.shape[0])

    for k in range(itr):
        # node positions after repulsion
        free_nodes_new = _repel_step(free_nodes, rho, fix_nodes, n, delta, bound_vert, bound_smp)

        # boolean array of nodes which are now outside the domain
        crossed = ~gm.contains(free_nodes_new, vert, smp)
        bounces = 0
        while np.any(crossed):
            # point where nodes intersected the boundary
            inter = gm.intersection_point(free_nodes[crossed], free_nodes_new[crossed], vert, smp)

            # normal vector to intersection point
            norms = gm.intersection_normal(free_nodes[crossed], free_nodes_new[crossed], vert, smp)

            # distance that the node wanted to travel beyond the boundary
            res = free_nodes_new[crossed] - inter

            # move the previous node position to just within the boundary
            free_nodes[crossed] = inter - 1e-10 * scale * norms

            # stop after max_bounces and place the node just inside the boundary
            if bounces > max_bounces:
                free_nodes_new[crossed] = inter - 1e-10 * scale * norms
                break

            else:
                # bounce the node off the boundary
                free_nodes_new[crossed] -= 2 * norms * np.sum(res * norms, 1)[:, None]
                # check to see if the bounced node is now within the domain,
                # if not then iterations continue
                crossed = ~gm.contains(free_nodes_new, vert, smp)
                bounces += 1

        free_nodes = free_nodes_new

    return free_nodes
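
The bounce above works by reflecting the overshoot vector (res) across the boundary normal at the intersection point. A minimal standalone sketch of that reflection step, with made-up overshoots and unit normals (plain numpy, not part of the original module):

import numpy as np

# hypothetical overshoot vectors and unit boundary normals, one row per node
res = np.array([[0.3, -0.1], [0.05, 0.2]])
norms = np.array([[0.0, 1.0], [1.0, 0.0]])

# reflect each overshoot across its boundary plane: r' = r - 2*(r . n)*n
reflected = res - 2 * norms * np.sum(res * norms, axis=1)[:, None]
print(reflected)  # [[0.3, 0.1], [-0.05, 0.2]]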
Example #2
File: fd.m.py Project: cossatot/RBF
def plot_cross_section(stress_component, ax):
    # make a stress cross section along the y=0 plane. This does not
    # extend all the way to the bottom of the domain
    x, z = np.meshgrid(
        np.linspace(*xbounds, 400),
        np.linspace(-0.5*depth, 1.25*np.max(vert[:,2]), 200))
    x = x.flatten()
    z = z.flatten()
    y = np.zeros_like(x)
    points = np.array([x, y, z]).T
    stress = {'xy': s_xy, 'yx': s_xy,
              'zx': s_xz, 'xz': s_xz,
              'yz': s_yz, 'zy': s_yz,
              'zz': s_zz, 'yy': s_yy, 'xx': s_xx}[stress_component]
    stress_interp = LinearNDInterpolator(
        nodes, 
        stress)(points)
    # replace all points that are outside of the domain with the mean
    # stress value. This is done to prevent unexpected color limits in
    # tricontourf
    stress_interp[~contains(points, vert, smp)] = stress.mean()
    # plot stress using tripcolor and mask out points that are outside
    # of the domain. This will show the topography in the cross
    # section
    triang = matplotlib.tri.Triangulation(x, z)
    triang.set_mask(
        ~contains(points[triang.triangles[:,0]], vert, smp) | 
        ~contains(points[triang.triangles[:,1]], vert, smp) | 
        ~contains(points[triang.triangles[:,2]], vert, smp))
    p = ax.tricontourf(
        triang, 
        stress_interp) 
    fig = ax.figure
    cbar = fig.colorbar(p, ax=ax)
    cbar.set_label('stress [MPa]')
    ax.set_aspect('equal')
    ax.set_xlim(*xbounds)
    ax.set_ylim(-0.5*depth, 1.25*np.max(vert[:,2]))
    ax.set_xlabel('x [km]')
    ax.set_ylabel('z (depth) [km]')
    ax.set_title('%s stress cross section at y=0' % stress_component)
    ax.grid(ls=':', color='k')
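
The set_mask call above hides every triangle that touches a point outside the domain. The same per-triangle test can be written compactly with a single np.any over the triangle vertices; a small sketch on synthetic points, with a stand-in condition in place of contains (an illustration, not part of the original script):

import numpy as np
import matplotlib.tri

rng = np.random.default_rng(0)
x = rng.random(50)
y = rng.random(50)
triang = matplotlib.tri.Triangulation(x, y)
outside = x > 0.5                                           # stand-in for ~contains(points, vert, smp)
triang.set_mask(np.any(outside[triang.triangles], axis=1))  # hide triangles touching an outside point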
Example #3
def make_scalar_field(nodes, vals, step=100j, bnd_vert=None, bnd_smp=None):
    ''' 
  Returns a structured data object used for plotting scalar fields 
  with Mayavi
  '''
    xmin = np.min(nodes[:, 0])
    xmax = np.max(nodes[:, 0])
    ymin = np.min(nodes[:, 1])
    ymax = np.max(nodes[:, 1])
    zmin = np.min(nodes[:, 2])
    zmax = np.max(nodes[:, 2])
    x, y, z = np.mgrid[xmin:xmax:step, ymin:ymax:step, zmin:zmax:step]
    f = griddata(nodes, vals, (x, y, z), method='linear')
    # mask all points that are outside of the domain
    grid_points_flat = np.array([x, y, z]).reshape((3, -1)).T
    if (bnd_smp is not None) and (bnd_vert is not None):
        is_outside = ~contains(grid_points_flat, bnd_vert, bnd_smp)
        is_outside = is_outside.reshape(x.shape)
        f[is_outside] = np.nan

    out = mlab.pipeline.scalar_field(x, y, z, f)
    return out
Example #4
data[1][ix["interior"]] += (nodes[ix["interior"], 1] > 10.01).astype(np.float32)
data[1][ix["free"]] += (nodes[ix["free"], 1] > 10.01).astype(np.float32)
data = np.concatenate(data)

idx_noghost = ix["free"] + ix["fixed"] + ix["interior"]
out = solver(G, data)
out = np.reshape(out, (dim, N))
fig, ax = plt.subplots()

inside_nodes = nodes[idx_noghost]
H = Halton(2)
outside_nodes = H(100000) - 0.5
outside_nodes[:, 0] *= 10
outside_nodes[:, 1] += 10.5

outside_nodes = outside_nodes[~contains(outside_nodes, vert, smp)]
soln = out[:, idx_noghost]

nodes = inside_nodes

# cs = ax.tripcolor(nodes[idx_noghost,0],
#                  nodes[idx_noghost,1],
#                  np.linalg.norm(out[:,idx_noghost],axis=0),cmap=myplot.cm.viridis)
cs = ax.tripcolor(nodes[:, 0], nodes[:, 1], np.linalg.norm(soln, axis=0), cmap=myplot.cm.slip2)
# plt.colorbar(cs)
plt.quiver(nodes[::1, 0], nodes[::1, 1], soln[0, ::1], soln[1, ::1], color="k", scale=40)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)

ax.set_xlim(-4, 4)
ax.set_ylim(3.5, 11.5)
Example #5
def _rejection_sampling_nodes(N, vert, smp, rho=None, max_sample_size=1000000):
    '''
  Returns `N` nodes within the boundaries defined by `vert` and `smp`
  and with density `rho`. The nodes are generated by rejection
  sampling.

  Parameters
  ----------
  N : int
    Number of nodes to generate

  vert : (P, D) float array
    Boundary vertices

  smp : (Q, D) int array
    Describes how the vertices are connected to form the boundary

  rho : function, optional
    Node density function

  max_sample_size : int
    max number of nodes allowed in a sample for the rejection
    algorithm. This prevents excessive RAM usage

  '''
    if rho is None:
        rho = _default_rho

    # form bounding box for the domain so that a RNG can produce values
    # that mostly lie within the domain
    lb = np.min(vert, axis=0)
    ub = np.max(vert, axis=0)
    ndim = vert.shape[1]
    # form Halton sequence generator
    H = rbf.halton.Halton(ndim + 1)
    # initiate array of nodes
    nodes = np.zeros((0, ndim), dtype=float)
    # node counter
    total_samples = 0
    # I use a rejection algorithm to get a sampling of nodes that
    # resembles the density specified by rho. The acceptance rate keeps track
    # of the ratio of accepted nodes to tested nodes
    acceptance = 1.0
    while nodes.shape[0] < N:
        # to keep most of this loop in cython and c code, the rejection
        # algorithm is done in chunks.  The number of samples in each
        # chunk is a rough estimate of the number of samples needed in
        # order to get the desired number of accepted nodes.
        if acceptance == 0.0:
            sample_size = max_sample_size
        else:
            # estimated number of samples needed to get N accepted nodes
            sample_size = int(np.ceil((N - nodes.shape[0]) / acceptance))
            # don't let sample_size exceed max_sample_size
            sample_size = min(sample_size, max_sample_size)

        # In order for a test node to be accepted, rho evaluated at that
        # test node needs to be larger than a random number with uniform
        # distribution between 0 and 1. Here I form the test nodes and
        # those random numbers
        seq = H(sample_size)
        test_nodes, seq1d = seq[:, :-1], seq[:, -1]
        # scale the range of the test nodes to encompass the domain
        test_nodes = (ub - lb) * test_nodes + lb
        # reject test points based on random value
        test_nodes = test_nodes[rho(test_nodes) > seq1d]
        # reject test points that are outside of the domain
        test_nodes = test_nodes[contains(test_nodes, vert, smp)]
        # append what remains to the collection of accepted nodes. If
        # there are too many new nodes, then cut it back down so the total
        # size is `N`
        if (test_nodes.shape[0] + nodes.shape[0]) > N:
            test_nodes = test_nodes[:(N - nodes.shape[0])]

        nodes = np.vstack((nodes, test_nodes))
        logger.debug('accepted %s of %s nodes' % (nodes.shape[0], N))
        # update the acceptance. the acceptance is the ratio of accepted
        # nodes to sampled nodes
        total_samples += sample_size
        acceptance = nodes.shape[0] / total_samples

    return nodes
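
The accept/reject test above keeps a candidate when its density value beats a uniform random threshold and the candidate lies inside the domain. A self-contained sketch of that test on the unit square, using plain numpy in place of the Halton sequence and a made-up density (illustration only):

import numpy as np

rng = np.random.default_rng(0)
lb, ub = np.array([0.0, 0.0]), np.array([1.0, 1.0])

def rho(p):
    # made-up density, highest near the origin and at most 1
    return np.exp(-3.0 * np.sum(p**2, axis=1))

cand = lb + (ub - lb) * rng.random((1000, 2))  # candidate points in the bounding box
thresh = rng.random(1000)                      # uniform thresholds on [0, 1)
accepted = cand[rho(cand) > thresh]            # keep candidates where rho beats the threshold
print('accepted %d of 1000 candidates' % len(accepted))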
Example #6
# create "left hand side" matrix
A = np.empty((N, N))
A[idx['interior']] = mq(nodes[idx['interior']], nodes, eps=eps, diff=[2, 0])
A[idx['interior']] += mq(nodes[idx['interior']], nodes, eps=eps, diff=[0, 2])
A[idx['boundary:all']] = mq(nodes[idx['boundary:all']], nodes, eps=eps)
# create "right hand side" vector
d = np.empty(N)
d[idx['interior']] = -1.0  # forcing term
d[idx['boundary:all']] = 0.0  # boundary condition
# Solve for the RBF coefficients
coeff = np.linalg.solve(A, d)
# interpolate the solution on a grid
xg, yg = np.meshgrid(np.linspace(-0.05, 2.05, 400),
                     np.linspace(-0.05, 2.05, 400))
points = np.array([xg.flatten(), yg.flatten()]).T
u = mq(points, nodes, eps=eps).dot(coeff)  # evaluate at the interp points
u[~contains(points, vert, smp)] = np.nan  # mask outside points
ug = u.reshape((400, 400))  # fold back into a grid
# make a contour plot of the solution
fig, ax = plt.subplots()
p = ax.contourf(xg, yg, ug, np.linspace(0.0, 0.16, 9), cmap='viridis')
ax.plot(nodes[:, 0], nodes[:, 1], 'ko', markersize=4)
for s in smp:
    ax.plot(vert[s, 0], vert[s, 1], 'k-', lw=2)

ax.set_aspect('equal')
fig.colorbar(p, ax=ax)
fig.tight_layout()
plt.savefig('../figures/basis.a.png')
plt.show()
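
For reference, the system assembled above is RBF collocation for the Poisson problem u_xx + u_yy = -1 with u = 0 on the boundary: interior rows of A hold the Laplacian of each basis function and boundary rows hold the basis values. A self-contained 1-D sketch of the same pattern, writing one common multiquadric form out explicitly rather than using rbf.basis.mq (whose exact definition may differ):

import numpy as np

# 1-D analogue: solve u'' = -1 on [0, 1] with u(0) = u(1) = 0; exact solution is x*(1 - x)/2
eps = 5.0

def mq(x, c):
    # one common multiquadric form (an assumption, not necessarily what rbf.basis uses)
    r = x[:, None] - c[None, :]
    return np.sqrt(1.0 + (eps * r)**2)

def mq_xx(x, c):
    # second derivative of that multiquadric with respect to x
    r = x[:, None] - c[None, :]
    return eps**2 / (1.0 + (eps * r)**2)**1.5

nodes = np.linspace(0.0, 1.0, 21)
A = mq_xx(nodes, nodes)                  # interior rows: second derivative of the basis
A[[0, -1]] = mq(nodes[[0, -1]], nodes)   # boundary rows: basis values
d = np.full(len(nodes), -1.0)            # forcing term
d[[0, -1]] = 0.0                         # boundary condition
coeff = np.linalg.solve(A, d)
u = mq(nodes, nodes).dot(coeff)
print(np.max(np.abs(u - nodes * (1 - nodes) / 2)))  # should be small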
Example #7
    np.float32)
data[1][ix['free']] += (nodes[ix['free'], 1] > 10.01).astype(np.float32)
data = np.concatenate(data)

idx_noghost = ix['free'] + ix['fixed'] + ix['interior']
out = solver(G, data)
out = np.reshape(out, (dim, N))
fig, ax = plt.subplots()

inside_nodes = nodes[idx_noghost]
H = Halton(2)
outside_nodes = H(100000) - 0.5
outside_nodes[:, 0] *= 10
outside_nodes[:, 1] += 10.5

outside_nodes = outside_nodes[~contains(outside_nodes, vert, smp)]
soln = out[:, idx_noghost]

nodes = inside_nodes

#cs = ax.tripcolor(nodes[idx_noghost,0],
#                  nodes[idx_noghost,1],
#                  np.linalg.norm(out[:,idx_noghost],axis=0),cmap=myplot.cm.viridis)
cs = ax.tripcolor(nodes[:, 0],
                  nodes[:, 1],
                  np.linalg.norm(soln, axis=0),
                  cmap=myplot.cm.slip2)
#plt.colorbar(cs)
plt.quiver(nodes[::1, 0],
           nodes[::1, 1],
           soln[0, ::1],
Example #8
from rbf.geometry import contains
from rbf.halton import halton

# Define the problem domain with line segments.
vert = np.array([[0.0, 0.0], [2.0, 0.0], [2.0, 1.0], [1.0, 1.0], [1.0, 2.0],
                 [0.0, 2.0]])
smp = np.array([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 0]])

N = 500  # total number of nodes

# create N quasi-uniformly distributed nodes over the unit square
nodes = halton(N, 2)
# scale the nodes to encompass the domain
nodes *= 2.0
# remove nodes outside of the domain
nodes = nodes[contains(nodes, vert, smp)]
# evenly disperse the nodes over the domain using 100 iterative steps
for i in range(100):
    nodes = disperse(nodes, vert, smp)
# snap nodes to the boundary
nodes, smpid = snap_to_boundary(nodes, vert, smp, delta=0.5)
# identify boundary and interior nodes
interior = smpid == -1
boundary = smpid > -1

fig, ax = plt.subplots(figsize=(6, 6))
# plot the domain
for s in smp:
    ax.plot(vert[s, 0], vert[s, 1], 'k-')
ax.plot(nodes[interior, 0], nodes[interior, 1], 'ko')
ax.plot(nodes[boundary, 0], nodes[boundary, 1], 'bo')
Example #9
def mcint(f,vert,smp,samples=None,lower_bounds=None,
          upper_bounds=None,rng=None):
  ''' 
  Description
  -----------
    Monte Carlo integration algorithm over an arbitrary 1, 2, or 3  
    dimensional domain. This algorithm treats the integration domain
    as a bounding box and converts all function values for points outside 
    of the simplicial complex to zero. 

  Parameters
  ----------
    f: Scalar-valued function being integrated. This function should
      take an (N,D) array as input and return an (N,) array

    vert: vertices of integration domain boundary
   
    smp: simplices describing how the vertices are connected to form
      the domain boundary

    samples (default=20**D): number of samples to use 

    lower_bounds (default=None): If given, then the lower bounds for the 
      integration domain are truncated to these values. Used in rmcint

    upper_bounds (default=None): If given, then the upper bounds for the 
      integration domain are truncated to these values. Used in rmcint

    rng (default=Halton(D)): random number generator. Must take an 
      integer input, N, and return an (N,D) array of random points 

  Returns
  -------
    answer,error,maximum,minimum

    answer: integral over the domain

    error: uncertainty of the solution. Note that this tends to be
      overestimated when using a quasi-random number generator such as
      a Halton sequence

    maximum: maximum function value within the domain

    minimum: minimum function value within the domain

  Note 
  ---- 
    When integrating constant or nearly constant functions over a
    complicated domain, it is far more efficient to use mcint2

  '''
  vert = np.asarray(vert,dtype=float)
  smp = np.asarray(smp,dtype=int)

  dim = smp.shape[1]

  if lower_bounds is None:
    lower_bounds = np.min(vert,0)
  else:
    lower_bounds = np.asarray(lower_bounds)

  if upper_bounds is None:
    upper_bounds = np.max(vert,0)
  else:
    upper_bounds = np.asarray(upper_bounds)

  if rng is None:
    rng = hlt.Halton(dim)

  if samples is None:
    samples = 20**dim

  if np.any(lower_bounds > upper_bounds):
    raise ValueError(
      'lower bounds found to be larger than upper bounds')

  if samples < 2:
    raise ValueError(
      'sample size must be at least 2')

  pnts = rng(samples)*(upper_bounds-lower_bounds) + lower_bounds
  val = f(pnts)
  is_inside = gm.contains(pnts,vert,smp)
  # If there are any points within the domain then return
  # the max and min value found within the domain
  if np.any(is_inside):
    minval = np.min(val[is_inside])
    maxval = np.max(val[is_inside])
  else:
    minval = np.inf
    maxval = -np.inf

  # copy val because its contents are going to be changed
  val = np.copy(val)
  val[~is_inside] = 0.0
  volume = np.prod(upper_bounds-lower_bounds)

  soln = np.sum(val)*volume/len(val)
  err = volume*np.std(val,ddof=1)/np.sqrt(len(val))

  return soln,err,minval,maxval
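
The estimator above is the sample mean of the values (zeroed outside the domain) times the bounding-box volume, with a standard-error estimate. As a quick sanity check of that estimator on its own, here is a self-contained sketch integrating x**2 + y**2 over the unit square, where the exact answer is 2/3 (plain numpy instead of the Halton generator):

import numpy as np

rng = np.random.default_rng(1)
pnts = rng.random((100000, 2))           # samples filling the unit square (volume = 1)
val = pnts[:, 0]**2 + pnts[:, 1]**2      # integrand at the samples; every point is inside

volume = 1.0
soln = np.sum(val) * volume / len(val)                   # Monte Carlo estimate, close to 2/3
err = volume * np.std(val, ddof=1) / np.sqrt(len(val))   # standard error of the estimate
print(soln, '+/-', err)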
Example #10
def menodes(N,vert,smp,rho=None,fix_nodes=None,
            itr=100,neighbors=None,delta=0.05,
            sort_nodes=True,bound_force=False):
  ''' 
  Generates nodes within the D-dimensional volume enclosed by the 
  simplexes using a minimum energy algorithm.  
  
  At each iteration the nearest neighbors to each node are found and 
  then a repulsion force is calculated using the distance to the 
  nearest neighbors and their charges (which is inversely proportional 
  to the node density). Each node then moves in the direction of the 
  net force acting on it.  The step size is equal to delta times the 
  distance to the nearest node. This is repeated for 2*itr iterations.
  
  During the first *itr* iterations, if a node intersects a boundary 
  then it elastically bounces off the boundary. During the last *itr* 
  iterations, if a node intersects a boundary then it sticks to the 
  boundary at the intersection point.

  Parameters
  ----------
  N : int
    Number of nodes
      
  vert : (P,D) array
    Boundary vertices

  smp : (Q,D) array
    Describes how the vertices are connected to form the boundary
    
  rho : function, optional
    Node density function. Takes a (N,D) array of coordinates in D 
    dimensional space and returns an (N,) array of densities which 
    have been normalized so that the maximum density in the domain 
    is 1.0. This function will still work if the maximum value is 
    normalized to something less than 1.0; however it will be less 
    efficient.

  fix_nodes : (F,D) array, optional
    Nodes which do not move and only provide a repulsion force
 
  itr : int, optional
    Number of repulsion iterations. If this number is small then the 
    nodes will not reach a minimum energy equilibrium.

  neighbors : int, optional
    Number of neighboring nodes to use when calculating the 
    repulsion force. When *neighbors* is small, the equilibrium 
    state tends to be a uniform node distribution (regardless of 
    *rho*), when *neighbors* is large, nodes tend to get pushed up 
    against the boundaries.

  delta : float, optional
    Scaling factor for the node step size in each iteration. The 
    step size is equal to *delta* times the distance to the nearest 
    neighbor.

  sort_nodes : bool, optional
    If True, nodes that are close in space will also be close in 
    memory. This is done with the Reverse Cuthill-McKee algorithm
      
  bound_force : bool, optional
    If True, then nodes cannot repel other nodes through the domain 
    boundary. Set to True if the domain has edges that nearly touch 
    each other. Setting this to True may significantly increase 
    computation time

  Returns
  -------
  nodes: (N,D) float array 

  smpid: (N,) int array
    Index of the simplex that each node is on. If a node is not on a 
    simplex (i.e. it is an interior node) then the simplex index is 
    -1.

  Notes
  -----
  It is assumed that *vert* and *smp* define a closed domain. If 
  this is not the case, then it is likely that an error message will 
  be raised which says "ValueError: No intersection found for 
  segment ...".
   
  This function tends to fail when adjacent simplices form a sharp 
  angle.  The error message raised will be "ValueError: No 
  intersection found for segment ...".  The only solution is to 
  taper the angle by adding more simplices
      
  '''
  max_sample_size = 1000000

  vert = np.asarray(vert,dtype=float) 
  smp = np.asarray(smp,dtype=int) 
  if fix_nodes is None:
    fix_nodes = np.zeros((0,vert.shape[1]))
  else:
    fix_nodes = np.asarray(fix_nodes)

  if rho is None:
    def rho(p):
      return np.ones(p.shape[0])

  if neighbors is None:
    # number of neighbors defaults to 3 raised to the number of 
    # spatial dimensions
    neighbors = 3**vert.shape[1]
    
  # form bounding box for the domain so that a RNG can produce values
  # that mostly lie within the domain
  lb = np.min(vert,axis=0)
  ub = np.max(vert,axis=0)
  ndim = vert.shape[1]
  
  # form Halton sequence generator
  H = rbf.halton.Halton(ndim+1)

  # initiate array of nodes
  nodes = np.zeros((0,ndim))

  # node counter
  cnt = 0

  # I use a rejection algorithm to get an initial sampling of nodes 
  # that resembles the density specified by rho. The acceptance rate keeps 
  # track of the ratio of accepted nodes to tested nodes
  acceptance = 1.0
  while nodes.shape[0] < N:
    # to keep most of this loop in cython and c code, the rejection
    # algorithm is done in chunks.  The number of samples in each 
    # chunk is a rough estimate of the number of samples needed in
    # order to get the desired number of accepted nodes.
    if acceptance == 0.0:
      sample_size = max_sample_size    
    else:
      # estimated number of samples needed to get N accepted nodes
      sample_size = int(np.ceil((N-nodes.shape[0])/acceptance))
      # don't let sample_size exceed max_sample_size
      sample_size = min(sample_size,max_sample_size)

    cnt += sample_size
    # form test points
    seqNd = H(sample_size)

    # In order for a test point to be accepted, rho evaluated at that 
    # test point needs to be larger than a random number with uniform 
    # distribution between 0 and 1. Here I form those random numbers
    seq1d = seqNd[:,-1]

    # scale range of test points to encompass the domain  
    new_nodes = (ub-lb)*seqNd[:,:ndim] + lb

    # reject test points based on random value
    new_nodes = new_nodes[rho(new_nodes) > seq1d]

    # reject test points that are outside of the domain
    new_nodes = new_nodes[gm.contains(new_nodes,vert,smp)]

    # append to collection of accepted nodes
    nodes = np.vstack((nodes,new_nodes))

    logger.debug('accepted %s of %s nodes' % (nodes.shape[0],N))
    acceptance = nodes.shape[0]/cnt

  nodes = nodes[:N]

  # use a minimum energy algorithm to spread out the nodes
  logger.debug('repelling nodes with boundary bouncing') 
  nodes = _repel_bounce(nodes,vert,smp,rho,
                        fix_nodes,itr,neighbors,delta,3,
                        bound_force)

  logger.debug('repelling nodes with boundary sticking') 
  nodes,smpid = _repel_stick(nodes,vert,smp,rho,
                             fix_nodes,itr,neighbors,delta,
                             bound_force)

  # sort so that nodes that are close in space are also close in memory
  if sort_nodes:
    idx = _nearest_neighbor_argsort(nodes,n=neighbors)
    nodes = nodes[idx]
    smpid = smpid[idx] 
  
  return nodes,smpid
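
A minimal usage sketch for the function above, generating nodes on the unit square. The import path is an assumption (menodes has lived in different modules across versions of the RBF package) and the domain here is made up:

import numpy as np
from rbf.nodes import menodes  # assumed import path

# unit-square domain: vertices joined into a closed loop of boundary segments
vert = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
smp = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])

nodes, smpid = menodes(200, vert, smp)
interior_nodes = nodes[smpid == -1]   # nodes that stayed in the interior
boundary_nodes = nodes[smpid >= 0]    # nodes that stuck to a boundary simplex
print(len(interior_nodes), len(boundary_nodes))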
Example #11
def _repel_stick(free_nodes,vert,smp,rho,   
                 fix_nodes,itr,n,delta,
                 bound_force):
  ''' 
  Nodes are repelled by each other and then become fixed when they hit 
  a boundary.
  '''
  free_nodes = np.array(free_nodes,dtype=float,copy=True)

  # if bound_force then use the domain boundary as the force 
  # boundary
  if bound_force:
    bound_vert = vert
    bound_smp = smp
  else:
    bound_vert = np.zeros((0,vert.shape[1]),dtype=float)
    bound_smp = np.zeros((0,vert.shape[1]),dtype=int)

  # Keeps track of whether nodes are in the interior or on the boundary. 
  # -1 indicates interior and >= 0 indicates boundary. If a node is on 
  # the boundary then the number is the index of the simplex that the 
  # node is on
  smpid = np.repeat(-1,free_nodes.shape[0])

  # length scale of the domain
  scale = vert.ptp()

  # ensure that the number of nodes used to compute repulsion force is
  # less than or equal to the total number of nodes
  n = min(n,free_nodes.shape[0]+fix_nodes.shape[0])

  for k in range(itr):
    # indices of all interior nodes
    interior, = (smpid==-1).nonzero()

    # indices of nodes associated with a simplex (i.e. nodes which 
    # intersected a boundary)
    boundary, = (smpid>=0).nonzero()

    # nodes which are stationary 
    all_fix_nodes = np.vstack((fix_nodes,free_nodes[boundary]))

    # new position of free nodes
    free_nodes_new = np.array(free_nodes,copy=True)
    # shift positions of interior nodes
    free_nodes_new[interior] = _repel_step(free_nodes[interior],
                                 rho,all_fix_nodes,n,delta,
                                 bound_vert,bound_smp)

    # indices of free nodes which crossed a boundary
    crossed = ~gm.contains(free_nodes_new,vert,smp)
  
    # if a node intersected a boundary then associate it with a simplex
    smpid[crossed] = gm.intersection_index(
                       free_nodes[crossed],     
                       free_nodes_new[crossed], 
                       vert,smp)

    # outward normal vector at intersection points
    norms = gm.intersection_normal(
              free_nodes[crossed],     
              free_nodes_new[crossed], 
              vert,smp)

    # intersection point for nodes which crossed a boundary
    inter = gm.intersection_point(
              free_nodes[crossed],     
              free_nodes_new[crossed],
              vert,smp)

    # the new position of nodes which crossed the boundary is just 
    # inside the boundary at the intersection point
    free_nodes_new[crossed] = inter - 1e-10*scale*norms
    free_nodes = free_nodes_new

  return free_nodes,smpid
Example #12
def menodes(
    N, vert, smp, rho=None, fix_nodes=None, itr=100, neighbors=None, delta=0.05, sort_nodes=True, bound_force=False
):
    """ 
  Generates nodes within the D-dimensional volume enclosed by the 
  simplexes using a minimum energy algorithm.  
  
  At each iteration the nearest neighbors to each node are found and 
  then a repulsion force is calculated using the distance to the 
  nearest neighbors and their charges (which is inversely proportional 
  to the node density). Each node then moves in the direction of the 
  net force acting on it.  The step size is equal to delta times the 
  distance to the nearest node. This is repeated for 2*itr iterations.
  
  During the first *itr* iterations, if a node intersects a boundary 
  then it elastically bounces off the boundary. During the last *itr* 
  iterations, if a node intersects a boundary then it sticks to the 
  boundary at the intersection point.

  Parameters
  ----------
  N : int
    Number of nodes
      
  vert : (P,D) array
    Boundary vertices

  smp : (Q,D) array
    Describes how the vertices are connected to form the boundary
    
  rho : function, optional
    Node density function. Takes a (N,D) array of coordinates in D 
    dimensional space and returns an (N,) array of densities which 
    have been normalized so that the maximum density in the domain 
    is 1.0. This function will still work if the maximum value is 
    normalized to something less than 1.0; however it will be less 
    efficient.

  fix_nodes : (F,D) array, optional
    Nodes which do not move and only provide a repulsion force
 
  itr : int, optional
    Number of repulsion iterations. If this number is small then the 
    nodes will not reach a minimum energy equilibrium.

  neighbors : int, optional
    Number of neighboring nodes to use when calculating the 
    repulsion force. When *neighbors* is small, the equilibrium 
    state tends to be a uniform node distribution (regardless of 
    *rho*), when *neighbors* is large, nodes tend to get pushed up 
    against the boundaries.

  delta : float, optional
    Scaling factor for the node step size in each iteration. The 
    step size is equal to *delta* times the distance to the nearest 
    neighbor.

  sort_nodes : bool, optional
    If True, nodes that are close in space will also be close in 
    memory. This is done with the Reverse Cuthill-McKee algorithm
      
  bound_force : bool, optional
    If True, then nodes cannot repel other nodes through the domain 
    boundary. Set to True if the domain has edges that nearly touch 
    each other. Setting this to True may significantly increase 
    computation time

  Returns
  -------
  nodes: (N,D) float array 

  smpid: (N,) int array
    Index of the simplex that each node is on. If a node is not on a 
    simplex (i.e. it is an interior node) then the simplex index is 
    -1.

  Notes
  -----
  It is assumed that *vert* and *smp* define a closed domain. If 
  this is not the case, then it is likely that an error message will 
  be raised which says "ValueError: No intersection found for 
  segment ...".
   
  This function tends to fail when adjacent simplices form a sharp 
  angle.  The error message raised will be "ValueError: No 
  intersection found for segment ...".  The only solution is to 
  taper the angle by adding more simplices
      
  """
    max_sample_size = 1000000

    vert = np.asarray(vert, dtype=float)
    smp = np.asarray(smp, dtype=int)
    if fix_nodes is None:
        fix_nodes = np.zeros((0, vert.shape[1]))
    else:
        fix_nodes = np.asarray(fix_nodes)

    if rho is None:

        def rho(p):
            return np.ones(p.shape[0])

    if neighbors is None:
        # number of neighbors defaults to 3 raised to the number of
        # spatial dimensions
        neighbors = 3 ** vert.shape[1]

    # form bounding box for the domain so that a RNG can produce values
    # that mostly lie within the domain
    lb = np.min(vert, axis=0)
    ub = np.max(vert, axis=0)
    ndim = vert.shape[1]

    # form Halton sequence generator
    H = rbf.halton.Halton(ndim + 1)

    # initiate array of nodes
    nodes = np.zeros((0, ndim))

    # node counter
    cnt = 0

    # I use a rejection algorithm to get an initial sampling of nodes
    # that resembles the density specified by rho. The acceptance rate keeps
    # track of the ratio of accepted nodes to tested nodes
    acceptance = 1.0
    while nodes.shape[0] < N:
        # to keep most of this loop in cython and c code, the rejection
        # algorithm is done in chunks.  The number of samples in each
        # chunk is a rough estimate of the number of samples needed in
        # order to get the desired number of accepted nodes.
        if acceptance == 0.0:
            sample_size = max_sample_size
        else:
            # estimated number of samples needed to get N accepted nodes
            sample_size = int(np.ceil((N - nodes.shape[0]) / acceptance))
            # don't let sample_size exceed max_sample_size
            sample_size = min(sample_size, max_sample_size)

        cnt += sample_size
        # form test points
        seqNd = H(sample_size)

        # In order for a test point to be accepted, rho evaluated at that
        # test point needs to be larger than a random number with uniform
        # distribution between 0 and 1. Here I form those random numbers
        seq1d = seqNd[:, -1]

        # scale range of test points to encompass the domain
        new_nodes = (ub - lb) * seqNd[:, :ndim] + lb

        # reject test points based on random value
        new_nodes = new_nodes[rho(new_nodes) > seq1d]

        # reject test points that are outside of the domain
        new_nodes = new_nodes[gm.contains(new_nodes, vert, smp)]

        # append to collection of accepted nodes
        nodes = np.vstack((nodes, new_nodes))

        logger.debug("accepted %s of %s nodes" % (nodes.shape[0], N))
        acceptance = nodes.shape[0] / cnt

    nodes = nodes[:N]

    # use a minimum energy algorithm to spread out the nodes
    logger.debug("repelling nodes with boundary bouncing")
    nodes = _repel_bounce(nodes, vert, smp, rho, fix_nodes, itr, neighbors, delta, 3, bound_force)

    logger.debug("repelling nodes with boundary sticking")
    nodes, smpid = _repel_stick(nodes, vert, smp, rho, fix_nodes, itr, neighbors, delta, bound_force)

    # sort so that nodes that are close in space are also close in memory
    if sort_nodes:
        idx = _nearest_neighbor_argsort(nodes, n=neighbors)
        nodes = nodes[idx]
        smpid = smpid[idx]

    return nodes, smpid
Example #13
def _repel_stick(free_nodes, vert, smp, rho, fix_nodes, itr, n, delta, bound_force):
    """ 
  Nodes are repelled by each other and then become fixed when they hit 
  a boundary.
  """
    free_nodes = np.array(free_nodes, dtype=float, copy=True)

    # if bound_force then use the domain boundary as the force
    # boundary
    if bound_force:
        bound_vert = vert
        bound_smp = smp
    else:
        bound_vert = np.zeros((0, vert.shape[1]), dtype=float)
        bound_smp = np.zeros((0, vert.shape[1]), dtype=int)

    # Keeps track of whether nodes are in the interior or on the boundary.
    # -1 indicates interior and >= 0 indicates boundary. If a node is on
    # the boundary then the number is the index of the simplex that the
    # node is on
    smpid = np.repeat(-1, free_nodes.shape[0])

    # length scale of the domain
    scale = vert.ptp()

    # ensure that the number of nodes used to compute repulsion force is
    # less than or equal to the total number of nodes
    n = min(n, free_nodes.shape[0] + fix_nodes.shape[0])

    for k in range(itr):
        # indices of all interior nodes
        interior, = (smpid == -1).nonzero()

        # indices of nodes associated with a simplex (i.e. nodes which
        # intersected a boundary)
        boundary, = (smpid >= 0).nonzero()

        # nodes which are stationary
        all_fix_nodes = np.vstack((fix_nodes, free_nodes[boundary]))

        # new position of free nodes
        free_nodes_new = np.array(free_nodes, copy=True)
        # shift positions of interior nodes
        free_nodes_new[interior] = _repel_step(
            free_nodes[interior], rho, all_fix_nodes, n, delta, bound_vert, bound_smp
        )

        # indices of free nodes which crossed a boundary
        crossed = ~gm.contains(free_nodes_new, vert, smp)

        # if a node intersected a boundary then associate it with a simplex
        smpid[crossed] = gm.intersection_index(free_nodes[crossed], free_nodes_new[crossed], vert, smp)

        # outward normal vector at intersection points
        norms = gm.intersection_normal(free_nodes[crossed], free_nodes_new[crossed], vert, smp)

        # intersection point for nodes which crossed a boundary
        inter = gm.intersection_point(free_nodes[crossed], free_nodes_new[crossed], vert, smp)

        # the new position of nodes which crossed the boundary is just
        # inside the boundary at the intersection point
        free_nodes_new[crossed] = inter - 1e-10 * scale * norms
        free_nodes = free_nodes_new

    return free_nodes, smpid
Example #14
def mcint2(f, vert, smp, samples=None, check_simplices=True, rng=None):
    ''' 
  Description
  -----------
    Monte Carlo integration algorithm over an arbitrary 1, 2, or 3
    dimensional domain. This algorithm uses the simplicial complex
    itself as the integration domain. Doing so requires the ability to
    compute the domain area/volume exactly, which can cause
    significant overhead for very large simplicial complexes if the
    simplices are not properly oriented.

  Parameters
  ----------
    f: Scalar-valued function being integrated. This function should
      take an (N,D) array as input and return an (N,) array

    vert: vertices of integration domain boundary
   
    smp: simplices describing how the vertices are connected to form
      the domain boundary

    samples (default=20**D): number of samples to use 

    check_simplices (default=True): Whether to check that the
      simplices define a closed surface and are oriented such that their
      normals point outward

    rng (default=Halton(D)): random number generator. Must take an 
      integer input, N, and return an (N,D) array of random points 

  Returns
  -------
    answer,error,maximum,minimum

    answer: integral over the domain

    error: uncertainty of the solution. Note that this tends to be
      overestimated when using a quasi-random number generator such as
      a Halton sequence

    maximum: maximum function value within the domain

    minimum: minimum function value within the domain


  Note 
  ---- 
    Volume calculations require simplices to be oriented such that
    their normal vectors, by the right-hand rule, point outside the
    domain. If check_simplices is True, then the simplices are checked
    and reordered to ensure such an orientation. Checking the
    simplices is an O(N^2) process and should be set to False if the 
    simplices are known to be properly oriented.

  '''
    vert = np.asarray(vert, dtype=float)
    smp = np.asarray(smp, dtype=int)
    if check_simplices:
        smp = gm.oriented_simplices(vert, smp)

    dim = smp.shape[1]

    lower_bounds = np.min(vert, 0)
    upper_bounds = np.max(vert, 0)

    if rng is None:
        rng = hlt.Halton(dim)

    if samples is None:
        samples = 20**dim

    pnts = rng(samples) * (upper_bounds - lower_bounds) + lower_bounds
    val = f(pnts)
    is_inside = gm.contains(pnts, vert, smp)
    # If there are any points within the domain then return
    # the max and min value found within the domain
    if np.any(is_inside):
        minval = np.min(val[is_inside])
        maxval = np.max(val[is_inside])
    else:
        minval = np.inf
        maxval = -np.inf

    val = val[is_inside]
    volume = gm.enclosure(vert, smp, orient=False)
    if (volume < 0.0):
        raise ValueError(
            'Simplicial complex found to have a negative volume. Check the '
            'orientation of simplices and ensure closedness')

    if (volume > 0.0) & (len(val) < 2):
        raise ValueError(
            'Number of values used to estimate the integral is less than 2. '
            'Ensure the simplicial complex is closed and then increase the '
            'sample size')

    if volume == 0.0:
        soln = 0.0
        err = 0.0

    else:
        soln = np.sum(val) * volume / len(val)
        err = volume * np.std(val, ddof=1) / np.sqrt(len(val))

    return soln, err, minval, maxval
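
The volume used above comes from the oriented simplicial complex, which is why the orientation check matters: with the boundary segments ordered the wrong way the signed volume comes out negative. A small 2-D illustration of that sign behaviour using the shoelace formula (not the rbf.geometry implementation):

import numpy as np

vert = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
smp = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])   # counter-clockwise boundary loop

a, b = vert[smp[:, 0]], vert[smp[:, 1]]
signed_area = 0.5 * np.sum(a[:, 0] * b[:, 1] - b[:, 0] * a[:, 1])
print(signed_area)   # 1.0 for this orientation; reversing every segment gives -1.0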
Example #15
def mcint(f,
          vert,
          smp,
          samples=None,
          lower_bounds=None,
          upper_bounds=None,
          rng=None):
    ''' 
  Description
  -----------
    Monte Carlo integration algorithm over an arbitrary 1, 2, or 3  
    dimensional domain. This algorithm treats the integration domain
    as a bounding box and converts all function values for points outside 
    of the simplicial complex to zero. 

  Parameters
  ----------
    f: Scalar-valued function being integrated. This function should
      take an (N,D) array as input and return an (N,) array

    vert: vertices of integration domain boundary
   
    smp: simplices describing how the vertices are connected to form
      the domain boundary

    samples (default=20**D): number of samples to use 

    lower_bounds (default=None): If given, then the lower bounds for the 
      integration domain are truncated to these values. Used in rmcint

    upper_bounds (default=None): If given, then the upper bounds for the 
      integration domain are truncated to these values. Used in rmcint

    rng (default=Halton(D)): random number generator. Must take an 
      integer input, N, and return an (N,D) array of random points 

  Returns
  -------
    answer,error,maximum,minimum

    answer: integral over the domain

    error: uncertainty of the solution. Note that this tends to be
      overestimated when using a quasi-random number generator such as
      a Halton sequence

    maximum: maximum function value within the domain

    minimum: minimum function value within the domain

  Note 
  ---- 
    When integrating constant or nearly constant functions over a
    complicated domain, it is far more efficient to use mcint2

  '''
    vert = np.asarray(vert, dtype=float)
    smp = np.asarray(smp, dtype=int)

    dim = smp.shape[1]

    if lower_bounds is None:
        lower_bounds = np.min(vert, 0)
    else:
        lower_bounds = np.asarray(lower_bounds)

    if upper_bounds is None:
        upper_bounds = np.max(vert, 0)
    else:
        upper_bounds = np.asarray(upper_bounds)

    if rng is None:
        rng = hlt.Halton(dim)

    if samples is None:
        samples = 20**dim

    if np.any(lower_bounds > upper_bounds):
        raise ValueError('lower bounds found to be larger than upper bounds')

    if samples < 2:
        raise ValueError('sample size must be at least 2')

    pnts = rng(samples) * (upper_bounds - lower_bounds) + lower_bounds
    val = f(pnts)
    is_inside = gm.contains(pnts, vert, smp)
    # If there are any points within the domain then return
    # the max and min value found within the domain
    if np.any(is_inside):
        minval = np.min(val[is_inside])
        maxval = np.max(val[is_inside])
    else:
        minval = np.inf
        maxval = -np.inf

    # copy val because its contents are going to be changed
    val = np.copy(val)
    val[~is_inside] = 0.0
    volume = np.prod(upper_bounds - lower_bounds)

    soln = np.sum(val) * volume / len(val)
    err = volume * np.std(val, ddof=1) / np.sqrt(len(val))

    return soln, err, minval, maxval
Example #16
File: fd.i.py Project: dhadjia1/RBF
                           
# create "right hand side" vector
d = np.zeros((N,))
d[groups['interior']] = -1.0
d[groups['boundary:all']] = 0.0

# find the solution at the nodes
u_soln = spsolve(A, d) 

# interpolate the solution on a grid
xg, yg = np.meshgrid(np.linspace(-0.05, 2.05, 400), 
                     np.linspace(-0.05, 2.05, 400))
points = np.array([xg.flatten(), yg.flatten()]).T                    
u_itp = LinearNDInterpolator(nodes, u_soln)(points)
# mask points outside of the domain
u_itp[~contains(points, vert, smp)] = np.nan 
ug = u_itp.reshape((400, 400)) # fold back into a grid
# make a contour plot of the solution
fig, ax = plt.subplots()
p = ax.contourf(xg, yg, ug, np.linspace(-1e-6, 0.16, 9), cmap='viridis')
ax.plot(nodes[:, 0], nodes[:, 1], 'ko', markersize=4)
for s in smp:
  ax.plot(vert[s, 0], vert[s, 1], 'k-', lw=2)

ax.set_aspect('equal')
fig.colorbar(p, ax=ax)
fig.tight_layout()
plt.savefig('../figures/fd.i.png')
plt.show()

Example #17
    # calculate state vector at time *t*
    v = integrator.integrate(t).reshape((2, -1))
    soln += [v[0]]  # only save displacements

# plot the results
fig, axs = plt.subplots(2, 2, figsize=(7, 7))
for i, t in enumerate(times[1:]):
    ax = axs.ravel()[i]
    xg, yg = np.mgrid[0.0:2.0:200j, 0:2.0:200j]
    points = np.array([xg.ravel(), yg.ravel()]).T
    # interpolate the solution onto a grid
    ug = griddata(nodes[interior + boundary],
                  soln[i], (xg, yg),
                  method='linear')
    # mask the points outside of the domain
    ug.ravel()[~contains(points, vert, smp)] = np.nan
    # plot the boundary
    for s in smp:
        ax.plot(vert[s, 0], vert[s, 1], 'k-')
    ax.imshow(ug,
              extent=(0.0, 2.0, 0.0, 2.0),
              origin='lower',
              vmin=-0.2,
              vmax=0.2,
              cmap='seismic')
    ax.set_aspect('equal')
    ax.text(0.6,
            0.85,
            'time : %s\nnodes : %s' % (t, N),
            transform=ax.transAxes,
            fontsize=10)
Example #18
def _repel_bounce(free_nodes,vert,smp,rho,   
                  fix_nodes,itr,n,delta,
                  max_bounces,bound_force):
  ''' 
  Nodes are repelled by each other and bounce off the boundaries.
  '''
  free_nodes = np.array(free_nodes,dtype=float,copy=True)

  # if bound_force then use the domain boundary as the force 
  # boundary
  if bound_force:
    bound_vert = vert
    bound_smp = smp
  else:
    bound_vert = np.zeros((0,vert.shape[1]),dtype=float)
    bound_smp = np.zeros((0,vert.shape[1]),dtype=int)

  # this is used for the lengthscale of the domain
  scale = vert.ptp()

  # ensure that the number of nodes used to determine repulsion force
  # is less than or equal to the total number of nodes
  n = min(n,free_nodes.shape[0]+fix_nodes.shape[0])

  for k in range(itr):
    # node positions after repulsion 
    free_nodes_new = _repel_step(free_nodes,rho,fix_nodes,
                                 n,delta,bound_vert,bound_smp)

    # boolean array of nodes which are now outside the domain
    crossed = ~gm.contains(free_nodes_new,vert,smp)
    bounces = 0
    while np.any(crossed):
      # point where nodes intersected the boundary
      inter = gm.intersection_point(
                free_nodes[crossed],     
                free_nodes_new[crossed],
                vert,smp)

      # normal vector to intersection point
      norms = gm.intersection_normal(
                free_nodes[crossed],     
                free_nodes_new[crossed],
                vert,smp)
      
      # distance that the node wanted to travel beyond the boundary
      res = free_nodes_new[crossed] - inter

      # move the previous node position to just within the boundary
      free_nodes[crossed] = inter - 1e-10*scale*norms

      # stop after max_bounces and place the node just inside the boundary
      if bounces > max_bounces:
        free_nodes_new[crossed] = inter - 1e-10*scale*norms
        break

      else: 
        # bounce the node off the boundary
        free_nodes_new[crossed] -= 2*norms*np.sum(res*norms,1)[:,None]        
        # check to see if the bounced node is now within the domain, 
        # if not then iterations continue
        crossed = ~gm.contains(free_nodes_new,vert,smp)
        bounces += 1

    free_nodes = free_nodes_new  

  return free_nodes
Example #19
def mcint2(f,vert,smp,samples=None,
           check_simplices=True,rng=None):
  ''' 
  Description
  -----------
    Monte Carlo integration algorithm over an arbitrary 1, 2, or 3
    dimensional domain. This algorithm uses the simplicial complex
    itself as the integration domain. Doing so requires the ability to
    compute the domain area/volume exactly, which can cause
    significant overhead for very large simplicial complexes if the
    simplices are not properly oriented.

  Parameters
  ----------
    f: Scalar-valued function being integrated. This function should
      take an (N,D) array as input and return an (N,) array

    vert: vertices of integration domain boundary
   
    smp: simplices describing how the vertices are connected to form
      the domain boundary

    samples (default=20**D): number of samples to use 

    check_simplices (default=True): Whether to check that the
      simplices define a closed surface and are oriented such that their
      normals point outward

    rng (default=Halton(D)): random number generator. Must take an 
      integer input, N, and return an (N,D) array of random points 

  Returns
  -------
    answer,error,maximum,minimum

    answer: integral over the domain

    error: uncertainty of the solution. Note that this tends to be
      overestimated when using a quasi-random number generator such as
      a Halton sequence

    maximum: maximum function value within the domain

    minimum: minimum function value within the domain


  Note 
  ---- 
    Volume calculations require simplices to be oriented such that
    their normal vectors, by the right-hand rule, point outside the
    domain. If check_simplices is True, then the simplices are checked
    and reordered to ensure such an orientation. Checking the
    simplices is an O(N^2) process and should be set to False if the 
    simplices are known to be properly oriented.

  '''
  vert = np.asarray(vert,dtype=float)
  smp = np.asarray(smp,dtype=int)
  if check_simplices:
    smp = gm.oriented_simplices(vert,smp)

  dim = smp.shape[1]

  lower_bounds = np.min(vert,0)
  upper_bounds = np.max(vert,0)

  if rng is None:
    rng = hlt.Halton(dim)

  if samples is None:
    samples = 20**dim

  pnts = rng(samples)*(upper_bounds-lower_bounds) + lower_bounds
  val = f(pnts)
  is_inside = gm.contains(pnts,vert,smp)
  # If there are any points within the domain then return
  # the max and min value found within the domain
  if np.any(is_inside):
    minval = np.min(val[is_inside])
    maxval = np.max(val[is_inside])
  else:
    minval = np.inf
    maxval = -np.inf

  val = val[is_inside]
  volume = gm.enclosure(vert,smp,orient=False)
  if (volume < 0.0):
    raise ValueError(
      'Simplicial complex found to have a negative volume. Check the '
      'orientation of simplices and ensure closedness')

  if (volume > 0.0) & (len(val) < 2):
    raise ValueError(
      'Number of values used to estimate the integral is less than 2. '
      'Ensure the simplicial complex is closed and then increase the '
      'sample size')

  if volume == 0.0:
    soln = 0.0
    err = 0.0

  else:
    soln = np.sum(val)*volume/len(val)
    err = volume*np.std(val,ddof=1)/np.sqrt(len(val))

  return soln,err,minval,maxval
Example #20
def record_section(data_list,
                   epicenter,
                   radius_range, 
                   angle_range,
                   name_list=None,
                   basemap=None, 
                   colors=None,
                   mapscale_lonlat=None,
                   map_resolution=100,  
                   minimap_pos=None):

  if colors is None:
    colors = ['k','b','g','y','c','b','y']

  # data list is a list of dictionary-like objects with keys: mean,
  # covariance, mask, position, time
  mask_list = []
  mean_list = []
  cov_list = []
  if name_list is None:
    name_list = ['displacement %s' % i for i in range(len(data_list))]

  for data in data_list:
    mask_list += [data['mask']]
    mean_list += [data['mean']]
    cov_list += [data['covariance']]
    
  #times = data_list[0]['time'][:]
  lon = data_list[0]['position'][:,0]
  lat = data_list[0]['position'][:,1]
  station_names = data_list[0]['name'][:]  

  if basemap is None:
    basemap = create_default_basemap(lat,lon)


  # form polygon which encloses stations to include in the record section
  epicenter_xy = basemap(*epicenter)
  theta = np.linspace(angle_range[0],angle_range[1],100)
  x_inner = epicenter_xy[0] + radius_range[0]*np.cos(theta)
  y_inner = epicenter_xy[1] + radius_range[0]*np.sin(theta)
  x_outer = epicenter_xy[0] + radius_range[1]*np.cos(theta[::-1])
  y_outer = epicenter_xy[1] + radius_range[1]*np.sin(theta[::-1])
  x = np.concatenate((x_inner,x_outer))
  y = np.concatenate((y_inner,y_outer))
  xy = np.array([x,y]).T
  smp = np.array([np.arange(xy.shape[0]),np.roll(np.arange(xy.shape[0]),-1)]).T
  stax,stay = basemap(lon,lat)
  staxy = np.array([stax,stay]).T
  include = np.nonzero(contains(staxy,xy,smp))[0]

  basemap = create_default_basemap(basemap(x,y,inverse=True)[1],basemap(x,y,inverse=True)[0])
  epicenter_xy = basemap(*epicenter)
  theta = np.linspace(angle_range[0],angle_range[1],100)
  x_inner = epicenter_xy[0] + radius_range[0]*np.cos(theta)
  y_inner = epicenter_xy[1] + radius_range[0]*np.sin(theta)
  x_outer = epicenter_xy[0] + radius_range[1]*np.cos(theta[::-1])
  y_outer = epicenter_xy[1] + radius_range[1]*np.sin(theta[::-1])
  x = np.concatenate((x_inner,x_outer))
  y = np.concatenate((y_inner,y_outer))
  xy = np.array([x,y]).T
  smp = np.array([np.arange(xy.shape[0]),np.roll(np.arange(xy.shape[0]),-1)]).T
  stax,stay = basemap(lon,lat)
  staxy = np.array([stax,stay]).T
  #include = np.nonzero(contains(staxy,xy,smp))[0]

  rs_fig = plt.figure('Record Section',figsize=(4.0,6.66))
  rs_ax = rs_fig.add_axes([0.2,0.1,0.7,0.8])
  
  fig = plt.figure('Map View',figsize=(4,7))
  ax = fig.add_axes([0.1,0.1,0.8,0.8])

  ax.plot(stax[include],stay[include],'ko',zorder=3)
  staxy = staxy[include]
  station_names = station_names[include]
  for i in range(len(staxy)):
    if station_names[i] in ['P603','P501']:
      ax.text(staxy[i,0]-25000,staxy[i,1]-12000,station_names[i],zorder=3)
    elif station_names[i] in ['I40A']:
      ax.text(staxy[i,0]+2000,staxy[i,1]-12000,station_names[i],zorder=3)
    elif station_names[i] in ['P499','OPBL']:
      ax.text(staxy[i,0]-27000,staxy[i,1]+3000,station_names[i],zorder=3)
    else:
      ax.text(staxy[i,0]+2000,staxy[i,1]+2000,station_names[i],zorder=3)

  # add record section polygon
  poly = Polygon(xy,closed=True,color='blue',alpha=0.4,edgecolor='none',zorder=2)
  ax.add_artist(poly)

  basemap.drawcoastlines(ax=ax,linewidth=2.0,zorder=1,color=(0.3,0.3,0.3,1.0))
  basemap.drawcountries(ax=ax,linewidth=2.0,zorder=1,color=(0.3,0.3,0.3,1.0))
  basemap.drawstates(ax=ax,linewidth=2.0,zorder=1,color=(0.3,0.3,0.3,1.0))
  basemap.drawmeridians(np.arange(np.floor(basemap.llcrnrlon),
                        np.ceil(basemap.urcrnrlon),1.0),
                        labels=[0,0,0,1],dashes=[2,2],
                        ax=ax,zorder=1,color=(0.3,0.3,0.3,1.0))
  basemap.drawparallels(np.arange(np.floor(basemap.llcrnrlat),
                        np.ceil(basemap.urcrnrlat),1.0),
                        labels=[1,0,0,0],dashes=[2,2],
                        ax=ax,zorder=1,color=(0.3,0.3,0.3,1.0))

  #basemap.drawtopography(ax=ax,vmin=-6000,vmax=4000,
  #                       alpha=0.4,resolution=map_resolution,zorder=0)

  basemap.drawmapscale(units='km',
                         lat=basemap.latmin+(basemap.latmax-basemap.latmin)/15.0,
                         lon=basemap.lonmin+(basemap.lonmax-basemap.lonmin)/4.0,
                         fontsize=8,
                         lon0=(basemap.lonmin+basemap.lonmax)/2.0,
                         lat0=(basemap.latmin+basemap.latmax)/2.0,
                         barstyle='fancy',ax=ax,
                         length=50,zorder=4)
  #stax,stay = basemap(lon,lat)
  #staxy = np.array([stax,stay]).T
  #smp = np.array([np.arange(xy.shape[0]),np.roll(np.arange(xy.shape[0]),-1)]).T
  #include = np.nonzero(contains(staxy,xy,smp))[0]

  # angle by which the displacement components need to be rotated to get the radial component
  rotation_angle = np.arctan2(staxy[:,1]-epicenter_xy[1],staxy[:,0]-epicenter_xy[0])
  zeros = np.zeros(len(staxy))
  ones =  np.ones(len(staxy))
  rotation_matrices = np.array([[ np.cos(rotation_angle),np.sin(rotation_angle),zeros],
                                [-np.sin(rotation_angle),np.cos(rotation_angle),zeros],
                                [zeros,                  zeros,                 ones ]])
  rotation_matrices = np.einsum('ijk->kij',rotation_matrices)

  def H(t):
    return (t>=0.0).astype(float)

  def logsys(m,t):
    return m[0]*np.log(1 + H(t-2010.257)*(t-2010.257)*m[1])

  def expsys(m,t):
    return m[0]*(1 - np.exp(-H(t-2010.257)*(t-2010.257)*m[1]))

  ts_width = 1.0
  for idx,d in enumerate(data_list):
    # normalize all displacements so that the width is ts_width
    # rotate displacements to get radial component
    times = d['time'][:]
    mask = np.array(d['mask'][:,include],dtype=bool)
    disp = np.einsum('...ij,...j->...i',rotation_matrices,d['mean'][:,include,:])[...,0]
    #disp = disp[...] - disp[0,...]
    var = np.einsum('...ij,...jk,...lk->...il',rotation_matrices,d['covariance'][:,include,:,:],rotation_matrices)[...,0,0]
    #var = d['covariance'][:,include,1,1]
    std = np.sqrt(var)

    if idx == 0:
      scale = ts_width/(np.max(disp,axis=0) - np.min(disp,axis=0))
      shift = np.copy(disp[0,:])

    disp -= shift
    disp *= scale
    std *= scale   
    dist = np.sqrt((staxy[:,0]-epicenter_xy[0])**2 + (staxy[:,1]-epicenter_xy[1])**2)/1000
    order = np.argsort(dist)
    #dist = np.argsort(dist)*ts_width
    dy = 0
    ytickpos = []
    yticklabel = []
    for i in range(1):
      ytickpos += [dy] 
      yticklabel += [''] 
      dy += 1.0*ts_width

    for i in order:
      if np.any(d['covariance'][...] > 1e-8):
        rs_ax.fill_between(times[~mask[:,i]],
                           disp[~mask[:,i],i]+std[~mask[:,i],i]+dy,
                           disp[~mask[:,i],i]-std[~mask[:,i],i]+dy,
                           color=colors[idx],alpha=0.4,edgecolor='none')
      #pred1 = modest.nonlin_lstsq(expsys,disp[:,i],2,system_args=(times,),output=['predicted'])
      #pred2 = modest.nonlin_lstsq(logsys,disp[:,i],2,system_args=(times,),output=['predicted'])
      rs_ax.plot(times[~mask[:,i]],disp[~mask[:,i],i]+dy,colors[idx]+'-',lw=1)
      ytickpos += [dy] 
      dy += 1.0*ts_width
      #rs_ax.plot(times,pred1+dist[i],'b-')
      #rs_ax.plot(times,pred2+dist[i],'r-')

  min_time = np.min([np.min(d['time'][:]) for d in data_list])
  max_time = np.max([np.max(d['time'][:]) for d in data_list])
  times = np.linspace(min_time,max_time,100)
  ytickpos = np.array(ytickpos)
  station_names = np.array(['%s\n(%s km)'%(i,int(j)) for (i,j) in zip(station_names,dist)])
  yticklabel = np.concatenate((yticklabel,station_names[order]))
  xtickpos = np.arange(np.floor(np.min(times)),np.ceil(np.max(times)))
  xticklabel = np.array([str(int(i)) for i in xtickpos])
  plt.sca(rs_ax)
  plt.yticks(ytickpos,yticklabel,fontsize=8)
  plt.xticks(xtickpos,xticklabel,fontsize=8)
  rs_ax.grid()
  #rs_ax.ticklabel_format(useOffset=False, style='plain')
  rs_ax.set_xlabel('year',fontsize=8)
  rs_ax.set_ylabel('station (epicentral distance)',fontsize=8)
  rs_ax.set_ylim(np.min(ytickpos)-0.1,np.max(ytickpos)+0.1)
  rs_ax.set_xlim(np.min(times)-0.1,np.max(times)+0.1)

  plt.show()
  return
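
The einsum block in record_section rotates the east/north displacement components into the radial direction relative to the epicenter. The same rotation for a single station, with hypothetical numbers, looks like this:

import numpy as np

epicenter_xy = np.array([0.0, 0.0])
station_xy = np.array([3.0, 4.0])
disp_en = np.array([0.6, 0.8])   # hypothetical east, north displacement

theta = np.arctan2(station_xy[1] - epicenter_xy[1],
                   station_xy[0] - epicenter_xy[0])
R = np.array([[np.cos(theta), np.sin(theta)],
              [-np.sin(theta), np.cos(theta)]])
radial, tangential = R.dot(disp_en)
print(radial, tangential)        # approximately 1.0 and 0.0 for this purely radial displacement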