Example #1
 def mutate(self, x, sigma):
     for i in range(len(x.start)):
         if (np.random.uniform() < self.probability):
             tmp_start, tmp_slen = copy(x.start[i]), copy(x.slen[i])
             x.slen[i] += int(sigma * (self.smax + 1 - self.smin) *
                              levy.rvs())
             x.slen[i] = (x.slen[i] - self.smin) % (self.smax + 1 -
                                                    self.smin) + self.smin
             x.start[i] = (x.start[i] +
                           int(sigma * len(self.ts) * levy.rvs())) % (
                               len(self.ts) - x.slen[i])
             if not self.validate(x):
                 x.start[i], x.slen[i] = copy(tmp_start), copy(tmp_slen)
     return 0
Example #2
def EagleStrategyWithHillClimbing(model,iters,rate):
    mn = MinMaxScaler(feature_range=(-3,3))
    r = levy.rvs(size=iters)
    max_accuracy = 0
    final_weights = []
    count = 1
    for i in list(r):
        weights = np.random.uniform(-1,1,120)
        weights = weights * i
        weights = mn.fit_transform(weights.reshape(-1,1))
        acc = predict(weights,X_test,Y_test,model)
        if acc > 0.5:
            clip_max = 5
            fitness = NetworkWeights(X_train,Y_train,[25,4,4,1],relu,True,True,rate)
            problem = ContinuousOpt(120,fitness,maximize=False,min_val=-1*clip_max,max_val=clip_max,step=rate)
            #print(problem.get_length(),len(weights))
            fitted_weights, loss = random_hill_climb(problem, max_attempts=10, max_iters=1000, restarts=0,
                      init_state=weights, curve=False, random_state=None)
            acc = predict(fitted_weights,X_test,Y_test,model)
            if acc > max_accuracy:
                max_accuracy = acc
                final_weights = fitted_weights
            if max_accuracy > 0.95:
                break
            count+=1
    return max_accuracy,count,final_weights        
Example #3
def sample_distro(distro_tuple):
    """
    Samples a probability distribution function (PDF) described by a tuple of parameters.

    Parameters
    ----------
    distro_tuple : tuple (distribution_name, arg1, arg2, ...)
        The PDF to sample from

    Returns
    ---------
    float
        the number generated from the PDF


    """
    distro_type = distro_tuple[0]
    if distro_type == "levy":
        return levy.rvs(loc=distro_tuple[1], scale=distro_tuple[2], size=1)[0]
    elif distro_type == "gaussian":
        return norm.rvs(loc=distro_tuple[1], scale=distro_tuple[2], size=1)[0]
    elif distro_type == "uniform":
        return uniform.rvs(loc=distro_tuple[1],
                           scale=(distro_tuple[2] - distro_tuple[1]),
                           size=1)[0]
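
A quick usage sketch for the helper above (hedged: it assumes the snippet's scipy.stats imports are in scope, and the parameter tuples are purely illustrative):

from scipy.stats import levy, norm, uniform

print(sample_distro(("levy", 0.0, 1.0)))      # Levy(loc=0, scale=1)
print(sample_distro(("gaussian", 0.0, 2.0)))  # Normal(loc=0, scale=2)
print(sample_distro(("uniform", -1.0, 1.0)))  # Uniform on [-1.0, 1.0)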
Example #4
    def runIteration(self, task, pop, fpop, xb, fxb, pa_v, **dparams):
        r"""Core function of CuckooSearch algorithm.

		Args:
			task (Task): Optimization task.
			pop (numpy.ndarray): Current population.
			fpop (numpy.ndarray): Current population's fitness/function values.
			xb (numpy.ndarray): Global best individual.
			fxb (float): Global best individual's function/fitness value.
			pa_v (float): TODO
			**dparams (Dict[str, Any]): Additional arguments.

		Returns:
			Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
				1. Initialized population.
				2. Initialized population's fitness/function values.
				3. New global best solution.
				4. New global best solution's fitness/objective value.
				5. Additional arguments:
					* pa_v (float): TODO
		"""
        i = self.randint(self.NP)
        Nn = task.repair(
            pop[i] +
            self.alpha * levy.rvs(size=[task.D], random_state=self.Rand),
            rnd=self.Rand)
        Nn_f = task.eval(Nn)
        j = self.randint(self.NP)
        while i == j:
            j = self.randint(self.NP)
        if Nn_f <= fpop[j]: pop[j], fpop[j] = Nn, Nn_f
        pop, fpop = self.emptyNests(pop, fpop, pa_v, task)
        xb, fxb = self.getBest(pop, fpop, xb, fxb)
        return pop, fpop, xb, fxb, {'pa_v': pa_v}
Example #5
def plot_distr_dispersal_distance(spp=None,
                                  dispersal_distr_param1=None,
                                  dispersal_distr_param2=None,
                                  dispersal_distance_distr='levy'):
    if spp is not None:
        dispersal_distr_param1 = spp.dispersal_distance_distr_param1
        dispersal_distr_param2 = spp.dispersal_distance_distr_param2
    else:
        assert (dispersal_distr_param1 is not None and dispersal_distr_param2
                is not None), ('If a Species object '
                               'is not provided then '
                               'the parameter values '
                               'must be provided.')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    if dispersal_distance_distr == 'levy':
        fig.suptitle(('dispersal distance: '
                      '~Levy(loc=%.4E, scale=%.4E)') %
                     (dispersal_distr_param1, dispersal_distr_param2))
        vals = _s_levy.rvs(dispersal_distr_param1, dispersal_distr_param2,
                           10000)
    elif dispersal_distance_distr == 'wald':
        fig.suptitle(('dispersal distance: '
                      '~Wald(mean=%.4E, scale=%.4E)') %
                     (dispersal_distr_param1, dispersal_distr_param2))
        vals = np.random.wald(dispersal_distr_param1, dispersal_distr_param2,
                              10000)
    ax.hist(vals, bins=25)
    ax.set_xlim((min(vals), max(vals)))
    plt.show()
Example #6
 def mutate(self, x):
     n = np.size(x)
     scale = self.scale
     x_new = levy.rvs(scale=scale, size=n)
     if is_integer(x):
         x_new = np.array(np.round(x_new), dtype=int)  # optional rounding
     x_new_corrected = self.correction.correct(x_new)
     return x_new_corrected
Example #7
    def _sample_cauchy_process(self, n):
        """Generate a realization of a Cauchy process."""
        check_positive_integer(n)

        delta_t = 1.0 * self.t / n
        times = np.cumsum(levy.rvs(loc=0, scale=delta_t**2 / 2, size=n))
        times = np.insert(times, 0, [0])
        return self._sample_brownian_motion_at(times)
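
The method above relies on the rest of its class for the Brownian part. Below is a minimal self-contained sketch of the same idea (a Cauchy process built as Brownian motion run at Levy-subordinated times); the names t_total and n_steps are illustrative, not from the original class:

import numpy as np
from scipy.stats import levy

t_total, n_steps = 1.0, 500
delta_t = t_total / n_steps

# Levy-distributed increments give a strictly increasing sequence of random times.
times = np.cumsum(levy.rvs(loc=0, scale=delta_t ** 2 / 2, size=n_steps))
times = np.insert(times, 0, 0.0)

# Standard Brownian motion evaluated at those times yields a Cauchy-process path.
increments = np.random.normal(scale=np.sqrt(np.diff(times)))
cauchy_path = np.concatenate(([0.0], np.cumsum(increments)))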
Example #8
def truncated_levy(n, thres, alpha):
    r = np.zeros(1)
    while r.size < n:
        r = levy.rvs(size=int(2 * n))
        r = np.power(r, -(1 + alpha))
        r = r[r < thres]
        r = r[:n]
    return r
Example #9
    def move(self, type='levy'):
        if type == 'levy':
            # uniformly distributed angles
            angle = uniform.rvs(size=(n, ), loc=.0, scale=2. * np.pi)

            # levy distributed step length
            r = levy.rvs(loc=3, scale=0.5)
            self.location += [r * np.cos(angle), r * np.sin(angle)]
        else:
            pass
Example #10
    def _sample_cauchy_process(self, n, zero=True):
        """Generate a realization of a Cauchy process."""
        self._check_increments(n)
        self._check_zero(zero)

        delta_t = 1.0 * self.t / n
        times = np.cumsum(levy.rvs(loc=0, scale=delta_t ** 2 / 2, size=n))

        if zero:
            times = np.insert(times, 0, [0])

        return self.brownian_motion.sample_at(times)
Example #11
def _walk(starting_point: np.ndarray, steps: int, type: str,
          speed: float) -> np.ndarray:
    """Generates a series of 2D points following a walk process."""
    if type not in ['levy', 'random']:
        raise ValueError('walk type must be levy or random')
    if type == 'levy':
        l = levy.rvs(size=(steps, ), scale=speed)
    if type == 'random':
        l = np.ones((steps, )) * speed
    angle = uniform.rvs(size=(steps, ), scale=2 * np.pi)
    x = starting_point[0] + np.cumsum(l * np.cos(angle))
    y = starting_point[1] + np.cumsum(l * np.sin(angle))
    return np.array([x, y])
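
A hedged usage sketch for _walk (the starting point, step count, and speed below are arbitrary; it assumes numpy, matplotlib, and the scipy.stats imports used by the function are available):

import numpy as np
import matplotlib.pyplot as plt

path = _walk(np.array([0.0, 0.0]), steps=200, type='levy', speed=0.1)
plt.plot(path[0], path[1], lw=0.8)
plt.title('Levy walk, 200 steps')
plt.show()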
Example #12
def _do_dispersal(spp,
                  parent_midpoint_x,
                  parent_midpoint_y,
                  dispersal_distance_distr_param1,
                  dispersal_distance_distr_param2,
                  mu_dir=0,
                  kappa_dir=0):
    within_landscape = False
    while not within_landscape:
        # choose direction using movement surface, if applicable
        if spp._disp_surf:
            # and use those choices to draw movement directions
            direction = spp._disp_surf._draw_directions(
                [int(parent_midpoint_x)], [int(parent_midpoint_y)])[0]
        # else, choose direction using a random walk with a uniform vonmises
        elif not spp._disp_surf:
            direction = _r_vonmises(mu_dir, kappa_dir)
        if spp.dispersal_distance_distr == 'levy':
            distance = _s_levy.rvs(loc=spp.dispersal_distance_distr_param1,
                                   scale=spp.dispersal_distance_distr_param2)
        elif spp.dispersal_distance_distr == 'wald':
            distance = _wald(mean=dispersal_distance_distr_param1,
                             scale=dispersal_distance_distr_param2)
        elif spp.dispersal_distance_distr == 'lognormal':
            distance = _lognormal(mean=spp.dispersal_distance_distr_param1,
                                  sigma=spp.dispersal_distance_distr_param2)

        # decompose distance into x and y components
        dist_x = _cos(direction) * distance
        dist_y = _sin(direction) * distance
        # multiply the x and y distances by the land's resolution-ratios,
        # if they're not 1 and 1 (e.g. using a non-square-resolution raster)
        if spp._land_res_ratio[0] != 1:
            dist_x *= spp._land_res_ratio[0]
        if spp._land_res_ratio[1] != 1:
            dist_y *= spp._land_res_ratio[1]
        offspring_x = parent_midpoint_x + dist_x
        offspring_y = parent_midpoint_y + dist_y
        offspring_x = np.clip(offspring_x,
                              a_min=0,
                              a_max=spp._land_dim[0] - 0.001)
        offspring_y = np.clip(offspring_y,
                              a_min=0,
                              a_max=spp._land_dim[1] - 0.001)
        within_landscape = (
            (offspring_x > 0 and offspring_x < spp._land_dim[0])
            and (offspring_y > 0 and offspring_y < spp._land_dim[1]))
    return (offspring_x, offspring_y)
Example #13
    def _sample_cauchy_process_at(self, times):
        """Generate a realization of a Cauchy process."""
        if times[0] != 0:
            zero = False
            times = np.insert(times, 0, [0])
        else:
            zero = True

        deltas = np.diff(times)
        levys = [levy.rvs(loc=0, scale=d ** 2 / 2, size=1) for d in deltas]
        ts = np.cumsum(levys)

        if zero:
            ts = np.insert(ts, 0, [0])

        return self.brownian_motion.sample_at(ts)
Example #14
 def runTask(self, task):
     pa_v = self.N * self.pa
     N = task.Lower + self.rand([self.N, task.D]) * task.bRange
     N_f = apply_along_axis(task.eval, 1, N)
     while not task.stopCondI():
         i = self.randint(self.N)
         Nn = task.repair(
             N[i] +
             self.alpha * levy.rvs(size=[task.D], random_state=self.Rand),
             rnd=self.Rand)
         Nn_f = task.eval(Nn)
         j = self.randint(self.N)
         while i == j:
             j = self.randint(self.N)
         if Nn_f <= N_f[j]: N[j], N_f[j] = Nn, Nn_f
         N, N_f = self.emptyNests(N, N_f, pa_v, task)
     return self.getBest(N, N_f, None, inf * task.optType.value)
Example #15
def itoint(f, G, y0, tspan, noise="normal"):
    """ Numerically integrate the Ito equation  dy = f(y,t)dt + G(y,t)dW
    where y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener increments
    Args:
      f: callable(y,t) returning a numpy array of shape (d,)
         Vector-valued function to define the deterministic part of the system
      G: callable(y,t) returning a numpy array of shape (d,m)
         Matrix-valued function to define the noise coefficients of the system
      y0: array of shape (d,) giving the initial state vector y(t==0)
      tspan (array): The sequence of time points for which to solve for y.
        These must be equally spaced, e.g. np.arange(0,10,0.005)
        tspan[0] is the initial time corresponding to the initial state y0.
    Returns:
      y: array, with shape (len(tspan), len(y0))
         With the initial value y0 in the first row
    Raises:
      SDEValueError
    """
    # In future versions we can automatically choose here the most suitable
    # Ito algorithm based on properties of the system and noise.
    (d, m, f, G, y0, tspan, __,
     __) = sdeint.integrate._check_args(f, G, y0, tspan, None, None)
    N = len(tspan)
    h = (tspan[N - 1] - tspan[0]) / (N - 1)  # assuming equal time steps
    if noise == "levy":
        dW = levy.rvs(0., 1e-11,
                      (N - 1, m)) + np.random.normal(0., np.sqrt(h),
                                                     (N - 1, m))
    elif noise == "cauchy":
        dW = cauchy.rvs(0., 1e-4, (N - 1, m))
    else:
        dW = None
    chosenAlgorithm = sdeint.integrate.itoSRI2
    return chosenAlgorithm(f, G, y0, tspan, dW=dW)
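
A usage sketch under the assumption that the sdeint package is installed and the wrapper above is in scope; the one-dimensional Ornstein-Uhlenbeck-style drift and diffusion below are purely illustrative:

import numpy as np

def f(y, t):
    return -1.0 * y               # drift, shape (d,) with d = 1

def G(y, t):
    return np.array([[0.2]])      # diffusion, shape (d, m) with m = 1

tspan = np.arange(0.0, 10.0, 0.005)   # equally spaced, as the docstring requires
y = itoint(f, G, np.array([1.0]), tspan, noise="levy")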
Example #16
 def run(self):
     start_pos = 1
     direction = np.random.random_sample(size=self.len_tracks) * 2 * np.pi
     stepsize = levy.rvs(self.loc, self.scale, size=self.len_tracks)
Example #17
    def levy_flier(self, N, scale_coeff = 1, origin = (0,0), bound_x=(-np.Inf, np.Inf), bound_y=(-np.Inf, np.Inf) ):
        '''
        Defines a levy flight path beginning at the origin, taking N steps,
        with step sizes pulled from a true Levy distribution

        Parameters
        ----------
        N : number of steps in the flight
        scale_coeff : scale parameter for the Levy step-size distribution
        origin : initial site of the walk
        bound_x, bound_y : (lower, upper) bounds on the x and y coordinates

        Returns
        -------
        model_path : a list containing tuples of XY coordinates

        Notes
        -----
        scale = 2, median disp = 4.4
        scale = ~2.3 has a median displacement of 5 units

        Directions are chosen randomly, as in a random walk
        Step sizes however are chosen according to the distribution:

            P(l_j) ~ l_j**(-a)
            where a is a parameter in the range 1 < a <= 3

        Defaults to a = 2 as the optimal parameter for foraging behavior
        see : Viswanathan 1999, Nature

        Obtaining Levy distributed random variables:

        To transform uniformly distributed random variable
        into another distribution, use inverse cum. dist. func. (CDF)

        if F is CDF corresponding to probability density of variable f and u
        is a uniformly distributed random variable on [0,1]

        x = F^-1(u)
        for pure power law distribution P(l) = l**-a
        F(x) = 1 - ( x / x_min )**(-a)
        where x_min is a lower bound on the random variable

        F^-1(u) = x_min(1 - u)^(-1/a)

        See: (Devroye 1986, Non-uniform random variable generation)

        or

        X = F^-1(U) = c / ( Phi^-1(1 - U/2) )**2 + mu

        c : scaling parameter
        Phi(x) : CDF of Gaussian distribution
        mu : location

        for a pure Levy distribution

        References
        ----------
        [1] http://www.math.uah.edu/stat/special/Levy.html
        '''
        from scipy.stats import levy

        model_path = [ origin ]
        i = 0
        while i < N:

            rate = levy.rvs(scale=scale_coeff)

            if i == 0:
                direction = np.random.random(1) * 2 * np.pi

            turn = np.random.random() * 2 * np.pi
            direction = (direction + turn) % (2 * np.pi)

            dx = np.cos(direction)*rate
            dy = np.sin(direction)*rate

            step = (float(dx), float(dy))

            try_x = model_path[-1][0] + step[0]
            try_y = model_path[-1][1] + step[1]
            if bound_x[0] < try_x < bound_x[1] and bound_y[0] < try_y < bound_y[1]:
                model_path.append( (try_x, try_y) )
                i += 1
            else:
                pass

        return model_path
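
The inverse-CDF route described in the docstring above can be checked directly against scipy's sampler. A minimal sketch (c, mu, and n_samples are illustrative; medians are compared because the Levy law has no finite mean):

import numpy as np
from scipy.stats import levy, norm

c, mu, n_samples = 1.0, 0.0, 100_000
u = np.random.uniform(size=n_samples)

# X = F^-1(U) = c / (Phi^-1(1 - U/2))**2 + mu, the Levy quantile function
x_manual = c / norm.ppf(1.0 - u / 2.0) ** 2 + mu
x_scipy = levy.rvs(loc=mu, scale=c, size=n_samples)

print(np.median(x_manual), np.median(x_scipy))   # both close to ~2.198 * c
print(levy.ppf(0.5, loc=mu, scale=c))            # analytic median for comparison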
Example #18
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import levy

fig, ax = plt.subplots(1, 1)

# Display the probability density function (``pdf``):

x = np.linspace(levy.ppf(0.01), levy.ppf(0.99), 100)
ax.plot(x, levy.pdf(x), 'r-', lw=5, alpha=0.6, label='levy pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = levy()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = levy.ppf([0.001, 0.5, 0.999])
np.allclose([0.001, 0.5, 0.999], levy.cdf(vals))
# True

# Generate random numbers:

r = levy.rvs(size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
Example #19
def levy_rdv(a, b, size):
    levy_dat = levy.rvs(size=size)
    levy_ab = (b - a) * ((levy_dat - min(levy_dat)) /
                         (max(levy_dat) - min(levy_dat))) + a
    return np.array(levy_ab, dtype=int)
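
Usage sketch: the min-max rescaling above maps the smallest draw to a and the largest to b before truncating to int, so most values cluster near a because of the heavy Levy tail (the arguments below are illustrative):

import numpy as np
from scipy.stats import levy

samples = levy_rdv(0, 100, size=1000)
print(samples.min(), samples.max())   # endpoints map to a and b (up to int truncation)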
Example #20
    def update(self):
        try:
            # Sense

            collided, prox = self.sense_proximity()

            # Occasionally perform graphslam
            Z = self.sense_landmarks()
            self.sensed_landmarks.append(Z)

            if self.localize_timer == 0:
                data = [[self.sensed_landmarks[i], self.sensed_pos[i][0]]
                        for i in range(self.pos_buffer_len)]
                result = slam(self.sensed_pos[0][1], data, len(self.landmarks),
                              self.motion_noise, self.measurement_noise)
                N = len(data) + 1
                num_landmarks = len(self.landmarks)
                landmark_dx = sum([
                    result[2 * (N + i)][0] - self.landmarks[i][1]
                    for i in range(num_landmarks)
                ]) / num_landmarks
                landmark_dy = sum([
                    result[2 * (N + i) + 1][0] - self.landmarks[i][2]
                    for i in range(num_landmarks)
                ]) / num_landmarks
                self.x_sense = result[2 * (N - 1)][0] - landmark_dx
                self.y_sense = result[2 * N - 1][0] - landmark_dy
                self.localize_timer = self.pos_buffer_len
            else:
                self.localize_timer -= 1

            # Record

            x_lower = int(round(self.x_sense - self.sense_range))
            x_higher = int(round(self.x_sense + self.sense_range + 1))
            y_lower = int(round(self.y_sense - self.sense_range))
            y_higher = int(round(self.y_sense + self.sense_range + 1))
            # Increment occ_grids for 0 and 1 by detected type in grid points in sensor range
            self.occ_grid[1][x_lower:x_higher,
                             y_lower:y_higher] += prox.astype(int)
            self.occ_grid[0][x_lower:x_higher,
                             y_lower:y_higher] += np.logical_not(prox).astype(
                                 int)

            # Move
            if (collided):
                self.turn(pi)  # turn 180 degrees

                self.steps_remaining += 3
                self.step(
                )  # TODO: fix robot stuck in wall bug, then remove auto steps
                # might be fixed by implementing localization....

            elif self.steps_remaining <= 0:
                self.steps_remaining = int(levy.rvs())
                angle = cauchy.rvs()
                self.turn(angle)
                return True
            else:
                self.step()
                self.steps_remaining -= 1
                return True
        except:
            pass  # no error handling for broken robots
Example #21
import numpy as np
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.misc import derivative
import seaborn as sns
from scipy.stats import levy
from sklearn import linear_model
from sklearn import neighbors
import scipy

cal = pd.read_excel('CAL.xlsx', sheet_name='valori CAL')
#cal = cal.fillna(0)

##################
#### example with levy ####
l = levy.fit(cal['AS16'])
sampei = levy.rvs(loc=l[0], scale=l[1], size=100)

#################

monthwise = OrderedDict()

mesi = [
    'gen', 'feb', 'mar', 'apr', 'mag', 'giu', 'lug', 'ago', 'set', 'ott',
    'nov', 'dic'
]

for y in range(10, 18, 1):
    varn = 'AS' + str(y)
    res = np.repeat(0.0, 12)  # float array so the monthly means are not truncated
    for m in range(len(mesi)):
        res[m] = cal[varn].loc[cal[cal.columns[8]] == mesi[m]].mean()
Example #22
net.fc_mu.register_backward_hook(print_grad)
net.fc_scale.register_backward_hook(print_grad)
opt = torch.optim.SGD(net.parameters(), lr=0.1)

epochs = 100
# test diff through reparam dist

print("Reparm")
use_levy = True
for i in range(epochs):
    for x, target in zip(x_set, target_set):
        n, z = net(x)
        error = loss(n, target)
        print(f"Levy Parameters: mu:{z[0]} scale{z[1]}")
        print(f"Output:{n} | loss:{error}")

        opt.zero_grad()
        error.backward()
        opt.step()

# test differentiating through levy dist
for i in range(epochs):
    z = net.forward(x)
    n = levy.rvs(loc=z[0], scale=z[1], size=1)

    error = loss(n, target)
    print(f"Output:{n} | loss:{error}")
    opt.zero_grad()
    error.backward()
    opt.step()
Example #23
import numpy as np
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.misc import derivative
import seaborn as sns
from scipy.stats import levy
from sklearn import linear_model
from sklearn import  neighbors
import scipy

cal = pd.read_excel('CAL.xlsx', sheet_name='valori CAL')
#cal = cal.fillna(0)

##################
#### example with levy ####
l = levy.fit(cal['AS16'])
sampei = levy.rvs(loc = l[0], scale = l[1], size = 100)


#################

monthwise = OrderedDict()

mesi = ['gen', 'feb', 'mar', 'apr', 'mag', 'giu', 'lug', 'ago', 'set', 'ott', 'nov', 'dic']

for y in range(10,18,1):
    varn = 'AS'+str(y)
    res = np.repeat(0.0, 12)  # float array so the monthly means are not truncated
    for m in range(len(mesi)):
        res[m] = cal[varn].loc[cal[cal.columns[8]] == mesi[m]].mean()
    monthwise[varn] = res
    
Example #24
def bounded_levy(size, limit=10):
    return [
        min(val, LEVY_MAX) * limit / LEVY_MAX for val in levy.rvs(size=size)
    ]
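
A hedged usage sketch; LEVY_MAX is a module-level constant in the original source, so a placeholder value is assumed here only to make the call runnable:

from scipy.stats import levy

LEVY_MAX = 100.0   # assumed cap, not taken from the original module
print(bounded_levy(5, limit=10))   # five values, each capped at 10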
Example #25
def _do_movement(spp):
    # get individuals' coordinates (soon to be their old coords, so
    # 'old_x' and 'old_y')
    old_x, old_y = [
        a.flatten() for a in np.split(spp._get_coords(), 2, axis=1)
    ]
    # and get their cells (by rounding down to the int)
    old_x_cells, old_y_cells = [
        a.flatten() for a in np.split(spp._get_cells(), 2, axis=1)
    ]
    # choose direction using movement surface, if applicable
    if spp._move_surf:
        # and use those choices to draw movement directions
        direction = spp._move_surf._draw_directions(old_x_cells, old_y_cells)
        # NOTE: Pretty sure that I don't need to constrain values output
        # for the Gaussian KDE that is approximating the von Mises mixture
        # distribution to 0<=val<=2*pi, because e.g. cos(2*pi + 1) = cos(1),
        # etc...
        # NOTE: indexed out of move_surf as y then x because the
        # list of lists (like a numpy array structure) is indexed i then j,
        # i.e. vertical, then horizontal
    # else, choose direction using a random walk with a uniform vonmises
    elif not spp._move_surf:
        direction = _r_vonmises(spp.direction_distr_mu,
                                spp.direction_distr_kappa,
                                size=len(old_x))

    # choose distance
    # NOTE: Instead of lognormal, could use something with long right tail
    # for Levy-flight type movement, same as below
    if spp.movement_distance_distr == 'levy':
        distance = _s_levy.rvs(loc=spp.movement_distance_distr_param1,
                               scale=spp.movement_distance_distr_param2,
                               size=len(old_x))
    elif spp.movement_distance_distr == 'wald':
        distance = _wald(mean=spp.movement_distance_distr_param1,
                         scale=spp.movement_distance_distr_param2,
                         size=len(old_x))
    elif spp.movement_distance_distr == 'lognormal':
        distance = _lognormal(mean=spp.movement_distance_distr_param1,
                              sigma=spp.movement_distance_distr_param2,
                              size=len(old_x))

    # decompose distance into x and y components
    dist_x = _cos(direction) * distance
    dist_y = _sin(direction) * distance
    # multiply the x and y distances by the land's resolution-ratios,
    # if they're not 1 and 1 (e.g. a non-square-resolution raster was read in)
    if spp._land_res_ratio[0] != 1:
        dist_x *= spp._land_res_ratio[0]
    if spp._land_res_ratio[1] != 1:
        dist_y *= spp._land_res_ratio[1]

    # create the new locations by adding x- and y-dim line segments to their
    # current positions, using trig then clip the values to be within the
    # landscape dimensions
    # NOTE: subtract a small value to avoid having the dimension itself set
    # as a coordinate, when the coordinates are converted to np.float32
    new_x = old_x + dist_x
    new_x = np.clip(new_x, a_min=0, a_max=spp._land_dim[0] - 0.001)
    new_y = old_y + dist_y
    new_y = np.clip(new_y, a_min=0, a_max=spp._land_dim[1] - 0.001)

    # then feed the new locations into each individual's set_pos method
    [ind._set_pos(x, y) for ind, x, y in zip(spp.values(), new_x, new_y)]