Example #1
def nearest_point_distance(points, x_pixels=60, y_pixels=60):
    """Computes the distances from grid points to the nearest input point.

  Accepts a (batch of) list of xy points as input. The output is a (batch of)
  matrix of distances, where each entry of the matrix represents the distance
  from the corresponding point in the unit square to the nearest input point.

  Args:
    points: A batch_size x n x 2 matrix of points.
    x_pixels: How many x coordinates the output should have.
    y_pixels: How many y coordinates the output should have.

  Returns:
    A batch of matrices of distances. It is a tensor of dimension
    batch_size x x_pixels x y_pixels.
  """
    x_coords = np.linspace(0.0, 1.0, x_pixels)[np.newaxis, np.newaxis, :]
    y_coords = np.linspace(0.0, 1.0, y_pixels)[np.newaxis, np.newaxis, :]

    x_points = points[:, :, 0:1]
    y_points = points[:, :, 1:2]

    x_diff_squared = jnp.square(x_points - x_coords)
    y_diff_squared = jnp.square(y_points - y_coords)

    distance_squared = jnp.amin(  # distance to *nearest* point only
        x_diff_squared[:, :, :, np.newaxis] +
        y_diff_squared[:, :, np.newaxis, :],
        axis=1)

    return jnp.sqrt(distance_squared)
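A minimal usage sketch (assuming the module imports numpy as np and jax.numpy as jnp, which the body implies):

import numpy as np
import jax.numpy as jnp

pts = jnp.array([[[0.25, 0.25], [0.75, 0.75]]])  # batch of 1 with two points
dist = nearest_point_distance(pts, x_pixels=4, y_pixels=4)
print(dist.shape)  # (1, 4, 4): one distance per grid cell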
Example #2
    def _get_1d_latent_grid(self, paths_x):

        num_points_grid = 20

        max_x = np.amax(paths_x)
        min_x = np.amin(paths_x)
        x_array = np.linspace(min_x, max_x, num_points_grid)[:, None]
        xx, tt = np.meshgrid(np.linspace(min_x, max_x, num_points_grid),
                             np.linspace(0, 1, paths_x.shape[1]))
        txpairs = np.transpose(np.vstack([tt.reshape(-1), xx.reshape(-1)]))

        def scan_fn(carry, paths):
            drift, diffusion, index = carry
            time = index * self.config["delta_t"] - 0.5

            gp_matrices, temp_drift_function, temp_diffusion_function = self.model.build(
                self.model.model_vars())
            temp_drift = temp_drift_function(x_array, time)
            temp_diffusion = np.linalg.det(
                temp_diffusion_function(x_array, time))

            drift = ops.index_add(drift, ops.index[index], temp_drift)
            diffusion = ops.index_add(diffusion, ops.index[index],
                                      temp_diffusion)
            index += 1

            return (drift, diffusion, index), np.array([0.])

        drift_grid = np.zeros((paths_x.shape[1], num_points_grid, 1))
        diffusion_grid = np.zeros((paths_x.shape[1], num_points_grid))
        (drift_grid, diffusion_grid,
         index), _ = lax.scan(scan_fn, (drift_grid, diffusion_grid, 0),
                              np.transpose(paths_x, (1, 0, 2)))
        return txpairs, drift_grid.reshape(-1), diffusion_grid.reshape(-1)
Example #3
def rescale_points(points, margin=0.1):
    """Rescales and translates points to lie inside unit square.

  Accepts a list of xy points as input, and applies a
  scalar multiplier and a translation so that the bounding box of the points
  becomes either [margin, 1 - margin] x (smaller interval centered on 0.5) or
  (smaller interval centered on 0.5) x [margin, 1 - margin].

  Args:
    points: The points to transform. A batch_size x n x 2 matrix.
    margin: How much empty space to leave on each side of the unit square.

  Returns:
    A new array consisting of the translated and rotates points.
  """
    min_corner = jnp.amin(points, axis=1, keepdims=True)  # num_images x 1 x 2
    points = points - min_corner  # bounding boxes: [0, x_size] x [0, y_size]
    rectangular_size = jnp.amax(points, axis=1,
                                keepdims=True)  # num_images x 1 x 2
    points = points - 0.5 * rectangular_size
    # bounding boxes: [-x_size/2, -y_size/2] x [x_size/2, y_size/2]
    square_size = jnp.amax(rectangular_size, axis=2,
                           keepdims=True)  # num_images x 1 x 1
    points = points * (1 - 2 * margin) / jnp.maximum(square_size, 1e-5)
    points = points + 0.5  # center on (0.5, 0.5)
    return points
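A quick check of the rescaling, as a minimal sketch (jnp assumed to be jax.numpy):

import jax.numpy as jnp

pts = jnp.array([[[2.0, 3.0], [4.0, 7.0], [3.0, 5.0]]])
scaled = rescale_points(pts, margin=0.1)
# The longer side of the bounding box now spans [margin, 1 - margin]:
print(float(jnp.amin(scaled)), float(jnp.amax(scaled)))  # 0.1 0.9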
Example #4
def uniform_stochastic_quantize(v: jnp.ndarray,
                                num_levels: int,
                                rng: PRNGKey,
                                v_min: Optional[float] = None,
                                v_max: Optional[float] = None) -> jnp.ndarray:
  """Uniform stochastic algorithm in https://arxiv.org/pdf/1611.00429.pdf.

  Args:
    v: vector to be quantized.
    num_levels: Number of levels of quantization.
    rng: jax random key.
    v_min: minimum threshold for quantization. If None, sets it to jnp.amin(v).
    v_max: maximum threshold for quantization. If None, sets it to jnp.amax(v).

  Returns:
    Quantized array.
  """
  # Rescale the vector to lie between zero and one.
  if v_min is None:
    v_min = jnp.amin(v)
  if v_max is None:
    v_max = jnp.amax(v)
  v = jnp.nan_to_num((v - v_min) / (v_max - v_min))
  v = jnp.maximum(0., jnp.minimum(v, 1.))
  # Compute the upper and lower boundary of each value.
  v_ceil = jnp.ceil(v * (num_levels - 1)) / (num_levels - 1)
  v_floor = jnp.floor(v * (num_levels - 1)) / (num_levels - 1)
  # uniformly quantize between v_ceil and v_floor.
  rand = jax.random.uniform(key=rng, shape=v.shape)
  threshold = jnp.nan_to_num((v - v_floor) / (v_ceil - v_floor))
  quantized = jnp.where(rand > threshold, v_floor, v_ceil)
  # Rescale the values to the original range and return them.
  return v_min + quantized * (v_max - v_min)
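A minimal usage sketch (PRNGKey assumed to be jax.random.PRNGKey):

import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)
v = jnp.linspace(-1.0, 1.0, 8)
q = uniform_stochastic_quantize(v, num_levels=4, rng=key)
# Each entry of q lands on one of 4 evenly spaced levels in [v_min, v_max];
# rounding up vs. down is randomized so that E[q] == v (unbiased).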
Example #5
def test_force(a_tensor, b_tensor):
    # a_eval, a_evec = np.linalg.eigh(a_tensor)
    # b_eval, b_evec = np.linalg.eigh(b_tensor)

    a_eval, a_evec = evp.dsyevv3(a_tensor)
    b_eval, b_evec = evp.dsyevv3(b_tensor)

    # print("ref w", a_eval)
    # print("test w", evp.dsyevc3(a_tensor))

    # print("ref v", a_evec)
    # print("test v", evp.dsyevv3(a_tensor))

    # assert 0

    r = np.matmul(np.transpose(a_evec), b_evec)
    I = np.eye(3)
    rI = r * I  # 3x3 -> 3x3
    pos = np.sum(rI, axis=-1)  # 3x3 -> 3
    neg = -np.sum(rI, axis=-1)  # 3x3 -> 3
    acos_pos = np.arccos(pos)  # 3 -> 3
    acos_neg = np.arccos(neg)  # 3 -> 3
    a = np.amin([acos_pos, acos_neg], axis=0)  # 2x3 -> 3
    a2 = a * a  # 3->3
    l = np.sum(a2)  # 3->1

    # derivatives, start backprop
    dl_da2 = np.ones(3)  # 1 x 3
    da2_da = 2 * a * np.eye(3)  # 3 x 3
    da_darg = np.stack(
        [np.eye(3) * (acos_pos < acos_neg),
         np.eye(3) * (acos_neg < acos_pos)])

    darg_dpn = np.stack([
        np.eye(3) * (-1 / np.sqrt(1 - pos * pos)),
        np.eye(3) * (-1 / np.sqrt(1 - neg * neg))
    ])

    dl_darg = np.matmul(np.matmul(dl_da2, da2_da), da_darg)
    dpos = dl_darg[0] * (-1 / np.sqrt(1 - pos * pos))
    dneg = dl_darg[1] * (-1 / np.sqrt(1 - neg * neg))
    dneg = -dneg

    dpn_dr = np.array([
        [[1, 1, 1], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [1, 1, 1], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [1, 1, 1]],
    ])

    # element wise
    dr = (np.matmul(dpos, dpn_dr) + np.matmul(dneg, dpn_dr)) * np.eye(3)

    dr_daevec = np.matmul(b_evec, dr.T)
    dr_dbevec = np.matmul(a_evec, dr.T)

    dl_datensor = grad_eigh(a_eval, a_evec, np.zeros_like(a_eval), dr_daevec)
    dl_dbtensor = grad_eigh(b_eval, b_evec, np.zeros_like(b_eval), dr_dbevec)

    return dl_datensor, dl_dbtensor
Example #6
    def set_minmax(self, nn):
      try:
        self.xmin = nn.model.xmin
        self.xmax = nn.model.xmax
      except (NameError,AttributeError):
        self.xmin = np.amin(nn.model.x.data.numpy(),axis=0)
        self.xmax = np.amax(nn.model.x.data.numpy(),axis=0)

      self.range = (self.xmax - self.xmin)
Example #7
  def encode(self,x):
    try:
      self.xmin
      self.xmax
    except (NameError,AttributeError):
      self.xmin = np.amin(x.data.numpy(),axis=0)
      self.xmax = np.amax(x.data.numpy(),axis=0)

    x = (x.data.numpy()-self.xmin)/(self.xmax-self.xmin)
    return Variable(torch.from_numpy(x.copy()).type(dtype))
Example #8
    def sample(self, key, sample_shape=()):
        key_dirichlet, key_multinom = random.split(key)
        probs = self._dirichlet.sample(key_dirichlet, sample_shape)
        total_count = jnp.amax(self.total_count)
        if not_jax_tracer(total_count):
            # NB: the error cannot be raised if the inhomogeneity occurs while tracing
            if jnp.amin(self.total_count) != total_count:
                raise NotImplementedError(
                    "Inhomogeneous total count not supported"
                    " by `sample`.")
        return Multinomial(total_count, probs).sample(key_multinom)
Example #9
def pmi_u(r):
    I = np.eye(3)

    loss = []
    for v, e in zip(r, I):
        a_pos = np.arccos(np.sum(v * e))  # norm is always 1
        a_neg = np.arccos(np.sum(-v * e))  # norm is always 1
        a = np.amin([a_pos, a_neg])
        loss.append(a * a)

    return np.sum(loss)
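A worked sketch (np here may be NumPy or jax.numpy; both behave identically for this call). A rotation by theta about the z-axis misaligns two axes by theta each:

import numpy as np

theta = np.pi / 6
r = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
print(pmi_u(r))  # ~0.548 == 2 * (pi / 6)**2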
Example #10
    def enumerate_support(self, expand=True):
        total_count = jnp.amax(self.total_count)
        if not_jax_tracer(total_count):
            # NB: the error cannot be raised if the inhomogeneity occurs while tracing
            if jnp.amin(self.total_count) != total_count:
                raise NotImplementedError("Inhomogeneous total count not supported"
                                          " by `enumerate_support`.")
        values = jnp.arange(total_count + 1).reshape((-1,) + (1,) * len(self.batch_shape))
        if expand:
            values = jnp.broadcast_to(values, values.shape[:1] + self.batch_shape)
        return values
Example #11
        def scan_fn(carry, paths):
            x, drift, diffusion, index = carry
            time = index * self.config["delta_t"] - 0.5
            max_x = np.amax(paths_x[:, :, 0])
            min_x = np.amin(paths_x[:, :, 0])
            max_y = np.amax(paths_x[:, :, 1])
            min_y = np.amin(paths_x[:, :, 1])
            xx, yy = np.meshgrid(np.linspace(min_x, max_x, num_points_grid),
                                 np.linspace(min_y, max_y, num_points_grid))
            temp = np.transpose(np.vstack([xx.reshape(-1), yy.reshape(-1)]))

            gp_matrices, temp_drift_function, temp_diffusion_function = self.model.build(
                self.model.model_vars())
            temp_drift = temp_drift_function(temp, time)
            temp_diffusion = np.linalg.det(temp_diffusion_function(temp, time))

            x = ops.index_add(x, ops.index[index], temp)
            drift = ops.index_add(drift, ops.index[index], temp_drift)
            diffusion = ops.index_add(diffusion, ops.index[index],
                                      temp_diffusion)
            index += 1

            return (x, drift, diffusion, index), np.array([0.])
Example #12
    def __init__(self, data, leafsize=10):
        self.data = np.asarray(data)
        # if self.data.dtype.kind == 'c':
        #     raise TypeError("KDTree does not work with complex data")

        self.n, self.m = np.shape(self.data)
        self.leafsize = int(leafsize)
        # if self.leafsize < 1:
        #     raise ValueError("leafsize must be at least 1")
        self.maxes = np.amax(self.data,axis=0)
        self.mins = np.amin(self.data,axis=0)

        print('building tree')
        self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
Example #13
def get_dx(grids):
    """Gets the grid spacing from grids array.

  Args:
    grids: Float numpy array with shape (num_grids,).

  Returns:
    Float, grid spacing.

  Raises:
    ValueError: If grids.ndim is not 1.
  """
    if grids.ndim != 1:
        raise ValueError('grids.ndim is expected to be 1 but got %d' %
                         grids.ndim)
    return (jnp.amax(grids) - jnp.amin(grids)) / (grids.size - 1)
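A minimal usage sketch (jnp assumed to be jax.numpy):

import jax.numpy as jnp

grids = jnp.linspace(0.0, 1.0, 11)
print(get_dx(grids))  # 0.1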
Example #14
def simplified_u(a_tensor, b_tensor):
    a_eval, a_evec = np.linalg.eigh(a_tensor)
    b_eval, b_evec = np.linalg.eigh(b_tensor)
    r = np.matmul(np.transpose(a_evec), b_evec)
    I = np.eye(3)
    rI = r * I  # 3x3 -> 3x3
    pos = np.sum(rI, axis=-1)
    neg = np.sum(-rI, axis=-1)
    acos_pos = np.arccos(pos)
    acos_neg = np.arccos(neg)
    # [a,b,c]
    # [d,e,f]
    # -------
    # [min(a,d), min(b,e), min(c,f)]
    a = np.amin([acos_pos, acos_neg], axis=0)
    return np.sum(a * a)
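A hedged usage sketch; plain NumPy (aliased onp here purely for illustration) builds symmetric inputs, since eigh assumes symmetry:

import numpy as onp

rng = onp.random.RandomState(0)
m_a, m_b = rng.randn(3, 3), rng.randn(3, 3)
a_tensor = m_a + m_a.T  # symmetrize for eigh
b_tensor = m_b + m_b.T
print(simplified_u(a_tensor, b_tensor))  # scalar: summed squared misalignment angles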
Example #15
    def __build(self, idx, maxes, mins):
        if len(idx) <= self.leafsize:
            return KDTree.leafnode(idx)
        else:
            data = self.data[idx]
            # maxes = np.amax(data,axis=0)
            # mins = np.amin(data,axis=0)
            d = np.argmax(maxes-mins)
            maxval = maxes[d]
            minval = mins[d]
            if maxval == minval:
                # all points are identical; warn user?
                return KDTree.leafnode(idx)
            data = data[:,d]

            # sliding midpoint rule; see Maneewongvatana and Mount 1999
            # for arguments that this is a good idea.
            split = (maxval+minval)/2
            less_idx = np.nonzero(data <= split)[0]
            greater_idx = np.nonzero(data > split)[0]
            if len(less_idx) == 0:
                split = np.amin(data)
                less_idx = np.nonzero(data <= split)[0]
                greater_idx = np.nonzero(data > split)[0]
            if len(greater_idx) == 0:
                split = np.amax(data)
                less_idx = np.nonzero(data < split)[0]
                greater_idx = np.nonzero(data >= split)[0]
            if len(less_idx) == 0:
                # _still_ zero? all must have the same value
                if not np.all(data == data[0]):
                    raise ValueError("Troublesome data array: %s" % data)
                split = data[0]
                less_idx = np.arange(len(data)-1)
                greater_idx = np.array([len(data)-1])

            # lessmaxes = maxes.copy()
            # lessmaxes = index_update(lessmaxes, index[d], split)
            lessmaxes = np.asarray([x if (ii != d) else split for ii,x in enumerate(maxes)])
            # lessmaxes[d] = split
            # greatermins = mins.copy()
            # greatermins = index_update(greatermins, index[d], split)
            greatermins = np.asarray([x if (ii != d) else split for ii,x in enumerate(mins)])
            # greatermins[d] = split
            return KDTree.innernode(d, split,
                    self.__build(idx[less_idx],lessmaxes,mins),
                    self.__build(idx[greater_idx],maxes,greatermins))
Example #16
  def fprop(self, inputs, paddings=None):
    """Apply the stacking to inputs along the time axis.

    Args:
      inputs: The inputs tensor. It is expected to be of shape [batch, time,
        feature].
      paddings: The paddings tensor. It is expected to be of shape [batch, time,
        1], where all but the last dimension match inputs. Each value is 0 or 1
        indicating whether a time step of a sequence is padded in the inputs to
        reach the max length in the batch.

    Returns:
      (outputs, out_paddings) pair.
        outputs is of shape [batch, ceil(time / stride), feature * stacking].
        out_paddings is of shape [batch, ceil(time / stride), 1]. out_paddings
        will be 0 if any of the corresponding input padding is 0.
    """
    p = self.params
    if paddings is None:
      paddings = jnp.zeros(
          jnp.concatenate([jnp.array(inputs.shape[:-1]),
                           jnp.array([1])]),
          dtype=inputs.dtype)

    # Checks the inputs shape, paddings has 3 dimensions.
    base_layer.assert_has_shape(inputs, [-1, -1, -1])
    base_layer.assert_has_shape(paddings, [-1, -1, 1])

    # Trivial case.
    if 0 == p.left_context == p.right_context and 1 == p.stride:
      return inputs, paddings

    outputs = self._applystack(inputs)

    # Stack the padding values with the same context and stride parameters.
    # Then take the minimum padding values within each stacking window, since
    # an output time step becomes a padded one only if all of the underlying
    # stacked steps are padded ones.
    out_paddings = self._applystack(paddings, pad_value=1)
    if p.padding_reduce_option == 'reduce_min':
      out_paddings = jnp.amin(out_paddings, axis=2, keepdims=True)
    else:
      out_paddings = jnp.amax(out_paddings, axis=2, keepdims=True)

    return outputs, out_paddings
Example #17
def pmi_restraints(conf, params, box, lamb, a_idxs, b_idxs, masses,
                   angle_force, com_force):

    a_com, a_tensor = inertia_tensor(conf[a_idxs], masses[a_idxs])
    b_com, b_tensor = inertia_tensor(conf[b_idxs], masses[b_idxs])

    a_eval, a_evec = np.linalg.eigh(a_tensor)  # already sorted
    b_eval, b_evec = np.linalg.eigh(b_tensor)  # already sorted

    r = np.matmul(np.transpose(a_evec), b_evec)
    I = np.eye(3)

    loss = []
    for v, e in zip(r, I):
        a_pos = np.arccos(np.sum(v * e))  # norm is always 1
        a_neg = np.arccos(np.sum(-v * e))  # norm is always 1
        a = np.amin([a_pos, a_neg])
        loss.append(a * a)
Example #18
    def __fitCensoredMRR(self):
        '''
        Median rank regression (50%) method to estimate the parameters. Better
        for a low number of failures. This method does not provide confidence
        bounds for the parameters; however, a confidence bound in time can be
        estimated using the median rank for a specified limit (other than 50%).
        :return:
        '''
        iks = jnp.log(self.est_data['time'].to_numpy())
        igrek = jnp.log(
            jnp.log(1.0 / (1.0 - util.median_rank(
                self.N, self.est_data['new_order_num'], 0.5))))

        if self.method == Method.MRRCensored2p:
            slope, intercept, r, _, _ = self.__lineregress(iks, igrek)
        elif self.method == Method.MRRCensored3p:
            locs = jnp.linspace(0, jnp.amin(iks), 10)
            arr = np.empty((0, 4), float)
            for l in locs:
                l_iks = iks - l
                l_slope, l_intercept, l_r, _, _ = self.__lineregress(
                    l_iks, igrek)  # fit on the location-shifted times
                arr = np.append(arr,
                                np.array([[l, l_slope, l_intercept, l_r]]),
                                axis=0)
            g = arr[np.argmax(arr[:, 3])]
            slope = g[1]
            intercept = g[2]
            r = g[3]
            self.loc = g[0]

        # assigning estimated parameters
        self.shape = slope
        self.scale = jnp.exp(-intercept / slope)

        self.est_data['ub'] = (jnp.log(1.0 / (1.0 - util.median_rank(
            self.N, self.est_data['new_order_num'], self.CL)))
                               **(1.0 / self.shape) * self.scale)
        self.est_data['lb'] = (jnp.log(1.0 / (1.0 - util.median_rank(
            self.N, self.est_data['new_order_num'], 1.0 - self.CL)))
                               **(1.0 / self.shape) * self.scale)
        self.r2 = r**2
        self.est_data = self.est_data[['time', 'cdf', 'lb', 'ub']]
        self.method = Method.MRRCensored2p
        self.converged = True
Example #19
def logit_transformer(logits,
                      temp=1.0,
                      confidence_quantile_threshold=1.0,
                      self_supervised_label_transformation='soft',
                      logit_indices=None):
    """Transforms logits into labels used as targets in a loss functions.

  Args:
    logits: jnp float array; Prediction of a model.
    temp: float; Softmax temp.
    confidence_quantile_threshold: float; Training examples are weighted based
      on this.
    self_supervised_label_transformation: str; Type of labels to produce (soft
      or sharp).
    logit_indices: list(int); Usable Indices for logits (list of indices to
      use).

  Returns:

  """
    # Compute confidence for each prediction:
    confidence = jnp.amax(logits, axis=-1) - jnp.amin(logits, axis=-1)

    # Compute confidence threshold:
    alpha = jnp.quantile(confidence, confidence_quantile_threshold)
    # Only train on confident outputs:
    weights = jnp.float32(confidence >= alpha)

    if self_supervised_label_transformation == 'sharp':
        if logit_indices:
            logits = logits[Ellipsis, logit_indices]
        new_labels = jnp.argmax(logits, axis=-1)
    elif self_supervised_label_transformation == 'soft':
        new_labels = nn.softmax(logits / (temp or 1.0), axis=-1)
    else:
        new_labels = logits

    return new_labels, weights
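A minimal usage sketch (assuming nn above refers to a softmax provider such as jax.nn, whose softmax has this signature):

import jax.numpy as jnp

logits = jnp.array([[4.0, 1.0, 0.0],
                    [0.3, 0.2, 0.1]])
labels, weights = logit_transformer(logits, temp=2.0,
                                    confidence_quantile_threshold=0.5)
# labels are temperature-softened softmax targets; weights keep only rows
# whose max-minus-min logit gap reaches the 0.5 confidence quantile.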
Example #20
def binary_stochastic_quantize(v: jnp.ndarray,
                               rng: PRNGKey,
                               v_min: Optional[float] = None,
                               v_max: Optional[float] = None) -> jnp.ndarray:
  """Binary stochastic algorithm in https://arxiv.org/pdf/1611.00429.pdf.

  Args:
    v: vector to be quantized.
    rng: jax random key.
    v_min: minimum threshold for quantization. If None, sets it to jnp.amin(v).
    v_max: maximum threshold for quantization. If None, sets it to jnp.amax(v).

  Returns:
    Quantized array.
  """
  if v_min is None:
    v_min = jnp.amin(v)
  if v_max is None:
    v_max = jnp.amax(v)
  v = jnp.nan_to_num((v - v_min) / (v_max - v_min))
  v = jnp.maximum(0., jnp.minimum(v, 1.))
  rand = jax.random.uniform(key=rng, shape=v.shape)
  return jnp.where(rand > v, v_min, v_max)
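A minimal usage sketch:

import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(42)
v = jnp.array([0.1, 0.4, 0.9])
q = binary_stochastic_quantize(v, key)
# Every entry becomes either v_min (0.1) or v_max (0.9); the probability of
# picking v_max equals the normalized position of each entry, so E[q] == v.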
Example #21
    def get_groups(self, rng, state):
        if self.split_factor is None:
            # if no factor is given by default, we use prior infection rate.
            if np.size(state.prior_infection_rate) > 1:
                raise ValueError(
                    'Dorfman Splitting cannot be used with individual infection rates.'
                    + ' Consider using Informative Dorfman instead.')

            # set group size to value defined by Dorfman testing
            group_size = 1 + np.ceil(
                1 / np.sqrt(np.squeeze(state.prior_infection_rate)))
            # adjust to take into account testing limits
            group_size = np.amin((group_size, state.max_group_size))
            split_factor = -(-state.num_patients // group_size)
        else:
            # ensure the split factor does not produce groups that are too large
            min_splits = -(-state.num_patients // state.max_group_size)
            split_factor = np.maximum(self.split_factor, min_splits)

        indices = onp.array_split(np.arange(state.num_patients), split_factor)
        new_groups = onp.zeros((len(indices), state.num_patients))
        for i in range(len(indices)):
            new_groups[i, indices[i]] = True
        return np.array(new_groups, dtype=bool)
Example #22
def subtract_min(x):
    y = x - np.amin(x)
    return y
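A one-line check (np may be NumPy or jax.numpy; both behave identically here):

import numpy as np

print(subtract_min(np.array([3.0, 5.0, 2.0])))  # [1. 3. 0.]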
Example #23
def _amin(x, dim, keepdims=False):
    return np.amin(x, axis=dim, keepdims=keepdims)
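A one-line check (jnp assumed to be jax.numpy):

import jax.numpy as jnp

x = jnp.arange(6.0).reshape(2, 3)
print(_amin(x, dim=1, keepdims=True))  # [[0.] [3.]] with shape (2, 1)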
Example #24
lamb = 1e-10
print("len of spectrum")
print(len(spectrum))
pvals = npo.logspace(np.log10(10), np.log10(num_pca-1), 500)
sols = solve_implicit_z(npo.array(spectrum), pvals, lamb)

mode_errs = theory_learning_curves(spectrum, vecs, pvals, lamb, y)
sort_inds = np.argsort(spectrum)[::-1]


theory0 = npo.sum(mode_errs, axis = 1)
theory_adj = npo.sum(mode_errs, axis = 1) * num_pca / (num_pca - pvals + 1e-3)
plt.loglog(pvals, theory0, label = 'original theory')
plt.loglog(pvals, theory_adj, label = 'rescaled theory')
plt.legend()
plt.ylim([np.amin(theory_adj), np.amax(theory_adj)])
plt.xlabel(r'$p$', fontsize = 20)
plt.ylabel(r'$E_g$', fontsize=20)
plt.tight_layout()
plt.savefig('rescale_risk.pdf')
plt.show()

inds = [10, 100, 1000]


for i, j in enumerate(sort_inds[inds]):
    if inds[i]==0:
        plt.loglog(pvals, mode_errs[:,j] / mode_errs[0,j], label = r'$k=0$')
    else:
        plt.loglog(pvals, mode_errs[:,j] / mode_errs[0,j], label = r'$k = 10^{%d}$' % int(np.log10(inds[i])+0.01) )