Example #1
def roll_dist_1d(y, kernel):
    n = kernel.size
    samples = rolling_window_1d(y, n)
    # Normalize each window to sum to 1 before comparing it to the kernel.
    a = samples.sum(axis=1).reshape((-1, 1))
    samples = samples / np.broadcast_to(a, samples.shape)
    K = np.broadcast_to(kernel, samples.shape)
    return np.sum(np.square(samples - K), axis=1)
Example #2
    def get_input_features(self, mol):
        """get input features

        Args:
            mol (Mol):

        Returns:

        """
        type_check_num_atoms(mol, self.max_atoms)
        num_atoms = mol.GetNumAtoms()

        # Construct the atom array and adjacency matrix.
        atom_array = construct_atomic_number_array(mol, out_size=self.out_size)
        adj_array = construct_adj_matrix(mol, out_size=self.out_size)

        # Symmetrically normalize the adjacency matrix (A <- D^-1/2 A D^-1/2).
        degree_vec = numpy.sum(adj_array[:num_atoms], axis=1)
        degree_sqrt_inv = 1. / numpy.sqrt(degree_vec)

        adj_array[:num_atoms, :num_atoms] *= numpy.broadcast_to(
            degree_sqrt_inv[:, None], (num_atoms, num_atoms))
        adj_array[:num_atoms, :num_atoms] *= numpy.broadcast_to(
            degree_sqrt_inv[None, :], (num_atoms, num_atoms))
        super_node_x = construct_supernode_feature(mol, atom_array, adj_array, out_size=self.out_size_super)

        return atom_array, adj_array, super_node_x
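
Note: a minimal standalone sketch of the symmetric normalization above, on a toy adjacency matrix (values assumed; NumPy only):

import numpy

adj = numpy.array([[0., 1., 1.],
                   [1., 0., 0.],
                   [1., 0., 0.]])
degree_sqrt_inv = 1. / numpy.sqrt(adj.sum(axis=1))
# Scale the rows, then the columns, by 1/sqrt(degree).
adj *= numpy.broadcast_to(degree_sqrt_inv[:, None], adj.shape)
adj *= numpy.broadcast_to(degree_sqrt_inv[None, :], adj.shape)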
Example #3
  def _initialize_updated_shapes(self, session):
    shapes = array_ops.shape_n(self._vars)
    var_shapes = list(map(tuple, session.run(shapes)))

    if self._var_shapes is not None:
      new_old_shapes = zip(self._var_shapes, var_shapes)
      if all([old == new for old, new in new_old_shapes]):
        return

    self._var_shapes = var_shapes
    # Materialize the pairs; the zip iterator is consumed twice below (Python 3).
    vars_and_shapes = list(zip(self._vars, self._var_shapes))
    vars_and_shapes_dict = dict(vars_and_shapes)

    packed_bounds = None
    if self._var_to_bounds is not None:
      left_packed_bounds = []
      right_packed_bounds = []
      for var, var_shape in vars_and_shapes:
        shape = list(var_shape)
        bounds = (-np.infty, np.infty)
        if var in self._var_to_bounds:
          bounds = self._var_to_bounds[var]
        left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))
        right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))
      packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))
    self._packed_bounds = packed_bounds

    self._update_placeholders = [
        array_ops.placeholder(var.dtype) for var in self._vars
    ]
    self._var_updates = [
        var.assign(array_ops.reshape(placeholder, vars_and_shapes_dict[var]))
        for var, placeholder in zip(self._vars, self._update_placeholders)
    ]

    loss_grads = _compute_gradients(self._loss, self._vars)
    equalities_grads = [
        _compute_gradients(equality, self._vars)
        for equality in self._equalities
    ]
    inequalities_grads = [
        _compute_gradients(inequality, self._vars)
        for inequality in self._inequalities
    ]

    self._packed_var = self._pack(self._vars)
    self._packed_loss_grad = self._pack(loss_grads)
    self._packed_equality_grads = [
        self._pack(equality_grads) for equality_grads in equalities_grads
    ]
    self._packed_inequality_grads = [
        self._pack(inequality_grads) for inequality_grads in inequalities_grads
    ]

    dims = [_prod(vars_and_shapes_dict[var]) for var in self._vars]
    accumulated_dims = list(_accumulate(dims))
    self._packing_slices = [
        slice(start, end)
        for start, end in zip(accumulated_dims[:-1], accumulated_dims[1:])
    ]
Example #4
def multinomial(n, p, size=None):

    plates_n = np.shape(n)
    plates_p = np.shape(p)[:-1]
    k = np.shape(p)[-1]

    if size is None:
        size = misc.broadcasted_shape(plates_n, plates_p)

    if not misc.is_shape_subset(plates_n, size):
        raise ValueError("Shape of n does not broadcast to the given size")

    if not misc.is_shape_subset(plates_p, size):
        raise ValueError("Shape of p does not broadcast to the given size")

    # This isn't a very efficient implementation. One could use NumPy's
    # multinomial once for all those plates for which n and p is the same.

    n = np.broadcast_to(n, size)
    p = np.broadcast_to(p, size + (k,))

    x = np.empty(size + (k,))

    for i in misc.nested_iterator(size):
        x[i] = np.random.multinomial(n[i], p[i])

    return x.astype(int)
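
Note: a hypothetical call, assuming bayespy-style `misc` helpers are importable. A 1-D `n` broadcasts against a single probability vector `p`:

x = multinomial([5, 10, 15], [0.2, 0.3, 0.5])
# x.shape == (3, 3): one draw of three counts for each of the three n values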
Example #5
    def __init__(self, *args):
        if len(args) == 1:
            # From Game
            num_players = args[0].num_players
            num_strategies = args[0].num_strategies
        elif len(args) == 2:
            # Default constructor
            num_players = args[0]
            num_strategies = args[1]
        else:
            raise ValueError('Invalid constructor arguments')

        num_players = np.asarray(num_players, int)
        num_strategies = np.asarray(num_strategies, int)
        self.num_roles = max(num_players.size, num_strategies.size)
        self.num_players = np.broadcast_to(num_players, self.num_roles)
        self.num_strategies = np.broadcast_to(num_strategies, self.num_roles)
        self.num_role_strats = self.num_strategies.sum()
        self.role_starts = np.insert(self.num_strategies[:-1].cumsum(), 0, 0)
        self.role_index = self.role_repeat(np.arange(self.num_roles))
        self.num_strategies.setflags(write=False)
        self.num_players.setflags(write=False)
        self.role_starts.setflags(write=False)
        self.role_index.setflags(write=False)
        self._hash = hash((self.num_strategies.data.tobytes(),
                           self.num_players.data.tobytes()))

        assert np.all(self.num_players >= 0)
        assert np.all(self.num_strategies > 0)
Example #6
File: sigmath.py Project: jfigui/pyart
def texture_along_ray(myradar, var, wind_size=7):
    """
    Compute field texture along ray using a user specified
    window size.

    Parameters
    ----------
    myradar : radar object
        The radar object containing the field
    var : str
        Name of the field whose texture has to be computed
    wind_size : int
        Optional. Size of the rolling window used

    Returns
    -------
    tex : radar field
        The texture of the specified field

    """
    half_wind = int(wind_size/2)
    fld = myradar.fields[var]['data']
    tex = np.ma.zeros(fld.shape)
    tex[:] = np.ma.masked
    tex.set_fill_value(get_fillvalue())

    tex_aux = np.ma.std(rolling_window(fld, wind_size), -1)
    tex[:, half_wind:-half_wind] = tex_aux
    tex[:, 0:half_wind] = np.broadcast_to(
        tex_aux[:, 0].reshape(tex.shape[0], 1), (tex.shape[0], half_wind))
    tex[:, -half_wind:] = np.broadcast_to(
        tex_aux[:, -1].reshape(tex.shape[0], 1), (tex.shape[0], half_wind))

    return tex
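
Note: the two np.broadcast_to calls above fill the half-window border by repeating the first and last valid texture column. The same trick in isolation (toy shapes assumed):

import numpy as np

tex_aux = np.arange(12.).reshape(3, 4)  # stand-in for the rolling-window std
half_wind = 2
left_fill = np.broadcast_to(tex_aux[:, 0].reshape(3, 1), (3, half_wind))
# left_fill repeats the first column: [[0., 0.], [4., 4.], [8., 8.]]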
Example #7
 def test_integer_split_2D_rows_greater_max_int32(self):
     a = np.broadcast_to([0], (1 << 32, 2))
     res = array_split(a, 4)
     chunk = np.broadcast_to([0], (1 << 30, 2))
     tgt = [chunk] * 4
     for i in range(len(tgt)):
         assert_equal(res[i].shape, tgt[i].shape)
Example #8
def _assign_to_class(zh, zdr, kdp, rhohv, relh, mass_centers,
                    weights=np.array([1., 1., 1., 0.75, 0.5])):
    """
    assigns an hydrometeor class to a radar range bin computing
    the distance between the radar variables an a centroid

    Parameters
    ----------
    zh,zdr,kdp,rhohv,relh : radar field
        variables used for assigment normalized to [-1, 1] values

    mass_centers : matrix
        centroids normalized to [-1, 1] values

    weights : array
        optional. The weight given to each variable

    Returns
    -------
    hydroclass : int array
        the index corresponding to the assigned class
    mind_dist : float array
        the minimum distance to the centroids
    """
    # prepare data
    nrays = zh.shape[0]
    nbins = zdr.shape[1]
    nclasses = mass_centers.shape[0]
    nvariables = mass_centers.shape[1]

    data = np.ma.array([zh, zdr, kdp, rhohv, relh])
    weights_mat = np.broadcast_to(
        weights.reshape(nvariables, 1, 1),
        (nvariables, nrays, nbins))
    dist = np.ma.zeros((nclasses, nrays, nbins), dtype='float64')

    # compute distance: masked entries will not contribute to the distance
    for i in range(nclasses):
        centroids_class = mass_centers[i, :]
        centroids_class = np.broadcast_to(
            centroids_class.reshape(nvariables, 1, 1),
            (nvariables, nrays, nbins))
        dist[i, :, :] = np.ma.sqrt(np.ma.sum(
            ((centroids_class-data)**2.)*weights_mat, axis=0))

    # use very large fill_value so that masked entries will be sorted at the
    # end. There should not be any masked entry anyway
    class_vec = dist.argsort(axis=0, fill_value=10e40)

    # get minimum distance. Acts as a confidence value.
    # ndarray.sort sorts in place and returns None, so its result is not bound.
    dist.sort(axis=0, fill_value=10e40)
    min_dist = dist[0, :, :]

    # Entries with non-valid reflectivity values are set to 0 (No class)
    mask = np.ma.getmaskarray(zh)
    hydroclass = class_vec[0, :, :]+1
    hydroclass[mask] = 0

    return hydroclass, min_dist
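
Note: the distance loop above, reduced to a single centroid with toy shapes; in the masked-array version, masked entries simply drop out of the sum:

import numpy as np

nvariables, nrays, nbins = 5, 2, 3
data = np.random.rand(nvariables, nrays, nbins)
centroid = np.random.rand(nvariables)
weights = np.array([1., 1., 1., 0.75, 0.5])
dist = np.sqrt(np.sum(
    (np.broadcast_to(centroid[:, None, None], data.shape) - data) ** 2
    * np.broadcast_to(weights[:, None, None], data.shape), axis=0))
# dist.shape == (nrays, nbins)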
Example #9
    def __init__(self, shape, spacing=(1., 1.), origin=(0., 0.)):
        spacing = np.broadcast_to(spacing, 2)
        origin = np.broadcast_to(origin, 2)

        node_y_and_x = (np.arange(shape[0]) * spacing[0] + origin[0],
                       np.arange(shape[1]) * spacing[1] + origin[1])

        super(DualUniformRectilinearGraph, self).__init__(node_y_and_x)
Example #10
def setup_node_coords(shape, spacing=1., origin=0.):
    spacing = np.broadcast_to(spacing, 2)
    origin = np.broadcast_to(origin, 2)

    rows = np.arange(shape[0], dtype=float) * spacing[0] + origin[0]
    cols = np.arange(shape[1], dtype=float) * spacing[1] + origin[1]

    return setup_node_coords_rectilinear((rows, cols))
Example #11
def analytic_dipole_setup(nside, nfreq, sigma=0.4, z0_cza=None):
    def transform_basis(nside, jones, z0_cza, R_z0):

        npix = hp.nside2npix(nside)
        hpxidx = np.arange(npix)
        cza, ra = hp.pix2ang(nside, hpxidx)

        fR = R_z0

        tb, pb = rotate_sphr_coords(fR, cza, ra)

        cza_v = t_hat_cart(cza, ra)
        ra_v = p_hat_cart(cza, ra)

        tb_v = t_hat_cart(tb, pb)

        fRcza_v = np.einsum('ab...,b...->a...', fR, cza_v)
        fRra_v = np.einsum('ab...,b...->a...', fR, ra_v)

        cosX = np.einsum('a...,a...', fRcza_v, tb_v)
        sinX = np.einsum('a...,a...', fRra_v, tb_v)


        basis_rot = np.array([[cosX, sinX],[-sinX, cosX]])
        basis_rot = np.transpose(basis_rot,(2,0,1))

        return np.einsum('...ab,...bc->...ac', jones, basis_rot)

    if z0_cza is None:
        z0_cza = np.radians(120.72)

    npix = hp.nside2npix(nside)
    hpxidx = np.arange(npix)
    th, phi = hp.pix2ang(nside, hpxidx)

    R_z0 = hp.rotator.Rotator(rot=[0,-np.degrees(z0_cza)])

    th_l, phi_l = R_z0(th, phi)
    phi_l[phi_l < 0] += 2. * np.pi

    ct,st = np.cos(th_l), np.sin(th_l)
    cp,sp = np.cos(phi_l), np.sin(phi_l)

    jones_dipole = np.array([
            [ct * cp, -sp],
            [ct * sp, cp]
        ], dtype=np.complex128).transpose(2,0,1)

    jones_c = transform_basis(nside, jones_dipole, z0_cza, np.array(R_z0.mat))

    G = np.exp(-(th_l/sigma)**2. /2.)

    G = np.broadcast_to(G, (2,2,npix)).T

    jones_c *= G
    jones_out = np.broadcast_to(jones_c, (nfreq, npix, 2,2))

    return jones_out
Example #12
File: vis.py Project: tlhr/plumology
def dist2D(dist: pd.DataFrame,
           ranges: pd.DataFrame,
           nlevels: int=16,
           nx: int=2,
           size: int=6,
           colorbar: bool=True,
           name: str='dist') -> plt.Figure:
    """
    Plot 2D probability distributions.

    Parameters
    ----------
    dist : Multiindexed dataframe with force field as primary
        index and distributions as created by dist2D().
    ranges : Multiindexed dataframe with force field as primary
        index and edges as created by dist1D().
    nlevels : Number of contour levels to use.
    nx : Number of plots per row.
    size : Relative size of each plot.
    colorbar : If true, will plot a colorbar.
    name : Name of the distribution.

    Returns
    -------
    fig : matplotlib figure.

    """

    # Setup plotting parameters
    nplots = dist.shape[1]
    xsize, ysize = nx, (nplots // nx) + 1
    cmap = plt.get_cmap('viridis')
    fig = plt.figure(figsize=(xsize * size, ysize * size))

    for i, k in enumerate(dist.keys()):

        # Get keys for both CVs
        kx, ky = k.split('.')

        # Prepare plotting grid (np.meshgrid doesn't work)
        X = np.broadcast_to(ranges[kx], dist[k].unstack().shape)
        Y = np.broadcast_to(ranges[ky], dist[k].unstack().shape).T
        Z = dist[k].unstack().values.T

        # Contour levels taking inf into account
        levels = np.linspace(np.amin(Z[~np.isinf(Z)]),
                             np.amax(Z[~np.isinf(Z)]), nlevels)
        ax = fig.add_subplot(ysize, xsize, i + 1)
        cm = ax.contourf(X, Y, Z, cmap=cmap, levels=levels)
        ax.set_xlabel(kx)
        ax.set_ylabel(ky)
        ax.set_title(name)

    if colorbar:
        fig.colorbar(cm)

    return fig
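
Note: the comment above rules out np.meshgrid; np.broadcast_to builds the same plotting grids directly from the 1-D edge arrays. In isolation (toy data assumed):

import numpy as np

xs = np.linspace(0., 1., 4)         # 1-D edges for one CV
shape = (5, 4)                      # (number of y bins, len(xs))
X = np.broadcast_to(xs, shape)      # every row repeats xs
# X matches np.meshgrid(xs, ys)[0] for any ys of length 5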
Example #13
def scalar_broadcast_match(a, b):
    """ Returns arguments as np.array, if one is a scalar it will broadcast the other one's shape.
    """
    a, b = np.atleast_1d(a, b)
    if a.size == 1 and b.size != 1:
        a = np.broadcast_to(a, b.shape)
    elif b.size == 1 and a.size != 1:
        b = np.broadcast_to(b, a.shape)
    return a, b
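
Note: example usage of the helper above:

>>> a, b = scalar_broadcast_match(2.0, np.array([1.0, 3.0, 5.0]))
>>> a
array([2., 2., 2.])
>>> b
array([1., 3., 5.])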
Example #14
    def __init__(self, shape, spacing=(1., 1.), origin=(0., 0.)):

        spacing = np.broadcast_to(spacing, 2)
        origin = np.broadcast_to(origin, 2)

        rows = np.arange(shape[0], dtype=float) * spacing[0] + origin[0]
        cols = np.arange(shape[1], dtype=float) * spacing[1] + origin[1]

        super(UniformRectilinearGraph, self).__init__((rows, cols))
Example #15
def _get_merged_embeddings(data_dict, mapping_fn, out_prefix):
    region_names = data_dict['region_names']
    region_weights = data_dict['region_weights']

    squeezed = region_weights.ndim == 1
    if squeezed:
        region_weights = region_weights[:, np.newaxis]

    n_subsets = region_weights.shape[1]

    mapped_names = [mapping_fn(r) for r in region_names]
    m_names = sorted(set(mapped_names))
    m_names_lookup = {n: i for i, n in enumerate(m_names)}

    transform = np.zeros(
        (len(m_names), len(region_names), n_subsets))
    for r_i, (m, w) in enumerate(zip(mapped_names, region_weights)):
        transform[m_names_lookup[m], r_i, :] = w

    m_weights = transform.sum(axis=1)

    # normalize transform so that its sum along axis 1 is 1
    # this is kind of gross to allow for zero sums...maybe there's a better way
    nz = np.broadcast_to((m_weights != 0)[:, np.newaxis, :], transform.shape)
    transform[nz] /= \
        np.broadcast_to(m_weights[:, np.newaxis, :], transform.shape)[nz]

    ret = {'{}_names'.format(out_prefix): m_names,
           '{}_weights'.format(out_prefix): m_weights}
    for k in data_dict:
        if k.startswith('emb_'):
            print("Mapping {}...".format(k), end='', file=sys.stderr)
            emb = data_dict[k]
            if squeezed:
                emb = emb[:, :, np.newaxis]

            # need to do a matrix multiply for each subset:
            #  - np.einsum('grs,rfs->gfs') would do this, but doesn't call BLAS
            #  - rolling the subset axis to the front and calling np.matmul
            #    would do this, but it just calls einsum anyway:
            #    https://github.com/numpy/numpy/issues/7569

            out = np.empty((n_subsets, len(m_names), emb.shape[1]))
            for i in range(n_subsets):
                np.dot(transform[:, :, i], emb[:, :, i], out=out[i])
            ret[k] = np.rollaxis(out, 0, 3)

            if squeezed:
                ret[k] = ret[k][:, :, 0]
            print("done", file=sys.stderr)
        elif k in {'region_names', 'region_weights'}:
            pass
        else:
            ret[k] = data_dict[k]
    return ret
Example #16
 def mfunc(self, x):
     N, n = x.shape
     if n != self._n:
         raise Exception("Input dimension mismatch")           
     p = np.broadcast_to(self._p, (N, self._m, self._n))
     q = np.broadcast_to(self._q, (N, self._m, self._n))
     r = np.broadcast_to(self._r, (N, self._m, self._n))        
     X = np.broadcast_to(x, (self._m, N, self._n))
     X = np.swapaxes(X, 0, 1)       
     self._M = self._mfunc(X, p, q, r)
     return self._M
Example #17
 def test_issue919(self):
     with Dataset(self.file,'w') as f:
         f.createDimension('time',2)
         f.createDimension('lat',10)
         f.createDimension('lon',9)
         f.createVariable('v1',np.int64,('time', 'lon','lat',))
         arr = np.arange(9*10).reshape((9, 10))
         f['v1'][:] = arr
         assert_array_equal(f['v1'][:],np.broadcast_to(arr,f['v1'].shape))
         arr = np.arange(10)
         f['v1'][:] = arr
         assert_array_equal(f['v1'][:],np.broadcast_to(arr,f['v1'].shape))
Example #18
def stabilization(data, m_hat, sigma, N, mask=None, clip_eta=True, return_eta=False, n_cores=None, mp_method=None):

    data = np.asarray(data)
    m_hat = np.asarray(m_hat)
    sigma = np.atleast_3d(sigma)
    N = np.atleast_3d(N)

    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        mask = np.asarray(mask, dtype=bool)

    if N.ndim < data.ndim:
        N = np.broadcast_to(N[..., None], data.shape)

    if sigma.ndim == (data.ndim - 1):
        sigma = np.broadcast_to(sigma[..., None], data.shape)

    # Check all dims are ok
    if (data.shape != sigma.shape):
        raise ValueError('data shape {} is not compatible with sigma shape {}'.format(data.shape, sigma.shape))

    if (data.shape[:-1] != mask.shape):
        raise ValueError('data shape {} is not compatible with mask shape {}'.format(data.shape, mask.shape))

    if (data.shape != m_hat.shape):
        raise ValueError('data shape {} is not compatible with m_hat shape {}'.format(data.shape, m_hat.shape))

    arglist = ((data[..., idx, :],
                m_hat[..., idx, :],
                mask[..., idx],
                sigma[..., idx, :],
                N[..., idx, :],
                clip_eta)
               for idx in range(data.shape[-2]))

    parallel_stabilization = multiprocesser(multiprocess_stabilization, n_cores=n_cores, mp_method=mp_method)
    output = parallel_stabilization(arglist)

    data_stabilized = np.zeros_like(data, dtype=np.float32)
    eta = np.zeros_like(data, dtype=np.float32)

    for idx, content in enumerate(output):
        data_stabilized[..., idx, :] = content[0]
        eta[..., idx, :] = content[1]

    if return_eta:
        return data_stabilized, eta
    return data_stabilized
Example #19
File: Atoms.py Project: usnistgov/atomman
        def __setitem__(self, key, value):
            """
            Modifies OrderedDict.__setitem__() such that
            1. All values are converted to numpy.ndarrays.
            2. For new keys, checks are done that data is compatible with host.natoms.
            3. New keys are also added as attributes to host Atoms object, if allowed.
            4. For existing keys, new values are saved over the old ones as opposed to
            only changing the name assignment.

            Parameters
            ----------
            key : str
                The property key name to assign values to.
            value : numpy.ndarray
                The values to assign.
            """
            # Shortcut to host
            host = self.__host
            
            # Python 2: change to unicode if needed.
            try:
                key = key.decode('UTF-8')
            except:
                pass
            
            # Convert to numpy.ndarray if needed
            value = np.asarray(value)

            # Broadcast if needed and allowed
            if value.shape == ():
                value = np.array(np.broadcast_to(value, (host.natoms,) + value.shape))
            elif value.shape[0] == 1:
                value = np.array(np.broadcast_to(value, (host.natoms,) + value.shape[1:]))
            elif value.shape[0] != host.natoms:
                raise ValueError('First dimension of value must be 1 or natoms')

            # If key is already assigned, save value over existing values
            if key in self.keys():
                self[key][:] = value
            
            # Otherwise, set new item and try assigning attribute to host
            else:
                super(Atoms.PropertyDict, self).__setitem__(key, value)
                try:
                    assert key not in dir(host)
                    super(Atoms, host).__setattr__(key, value)
                except:
                    pass
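
Note: the broadcasting rules described in the docstring, shown standalone (natoms assumed to be 4; np.array(...) copies the read-only broadcast view into a writable array):

import numpy as np

natoms = 4
scalar = np.asarray(1.5)                 # shape () -> (natoms,)
one_row = np.asarray([[0., 0., 0.]])     # shape (1, 3) -> (natoms, 3)
a = np.array(np.broadcast_to(scalar, (natoms,) + scalar.shape))
b = np.array(np.broadcast_to(one_row, (natoms,) + one_row.shape[1:]))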
Example #20
def root_finder_sigma(data, sigma, N, mask=None):
    """Compute the local corrected standard deviation for the adaptive nonlocal
    means according to the correction factor xi.

    Input
    --------
    data : ndarray
        Signal intensity
    sigma : ndarray
        Noise magnitude standard deviation
    N : ndarray or double
        Number of coils of the acquisition (N=1 for Rician noise)
    mask : ndarray, optional
        Compute only the corrected sigma value inside the mask.

    Return
    --------
    output, ndarray
        Corrected sigma value, where sigma_gaussian = sigma / sqrt(xi)
    """
    data = np.array(data)
    sigma = np.array(sigma)
    N = np.array(N)

    if mask is None:
        mask = np.ones_like(sigma, dtype=bool)
    else:
        mask = np.array(mask, dtype=bool)

    # Force 3D/4D broadcasting if needed
    if sigma.ndim == (data.ndim - 1):
        sigma = np.broadcast_to(sigma[..., None], data.shape)

    if N.ndim < data.ndim:
        N = np.broadcast_to(N[..., None], data.shape)

    corrected_sigma = np.zeros_like(data, dtype=np.float32)

    # To avoid exhausting RAM, process slice by slice and reuse the arrays in a for loop
    gaussian_SNR = np.zeros(np.count_nonzero(mask), dtype=np.float32)
    theta = np.zeros_like(gaussian_SNR)

    for idx in range(data.shape[-1]):
        theta[:] = data[..., idx][mask] / sigma[..., idx][mask]
        gaussian_SNR[:] = vec_root_finder(theta, N[..., idx][mask])
        corrected_sigma[..., idx][mask] = sigma[..., idx][mask] / np.sqrt(vec_xi(gaussian_SNR, 1, N[..., idx][mask]))

    return corrected_sigma
Example #21
 def get_scipy_batch_logpdf(self, idx):
     if not self.scipy_arg_fn:
         return
     dist_params = self.get_dist_params(idx, wrap_tensor=False)
     dist_params_wrapped = self.get_dist_params(idx)
     dist_params = self._convert_logits_to_ps(dist_params)
     test_data = self.get_test_data(idx, wrap_tensor=False)
     test_data_wrapped = self.get_test_data(idx)
     shape = self.pyro_dist.shape(test_data_wrapped, **dist_params_wrapped)
     batch_log_pdf = []
     for i in range(len(test_data)):
         batch_params = {}
         for k in dist_params:
             param = np.broadcast_to(dist_params[k], shape)
             batch_params[k] = param[i]
         args, kwargs = self.scipy_arg_fn(**batch_params)
         if self.is_discrete:
             batch_log_pdf.append(self.scipy_dist.logpmf(test_data[i],
                                                         *args,
                                                         **kwargs))
         else:
             batch_log_pdf.append(self.scipy_dist.logpdf(test_data[i],
                                                         *args,
                                                         **kwargs))
     return batch_log_pdf
Example #22
    def __get__(self, instance, frame_cls=None):
        if instance is None:
            out = self.default
        else:
            out = getattr(instance, '_' + self.name, self.default)
            if out is None:
                out = getattr(instance, self.secondary_attribute, self.default)

        out, converted = self.convert_input(out)
        if instance is not None:
            instance_shape = getattr(instance, 'shape', None)
            if instance_shape is not None and (getattr(out, 'size', 1) > 1 and
                                               out.shape != instance_shape):
                # If the shapes do not match, try broadcasting.
                try:
                    if isinstance(out, ShapedLikeNDArray):
                        out = out._apply(np.broadcast_to, shape=instance_shape,
                                         subok=True)
                    else:
                        out = np.broadcast_to(out, instance_shape, subok=True)
                except ValueError:
                    # raise more informative exception.
                    raise ValueError(
                        "attribute {0} should be scalar or have shape {1}, "
                        "but is has shape {2} and could not be broadcast."
                        .format(self.name, instance_shape, out.shape))

                converted = True

            if converted:
                setattr(instance, '_' + self.name, out)

        return out
Example #23
def two_player_zero_sum_game(num_strategies,
                             distribution=default_distribution):
    """Generate a two-player, zero-sum game"""
    # Generate player 1 payoffs
    num_strategies = np.broadcast_to(num_strategies, 2)
    p1_payoffs = distribution(num_strategies)[..., None]
    return rsgame.game_matrix(np.concatenate([p1_payoffs, -p1_payoffs], -1))
Example #24
File: datasets.py Project: Fhrozen/chainer
    def get_example(self, i):
        """Called by the iterator to fetch a data sample.

        A data sample from MSCOCO consists of an image and its corresponding
        caption.

        The returned image has the shape (channel, height, width).
        """
        ann = self.anns[i]

        # Load the image
        img_id = ann['image_id']
        img_file_name = self.coco.loadImgs([img_id])[0]['file_name']
        img = Image.open(
            os.path.join(self.coco_root, self.coco_data, img_file_name))
        if img.mode == 'RGB':
            img = np.asarray(img, np.float32).transpose(2, 0, 1)
        elif img.mode == 'L':
            img = np.asarray(img, np.float32)
            img = np.broadcast_to(img, (3,) + img.shape)
        else:
            raise ValueError('Invalid image mode {}'.format(img.mode))

        # Load the caption, i.e. sequence of tokens
        tokens = [self.vocab.get(w, _unk) for w in
                  ['<bos>'] + split(ann['caption']) + ['<eos>']]
        tokens = np.array(tokens, np.int32)

        return img, tokens
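
Note: the grayscale branch above in isolation: an (H, W) image becomes a 3-channel (3, H, W) array without copying (toy size assumed):

import numpy as np

img = np.random.rand(8, 8).astype(np.float32)   # grayscale (H, W)
rgb = np.broadcast_to(img, (3,) + img.shape)    # (3, 8, 8), read-only view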
Example #25
 def predict(self, x):
     N, n = x.shape
     W = np.broadcast_to(self._W, (N, self._m, self._n))
     M = self.mfunc(x)
     Y = np.multiply(W, M)
     y = Y.sum(axis=(1,2))
     return y
Example #26
 def fit(self, X_train, y_train, adaptive=True, inc=1.2, dec=0.5):
     N, n = X_train.shape
     if n != self._n:
         raise Exception("Input dimension mismatch")   
     if inc <= 1.0:
         raise Exception("Step increment should be > 1")
     if dec >= 1.0:
         raise Exception("Step decrement should be < 1")
         
     y = self.predict(X_train)  
     e = y_train - y
     err = np.square(e).mean()
     if adaptive:
         if err < self._err:
             self._W_prev = self._W
             self._step *= inc 
         else:
             self._W = self._W_prev
             self._step *= dec 
             
     dE_dM = np.broadcast_to(e, (self._n, self._m, N)) 
     dE_dM = np.swapaxes(dE_dM, 0, 2)
     dM_dW = self._M        
     dE_dW = dE_dM * dM_dW
     self._W += self._step * dE_dW.mean(axis = 0)   
     self._err = err
     return err
Example #27
def paths_to_3d_segments(paths, zs=0, zdir='z'):
    """Convert paths from a collection object to 3D segments."""

    zs = np.broadcast_to(zs, len(paths))
    segs = [path_to_3d_segment(path, pathz, zdir)
            for path, pathz in zip(paths, zs)]
    return segs
Example #28
File: irfs.py Project: jefemagril/fermipy
def create_edisp(event_class, event_type, erec, egy, cth):
    """Create an array of energy response values versus energy and
    inclination angle.

    Parameters
    ----------
    egy : `~numpy.ndarray`
        Energy in MeV.

    cth : `~numpy.ndarray`
        Cosine of the incidence angle.

    """
    irf = create_irf(event_class, event_type)
    theta = np.degrees(np.arccos(cth))
    v = np.zeros((len(erec), len(egy), len(cth)))
    m = (erec[:,None] / egy[None,:] < 3.0) & (erec[:,None] / egy[None,:] > 0.33333)
    #    m |= ((erec[:,None] / egy[None,:] < 3.0) &
    #          (erec[:,None] / egy[None,:] > 0.5) & (egy[None,:] < 10**2.5))    
    m = np.broadcast_to(m[:,:,None], v.shape)

    try:    
        x = np.ones(v.shape)*erec[:,None,None]
        y = np.ones(v.shape)*egy[None,:,None]
        z = np.ones(v.shape)*theta[None,None,:]
        v[m] = irf.edisp().value(np.ravel(x[m]), np.ravel(y[m]), np.ravel(z[m]), 0.0)
    except:
        for i, x in enumerate(egy):
            for j, y in enumerate(theta):
                m = (erec / x < 3.0) & (erec / x > 0.333)
                v[m, i, j] = irf.edisp().value(erec[m], x, y, 0.0)
        
    return v
Example #29
def add_noise_width(game, num_samples, max_width, noise=width_gaussian):
    """Create sample game where each profile has different noise level

    Parameters
    ----------
    game : Game
        The game to generate samples from. These samples are additive noise to
        standard payoff values.
    num_samples : int
        The number of samples to generate for each profile.
    max_width : float
        A parameter describing how much noise to generate. Larger max_width
        generates more noise.
    noise : (float, int, int) -> ndarray (optional)
        The noise generating function to use. The function must take three
        parameters: the max_width, the number of profiles, and the number of
        samples, and return an ndarray of the additive noise for each profile
        (shape: (num_profiles, num_samples)). The max_width should be used to
        generate sufficient statistics for each profile, and then each sample
        per profile should come from a distribution derived from those. For
        this to be accurate, this distribution should have expectation 0.
        Several default versions are specified in gamegen, and they're all
        prefixed with `width_`. By default, this uses `width_gaussian`.
    """
    spayoffs = game.payoffs[..., None].repeat(num_samples, -1)
    mask = game.profiles > 0
    samples = noise(max_width, mask.sum(), num_samples)
    expand_mask = np.broadcast_to(mask[..., None], mask.shape + (num_samples,))
    spayoffs[expand_mask] += samples.flat
    return rsgame.samplegame_copy(game, game.profiles, [spayoffs])
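
Note: a sketch of a noise function matching the contract the docstring describes; the name and the choice of distributions are assumptions, not gamegen's actual code:

import numpy as np

def width_gaussian_sketch(max_width, num_profiles, num_samples):
    # One standard deviation per profile (the sufficient statistic), then
    # zero-mean samples per profile, as the docstring requires.
    widths = np.random.uniform(0., max_width, num_profiles)
    return np.random.normal(0., widths[:, None], (num_profiles, num_samples))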
Example #30
 def testBroadcastToBasic(self):
   for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
     with self.test_session(use_gpu=True):
       x = np.array([1, 2, 3], dtype=dtype)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
       v_np = np.broadcast_to(x, [3, 3])
       self.assertAllEqual(v_tf.eval(), v_np)
Example #31
    def __init__(self,
                 problem_params,
                 dtype_u=parallel_mesh,
                 dtype_f=parallel_imex_mesh):
        """
        Initialization routine

        Args:
            problem_params (dict): custom parameters for the example
            dtype_u: fft data type (will be passed to parent class)
            dtype_f: fft data type with implicit and explicit parts (will be passed to parent class)
        """

        if 'L' not in problem_params:
            problem_params['L'] = 1.0
        if 'init_type' not in problem_params:
            problem_params['init_type'] = 'circle'
        if 'comm' not in problem_params:
            problem_params['comm'] = None
        if 'dw' not in problem_params:
            problem_params['dw'] = 1.0

        # these parameters will be used later, so assert their existence
        essential_keys = [
            'nvars', 'eps', 'L', 'radius', 'dw', 'spectral', 'TM', 'D'
        ]
        for key in essential_keys:
            if key not in problem_params:
                msg = 'need %s to instantiate problem, only got %s' % (
                    key, str(problem_params.keys()))
                raise ParameterError(msg)

        if not (isinstance(problem_params['nvars'], tuple)
                and len(problem_params['nvars']) > 1):
            raise ProblemError('Need at least two dimensions')

        # creating FFT structure
        ndim = len(problem_params['nvars'])
        axes = tuple(range(ndim))
        self.fft = PFFT(problem_params['comm'],
                        list(problem_params['nvars']),
                        axes=axes,
                        dtype=float,
                        collapse=True)

        # get test data to figure out type and dimensions
        tmp_u = newDistArray(self.fft, problem_params['spectral'])

        # add two components to contain field and temperature
        self.ncomp = 2
        sizes = tmp_u.shape + (self.ncomp, )

        # invoke super init, passing the communicator and the local dimensions as init
        super(allencahn_temp_imex,
              self).__init__(init=(sizes, problem_params['comm'], tmp_u.dtype),
                             dtype_u=dtype_u,
                             dtype_f=dtype_f,
                             params=problem_params)

        L = np.array([self.params.L] * ndim, dtype=float)

        # get local mesh
        X = np.ogrid[self.fft.local_slice(False)]
        N = self.fft.global_shape()
        for i in range(len(N)):
            X[i] = (X[i] * L[i] / N[i])
        self.X = [np.broadcast_to(x, self.fft.shape(False)) for x in X]

        # get local wavenumbers and Laplace operator
        s = self.fft.local_slice()
        N = self.fft.global_shape()
        k = [np.fft.fftfreq(n, 1. / n).astype(int) for n in N[:-1]]
        k.append(np.fft.rfftfreq(N[-1], 1. / N[-1]).astype(int))
        K = [ki[si] for ki, si in zip(k, s)]
        Ks = np.meshgrid(*K, indexing='ij', sparse=True)
        Lp = 2 * np.pi / L
        for i in range(ndim):
            Ks[i] = (Ks[i] * Lp[i]).astype(float)
        K = [np.broadcast_to(k, self.fft.shape(True)) for k in Ks]
        K = np.array(K).astype(float)
        self.K2 = np.sum(K * K, 0, dtype=float)

        # Need this for diagnostics
        self.dx = self.params.L / problem_params['nvars'][0]
        self.dy = self.params.L / problem_params['nvars'][1]
Example #32
def _broadcast_to(array, shape):
    if hasattr(numpy, "broadcast_to"):
        return numpy.broadcast_to(array, shape)
    dummy = numpy.empty(shape, array.dtype)
    return numpy.broadcast_arrays(array, dummy)[0]
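
Note: the fallback works because numpy.broadcast_arrays returns every input broadcast to the common shape, so element 0 is `array` expanded to `shape`:

a = _broadcast_to(numpy.arange(3), (4, 3))  # shape (4, 3) on any NumPy version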
Example #33
def parse_data(save=True):

    data_dict_x = {}  # RGB images, keyed by object id
    data_dict_d = {}  # depth images, keyed by object id
    data_dict_y = {}  # position labels (x, y, z), keyed by object id
    batch_dict = {}
    save_path_train = '/home/zhouzixuan/notebooks/proj_new/data/train/'
    save_path_test = '/home/zhouzixuan/notebooks/proj_new/data/test/'
    # example data extraction of x value of object/item 0 in training example 0: data.states[0].items[0].x
    num_examples = 18000  # number of screenshots
    num_items = []  # number of items in each example
    labels = []
    labels_rgb = []
    X_rgb = np.empty([0, 299, 299, 3])

    X_d = np.empty([0, 299, 299])
    batch_size = 32
    path = struct()
    path.data_name = '_SessionStateData.proto'
    path.data_folder = 'TeleOpVRSession_2018-02-05_15-44-11/'
    data0 = parse_protobufs(path)
    path.data_folder = 'TeleOpVRSession_2018-03-07_14-38-06_Camera1/'
    data1 = parse_protobufs(path)
    path.data_folder = 'TeleOpVRSession_2018-03-07_14-38-06_Camera2/'
    data2 = parse_protobufs(path)
    path.data_folder = 'TeleOpVRSession_2018-03-07_14-38-06_Camera3/'
    data3 = parse_protobufs(path)
    path.data_folder = 'TeleOpVRSession_2018-03-07_14-38-06_Camera4/'
    data4 = parse_protobufs(path)
    # format labels into n x 6 array
    for i in range(18000):
        print(i)
        path = struct()
        if i < 10000:
            t = i
            data = data0
        elif i < 12000:
            t = i - 10000
            data = data1
        elif i < 14000:
            t = i - 12000
            data = data2
        elif i < 16000:
            t = i - 14000
            data = data3
        else:
            t = i - 16000
            data = data4
        # `t` is the index within the current session file.
        num_items.append(len(data.states[t].items))
        img_name = str(data.states[t].snapshot.name)
        depth_name = img_name[:-4] + '-Depth.jpg'

        # read in rgb and depth images and add a new axis to them to indicate which snapshot index for each image
        rgb_img = np.expand_dims(cv2.imread(img_name, 1), axis=0)
        depth_img = np.expand_dims(cv2.imread(depth_name, 0), axis=0)

        for j in range(num_items[i]):

            item_id = str(data.states[t].items[j].id)
            item_id_int = data.states[t].items[j].id
            if item_id_int != 35:
                continue
            else:
                print(666)
            '''
			RGB label, classified by name
			input label (X)
			D label, classified by name
			input label (X)
			'''
            if item_id not in data_dict_x:
                data_dict_x[item_id] = np.empty([0, 299, 299, 3])
                data_dict_d[item_id] = np.empty([0, 299, 299])
            data_dict_x[item_id] = np.vstack([data_dict_x[item_id], rgb_img])
            data_dict_d[item_id] = np.vstack([data_dict_d[item_id], depth_img])
            '''
			RGB-D label, classified by name
			Batch split
			'''
            if item_id not in batch_dict:
                batch_dict[item_id] = 0

            # Output label (Y)
            rlabel = data.states[t].items[j]
            current_label = [
                data.states[t].snapshot.name, rlabel.x, rlabel.y, rlabel.z,
                rlabel.roll, rlabel.pitch, rlabel.yaw
            ]
            #print data.states[i].items[j].id
            labels.append(current_label)
            '''
			RGB label
			'''
            current_label_rgb = [rlabel.x, rlabel.y, rlabel.z]
            labels_rgb.append(current_label_rgb)
            '''
			RGB label, classified by name
			Output label (Y)
			'''

            if item_id not in data_dict_y:
                data_dict_y[item_id] = []
            data_dict_y[item_id].append(current_label_rgb)

            if len(data_dict_x[item_id]) == batch_size:
                batch = batch_dict[item_id]
                if i % 10 != 0:
                    tmp_path = save_path_train
                else:
                    tmp_path = save_path_test
                if not os.path.exists(tmp_path):
                    os.makedirs(tmp_path)

                np.save(tmp_path + "/" + str(batch) + "_x.npy",
                        data_dict_x[item_id])
                np.save(tmp_path + "/" + str(batch) + "_d.npy",
                        data_dict_d[item_id])
                np.save(tmp_path + "/" + str(batch) + "_y.npy",
                        np.array(data_dict_y[item_id]))

                train_path = "/home/zhouzixuan/notebooks/proj_new/3ddata/train/"
                test_path = "/home/zhouzixuan/notebooks/proj_new/3ddata/test/"
                if not os.path.exists(train_path):
                    os.makedirs(train_path)
                if not os.path.exists(test_path):
                    os.makedirs(test_path)

                d_batch = data_dict_d[item_id]
                x_batch = data_dict_x[item_id]
                d_round = np.floor(d_batch / 25.5)
                sess = tf.InteractiveSession()
                v = tf.transpose(tf.one_hot(d_round,
                                            depth=10,
                                            axis=2,
                                            on_value=1.0,
                                            off_value=0.0),
                                 perm=[0, 1, 3, 2])
                v = v.eval()
                combine = np.empty([32, 299, 299, 3, 0])
                # Use a separate loop variable so the outer snapshot index
                # `i` is not shadowed.
                for ch in range(10):
                    v_tmp = v[:, :, :, ch]
                    v_tmp = np.transpose(
                        np.broadcast_to(v_tmp, (3, 32, 299, 299)),
                        (1, 2, 3, 0))
                    v_tmp = v_tmp == 1
                    x_tmp = np.multiply(x_batch, v_tmp)
                    x_cur = np.expand_dims(x_tmp, axis=4)
                    combine = np.concatenate((combine, x_cur), axis=4)
                np.save(train_path + "/" + str(batch) + ".npy", combine)
                np.save(test_path + "/" + str(batch) + "_y.npy",
                        np.array(data_dict_y[item_id]))

                data_dict_x[item_id] = np.empty([0, 299, 299, 3])
                data_dict_d[item_id] = np.empty([0, 299, 299])
                data_dict_y[item_id] = []
                batch_dict[item_id] = 1 + batch
Example #34
File: __init__.py Project: emaitee/visdom
    def quiver(self, X, Y, gridX=None, gridY=None,
                            win=None, env=None, opts=None):
        """
        This function draws a quiver plot in which the direction and length of the
        arrows is determined by the `NxM` tensors `X` and `Y`. Two optional `NxM`
        tensors `gridX` and `gridY` can be provided that specify the offsets of
        the arrows; by default, the arrows are placed on a regular grid.

        The following `options` are supported:

        - `options.normalize`:  length of longest arrows (`number`)
        - `options.arrowheads`: show arrow heads (`boolean`; default = `true`)
        """

        # assertions:
        assert X.ndim == 2, 'X should be two-dimensional'
        assert Y.ndim == 2, 'Y should be two-dimensional'
        assert Y.shape == X.shape, 'X and Y should have the same size'

        # make sure we have a grid:
        N, M = X.shape[0], X.shape[1]
        if gridX is None:
            gridX = np.broadcast_to(np.expand_dims(np.arange(0, N), axis=1), (N, M))
        if gridY is None:
            gridY = np.broadcast_to(np.expand_dims(np.arange(0, M), axis=0), (N, M))
        assert gridX.shape == X.shape, 'X and gridX should have the same size'
        assert gridY.shape == Y.shape, 'Y and gridY should have the same size'

        # default options:
        opts = {} if opts is None else opts
        opts['mode'] = 'lines'
        opts['arrowheads'] = opts.get('arrowheads', True)
        _assert_opts(opts)

        # normalize vectors to unit length:
        if opts.get('normalize', False):
            assert isinstance(opts['normalize'], numbers.Number) and \
                opts['normalize'] > 0, \
                'opts.normalize should be positive number'
            magnitude = np.sqrt(np.add(np.multiply(X, X),
                                       np.multiply(Y, Y))).max()
            X = X / (magnitude / opts['normalize'])
            Y = Y / (magnitude / opts['normalize'])

        # interleave X and Y with copies / NaNs to get lines:
        nans = np.full((X.shape[0], X.shape[1]), np.nan).flatten()
        tipX = gridX + X
        tipY = gridY + Y
        dX = np.column_stack((gridX.flatten(), tipX.flatten(), nans))
        dY = np.column_stack((gridY.flatten(), tipY.flatten(), nans))

        # convert data to scatter plot format:
        dX = np.resize(dX, (dX.shape[0] * 3, 1))
        dY = np.resize(dY, (dY.shape[0] * 3, 1))
        data = np.column_stack((dX.flatten(), dY.flatten()))

        # add arrow heads:
        if opts['arrowheads']:

            # compute tip points:
            alpha = 0.33  # size of arrow head relative to vector length
            beta = 0.33   # width of the base of the arrow head
            Xbeta = (X + 1e-5) * beta
            Ybeta = (Y + 1e-5) * beta
            lX = np.add(-alpha * np.add(X, Ybeta), tipX)
            rX = np.add(-alpha * np.add(X, -Ybeta), tipX)
            lY = np.add(-alpha * np.add(Y, -Xbeta), tipY)
            rY = np.add(-alpha * np.add(Y, Xbeta), tipY)

            # add to data:
            hX = np.column_stack((lX.flatten(), tipX.flatten(),
                                  rX.flatten(), nans))
            hY = np.column_stack((lY.flatten(), tipY.flatten(),
                                  rY.flatten(), nans))
            hX = np.resize(hX, (hX.shape[0] * 4, 1))
            hY = np.resize(hY, (hY.shape[0] * 4, 1))
            data = np.concatenate((data, np.column_stack(
                (hX.flatten(), hY.flatten()))), axis=0)

        # generate scatter plot:
        return self.scatter(X=data, opts=opts, win=win, env=env)
Example #35
def _as_pairs(x, ndim, as_index=False):
    """
    Broadcast `x` to an array with the shape (`ndim`, 2).

    A helper function for `pad` that prepares and validates arguments like
    `pad_width` for iteration in pairs.

    Parameters
    ----------
    x : {None, scalar, array-like}
        The object to broadcast to the shape (`ndim`, 2).
    ndim : int
        Number of pairs the broadcasted `x` will have.
    as_index : bool, optional
        If `x` is not None, try to round each element of `x` to an integer
        (dtype `np.intp`) and ensure every element is positive.

    Returns
    -------
    pairs : nested iterables, shape (`ndim`, 2)
        The broadcasted version of `x`.

    Raises
    ------
    ValueError
        If `as_index` is True and `x` contains negative elements.
        Or if `x` is not broadcastable to the shape (`ndim`, 2).
    """
    if x is None:
        # Pass through None as a special case, otherwise np.round(x) fails
        # with an AttributeError
        return ((None, None), ) * ndim

    x = np.array(x)
    if as_index:
        x = np.round(x).astype(np.intp, copy=False)

    if x.ndim < 3:
        # Optimization: Possibly use faster paths for cases where `x` has
        # only 1 or 2 elements. `np.broadcast_to` could handle these as well
        # but is currently slower

        if x.size == 1:
            # x was supplied as a single value
            x = x.ravel()  # Ensure x[0] works for x.ndim == 0, 1, 2
            if as_index and x < 0:
                raise ValueError("index can't contain negative values")
            return ((x[0], x[0]), ) * ndim

        if x.size == 2 and x.shape != (2, 1):
            # x was supplied with a single value for each side
            # but except case when each dimension has a single value
            # which should be broadcasted to a pair,
            # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
            x = x.ravel()  # Ensure x[0], x[1] works
            if as_index and (x[0] < 0 or x[1] < 0):
                raise ValueError("index can't contain negative values")
            return ((x[0], x[1]), ) * ndim

    if as_index and x.min() < 0:
        raise ValueError("index can't contain negative values")

    # Converting the array with `tolist` seems to improve performance
    # when iterating and indexing the result (see usage in `pad`)
    return np.broadcast_to(x, (ndim, 2)).tolist()
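
Note: illustrative calls (values shown as plain ints for readability):

_as_pairs(1, 3)                   # ((1, 1), (1, 1), (1, 1))
_as_pairs((1, 2), 3)              # ((1, 2), (1, 2), (1, 2))
_as_pairs([[1, 2], [3, 4]], 2)    # [[1, 2], [3, 4]] via the broadcast_to path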
Example #36
def circles(x, y, s, c='b', ax=None, vmin=None, vmax=None, **kwargs):
    """
    Make a scatter plot of circles.
    Similar to plt.scatter, but the size of circles are in data scale.
    Parameters
    ----------
    x, y : scalar or array_like, shape (n, )
        Input data
    s : scalar or array_like, shape (n, )
        Radius of circles.
    c : color or sequence of color, optional, default : 'b'
        `c` can be a single color format string, or a sequence of color
        specifications of length `N`, or a sequence of `N` numbers to be
        mapped to colors using the `cmap` and `norm` specified via kwargs.
        Note that `c` should not be a single numeric RGB or RGBA sequence
        because that is indistinguishable from an array of values
        to be colormapped. (If you insist, use `color` instead.)
        `c` can be a 2-D array in which the rows are RGB or RGBA, however.
    vmin, vmax : scalar, optional, default: None
        `vmin` and `vmax` are used in conjunction with `norm` to normalize
        luminance data.  If either are `None`, the min and max of the
        color array is used.
    kwargs : `~matplotlib.collections.Collection` properties
        E.g. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
        norm, cmap, transform, etc.
    Returns
    -------
    paths : `~matplotlib.collections.PathCollection`
    Examples
    --------
    a = np.arange(11)
    circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
    plt.colorbar()
    License
    --------
    This code is under [The BSD 3-Clause License]
    (http://opensource.org/licenses/BSD-3-Clause)
    """

    if np.isscalar(c):
        kwargs.setdefault('color', c)
        c = None

    if 'fc' in kwargs:
        kwargs.setdefault('facecolor', kwargs.pop('fc'))
    if 'ec' in kwargs:
        kwargs.setdefault('edgecolor', kwargs.pop('ec'))
    if 'ls' in kwargs:
        kwargs.setdefault('linestyle', kwargs.pop('ls'))
    if 'lw' in kwargs:
        kwargs.setdefault('linewidth', kwargs.pop('lw'))
    # You can set `facecolor` with an array for each patch,
    # while you can only set `facecolors` with a value for all.

    zipped = np.broadcast(x, y, s)
    patches = [
        matplotlib.patches.Circle((x_, y_), s_) for x_, y_, s_ in zipped
    ]
    collection = matplotlib.collections.PatchCollection(patches, **kwargs)
    if c is not None:
        c = np.broadcast_to(c, zipped.shape).ravel()
        collection.set_array(c)
        collection.set_clim(vmin, vmax)

    if ax is None:
        ax = plt.gca()
    ax.add_collection(collection)
    ax.autoscale_view()
    plt.draw_if_interactive()
    if c is not None:
        plt.sci(collection)
    return collection
Example #37
result = wms_bands[image_idx].copy()
mask2d = cloud_masks[image_idx].copy()
cloud_masks[image_idx].shape

# #### copy mask x times (no. of bands)

# In[28]:

mask3d = np.repeat(cloud_masks[image_idx][:, :, np.newaxis], 10, axis=2)
mask3d.shape

# The following solutions are two ways of doing the same thing and seem correct

# In[69]:

sentinel_pre_mask = np.broadcast_to(mask3d == 1, result.shape)
sentinel_pre_cl_free = np.ma.masked_array(result, mask=sentinel_pre_mask)
#resultmasked = np.ma.masked_array(result[1],mask=cloud_masks[4])
resultmasked = np.ma.array(result, mask=mask3d)

#xm2=np.ma.array(result, mask=result*m3d)
#plot_image(sentinel_pre_cl_free)
#plot_image(sentinel_pre_cl_free)
#plot_image(resultmasked)
spm = np.broadcast_to(mask3d == 1, result.shape)
np.unique(spm)
spcf = np.ma.masked_array(result, mask=spm)

masked = spcf.data

# In[36]:
Example #38
 def make_history_mask(self, block):
     batch, length = block.shape
     arange = np.arange(length)
     history_mask = (arange[None, ] <= arange[:, None])[None, ]
     history_mask = np.broadcast_to(history_mask, (batch, length, length))
     return history_mask
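
Note: for a block of shape (1, 3) the mask is lower-triangular, so each position may attend to itself and to earlier positions only:

# make_history_mask(block) with batch=1, length=3 returns
# array([[[ True, False, False],
#         [ True,  True, False],
#         [ True,  True,  True]]])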
Example #39
File: base.py Project: khprash/vectorbt
    def from_order_func(cls, main_price, order_func_nb, *args, init_capital=None, freq=None, **kwargs):
        """Build portfolio from a custom order function.

        Starting with initial capital `init_capital`, iterates over shape `main_price.shape`, and for
        each data point, generates an order using `order_func_nb`. This way, you can specify order
        size, price and transaction costs dynamically (for example, based on the current balance).

        To iterate over a bigger shape than `main_price`, you should tile/repeat `main_price` to the desired shape.

        Args:
            main_price (pandas_like): Main price of the asset, such as close.

                Must be a pandas object.
            order_func_nb (function): Function that returns an order.

                See `vectorbt.portfolio.enums.Order`.
            *args: Arguments passed to `order_func_nb`.
            init_capital (int, float or array_like): The initial capital.

                Single value or value per column.
            freq (any): Index frequency in case `main_price.index` is not datetime-like.
            **kwargs: Keyword arguments passed to the `__init__` method.

        For defaults, see `vectorbt.defaults.portfolio`.

        All time series will be broadcasted together using `vectorbt.base.reshape_fns.broadcast`.
        At the end, they will have the same metadata.

        !!! note
            `order_func_nb` must be Numba-compiled.

        Example:
            Portfolio from buying daily:
            ```python-repl
            >>> from vectorbt.portfolio import Order

            >>> @njit
            ... def order_func_nb(col, i, run_cash, run_shares, price):
            ...     return Order(10, price[i], fees=0.01, fixed_fees=1., slippage=0.01)

            >>> portfolio = vbt.Portfolio.from_order_func(
            ...     price, order_func_nb, price.values, init_capital=100)

            >>> print(portfolio.orders.records)
               col  idx  size  price   fees  side
            0    0    0  10.0   1.01  1.101     0
            1    0    1  10.0   2.02  1.202     0
            2    0    2  10.0   3.03  1.303     0
            3    0    3  10.0   2.02  1.202     0
            4    0    4  10.0   1.01  1.101     0
            >>> print(portfolio.equity)
            2020-01-01     98.799
            2020-01-02    107.397
            2020-01-03    125.794
            2020-01-04     94.392
            2020-01-05     53.191
            Name: a, dtype: float64
            ```
        """
        # Get defaults
        if init_capital is None:
            init_capital = defaults.portfolio['init_capital']

        # Perform checks
        checks.assert_type(main_price, (pd.Series, pd.DataFrame))
        checks.assert_numba_func(order_func_nb)

        # Broadcast inputs
        target_shape = (main_price.shape[0], main_price.shape[1] if main_price.ndim > 1 else 1)
        init_capital = np.broadcast_to(init_capital, (target_shape[1],))

        # Perform calculation
        order_records, cash, shares = nb.simulate_nb(
            target_shape,
            init_capital,
            order_func_nb,
            *args)

        # Bring to the same meta
        wrapper = ArrayWrapper.from_obj(main_price, freq=freq)
        cash = wrapper.wrap(cash)
        shares = wrapper.wrap(shares)
        orders = Orders(order_records, main_price, freq=freq)
        if checks.is_series(main_price):
            init_capital = init_capital[0]
        else:
            init_capital = wrapper.wrap_reduced(init_capital)

        return cls(main_price, init_capital, orders, cash, shares, freq=freq, **kwargs)
Example #40
def test_broadcast_2():
    input_data = np.arange(4)
    new_shape = [3, 4, 2, 4]
    expected = np.broadcast_to(input_data, new_shape)
    result = run_op_node([input_data], ng.broadcast, new_shape)
    assert np.allclose(result, expected)
Example #41
File: base.py Project: khprash/vectorbt
    def from_signals(cls, main_price, entries, exits, size=np.inf, entry_price=None, exit_price=None,
                     init_capital=None, fees=None, fixed_fees=None, slippage=None, accumulate=False,
                     broadcast_kwargs={}, freq=None, **kwargs):
        """Build portfolio from entry and exit signals.

        At each entry signal in `entries`, buys `size` of shares for `entry_price` to enter
        a position. At each exit signal in `exits`, sells everything for `exit_price`
        to exit the position. Accumulation of orders is disabled by default.

        Args:
            main_price (pandas_like): Main price of the asset, such as close.
            entries (array_like): Boolean array of entry signals.
            exits (array_like): Boolean array of exit signals.
            size (int, float or array_like): The amount of shares to order.

                To buy/sell everything, set the size to `np.inf`.
            entry_price (array_like): Entry price. Defaults to `main_price`.
            exit_price (array_like): Exit price. Defaults to `main_price`.
            init_capital (int, float or array_like): The initial capital.

                Single value or value per column.
            fees (float or array_like): Fees in percentage of the order value.

                Single value, value per column, or value per element.
            fixed_fees (float or array_like): Fixed amount of fees to pay per order.

                Single value, value per column, or value per element.
            slippage (float or array_like): Slippage in percentage of price.

                Single value, value per column, or value per element.
            accumulate (bool): If `True`, an entry signal that occurs while already in the market
                increases the position instead of being ignored.
            broadcast_kwargs: Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
            freq (any): Index frequency in case `main_price.index` is not datetime-like.
            **kwargs: Keyword arguments passed to the `__init__` method.

        For defaults, see `vectorbt.defaults.portfolio`.

        All time series will be broadcast together using `vectorbt.base.reshape_fns.broadcast`.
        At the end, they will have the same metadata.

        Example:
            Portfolio from various signal sequences:
            ```python-repl
            >>> entries = pd.DataFrame({
            ...     'a': [True, False, False, False, False],
            ...     'b': [True, False, True, False, True],
            ...     'c': [True, True, True, True, True]
            ... }, index=index)
            >>> exits = pd.DataFrame({
            ...     'a': [False, False, False, False, False],
            ...     'b': [False, True, False, True, False],
            ...     'c': [True, True, True, True, True]
            ... }, index=index)
            >>> portfolio = vbt.Portfolio.from_signals(
            ...     price, entries, exits, size=10,
            ...     init_capital=100, fees=0.0025, fixed_fees=1., slippage=0.001)

            >>> print(portfolio.orders.records)
               col  idx  size  price      fees  side
            0    0    0  10.0  1.001  1.025025     0
            1    1    0  10.0  1.001  1.025025     0
            2    1    1  10.0  1.998  1.049950     1
            3    1    2  10.0  3.003  1.075075     0
            4    1    3  10.0  1.998  1.049950     1
            5    1    4  10.0  1.001  1.025025     0
            6    2    0  10.0  1.001  1.025025     0
            >>> print(portfolio.equity)
                                 a           b           c
            2020-01-01   98.964975   98.964975   98.964975
            2020-01-02  108.964975  107.895025  108.964975
            2020-01-03  118.964975  106.789950  118.964975
            2020-01-04  108.964975   95.720000  108.964975
            2020-01-05   98.964975   94.684975   98.964975
            ```
        """
        # Get defaults
        if entry_price is None:
            entry_price = main_price
        if exit_price is None:
            exit_price = main_price
        if init_capital is None:
            init_capital = defaults.portfolio['init_capital']
        if fees is None:
            fees = defaults.portfolio['fees']
        if fixed_fees is None:
            fixed_fees = defaults.portfolio['fixed_fees']
        if slippage is None:
            slippage = defaults.portfolio['slippage']

        # Perform checks
        checks.assert_type(main_price, (pd.Series, pd.DataFrame))
        checks.assert_dtype(entries, np.bool_)
        checks.assert_dtype(exits, np.bool_)

        # Broadcast inputs
        main_price, entries, exits, size, entry_price, exit_price, fees, fixed_fees, slippage = \
            reshape_fns.broadcast(
                main_price, entries, exits, size, entry_price, exit_price, fees,
                fixed_fees, slippage, **broadcast_kwargs, writeable=True)
        target_shape = (main_price.shape[0], main_price.shape[1] if main_price.ndim > 1 else 1)
        init_capital = np.broadcast_to(init_capital, (target_shape[1],))

        # Perform calculation
        order_records, cash, shares = nb.simulate_from_signals_nb(
            target_shape,
            init_capital,
            reshape_fns.to_2d(entries, raw=True),
            reshape_fns.to_2d(exits, raw=True),
            reshape_fns.to_2d(size, raw=True),
            reshape_fns.to_2d(entry_price, raw=True),
            reshape_fns.to_2d(exit_price, raw=True),
            reshape_fns.to_2d(fees, raw=True),
            reshape_fns.to_2d(fixed_fees, raw=True),
            reshape_fns.to_2d(slippage, raw=True),
            accumulate)

        # Bring to the same meta
        wrapper = ArrayWrapper.from_obj(main_price, freq=freq)
        cash = wrapper.wrap(cash)
        shares = wrapper.wrap(shares)
        orders = Orders(order_records, main_price, freq=freq)
        if checks.is_series(main_price):
            init_capital = init_capital[0]
        else:
            init_capital = wrapper.wrap_reduced(init_capital)

        return cls(main_price, init_capital, orders, cash, shares, freq=freq, **kwargs)
Example #42
0
def write(filename, mesh, binary=True):  # noqa: C901

    with open_file(filename, "wb") as fh:
        fh.write(b"ply\n")

        if binary:
            fh.write(
                f"format binary_{sys.byteorder}_endian 1.0\n".encode("utf-8"))
        else:
            fh.write(b"format ascii 1.0\n")

        fh.write("comment Created by meshio v{}, {}\n".format(
            __version__,
            datetime.datetime.now().isoformat()).encode("utf-8"))

        # counts
        fh.write("element vertex {:d}\n".format(
            mesh.points.shape[0]).encode("utf-8"))
        #
        dim_names = ["x", "y", "z"]
        # From <https://en.wikipedia.org/wiki/PLY_(file_format)>:
        #
        # > The type can be specified with one of char uchar short ushort int uint float
        # > double, or one of int8 uint8 int16 uint16 int32 uint32 float32 float64.
        #
        # We're adding [u]int64 here.
        type_name_table = {
            numpy.dtype(numpy.int8): "int8",
            numpy.dtype(numpy.int16): "int16",
            numpy.dtype(numpy.int32): "int32",
            numpy.dtype(numpy.int64): "int64",
            numpy.dtype(numpy.uint8): "uint8",
            numpy.dtype(numpy.uint16): "uint16",
            numpy.dtype(numpy.uint32): "uint32",
            numpy.dtype(numpy.uint64): "uint64",
            numpy.dtype(numpy.float32): "float",
            numpy.dtype(numpy.float64): "double",
        }
        for k in range(mesh.points.shape[1]):
            type_name = type_name_table[mesh.points.dtype]
            fh.write("property {} {}\n".format(type_name,
                                               dim_names[k]).encode("utf-8"))

        pd = []
        for key, value in mesh.point_data.items():
            if len(value.shape) > 1:
                warnings.warn(
                    "PLY writer doesn't support multidimensional point data yet. Skipping {}."
                    .format(key))
                continue
            type_name = type_name_table[value.dtype]
            fh.write(f"property {type_name} {key}\n".encode("utf-8"))
            pd.append(value)

        num_cells = 0
        for cell_type, c in mesh.cells:
            if cell_type_to_count(cell_type):
                num_cells += c.data.shape[0]
        if num_cells > 0:
            fh.write(f"element face {num_cells:d}\n".encode("utf-8"))

            # possibly cast down to int32
            has_cast = False
            for k, (cell_type, data) in enumerate(mesh.cells):
                if data.dtype == numpy.int64:
                    has_cast = True
                    mesh.cells[k] = CellBlock(cell_type,
                                              data.astype(numpy.int32))

            if has_cast:
                warnings.warn(
                    "PLY doesn't support 64-bit integers. Casting down to 32-bit."
                )

            # assert that all cell dtypes are equal
            cell_dtype = None
            for _, cell in mesh.cells:
                if cell_dtype is None:
                    cell_dtype = cell.dtype
                if cell.dtype != cell_dtype:
                    raise WriteError()

            if cell_dtype is not None:
                ply_type = numpy_to_ply_dtype[cell_dtype]
                fh.write("property list {} {} vertex_indices\n".format(
                    "uint8", ply_type).encode("utf-8"))

        # TODO other cell data
        fh.write(b"end_header\n")

        if binary:
            # points and point_data
            out = numpy.rec.fromarrays([coord for coord in mesh.points.T] + pd)
            fh.write(out.tobytes())

            # cells
            for cell_type, data in mesh.cells:
                if cell_type_to_count(cell_type) is None:
                    warnings.warn(
                        'cell_type "{}" is not supported by ply format - skipping'
                        .format(cell_type))
                    continue
                # prepend with count
                out = numpy.rec.fromarrays([
                    numpy.broadcast_to(numpy.uint8(data.shape[1]),
                                       data.shape[0]),
                    *data.T,
                ])
                fh.write(out.tobytes())
        else:
            # vertices
            # numpy.savetxt(fh, mesh.points, "%r")  # slower
            # out = numpy.column_stack([mesh.points] + list(mesh.point_data.values()))
            out = numpy.rec.fromarrays([coord for coord in mesh.points.T] + pd)
            fmt = " ".join(["{}"] * len(out[0]))
            out = "\n".join([fmt.format(*row) for row in out]) + "\n"
            fh.write(out.encode("utf-8"))

            # cells
            for cell_type, data in mesh.cells:
                #                if cell_type not in cell_type_to_count.keys():
                #                    continue
                out = numpy.column_stack([
                    numpy.full(data.shape[0], data.shape[1], dtype=data.dtype),
                    data
                ])
                # savetxt is slower
                # numpy.savetxt(fh, out, "%d  %d %d %d")
                fmt = " ".join(["{}"] * out.shape[1])
                out = "\n".join([fmt.format(*row) for row in out]) + "\n"
                fh.write(out.encode("utf-8"))
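A hedged aside on the binary face block above (plain NumPy, independent of meshio): `numpy.rec.fromarrays` interleaves the broadcast per-face vertex count with the index columns, producing exactly the `property list uint8 ...` layout declared in the header.

```python-repl
>>> import numpy
>>> data = numpy.array([[0, 1, 2], [2, 3, 0]], dtype=numpy.int32)  # two triangles
>>> counts = numpy.broadcast_to(numpy.uint8(data.shape[1]), data.shape[0])
>>> out = numpy.rec.fromarrays([counts, *data.T])
>>> out[0]                 # one record: count byte, then the three vertex indices
(3, 0, 1, 2)
>>> len(out.tobytes())     # 2 faces * (1 count byte + 3 * 4 index bytes)
26
```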
Example #43
0
File: plot.py Project: SofianeB/xarray
    def newplotfunc(darray, x=None, y=None, figsize=None, size=None,
                    aspect=None, ax=None, row=None, col=None,
                    col_wrap=None, xincrease=True, yincrease=True,
                    add_colorbar=None, add_labels=True, vmin=None, vmax=None,
                    cmap=None, center=None, robust=False, extend=None,
                    levels=None, infer_intervals=None, colors=None,
                    subplot_kws=None, cbar_ax=None, cbar_kwargs=None,
                    xscale=None, yscale=None, xticks=None, yticks=None,
                    xlim=None, ylim=None, norm=None, **kwargs):
        # All 2d plots in xarray share this function signature.
        # Method signature below should be consistent.

        # Decide on a default for the colorbar before facetgrids
        if add_colorbar is None:
            add_colorbar = plotfunc.__name__ != 'contour'
        imshow_rgb = (
            plotfunc.__name__ == 'imshow' and
            darray.ndim == (3 + (row is not None) + (col is not None)))
        if imshow_rgb:
            # Don't add a colorbar when showing an image with explicit colors
            add_colorbar = False
            # Matplotlib does not support normalising RGB data, so do it here.
            # See eg. https://github.com/matplotlib/matplotlib/pull/10220
            if robust or vmax is not None or vmin is not None:
                darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)
                vmin, vmax, robust = None, None, False

        # Handle facetgrids first
        if row or col:
            allargs = locals().copy()
            allargs.pop('imshow_rgb')
            allargs.update(allargs.pop('kwargs'))

            # Need the decorated plotting function
            allargs['plotfunc'] = globals()[plotfunc.__name__]

            return _easy_facetgrid(**allargs)

        plt = import_matplotlib_pyplot()

        # colors is mutually exclusive with cmap
        if cmap and colors:
            raise ValueError("Can't specify both cmap and colors.")
        # colors is only valid when levels is supplied or the plot is of type
        # contour or contourf
        if colors and (('contour' not in plotfunc.__name__) and (not levels)):
            raise ValueError("Can only specify colors with contour or levels")
        # we should not be getting a list of colors in cmap anymore
        # is there a better way to do this test?
        if isinstance(cmap, (list, tuple)):
            warnings.warn("Specifying a list of colors in cmap is deprecated. "
                          "Use colors keyword instead.",
                          DeprecationWarning, stacklevel=3)

        rgb = kwargs.pop('rgb', None)
        xlab, ylab = _infer_xy_labels(
            darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb)

        if rgb is not None and plotfunc.__name__ != 'imshow':
            raise ValueError('The "rgb" keyword is only valid for imshow()')
        elif rgb is not None and not imshow_rgb:
            raise ValueError('The "rgb" keyword is only valid for imshow() '
                             'with a three-dimensional array (per facet)')

        # better to pass the ndarrays directly to plotting functions
        xval = darray[xlab].values
        yval = darray[ylab].values

        # check if we need to broadcast one dimension
        if xval.ndim < yval.ndim:
            xval = np.broadcast_to(xval, yval.shape)

        if yval.ndim < xval.ndim:
            yval = np.broadcast_to(yval, xval.shape)

        # May need to transpose for correct x, y labels
        # xlab may be the name of a coord, we have to check for dim names
        if imshow_rgb:
            # For RGB[A] images, matplotlib requires the color dimension
            # to be last.  In Xarray the order should be unimportant, so
            # we transpose to (y, x, color) to make this work.
            yx_dims = (ylab, xlab)
            dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims)
            if dims != darray.dims:
                darray = darray.transpose(*dims)
        elif darray[xlab].dims[-1] == darray.dims[0]:
            darray = darray.transpose()

        # Pass the data as a masked ndarray too
        zval = darray.to_masked_array(copy=False)

        # Replace pd.Intervals if contained in xval or yval.
        xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)
        yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)

        _ensure_plottable(xplt, yplt)

        if 'contour' in plotfunc.__name__ and levels is None:
            levels = 7  # this is the matplotlib default

        cmap_kwargs = {'plot_data': zval.data,
                       'vmin': vmin,
                       'vmax': vmax,
                       'cmap': colors if colors else cmap,
                       'center': center,
                       'robust': robust,
                       'extend': extend,
                       'levels': levels,
                       'filled': plotfunc.__name__ != 'contour',
                       'norm': norm,
                       }

        cmap_params = _determine_cmap_params(**cmap_kwargs)

        if 'contour' in plotfunc.__name__:
            # extend is a keyword argument only for contour and contourf, but
            # passing it to the colorbar is sufficient for imshow and
            # pcolormesh
            kwargs['extend'] = cmap_params['extend']
            kwargs['levels'] = cmap_params['levels']
            # if colors == a single color, matplotlib draws dashed negative
            # contours. we lose this feature if we pass cmap and not colors
            if isinstance(colors, str):
                cmap_params['cmap'] = None
                kwargs['colors'] = colors

        if 'pcolormesh' == plotfunc.__name__:
            kwargs['infer_intervals'] = infer_intervals

        if 'imshow' == plotfunc.__name__ and isinstance(aspect, str):
            # forbid usage of mpl strings
            raise ValueError("plt.imshow's `aspect` kwarg is not available "
                             "in xarray")

        ax = get_axis(figsize, size, aspect, ax)
        primitive = plotfunc(xplt, yplt, zval, ax=ax, cmap=cmap_params['cmap'],
                             vmin=cmap_params['vmin'],
                             vmax=cmap_params['vmax'],
                             norm=cmap_params['norm'],
                             **kwargs)

        # Label the plot with metadata
        if add_labels:
            ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))
            ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))
            ax.set_title(darray._title_for_slice())

        if add_colorbar:
            cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs)
            cbar_kwargs.setdefault('extend', cmap_params['extend'])
            if cbar_ax is None:
                cbar_kwargs.setdefault('ax', ax)
            else:
                cbar_kwargs.setdefault('cax', cbar_ax)
            cbar = plt.colorbar(primitive, **cbar_kwargs)
            if add_labels and 'label' not in cbar_kwargs:
                cbar.set_label(label_from_attrs(darray))
        elif cbar_ax is not None or cbar_kwargs is not None:
            # inform the user about keywords which aren't used
            raise ValueError("cbar_ax and cbar_kwargs can't be used with "
                             "add_colorbar=False.")

        # origin kwarg overrides yincrease
        if 'origin' in kwargs:
            yincrease = None

        _update_axes(ax, xincrease, yincrease, xscale, yscale,
                     xticks, yticks, xlim, ylim)

        # Rotate dates on xlabels
        # Do this without calling autofmt_xdate so that x-axes ticks
        # on other subplots (if any) are not deleted.
        # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots
        if np.issubdtype(xplt.dtype, np.datetime64):
            for xlabels in ax.get_xticklabels():
                xlabels.set_rotation(30)
                xlabels.set_ha('right')

        return primitive
Example #44
0
 def _broadcast(self, x, shape):
     x_shape = np.array(x).shape
     x = x.reshape(x_shape + (1,) * (len(shape) - len(x_shape)))
     return np.broadcast_to(x, shape)
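Unlike plain `np.broadcast_to`, which aligns trailing axes, the `_broadcast` helper above appends new axes on the right so the existing dimensions stay leftmost. A quick sketch of the same steps outside the class:

```python-repl
>>> import numpy as np
>>> x = np.array([1., 2., 3.])                       # shape (3,)
>>> shape = (3, 4)
>>> padded = x.reshape(x.shape + (1,) * (len(shape) - x.ndim))
>>> padded.shape                                     # new axis appended on the right
(3, 1)
>>> np.broadcast_to(padded, shape)                   # each value fills its own row
array([[1., 1., 1., 1.],
       [2., 2., 2., 2.],
       [3., 3., 3., 3.]])
```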
Example #45
0
def ones(shape, dtype=onp.dtype("float64")):
  shape = (shape,) if onp.isscalar(shape) else shape
  dtype = xla_bridge.canonicalize_dtype(dtype)
  return onp.broadcast_to(onp.ones((), dtype), tuple(shape))
Example #46
0
 def _T(self, time_step) -> np.ndarray:  # (Nj, W), read-only
     return np.broadcast_to(self.temperature(time_step), (self.circuit._Nj(), self.get_problem_count()))\
         if hasattr(self.temperature, "__call__") else self.temperature[:, :, time_step]
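`_T` above (like `_Vs` and `_f` further down in this collection) follows a callable-or-array convention: a callable is evaluated at the time step and broadcast to the full `(rows, problems)` shape, while a precomputed `(N, W, T)` array is simply sliced. A hedged toy version of the same dispatch:

```python-repl
>>> import numpy as np
>>> def param_at(param, step, shape):
...     """Evaluate-and-broadcast a callable, or slice a stored (N, W, T) array."""
...     return (np.broadcast_to(param(step), shape)
...             if callable(param) else param[:, :, step])
>>> param_at(lambda step: 4.2, step=0, shape=(3, 2))   # scalar-valued callable
array([[4.2, 4.2],
       [4.2, 4.2],
       [4.2, 4.2]])
>>> param_at(np.zeros((3, 2, 5)), step=4, shape=(3, 2)).shape
(3, 2)
```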
Example #47
0
    def group_keypoints(self, all_keypoints_by_type, pafs, pose_entry_size=20):
        all_keypoints = np.concatenate(all_keypoints_by_type, axis=0)
        pose_entries = []
        # For every limb.
        for part_id, paf_channel in enumerate(self.paf_indices):
            kpt_a_id, kpt_b_id = self.skeleton[part_id]
            kpts_a = all_keypoints_by_type[kpt_a_id]
            kpts_b = all_keypoints_by_type[kpt_b_id]
            n = len(kpts_a)
            m = len(kpts_b)
            if n == 0 or m == 0:
                continue

            # Get vectors between all pairs of keypoints, i.e. candidate limb vectors.
            a = kpts_a[:, :2]
            a = np.broadcast_to(a[None], (m, n, 2))
            b = kpts_b[:, :2]
            vec_raw = (b[:, None, :] - a).reshape(-1, 1, 2)

            # Sample points along every candidate limb vector.
            steps = (1 / (self.points_per_limb - 1) * vec_raw)
            points = steps * self.grid + a.reshape(-1, 1, 2)
            points = points.round().astype(dtype=np.int32)
            x = points[..., 0].ravel()
            y = points[..., 1].ravel()

            # Compute affinity score between candidate limb vectors and part affinity field.
            part_pafs = pafs[0, :, :, paf_channel:paf_channel + 2]
            field = part_pafs[y, x].reshape(-1, self.points_per_limb, 2)
            vec_norm = np.linalg.norm(vec_raw, ord=2, axis=-1, keepdims=True)
            vec = vec_raw / (vec_norm + 1e-6)
            affinity_scores = (field * vec).sum(-1).reshape(
                -1, self.points_per_limb)
            valid_affinity_scores = affinity_scores > self.min_paf_alignment_score
            valid_num = valid_affinity_scores.sum(1)
            affinity_scores = (affinity_scores * valid_affinity_scores
                               ).sum(1) / (valid_num + 1e-6)
            success_ratio = valid_num / self.points_per_limb

            # Get a list of limbs according to the obtained affinity score.
            valid_limbs = np.where(
                np.logical_and(affinity_scores > 0, success_ratio > 0.8))[0]
            if len(valid_limbs) == 0:
                continue
            b_idx, a_idx = np.divmod(valid_limbs, n)
            affinity_scores = affinity_scores[valid_limbs]

            # Suppress incompatible connections.
            a_idx, b_idx, affinity_scores = self.connections_nms(
                a_idx, b_idx, affinity_scores)
            connections = list(
                zip(kpts_a[a_idx, 3].astype(np.int32),
                    kpts_b[b_idx, 3].astype(np.int32), affinity_scores))
            if len(connections) == 0:
                continue

            # Update poses with new connections.
            pose_entries = self.update_poses(kpt_a_id, kpt_b_id, all_keypoints,
                                             connections, pose_entries,
                                             pose_entry_size)

        # Remove poses with not enough points.
        pose_entries = np.asarray(pose_entries, dtype=np.float32).reshape(
            -1, pose_entry_size)
        pose_entries = pose_entries[pose_entries[:, -1] >= 3]
        return pose_entries, all_keypoints
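The sampling step in `group_keypoints` above is worth a standalone sketch (plain NumPy; `self.grid` is assumed to hold `0 .. points_per_limb - 1` along the sample axis): broadcasting the per-limb step vector against the grid yields `points_per_limb` evenly spaced points along every candidate limb at once.

```python-repl
>>> import numpy as np
>>> points_per_limb = 5
>>> a = np.array([[0., 0.]])                         # one keypoint of type A
>>> b = np.array([[4., 8.]])                         # one keypoint of type B
>>> vec_raw = (b[:, None, :] - a[None]).reshape(-1, 1, 2)
>>> grid = np.arange(points_per_limb).reshape(1, -1, 1)
>>> steps = 1 / (points_per_limb - 1) * vec_raw      # per-limb step vector
>>> steps * grid + a.reshape(-1, 1, 2)               # samples along the limb
array([[[0., 0.],
        [1., 2.],
        [2., 4.],
        [3., 6.],
        [4., 8.]]])
```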
Example #48
0
def rayleigh_scattering_matrix_and_angle_maetzler06(mu_s, mu_i, dphi, npol=2):
    """compute the Rayleigh matrix and half scattering angle. Based on Mätzler 2006 book p111.
This version is relatively slow because it uses phase matrix rotations which is unnecessarily complex for the Rayleigh phase matrix
but would be of interest for other phase matrices.

"""

    # cos and sin of scattering and incident angles in the main frame
    cos_ti = np.atleast_1d(mu_i)[np.newaxis, np.newaxis, :]
    sin_ti = np.sqrt(1. - cos_ti**2)

    cos_t = np.atleast_1d(mu_s)[np.newaxis, :, np.newaxis]
    sin_t = np.sqrt(1. - cos_t**2)

    dphi = np.atleast_1d(dphi)
    cos_pd = np.cos(dphi)[:, np.newaxis, np.newaxis]
    sin_pd_sign = np.where(dphi >= np.pi, -1, 1)[:, np.newaxis, np.newaxis]

    # Scattering angle in the 1-2 frame
    cosT = np.clip(cos_t * cos_ti + sin_t * sin_ti * cos_pd, -1.0,
                   1.0)  # Prevents occasional numerical error
    cosT2 = cosT**2  # cos^2 (Theta)
    sinT = np.sqrt(1. - cosT2)

    # Apply non-zero scattering denominator
    nonnullsinT = sinT >= 1e-6

    # Create arrays of rotation angles
    cost_sinti = cos_t * sin_ti
    costi_sint = cos_ti * sin_t

    cos_i1 = cost_sinti - costi_sint * cos_pd
    np.divide(cos_i1, sinT, where=nonnullsinT, out=cos_i1)
    np.clip(cos_i1, -1.0, 1.0, out=cos_i1)

    cos_i2 = costi_sint - cost_sinti * cos_pd
    np.divide(cos_i2, sinT, where=nonnullsinT, out=cos_i2)
    np.clip(cos_i2, -1.0, 1.0, out=cos_i2)

    # Special condition if theta and theta_i = 0 to preserve azimuth dependency
    dege_dphi = np.broadcast_to((sin_t < 1e-6) & (sin_ti < 1e-6), cos_i1.shape)
    cos_i1[dege_dphi] = 1.
    cos_i2[dege_dphi] = np.broadcast_to(cos_pd, cos_i2.shape)[dege_dphi]

    # # See Matzler 2006 pg 111 Eq. 3.20
    # # Calculate rotation angles alpha, alpha_i
    # # Convention follows Matzler 2006, Thermal Microwave Radiation, p111, eqn 3.20

    Li = Lmatrix(cos_i1, -sin_pd_sign, (3, npol))  # L (-i1)

    if npol == 2:
        RLi = np.array([[cosT2 * Li[0][0], cosT2 * Li[0][1]], Li[1],
                        [cosT * Li[2][0], cosT * Li[2][1]]])

    elif npol == 3:
        RLi = np.array([[cosT2 * Li[0][0], cosT2 * Li[0][1], cosT2 * Li[0][2]],
                        Li[1],
                        [cosT * Li[2][0], cosT * Li[2][1], cosT * Li[2][2]]])
    else:
        raise RuntimeError("invalid value of npol")

    Ls = Lmatrix(-cos_i2, sin_pd_sign, (npol, 3))  # L (pi - i2)
    p = np.einsum('ij...,jk...->ik...', Ls,
                  RLi)  # multiply the outer dimension (=polarization)

    sin_half_scatt = np.sqrt(0.5 *
                             (1 - cosT))  # compute half the scattering angle

    return p, sin_half_scatt
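One detail above deserves a note: `np.divide(..., where=..., out=...)` writes the quotient only where `sinT` is safely nonzero and leaves the remaining entries of `out` untouched, which avoids division-by-zero warnings at degenerate geometries. A minimal standalone sketch:

```python-repl
>>> import numpy as np
>>> num = np.array([1., 2., 3.])
>>> den = np.array([2., 0., 4.])
>>> ok = den >= 1e-6
>>> out = num.copy()                 # masked-out entries keep their previous value
>>> np.divide(num, den, where=ok, out=out)
array([0.5 , 2.  , 0.75])
```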
Example #49
0
	def __mul__(self,other):
		if isinstance(other,spAD2):
			value = self.value*other.value
			coef1_a,coef1_b = _add_dim(other.value)*self.coef1,_add_dim(self.value)*other.coef1
			index_a,index_b = np.broadcast_to(self.index,coef1_a.shape),np.broadcast_to(other.index,coef1_b.shape)
			coef2_a,coef2_b = _add_dim(other.value)*self.coef2,_add_dim(self.value)*other.coef2
			index_row_a,index_row_b = np.broadcast_to(self.index_row,coef2_a.shape),np.broadcast_to(other.index_row,coef2_b.shape)
			index_col_a,index_col_b = np.broadcast_to(self.index_col,coef2_a.shape),np.broadcast_to(other.index_col,coef2_b.shape)

			len_a,len_b = self.coef1.shape[-1],other.coef1.shape[-1]
			coef2_ab = np.repeat(self.coef1,len_b,axis=-1) * np.tile(other.coef1,len_a) 
			index2_a = np.broadcast_to(np.repeat(self.index,len_b,axis=-1),coef2_ab.shape)
			index2_b = np.broadcast_to(np.tile(other.index,len_a),coef2_ab.shape)

			return spAD2(value,_concatenate(coef1_a,coef1_b),_concatenate(index_a,index_b),
				np.concatenate((coef2_a,coef2_b,coef2_ab,coef2_ab),axis=-1),
				np.concatenate((index_row_a,index_row_b,index2_a,index2_b),axis=-1),
				np.concatenate((index_col_a,index_col_b,index2_b,index2_a),axis=-1))
		elif isinstance(other,np.ndarray):
			value = self.value*other
			coef1 = _add_dim(other)*self.coef1
			index = np.broadcast_to(self.index,coef1.shape)
			coef2 = _add_dim(other)*self.coef2
			index_row = np.broadcast_to(self.index_row,coef2.shape)
			index_col = np.broadcast_to(self.index_col,coef2.shape)
			return spAD2(value,coef1,index,coef2,index_row,index_col)
		else:
			return spAD2(self.value*other,other*self.coef1,self.index,other*self.coef2,self.index_row,self.index_col)
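The cross term `coef2_ab` above pairs every first-order index of `self` with every index of `other` via `np.repeat` and `np.tile`, enumerating the outer product in row-major order. A minimal sketch of that pairing outside the class:

```python-repl
>>> import numpy as np
>>> idx_a, idx_b = np.array([0, 1]), np.array([5, 6, 7])
>>> np.repeat(idx_a, len(idx_b), axis=-1)   # row index of each (a, b) pair
array([0, 0, 0, 1, 1, 1])
>>> np.tile(idx_b, len(idx_a))              # matching column index
array([5, 6, 7, 5, 6, 7])
```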
Example #50
0
File: accessors.py Project: yamen/vectorbt
    def generate_random_both(cls, shape, n=None, entry_prob=None, exit_prob=None, seed=None, **kwargs):
        """Generate entry and exit signals randomly and iteratively.

        If `n` is set, see `vectorbt.signals.nb.generate_rand_enex_nb`.
        If `entry_prob` and `exit_prob` are set, see `vectorbt.signals.nb.generate_rand_enex_by_prob_nb`.

        `entry_prob` and `exit_prob` must be either a single number or an array that will be
        broadcast to match `shape`. `**kwargs` will be passed to the pandas constructor.

        Example:
            For each column, generate two entries and exits randomly:
            ```python-repl
            >>> en, ex = pd.DataFrame.vbt.signals.generate_random_both(
            ...      (5, 3), n=2, seed=42, index=sig.index, columns=sig.columns)
            >>> en
                            a      b      c
            2020-01-01   True   True   True
            2020-01-02  False  False  False
            2020-01-03   True   True  False
            2020-01-04  False  False   True
            2020-01-05  False  False  False
            >>> ex
                            a      b      c
            2020-01-01  False  False  False
            2020-01-02   True   True   True
            2020-01-03  False  False  False
            2020-01-04  False   True  False
            2020-01-05   True  False   True
            ```

            For each column and time step, pick entry with 50% probability and exit right after:
            ```python-repl
            >>> en, ex = pd.DataFrame.vbt.signals.generate_random_both(
            ...     (5, 3), entry_prob=0.5, exit_prob=1.,
            ...     seed=42, index=sig.index, columns=sig.columns)
            >>> en
                            a      b      c
            2020-01-01   True   True  False
            2020-01-02  False  False   True
            2020-01-03  False   True  False
            2020-01-04   True  False   True
            2020-01-05  False  False  False
            >>> ex
                            a      b      c
            2020-01-01  False  False  False
            2020-01-02   True   True  False
            2020-01-03  False  False   True
            2020-01-04  False   True  False
            2020-01-05   True  False   True
            ```"""
        if not isinstance(shape, tuple):
            shape = (shape, 1)
        elif len(shape) == 1:
            shape = (shape[0], 1)

        if n is not None:
            entries, exits = nb.generate_rand_enex_nb(shape, n, seed=seed)
        elif entry_prob is not None and exit_prob is not None:
            entry_prob = np.broadcast_to(entry_prob, shape)
            exit_prob = np.broadcast_to(exit_prob, shape)
            entries, exits = nb.generate_rand_enex_by_prob_nb(shape, entry_prob, exit_prob, seed=seed)
        else:
            raise ValueError("At least n, or entry_prob and exit_prob must be set")

        if cls.is_series():
            if shape[1] > 1:
                raise ValueError("Use DataFrame accessor")
            return pd.Series(entries[:, 0], **kwargs), pd.Series(exits[:, 0], **kwargs)
        return pd.DataFrame(entries, **kwargs), pd.DataFrame(exits, **kwargs)
    def new_function(self, agent, scope, edge_id, q_function_index):
        '''
        Updates the e function of the agent,
        the e function variables (the list of e function inputs excluding the agent),
        and the e function argmax, giving the best response conditional on the
        e function variables' actions.
        '''
        E = []
        for i in range(len(scope)):
            if scope[i] in self.agents_to_eliminate:
                if len(E) == 0:  # E not yet initialized
                    if q_function_index[i] == 0:
                        E = self.q_functions[edge_id[i]]
                        variables = np.array([scope[i]])
                    else:
                        E = list(map(list, zip(*self.q_functions[edge_id[i]])))
                        variables = np.array([scope[i]])
                else:
                    old_E = E
                    if q_function_index[i] == 0:
                        new_E = self.q_functions[edge_id[i]]
                    else: 
                        new_E = list(map(list, zip(*self.q_functions[edge_id[i]])))
                    new_shape = np.shape(new_E)
                    old_shape = np.shape(old_E)
                    index = self.findindex(variables, scope[i])
                    new_E = np.asarray(new_E)
                    old_E = np.asarray(old_E)
                    if index == -1:
                        variables = np.append(variables,scope[i])
                        new_shape = np.append(old_shape,new_shape[1:])
                        if len(old_shape) > 1:
                            new_E = np.broadcast_to(new_E[:,self.printnewaxis(len(old_shape)-1),:], new_shape)
                        # add one new axis to old E as we know new E is a Q function
                        E = np.broadcast_to(old_E[:,np.newaxis],new_shape) + new_E
                    else:
                        new_E = np.broadcast_to(new_E, old_shape)
                        new_E = np.swapaxes(new_E, 1, index+1)
                        E = old_E + new_E

            # if this local Q function has been replaced by an e:
            else:
                if len(E) == 0 and self.functions_e_used[scope[i]] == False:
                    E = self.functions_e[scope[i]]
                    E = np.swapaxes(E, 0, self.findindex(self.functions_e_variables[scope[i]],agent))
                    variables = self.functions_e_variables[scope[i]]
                    variables = self.check_variables(variables, agent)
                    self.functions_e_used[scope[i]] = True 

                elif self.functions_e_used[scope[i]] == False:
                    old_E = E
                    new_E = self.functions_e[scope[i]]
                    new_E = np.swapaxes(new_E, 0, self.findindex(self.functions_e_variables[scope[i]],agent))
                    self.functions_e_used[scope[i]] = True 
                    if len(old_E) > 0:
                        old_shape = np.shape(old_E)
                        new_shape = np.shape(new_E)
                        index = self.findindex(variables, scope[i])
                        new_E = np.asarray(new_E)
                        old_E = np.asarray(old_E)
                        if index == -1:
                            variables = np.append(variables,scope[i])
                            variables = self.check_variables(variables, agent)
                            next_shape = old_shape + new_shape[1:]
                            if len(old_shape) > 1:
                                if len(new_shape) > 1:
                                    new_E = np.broadcast_to(new_E[:,self.printnewaxis(len(old_shape)-1),:], next_shape)
                                else:
                                    new_E = np.broadcast_to(new_E[:,self.printnewaxis(len(old_shape)-1)], next_shape)
                            if len(new_shape) > 1:
                                old_E = np.broadcast_to(old_E[:,self.printnewaxis(len(new_shape)-1)], next_shape)
                            E = old_E + new_E
                        else:
                            if old_shape >= new_shape:
                                new_E = np.broadcast_to(new_E, old_shape)
                                new_E = np.swapaxes(new_E, 1, index+1)
                            else:
                                old_E = np.broadcast_to(old_E, new_shape)
                                old_E = np.swapaxes(old_E, 1, index+1)
                            E = old_E + new_E

        variables = self.check_variables(variables, agent)
        E_size = np.shape(E)
        if len(E_size) == 1:
            best_actions = np.argwhere(E == np.amax(E, axis=0))
            self.functions_e[agent] = int(random.choice(best_actions))
            self.functions_e_variables[agent] = []
        else:     
            self.functions_e[agent] = np.amax(E, axis=0)
            self.findargmax(E, E_size, agent)
            self.functions_e_variables[agent] = variables
Example #52
0
	def broadcast_to(self,shape):
		shape1 = shape+(self.size_ad1,)
		shape2 = shape+(self.size_ad2,)
		return spAD2(np.broadcast_to(self.value,shape), np.broadcast_to(self.coef1,shape1), np.broadcast_to(self.index,shape1),
			np.broadcast_to(self.coef2,shape2), np.broadcast_to(self.index_row,shape2), np.broadcast_to(self.index_col,shape2))
Example #53
0
 def _Vs(self, time_step) -> np.ndarray: # (Nj, W), read-only
     return np.broadcast_to(self.voltage_sources(time_step), (self.circuit._Nj(), self.get_problem_count()))\
         if hasattr(self.voltage_sources, "__call__") else self.voltage_sources[:, :, time_step]
Example #54
0
    def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
        """Set one or more values inplace.

        Parameters
        ----------
        key : int, ndarray, or slice
            When called from, e.g. ``Series.__setitem__``, ``key`` will be
            one of

            * scalar int
            * ndarray of integers.
            * boolean ndarray
            * slice object

        value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
            value or values to be set at ``key``.

        Returns
        -------
        None
        """
        key = check_array_indexer(self, key)

        if is_integer(key):
            if not is_scalar(value):
                raise ValueError("Must pass scalars with scalar indexer")
            elif isna(value):
                value = None
            elif not isinstance(value, str):
                raise ValueError("Scalar must be NA or str")

            # Slice data and insert in-between
            new_data = [
                *self._data[0:key].chunks,
                pa.array([value], type=pa.string()),
                *self._data[(key + 1):].chunks,
            ]
            self._data = pa.chunked_array(new_data)
        else:
            # Convert to integer indices and iteratively assign.
            # TODO: Make a faster variant of this in Arrow upstream.
            #       This is probably extremely slow.

            # Convert all possible input key types to an array of integers
            if is_bool_dtype(key):
                # TODO(ARROW-9430): Directly support setitem(booleans)
                key_array = np.argwhere(key).flatten()
            elif isinstance(key, slice):
                key_array = np.array(range(len(self))[key])
            else:
                # TODO(ARROW-9431): Directly support setitem(integers)
                key_array = np.asanyarray(key)

            if is_scalar(value):
                value = np.broadcast_to(value, len(key_array))
            else:
                value = np.asarray(value)

            if len(key_array) != len(value):
                raise ValueError("Length of indexer and values mismatch")

            for k, v in zip(key_array, value):
                self[k] = v
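The non-scalar branch above leans on `np.broadcast_to(value, len(key_array))` to reuse a single scalar for every target index without copying it; the resulting view has stride 0. A standalone sketch:

```python-repl
>>> import numpy as np
>>> key_array = np.array([0, 2, 3])
>>> value = np.broadcast_to("x", len(key_array))   # zero-copy, one value per index
>>> value.shape, value.strides                     # stride 0: one stored element
((3,), (0,))
>>> [(int(k), str(v)) for k, v in zip(key_array, value)]
[(0, 'x'), (2, 'x'), (3, 'x')]
```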
Example #55
0
 def compute(self, node, input_vals, output_val, use_numpy=True):
     assert (len(input_vals) == 2)
     if use_numpy:
         output_val[:] = np.broadcast_to(input_vals[0], input_vals[1].shape)
     else:
         gpu_op.broadcast_to(input_vals[0], output_val)
Example #56
0
 def _f(self, time_step) -> np.ndarray:  # (Nf, W), read-only
     return np.broadcast_to(self.frustration(time_step), (self.circuit._Nf(), self.get_problem_count())) \
         if hasattr(self.frustration, "__call__") else self.frustration[:, :, time_step]
Example #57
0
        np.ones(shape=(5, ), dtype=("f8", 32)),
        np.ones(shape=(5, ), dtype=[("x", "f8", 32)]),
        np.ones(shape=(5, ),
                dtype=np.dtype([("a", "i1"), ("b", "f8")], align=False)),
        np.ones(shape=(5, ),
                dtype=np.dtype([("a", "i1"), ("b", "f8")], align=True)),
        np.ones(shape=(5, ), dtype=np.dtype([("a", "m8[us]")], align=False)),
        # this dtype fails unpickling
        np.ones(shape=(5, ), dtype=np.dtype([("a", "m8")], align=False)),
        np.array([(1, "abc")], dtype=[("x", "i4"), ("s", object)]),
        np.zeros(5000, dtype=[("x%d" % i, "<f8") for i in range(4)]),
        np.zeros(5000, dtype="S32"),
        np.zeros((1, 1000, 1000)),
        np.arange(12)[::2],  # non-contiguous array
        np.ones(shape=(5, 6)).astype(dtype=[("total", "<f8"), ("n", "<f8")]),
        np.broadcast_to(np.arange(3), shape=(10, 3)),  # zero-strided array
    ],
)
def test_dumps_serialize_numpy(x):
    header, frames = serialize(x)
    if "compression" in header:
        frames = decompress(header, frames)
    buffer_interface = memoryview
    for frame in frames:
        assert isinstance(frame, (bytes, buffer_interface))
    y = deserialize(header, frames)

    np.testing.assert_equal(x, y)
    if x.flags.c_contiguous or x.flags.f_contiguous:
        assert x.strides == y.strides
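The `# zero-strided array` case above is the interesting one for serialization: `np.broadcast_to` returns a read-only view whose leading stride is 0, so all ten rows alias the same three stored integers, and a serializer must either materialize or preserve that. A quick hedged check (the stride values assume default 8-byte integers):

```python-repl
>>> import numpy as np
>>> x = np.broadcast_to(np.arange(3), shape=(10, 3))
>>> x.strides              # leading stride 0: every row is the same memory
(0, 8)
>>> x.flags.writeable      # broadcast views are read-only
False
```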
Example #58
0
def broadcast_to_batch_and_evaluate(loss_func, batch_size, yt, yp):

    yt = np.broadcast_to(yt, (batch_size, ) + yt.shape[1:])
    yp = np.broadcast_to(yp, (batch_size, ) + yp.shape[1:])

    return loss_func(tf.convert_to_tensor(yt), tf.convert_to_tensor(yp))
Example #59
0
File: base.py Project: khprash/vectorbt
    def from_orders(cls, main_price, order_size, order_price=None, init_capital=None, fees=None, fixed_fees=None,
                    slippage=None, is_target=False, broadcast_kwargs={}, freq=None, **kwargs):
        """Build portfolio from orders.

        Starting with initial capital `init_capital`, at each time step, orders the number
        of shares specified in `order_size` for `order_price`.

        Args:
            main_price (pandas_like): Main price of the asset, such as close.
            order_size (int, float or array_like): The amount of shares to order.

                If the size is positive, this is the number of shares to buy.
                If the size is negative, this is the number of shares to sell.
                To buy/sell everything, set the size to `np.inf`.
            order_price (array_like): Order price. Defaults to `main_price`.
            init_capital (int, float or array_like): The initial capital.

                Single value or value per column.
            fees (float or array_like): Fees in percentage of the order value.

                Single value, value per column, or value per element.
            fixed_fees (float or array_like): Fixed amount of fees to pay per order.

                Single value, value per column, or value per element.
            slippage (float or array_like): Slippage in percentage of price.

                Single value, value per column, or value per element.
            is_target (bool): If `True`, will order the difference between current and target size.
            broadcast_kwargs: Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
            freq (any): Index frequency in case `main_price.index` is not datetime-like.
            **kwargs: Keyword arguments passed to the `__init__` method.

        For defaults, see `vectorbt.defaults.portfolio`.

        All time series will be broadcast together using `vectorbt.base.reshape_fns.broadcast`.
        At the end, they will have the same metadata.

        Example:
            Portfolio from various order sequences:
            ```python-repl
            >>> portfolio = vbt.Portfolio.from_orders(price, orders,
            ...     init_capital=100, fees=0.0025, fixed_fees=1., slippage=0.001)

            >>> print(portfolio.orders.records)
                col  idx        size  price      fees  side
            0     0    0   98.654463  1.001  1.246883     0
            1     1    0    1.000000  1.001  1.002502     0
            2     1    1    1.000000  2.002  1.005005     0
            3     1    2    1.000000  3.003  1.007507     0
            4     1    3    1.000000  2.002  1.005005     0
            5     1    4    4.000000  0.999  1.009990     1
            6     2    0   98.654463  1.001  1.246883     0
            7     2    1   98.654463  1.998  1.492779     1
            8     2    2   64.646521  3.003  1.485334     0
            9     2    3   64.646521  1.998  1.322909     1
            10    2    4  126.398131  1.001  1.316311     0
            >>> print(portfolio.equity)
                                 a          b           c
            2020-01-01   98.654463  98.996498   98.654463
            2020-01-02  197.308925  98.989493  195.618838
            2020-01-03  295.963388  99.978985  193.939564
            2020-01-04  197.308925  95.971980  127.840840
            2020-01-05   98.654463  90.957990  126.398131
            ```
        """
        # Get defaults
        if order_price is None:
            order_price = main_price
        if init_capital is None:
            init_capital = defaults.portfolio['init_capital']
        if fees is None:
            fees = defaults.portfolio['fees']
        if fixed_fees is None:
            fixed_fees = defaults.portfolio['fixed_fees']
        if slippage is None:
            slippage = defaults.portfolio['slippage']

        # Perform checks
        checks.assert_type(main_price, (pd.Series, pd.DataFrame))

        # Broadcast inputs
        main_price, order_size, order_price, fees, fixed_fees, slippage = \
            reshape_fns.broadcast(main_price, order_size, order_price, fees, fixed_fees,
                                  slippage, **broadcast_kwargs, writeable=True)
        target_shape = (main_price.shape[0], main_price.shape[1] if main_price.ndim > 1 else 1)
        init_capital = np.broadcast_to(init_capital, (target_shape[1],))

        # Perform calculation
        order_records, cash, shares = nb.simulate_from_orders_nb(
            target_shape,
            init_capital,
            reshape_fns.to_2d(order_size, raw=True),
            reshape_fns.to_2d(order_price, raw=True),
            reshape_fns.to_2d(fees, raw=True),
            reshape_fns.to_2d(fixed_fees, raw=True),
            reshape_fns.to_2d(slippage, raw=True),
            is_target)

        # Bring to the same meta
        wrapper = ArrayWrapper.from_obj(main_price, freq=freq)
        cash = wrapper.wrap(cash)
        shares = wrapper.wrap(shares)
        orders = Orders(order_records, main_price, freq=freq)
        if checks.is_series(main_price):
            init_capital = init_capital[0]
        else:
            init_capital = wrapper.wrap_reduced(init_capital)

        return cls(main_price, init_capital, orders, cash, shares, freq=freq, **kwargs)
Example #60
0
    def __call__(self, x: dy.Expression, att_mask: np.ndarray,
                 batch_mask: np.ndarray, p: float):
        """
    x: expression of dimensions (input_dim, time) x batch
    att_mask: numpy array of dimensions (time, time); pre-transposed
    batch_mask: numpy array of dimensions (batch, time)
    p: dropout prob
    """
        sent_len = x.dim()[0][1]
        batch_size = x[0].dim()[1]

        if self.downsample_factor > 1:
            if sent_len % self.downsample_factor != 0:
                raise ValueError(
                    "For 'reshape' downsampling, sequence lengths must be multiples of the downsampling factor. "
                    "Configure batcher accordingly.")
            if batch_mask is not None:
                batch_mask = batch_mask[:, ::self.downsample_factor]
            sent_len_out = sent_len // self.downsample_factor
            sent_len = sent_len_out
            out_mask = x.mask
            if out_mask is not None:  # downsample_factor > 1 is already guaranteed here
                out_mask = out_mask.lin_subsampled(
                    reduce_factor=self.downsample_factor)

            x = ExpressionSequence(expr_tensor=dy.reshape(
                x.as_tensor(), (x.dim()[0][0] * self.downsample_factor,
                                x.dim()[0][1] / self.downsample_factor),
                batch_size=batch_size),
                                   mask=out_mask)
            residual = SAAMTimeDistributed()(x)
        else:
            residual = SAAMTimeDistributed()(x)
            sent_len_out = sent_len
        if self.model_dim != self.input_dim * self.downsample_factor:
            residual = self.res_shortcut.transform(residual)

        # Concatenate all the words together for doing vectorized affine transform
        if self.kq_pos_encoding_type is None:
            kvq_lin = self.linear_kvq.transform(SAAMTimeDistributed()(x))
            key_up = self.shape_projection(
                dy.pick_range(kvq_lin, 0, self.head_count * self.dim_per_head),
                batch_size)
            value_up = self.shape_projection(
                dy.pick_range(kvq_lin, self.head_count * self.dim_per_head,
                              2 * self.head_count * self.dim_per_head),
                batch_size)
            query_up = self.shape_projection(
                dy.pick_range(kvq_lin, 2 * self.head_count * self.dim_per_head,
                              3 * self.head_count * self.dim_per_head),
                batch_size)
        else:
            assert self.kq_pos_encoding_type == "embedding"
            encoding = self.kq_positional_embedder.embed_sent(
                sent_len).as_tensor()
            kq_lin = self.linear_kq.transform(SAAMTimeDistributed()(
                ExpressionSequence(
                    expr_tensor=dy.concatenate([x.as_tensor(), encoding]))))
            key_up = self.shape_projection(
                dy.pick_range(kq_lin, 0, self.head_count * self.dim_per_head),
                batch_size)
            query_up = self.shape_projection(
                dy.pick_range(kq_lin, self.head_count * self.dim_per_head,
                              2 * self.head_count * self.dim_per_head),
                batch_size)
            v_lin = self.linear_v.transform(SAAMTimeDistributed()(x))
            value_up = self.shape_projection(v_lin, batch_size)

        if self.cross_pos_encoding_type:
            assert self.cross_pos_encoding_type == "embedding"
            emb1 = dy.pick_range(dy.parameter(self.cross_pos_emb_p1), 0,
                                 sent_len)
            emb2 = dy.pick_range(dy.parameter(self.cross_pos_emb_p2), 0,
                                 sent_len)
            key_up = dy.reshape(key_up,
                                (sent_len, self.dim_per_head, self.head_count),
                                batch_size=batch_size)
            key_up = dy.concatenate_cols(
                [dy.cmult(key_up, emb1),
                 dy.cmult(key_up, emb2)])
            key_up = dy.reshape(key_up, (sent_len, self.dim_per_head * 2),
                                batch_size=self.head_count * batch_size)
            query_up = dy.reshape(
                query_up, (sent_len, self.dim_per_head, self.head_count),
                batch_size=batch_size)
            query_up = dy.concatenate_cols(
                [dy.cmult(query_up, emb2),
                 dy.cmult(query_up, -emb1)])
            query_up = dy.reshape(query_up, (sent_len, self.dim_per_head * 2),
                                  batch_size=self.head_count * batch_size)

        scaled = query_up * dy.transpose(
            key_up / math.sqrt(self.dim_per_head)
        )  # scale before the matrix multiplication to save memory

        # Apply Mask here
        if not self.ignore_masks:
            if att_mask is not None:
                att_mask_inp = att_mask * -100.0
                if self.downsample_factor > 1:
                    att_mask_inp = att_mask_inp[::self.downsample_factor, ::
                                                self.downsample_factor]
                scaled += dy.inputTensor(att_mask_inp)
            if batch_mask is not None:
                # reshape (batch, time) -> (time, head_count*batch), then *-100
                inp = np.resize(np.broadcast_to(batch_mask.T[:, np.newaxis, :],
                                                (sent_len, self.head_count, batch_size)),
                                (1, sent_len, self.head_count * batch_size)) \
                      * -100
                mask_expr = dy.inputTensor(inp, batched=True)
                scaled += mask_expr
            if self.diag_gauss_mask:
                diag_growing = np.zeros((sent_len, sent_len, self.head_count))
                for i in range(sent_len):
                    for j in range(sent_len):
                        diag_growing[i, j, :] = -(i - j)**2 / 2.0
                e_diag_gauss_mask = dy.inputTensor(diag_growing)
                e_sigma = dy.parameter(self.diag_gauss_mask_sigma)
                if self.square_mask_std:
                    e_sigma = dy.square(e_sigma)
                e_sigma_sq_inv = dy.cdiv(
                    dy.ones(e_sigma.dim()[0], batch_size=batch_size),
                    dy.square(e_sigma))
                e_diag_gauss_mask_final = dy.cmult(e_diag_gauss_mask,
                                                   e_sigma_sq_inv)
                scaled += dy.reshape(e_diag_gauss_mask_final,
                                     (sent_len, sent_len),
                                     batch_size=batch_size * self.head_count)

        # Computing Softmax here.
        attn = dy.softmax(scaled, d=1)
        if LOG_ATTENTION:
            yaml_logger.info({
                "key": "selfatt_mat_ax0",
                "value": np.average(attn.value(), axis=0).dumps(),
                "desc": self.desc
            })
            yaml_logger.info({
                "key": "selfatt_mat_ax1",
                "value": np.average(attn.value(), axis=1).dumps(),
                "desc": self.desc
            })
            yaml_logger.info({
                "key": "selfatt_mat_ax0_ent",
                "value": entropy(attn.value()).dumps(),
                "desc": self.desc
            })
            yaml_logger.info({
                "key": "selfatt_mat_ax1_ent",
                "value": entropy(attn.value().transpose()).dumps(),
                "desc": self.desc
            })

        self.select_att_head = 0  # hard-coded: the branch below therefore always runs, keeping only head 0
        if self.select_att_head is not None:
            attn = dy.reshape(attn, (sent_len, sent_len, self.head_count),
                              batch_size=batch_size)
            sel_mask = np.zeros((1, 1, self.head_count))
            sel_mask[0, 0, self.select_att_head] = 1.0
            attn = dy.cmult(attn, dy.inputTensor(sel_mask))
            attn = dy.reshape(attn, (sent_len, sent_len),
                              batch_size=self.head_count * batch_size)

        # Applying dropout to attention
        if p > 0.0:
            drop_attn = dy.dropout(attn, p)
        else:
            drop_attn = attn

        # Computing weighted attention score
        attn_prod = drop_attn * value_up

        # Reshaping the attn_prod to input query dimensions
        out = dy.reshape(attn_prod,
                         (sent_len_out, self.dim_per_head * self.head_count),
                         batch_size=batch_size)
        out = dy.transpose(out)
        out = dy.reshape(out, (self.model_dim, ),
                         batch_size=batch_size * sent_len_out)
        #     out = dy.reshape_transpose_reshape(attn_prod, (sent_len_out, self.dim_per_head * self.head_count), (self.model_dim,), pre_batch_size=batch_size, post_batch_size=batch_size*sent_len_out)

        if self.plot_attention:
            from sklearn.metrics.pairwise import cosine_similarity
            assert batch_size == 1
            mats = []
            for i in range(attn.dim()[1]):
                mats.append(dy.pick_batch_elem(attn, i).npvalue())
                self.plot_att_mat(
                    mats[-1], "{}.sent_{}.head_{}.png".format(
                        self.plot_attention, self.plot_attention_counter, i),
                    300)
            avg_mat = np.average(mats, axis=0)
            self.plot_att_mat(
                avg_mat,
                "{}.sent_{}.head_avg.png".format(self.plot_attention,
                                                 self.plot_attention_counter),
                300)
            cosim_before = cosine_similarity(x.as_tensor().npvalue().T)
            self.plot_att_mat(
                cosim_before, "{}.sent_{}.cosim_before.png".format(
                    self.plot_attention, self.plot_attention_counter), 600)
            cosim_after = cosine_similarity(out.npvalue().T)
            self.plot_att_mat(
                cosim_after, "{}.sent_{}.cosim_after.png".format(
                    self.plot_attention, self.plot_attention_counter), 600)
            self.plot_attention_counter += 1

        # Adding dropout and layer normalization
        if p > 0.0:
            res = dy.dropout(out, p) + residual
        else:
            res = out + residual
        ret = self.layer_norm.transform(res)
        return ret
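The batch-mask handling in the attention block above packs a `(batch, time)` mask into a `(1, time, head_count * batch)` tensor so it can be added to the batched attention logits; a hedged NumPy-only sketch of that exact reshape:

```python-repl
>>> import numpy as np
>>> batch_size, sent_len, head_count = 2, 3, 4
>>> batch_mask = np.array([[0, 0, 1],    # batch element 0: last position padded
...                        [0, 1, 1]])   # batch element 1: last two padded
>>> inp = np.resize(np.broadcast_to(batch_mask.T[:, np.newaxis, :],
...                                 (sent_len, head_count, batch_size)),
...                 (1, sent_len, head_count * batch_size)) * -100
>>> inp.shape
(1, 3, 8)
>>> inp[0, :, :2]          # first head's columns, one per batch element
array([[   0,    0],
       [   0, -100],
       [-100, -100]])
```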