Example #1
 def blendo(self,
            files,
            indir,
            outdir,
            fname,
            error=0.05):  # default error is 5%
     # files is a dictionary.  Key = asset name, Value=[filename,fraction] fraction is decimal 0..1
     #   see trial in __main__ below
     # error is the error term as a %, derived from te
     self.frame.SetStatusText('Blending files - please wait.')
     filenms = [v[0] for v in files.values()]
     pcts = [v[1] for v in files.values()]
     returns = [loader(fn, indir, self.filetype) for fn in filenms]
     balret = [ret * pct for ret, pct in zip(returns, pcts)]
     sumbalret = sum(balret)
     sds = error * abs(sumbalret)
     e1 = np.random.normal(sumbalret, sds)
     e2 = pd.DataFrame(e1)
     errs = round(e2, 6)
     errs.columns = sumbalret.columns
     balgro = 1 + sumbalret
     errbalgro = 1 + errs
     balfundval = np.cumproduct(balgro, axis=1)
     errbalfundval = np.cumproduct(errbalgro, axis=1)
     cols_needed = [i for i in balfundval.columns if i % 12 == 0]
     outputbalfund = round(balfundval[cols_needed], 6)
     #outputbalfund.insert(0,-1,1)
     #outputbalfund.to_csv(outdir + '//' + fname +'raw.csv', header=False, index=False)
     outputerrbal = round(errbalfundval[cols_needed], 6)
     #outputerrbal.insert(0,-1,1)
     outputerrbal.to_csv(outdir + '//' + fname + '.csv',
                         header=False,
                         index=False)
Example #2
def pnl_plotter(df, price, stock_code):
    plt_df = pd.DataFrame()
    underlying_df = price['close']
    underlying_df.index = price['date_time']
    underlying_df = underlying_df.pct_change().dropna()
    plt_df['underlying'] = np.cumproduct(underlying_df + 1)
    plt_df['strategy'] = np.cumproduct(df['pnl'] + 1)
    plt_df['strategy'] = plt_df['strategy'].ffill()
    plt_df['strategy'] = plt_df['strategy'].fillna(1)
    plt.plot(plt_df)
    plt.title(stock_code)
    plt.show()
    return plt_df
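The idiom worth noting here is np.cumproduct(r + 1): it compounds a series of per-period simple returns into a cumulative growth curve, which is what both the 'underlying' and 'strategy' columns hold. A minimal standalone sketch, using made-up returns:

import numpy as np

returns = np.array([0.01, -0.02, 0.03])    # hypothetical per-period simple returns
equity_curve = np.cumproduct(returns + 1)  # prod over i<=t of (1 + r_i)
print(equity_curve)                        # [1.01      0.9898    1.019494]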
Example #3
    def generate_c_code(self, **kwargs):
        TEMPLATE_IDENTITY_FUNC = cleandoc('''
        void {op_func_name}(void *op_param, {t} Input{InputDims}, {t} Output{OutputDims}, void *inputs_params, void* outputs_params){{        
            memcpy(Output, Input, sizeof({t}) * {cumdim});
        }}
        ''')

        res = ''
        res += self.get_c_param_type()  # call only once
        res += '\n\n\n'

        # constant function
        mapping = {}
        mapping.update({'op_func_name': self.get_func_name()})
        mapping.update({'t': data_type.np2c(self.input_tensor_dtypes[0])})
        mapping.update(
            {'cumdim': np.cumproduct(self.input_tensor_shapes[0])[-1]})
        mapping.update({'Input': self.input_tensor_names[0]})
        mapping.update({'Output': self.output_tensor_names[0]})
        mapping.update({
            'InputDims':
            c_helper.generate_dim_bracket(self.input_tensor_shapes[0])
        })
        mapping.update({
            'OutputDims':
            c_helper.generate_dim_bracket(self.input_tensor_shapes[0])
        })

        res += TEMPLATE_IDENTITY_FUNC.format(**mapping)

        return res
Example #4
    def calculate_ion_populations(self, phis):
        """
        Calculate the ionization balance

        .. math::
            N(X) = N_1 + N_2 + N_3 + \\dots

            N(X) = N_1 + (N_2/N_1) \\times N_1 + (N_3/N_2) \\times (N_2/N_1) \\times N_1 + \\dots

            N(X) = N_1 (1 + N_2/N_1 + (N_3/N_2) \\times (N_2/N_1) + \\dots)

            N(X) = N_1 (1 + \\Phi_{i,j}/N_e + \\Phi_{i,j}/N_e \\times \\Phi_{i,j+1}/N_e + \\dots)


        """
        #TODO see if self.ion_populations is None is needed (first class should be enough)
        if not hasattr(self, 'ion_populations') or self.ion_populations is None:
            self.ion_populations = pd.Series(index=self.partition_functions.index.copy())

        for atomic_number, groups in phis.groupby(level='atomic_number'):
            current_phis = groups.values / self.electron_density
            phis_product = np.cumproduct(current_phis)

            neutral_atom_density = self.number_density.ix[atomic_number] / (1 + np.sum(phis_product))
            ion_densities = [neutral_atom_density] + list(neutral_atom_density * phis_product)

            self.ion_populations.ix[atomic_number] = ion_densities
Example #5
    def __init__(self, upsample_scales, pad):
        super(UpsampleNet, self).__init__()
        self.upsample_scales = upsample_scales
        self.pad = pad

        self.total_scale = np.cumproduct(self.upsample_scales)[-1]
        self.indent = self.pad * self.total_scale
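Since only the last element of the cumulative product is kept, total_scale is simply the product of all upsample scales. A quick sketch with assumed scales:

import numpy as np

upsample_scales = [4, 4, 16]  # hypothetical per-layer scales
total_scale = np.cumproduct(upsample_scales)[-1]
assert total_scale == np.prod(upsample_scales) == 256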
Example #6
    def calculate_ion_populations(self, phis, ion_zero_threshold=1e-20):
        """
        Calculate the ionization balance

        .. math::
            N(X) = N_1 + N_2 + N_3 + \\dots

            N(X) = N_1 + (N_2/N_1) \\times N_1 + (N_3/N_2) \\times (N_2/N_1) \\times N_1 + \\dots

            N(X) = N_1 (1 + N_2/N_1 + (N_3/N_2) \\times (N_2/N_1) + \\dots)

            N(X) = N_1 (1 + \\Phi_{i,j}/N_e + \\Phi_{i,j}/N_e \\times \\Phi_{i,j+1}/N_e + \\dots)


        """
        #TODO see if self.ion_populations is None is needed (first class should be enough)
        if not hasattr(self, 'ion_populations'):
            self.ion_populations = pd.DataFrame(index=self.partition_functions.index.copy(),
                                                columns=np.arange(len(self.t_rads)), dtype=np.float64)

        for atomic_number, groups in phis.groupby(level='atomic_number'):
            current_phis = (groups / self.electron_densities).replace(np.nan, 0.0).values
            phis_product = np.cumproduct(current_phis, axis=0)

            neutral_atom_density = self.number_densities.ix[atomic_number] / (1 + np.sum(phis_product, axis=0))



            self.ion_populations.ix[atomic_number].values[0] = neutral_atom_density.values
            self.ion_populations.ix[atomic_number].values[1:] = neutral_atom_density.values * phis_product
            self.ion_populations[self.ion_populations < ion_zero_threshold] = 0.0
Example #7
def _combine(df, groupers, sort, row_limit=None):
    for grouper in groupers:
        if isinstance(grouper, Binner):
            raise NotImplementedError(
                'Cannot combine Binner with other groupers yet')

    groupers = groupers.copy()
    max_count_64bit = 2**63 - 1
    first = groupers.pop(0)
    combine_now = [first]
    combine_later = []
    counts = [first.N]

    # when does the cartesian product overflow 64 bits?
    next = groupers.pop(0)
    while (product(counts) * next.N < max_count_64bit):
        counts.append(next.N)
        combine_now.append(next)
        if groupers:
            next = groupers.pop(0)
        else:
            next = None
            break

    counts.append(1)
    # decreasing [40, 4, 1] for 2 groupers (N=10 and N=4)
    cumulative_counts = np.cumproduct(counts[::-1], dtype='i8').tolist()[::-1]
    assert len(combine_now) >= 2
    combine_later = ([next] if next else []) + groupers

    binby_expressions = [df[k.binby_expression] for k in combine_now]
    for i in range(0, len(binby_expressions)):
        binby_expression = binby_expressions[i]
        dtype = vaex.utils.required_dtype_for_max(cumulative_counts[i])
        binby_expression = binby_expression.astype(str(dtype))
        if isinstance(combine_now[i],
                      GrouperCategory) and combine_now[i].min_value != 0:
            binby_expression -= combine_now[i].min_value
        if cumulative_counts[i + 1] != 1:
            binby_expression = binby_expression * cumulative_counts[i + 1]
        binby_expressions[i] = binby_expression
    expression = reduce(operator.add, binby_expressions)
    grouper = GrouperCombined(expression,
                              df,
                              multipliers=cumulative_counts[1:],
                              parents=combine_now,
                              sort=sort,
                              row_limit=row_limit)
    if combine_later:

        @vaex.delayed
        def combine(_ignore):
            # recursively add more of the groupers (because of 64 bit overflow)
            # return 1
            grouper._create_binner(df)
            new_grouper = _combine(df, [grouper] + combine_later, sort=sort)
            return new_grouper

        return combine(grouper._promise)
    return grouper._promise.then(lambda x: grouper)
Example #8
 def pis(self):
     #stick breaking procedure
     betas = np.random.beta(1, self.alpha, self.k)
     # compute prod(1- beta)_i^n-1
     cum_prods = np.append(1, np.cumproduct(1 - betas[:-1]))
     prods = betas * cum_prods
     return prods / prods.sum()
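Example #8's pis implements the stick-breaking construction of Dirichlet-process weights: np.cumproduct(1 - betas[:-1]) is the fraction of the stick still unbroken before each draw. A self-contained sketch under assumed alpha and k:

import numpy as np

alpha, k = 1.0, 5  # hypothetical concentration parameter and truncation level
betas = np.random.beta(1, alpha, k)

# stick remaining before break i: prod over j<i of (1 - beta_j)
remaining = np.append(1, np.cumproduct(1 - betas[:-1]))
weights = betas * remaining
weights /= weights.sum()  # renormalize because the construction is truncated at k
assert np.isclose(weights.sum(), 1.0)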
Example #9
 def __init__(
     self,
     feat_dims,
     upsample_scales,
     compute_dims,
     num_res_blocks,
     res_out_dims,
     pad,
     use_aux_net,
 ):
     super().__init__()
     self.total_scale = np.cumproduct(upsample_scales)[-1]
     self.indent = pad * self.total_scale
     self.use_aux_net = use_aux_net
     if use_aux_net:
         self.resnet = MelResNet(num_res_blocks, feat_dims, compute_dims,
                                 res_out_dims, pad)
         self.resnet_stretch = Stretch2d(self.total_scale, 1)
     self.up_layers = nn.ModuleList()
     for scale in upsample_scales:
         k_size = (1, scale * 2 + 1)
         padding = (0, scale)
         stretch = Stretch2d(scale, 1)
         conv = nn.Conv2d(1,
                          1,
                          kernel_size=k_size,
                          padding=padding,
                          bias=False)
         conv.weight.data.fill_(1.0 / k_size[1])
         self.up_layers.append(stretch)
         self.up_layers.append(conv)
Example #10
 def __init__(self,
              feat_dims,
              upsample_scales=[4, 4, 10],
              compute_dims=128,
              res_blocks=10,
              res_out_dims=128,
              pad=2):
     super().__init__()
     self.num_outputs = res_out_dims
     total_scale = np.cumproduct(upsample_scales)[-1]
     self.indent = pad * total_scale
     self.resnet = MelResNet(res_blocks, feat_dims, compute_dims,
                             res_out_dims, pad)
     self.resnet_stretch = Stretch2d(total_scale, 1)
     self.up_layers = nn.ModuleList()
     for scale in upsample_scales:
         k_size = (1, scale * 2 + 1)
         padding = (0, scale)
         stretch = Stretch2d(scale, 1)
         conv = nn.Conv2d(1,
                          1,
                          kernel_size=k_size,
                          padding=padding,
                          bias=False)
         conv.weight.data.fill_(1. / k_size[1])
         self.up_layers.append(stretch)
         self.up_layers.append(conv)
Example #11
    def calculate_ion_populations(self, phis):
        """
        Calculate the ionization balance

        .. math::
            N(X) = N_1 + N_2 + N_3 + \\dots

            N(X) = N_1 + (N_2/N_1) \\times N_1 + (N_3/N_2) \\times (N_2/N_1) \\times N_1 + \\dots

            N(X) = N_1 (1 + N_2/N_1 + (N_3/N_2) \\times (N_2/N_1) + \\dots)

            N(X) = N_1 (1 + \\Phi_{i,j}/N_e + \\Phi_{i,j}/N_e \\times \\Phi_{i,j+1}/N_e + \\dots)


        """
        #TODO see if self.ion_populations is None is needed (first class should be enough)
        if not hasattr(self,
                       'ion_populations') or self.ion_populations is None:
            self.ion_populations = pd.Series(
                index=self.partition_functions.index.copy())

        for atomic_number, groups in phis.groupby(level='atomic_number'):
            current_phis = groups.values / self.electron_density
            phis_product = np.cumproduct(current_phis)

            neutral_atom_density = self.number_density.ix[atomic_number] / (
                1 + np.sum(phis_product))
            ion_densities = [neutral_atom_density] + list(
                neutral_atom_density * phis_product)

            self.ion_populations.ix[atomic_number] = ion_densities
Example #12
    def prepare_diffusion_vars(self):
        """Prepare for variables used in the diffusion process."""
        self.betas = self.get_betas()
        self.alphas = 1.0 - self.betas
        self.alphas_bar = np.cumproduct(self.alphas, axis=0)
        self.alphas_bar_prev = np.append(1.0, self.alphas_bar[:-1])
        self.alphas_bar_next = np.append(self.alphas_bar[1:], 0.0)

        # calculations for diffusion q(x_t | x_0) and others
        self.sqrt_alphas_bar = np.sqrt(self.alphas_bar)
        self.sqrt_one_minus_alphas_bar = np.sqrt(1.0 - self.alphas_bar)
        self.log_one_minus_alphas_bar = np.log(1.0 - self.alphas_bar)
        self.sqrt_recip_alphas_bar = np.sqrt(1.0 / self.alphas_bar)
        self.sqrt_recipm1_alphas_bar = np.sqrt(1.0 / self.alphas_bar - 1)

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        self.tilde_betas_t = self.betas * (1 - self.alphas_bar_prev) / (
            1 - self.alphas_bar)
        # clip log var for tilde_betas_0 = 0
        self.log_tilde_betas_t_clipped = np.log(
            np.append(self.tilde_betas_t[1], self.tilde_betas_t[1:]))
        self.tilde_mu_t_coef1 = np.sqrt(
            self.alphas_bar_prev) / (1 - self.alphas_bar) * self.betas
        self.tilde_mu_t_coef2 = np.sqrt(
            self.alphas) * (1 - self.alphas_bar_prev) / (1 - self.alphas_bar)
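The central quantity above is alphas_bar, the running product of the per-step alphas; it is what allows the forward diffusion q(x_t | x_0) to be sampled in a single step rather than by iterating t noise additions. A minimal sketch, assuming a linear beta schedule:

import numpy as np

T = 1000
betas = np.linspace(1e-4, 0.02, T)          # hypothetical linear schedule
alphas = 1.0 - betas
alphas_bar = np.cumproduct(alphas, axis=0)  # alpha_bar_t = prod over s<=t of alpha_s

def q_sample(x0, t, rng=np.random.default_rng()):
    # sample x_t ~ q(x_t | x_0) directly using the closed form
    noise = rng.standard_normal(x0.shape)
    return np.sqrt(alphas_bar[t]) * x0 + np.sqrt(1.0 - alphas_bar[t]) * noise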
Example #13
def cartesian_product(X):
    """
    Numpy version of itertools.product or pandas.compat.product.
    Sometimes faster (for large inputs)...

    Examples
    --------
    >>> cartesian_product([list('ABC'), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
    array([1, 2, 1, 2, 1, 2])]

    """

    lenX = np.fromiter((len(x) for x in X), dtype=int)
    cumprodX = np.cumproduct(lenX)

    a = np.roll(cumprodX, 1)
    a[0] = 1

    b = cumprodX[-1] / cumprodX

    return [
        np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
                np.product(a[i])) for i, x in enumerate(X)
    ]
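The cumulative product of the input lengths does all the bookkeeping here: input i is repeated once for every combination of the inputs after it, and the result is tiled once for every combination of the inputs before it. A small sketch of that arithmetic with two assumed inputs:

import numpy as np

lengths = np.array([3, 2])               # e.g. list('ABC') and [1, 2]
cumprod = np.cumproduct(lengths)         # [3, 6]

tile_counts = np.roll(cumprod, 1)        # combinations of inputs *before* each
tile_counts[0] = 1                       # -> [1, 3]
repeat_counts = cumprod[-1] // cumprod   # combinations of inputs *after* each -> [2, 1]

print(np.tile(np.repeat(list('ABC'), repeat_counts[0]), tile_counts[0]))
print(np.tile(np.repeat([1, 2], repeat_counts[1]), tile_counts[1]))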
Example #14
    def calculate_ion_populations(self, phis, ion_zero_threshold=1e-20):
        """
        Calculate the ionization balance

        .. math::
            N(X) = N_1 + N_2 + N_3 + \\dots

            N(X) = N_1 + (N_2/N_1) \\times N_1 + (N_3/N_2) \\times (N_2/N_1) \\times N_1 + \\dots

            N(X) = N_1 (1 + N_2/N_1 + (N_3/N_2) \\times (N_2/N_1) + \\dots)

            N(X) = N_1 (1 + \\Phi_{i,j}/N_e + \\Phi_{i,j}/N_e \\times \\Phi_{i,j+1}/N_e + \\dots)


        """
        #TODO see if self.ion_populations is None is needed (first class should be enough)
        if not hasattr(self, 'ion_populations'):
            self.ion_populations = pd.DataFrame(index=self.partition_functions.index.copy(),
                                                columns=np.arange(len(self.t_rads)), dtype=np.float64)

        for atomic_number, groups in phis.groupby(level='atomic_number'):
            current_phis = (groups / self.electron_densities).replace(np.nan, 0.0).values
            phis_product = np.cumproduct(current_phis, axis=0)

            neutral_atom_density = self.number_densities.ix[atomic_number] / (1 + np.sum(phis_product, axis=0))



            self.ion_populations.ix[atomic_number].values[0] = neutral_atom_density.values
            self.ion_populations.ix[atomic_number].values[1:] = neutral_atom_density.values * phis_product
            self.ion_populations[self.ion_populations < ion_zero_threshold] = 0.0
Example #15
def sample_episode(env, action_policy, gamma):
    lists_a, lists_s, lists_s_ne, lists_r = [], [], [], []
    done = False
    s = env.reset()
    step_counter = 0
    while not done:
        a = action_policy(torch.tensor(s.reshape(1, -1), dtype=torch.float))
        s_ne, r, done, _ = env.step(a)
        lists_a.append(a)
        lists_s.append(s)
        lists_s_ne.append(s_ne)
        lists_r.append(r)

        step_counter += 1
        s = s_ne

    assert step_counter == 5000  # assumes the episode length is fixed at 5000 steps

    actions = np.array(lists_a)
    states = np.array(lists_s)
    rewards = np.array(lists_r)
    gamma_array = np.ones_like(rewards) * gamma
    gamma_array = np.cumproduct(gamma_array)
    R_fun = gamma_array * rewards

    print("mean_reward in this sampling ", np.sum(rewards) / step_counter)
    return states, actions.reshape(-1, 1), R_fun.reshape(
        -1, 1), np.sum(rewards) / step_counter
Example #16
def gradient(x, t, p, log_p):
    '''
    Gradient of the objective with decision variables x and scenario t. p holds
    the detection probabilities and log_p is log(1 - p), elementwise.
    
    Computations are performed via vectorized numpy operations for greater efficiency.
    '''
    import numpy as np
    order = np.argsort(t)
    x = x[order]
    p = p[order]
    log_p = log_p[order]
    t = t[order]
    p_fail = ((1 - p)**x)
    cum_prob_fail = np.cumproduct(p_fail)
    p_fail_offset = np.zeros((len(x) + 1))
    p_fail_offset[0] = 1
    p_fail_offset[1:] = cum_prob_fail
    cum_sum = np.zeros((len(x)))
    cum_sum[-1] = 0
    intermediate = (p_fail_offset[:-1] * (1 - p_fail) * t)[1:]
    intermediate = intermediate[::-1]
    intermediate = np.cumsum(intermediate)
    intermediate = intermediate[::-1]
    cum_sum[:-1] = intermediate
    ordered_grad = t * log_p * cum_prob_fail - log_p * cum_sum - log_p * cum_prob_fail[
        -1] * t.max()
    ordered_grad[np.abs(ordered_grad) < 0.00001] = 0
    new_perm = np.zeros((len(order)), dtype=int)
    for i, val in enumerate(order):
        new_perm[val] = i
    return ordered_grad[new_perm]
Example #17
    def __init__(self, shape_, order='C', **keywords):
        shape_ = tointtuple(shape_)
        ndim = len(shape_)
        if ndim == 1:
            raise NotImplementedError('ndim == 1 is not implemented.')
        if order.upper() not in ('C', 'F'):
            raise ValueError("Invalid order '{0}'. Expected order is 'C' or "
                             "'F'.".format(order))
        order = order.upper()

        Operator.__init__(self, **keywords)
        self.shape_ = shape_
        self.order = order
        self.ndim = ndim
        if order == 'C':
            self.coefs = np.cumproduct((1,) + shape_[:0:-1])[::-1]
        elif order == 'F':
            self.coefs = np.cumproduct((1,) + shape_[:-1])
Example #18
    def __init__(self, shape_, order='C', **keywords):
        shape_ = tointtuple(shape_)
        ndim = len(shape_)
        if ndim == 1:
            raise NotImplementedError('ndim == 1 is not implemented.')
        if order.upper() not in ('C', 'F'):
            raise ValueError("Invalid order '{0}'. Expected order is 'C' or "
                             "'F'.".format(order))
        order = order.upper()

        Operator.__init__(self, **keywords)
        self.shape_ = shape_
        self.order = order
        self.ndim = ndim
        if order == 'C':
            self.coefs = np.cumproduct((1, ) + shape_[:0:-1])[::-1]
        elif order == 'F':
            self.coefs = np.cumproduct((1, ) + shape_[:-1])
Example #19
    def __init__(self, rnn_dims, fc_dims, mode, mulaw, pad, use_aux_net, use_upsample_net, upsample_factors,
                 feat_dims, compute_dims, res_out_dims, res_blocks,
                 hop_length, sample_rate):
        super().__init__()
        self.mode = mode
        self.mulaw = mulaw
        self.pad = pad
        self.use_upsample_net = use_upsample_net
        self.use_aux_net = use_aux_net
        if type(self.mode) is int:
            self.n_classes = 2 ** self.mode
        elif self.mode == 'mold':
            self.n_classes = 3 * 10
        elif self.mode == 'gauss':
            self.n_classes = 2
        else:
            raise RuntimeError(" > Unknown training mode")

        self.rnn_dims = rnn_dims
        self.aux_dims = res_out_dims // 4
        self.hop_length = hop_length
        self.sample_rate = sample_rate
        print(np.cumproduct(upsample_factors))
        if self.use_upsample_net:
            print(np.cumproduct(upsample_factors)[-1], self.hop_length)
            assert np.cumproduct(upsample_factors)[-1] == self.hop_length, " [!] upsample scales need to multiply to hop_length"
            self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, 
                                            res_blocks, res_out_dims, pad, use_aux_net)
        else:
            self.upsample = Upsample(hop_length, pad, res_blocks, feat_dims, compute_dims, res_out_dims, use_aux_net)
        if self.use_aux_net:
            self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims)
            self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
            self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True)
            self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims)
            self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims)
            self.fc3 = nn.Linear(fc_dims, self.n_classes)
        else:
            self.I = nn.Linear(feat_dims + 1, rnn_dims)
            self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
            self.rnn2 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
            self.fc1 = nn.Linear(rnn_dims, fc_dims)
            self.fc2 = nn.Linear(fc_dims, fc_dims)
            self.fc3 = nn.Linear(fc_dims, self.n_classes)
Example #20
def shift(array, n, axis=0):
    """
    Shift array elements inplace along a given axis.

    Elements that are shifted beyond the last position are not re-introduced
    at the first.

    Parameters
    ----------
    array : float array
        Input array to be modified

    n : integer number or array
        The number of places by which elements are shifted. If it is an array,
        specific offsets are applied along the first dimensions.

    axis : int, optional
        The axis along which elements are shifted. By default, it is the first
        axis.

    Examples
    --------
    >>> a = ones(8)
    >>> shift(a, 3); a
    array([0., 0., 0., 1., 1., 1., 1., 1.])

    >>> a = array([[1.,1.,1.,1.],[2.,2.,2.,2.]])
    >>> shift(a, [1,-1], axis=1); a
    array([[0., 1., 1., 1.],
           [2., 2., 2., 0.]])
    """
    if not isinstance(array, np.ndarray):
        raise TypeError('Input array is not an ndarray.')

    if array.dtype != var.FLOAT_DTYPE:
        raise TypeError('The data type of the input array is not ' + \
                        str(var.FLOAT_DTYPE.name) + '.')

    rank = array.ndim
    n = np.array(n, ndmin=1, dtype='int32').ravel()
    
    if axis < 0:
        axis = rank + axis

    if axis == 0 and n.size > 1 or n.size != 1 and n.size not in \
       np.cumproduct(array.shape[0:axis]):
        raise ValueError('The offset size is incompatible with the first dime' \
                         'nsions of the array')
    if rank == 0:
        array.shape = (1,)
        array[:] = 0
        array.shape = ()
    else:
        tmf.shift(array.ravel(), rank-axis, np.asarray(array.T.shape), n)
Example #21
    def __init__(self,
                 time_size,
                 space_size,
                 dim,
                 beta,
                 link_type,
                 num_samples=None,
                 rand=False):
        """Initialization for GaugeLattice object.

        Args:
            time_size (int): Temporal extent of lattice.
            space_size (int): Spatial extent of lattice.
            dim (int): Dimensionality
            beta (float): Inverse coupling constant.
            link_type (str): String representing the type of gauge group for
                the link variables. Must be either 'U1', 'SU2', or 'SU3'
            num_samples (int): Number of sample lattices to use.
            rand (bool): Flag specifying if lattice should be initialized
                randomly or uniformly.
        """
        assert link_type.upper() in [
            'U1', 'SU2', 'SU3'
        ], ("Invalid link_type. Possible values: 'U1', 'SU2', 'SU3'")
        self.time_size = time_size
        self.space_size = space_size
        self.dim = dim
        self.beta = beta
        self.link_type = link_type
        self.link_shape = None

        self._init_lattice(link_type, rand)
        self.samples = None

        self.num_sites = np.cumproduct(self.site_idxs)[-1]
        self.num_links = int(self.dim * self.num_sites)
        self.num_plaquettes = self.time_size * self.space_size
        self.bases = np.eye(dim, dtype=int)

        if self.link_type == 'U1':
            self.plaquette_operator = self.plaquette_operator_u1
            self._action_op = self._action_op_u1

        else:
            self.plaquette_operator = self.plaquette_operator_suN
            self.action_op = self._action_op_suN

        if num_samples is not None:
            #  Create `num_samples` randomized instances of links array
            self.num_samples = num_samples
            self.samples = self.get_links_samples(num_samples,
                                                  rand=rand,
                                                  link_type=self.link_type)
            self.samples[0] = self.links
Example #22
def shift(array, n, axis=0):
    """
    Shift array elements inplace along a given axis.

    Elements that are shifted beyond the last position are not re-introduced
    at the first.

    Parameters
    ----------
    array : float array
        Input array to be modified

    n : integer number or array
        The number of places by which elements are shifted. If it is an array,
        specific offsets are applied along the first dimensions.

    axis : int, optional
        The axis along which elements are shifted. By default, it is the first
        axis.

    Examples
    --------
    >>> a = ones(8)
    >>> shift(a, 3); a
    array([0., 0., 0., 1., 1., 1., 1., 1.])

    >>> a = array([[1.,1.,1.,1.],[2.,2.,2.,2.]])
    >>> shift(a, [1,-1], axis=1); a
    array([[0., 1., 1., 1.],
           [2., 2., 2., 0.]])
    """
    if not isinstance(array, np.ndarray):
        raise TypeError('Input array is not an ndarray.')

    if array.dtype != var.FLOAT_DTYPE:
        raise TypeError('The data type of the input array is not ' + \
                        str(var.FLOAT_DTYPE.name) + '.')

    rank = array.ndim
    n = np.array(n, ndmin=1, dtype='int32').ravel()

    if axis < 0:
        axis = rank + axis

    if axis == 0 and n.size > 1 or n.size != 1 and n.size not in \
       np.cumproduct(array.shape[0:axis]):
        raise ValueError('The offset size is incompatible with the first dime' \
                         'nsions of the array')
    if rank == 0:
        array.shape = (1, )
        array[:] = 0
        array.shape = ()
    else:
        tmf.shift(array.ravel(), rank - axis, np.asarray(array.T.shape), n)
Example #23
    def __init__(self, res_blocks, upsample_scales, compute_dims, output_dims,
                 pad):
        super(UpsampleNet, self).__init__()
        self.res_blocks = res_blocks
        self.upsample_scales = upsample_scales
        self.compute_dims = compute_dims
        self.output_dims = output_dims
        self.pad = pad

        self.total_scale = np.cumproduct(self.upsample_scales)[-1]
        self.indent = self.pad * self.total_scale
Example #24
def cartesian_product(X) -> list[np.ndarray]:
    """
    Numpy version of itertools.product.
    Sometimes faster (for large inputs)...

    Parameters
    ----------
    X : list-like of list-likes

    Returns
    -------
    product : list of ndarrays

    Examples
    --------
    >>> cartesian_product([list('ABC'), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]

    See Also
    --------
    itertools.product : Cartesian product of input iterables.  Equivalent to
        nested for-loops.
    """
    msg = "Input must be a list-like of list-likes"
    if not is_list_like(X):
        raise TypeError(msg)
    for x in X:
        if not is_list_like(x):
            raise TypeError(msg)

    if len(X) == 0:
        return []

    lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
    cumprodX = np.cumproduct(lenX)

    if np.any(cumprodX < 0):
        raise ValueError("Product space too large to allocate arrays!")

    a = np.roll(cumprodX, 1)
    a[0] = 1

    if cumprodX[-1] != 0:
        b = cumprodX[-1] / cumprodX
    else:
        # if any factor is empty, the cartesian product is empty
        b = np.zeros_like(cumprodX)

    return [
        tile_compat(np.repeat(x, b[i]), np.product(a[i]))
        for i, x in enumerate(X)
    ]
Example #25
    def alpha(self, pH):
        '''Return the fraction of each species at a given pH.

        Parameters
        ----------
        pH : int, float, or Numpy Array
            These are the pH value(s) over which the fraction should be
            returned.

        Returns
        -------
        Numpy NDArray
            These are the fractional concentrations at any given pH. They are
            sorted from most acidic species to least acidic species. If a
            NDArray of pH values is provided, then a 2D array will be
            returned. In this case, each row represents the speciation for
            each given pH.
        '''
        # If the given pH is not a list/array, be sure to convert it to one
        # for future calcs.
        if isinstance(pH, (int, float)):
            pH = [
                pH,
            ]
        pH = np.array(pH, dtype=float)

        # Calculate the concentration of H3O+. If multiple pH values are
        # given, then it is best to construct a two dimensional array of
        # concentrations.
        h3o = 10.**(-pH)
        if len(h3o) > 1:
            h3o = np.repeat(h3o.reshape(-1, 1), len(self._Ka_temp), axis=1)

        # These are the powers to which the H3O+ concentrations will be raised.
        power = np.arange(len(self._Ka_temp))
        # Calculate the H3O+ concentrations raised to the powers calculated
        # above (in reverse order).
        h3o_pow = h3o**(power[::-1])
        # Calculate a cumulative product of the Ka values. The first value
        # must be 1.0, which is why _Ka_temp is used instead of Ka.
        Ka_prod = np.cumproduct(self._Ka_temp)
        # Multiply the H3O**power values times the cumulative Ka product.
        h3o_Ka = h3o_pow * Ka_prod

        # Return the alpha values. The return shape will differ if the
        # input H3O array was 2-dimensional.
        if len(h3o.shape) > 1:
            den = h3o_Ka.sum(axis=1)
            return h3o_Ka / den.reshape(-1, 1)
        else:
            den = h3o_Ka.sum()
            return h3o_Ka / den
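The chemistry behind the cumproduct call: for a polyprotic acid, the numerator for the n-th deprotonated species is [H3O+]^(N-n) times the product Ka_1 * ... * Ka_n, so one cumulative product over _Ka_temp (seeded with a leading 1.0) produces every numerator at once. A numeric sketch for a hypothetical diprotic acid:

import numpy as np

Ka_temp = np.array([1.0, 1.8e-5, 6.3e-11])  # leading 1.0, then made-up Ka1, Ka2
h3o = 10.0 ** (-7.0)                        # H3O+ concentration at pH 7

powers = np.arange(len(Ka_temp))[::-1]      # [2, 1, 0]
terms = h3o ** powers * np.cumproduct(Ka_temp)
alphas = terms / terms.sum()                # fractions, most acidic species first
assert np.isclose(alphas.sum(), 1.0)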
Example #26
def index_to_assignment(
        I, D):  # I is an integer and D is a python list, e.g. [2, 2, 2]

    g = list(D[0:len(D) - 1])
    g.insert(0, 1)

    result = np.float32(
        np.mod(
            np.floor(
                np.divide(npm.repmat(I - 1, 1, len(D)),
                          npm.repmat(np.cumproduct(g), 1, 1))),
            npm.repmat(D, 1, 1)))
    return result.flatten()
Example #27
 def reshape(self, shape):
     try:
         shape = tuple(shape)
     except TypeError:
         shape = (shape, )
     if np.product(shape) != len(self):
         raise InvalidArgument(
             "Reshape failed: New shape %s has different length to "
             "existing shape %s" % (str(shape), str(self.shape)))
     bools = self.as_boolarray().reshape(-1)
     buf = np.packbits(bools)
     strides = [1] + list(np.cumproduct(shape[-1:0:-1]))
     return BitArray(buf, 0, shape, strides)
Example #28
def f(x, t, p):
    '''
    Objective value to decision x in scenario with reachability times t and detection probabilities p
    '''
    import numpy as np
    order = np.argsort(t)
    prob_fail = np.zeros((len(x) + 1))
    prob_fail[0] = 1
    prob_fail[1:] = ((1 - p)**x)[order]
    cum_prob_fail = np.cumproduct(prob_fail)
    prob_succeed = (1 - prob_fail)[1:]
    return t.max() - (cum_prob_fail[:-1] * prob_succeed *
                      t[order]).sum() - cum_prob_fail[-1] * t.max()
Example #29
    def __init__(self, description, dimension=None, offset=None, strides=None):
        dimension = [1] if dimension is None else dimension[:] # copy
        strides = (np.cumproduct(np.hstack((1, dimension)))[:-1]
                   if strides is None else strides)
        self._nodes = copy.deepcopy(Detector._nodes)
        self._nodes['dimension']['shape'] = [len(dimension)]
        self._nodes['strides']['shape'] = [len(strides)]
        self._nodes['counts']['shape'] = dimension
        self._nodes['roiMask']['shape'] = dimension
        self._nodes['sliceCounts']['shape'] = dimension
        self.dimension = dimension
        self.offset = 0 if offset is None else offset
        self.strides = (np.cumproduct(np.hstack((1, dimension)))[:-1].tolist()
                        if strides is None else strides)
        self.roiMask = np.ones(dimension) #.tolist()
        self.roiShape = ["name", "unknown"]
        self.liveROI = 0.
        self.counts = np.zeros(dimension, 'int32') #.tolist()
        self.sliceCounts = np.zeros(dimension, 'int32') #.tolist()

        self._description = description
Example #30
def generate_encode_macro(name, shape):
    multipliers = [1] + list(np.cumproduct(shape[::-1]))[:-1]
    multipliers = [Constant(i) for i in reversed(multipliers)]
    params = [SymbolRef(name='x{}'.format(dim)) for dim in range(len(shape))]
    variables = []
    for var in params:
        cp = var.copy()
        cp._force_parentheses = True
        variables.append(Cast(ctypes.c_long(), cp))

    products = [Mul(mult, var) for mult, var in zip(multipliers, variables)]
    total = functools.reduce(Add, products)
    return CppDefine(name=name, params=params, body=total)
Example #31
def cartesian_product(X):
    """
    Numpy version of itertools.product or pandas.compat.product.
    Sometimes faster (for large inputs)...

    Parameters
    ----------
    X : list-like of list-likes

    Returns
    -------
    product : list of ndarrays

    Examples
    --------
    >>> cartesian_product([list('ABC'), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
    array([1, 2, 1, 2, 1, 2])]

    See also
    --------
    itertools.product : Cartesian product of input iterables.  Equivalent to
        nested for-loops.
    pandas.compat.product : An alias for itertools.product.
    """
    msg = "Input must be a list-like of list-likes"
    if not is_list_like(X):
        raise TypeError(msg)
    for x in X:
        if not is_list_like(x):
            raise TypeError(msg)

    if len(X) == 0:
        return []

    lenX = np.fromiter((len(x) for x in X), dtype=int)
    cumprodX = np.cumproduct(lenX)

    a = np.roll(cumprodX, 1)
    a[0] = 1

    if cumprodX[-1] != 0:
        b = cumprodX[-1] / cumprodX
    else:
        # if any factor is empty, the cartesian product is empty
        b = np.zeros_like(cumprodX)

    return [
        np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
                np.product(a[i])) for i, x in enumerate(X)
    ]
Example #32
def save_portfolio_metrics(portfolios, portfolio_name, period_ends, prices, p_value, p_weights, p_holdings, path=None):

    rebalance_qtys = (p_weights.ix[period_ends] / prices.ix[period_ends]) * p_value.ix[period_ends]
    # p_holdings = rebalance_qtys.align(prices)[0].shift(1).ffill().fillna(0)
    transactions = p_holdings - p_holdings.shift(1).fillna(0)
    transactions = transactions[transactions.sum(1) != 0]

    p_returns = p_value.pct_change(periods=1)
    p_index = np.cumproduct(1 + p_returns)

    m_rets = (1 + p_returns).resample("M", how="prod", kind="period") - 1

    portfolios[portfolio_name]["equity"] = p_value
    portfolios[portfolio_name]["ret"] = p_returns
    portfolios[portfolio_name]["cagr"] = compute_cagr(p_value) * 100
    portfolios[portfolio_name]["sharpe"] = compute_sharpe(p_value)
    portfolios[portfolio_name]["weight"] = p_weights
    portfolios[portfolio_name]["transactions"] = transactions
    portfolios[portfolio_name]["period_return"] = 100 * (p_value.ix[-1] / p_value[0] - 1)
    portfolios[portfolio_name]["avg_monthly_return"] = p_index.resample("BM", how="last").pct_change().mean() * 100
    portfolios[portfolio_name]["monthly_return_table"] = Monthly_Return_Table(m_rets)
    portfolios[portfolio_name]["drawdowns"] = compute_drawdown(p_value).dropna()
    portfolios[portfolio_name]["max_drawdown"] = compute_max_drawdown(p_value) * 100
    portfolios[portfolio_name]["max_drawdown_date"] = (
        p_value.index[compute_drawdown(p_value) == compute_max_drawdown(p_value)][0].date().isoformat()
    )
    portfolios[portfolio_name]["avg_drawdown"] = compute_avg_drawdown(p_value) * 100
    portfolios[portfolio_name]["calmar"] = compute_calmar(p_value)
    portfolios[portfolio_name]["R_squared"] = compute_calmar(p_value)
    portfolios[portfolio_name]["DVR"] = compute_DVR(p_value)
    portfolios[portfolio_name]["volatility"] = compute_volatility(p_returns)
    portfolios[portfolio_name]["VAR"] = compute_var(p_returns)
    portfolios[portfolio_name]["CVAR"] = compute_cvar(p_returns)
    portfolios[portfolio_name]["rolling_annual_returns"] = pd.rolling_apply(p_returns, 252, np.sum)
    portfolios[portfolio_name]["p_holdings"] = p_holdings
    portfolios[portfolio_name]["transactions"] = np.round(transactions[transactions.sum(1) != 0], 0)
    portfolios[portfolio_name]["share"] = p_holdings
    portfolios[portfolio_name]["orders"] = generate_orders(transactions, prices)
    portfolios[portfolio_name]["best"] = max(p_returns)
    portfolios[portfolio_name]["worst"] = min(p_returns)
    portfolios[portfolio_name]["trades"] = len(portfolios[portfolio_name]["orders"])

    if path is not None:
        portfolios[portfolio_name].equity.to_csv(path + portfolio_name + "_equity.csv")
        portfolios[portfolio_name].weight.to_csv(path + portfolio_name + "_weight.csv")
        portfolios[portfolio_name].share.to_csv(path + portfolio_name + "_share.csv")
        portfolios[portfolio_name].transactions.to_csv(path + portfolio_name + "_transactions.csv")
        portfolios[portfolio_name].orders.to_csv(path + portfolio_name + "_orders.csv")

    return
Example #33
    def alpha(self, pH):
        '''Return the fraction of each species at a given pH.

        Parameters
        ----------
        pH : int, float, or Numpy Array
            These are the pH value(s) over which the fraction should be
            returned.

        Returns
        -------
        Numpy NDArray
            These are the fractional concentrations at any given pH. They are
            sorted from most acidic species to least acidic species. If a
            NDArray of pH values is provided, then a 2D array will be
            returned. In this case, each row represents the speciation for
            each given pH.
        '''
        # If the given pH is not a list/array, be sure to convert it to one
        # for future calcs.
        if isinstance(pH, (int, float)):
            pH = [pH,]
        pH = np.array(pH, dtype=float)

        # Calculate the concentration of H3O+. If multiple pH values are
        # given, then it is best to construct a two dimensional array of
        # concentrations.
        h3o = 10.**(-pH)
        if len(h3o) > 1:
            h3o = np.repeat( h3o.reshape(-1, 1), len(self._Ka_temp), axis=1)

        # These are the powers to which the H3O+ concentrations will be raised.
        power = np.arange(len(self._Ka_temp))
        # Calculate the H3O+ concentrations raised to the powers calculated
        # above (in reverse order).
        h3o_pow = h3o**( power[::-1] )
        # Calculate a cumulative product of the Ka values. The first value
        # must be 1.0, which is why _Ka_temp is used instead of Ka.
        Ka_prod = np.cumproduct(self._Ka_temp)
        # Multiply the H3O**power values times the cumulative Ka product.
        h3o_Ka = h3o_pow*Ka_prod

        # Return the alpha values. The return shape will differ if the
        # input H3O array was 2-dimensional.
        if len(h3o.shape) > 1:
            den = h3o_Ka.sum(axis=1)
            return h3o_Ka/den.reshape(-1,1)
        else:
            den = h3o_Ka.sum()
            return h3o_Ka/den
Example #34
def cartesian_product(X):
    """
    Numpy version of itertools.product or pandas.compat.product.
    Sometimes faster (for large inputs)...

    Parameters
    ----------
    X : list-like of list-likes

    Returns
    -------
    product : list of ndarrays

    Examples
    --------
    >>> cartesian_product([list('ABC'), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
    array([1, 2, 1, 2, 1, 2])]

    See also
    --------
    itertools.product : Cartesian product of input iterables.  Equivalent to
        nested for-loops.
    pandas.compat.product : An alias for itertools.product.
    """
    msg = "Input must be a list-like of list-likes"
    if not is_list_like(X):
        raise TypeError(msg)
    for x in X:
        if not is_list_like(x):
            raise TypeError(msg)

    if len(X) == 0:
        return []

    lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
    cumprodX = np.cumproduct(lenX)

    a = np.roll(cumprodX, 1)
    a[0] = 1

    if cumprodX[-1] != 0:
        b = cumprodX[-1] / cumprodX
    else:
        # if any factor is empty, the cartesian product is empty
        b = np.zeros_like(cumprodX)

    return [np.tile(np.repeat(np.asarray(com.values_from_object(x)), b[i]),
                    np.product(a[i]))
            for i, x in enumerate(X)]
Example #35
    def price_monte_carlo(self, iterations, control = True):
        drift = math.exp( (self.rate - 0.5 * self.iv ** 2) * self.T / self.steps )
        listAP = np.zeros(iterations)
        listGP = np.zeros(iterations)

        for i in range(iterations):
            np.random.seed(i)
            arrZ = np.random.normal(0, 1, self.steps)
            arrR = np.exp(self.iv * math.sqrt( self.T / self.steps ) * arrZ) * drift 
            arrP = self.spot * np.cumproduct( arrR )

            arithmeticMean      = np.mean( arrP )
            if self.option_type == 'call':
                arithmeticPayoff    = math.exp( -self.rate * self.T ) * max ( [ arithmeticMean - self.strike, 0 ] )
            else:
                arithmeticPayoff    = math.exp( -self.rate * self.T ) * max ( [ self.strike - arithmeticMean, 0 ] )
            listAP[i] = arithmeticPayoff

            geometricMean       = np.exp( 1 / self.steps * sum ( np.log( arrP ) ) )
            if self.option_type == 'call':
                geometricPayoff    = math.exp( -self.rate * self.T ) * max ( [ geometricMean - self.strike, 0 ] )
            else:
                geometricPayoff    = math.exp( -self.rate * self.T ) * max ( [ self.strike - geometricMean, 0 ] )
            listGP[i] = geometricPayoff

        meanGP  = np.mean(listGP)
        meanAP  = np.mean(listAP)
        sdAP    = np.std (listAP, ddof = 1)
        pxClose = self.price()

        if self.option_class == 'geometric':
            print("[Geometric Asian from Standard MC: {0:6f}, Closed-form price: {1:6f}]".format(meanGP, pxClose))
            return (meanGP, pxClose)
        
        if self.option_class == 'arithmetic':
            if control:
                covPP   = np.mean(listGP * listAP) - np.mean(listGP) * np.mean(listAP)
                theta   = covPP / np.var(listGP, ddof = 1)

                listCV  = listAP + theta * (pxClose - listGP)
                meanCV  = np.mean(listCV)
                sdCV    = np.std(listCV, ddof = 1)
                ciCV    = (meanCV - 1.96 * sdCV / math.sqrt(iterations), meanCV + 1.96 * sdCV / math.sqrt(iterations), meanCV, pxClose)
                print("Control Variate: [Confidence interval: ({0:6f}, {1:6f}), Mean Price: {2:6f}, Closed-form geometric price: {3:6f}]".format(*ciCV))
                return ciCV
            else:
                ciAP    = (meanAP - 1.96 * sdAP / math.sqrt(iterations), meanAP + 1.96 * sdAP / math.sqrt(iterations), meanAP, pxClose)
                print("No Control:      [Confidence interval: ({0:6f}, {1:6f}), Mean Price: {2:6f}, Closed-form geometric price: {3:6f}]".format(*ciAP))
                return ciAP
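The cumproduct line is what builds the simulated price path: each entry of arrR is a one-step gross return exp(iv * sqrt(dt) * Z) * drift, so the running product yields the geometric-Brownian-motion path that the Asian payoff averages over. A compact sketch of just the path generation under assumed parameters:

import math
import numpy as np

spot, rate, iv, T, steps = 100.0, 0.05, 0.2, 1.0, 252  # hypothetical inputs
dt = T / steps
drift = math.exp((rate - 0.5 * iv ** 2) * dt)

z = np.random.normal(0.0, 1.0, steps)
gross_returns = np.exp(iv * math.sqrt(dt) * z) * drift
path = spot * np.cumproduct(gross_returns)  # simulated price at each step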
Example #36
def save_portfolio_metrics (portfolios, portfolio_name, period_ends, prices, \
                            p_value, p_weights, p_holdings, path=None) :
        
    rebalance_qtys = (p_weights.ix[period_ends] / prices.ix[period_ends]) * p_value.ix[period_ends]
    #p_holdings = rebalance_qtys.align(prices)[0].shift(1).ffill().fillna(0)
    transactions = (p_holdings - p_holdings.shift(1).fillna(0))
    transactions = transactions[transactions.sum(1) != 0]
    
    p_returns = p_value.pct_change(periods=1)
    p_index = np.cumproduct(1 + p_returns)
    
    m_rets = (1 + p_returns).resample('M', how='prod', kind='period') - 1
    
    portfolios[portfolio_name]['equity'] = p_value
    portfolios[portfolio_name]['ret'] = p_returns
    portfolios[portfolio_name]['cagr'] = compute_cagr(p_value) * 100
    portfolios[portfolio_name]['sharpe'] = compute_sharpe(p_value)
    portfolios[portfolio_name]['weight'] = p_weights
    portfolios[portfolio_name]['transactions'] = transactions
    portfolios[portfolio_name]['period_return'] = 100 * (p_value.ix[-1] / p_value[0] - 1)
    portfolios[portfolio_name]['avg_monthly_return'] = p_index.resample('BM', how='last').pct_change().mean() * 100
    portfolios[portfolio_name]['monthly_return_table'] = Monthly_Return_Table(m_rets)
    portfolios[portfolio_name]['drawdowns'] = compute_drawdown(p_value).dropna()
    portfolios[portfolio_name]['max_drawdown'] = compute_max_drawdown(p_value) * 100
    portfolios[portfolio_name]['max_drawdown_date'] = p_value.index[compute_drawdown(p_value)==compute_max_drawdown(p_value)][0].date().isoformat()
    portfolios[portfolio_name]['avg_drawdown'] = compute_avg_drawdown(p_value) * 100
    portfolios[portfolio_name]['calmar'] = compute_calmar(p_value)
    portfolios[portfolio_name]['R_squared'] = compute_calmar(p_value)
    portfolios[portfolio_name]['DVR'] = compute_DVR(p_value)
    portfolios[portfolio_name]['volatility'] = compute_volatility(p_returns)
    portfolios[portfolio_name]['VAR'] = compute_var(p_returns)
    portfolios[portfolio_name]['CVAR'] = compute_cvar(p_returns)
    portfolios[portfolio_name]['rolling_annual_returns'] = pd.rolling_apply(p_returns, 252, np.sum) 
    portfolios[portfolio_name]['p_holdings'] = p_holdings
    portfolios[portfolio_name]['transactions'] = np.round(transactions[transactions.sum(1)!=0], 0)
    portfolios[portfolio_name]['share'] = p_holdings
    portfolios[portfolio_name]['orders'] = generate_orders(transactions, prices)
    portfolios[portfolio_name]['best'] = max(p_returns)
    portfolios[portfolio_name]['worst'] = min(p_returns)
    portfolios[portfolio_name]['trades'] = len(portfolios[portfolio_name]['orders'])

    if path is not None:
        portfolios[portfolio_name].equity.to_csv(path + portfolio_name + '_equity.csv')
        portfolios[portfolio_name].weight.to_csv(path + portfolio_name + '_weight.csv')
        portfolios[portfolio_name].share.to_csv(path + portfolio_name + '_share.csv')
        portfolios[portfolio_name].transactions.to_csv(path + portfolio_name + '_transactions.csv')
        portfolios[portfolio_name].orders.to_csv(path + portfolio_name + '_orders.csv')
        
    return
Example #37
def Profit_Evaluation(act, ts, ts_pct_change, kind='s'):
    '''
    Evaluate the strategy's rate of return.
    '''
    act_ts = pd.Series(act[:-1], index=ts.index)
    profit = []
    buy = 0
    sell = 0
    flag = 0
    buy_flag = 0

    for i in range(len(act_ts)):
        day = ts.index[i]
        if flag == 0 and act_ts[i] == -1:
            profit.append(0)
            flag = 1
            continue
        elif act_ts[i] == 1 and ts_pct_change[day] < 9.99:
            buy = ts[i]
            profit.append(0)
            flag = 1
            buy_flag = 1
        elif act_ts[i] == -1 and ts_pct_change[day] < 9.99:
            sell = ts[i]
            if kind == 's':
                profit.append(0.9985 * (sell - buy) / buy)
            elif kind == 'f':
                profit.append(0.9995 * (sell - buy) / buy)
            else:
                profit.append((sell - buy) / buy)
            buy_flag = 0
        else:
            profit.append(0)

    cumprod_profit = np.cumproduct(np.array(profit) + 1)
    profit_ts = pd.Series(profit, ts.index)
    cumprod_profit_ts = pd.Series(cumprod_profit, ts.index)

    buy_flag = 0
    for i in range(len(cumprod_profit_ts)):
        if act[i] == 1:
            buy_flag = 1
        if act[i] == -1:
            buy_flag = 0
        if buy_flag == 1:
            cumprod_profit_ts[i] = cumprod_profit_ts[i - 1] * (ts[i] /
                                                               ts[i - 1])

    return profit_ts, cumprod_profit_ts
Example #38
def Tuple_MI(Tuple, IdxLength):
    """
    Function to return the absolute (flat) position of a multiindex, given the
    index tuple and the sizes of the index hierarchy.
    Example: Tuple_MI([2,7,3],[100,10,5]) = 138
    Tuple_MI is the inverse of MI_Tuple.
    """
    # First, generate the index position offset values
    A = IdxLength[1:] + IdxLength[:1]  # Shift 1 to left
    A[-1] = 1  # Replace lowest index by 1
    A.reverse()
    IdxPosOffset = np.cumproduct(A).tolist()
    IdxPosOffset.reverse()
    Position = np.sum([a * b for a, b in zip(Tuple, IdxPosOffset)])
    return Position
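The offsets are mixed-radix place values: reversing the shifted dimension list and taking a cumulative product gives, for each index position, how many flat slots one unit of that index spans. A sketch (with a hypothetical helper name) reproducing the docstring's example:

import numpy as np

def tuple_to_flat(tup, idx_length):
    shifted = idx_length[1:] + [1]                # drop the leading dim; lowest place is 1
    offsets = np.cumproduct(shifted[::-1])[::-1]  # place value of each index position
    return int(np.dot(tup, offsets))

assert tuple_to_flat([2, 7, 3], [100, 10, 5]) == 138  # 2*50 + 7*5 + 3*1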
Example #39
def get_monthly_saving(age, retirement_age, retirement_saving_goal, ann_int,
                       annual_saving_growth):
    """
    Given a target amount of cash at retirement, calculate a fixed saving per month.
    """
    months_before_retirement = (retirement_age - age) * 12

    # investment before retirement
    ann_int = ann_int[:, :months_before_retirement]
    interest_mlp_after_retirement_arr = np.cumproduct(1 + ann_int, axis=1)

    ## solve for constant saving per month
    """
    retirement_saving_goal = sum(distcount_factors_array * saving_per_month_now)
    retirement_saving_goal = sum(distcount_factors_array) * saving_per_month_now
    """
    saving_per_month = retirement_saving_goal / np.sum(
        interest_mlp_after_retirement_arr, axis=1)

    # growth factor
    num_sim = ann_int.shape[0]
    saving_growth_multipliers = np.repeat(np.cumproduct(
        np.ones(shape=(num_sim, int(months_before_retirement / 12))) *
        (1 + annual_saving_growth),
        axis=1),
                                          12,
                                          axis=1)

    saving_first_month = retirement_saving_goal / np.sum(
        interest_mlp_after_retirement_arr * saving_growth_multipliers, axis=1)
    growth_saving = np.repeat(saving_first_month.reshape(
        len(saving_first_month), 1),
                              months_before_retirement,
                              axis=1) * saving_growth_multipliers

    return saving_per_month, growth_saving
Example #40
    def calculate_with_n_electron(self, phi, partition_function,
                                  number_density, n_electron):
        ion_populations = pd.DataFrame(data=0.0,
            index=partition_function.index.copy(),
            columns=partition_function.columns.copy(), dtype=np.float64)

        for atomic_number, groups in phi.groupby(level='atomic_number'):
            current_phis = (groups / n_electron).replace(np.nan, 0.0).values
            phis_product = np.cumproduct(current_phis, axis=0)
            neutral_atom_density = (number_density.ix[atomic_number] /
                                    (1 + np.sum(phis_product, axis=0)))
            ion_populations.ix[atomic_number, 0] = (
                neutral_atom_density.values)
            ion_populations.ix[atomic_number].values[1:] = (
                neutral_atom_density.values * phis_product)
            ion_populations[ion_populations < self.ion_zero_threshold] = 0.0
        return ion_populations
Example #41
def construct_zernike_lookuptable(zernike_indexes):
    """Return a lookup table of the sum-of-factorial part of the radial
    polynomial of the zernike indexes passed
    
    zernike_indexes - an Nx2 array of the Zernike polynomials to be
                      computed.
    """
    factorial = np.ones((100,))
    factorial[1:] = np.cumproduct(np.arange(1, 100).astype(float))
    width = int(np.max(zernike_indexes[:,0]) / 2+1)
    lut = np.zeros((zernike_indexes.shape[0],width))
    for idx, (n, m) in enumerate(zernike_indexes):
        for k in range(0, (n - m) // 2 + 1):
            lut[idx, k] = \
                (((-1) ** k) * factorial[n - k] /
                 (factorial[k] * factorial[(n + m) // 2 - k] * factorial[(n - m) // 2 - k]))
    return lut
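np.cumproduct(np.arange(1, 100)) is a quick way to build a factorial lookup table, since the running product of 1..n is n!. A tiny sketch:

import numpy as np

factorial = np.ones(10)
factorial[1:] = np.cumproduct(np.arange(1, 10).astype(float))
assert factorial[5] == 120.0  # 5! without any explicit loop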
Example #42
def backtest_old(prices, weights, period_ends, capital, offset=0., commission=0.) : 
    
    p_holdings = (capital / prices * weights.align(prices)[0]).shift(offset).ffill().fillna(0)
    w = weights.align(prices)[0].shift(offset).fillna(0)
    trade_dates = w[w.sum(1) != 0].index
    p_cash = capital - (p_holdings * prices.shift(offset)).sum(1)
    totalcash = p_cash[trade_dates].align(prices[prices.columns[0]])[0].ffill().fillna(0)
    p_returns = (totalcash  + (p_holdings * prices).sum(1) - \
                    (abs(p_holdings - p_holdings.shift(1)) * commission).sum(1)) / \
                    (totalcash + (p_holdings * prices.shift(1)).sum(1)) - 1
    p_returns = p_returns.fillna(0)
#    p_weights = p_holdings * prices.shift(offset) / (totalcash + (p_holdings * prices.shift(offset)).sum(1))
    p_weights = pd.DataFrame([(p_holdings * prices.shift(offset))[symbol] / \
                              (totalcash + (p_holdings * prices.shift(offset)).sum(1)) \
                              for symbol in prices.columns], index=prices.columns).T
    p_weights = p_weights.fillna(0)

    return np.cumproduct(1. + p_returns) * capital, p_holdings.astype(int), p_returns, p_weights
Example #43
    def calc_exposed_faces(self):
        #TODO: The 3D bitwise ops are slow
        t = time.time()
        air = BLOCK_SOLID[self.blocks] == 0

        light = numpy.cumproduct(air[:,::-1,:], axis=1)[:,::-1,:]

        exposed = numpy.zeros(air.shape,dtype=numpy.uint8)
        exposed[0,:,:] |= self.edge_blocks(dx=-1)<<5 #left edge
        exposed[-1,:,:] |= self.edge_blocks(dx=1)<<4 #right edge
        exposed[:,:,0] |= self.edge_blocks(dz=-1)<<2 #back edge
        exposed[:,:,-1] |= self.edge_blocks(dz=1)<<3 #front edge
        exposed[:,:-1,:] |= air[:,1:,:]<<7 #up
        exposed[:,1:,:] |= air[:,:-1,:]<<6 #down
        exposed[1:,:,:] |= air[:-1,:,:]<<5 #left
        exposed[:-1,:,:] |= air[1:,:,:]<<4 #right
        exposed[:,:,:-1] |= air[:,:,1:]<<3 #forward
        exposed[:,:,1:] |= air[:,:,:-1]<<2 #back
        self.exposed = exposed*(~air)
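The lighting line is a neat boolean trick: running cumproduct down a column of 0/1 air flags stays 1 only until the first solid block, so light marks every cell with an unobstructed view of the sky; the [:,::-1,:] slices make the product run from the top of the column downward. A 1-D sketch of the idea:

import numpy as np

column = np.array([1, 1, 0, 1, 1])  # 1 = air, 0 = solid; index 0 is the bottom
lit = np.cumproduct(column[::-1])[::-1]
print(lit)  # [0 0 0 1 1] -> only the two cells above the solid block see the sky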
Example #44
    def alpha(self, pH):
        '''Return the fraction of each species at a given pH.

        This returns a Numpy array of fractional speciation for a given
        solution pH. The returned array is ordered as per the Ka/pKa values,
        with the most acidic component listed first.

        '''
        # If the given pH is not a list/array, be sure to convert it to one
        # for future calcs.
        if isinstance(pH, (int, float)):
            pH = [pH,]
        pH = np.array(pH, dtype=float)

        # Calculate the concentration of H3O+. If multiple pH values are
        # given, then it is best to construct a two dimensional array of
        # concentrations.
        h3o = 10.**(-pH)
        if len(h3o) > 1:
            h3o = np.repeat( h3o.reshape(-1, 1), len(self._Ka_temp), axis=1)

        # These are the powers to which the H3O+ concentrations will be raised.
        power = np.arange(len(self._Ka_temp))
        # Calculate the H3O+ concentrations raised to the powers calculated
        # above (in reverse order).
        h3o_pow = h3o**( power[::-1] )
        # Calculate a cumulative product of the Ka values. The first value
        # must be 1.0, which is why _Ka_temp is used instead of Ka.
        Ka_prod = np.cumproduct(self._Ka_temp)
        # Multiply the H3O**power values times the cumulative Ka product.
        h3o_Ka = h3o_pow*Ka_prod

        # Return the alpha values. The return shape will differ if the
        # input H3O array was 2-dimensional.
        if len(h3o.shape) > 1:
            den = h3o_Ka.sum(axis=1)
            return h3o_Ka/den.reshape(-1,1)
        else:
            den = h3o_Ka.sum()
            return h3o_Ka/den
Example #45
def cartesian_product(X):
    """
    Numpy version of itertools.product or pandas.compat.product.
    Sometimes faster (for large inputs)...

    Examples
    --------
    >>> cartesian_product([list('ABC'), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
    array([1, 2, 1, 2, 1, 2])]

    """

    lenX = np.fromiter((len(x) for x in X), dtype=int)
    cumprodX = np.cumproduct(lenX)

    a = np.roll(cumprodX, 1)
    a[0] = 1

    b = cumprodX[-1] / cumprodX

    return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]), np.product(a[i])) for i, x in enumerate(X)]
Example #46
	def combinations(self, lol):
		'''
		Take a list of lists of numbers. 
		Return an array of float64, with shape NxM where
		N = prod([len(x) for x in lol]) and
		M = len(lol). 
		The return array contains all possible
		combinations of the inputs (eg, sets of items containing
		one item from each of the lists in lol)

		'''
		m=len(lol)
		shapes=[len(x) for x in lol]
		stride=np.cumproduct(shapes)
		n = stride[-1]
		oa=np.zeros((n, m), np.float64)
		oa[:,0]=np.resize(lol[0], n)
		for i in range(1, m):
			dl=stride[i-1]
			dr=n//dl
			oa[:,i]=np.ravel(np.transpose(np.resize(lol[i], (dl, dr))))    
		return oa
Example #47
    def index_data(self, coords, hyper_points, coord_map):
        """
        Index the data that falls inside the grid cells

        :param coords: coordinates of grid
        :param hyper_points: list of HyperPoints to index
        :param coord_map: list of tuples relating index in HyperPoint to index in coords and in
                          coords to be iterated over
        """

        # create bounds in correct order
        hp_coords = []
        coord_decreasing = [False] * len(coords)
        coord_lengths = [0] * len(coords)
        lower_bounds = [None] * len(coords)
        max_bounds = [None] * len(coords)
        for (hpi, ci, shi) in coord_map:
            coord = coords[ci]
            # Coordinates must be monotonic; determine whether increasing or decreasing.
            if len(coord.points) > 1:
                if coord.points[1] < coord.points[0]:
                    coord_decreasing[shi] = True
            coord_lengths[shi] = len(coord.points)
            if coord_decreasing[shi]:
                lower_bounds[shi] = coord.bounds[::-1, 1]
                max_bounds[shi] = coord.bounds[0, 1]
            else:
                lower_bounds[shi] = coord.bounds[::, 0]
                max_bounds[shi] = coord.bounds[-1, 1]

            hp_coord = hyper_points.coords[hpi]
            if isinstance(hp_coord[0], datetime.datetime):
                hp_coord = convert_datetime_to_std_time(hp_coord)

            hp_coords.append(hp_coord)

        bounds_coords_max = list(zip(lower_bounds, hp_coords, max_bounds))

        # stack for each coordinate
        #    where the coordinate is larger than the maximum set to -1
        #    otherwise search in the sorted coordinate to find all the index of the hyperpoints
        # The choice of 'left' or 'right' and '<' and '<=' determines which
        #  cell is chosen when the coordinate is equal to the boundary.
        # -1 or M_i indicates the point is outside the grid.
        # Output is a list of coordinates which lists the indexes where the hyper points
        #    should be located in the grid
        indices = np.vstack([
            np.where(
                ci < max_coordinate_value,
                np.searchsorted(bi, ci, side='right') - 1,
                -1)
            for bi, ci, max_coordinate_value in bounds_coords_max])

        # D-tuple giving the shape of the output grid
        grid_shape = tuple(len(bi_ci[0]) for bi_ci in bounds_coords_max)

        # shape (N,) telling which points actually fall within the grid,
        # i.e. have indexes that are not -1 and are not masked data points
        grid_mask = np.all(
            (indices >= 0) &
            (ma.getmaskarray(hyper_points.data) == False),
            axis=0)

        # if the coordinate was decreasing then correct the indices for this cell
        for indices_slice, decreasing, coord_length in zip(range(indices.shape[0]), coord_decreasing, coord_lengths):
            if decreasing:
                # indices[indices_slice] += (coord_length - 1) - indices[indices_slice]
                indices[indices_slice] *= -1
                indices[indices_slice] += (coord_length - 1)

        # shape (N,) containing negative scalar cell numbers for each
        # input point (sequence doesn't matter so long as they are unique), or
        # -1 for points outside the grid.
        #
        # Possibly numpy.lexsort could be used to avoid the need for this,
        # although we'd have to be careful about points outside the grid.
        self.cell_numbers = np.where(
            grid_mask,
            np.tensordot(
                np.cumproduct((1,) + grid_shape[:-1]),
                indices,
                axes=1
            ),
            -1)

        # Sort everything by cell number
        self.sort_order = np.argsort(self.cell_numbers)
        self.cell_numbers = self.cell_numbers[self.sort_order]
        self._indices = indices[:, self.sort_order]
        self.hp_coords = [hp_coord[self.sort_order] for hp_coord in hp_coords]
Example #48
def cumproduct(x, axis=0):
    return np.cumproduct(x, axis)
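A closing note: numpy.cumproduct is simply an alias of numpy.cumprod; the alias was deprecated in NumPy 1.25 and removed in NumPy 2.0, so new code should use the surviving spelling:

import numpy as np

# np.cumprod is the canonical name; np.cumproduct no longer exists in NumPy >= 2.0
print(np.cumprod([2, 3, 4]))                 # [ 2  6 24]
print(np.cumprod([[1, 2], [3, 4]], axis=0))  # [[1 2] [3 8]]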