Example #1
    def _check_bounds(self,x_new):
        # If self.bounds_error = 1, we raise an error if any x_new values
        # fall outside the range of x.  Otherwise, we return an array indicating
        # which values are outside the boundary region.  
        # !! Needs some work for multi-dimensional x !!
        below_bounds = less(x_new,self.x[0])
        above_bounds = greater(x_new,self.x[-1])
        #  Note: sometrue has been redefined to handle length 0 arrays
        # !! Could provide more information about which values are out of bounds
        # RHC -- Changed these ValueErrors to PyDSTool_BoundsErrors
        if self.bounds_error and any(sometrue(below_bounds)):
##            print "Input:", x_new
##            print "Bound:", self.x[0]
##            print "Difference input - bound:", x_new-self.x[0]
            raise PyDSTool_BoundsError("A value in x_new is below the"
                                       " interpolation range.")
        if self.bounds_error and any(sometrue(above_bounds)):
##            print "Input:", x_new
##            print "Bound:", self.x[-1]
##            print "Difference input - bound:", x_new-self.x[-1]
            raise PyDSTool_BoundsError("A value in x_new is above the"
                                       " interpolation range.")
        # !! Should we emit a warning if some values are out of bounds.
        # !! matlab does not.
        out_of_bounds = logical_or(below_bounds,above_bounds)
        return out_of_bounds
Example #2
    def _check_bounds(self, x_new):
        # If self.bounds_error = 1, we raise an error if any x_new values
        # fall outside the range of x.  Otherwise, we return an array indicating
        # which values are outside the boundary region.
        # !! Needs some work for multi-dimensional x !!
        below_bounds = less(x_new, self.x[0])
        above_bounds = greater(x_new, self.x[-1])
        #  Note: sometrue has been redefined to handle length 0 arrays
        # !! Could provide more information about which values are out of bounds
        # RHC -- Changed these ValueErrors to PyDSTool_BoundsErrors
        if self.bounds_error and any(sometrue(below_bounds)):
            ##            print "Input:", x_new
            ##            print "Bound:", self.x[0]
            ##            print "Difference input - bound:", x_new-self.x[0]
            raise PyDSTool_BoundsError("A value in x_new is below the"
                                       " interpolation range.")
        if self.bounds_error and any(sometrue(above_bounds)):
            ##            print "Input:", x_new
            ##            print "Bound:", self.x[-1]
            ##            print "Difference input - bound:", x_new-self.x[-1]
            raise PyDSTool_BoundsError("A value in x_new is above the"
                                       " interpolation range.")
        # !! Should we emit a warning if some values are out of bounds.
        # !! matlab does not.
        out_of_bounds = logical_or(below_bounds, above_bounds)
        return out_of_bounds
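Both listings above rely on NumPy-style elementwise comparisons: less and greater return boolean masks that logical_or combines into a single out-of-bounds mask. A minimal self-contained sketch of that pattern, using numpy directly (the grid and query points below are made up for illustration):

import numpy as np

x = np.linspace(0.0, 1.0, 11)             # interpolation grid
x_new = np.array([-0.5, 0.2, 0.7, 1.3])   # query points, two outside the grid

below_bounds = np.less(x_new, x[0])
above_bounds = np.greater(x_new, x[-1])
out_of_bounds = np.logical_or(below_bounds, above_bounds)
print(out_of_bounds)                      # [ True False False  True]

bounds_error = False
if bounds_error and np.any(below_bounds):
    # analogue of the first bounds_error branch, with a plain ValueError
    raise ValueError("A value in x_new is below the interpolation range.")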
Example #3
    def compute(self, frame, lamda):
        Y = sp.fft(frame * self._window)
        Ysq = sp.absolute(Y)**2.0
        #Compute smooth speech power spectrum
        P = self._nu * self._lastP + (1.0 - self._nu) * Ysq  #eq 2

        #Find the local minimum of noisy speech
        Pmin = sp.zeros(self._winsize, dtype=sp.float32)
        minidx = sp.less_equal(P, self._lastPmin)
        largeidx = sp.greater(P, self._lastPmin)
        Pmin[minidx] = P[minidx]
        Pmin[largeidx]=self._gamma * self._lastPmin[largeidx] + \
                        ((1.0-self._gamma)/(1.0-self._B)) * (P[largeidx] - self._B* self._lastP[largeidx])#eq3
        #Compute ratio of smoothed speech power to its local minimum
        Sr = P / Pmin  #eq4

        #Calculate speech presence probability using first-order recursion
        I = sp.array(sp.greater(self._delta, Sr), dtype=sp.float32)  #eq5
        p = self._alpha_p * self._lastp + (1.0 - self._alpha_p) * I  #eq6

        #Compute time-frequency dependent smoothing factors
        alpha_s = self._alpha_d + (1.0 - self._alpha_d) * p  #eq7

        #Update noise estimate using time-frequency dependent smoothing factors
        D = alpha_s * self._lastD + (1.0 - alpha_s) * Ysq  #eq8

        #calculate debug info
        noise_periodgram = 10 * sp.log10(
            sp.sum(sp.absolute(D)) / self._winsize)
        noisy_speech_periodgram = 10 * sp.log10(sp.sum(Ysq) / self._winsize)
        smooth_noisy_speech_periodgram = 10 * sp.log10(
            sp.sum(P) / self._winsize)
        self._nplist.append(noise_periodgram)
        self._nsplist.append(noisy_speech_periodgram)
        self._snsplist.append(smooth_noisy_speech_periodgram)
        #update
        self._lastP = P
        self._lastPmin = Pmin
        self._lastp = p
        self._lastD = D
        return D
Example #4
    def compute(self, frame, lamda):
        Y = sp.fft(frame * self._window)
        Ysq = sp.absolute(Y)**2.0
        #Compute smooth speech power spectrum
        P = self._nu * self._lastP + (1.0 - self._nu) * Ysq  #eq 2

        #Find the local minimum of noisy speech
        Pmin = sp.zeros(self._winsize, dtype=sp.float32)
        minidx = sp.less_equal(P, self._lastPmin)
        largeidx = sp.greater(P, self._lastPmin)
        Pmin[minidx] = P[minidx]
        Pmin[largeidx] = self._gamma * self._lastPmin[largeidx] + \
                        ((1.0 - self._gamma) / (1.0 - self._B)) * (P[largeidx] - self._B * self._lastP[largeidx])  #eq3
        #Compute ratio of smoothed speech power to its local minimum
        Sr = P / Pmin  #eq4

        #Calculate speech presence probability using first-order recursion
        I = sp.array(sp.greater(self._delta, Sr), dtype=sp.float32)  #eq5
        p = self._alpha_p * self._lastp + (1.0 - self._alpha_p) * I  #eq6

        #Compute time-frequency dependent smoothing factors
        alpha_s = self._alpha_d + (1.0 - self._alpha_d) * p  #eq7

        #Update noise estimate using time-frequency dependent smoothing factors
        D = alpha_s * self._lastD + (1.0 - alpha_s) * Ysq  #eq8

        #calculate debug info
        noise_periodgram = 10 * sp.log10(sp.sum(sp.absolute(D)) / self._winsize)
        noisy_speech_periodgram = 10 * sp.log10(sp.sum(Ysq) / self._winsize)
        smooth_noisy_speech_periodgram = 10 * sp.log10(sp.sum(P) / self._winsize)
        self._nplist.append(noise_periodgram)
        self._nsplist.append(noisy_speech_periodgram)
        self._snsplist.append(smooth_noisy_speech_periodgram)
        #update
        self._lastP = P
        self._lastPmin = Pmin
        self._lastp = p
        self._lastD = D
        return D
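The two compute listings implement the same recursive noise-power estimate; the only non-obvious step is the minimum-tracking update (eq 3), which carries the previous minimum forward with a slow upward drift wherever the smoothed power has risen. A standalone sketch of that masked update, with illustrative constants and toy arrays (none of these values come from the listings):

import numpy as np

gamma, B = 0.998, 0.96                 # illustrative smoothing constants
P        = np.array([0.8, 1.4, 0.5])   # current smoothed power, three toy bins
lastP    = np.array([0.7, 1.0, 0.6])   # previous smoothed power
lastPmin = np.array([0.9, 1.1, 0.4])   # previous tracked minimum

Pmin = np.zeros_like(P)
minidx = np.less_equal(P, lastPmin)    # bins where the power dropped
largeidx = np.greater(P, lastPmin)     # bins where the power rose
Pmin[minidx] = P[minidx]               # dropped: take the new value as the minimum
Pmin[largeidx] = (gamma * lastPmin[largeidx]
                  + ((1.0 - gamma) / (1.0 - B))
                  * (P[largeidx] - B * lastP[largeidx]))
print(Pmin)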
Example #5
    def update_stats(self):
        self.accepted = sp.sum(self.chain_accept)
        self.sample_size = sp.sum(self.chain_sizes)
        #Update means and errors if any chain contains acceptable points
        #print 'pre state:', self.mean
        if float(sp.sum(self.chain_weights)) > 0:
            #Total mean
            #for i in range(len(self.chain_weights)):
            #    print self.chain_weights[i]*self.chain_means[i]
            self.mean = sp.sum([self.chain_weights[i] * self.chain_means[i]
                                for i in range(len(self.chain_weights))
                                if not sp.isnan(self.chain_means[i]).all()], 0) / float(sp.sum(self.chain_weights))
            #Total error (assumes no correlation at present)
            if sp.greater(self.chain_sizes, self.batch_size).any():
                w = sp.array(self.chain_weights) / float(sp.sum(self.chain_weights))
                self.mean_error = sp.sqrt(sp.sum([w[i]**2 * self.chain_mean_errors[i]**2
                                                  for i in range(len(w))
                                                  if self.chain_sizes[i] > self.batch_size], 0))
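update_stats combines per-chain means into a weight-averaged overall mean and, treating the chains as uncorrelated (as the comment notes), propagates the per-chain standard errors. A self-contained sketch of the same combination with hypothetical chain statistics:

import numpy as np

chain_weights     = np.array([10.0, 30.0, 60.0])   # hypothetical per-chain weights
chain_means       = np.array([1.2, 1.0, 0.9])
chain_mean_errors = np.array([0.05, 0.03, 0.02])

w = chain_weights / chain_weights.sum()
mean = np.sum(w * chain_means)                              # weighted overall mean
mean_error = np.sqrt(np.sum(w**2 * chain_mean_errors**2))   # assumes no correlation
print(mean, mean_error)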
Example #6
def stats(var, tmpFilters, tmpReturns):

    results = {'total': None, 'win_ct': None, 'lose_ct': None, 'win_ratio': None, 
                    'lose_ratio': None, 'return_med': None, 'return_avg': None, 'return_stddev': None, 
                    'return_min': None, 'return_max': None, 'slope': None, 'intercept': None, 'r': None, 'r_low': None, 
                    'r_high': None, '2_tail_prob': None, 'std_err': None}
    
    total = float( len(tmpReturns) )
    winct = float( sp.greater( tmpReturns, 0 ).sum() )
    losect = total - winct
    if total > 0:
        winrt = winct / total
        losert = losect / total
    else:
        winrt = 0
        losert = 0
    if len(tmpReturns) > 0:
        returnMed = sp.median( tmpReturns )
        returnAvg = sp.mean( tmpReturns )
        returnStdDev = sp.std( tmpReturns )
        returnMin = np.min( tmpReturns )
        returnMax = np.max( tmpReturns )
    else:
        returnMed = 0
        returnAvg = 0
        returnStdDev = 0
        returnMin = 0
        returnMax = 0
    if total > 0 and var is not None:
        r = scipy.stats.linregress( tmpFilters, tmpReturns )
        corr = r[2]
        z_r = np.arctanh(corr)
        ci = 1.96
        z_low = z_r - ci/np.sqrt(len(tmpReturns)-3)
        z_high = z_r + ci/np.sqrt(len(tmpReturns)-3)
        r_low = ( np.exp(1) ** ( 2 * z_low ) - 1 ) / ( np.exp(1) ** ( 2 * z_low ) + 1 )
        r_high = ( np.exp(1) ** ( 2 * z_high ) - 1 ) / ( np.exp(1) ** ( 2 * z_high ) + 1 )

        slope = r[0]
        intercept = r[1]
        twoTail = r[3]
        stdErr = r[4]

    else:
        corr = 0
        r_low = 0
        r_high = 0
        slope = 0
        intercept = 0
        twoTail = 0
        stdErr = 0
                    
    if len(tmpReturns) > 0:
        results =  { 'total': total, 
                    'win_ct': winct, 
                    'lose_ct': losect, 
                    'win_ratio': winrt, 
                    'lose_ratio': losert, 
                    'return_med': returnMed,
                    'return_avg': returnAvg, 
                    'return_stddev': returnStdDev, 
                    'return_min': returnMin,
                    'return_max': returnMax,
                    'slope': slope, 
                    'intercept': intercept, 
                    'r': corr,
                    'r_low': r_low, 
                    'r_high': r_high, 
                    '2_tail_prob': twoTail, 
                    'std_err': stdErr}
            
    return results
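The r_low / r_high bounds above form an approximate 95% confidence interval for the correlation via the Fisher z-transform: r is mapped with arctanh, ±1.96/sqrt(n-3) is added on the z scale, and the result is mapped back. The back-transform (e^(2z) - 1) / (e^(2z) + 1) is simply tanh(z), so an equivalent, shorter sketch (with made-up data) is:

import numpy as np
import scipy.stats

tmpFilters = np.array([0.1, 0.3, 0.2, 0.5, 0.8, 0.6])        # made-up filter values
tmpReturns = np.array([0.02, 0.05, 0.01, 0.07, 0.11, 0.06])  # made-up returns

slope, intercept, r, two_tail_prob, std_err = scipy.stats.linregress(tmpFilters, tmpReturns)
z = np.arctanh(r)
half_width = 1.96 / np.sqrt(len(tmpReturns) - 3)   # 95% half-width on the z scale
r_low, r_high = np.tanh(z - half_width), np.tanh(z + half_width)
print(r, r_low, r_high)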
Example #7
def run_simulation_fast(vol, lam, sprd_client, sprd_dealer, delta_lim,
                        hedge_style, dt, nsteps, nruns, seed):
    '''Runs a Monte Carlo simulation and returns statistics on PNL, client trades, and hedge trades.
    "_fast" because it uses vectorized operations.

    vol:         lognormal volatility of the spot process
    lam:         Poisson process frequency
    sprd_client: fractional bid/ask spread for client trades, e.g. 1e-4 means 1bp.
    sprd_dealer: fractional bid/ask spread for inter-dealer hedge trades, e.g. 1e-4 means 1bp.
    delta_lim:   the delta limit at or beyond which the machine will hedge in the inter-dealer market
    hedge_style: 'Zero' or 'Edge', defining the hedging style. 'Zero' means hedge to zero position,
                 'Edge' means hedge to the nearer delta limit.
    dt:          length of a time step
    nsteps:      number of time steps for each run of the simulation
    nruns:       number of Monte Carlo runs
    seed:        RNG seed
    '''

    scipy.random.seed(seed)

    trade_prob = 1 - exp(-lam * dt)
    sqrtdt = sqrt(dt)

    spots = scipy.zeros(nruns) + 1  # initial spot == 1
    posns = scipy.zeros(nruns)
    trades = scipy.zeros(nruns)
    hedges = scipy.zeros(nruns)
    pnls = scipy.zeros(nruns)

    for step in range(nsteps):
        dzs = scipy.random.normal(0, sqrtdt, nruns)
        qs = scipy.random.uniform(0, 1, nruns)
        # +1 or -1: trade quantities if a trade happens
        ps = scipy.random.binomial(1, 0.5, nruns) * 2 - 1

        # check if there are client trades for each path

        indics = scipy.less(qs, trade_prob)
        posns += indics * ps
        trades += scipy.ones(nruns) * indics
        pnls += scipy.ones(nruns) * indics * sprd_client * spots / 2.

        # check if there are hedges to do for each path

        if hedge_style == 'Zero':
            indics = scipy.logical_or(scipy.less_equal(posns, -delta_lim),
                                      scipy.greater_equal(posns, delta_lim))
            pnls -= scipy.absolute(posns) * indics * sprd_dealer * spots / 2.
            posns -= posns * indics
            hedges += scipy.ones(nruns) * indics
        elif hedge_style == 'Edge':
            # first deal with cases where pos>delta_lim

            indics = scipy.greater(posns, delta_lim)
            pnls -= (posns - delta_lim) * indics * sprd_dealer * spots / 2.
            posns = posns * scipy.logical_not(indics) + scipy.ones(
                nruns) * indics * delta_lim
            hedges += scipy.ones(nruns) * indics

            # then the cases where pos<-delta_lim

            indics = scipy.less(posns, -delta_lim)
            pnls -= (-delta_lim - posns) * indics * sprd_dealer * spots / 2.
            posns = posns * scipy.logical_not(indics) + scipy.ones(
                nruns) * indics * (-delta_lim)
            hedges += scipy.ones(nruns) * indics
        else:
            raise ValueError('hedge_style must be "Edge" or "Zero"')

        # advance the spots and calculate period PNL

        dspots = vol * spots * dzs
        pnls += posns * dspots
        spots += dspots

    return {
        'PNL': (pnls.mean(), pnls.std()),
        'Trades': (trades.mean(), trades.std()),
        'Hedges': (hedges.mean(), hedges.std())
    }
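A possible call, with illustrative parameter values (none of them prescribed by the listing):

results = run_simulation_fast(
    vol=0.2,             # 20% lognormal volatility
    lam=1.0,             # on average one client trade per unit time
    sprd_client=1e-4,    # 1bp client spread
    sprd_dealer=2e-4,    # 2bp inter-dealer spread
    delta_lim=3.0,       # hedge once |position| reaches 3
    hedge_style='Zero',  # hedge back to a flat position
    dt=0.001,
    nsteps=1000,
    nruns=10000,
    seed=42)

print('PNL    (mean, std):', results['PNL'])
print('Trades (mean, std):', results['Trades'])
print('Hedges (mean, std):', results['Hedges'])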