Example #1
    def random(self, *phi, plates=None):
        """
        Draw a random sample from the distribution.
        """
        # Convert natural parameters to transition probabilities
        p0 = np.exp(phi[0] - misc.logsumexp(phi[0], axis=-1, keepdims=True))
        P = np.exp(phi[1] - misc.logsumexp(phi[1], axis=-1, keepdims=True))
        # Explicit broadcasting
        P = P * np.ones(plates)[..., None, None, None]
        # Allocate memory
        Z = np.zeros(plates + (self.N,), dtype=int)
        # Draw initial state
        Z[..., 0] = random.categorical(p0, size=plates)
        # Create [0,1,2,...,len(plate_axis)] indices for each plate axis and
        # make them broadcast properly
        nplates = len(plates)
        plates_ind = [np.arange(plate)[(Ellipsis,) + (nplates - i - 1) * (None,)] for (i, plate) in enumerate(plates)]
        plates_ind = tuple(plates_ind)
        # Draw next states iteratively
        for n in range(self.N - 1):
            # Select the transition probabilities for the current state but take
            # into account the plates.  This leads to complex NumPy
            # indexing.. :)
            time_ind = min(n, np.shape(P)[-3] - 1)
            ind = plates_ind + (time_ind, Z[..., n], Ellipsis)
            p = P[ind]
            # Draw next state
            z = random.categorical(p)
            Z[..., n + 1] = z

        return Z
Example #2
def f1(u, eigvals, Z10, Z11):
    """
    A component of exact time pdf (Eq. 22, HJC92).

    Parameters
    ----------
    u : float
        u = t - tres
    eigvals : array_like, shape (k,)
        Eigenvalues of -Q matrix.
    Z10, Z11 (or gama10, gama11) : list of array_likes
        Constants for the exact open/shut time pdf. Z10, Z11 for likelihood
        calculation or gama10, gama11 for time distributions.

    Returns
    -------
    f : ndarray
    """

    if Z10.ndim > 1:
        f = np.sum((Z10 + Z11 * u) *
                   np.exp(-eigvals * u).reshape(Z10.shape[0], 1, 1), axis=0)
    else:
        f = np.sum((Z10 + Z11 * u) * np.exp(-eigvals * u))
    return f
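A quick sanity check of f1 (not from the original module; assumes numpy imported as np): with Z11 = 0 the matrix branch reduces to a plain sum of exponentials.

import numpy as np

eigvals = np.array([1.0, 2.0])
Z10 = np.ones((2, 3, 3))    # toy constants, shape (k, m, m)
Z11 = np.zeros((2, 3, 3))
# Every entry of the result should equal exp(-1*0.5) + exp(-2*0.5)
out = f1(0.5, eigvals, Z10, Z11)
assert np.allclose(out, np.exp(-0.5) + np.exp(-1.0))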
Example #3
def log_diff_exp(x, axis=0):
    """ Calculates the logarithm of the diffs of e to the power of input 'x'. The method tries to avoid
        overflows by using the relationship: log(diff(exp(x))) = alpha + log(diff(exp(x-alpha))).
        
    :Parameter:
        x:    data.
             -type: float or numpy array 
          
        axis: Axis along which the diffs are taken.
             -type: int
        
    :Return:
        Logarithm of the diffs of exp of x. 
       -type: float or numpy array.
        
    """
    alpha = x.max(axis) - numx.log(numx.finfo(numx.float64).max)/2.0
    if axis == 1:
        return numx.squeeze(alpha + numx.log(numx.diff(numx.exp(x.T - alpha), n=1, axis=0)))
    else:
        return numx.squeeze(alpha + numx.log(numx.diff(numx.exp(x - alpha), n=1, axis=0)))
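A minimal sketch of why the alpha shift matters (assumes numx is an alias for numpy, as the surrounding code suggests):

import numpy as numx

x = numx.array([1000.0, 1001.0, 1003.0])
# Naive evaluation overflows: exp(1000) is inf in float64, so the diff is nan.
# log_diff_exp shifts by alpha first, keeping every intermediate finite.
print(log_diff_exp(x))                    # ~[1000.54, 1002.85]
print(numx.log(numx.diff(numx.exp(x))))   # [nan, nan] plus overflow warnings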
Example #4
    def plot_open_close(self, fig=None, savefig=True):
        '''
        Plot the open period versus shut periods.
        '''

        stretch_list = self.cluster_data.compute_open_close()
        mode_num = len(stretch_list)

        if fig is None:
            fig = plt.figure()
        ax = fig.add_subplot(111)
        cmap = np.linspace(0,1,mode_num)

        for index, stretch in enumerate(stretch_list):
            ax.scatter(stretch['open_period'], stretch['shut_period'],
                       facecolors='none',
                       edgecolors=plt.cm.nipy_spectral(cmap[index]),
                       s=1, label=str(index + 1))

        ax.legend()
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.set_ylim([0.3, np.exp(7)])
        ax.set_xlim([0.3, np.exp(5)])
        ax.set_xlabel('Open period (ms in log scale)')
        ax.set_ylabel('Shut period (ms in log scale)')
        ax.set_title('Open/Shut')
        if savefig:
            fig.savefig(os.path.join(self.filepath, self.name + 'Open_Shut.png'), dpi=150)
Example #5
def mean_quadratic_weighted_kappa(kappas, weights=None):
    """
    Calculates the mean of the quadratic
    weighted kappas after applying Fisher's r-to-z transform, which is
    approximately a variance-stabilizing transformation.  This
    transformation is undefined if one of the kappas is 1.0, so all kappa
    values are capped in the range (-0.999, 0.999).  The reverse
    transformation is then applied before returning the result.

    mean_quadratic_weighted_kappa(kappas), where kappas is a vector of
    kappa values

    mean_quadratic_weighted_kappa(kappas, weights), where weights is a vector
    of weights that is the same size as kappas.  Weights are applied in the
    z-space
    """
    kappas = np.array(kappas, dtype=float)
    if weights is None:
        weights = np.ones(np.shape(kappas))
    else:
        weights = weights / np.mean(weights)

    # ensure that kappas are in the range [-.999, .999]
    kappas = np.clip(kappas, -0.999, 0.999)

    z = 0.5 * np.log((1 + kappas) / (1 - kappas)) * weights
    z = np.mean(z)
    return (np.exp(2 * z) - 1) / (np.exp(2 * z) + 1)
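Usage sketch (toy kappa values, not from the original source): because averaging happens in z-space, the result for positive kappas comes out slightly above the plain mean.

import numpy as np

print(mean_quadratic_weighted_kappa([0.5, 0.7]))                      # ~0.61, not 0.60
print(mean_quadratic_weighted_kappa([0.5, 0.7], np.array([2., 1.])))  # weights applied in z-space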
Example #6
def filter(input):
  infilter = DoubleInPointerFilter(input)
  infilter.output_sampling_rate = sampling_rate
  
  attackreleasefilter = DoubleAttackReleaseFilter(1)
  attackreleasefilter.input_sampling_rate = sampling_rate
  attackreleasefilter.set_input_port(0, infilter, 0)
  attackreleasefilter.attack = np.exp(-1/(sampling_rate*1e-3))
  attackreleasefilter.release = np.exp(-1/(sampling_rate*10e-3))
  
  outdata = np.zeros(processsize, dtype=np.float64)
  outfilter = DoubleOutPointerFilter(outdata)
  outfilter.input_sampling_rate = sampling_rate
  outfilter.set_input_port(0, attackreleasefilter, 0)
  
  attackreleasefilter2 = DoubleAttackReleaseHysteresisFilter(1)
  attackreleasefilter2.input_sampling_rate = sampling_rate
  attackreleasefilter2.set_input_port(0, infilter, 0)
  attackreleasefilter2.attack = np.exp(-1/(sampling_rate*1e-3))
  attackreleasefilter2.release = np.exp(-1/(sampling_rate*10e-3))
  attackreleasefilter2.release_hysteresis = .5
  attackreleasefilter2.attack_hysteresis = .9
  
  outdata2 = np.zeros(processsize, dtype=np.float64)
  outfilter_hyst = DoubleOutPointerFilter(outdata2)
  outfilter_hyst.input_sampling_rate = sampling_rate
  outfilter_hyst.set_input_port(0, attackreleasefilter2, 0)
  
  pipelineend = PipelineGlobalSinkFilter()
  pipelineend.input_sampling_rate = sampling_rate
  pipelineend.add_filter(outfilter)
  pipelineend.add_filter(outfilter_hyst)
  pipelineend.process(processsize)
  
  return outdata, outdata2
Example #7
def sample(scores, temperature=1.0):
    """
    Sampling words (each sample is drawn from a categorical distribution).

    In:
        scores - array of size #samples x #classes; 
            every entry determines a score for sample i having class j
        temperature - temperature for the predictions;
            the higher the flatter probabilities and hence more random answers

    Out:
        set of indices chosen as output, a vector of size #samples
    """
    logscores = np.log(scores) / temperature

    # numerically stable version
    normalized_logscores = logscores - np.max(logscores, axis=-1)[:, np.newaxis]
    margin_logscores = np.sum(np.exp(normalized_logscores), axis=-1)
    probs = np.exp(normalized_logscores) / margin_logscores[:,np.newaxis]

    draws = np.zeros_like(probs)
    num_samples = probs.shape[0]
    # we use 1 trial to mimic categorical distributions using multinomial
    for k in range(num_samples):
        draws[k, :] = np.random.multinomial(1, probs[k, :], 1)
    return np.argmax(draws, axis=-1)
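A small usage sketch (assumes numpy; scores must be strictly positive because they go through np.log):

import numpy as np

np.random.seed(0)
scores = np.array([[0.1, 0.9],
                   [0.5, 0.5]])
print(sample(scores, temperature=0.5))    # row 0 almost always yields class 1
print(sample(scores, temperature=100.0))  # near-uniform draws for both rows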
Example #8
def posterior(kpl, pk, err, pkfold=None, errfold=None):
    k0 = n.abs(kpl).argmin()
    kpl = kpl[k0:]
    if pkfold is None:
        print('Folding for posterior')
        pkfold = pk[k0:].copy()
        errfold = err[k0:].copy()
        pkpos,errpos = pk[k0+1:].copy(), err[k0+1:].copy()
        pkneg,errneg = pk[k0-1:0:-1].copy(), err[k0-1:0:-1].copy()
        pkfold[1:] = (pkpos/errpos**2 + pkneg/errneg**2) / (1./errpos**2 + 1./errneg**2)
        errfold[1:] = n.sqrt(1./(1./errpos**2 + 1./errneg**2))

    ind = n.logical_and(kpl>.15, kpl<.5)
    kpl = kpl[ind]
    pk = kpl**3 * pkfold[ind]/(2*n.pi**2)
    err = kpl**3 * errfold[ind]/(2*n.pi**2)
    s = n.logspace(5.,6.5,100)
    data = []
    for ss in s:
        data.append(n.exp(-.5*n.sum((pk.real - ss)**2 / err**2)))
    data = n.array(data)
    data /= n.max(data)
    p.figure(5)
    p.plot(s, data)
    p.plot(s, n.exp(-.5)*n.ones_like(s))
    p.plot(s, n.exp(-.5*2**2)*n.ones_like(s))
    p.show()
Example #9
 def __set_static_gaus_pmfs(self):
     if not self.off_buff.is_full():
         print("The long term buffer is not yet full.  This may give undesirable results")
     
     # median RSS of off-state buffer
     cal_med = self.off_buff.get_no_nan_median()
     
     if (np.sum(cal_med == 127) > 0) | (np.sum(np.isnan(cal_med)) > 0):
         sys.stderr.write('At least one link has a median of 127 or is nan\n\n')
         quit()
          
     if (np.sum(np.isnan(self.off_buff.get_nanvar())) > 0):
         sys.stderr.write('the long term buffer has a nan')
         quit()
     
     cal_med_mat = np.tile(cal_med,(self.V_mat.shape[1],1)).T
     
     # variance of RSS during calibration
     cal_var = np.maximum(self.off_buff.get_nanvar(),self.omega) #3.0 
     cal_var_mat = np.tile(cal_var,(self.V_mat.shape[1],1)).T
     
     # Compute the off_link emission probabilities for each link
     x = np.exp(- (self.V_mat - cal_med_mat)**2/(2*cal_var_mat/1.0)) # 1.0
     self.off_links = self.__normalize_pmf(x)
     
     # Compute the on_link emission probabilities for each link
     x = np.exp(- (self.V_mat - (cal_med_mat-self.Delta))**2/(self.eta*2*cal_var_mat)) # 3
     self.on_links = self.__normalize_pmf(x) 
Example #10
    def test_square_exponential_covariance_one_dim(self):
        """Test the SquareExponential covariance function against correct values for different sets of hyperparameters in 1D."""
        for hyperparameters in self.one_dim_test_sets:
            signal_variance = hyperparameters[0]
            length = hyperparameters[1]
            covariance = self.CovarianceClass(hyperparameters)

            # One length away
            truth = signal_variance * numpy.exp(-0.5)
            self.assert_scalar_within_relative(
                covariance.covariance(numpy.array([0.0]), numpy.array(length)),
                truth,
                self.epsilon,
            )
            # Sym
            self.assert_scalar_within_relative(
                covariance.covariance(numpy.array(length), numpy.array([0.0])),
                truth,
                self.epsilon,
            )

            # One length * sqrt 2 away
            truth = signal_variance * numpy.exp(-1.0)
            self.assert_scalar_within_relative(
                covariance.covariance(numpy.array([0.0]), numpy.array([length * numpy.sqrt(2)])),
                truth,
                self.epsilon,
            )
Example #11
    def solveParams(self):
        # Given the specified values for w, z1 and z2, determine the offsets
        # xc and zc required to match a catenary to our cable.  This is done
        # algebraically.  The derivation of the equation was performed with
        # the sympy package.

        w = self.w
        a = self.a
        zd = self.z2 - self.z1

        # calculate some repeated elements
        e2wa = np.exp(2 * w / a)
        ewa = np.exp(w / a)
        a2 = a ** 2

        # calculate the 3 components
        c1 = (a2 * e2wa - 2 * a2 * ewa + a2 + zd ** 2 * ewa) * ewa
        c2 = (-2 * a * e2wa + 2 * a * ewa)
        c3 = zd / (a * (ewa - 1))

        # Determine the x offset ...
        self.xc = a * np.log(2 * np.abs(np.sqrt(c1) / c2) + c3)

        # ... and from this the z offset
        self.zc = self.z1 - a * np.cosh(self.xc / a)
Example #12
def rbf_kernel(X, Y=None, gamma=None):
    """
    Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)

    Y : array of shape (n_samples_Y, n_features)

    gamma : float

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    K = euclidean_distances(X, Y, squared=True)
    K *= -gamma
    np.exp(K, K)    # exponentiate K in-place
    return K
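This appears to be the implementation shipped in scikit-learn, so a usage sketch is simply the pairwise API:

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

X = np.random.RandomState(0).rand(4, 3)
K = rbf_kernel(X, gamma=0.5)
# The diagonal is exp(0) = 1 and the matrix is symmetric.
assert np.allclose(np.diag(K), 1.0) and np.allclose(K, K.T)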
Example #13
File: tree_based.py Project: chrinide/PyFV
def CRRbinomial(S, K, T, rf, sigma, n):
    '''
    Option pricing using a binomial tree (CRR), no dividend
    :param S: underlying current price
    :param K: option strike price
    :param T: time to expiry
    :param rf: risk-free rate
    :param sigma: volatility
    :param n: number of periods to T
    :return: European call and put prices
    '''
    dt = float(T)/n
    u = np.exp(sigma * (dt**0.5))
    d = 1./u
    p = (np.exp(rf*dt)-d)/(u-d)

    euroCall, euroPut = 0, 0

    for idx in range(0, n+1):
        prob = spmisc.comb(n, idx) * (p**idx) * (1-p)**(n-idx)
        euroCall += prob*max(S*(u**idx)*(d**(n-idx))-K, 0)
        euroPut += prob*max(K-S*(u**idx)*(d**(n-idx)), 0)

    euroCall *= np.exp(-rf*T)
    euroPut *= np.exp(-rf*T)
    return euroCall, euroPut
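A quick check (toy parameters; spmisc assumed to be scipy.special, which provides comb): with many steps the prices approach the Black-Scholes values, roughly 10.45 for the call and 5.57 for the put with these inputs.

from scipy import special as spmisc  # assumption about the original alias

call, put = CRRbinomial(S=100, K=100, T=1.0, rf=0.05, sigma=0.2, n=500)
print(call, put)  # ~10.45, ~5.57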
Example #14
def mycavvaccaleib(x, p, secondg=False):
    """Fit the magnitudes with a Vacca & Leibundgut (1997) analytical model.

    p is the parameter list; if secondg is True, a second gaussian is added.
    Parameters:
      p[0] = first gaussian normalization (negative if fitting mag)
      p[1] = first gaussian mean
      p[2] = first gaussian sigma
      p[3] = linear decay offset
      p[4] = linear decay slope
      p[5] = exponential rise slope
      p[6] = exponential zero point
      p[7] = second gaussian normalization (negative if fitting mag)
      p[8] = second gaussian mean
      p[9] = second gaussian sigma
    """
    latebump = False
    if p is None:
        return (x)*99e9
    if p[8] > p[0]:
        # print("late bump")
        latebump = True
    g = p[4]*(x) + p[3]
    g += p[0]*np.exp(-(x-p[1])**2/p[2]**2)
    g *= (np.exp(-p[5]*(x-p[6])) + 1)
    if secondg:
        g += p[7]*np.exp(-(x-p[8])**2/p[9]**2)
    if latebump and p[8]-p[1] < 15:
        g += (np.zeros(len(g), float) + 1)
    if p[8]-p[1] > 70:
        g += (np.zeros(len(g), float) + 1)
    return g
Example #15
  def testCustomGradient(self):
    dtype = dtypes.float32

    @function.Defun(dtype, dtype, dtype)
    def XentLossGrad(logits, labels, dloss):
      dlogits = array_ops.reshape(dloss, [-1, 1]) * (
          nn_ops.softmax(logits) - labels)
      dlabels = array_ops.zeros_like(labels)
      # Takes exp(dlogits) to differentiate it from the "correct" gradient.
      return math_ops.exp(dlogits), dlabels

    @function.Defun(dtype, dtype, grad_func=XentLossGrad)
    def XentLoss(logits, labels):
      return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
                                 1)

    g = ops.Graph()
    with g.as_default():
      logits = array_ops.placeholder(dtype)
      labels = array_ops.placeholder(dtype)
      loss = XentLoss(logits, labels)
      dlogits = gradients_impl.gradients([loss], [logits])

    x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
    y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
    for cfg in _OptimizerOptions():
      tf_logging.info("cfg = %s", cfg)
      with session.Session(graph=g, config=cfg) as sess:
        out, = sess.run(dlogits, {logits: x, labels: y})
      self.assertAllClose(out, np.exp(prob - y))
Example #16
def bbox_transform_inv(boxes, deltas):
    if boxes.shape[0] == 0:
        return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)

    boxes = boxes.astype(deltas.dtype, copy=False)

    widths = boxes[:, 2] - boxes[:, 0] + 1.0
    heights = boxes[:, 3] - boxes[:, 1] + 1.0
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    dx = deltas[:, 0::4]
    dy = deltas[:, 1::4]
    dw = deltas[:, 2::4]
    dh = deltas[:, 3::4]

    pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
    pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
    pred_w = np.exp(dw) * widths[:, np.newaxis]
    pred_h = np.exp(dh) * heights[:, np.newaxis]

    pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
    # x1
    pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
    # y1
    pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
    # x2
    pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
    # y2
    pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h

    return pred_boxes
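A behaviour sanity check (toy values, not from the original repo): zero deltas keep the box centre and size, with x2/y2 growing by 1 due to the +1 width/height convention, while dw = dh = log(2) doubles the box around the same centre.

import numpy as np

boxes = np.array([[0., 0., 9., 9.]])       # one 10x10 box, inclusive coords
print(bbox_transform_inv(boxes, np.zeros((1, 4))))
# [[ 0.  0. 10. 10.]]
deltas = np.array([[0., 0., np.log(2.), np.log(2.)]])
print(bbox_transform_inv(boxes, deltas))   # same centre (5, 5), 20x20 box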
Example #17
def convolve_template():
    from plot_Profile_evolution import all_obs
    import matplotlib.pyplot as plt
    import numpy as np

    date_list, observations = all_obs()
    x = np.arange(512 - 25 - 50, 512 + 45 + 51)

    delay = []
    for n in observations:
        best_mu = 0
        best_corr = 0

        for mu in np.linspace(512 - 25, 512 + 45, 701):
            template = np.exp(-(x - 512) ** 2 / (2.0 * 6.2 ** 2)) + 0.09 * np.exp(-(x - mu) ** 2 / (2.0 * 8.0 ** 2))
            template /= template.max()
            corr = np.correlate(n, template, mode="valid").max()

            if corr > best_corr:
                best_corr = corr
                best_mu = mu

        delay.append(best_mu - 512)

    plt.plot(date_list, delay, "ko")
    plt.show()
Example #18
def InterpBeadError(X, y, bead1, bead2, write=False, name="00"):
    '''X = []
    y = []
    for i in xrange(1000):
        a,b = generatecandidate3(.5,.25,.1)
        X.append(a)
        y.append(b)'''
        
    X = np.array(X)
    y = np.array(y)

    errors = []
    

    for tt in range(100):
        #Should make this architecture independent at some point
        t = tt/100.
        layer_1 = 1/(1+np.exp(-(np.dot(X,synapse_interpolate(bead1[0],bead2[0],t)))))
        layer_2 = 1/(1+np.exp(-(np.dot(layer_1,synapse_interpolate(bead1[1],bead2[1],t)))))
        #layer_3 = 1/(1+np.exp(-(np.dot(layer_2,synapse_interpolate(bead1[2],bead2[2],t)))))

        # how much did we miss the target value?
        layer_2_error = layer_2 - y
        
        errors.append(np.mean(np.abs(layer_2_error)))

    if write:
        with open("f" + str(name) + ".out",'w+') as f:
            for e in errors:
                f.write(str(e) + "\n")
    
    return max(errors)
Example #19
    def mat_unitary(self):
        s = [
            math.sin(2 * self.at0),
            math.sin(2 * self.at1),
            math.sin(2 * self.aq0),
            math.sin(2 * self.aq1),
            math.sin(2 * self.aq2),
        ]
        c = [
            math.cos(2 * self.at0),
            math.cos(2 * self.at1),
            math.cos(2 * self.aq0),
            math.cos(2 * self.aq1),
            math.cos(2 * self.aq2),
        ]
        phi = [
            numpy.exp(complex(0, 4 * self.pt1)),
            numpy.exp(complex(0, 2 * self.pt2)),
            numpy.exp(complex(0, -2 * self.pt2)),
        ]

        U = numpy.array(
            [
                [c[2] * c[3], -phi[0] * s[0] * s[2] * c[3] - phi[1] * c[0] * s[1] * s[3]],
                [c[2] * s[3], -phi[0] * s[0] * s[2] * s[3] + phi[1] * c[0] * s[1] * c[3]],
                [s[2] * c[4], phi[0] * s[0] * c[2] * c[4] + phi[2] * c[0] * c[1] * s[4]],
                [s[2] * s[4], phi[0] * s[0] * c[2] * s[4] - phi[2] * c[0] * c[1] * c[4]],
            ]
        )

        theta = cmath.phase(U[0, 1])
        for i in range(4):
            U[i, 1] = U[i, 1] * numpy.exp(complex(0, -theta))

        return U
Example #20
File: rbf.py Project: mbentz80/jzigbeercp
    def fwd_all(self,X,w=None):
        """ Propagate values forward through the net.
        Inputs:
                inputs      - vector of input values
                w           - packed array of weights
        Returns:
                array of outputs for all input patterns
        """
        if w is not None:
            self.wp = w
        self.unpack()
        # compute hidden unit values
        z = N.zeros((len(X),self.centers.shape[0]))
        for i in range(len(X)):
             z[i] = N.exp((-1.0/(2*self.variance))*(N.sum((X[i]-self.centers)**2,axis=1)))
        # compute net outputs
        o = N.dot(z,self.w) + N.dot(N.ones((len(z),1)),self.b)
        # compute final output activations
        if self.outfxn == 'linear':
            y = o
        elif self.outfxn == 'logistic':     # TODO: check for overflow here...
            y = 1/(1+N.exp(-o))
        elif self.outfxn == 'softmax':      # TODO: and here...
            tmp = N.exp(o)
            y = tmp/(N.sum(tmp, 1)*N.ones((1, self.no)))

        return N.array(y)
Example #21
File: SBOcv.py Project: toscanosaul/SBO
def gradWB(new,kern,BN,keep,points):
    """Computes the vector of gradients with respect to w_{n+1} of
	B(x_{p},n+1)=\int\Sigma_{0}(x_{p},w,x_{n+1},w_{n+1})dp(w),
	where x_{p} is a point in the discretization of the domain of x.
        
       Args:
          new: Point (x_{n+1},w_{n+1})
          kern: Kernel
          keep: Indexes of the points kept from the discretization of the domain of x,
                after using AffineBreakPoints
          BN: Vector B(x_{p},n+1), where x_{p} is a point in the discretization of
              the domain of x.
          points: Discretization of the domain of x
    """
    alpha1=0.5*((kern.alpha[0:n1])**2)/scaleAlpha[0:n1]**2
    alpha2=0.5*((kern.alpha[n1:n1+n2])**2)/scaleAlpha[n1:n1+n2]**2
    variance0=kern.variance
    wNew=new[0,n1:n1+n2].reshape((1,n2))
    gradWBarray=np.zeros([len(keep),n2])
    M=len(keep)
   # parameterLamb=parameterSetsPoisson
    X=new[0,0:n1]
    W=new[0,n1:n1+n2]
    
    num=0
    for i in range(n1):
        num+=(2.0*alpha2*(i-wNew))*np.exp(-alpha2*((i-wNew)**2))
    num=num/n1
    for j in range(M):
        gradWBarray[j,0]=num*(variance0)*np.exp(np.sum(alpha1*((points[keep[j],:]-X)**2)))
    return gradWBarray
Example #22
 def all_GL(self, q, maxpiv=None):
     """return (piv, f_binodal_gas, f_binodal_liquid, f_spinodal_gas, f_spinodal_liquid) at insersion works piv sampled between the critical point and maxpiv (default to 2.2*critical pressure)"""
     fc, pivc = self.critical_point(q)
     Fc = np.log(fc)
     #start sensibly above the critical point
     startp = pivc*1.1
     fm = fminbound(self.mu, fc, self.maxf(), args=(startp, q))
     fM = fminbound(lambda f: -self.pv(f, startp, q), 0, fc)
     initial_guess = np.log([0.5*fM, 0.5*(fm+self.maxf())])
     #construct the top of the GL binodal
     if maxpiv is None:
         maxpiv = startp*2
     topp = 1./np.linspace(1./startp, 1./maxpiv)
     topGL = [initial_guess]
     for piv in topp:
         topGL.append(self.binodalGL(piv, q, topGL[-1]))
     #construct the GL binodal between the starting piv and the critical point
     botp = np.linspace(startp, pivc)[:-1]
     botGL = [initial_guess]
     for piv in botp:
         botGL.append(self.binodalGL(piv, q, botGL[-1]))
     #join the two results and convert back from log
     binodal = np.vstack((
         [[pivc, fc, fc]],
         np.column_stack((botp, np.exp(botGL[1:])))[::-1],
         np.column_stack((topp, np.exp(topGL[1:])))[1:]
         ))
     #spinodal at the same pivs
     spinodal = self.spinodalGL(q, binodal[:,0])
     #join everything
     return np.column_stack((binodal, spinodal[:,1:]))
Example #23
 def update(self,proposal,logp,bad,i):
     logps = self.logps[i-1]
     thresh = self.thresh
     if logp>logps:
         self.logps[i] = logp
         self.trace[i] = proposal
         self.dets.append([d.value for d in self.deterministics])
         self.nbad = 0
         self.stuck = False
         self.temp = 1.
         return
     self.nbad += 1
     if self.nbad>self.thresh and not self.stuck:
         self.stuck = True
         self.logpTmp = logps
     if self.stuck:
         r = log(rand())*self.temp
         print('stuck', i, logps, self.logpTmp, logp, r, logp-self.logpTmp)
         if logp-self.logpTmp>r:
             self.logpTmp = logp
             self.temp /= numpy.exp(1./thresh)
         else:
             self.temp *= numpy.exp(1./thresh)
     self.logps[i] = self.logps[i-1]
     self.trace[i] = self.trace[i-1].copy()
     self.dets.append(self.dets[-1])
Example #24
def test_solve_poisson_becke_sa():
    sigma = 8.0
    rtf = ExpRTransform(1e-4, 1e2, 500)
    r = rtf.get_radii()
    rhoy = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
    rhod = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5*(-r/sigma)/sigma
    rho = CubicSpline(rhoy, rhod, rtf)
    v = solve_poisson_becke([rho])[0]

    s2s = np.sqrt(2)*sigma
    soly = erf(r/s2s)/r
    sold = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2

    if False:
        import matplotlib.pyplot as pt
        n = 10
        pt.clf()
        pt.plot(r[:n], soly[:n], label='exact')
        pt.plot(r[:n], v.y[:n], label='spline')
        pt.legend(loc=0)
        pt.savefig('denu.png')

    assert abs(v.y - soly).max()/abs(soly).max() < 1e-6
    assert abs(v.dx - sold).max()/abs(sold).max() < 1e-4
    # Test the boundary condition at zero and infinity
    assert v.extrapolation.l == 0
    np.testing.assert_allclose(v.extrapolation.amp_left, np.sqrt(2/np.pi)/sigma)
    np.testing.assert_allclose(v.extrapolation.amp_right, 1.0)
Example #25
    def __init__(self, ps=None, sigma_v=0.0, redshift=0.0, **kwargs):
        if ps is None:
            from os.path import join, dirname
            #psfile = join(dirname(__file__),"data/ps_z1.5.dat")
            #psfile = join(dirname(__file__),"data/wigglez_halofit_z1.5.dat")
            psfile = join(dirname(__file__),"data/wigglez_halofit_z0.8.dat")
            print "loading matter power file: " + psfile
            redshift = 0.8

            #pk_interp = cs.LogInterpolater.fromfile(psfile)
            pwrspec_data = np.genfromtxt(psfile)

            (log_k, log_pk) = (np.log(pwrspec_data[:,0]), \
                               np.log(pwrspec_data[:,1]))

            logpk_interp = interpolate.interp1d(log_k, log_pk,
                                                bounds_error=False,
                                                fill_value=np.min(log_pk))

            pk_interp = lambda k: np.exp(logpk_interp(np.log(k)))

            kstar = 7.0
            ps = lambda k: np.exp(-0.5 * k**2 / kstar**2) * pk_interp(k)

        self._sigma_v = sigma_v

        RedshiftCorrelation.__init__(self, ps_vv=ps, redshift=redshift)
Example #26
def filter_annular_bp_kernel(shape, dtype, freq1, freq2):
    ''' Generate an annular band-pass filter kernel
    
    Todo: optimize kernel
    
    :Parameters:
    
    shape : tuple
            Tuple of ints describing the shape of the kernel
    dtype : dtype
            Data type of the image
    freq1 : float
            Upper cutoff frequency (sigmoid low-pass edge)
    freq2 : float
            Lower cutoff frequency (width of the Gaussian high-pass)
    
    :Returns:
    
    out : array
          Annular BP kernel
    '''
    
    kernel = numpy.zeros(shape, dtype=dtype)
    irad = radial_image(shape)
    val =  (1.0/(1.0+numpy.exp(((numpy.sqrt(irad)-freq1))/(10.0))))*(1.0-(numpy.exp(-irad /(2*freq2*freq2))))
    kernel[:, :].real = val
    kernel[:, :].imag = val
    return kernel
Example #27
File: som.py Project: PepGardiola/PyMVPA
    def _compute_influence_kernel(self, iter, dqd):
        """Compute the neighborhood kernel for some iteration.

        Parameters
        ----------
        iter : int
          The iteration for which to compute the kernel.
        dqd : array (nrows x ncolumns)
          This is one quadrant of Euclidean distances between Kohonen unit
          locations.
        """
        # compute radius decay for this iteration
        curr_max_radius = self.radius * np.exp(-1.0 * iter / self.iter_scale)

        # same for learning rate
        curr_lrate = self.lrate * np.exp(-1.0 * iter / self.iter_scale)

        # compute Gaussian influence kernel
        infl = np.exp((-1.0 * dqd) / (2 * curr_max_radius * iter))
        infl *= curr_lrate

        # hard-limit kernel to max radius
        # XXX is this really necessary?
        infl[dqd > curr_max_radius] = 0.

        return infl
Example #28
File: Model.py Project: jesscyzhao/moulton
    def logLikelihood(self, obs_seq, num_cluster, error_rate_dict, switch_rate_dict):
        """

        :param obs_seq: state list of the same length as observation sequence
        :param num_cluster: int
        :param error_rate_dict: dict
        :param switch_rate_dict: dict
        :return: log likelihood
        """
        pi = [1/num_cluster] * num_cluster
        path_length = len(obs_seq)

        alpha_dict = dict()

        alpha_dict[0] = dict(zip(range(num_cluster), [np.log(pi[i]) + np.log(error_rate_dict[i][obs_seq[0]])
                                                      for i in range(num_cluster)]))

        for t in range(path_length-1):
            alpha_dict[t+1] = dict()
            for j in range(num_cluster):
                log_alpha_j = np.array([(alpha_dict[t][i] + np.log(switch_rate_dict[i][j])) for i in range(num_cluster)])
                max_log_alpha_j = max(log_alpha_j)
                sum_residual = np.sum(np.exp(log_alpha_j - max_log_alpha_j))
                final = max_log_alpha_j + np.log(sum_residual)
                alpha_dict[t+1][j] = final + np.log(error_rate_dict[j][obs_seq[t + 1]])

        alpha_df = pd.DataFrame.from_dict(alpha_dict)
        max_final = max(alpha_df.iloc[:, path_length-1])

        llk = max_final + np.log(np.sum(np.exp(alpha_df.iloc[:, path_length-1] - max_final)))

        return llk
Example #29
def firwin_complex_bandpass(num_taps, cutoffs, window='hamming'):
    width, center = max(cutoffs) - min(cutoffs), (cutoffs[0] + cutoffs[1]) / 2
    b = scipy.signal.firwin(num_taps, width / 2, window='rectangular', scale=False)
    b = b * numpy.exp(1j * numpy.pi * center * numpy.arange(len(b)))
    b = b * scipy.signal.get_window(window, num_taps, False)
    b = b / numpy.sum(b * numpy.exp(-1j * numpy.pi * center * (numpy.arange(num_taps) - (num_taps - 1) / 2)))
    return b.astype(numpy.complex64)
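Usage sketch (cutoffs assumed to be normalized frequencies in (0, 1), as scipy.signal.firwin expects):

taps = firwin_complex_bandpass(129, [0.1, 0.3])  # passband 0.1..0.3, Nyquist = 1
print(taps.dtype, taps.shape)                    # complex64 (129,)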
Example #30
File: ais.py Project: gdesjardins/DBM
def estimate_from_weights(log_ais_w):
    """ Safely computes the log-average of the ais-weights.

    Inputs
    ------
    log_ais_w: T.vector
        Symbolic vector containing log_ais_w^{(m)}.

    Returns
    -------
    dlogz: scalar
        log(Z_B) - log(Z_A).
    var_dlogz: scalar
        Variance of our estimator.
    """
    # Utility function for safely computing log-mean of the ais weights.
    ais_w = T.vector()
    max_ais_w = T.max(ais_w)
    dlogz = T.log(T.mean(T.exp(ais_w - max_ais_w))) + max_ais_w
    log_mean = theano.function([ais_w], dlogz, allow_input_downcast=False)

    # estimate the log-mean of the AIS weights
    dlogz = log_mean(log_ais_w)

    # estimate log-variance of the AIS weights
    # VAR(log(X)) \approx VAR(X) / E(X)^2 = E(X^2)/E(X)^2 - 1
    m = numpy.max(log_ais_w)
    var_dlogz = (log_ais_w.shape[0] *
                 numpy.sum(numpy.exp(2 * (log_ais_w - m))) /
                 numpy.sum(numpy.exp(log_ais_w - m)) ** 2 - 1.)

    return dlogz, var_dlogz
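The stable log-mean at the core of this is the usual log-sum-exp shift; a numpy-only sketch of the same identity (no theano required):

import numpy as np

log_ais_w = np.array([708.0, 709.0, 710.0])  # exp(710) overflows float64
m = log_ais_w.max()
dlogz = np.log(np.mean(np.exp(log_ais_w - m))) + m
print(dlogz)  # ~709.31, finite, whereas exponentiating directly blows up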
Example #31
def test_WeightedDataFrame_neff():
    df = test_WeightedDataFrame_constructor()
    neff = df.neff()
    assert isinstance(neff, float)
    assert neff < len(df)
    assert neff > len(df) * np.exp(-0.25)
Example #32
def sigmoid(Z):
    return 1/(1+np.exp(-Z))
Example #33
o.fallback_values['sea_surface_wave_stokes_drift_x_velocity'] = .3
o.fallback_values['sea_surface_wave_stokes_drift_y_velocity'] = 0
o.set_config('wave_entrainment:droplet_size_distribution', 'Johansen et al. (2015)')
o.set_config('processes:evaporation', False)
o.set_config('processes:dispersion', False)
o.set_config('turbulentmixing:droplet_diameter_min_wavebreaking', dmin)
o.set_config('turbulentmixing:droplet_diameter_max_wavebreaking', dmax)
o.seed_elements(lon=4, lat=60, time=datetime.now(), number=10000, radius=100,
                 z=0, oiltype='TROLL, STATOIL', oil_film_thickness=0.005)
o.run(duration=timedelta(hours=2), time_step=3600)

droplet_diameters = o.elements.diameter
sd = 0.4
Sd = np.log(10.)*sd
DV_50 = np.median(droplet_diameters)
DN_50 = np.exp( np.log(DV_50) - 3*Sd**2 ) 

print('DV_50: %f' % DV_50)
print('DN_50: %f' % DN_50)

################## Plotting ##########################
plt.figure(figsize=[14,14])
plt.subplot(3, 2, 1)
nVpdf, binsV, patches = plt.hist(droplet_diameters, 100, range=(dmin,dmax), align='mid')
plt.xlabel('Droplet diameter d [m]', fontsize=8)
plt.ylabel('V(d)', fontsize=8)
plt.title('volume spectrum\nJohansen et al. (2015) distribution in OpenOil3D', fontsize=10)

plt.subplot(3,2,2)
nVcum, binsV, patches = plt.hist(droplet_diameters, 100, range=(dmin,dmax), align='mid', cumulative=True)
plt.xlabel('Droplet diameter d [m]', fontsize=8)
Example #34
    print "Maximum logL is %g, (%g in \"SNR\")" % (max(logLs), numpy.sqrt(2*max(logLs)))
    print "Which occurs at sample", numpy.argmax(logLs)
    print "This corresponds to time %.20g" % tvals[numpy.argmax(logLs)]
    print "The data event time is:  %.20g" % sim_row.get_time_geocent()
    print "Difference from geocenter t_ref is %.20g" %\
            (tvals[numpy.argmax(logLs)] - sim_row.get_time_geocent())
    print "This difference in discrete samples: %.20g" %\
            ((tvals[numpy.argmax(logLs)]-sim_row.get_time_geocent())/P.deltaT)
    pyplot.plot(tvals-tref, logLs)
    pyplot.ylabel("log Likelihood")
    pyplot.xlabel("time (relative to %10.5f)" % tref)
    pyplot.axvline(0, color="k")
    pyplot.title("lnL(t),\n value at event time: %f" % logL)
    pyplot.grid()
    pyplot.savefig("logL.png")
    integral = numpy.sum( numpy.exp(logLs) * P.deltaT )
    print "Integral over t of likelihood is:", integral
    print "The log of the integral is:", numpy.log(integral)
    exit()

#
# Parameter integral sampling strategy
#
params = {}
sampler = mcsampler.MCSampler()

#
# Psi -- polarization angle
# sampler: uniform in [0, pi)
#
psi_sampler = functools.partial(mcsampler.uniform_samp_vector, param_limits["psi"][0], param_limits["psi"][1])
Example #35
             '2017-05-16', '2017-10-05', '2018-04-09', '2018-08-29', '2019-01-24', '2019-05-16']

cp_train_std = np.std(df_close.to_numpy(), axis=0)
choice = np.argsort(cp_train_std)

for phs in range(0, len(end_dates)):
    print(phs)
    ntrain = 250
    nval = 50
    ntest = 100
    win = 5
    nstock = len(all_stock)

    cp = df_close[:end_dates[phs]].tail(ntrain+nval+ntest)
    cp_train = cp.iloc[:ntrain, :]
    cov_train = np.cov(np.exp(cp_train.to_numpy().T))
    
    cp_val = cp.iloc[ntrain-win:ntrain+nval, :]
    cp_test = cp.iloc[ntrain+nval-win:, :]

    cp_trainx = np.zeros((ntrain - win, win * nstock))
    cp_trainy = np.zeros((ntrain - win, nstock))

    for i in range(win, ntrain):
        cp_trainy[i - win] = cp_train.to_numpy()[i]
        for s in range(nstock):
            cp_trainx[i - win, s * win:(s + 1) * win] = cp_train.to_numpy()[i - win:i, s]
    
    cp_valx = np.zeros((nval, win * nstock))
    cp_valy = np.zeros((nval, nstock))
Example #36
    simVals = np.array(simVals)
    polLeakdbs = np.array(polLeakdbs)
    snrs = (np.array(snrs) / normSNR)**2.

    fig = p.figure()
    ax = fig.add_subplot(1, 1, 1)
    fig.set_size_inches(9., 5.)

    polLeakVals = np.unique(polLeakdbs)
    cNorm = np.min(polLeakVals)
    for pVal in polLeakVals:
        idx = np.argwhere(polLeakdbs == pVal)
        subSnrs = snrs[idx]
        subSimVals = simVals[idx]
        sIdx = np.argsort(subSnrs[:, 0])
        rgb = (np.exp(pVal / 10.), 0., 1. - np.exp(pVal / 10.))
        #slightly hardcoded plots and labels
        #if pVal > -0.0001: labelStr='%0.f dB'%(-1.*pVal) #-0 to 0
        if pVal > -0.1: continue
        elif (pVal > -5.) or (pVal < -16. and pVal > -30) or pVal < -31.:
            continue  #skip these lines
        else:
            labelStr = '%0.f dB' % (pVal)
        midPoint = int(subSnrs.shape[0] * .33)
        p.plot(subSnrs[sIdx, 0], subSimVals[sIdx, 0], color=rgb)
        p.text(subSnrs[sIdx, 0][midPoint],
               0.8 * subSimVals[sIdx, 0][midPoint],
               labelStr,
               fontsize=18)

    #lines of constant time (5, 1, .5 us)
Example #37
def test_WeightedSeries_neff():
    series = test_WeightedSeries_constructor()
    neff = series.neff()
    assert isinstance(neff, float)
    assert neff < len(series)
    assert neff > len(series) * np.exp(-0.25)
Example #38
 def forward(self, x):
   return np.exp(x)
Example #39
 def __sigmoid(self, x):
     return 1 / (1 + numpy.exp(-x))
Example #40
File: srank_k8.py Project: JamesTuna/dlrm
def effective_rank(X):
    U,S,VT = np.linalg.svd(X)
    S = S / np.linalg.norm(S,1)
    assert (np.all(S >= 0) and abs(S.sum()-1) < 1e-5), 'S not a probability distribution'
    H = entropy(S)
    return np.exp(H)
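A sanity check (assumes entropy is scipy.stats.entropy, which uses the natural log): a matrix with equal singular values has effective rank equal to its true rank, while a rank-1 matrix has effective rank close to 1.

import numpy as np
from scipy.stats import entropy  # assumed origin of entropy()

print(effective_rank(np.eye(5)))   # 5.0: five equal singular values
X = np.outer(np.arange(1., 4.), np.arange(1., 4.))  # rank-1 matrix
print(effective_rank(X))           # ~1.0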
Example #41
    corr = np.abs(np.fft.ifft2(conj))

    alpha_p, logr_p = np.unravel_index(np.argmax(corr), corr.shape)
    logr_size = im_p_logpolar.shape[0]
    alpha_size = im_p_logpolar.shape[1]

    if logr_p > logr_size // 2:
        w = logr_size - logr_p  # enlargement
    else:
        w = -logr_p  # reduction

    A = (alpha_p * 360.0) / alpha_size
    a1 = -A
    a2 = 180 - A

    scale = np.exp(w / M)  # M is a parameter of the cv2.logPolar call
    print(scale)

    im = np.zeros(im_p.shape)
    x_1 = int((im_p.shape[0] - im_w.shape[0]) / 2)
    x_2 = int((im_p.shape[0] + im_w.shape[0]) / 2)
    y_1 = int((im_p.shape[1] - im_w.shape[1]) / 2)
    y_2 = int((im_p.shape[1] + im_w.shape[1]) / 2)
    im[x_1:x_2, y_1:y_2] = im_w_in

    centerT = (im.shape[0] / 2 - 0.5, im.shape[1] / 2 - 0.5)
    # im is the template image padded with zeros, with the template placed in the center rather than in the top-left corner!
    transM1 = cv.getRotationMatrix2D(centerT, a1, scale)
    im_r_s1 = cv.warpAffine(im, transM1, im.shape)

    transM2 = cv.getRotationMatrix2D(centerT, a2, scale)
Example #42
def poly5_to_Z(z_OLS, T):
    ## This function takes a maturity and a fitted polynomial interest rate model and returns Z(0,T)
    ## z_OLS must be an OLS class and T must be a list
    return np.exp(np.dot(power_5(T), z_OLS.beta))
Example #43
def f_o(x, pi4):

    f_O= 3.106934*np.exp(-19.868080*(x/pi4)**2) + 3.235142*np.exp(-6.960252*(x/pi4)**2) + \
      1.148886*np.exp(-0.170043*(x/pi4)**2) + 0.783981*np.exp(-65.693509*(x/pi4)**2) + \
      0.676953*np.exp(-0.630757*(x/pi4)**2) + 0.046136
    return (f_O)
Example #44
def sigmoid(z):
    return 1. / (1 + np.exp(- z))
Example #45
 def _logistic(self, x):
     return np.where(x > 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))
Example #46
def f_li(x, pi4):

    f_Li= 0.432724*np.exp(-0.260367*(x/pi4)**2)+0.549257*np.exp(-1.042836*(x/pi4)**2)+ \
     0.376575*np.exp(-7.885294*(x/pi4)**2)- 0.336481*np.exp(-0.260368*(x/pi4)**2) + \
     0.976060*np.exp(-3.042539*(x/pi4)**2) + 0.001764
    return (f_Li)
Example #47
import numpy as np
from scipy.optimize import fsolve

A = 0.004
P = 1000
eff = 0.25
h = 6.626e-34
freq = 1e15
nphotons = P*A/h/freq
Is = nphotons*1.6e-19*eff
print(Is)
V1 = 4.7
Vt = 0.025
n = 10
Vs = 4
I0 = 1e-12
I02 = 1e-12
I = I02*(np.exp((V1-Vs)/Vt)-1)
print(I)
I = Is-I0*(np.exp(V1/n/Vt)-1)
print(I)

def curr(x):
    y = I02*(np.exp((x-Vs)/Vt)-1) - Is + I0*(np.exp(x/n/Vt)-1)
    return y

soln=fsolve(curr,4.7)
print(soln)



P = np.linspace(0, 1000, 100)
I2 = np.linspace(0, 1000, 100)
Example #48
 def __calculate_weights__(self, z, xichma):
     weight = 1
     temp = sum(z ** 2)
     if temp != 0:
         weight = (1.0 / sqrt(temp)) * exp(-temp / (2 * self.problem_size * xichma ** 2))
     return weight
Example #49
    return x, y, z


if True:
    x, y, z = import_h5_array('/tmp/pyrs_test_ss/test.hdf5')
    print(x.min(), x.max())
    print(y.min(), y.max())
    print(z.min(), z.max(), z.mean())

else:
    np.random.seed(19680801)
    npts = 200
    x = np.random.uniform(-2, 2, npts)
    y = np.random.uniform(-2, 2, npts)
    z = x * np.exp(-x**2 - y**2)

fig, (ax1, ax2) = plt.subplots(nrows=2)

# -----------------------
# Interpolation on a grid
# -----------------------
# A contour plot of irregularly spaced data coordinates
# via interpolation on a grid.

# Create grid values first.
ngridx = 1000
ngridy = 2000
xi = np.linspace(-200, 200, ngridx)
yi = np.linspace(0, 70, ngridy)
Example #50
 def curr(x):
      y = I02*(np.exp((x-Vs2[j])/Vt)-1) - Is + I0*(np.exp(x/n/Vt)-1)
     return y
Example #51
File: Fit.py Project: yichuan0707/PR-SIM
def weib(x, n, a):
    return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)
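A quick check of the density (toy scale n and shape a, not from the original project): it should integrate to about 1.

import numpy as np

x = np.linspace(1e-6, 20, 100000)
pdf = weib(x, n=2.0, a=1.5)
print(pdf.sum() * (x[1] - x[0]))  # ~1.0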
Example #52
def RECTE(
        cRates,
        tExp,
        exptime=180,
        trap_pop_s=0,
        trap_pop_f=0,
        dTrap_s=0,
        dTrap_f=0,
        dt0=0,
        lost=0,
        mode='scanning'
):
    """This function calculates HST/WFC3/IR ramp effect profile based on
the charge trapping explanation developed in Zhou et al. (2017).

    :param cRates: intrinsic count rate of each exposures, unit: e/s
    :type cRates: numpy.array
    :param tExp: time stamps for the exposures, unit: seconds
    :type tExp: numpy.array
    :param exptime: (default 180 seconds) exposure time
    :type exptime: numpy.array or float
    :param trap_pop_s: (default 0) number of occupied slow population
        charge traps before the very beginning of the observation
    :type trap_pop_s: float or numpy.array
    :param trap_pop_f: (default 0) number of occupied fast population
        charge traps before the very beginning of the observation
    :type trap_pop_f: float or numpy.array
    :param dTrap_s: (default [0]) number of additional charges trapped
        by slow population traps during earth occultation
    :type dTrap_s: float or numpy.array
    :param dTrap_f: (default [0]) number of additional charges trapped
        by fast population traps during earth occultation
    :type dTrap_f: float or numpy.array
    :param dt0: (default 0) exposure time before the very beginning
        of the observation. It could be due to guidance adjustment
    :type dt0: float
    :param lost: (default 0, no lost) fraction of trapped electrons that are
        not eventually detected
    :type lost: float
    :param mode: (default scanning; scanning, staring, or others),
        for scanning mode observations, the pixel no longer receives
        photons during the overhead time; in staring mode,
        the pixel keeps receiving electrons
    :type mode: string

    :returns: observed counts
    :rtype: numpy.array

    :Example:

    see Examples and Cookbook

    """
    nTrap_s = 1525.38
    eta_trap_s = 0.013318
    tau_trap_s = 1.63e4
    nTrap_f = 162.38
    eta_trap_f = 0.008407
    tau_trap_f = 281.463
    try:
        dTrap_f = itertools.cycle(dTrap_f)
        dTrap_s = itertools.cycle(dTrap_s)
        dt0 = itertools.cycle(dt0)
    except TypeError:
        dTrap_f = itertools.cycle([dTrap_f])
        dTrap_s = itertools.cycle([dTrap_s])
        dt0 = itertools.cycle([dt0])
    obsCounts = np.zeros(len(tExp))
    trap_pop_s = min(trap_pop_s, nTrap_s)
    trap_pop_f = min(trap_pop_f, nTrap_f)
    for i in range(len(tExp)):
        try:
            dt = tExp[i+1] - tExp[i]
        except IndexError:
            dt = exptime
        f_i = cRates[i]
        c1_s = eta_trap_s * f_i / nTrap_s + 1 / tau_trap_s  # a key factor
        c1_f = eta_trap_f * f_i / nTrap_f + 1 / tau_trap_f
        # number of trapped electron during one exposure
        dE1_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * \
            (1 - np.exp(-c1_s * exptime))
        dE1_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * \
            (1 - np.exp(-c1_f * exptime))
        dE1_s = min(trap_pop_s + dE1_s, nTrap_s) - trap_pop_s
        dE1_f = min(trap_pop_f + dE1_f, nTrap_f) - trap_pop_f
        trap_pop_s = min(trap_pop_s + dE1_s, nTrap_s)
        trap_pop_f = min(trap_pop_f + dE1_f, nTrap_f)
        obsCounts[i] = f_i * exptime - dE1_s - dE1_f
        if dt < 5 * exptime:  # whether next exposure is in next batch of exposures
            # same orbits
            if mode == 'scanning':
                # scanning mode, no incoming flux between exposures
                dE2_s = - trap_pop_s * (1 - np.exp(-(dt - exptime)/tau_trap_s))
                dE2_f = - trap_pop_f * (1 - np.exp(-(dt - exptime)/tau_trap_f))
            elif mode == 'staring':
                # for staring mode, there is flux between exposures
                dE2_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * \
                    (1 - np.exp(-c1_s * (dt - exptime)))
                dE2_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * \
                    (1 - np.exp(-c1_f * (dt - exptime)))
            else:
                # others, same as scanning
                dE2_s = - trap_pop_s * (1 - np.exp(-(dt - exptime)/tau_trap_s))
                dE2_f = - trap_pop_f * (1 - np.exp(-(dt - exptime)/tau_trap_f))
            trap_pop_s = min(trap_pop_s + dE2_s, nTrap_s)
            trap_pop_f = min(trap_pop_f + dE2_f, nTrap_f)
        elif dt < 1200:
            # considering in orbit download scenario
            trap_pop_s = min(
                trap_pop_s * np.exp(-(dt-exptime)/tau_trap_s), nTrap_s)
            trap_pop_f = min(
                trap_pop_f * np.exp(-(dt-exptime)/tau_trap_f), nTrap_f)
        else:
            # switch orbit
            dt0_i = next(dt0)
            trap_pop_s = min(trap_pop_s * np.exp(-(dt-exptime-dt0_i)/tau_trap_s) +
                             next(dTrap_s), nTrap_s)
            trap_pop_f = min(trap_pop_f * np.exp(-(dt-exptime-dt0_i)/tau_trap_f) +
                             next(dTrap_f), nTrap_f)
            f_i = cRates[i + 1]
            c1_s = eta_trap_s * f_i / nTrap_s + 1 / tau_trap_s  # a key factor
            c1_f = eta_trap_f * f_i / nTrap_f + 1 / tau_trap_f
            dE3_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * \
                (1 - np.exp(-c1_s * dt0_i))
            dE3_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * \
                (1 - np.exp(-c1_f * dt0_i))
            dE3_s = min(trap_pop_s + dE3_s, nTrap_s) - trap_pop_s
            dE3_f = min(trap_pop_f + dE3_f, nTrap_f) - trap_pop_f
            trap_pop_s = min(trap_pop_s + dE3_s, nTrap_s)
            trap_pop_f = min(trap_pop_f + dE3_f, nTrap_f)
        trap_pop_s = max(trap_pop_s, 0)
        trap_pop_f = max(trap_pop_f, 0)

    return obsCounts
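A minimal call sketch (toy inputs, not from the paper's data; assumes numpy and itertools are imported as in the original module): ten back-to-back 180 s exposures at a constant 100 e/s intrinsic rate. The first frames read low while the traps fill, producing the ramp.

import numpy as np

tExp = np.arange(10) * 180.0   # exposure start times in seconds
cRates = np.full(10, 100.0)    # intrinsic count rates, e/s
counts = RECTE(cRates, tExp, exptime=180)
print(counts)  # climbs toward 100 * 180 = 18000 e as the traps saturate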
Example #53
File: 1_2.py Project: naturliche/signal
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 14 19:22:01 2019

@author: natur
"""

import numpy as np
import matplotlib.pylab as plt
import math
import matplotlib

t = np.linspace(0,10.0)
x1 = 4*np.exp(-0.5*t)*np.sin(math.pi*t + math.pi/2)
plt.title(u'4exp(-0.5t)cos(πt)')
plt.plot(t,x1)
Example #54
     times.append(t)
     use_this_time = ((len(times) - 1) % opts.decimate) == 0
     use_this_time &= time_sel(t, (len(times)-1) / opts.decimate)
     if use_this_time:
         plot_t['lst'].append(uv['lst'])
         plot_t['jd'].append(t)
         plot_t['cnt'].append((len(times)-1) / opts.decimate)
 if not use_this_time: continue
 #apply cal phases
 if opts.cal is not None:
     aa.set_jultime(t)
     if opts.src is not None:
         src.compute(aa)
         d = aa.phs2src(d, src, i, j)
     else:
         d *= n.exp(-1j*n.pi*aa.get_phs_offset(i,j))
 # Do delay transform if required
 if opts.delay:
     if opts.unmask:
         d = d.data
         ker = n.zeros_like(d)
         ker[0] = 1.
         gain = 1.
     else:
         flags = n.logical_not(d.mask).astype(float)
         gain = n.sqrt(n.average(flags**2))
         ker = n.fft.ifft(flags)
         d = d.filled(0)
     d = n.fft.ifft(d)
     if not opts.clean is None and not n.all(d == 0):
         d, info = a.deconv.clean(d, ker, tol=opts.clean)
Example #55
def exponential_sine(t, amp, freq, growth, phase):
    return amp * np.sin(2*np.pi*freq*t + phase) * np.exp(growth*t)
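This signature drops straight into scipy.optimize.curve_fit; a hedged sketch with synthetic data and made-up parameter values:

import numpy as np
from scipy.optimize import curve_fit

t = np.linspace(0, 2, 400)
y = exponential_sine(t, 1.0, 3.0, 0.8, 0.2)
y += 0.01 * np.random.default_rng(0).normal(size=t.size)
popt, _ = curve_fit(exponential_sine, t, y, p0=[1.0, 3.0, 0.5, 0.0])
print(popt)  # ~[1.0, 3.0, 0.8, 0.2]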
Example #56
ub        = scalar, upper bound of cake grid
size_c    = integer, number of grid points in cake state space
c_grid    = vector, size_c x 1 vector of cake grid points
------------------------------------------------------------------------
'''

lb = 10
ub = 15

size_c = 50
size_z = 50

c_grid = np.linspace(lb, ub, size_c)

z_grid, pi = ar1.addacooper(size_z, mu, rho, sigma_z)
z_grid = np.exp(z_grid)
prob_m = np.transpose(pi)
'''
------------------------------------------------------------------------
Create grid of current utility values
------------------------------------------------------------------------
C        = matrix, current consumption (c=w-w')
U        = matrix, current period utility value for all possible
           choices of w and w' (rows are w, columns w')
------------------------------------------------------------------------
'''
C = np.zeros((size_c, size_c, size_c))
for i in range(size_c):
    for j in range(size_c):
        for k in range(size_c):
            C[i, j, k] = z_grid[k] * c_grid[i]**alpha + (
Example #57
 def forward(self, x, t):
     self.t = t
     self.y = 1 / (1 + np.exp(-x))
     loss = cross_entropy_error(np.c_[1 - self.y, self.y], t)
     return loss
Example #58
def sigmoid(in_x):
    return 1.0 / (1 + np.exp(-in_x))
Example #59
 totalAll = 0.0
 for a in range(2):
     for b in range(2):
         for c in range(2):
             for d in range(2):
                 for e in range(2):
                     for f in range(2):
                         for g in range(2):
                             AllJointProb[a,b,c,d,e,f,g] = np.log(A[tuple([a])])
                             AllJointProb[a,b,c,d,e,f,g] += np.log(B[tuple([b, a])])
                             AllJointProb[a,b,c,d,e,f,g] += np.log(C[tuple([c, a, b, d])])
                             AllJointProb[a,b,c,d,e,f,g] += np.log(D[tuple([d, a])])
                             AllJointProb[a,b,c,d,e,f,g] += np.log(E[tuple([e, b, c, d])])
                             AllJointProb[a,b,c,d,e,f,g] += np.log(F[tuple([f, a, d, e])])
                             AllJointProb[a,b,c,d,e,f,g] += np.log(G[tuple([g, b, e])])
                             AllJointProb[a,b,c,d,e,f,g] = np.exp(AllJointProb[a,b,c,d,e,f,g])
                             totalAll += AllJointProb[a,b,c,d,e,f,g]
                             
 print(totalAll)
 
 CEABF = np.zeros((2,2,2,2,2))
 ABF = np.zeros((2,2,2))
 CEGFABD = np.zeros((2,2,2,2,2,2,2))
 ABD = np.zeros((2,2,2))
 ABCDEFG = np.zeros((2,2,2,2,2,2,2))
 EFG = np.zeros((2,2,2))
 ACEFBG = np.zeros((2,2,2,2,2,2))
 BG = np.zeros((2,2))   
 GBC = np.zeros((2,2,2))
 BC = np.zeros((2,2))
 BCDAE = np.zeros((2,2,2,2,2))
Example #60
        Fourier_array.append(f / (np.sqrt(N)))
    return Fourier_array


x_min = -1.0
x_max = 1.0
sampling_rate_1 = 256
delta = (x_max - x_min) / (sampling_rate_1 - 1)
function_array = np.zeros(sampling_rate_1)
x_arr = np.zeros(sampling_rate_1)
for i in range(sampling_rate_1):
    function_array[i] = 1
    x_arr[i] = x_min + i * delta
nft = DFT(function_array)
karr = 2 * np.pi * np.fft.fftfreq(sampling_rate_1, d=delta)
fact = np.exp(-1j * karr * x_min)
aft = delta * np.sqrt(sampling_rate_1 / (2.0 * np.pi)) * fact * nft

fig1 = plt.subplots()
plt.plot(x_arr, function_array, 'r', label='The constant function')
plt.xlabel('x', fontsize=16)
plt.ylabel('f(x)', fontsize=16)
plt.grid(True)

fig2 = plt.subplots()
plt.plot(
    karr,
    aft,
    'g',
    label='The fourier transform of the constant function for sampling rate 1')
plt.xlabel('k', fontsize=16)