def _simple_logistic_regression(x,y,beta_start=None,verbose=False,
                               CONV_THRESH=1.e-3,MAXIT=500):
    """
 Faster than logistic_regression when there is only one predictor.
    """
    if len(x) != len(y):
        raise ValueError, "x and y should be the same length!"
    if beta_start is None:
        beta_start = NA.zeros(2,x.dtype.char)
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta 
        p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likelihood
        s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)])  # scoring function
        # information matrix
        J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],
                          [NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])
        beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH: break
        iter = iter + 1
    return beta, J_bar, l
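
numarray has long been superseded by NumPy; as a point of reference, here is a minimal modern-NumPy sketch of the same Newton-Raphson update. It solves the linear system instead of forming the explicit inverse, which is equivalent but more stable; the function name and synthetic data are illustrative.

import numpy as np

def simple_logistic_np(x, y, conv_thresh=1e-3, maxit=500):
    """Newton-Raphson fit of p = 1/(1 + exp(-(b0 + b1*x)))."""
    beta = np.zeros(2)
    for _ in range(maxit):
        p = 1.0 / (1.0 + np.exp(-(beta[0] + beta[1] * x)))
        s = np.array([np.sum(y - p), np.sum((y - p) * x)])    # score
        w = p * (1.0 - p)
        J = np.array([[np.sum(w),     np.sum(w * x)],
                      [np.sum(w * x), np.sum(w * x * x)]])    # information
        step = np.linalg.solve(J, s)                          # avoids inverse(J)
        beta = beta + step
        if np.sum(np.abs(step)) <= conv_thresh:
            break
    return beta

# Recover beta ~ (-1, 2) from synthetic data
rng = np.random.default_rng(0)
x = rng.normal(size=500)
y = (rng.random(500) < 1.0 / (1.0 + np.exp(-(-1.0 + 2.0 * x)))).astype(float)
print(simple_logistic_np(x, y))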
def _simple_logistic_regression(x,y,beta_start=None,verbose=False,
                               CONV_THRESH=1.e-3,MAXIT=500):
    """
 Faster than logistic_regression when there is only one predictor.
    """
    if len(x) != len(y):
        raise ValueError, "x and y should be the same length!"
    if beta_start is None:
        beta_start = NA.zeros(2,x.typecode())
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta 
        p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likelihood
        s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)])  # scoring function
        # information matrix
        J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],
                          [NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])
        beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH: break
        iter = iter + 1
    return beta, J_bar, l
Example #3
    def traf_window(self, line_broadening=10, show=0):
        apod = (numarray.exp(-self.timepoints*line_broadening))**2 / (
            (numarray.exp(-self.timepoints*line_broadening))**3
            + (numarray.exp(-self.aquisition_time*line_broadening))**3)
        for i in range(2):
            self.the_result.y[i] = self.the_result.y[i]*apod
        if show == 1:
            return self.the_result
        return self
Example #4
    def dexp_window(self, line_broadening=10, gaussian_multiplicator=0.3, show=0):
        apod = numarray.exp(-(self.timepoints*line_broadening - gaussian_multiplicator*self.aquisition_time)**2)
        for i in range(2):
            self.the_result.y[i] = self.the_result.y[i]*apod
        if show == 1:
            return self.the_result
        return self
Example #5
    def gauss_window(self, line_broadening=10, show=0):
        apod = numarray.exp(-(self.timepoints*line_broadening)**2)
        for i in range(2):
            self.the_result.y[i] = self.the_result.y[i]*apod
        if show == 1:
            return self.the_result
        return self
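
All three methods above apply an elementwise apodization window to the two rows of self.the_result.y; only the window shape differs. A self-contained sketch of the Gaussian case on a synthetic FID, using modern NumPy (the time axis and signal here are made up for illustration):

import numpy as np

timepoints = np.linspace(0.0, 1.0, 1024)                     # seconds
fid = np.exp(-5.0 * timepoints) * np.cos(2 * np.pi * 50 * timepoints)

line_broadening = 10.0
apod = np.exp(-(timepoints * line_broadening) ** 2)          # gauss_window shape
fid_windowed = fid * apod                                    # same elementwise product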
def calcprob(beta, x):
    """
 calculate probabilities (in percent) given beta and x
    """
    try:
        N, npreds = x.shape[1], x.shape[0]
    except IndexError: # single predictor, x is a vector, len(beta)=2.
        N, npreds = len(x), 1
    if len(beta) != npreds+1:
        raise ValueError,'sizes of beta and x do not match!'
    if npreds==1: # simple logistic regression
        return 100.*NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
    X = NA.ones((npreds+1,N), x.typecode())
    X[1:, :] = x
    ebx = NA.exp(NA.dot(beta, X))
    return 100.*ebx/(1.+ebx)
def calcprob(beta, x):
    """
 calculate probabilities (in percent) given beta and x
    """
    try:
        N, npreds = x.shape[1], x.shape[0]
    except IndexError: # single predictor, x is a vector, len(beta)=2.
        N, npreds = len(x), 1
    if len(beta) != npreds+1:
        raise ValueError,'sizes of beta and x do not match!'
    if npreds==1: # simple logistic regression
        return 100.*NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
    X = NA.ones((npreds+1,N), x.dtype.char)
    X[1:, :] = x
    ebx = NA.exp(NA.dot(beta, X))
    return 100.*ebx/(1.+ebx)
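
A small usage sketch of the multi-predictor branch in modern NumPy: stack an intercept row on top of x, then map the linear predictor through the logistic function. The numbers are illustrative.

import numpy as np

beta = np.array([0.5, 1.2, -0.7])           # intercept + two slopes
x = np.array([[0.1, 0.4, 0.9],              # predictor 1, shape (npreds, N)
              [1.0, 0.2, 0.3]])             # predictor 2
X = np.ones((x.shape[0] + 1, x.shape[1]))   # prepend the intercept row
X[1:, :] = x
ebx = np.exp(beta @ X)
print(100.0 * ebx / (1.0 + ebx))            # probabilities in percent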
Example #8
 def __call__(self, x):
     if isinstance(x, (int, float)):
         return self.value(x)
     elif isinstance(x, list):
         my_list = []
         for xx in x:
             my_list.append(self.value(xx))
         return my_list
     elif isinstance(x, numarray.NumArray):
         return (numarray.exp(-numarray.power((x-self.mean)/self.sigma, 2)/2.)
                 /numarray.sqrt(2.*numarray.pi)/self.sigma)
Example #9
 def __call__(self, x):
     if isinstance(x, (int, float)):
         return self.value(x)
     elif isinstance(x, list):
         my_list = []
         for xx in x:
             my_list.append(self.value(xx))
         return my_list
     elif isinstance(x, numarray.NumArray):
         return (numarray.exp(-numarray.power(
             (x - self.mean) / self.sigma, 2) / 2.) /
                 numarray.sqrt(2. * numarray.pi) / self.sigma)
Example #10
def plotLine(display, slope, intercept, xlog, ylog, lineStyle='Dot',
             color='black'):
    f = lambda x: slope*x + intercept
    xr = display.getRange("x")
    if xlog:
        xr = ( numarray.log(xr[0]), numarray.log(xr[1]) )
    xx = numarray.arange(100)/99.*(xr[1] - xr[0]) + xr[0]
    yy = numarray.array([f(x) for x in xx])
    if ylog:
        yy = numarray.exp(yy)
    if xlog:
        xx = numarray.exp(xx)
    ylabel = display.getLabel("y")
    xBinWidth = display.getDataRep().getBinWidth("x")
    if xBinWidth and ylabel == "Entries / bin":
        yy = yy/xBinWidth
    Canvas().selectDisplay( display )
    display.setAutoRanging("x", 0)
    display.setAutoRanging("y", 0)
    nt = newNTuple( (xx, yy), ('x', 'y'), register=0 )
    Scatter(nt, 'x', 'y', pointRep="Line", oplot=1, lineStyle=lineStyle,
            color=color)
Example #11
def gauss(x,m,sigma):
    """ Return a Gaussian with mean, sigma for a numarray x """
    sigma2 = sigma*sigma
    exponent = -(x-m)**2/(2.*sigma2)
    exponent = numarray.choose(exponent<-700.,(exponent,-700.))
    try:
       result = numarray.exp(exponent)/numarray.sqrt(2.*numarray.pi*sigma2)
    except OverflowError:
       print "gauss: overflow error"
       print "sigma = ", sigma
       print "m = ", m
       print "x = ", x
       sys.exit()
    return result
Example #12
def gauss(x, m, sigma):
    """ Return a Gaussian with mean, sigma for a numarray x """
    sigma2 = sigma * sigma
    exponent = -(x - m)**2 / (2. * sigma2)
    exponent = numarray.choose(exponent < -700., (exponent, -700.))
    try:
        result = numarray.exp(exponent) / numarray.sqrt(
            2. * numarray.pi * sigma2)
    except OverflowError:
        print "gauss: overflow error"
        print "sigma = ", sigma
        print "m = ", m
        print "x = ", x
        sys.exit()
    return result
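
The numarray.choose call above clips the exponent at -700 so numarray.exp never blows up for far-out x; in modern NumPy the same guard is np.maximum (a sketch):

import numpy as np

def gauss_np(x, m, sigma):
    """Gaussian pdf with the same exponent clipping as above."""
    sigma2 = sigma * sigma
    exponent = np.maximum(-(x - m) ** 2 / (2.0 * sigma2), -700.0)
    return np.exp(exponent) / np.sqrt(2.0 * np.pi * sigma2)

print(gauss_np(np.array([-1.0, 0.0, 1.0]), 0.0, 1.0))   # ~ [0.242, 0.399, 0.242]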
 def _gaussian(self, mean, cvm, x):
     m = len(mean)
     assert cvm.shape == (m, m), \
         'bad sized covariance matrix, %s' % str(cvm.shape)
     try:
         det = numarray.linear_algebra.determinant(cvm)
         inv = numarray.linear_algebra.inverse(cvm)
         a = det ** -0.5 * (2 * numarray.pi) ** (-m / 2.0) 
         dx = x - mean
         b = -0.5 * numarray.matrixmultiply( \
                 numarray.matrixmultiply(dx, inv), dx)
         return a * numarray.exp(b) 
     except OverflowError:
         # happens when the exponent b is hugely negative, i.e. exp(b) = 0;
         # the inverse of cvm is huge (cvm is almost zero)
         return 0
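
The same multivariate normal density in modern NumPy, for comparison (a sketch; for nearly singular covariances, slogdet and solve would be preferable to det and inv):

import numpy as np

def gaussian_density(mean, cvm, x):
    """Multivariate normal density, mirroring _gaussian above."""
    m = len(mean)
    a = np.linalg.det(cvm) ** -0.5 * (2 * np.pi) ** (-m / 2.0)
    dx = x - mean
    b = -0.5 * dx @ np.linalg.inv(cvm) @ dx
    return a * np.exp(b)

mean = np.zeros(2)
cvm = np.array([[1.0, 0.3], [0.3, 2.0]])
print(gaussian_density(mean, cvm, np.array([0.5, -0.5])))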
Example #14
 def plot_rspgenIntegral(self, energy, inclination, phi=0, nsamp=2000):
     rmin = 1e-2
     rmax = 30.
     npts = 20
     rstep = num.log(rmax / rmin) / (npts - 1)
     radii = rmin * num.exp(rstep * num.arange(npts))
     self._setPsf(energy, inclination, phi)
     seps = []
     srcDir = SkyDir(180, 0)
     for i in range(nsamp):
         appDir = self.psf.appDir(energy, srcDir, self.scZAxis,
                                  self.scXAxis)
         seps.append(appDir.difference(srcDir) * 180. / num.pi)
     seps.sort()
     fraction = num.arange(nsamp, type=num.Float) / nsamp
     disp = plot.scatter(seps,
                         fraction,
                         xlog=1,
                         xname='ROI radius',
                         yname='enclosed Psf fraction',
                         pointRep='Line',
                         color='red')
     disp.setTitle("%s: %i MeV, %.1f deg" %
                   (self.irfs, energy, inclination))
     npred = []
     resids = []
     for radius in radii:
         npred.append(
             self.psf.angularIntegral(energy, inclination, phi, radius))
         resids.append(
             num.abs(
                 (self._interpolate(seps, fraction, radius) - npred[-1]) /
                 npred[-1]))
     plot.scatter(radii, npred, pointRep='Line', oplot=1)
     residplot = plot.scatter(radii,
                              resids,
                              'ROI radius',
                              yname='abs(sim - npred)/npred',
                              xlog=1,
                              ylog=1)
     #        Npred = Interpolator(radii, npred)
     ks_prob = ks2(npred, seps)
     plot.hline(0)
     residplot.setTitle("%s: %i MeV, %.1f deg\n ks prob=%.2e" %
                        (self.irfs, energy, inclination, ks_prob[1]))
     return energy, inclination, ks_prob[1]
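
ks2 is presumably a two-sample Kolmogorov-Smirnov helper; with SciPy the equivalent comparison would look like the following sketch, where the p-value plays the role of ks_prob[1] in the title above:

import numpy as np
from scipy.stats import ks_2samp

rng = np.random.default_rng(2)
a = rng.normal(0.0, 1.0, 300)
b = rng.normal(0.1, 1.0, 300)
stat, pvalue = ks_2samp(a, b)      # KS statistic and p-value
print(stat, pvalue)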
Example #15
def vline(x=None, color='black'):
    if x is None:
        pt = Canvas().mouseData()
        x = pt[0]
        print x
    x = numarray.zeros(100) + x
    display = Canvas().getDisplay()
    yr = list(display.getRange("y"))
    if display.getBinWidth('x') > 0 and display.getBinWidth('y') < 0:
        yr[0] /= display.getBinWidth('x')
        yr[1] /= display.getBinWidth('x')
    ylog = display.getLog("y")
    if ylog:
        yr = ( numarray.log(yr[0]), numarray.log(yr[1]) )
    y = numarray.arange(100)/99.*(yr[1]-yr[0]) + yr[0]
    if ylog:
        y = numarray.exp(y)
    display.setAutoRanging("x", 0)
    display.setAutoRanging("y", 0)
    nt = newNTuple( (x, y), ('x', 'y'), register=0 )
    Scatter(nt, 'x', 'y', pointRep="Line", oplot=1, lineStyle='Dot',
            color=color)
Example #16
"""
Solve the SDE. We define the f and g functions first, then pass them to
the solver. We also use the known solution for the SDE for comparison 
purposes.
"""

def f(x):
    return 2 * x
def g(x):
    return x

# numeric solutions
t, Xt = DiscreteBrownianMotion.sdeEM(f, g, 1, dbm)  
# exact solutions
Yt = N.exp(1.5*t + dbm.Wt)       

"""
Now use a different time interval for integration than for the discretized
Brownian motion. Just expand the time interval for the Brownian motion, and
repeat the integration.
"""

expandFactor = 10
dbm2 = dbm.expandInterval(expandFactor)
t2, Xt2  = DiscreteBrownianMotion.sdeEM(f, g, 1, dbm2)  

"""
Take a look at the results. We'll first examine the RMS error of the numeric
solution compared to the exact solution, and print the values determined.
"""
Example #17
    def train(self, train_toks, **kwargs):
        """
        Train a new C{ConditionalExponentialClassifier}, using the
        given training samples.  This
        C{ConditionalExponentialClassifier} should encode the model
        that maximizes entropy from all the models that are
        empirically consistent with C{train_toks}.
        
        @param kwargs: Keyword arguments.
          - C{iterations}: The maximum number of times IIS should
            iterate.  If IIS converges before this number of
            iterations, it may terminate.  Default=C{20}.
            (type=C{int})
            
          - C{debug}: The debugging level.  Higher values will cause
            more verbose output.  Default=C{0}.  (type=C{int})
            
          - C{classes}: The set of possible classes.  If none is given,
            then the set of all classes attested in the training data
            will be used instead.  (type=C{list} of (immutable)).
            
          - C{accuracy_cutoff}: The accuracy value that indicates
            convergence.  If the accuracy becomes closer to one
            than the specified value, then IIS will terminate.  The
            default value is None, which indicates that no accuracy
            cutoff should be used. (type=C{float})

          - C{delta_accuracy_cutoff}: The change in accuracy that should
            be taken to indicate convergence.  If the accuracy changes by
            less than this value in a single iteration, then IIS will
            terminate.  The default value is C{None}, which indicates
            that no accuracy-change cutoff should be
            used. (type=C{float})

          - C{log_likelihood_cutoff}: specifies what log-likelihood
            value should be taken to indicate convergence.  If the
            log-likelihood becomes closer to zero than the specified
            value, then IIS will terminate.  The default value is
            C{None}, which indicates that no log-likelihood cutoff
            should be used. (type=C{float})

          - C{delta_log_likelihood_cutoff}: specifies what change in
            log-likelihood should be taken to indicate convergence.
            If the log-likelihood changes by less than this value in a
            single iteration, then IIS will terminate.  The default
            value is C{None}, which indicates that no
            log-likelihood-change cutoff should be used.  (type=C{float})
        """
        assert _chktype(1, train_toks, [Token], (Token,))
        # Process the keyword arguments.
        iter = 20
        debug = 0
        classes = None
        ll_cutoff = lldelta_cutoff = None
        acc_cutoff = accdelta_cutoff = None
        for (key, val) in kwargs.items():
            if key in ('iterations', 'iter'): iter = val
            elif key == 'debug': debug = val
            elif key == 'classes': classes = val
            elif key == 'log_likelihood_cutoff':
                ll_cutoff = abs(val)
            elif key == 'delta_log_likelihood_cutoff':
                lldelta_cutoff = abs(val)
            elif key == 'accuracy_cutoff': 
                acc_cutoff = abs(val)
            elif key == 'delta_accuracy_cutoff':
                accdelta_cutoff = abs(val)
            else: raise TypeError('Unknown keyword arg %s' % key)
        if classes is None:
            classes = attested_classes(train_toks)
        self._classes = classes

        # Find the length of the first token's feature vector.
        if len(train_toks) == 0:
            raise ValueError('Expected at least one training token')
        vector0 = train_toks[0]['FEATURE_VECTOR']
        self._feature_vector_len = len(vector0)
        self._weight_vector_len = self._feature_vector_len*len(self._classes)

        # Build the offsets dictionary.  This maps from a class to the
        # index in the weight vector where that class's weights begin.
        self._offsets = dict([(cls, i*self._feature_vector_len)
                              for i, cls in enumerate(classes)])

        # Find the frequency with which each feature occurs in the
        # training data.
        ffreq_emperical = self._ffreq_emperical(train_toks)

        # Find the nf map, and related variables nfarray and nfident.
        # nf is the sum of the features for a given labeled text.
        # nfmap compresses this sparse set of values to a dense list.
        # nfarray performs the reverse operation.  nfident is 
        # nfarray multiplied by an identity matrix.
        nfmap = self._nfmap(train_toks)
        nfs = nfmap.items()
        nfs.sort(lambda x,y:cmp(x[1],y[1]))
        nfarray = numarray.array([nf for (nf, i) in nfs], 'd')
        nftranspose = numarray.reshape(nfarray, (len(nfarray), 1))

        # An array that is 1 whenever ffreq_emperical is zero.  In
        # other words, it is one for any feature that's not attested
        # in the data.  This is used to avoid division by zero.
        unattested = numarray.zeros(self._weight_vector_len, 'd')
        for i in range(len(unattested)):
            if ffreq_emperical[i] == 0: unattested[i] = 1

        # Build the classifier.  Start with weight=1 for each feature,
        # except for the unattested features.  Start those out at
        # zero, since we know that's the correct value.
        weights = numarray.ones(self._weight_vector_len, 'd')
        weights -= unattested
        classifier = ConditionalExponentialClassifier(classes, weights)
                
        if debug > 0: print '  ==> Training (%d iterations)' % iter
        if debug > 2:
            print
            print '      Iteration    Log Likelihood    Accuracy'
            print '      ---------------------------------------'

        # Train for a fixed number of iterations.
        for iternum in range(iter):
            if debug > 2:
                print ('     %9d    %14.5f    %9.3f' %
                       (iternum, classifier_log_likelihood(classifier, train_toks),
                        classifier_accuracy(classifier, train_toks)))

            # Calculate the deltas for this iteration, using Newton's method.
            deltas = self._deltas(train_toks, classifier, unattested,
                                  ffreq_emperical, nfmap, nfarray,
                                  nftranspose)

            # Use the deltas to update our weights.
            weights = classifier.weights()
            weights *= numarray.exp(deltas)
            classifier.set_weights(weights)
                        
            # Check log-likelihood cutoffs.
            if ll_cutoff is not None or lldelta_cutoff is not None:
                ll = classifier_log_likelihood(classifier, train_toks)
                if ll_cutoff is not None and ll > -ll_cutoff: break
                if lldelta_cutoff is not None:
                    if iternum > 0 and (ll - ll_old) < lldelta_cutoff: break
                    ll_old = ll

            # Check accuracy cutoffs.
            if acc_cutoff is not None or accdelta_cutoff is not None:
                acc = classifier_accuracy(classifier, train_toks)
                if acc_cutoff is not None and acc < acc_cutoff: break
                if accdelta_cutoff is not None:
                    if iternum > 0 and (acc_old - acc) < accdelta_cutoff: break
                    acc_old = acc

        if debug > 2:
            print ('     %9d    %14.5f    %9.3f' %
                   (iternum+1, classifier_log_likelihood(classifier, train_toks),
                    classifier_accuracy(classifier, train_toks)))
            print
                   
        # Return the classifier.
        return classifier
def logistic_regression(x,
                        y,
                        beta_start=None,
                        verbose=False,
                        CONV_THRESH=1.e-3,
                        MAXIT=500):
    """
 Uses the Newton-Raphson algorithm to calculate a maximum-likelihood
 estimate for a logistic regression.
 The algorithm is known as 'iteratively re-weighted least squares', or IRLS.

 x - rank-1 or rank-2 array of predictors. If x is rank-2,
     the number of predictors = x.shape[0] = N.  If x is rank-1,
     it is assumed N=1.
     
 y - binary outcomes (if N>1 len(y) = x.shape[1], if N=1 len(y) = len(x))
 
 beta_start - initial beta vector (default zeros(N+1,x.dtype.char))
 
 if verbose=True, diagnostics printed for each iteration (default False).
 
 MAXIT - max number of iterations (default 500)
 
 CONV_THRESH - convergence threshold (sum of absolute differences
  of beta-beta_old, default 0.001)

 returns beta (the logistic regression coefficients, an N+1 element vector),
 J_bar (the (N+1)x(N+1) information matrix), and l (the log-likelihood).
 
 J_bar can be used to estimate the covariance matrix and the standard
 error for beta.
 
 l can be used for a chi-squared significance test.

 covmat = inverse(J_bar)     --> covariance matrix of coefficients (beta)
 stderr = sqrt(diag(covmat)) --> standard errors for beta
 deviance = -2l              --> scaled deviance statistic
 chi-squared value for -2l is the model chi-squared test.
    """
    if x.shape[-1] != len(y):
        raise ValueError, "x.shape[-1] and y should be the same length!"
    try:
        N, npreds = x.shape[1], x.shape[0]
    except IndexError:  # single predictor, use simple logistic regression routine.
        return _simple_logistic_regression(x,
                                           y,
                                           beta_start=beta_start,
                                           CONV_THRESH=CONV_THRESH,
                                           MAXIT=MAXIT,
                                           verbose=verbose)
    if beta_start is None:
        beta_start = NA.zeros(npreds + 1, x.dtype.char)
    X = NA.ones((npreds + 1, N), x.dtype.char)
    X[1:, :] = x
    Xt = NA.transpose(X)
    iter = 0
    diff = 1.
    beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta
        ebx = NA.exp(NA.dot(beta, X))
        p = ebx / (1. + ebx)
        l = NA.sum(y * NA.log(p) +
                   (1. - y) * NA.log(1. - p))  # log-likelihood
        s = NA.dot(X, y - p)  # scoring function
        J_bar = NA.dot(X * p, Xt)  # information matrix
        beta = beta_old + NA.dot(LA.inverse(J_bar), s)  # new value of beta
        diff = NA.sum(NA.fabs(beta - beta_old))  # sum of absolute differences
        if verbose:
            print iter + 1, beta, l, diff
        if diff <= CONV_THRESH: break
        iter = iter + 1
    if iter == MAXIT and diff > CONV_THRESH:
        print 'warning: convergence not achieved with threshold of %s in %s iterations' % (
            CONV_THRESH, MAXIT)
    return beta, J_bar, l
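
The covariance recipe from the docstring, spelled out as a modern-NumPy sketch (beta, J_bar and l stand for the values logistic_regression returns; the numbers below are illustrative placeholders):

import numpy as np

beta = np.array([0.48, 1.17, -0.66])     # illustrative fit results
J_bar = np.array([[120.0, 10.0,  5.0],
                  [ 10.0, 95.0, -3.0],
                  [  5.0, -3.0, 80.0]])
l = -210.3

covmat = np.linalg.inv(J_bar)            # covariance matrix of coefficients
stderr = np.sqrt(np.diag(covmat))        # standard errors for beta
deviance = -2.0 * l                      # scaled deviance statistic
print(stderr, deviance)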
Example #19
def log_array(npts, xmin, xmax):
    xstep = num.log(xmax / xmin) / (npts - 1)
    return xmin * num.exp(num.arange(npts, type=num.Float) * xstep)
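
This builds npts logarithmically spaced points from xmin to xmax; in modern NumPy the one-liner np.geomspace produces the same array (a quick check):

import numpy as np

npts, xmin, xmax = 20, 1e-2, 30.0
radii = xmin * np.exp(np.arange(npts) * np.log(xmax / xmin) / (npts - 1))
assert np.allclose(radii, np.geomspace(xmin, xmax, npts))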
Example #20
    def _deltas(self, train_toks, #fd_list, labeled_tokens, labels,
                classifier, unattested, ffreq_emperical, nfmap,
                nfarray, nftranspose):
        """
        Calculate the update values for the classifier weights for
        this iteration of IIS.  These update weights are the value of
        C{delta} that solves the equation::
        
          ffreq_emperical[i]
                 =
          SUM[t,l] (classifier.prob(LabeledText(t,l)) *
                    fd_list.detect(LabeledText(t,l))[i] *
                    exp(delta[i] * nf(LabeledText(t,l))))

        Where:
            - M{t} is a text in C{labeled_tokens}
            - M{l} is an element of C{labels}
            - M{nf(ltext)} = SUM[M{j}] C{fd_list.detect}(M{ltext})[M{j}] 

        This method uses Newton's method to solve this equation for
        M{delta[i]}.  In particular, it starts with a guess of
        C{delta[i]}=1; and iteratively updates C{delta} with::

            delta[i] -= (ffreq_emperical[i] - sum1[i])/(-sum2[i])

        until convergence, where M{sum1} and M{sum2} are defined as::
        
          sum1 = SUM[t,l] (classifier.prob(LabeledText(t,l)) *
                           fd_list.detect(LabeledText(t,l))[i] *
                           exp(delta[i] * nf(LabeledText(t,l))))
          sum2 = SUM[t,l] (classifier.prob(LabeledText(t,l)) *
                           fd_list.detect(LabeledText(t,l))[i] *
                           nf(LabeledText(t,l)) *
                           exp(delta[i] * nf(LabeledText(t,l))))

        Note that M{sum1} and M{sum2} depend on C{delta}; so they need
        to be re-computed each iteration.
        
        The variables C{nfmap}, C{nfarray}, and C{nftranspose} are
        used to generate a dense encoding for M{nf(ltext)}.  This
        allows C{_deltas} to calculate M{sum1} and M{sum2} using
        matrices, which yields a significant performance improvement.

        @param fd_list: The feature detector list for the classifier
            that this C{IISMaxentClassifierTrainer} is training.
        @type fd_list: C{FeatureDetectorListI}
        @param labeled_tokens: The set of training tokens.
        @type labeled_tokens: C{list} of C{Token} with C{LabeledText}
            type
        @param labels: The set of labels that should be considered by
            the classifier constructed by this
            C{IISMaxentClassifierTrainer}. 
        @type labels: C{list} of (immutable)
        @param classifier: The current classifier.
        @type classifier: C{ClassifierI}
        @param ffreq_emperical: An array containing the empirical
            frequency for each feature.  The M{i}th element of this
            array is the empirical frequency for feature M{i}.
        @type ffreq_emperical: C{sequence} of C{float}
        @param unattested: An array that is 1 for features that are
            not attested in the training data; and 0 for features that
            are attested.  In other words, C{unattested[i]==0} iff
            C{ffreq_emperical[i]==0}. 
        @type unattested: C{sequence} of C{int}
        @param nfmap: A map that can be used to compress C{nf} to a dense
            vector.
        @type nfmap: C{dictionary} from C{int} to C{int}
        @param nfarray: An array that can be used to uncompress C{nf}
            from a dense vector.
        @type nfarray: C{array} of C{float}
        @param nftranspose: C{array} of C{float}
        @type nftranspose: The transpose of C{nfarray}
        """
        # These parameters control when we decide that we've
        # converged.  It probably should be possible to set these
        # manually, via keyword arguments to train.
        NEWTON_CONVERGE = 1e-12
        MAX_NEWTON = 30
        
        deltas = numarray.ones(self._weight_vector_len, 'd')

        # Precompute the A matrix:
        # A[nf][id] = sum ( p(text) * p(label|text) * f(text,label) )
        # over all label,text s.t. num_features[label,text]=nf
        A = numarray.zeros((len(nfmap), self._weight_vector_len), 'd')

        for i, tok in enumerate(train_toks):
            dist = classifier.get_class_probs(tok)

            # Find the number of active features.
            feature_vector = tok['FEATURE_VECTOR']
            assignments = feature_vector.assignments()
            nf = sum([val for (id, val) in assignments])

            # Update the A matrix
            for cls, offset in self._offsets.items():
                for (id, val) in assignments:
                    A[nfmap[nf], id+offset] += dist.prob(cls) * val
        A /= len(train_toks)

        # Iteratively solve for delta.  Use the following variables:
        #   - nf_delta[x][y] = nf[x] * delta[y]
        #   - exp_nf_delta[x][y] = exp(nf[x] * delta[y])
        #   - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y])
        #   - sum1[i][nf] = sum p(text)p(label|text)f[i](label,text)
        #                       exp(delta[i]nf)
        #   - sum2[i][nf] = sum p(text)p(label|text)f[i](label,text)
        #                       nf exp(delta[i]nf)
        for rangenum in range(MAX_NEWTON):
            nf_delta = numarray.outerproduct(nfarray, deltas)
            exp_nf_delta = numarray.exp(nf_delta)
            nf_exp_nf_delta = nftranspose * exp_nf_delta
            sum1 = numarray.sum(exp_nf_delta * A) 
            sum2 = numarray.sum(nf_exp_nf_delta * A)

            # Avoid division by zero.
            sum2 += unattested

            # Update the deltas.
            deltas -= (ffreq_emperical - sum1) / -sum2

            # We can stop once we converge.
            n_error = (numarray.sum(abs((ffreq_emperical-sum1)))/
                       numarray.sum(abs(deltas)))
            if n_error < NEWTON_CONVERGE:
                return deltas

        return deltas
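
The inner Newton iteration in miniature, for a single feature: p, f and nf below stand for the classifier probability, the feature value, and the feature sum of each (text, label) pair in the docstring's equations, with illustrative numbers (a NumPy sketch).

import numpy as np

p  = np.array([0.2, 0.5, 0.3])    # classifier.prob(...)
f  = np.array([1.0, 0.0, 1.0])    # fd_list.detect(...)[i]
nf = np.array([2.0, 1.0, 3.0])    # nf(...)
ffreq = 0.4                       # ffreq_emperical[i]
delta = 1.0
for _ in range(30):
    sum1 = np.sum(p * f * np.exp(delta * nf))
    sum2 = np.sum(p * f * nf * np.exp(delta * nf))
    step = (ffreq - sum1) / -sum2
    delta -= step                 # same update rule as the docstring
    if abs(step) < 1e-12:
        break
print(delta, np.sum(p * f * np.exp(delta * nf)))   # second value ~ ffreq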
def logistic_regression(x,y,beta_start=None,verbose=False,CONV_THRESH=1.e-3,
                        MAXIT=500):
    """
 Uses the Newton-Raphson algorithm to calculate a maximum-likelihood
 estimate for a logistic regression.
 The algorithm is known as 'iteratively re-weighted least squares', or IRLS.

 x - rank-1 or rank-2 array of predictors. If x is rank-2,
     the number of predictors = x.shape[0] = N.  If x is rank-1,
     it is assumed N=1.
     
 y - binary outcomes (if N>1 len(y) = x.shape[1], if N=1 len(y) = len(x))
 
 beta_start - initial beta vector (default zeros(N+1,x.dtype.char))
 
 if verbose=True, diagnostics printed for each iteration (default False).
 
 MAXIT - max number of iterations (default 500)
 
 CONV_THRESH - convergence threshold (sum of absolute differences
  of beta-beta_old, default 0.001)

 returns beta (the logistic regression coefficients, an N+1 element vector),
 J_bar (the (N+1)x(N+1) information matrix), and l (the log-likelihood).
 
 J_bar can be used to estimate the covariance matrix and the standard
 error for beta.
 
 l can be used for a chi-squared significance test.

 covmat = inverse(J_bar)     --> covariance matrix of coefficients (beta)
 stderr = sqrt(diag(covmat)) --> standard errors for beta
 deviance = -2l              --> scaled deviance statistic
 chi-squared value for -2l is the model chi-squared test.
    """
    if x.shape[-1] != len(y):
        raise ValueError, "x.shape[-1] and y should be the same length!"
    try:
        N, npreds = x.shape[1], x.shape[0]
    except IndexError: # single predictor, use simple logistic regression routine.
        return _simple_logistic_regression(x,y,beta_start=beta_start,
               CONV_THRESH=CONV_THRESH,MAXIT=MAXIT,verbose=verbose)
    if beta_start is None:
        beta_start = NA.zeros(npreds+1,x.dtype.char)
    X = NA.ones((npreds+1,N), x.dtype.char)
    X[1:, :] = x
    Xt = NA.transpose(X)
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print 'iteration  beta log-likelihood |beta-beta_old|'
    while iter < MAXIT:
        beta_old = beta 
        ebx = NA.exp(NA.dot(beta, X))
        p = ebx/(1.+ebx)
        l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likelihood
        s = NA.dot(X, y-p)                            # scoring function
        J_bar = NA.dot(X*p,Xt)                        # information matrix
        beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
        diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
        if verbose:
            print iter+1, beta, l, diff
        if diff <= CONV_THRESH: break
        iter = iter + 1
    if iter == MAXIT and diff > CONV_THRESH: 
        print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH,MAXIT)
    return beta, J_bar, l
Example #22
def log_array(xmin, xmax, npts):
    return xmin * num.exp(
        num.arange(npts, type=num.Float) / (npts - 1) * num.log(xmax / xmin))
def estimate_mixture(models, seqs, max_iter, eps, alpha=None):
    """ Given a Python-list of models and a SequenceSet seqs
    perform an nested EM to estimate maximum-likelihood
    parameters for the models and the mixture coefficients.
    The iteration stops after max_iter steps or if the
    improvement in log-likelihood is less than eps.

    alpha is a numarray of dimension len(models) containing
    the mixture coefficients. If alpha is not given, uniform
    values will be chosen.
        
    Result: The models are changed in place. Return value
    is (l, alpha, P) where l is the final log likelihood of
    seqs under the mixture, alpha is a numarray of
    dimension len(models) containing the mixture coefficients
    and P is a (|sequences| x |models|)-matrix containing
    P[model j| sequence i]
        
    """
    done = 0
    iter = 1
    last_mixture_likelihood = -99999999.99
    # The (nr of seqs x nr of models)-matrix holding the likelihoods
    l = numarray.zeros((len(seqs), len(models)), numarray.Float)
    if alpha is None:  # Uniform alpha
        logalpha = numarray.ones(len(models), numarray.Float) * \
                   math.log(1.0/len(models))
    else:
        logalpha = numarray.log(alpha)
    print logalpha, numarray.exp(logalpha)
    log_nrseqs = math.log(len(seqs))

    while 1:
        # Score all sequences with all models
        for i, m in enumerate(models):
            loglikelihood = m.loglikelihoods(seqs)
            # numarray slices: l[:,i] is the i-th column of l
            l[:, i] = numarray.array(loglikelihood)

        #print l
        for i in xrange(len(seqs)):
            l[i] += logalpha  # l[i] = ( log( a_k * P[seq i| model k]) )
        #print l
        mixture_likelihood = numarray.sum(numarray.sum(l))
        print "# iter %s joint likelihood = %f" % (iter, mixture_likelihood)

        improvement = mixture_likelihood - last_mixture_likelihood
        if iter > max_iter or improvement < eps:
            break

        # Compute P[model j| seq i]
        for i in xrange(len(seqs)):
            seq_logprob = sumlogs(l[i])  # \sum_{k} a_k P[seq i| model k]
            l[i] -= seq_logprob  # l[i] = ( log P[model j | seq i] )

        #print l
        l_exp = numarray.exp(l)  # XXX Use approx with table lookup
        #print "exp(l)", l_exp
        #print numarray.sum(numarray.transpose(l_exp)) # Print row sums

        # Compute priors alpha
        for i in xrange(len(models)):
            logalpha[i] = sumlogs(l[:, i]) - log_nrseqs

        #print "logalpha", logalpha, numarray.exp(logalpha)

        for j, m in enumerate(models):
            # Set the sequence weight for sequence i under model m to P[m| i]
            for i in xrange(len(seqs)):
                seqs.setWeight(i, l_exp[i, j])
            m.baumWelch(seqs, 10, 0.0001)

        iter += 1
        last_mixture_likelihood = mixture_likelihood

    return (mixture_likelihood, numarray.exp(logalpha), l_exp)
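
sumlogs is presumably a log-sum-exp over an array of log-values; a numerically stable sketch of such a helper in modern NumPy (not the original implementation):

import numpy as np

def sumlogs(logvals):
    """log(sum(exp(logvals))), shifted by the max to avoid underflow."""
    m = np.max(logvals)
    return m + np.log(np.sum(np.exp(logvals - m)))

print(sumlogs(np.array([-1000.0, -1000.0])))   # -999.307 rather than -inf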
Example #24
def orbitsAnimate(years=None,
                  root='./',
                  align='align/align_d_rms_1000_abs_t',
                  poly='polyfit_d/fit'):

    ##########
    #
    # START - Modify stuff in here only
    #
    ##########
    # Today's date
    today = 2008.5

    # Load up a starset of just those stars in orbits_movie.dat
    s = getOrbitStars(orbitFile='orbits_movie.dat',
                      root=root,
                      align=align,
                      poly=poly)
    tab = asciidata.open('/u/ghezgroup/data/gc/source_list/orbits_movie.dat')

    ##########
    #
    # STOP - Modify stuff in here only
    #
    ##########

    name = s.getArray('name')
    mag = s.getArray('mag')

    # Get plotting properties from the orbits.dat file
    discovered = tab[9].tonumpy()  # Discovery date
    xshift1 = tab[10].tonumpy()  # Shifts for labels (in first frame)
    yshift1 = tab[11].tonumpy()
    xshift2 = tab[12].tonumpy()  # Shifts for labels (in last frame)
    yshift2 = tab[13].tonumpy()
    colors = [tab[14][ss].strip() for ss in range(tab.nrows)]

    # Determine the mass assuming a distance of 8.0 kpc
    star0orb = s.stars[0].orbit
    dist = 8000.0  # in parsec
    axis = (star0orb.a / 1000.0) * dist  # in au
    mass = (axis)**3 / star0orb.p**2

    # Set the duration of the animation from the years keyword
    if (years is None):
        idx = name.index('S0-2')

        # Use S0-2's orbital period, rounded up to the nearest year
        years = math.ceil(s.stars[idx].orbit.p)

    # Array of time steps (0.2 yr steps)
    t = na.arange(1995.5, 1995.5 + years, 0.2, type=na.Float)

    # Do a flux scaling so that all the stars look good in our image.
    flux = 10.0**(mag / -3.0)
    flux /= flux.max()

    # Loop through all the stars and make an array of the X and Y positions
    # as a function of time. Store this on the star object as
    #   star.xanim -- array of X positions at each time step in t
    #   star.yanim -- array of Y positions at each time step in t
    for star in s.stars:
        (r, v, a) = star.orbit.kep2xyz(t, mass=mass, dist=dist)

        star.xanim = r[:, 0].copy()
        star.yanim = r[:, 1].copy()

    ## Make an image 500x500 pixels (1" x 1")
    #imgSize = 500 # pixels
    #scale = 1.0 / imgSize
    #xaxis = (na.arange(imgSize, type=na.Float) - (imgSize/2.0)) # xextent
    #xaxis *= -scale
    #yaxis = (na.arange(imgSize, type=na.Float) - (imgSize/2.0)) # yextent
    #yaxis *= scale

    # Make an image 1920x1080 pixels (1.7" x 1")
    ximgSize = 1920  # pixels
    yimgSize = 1080  # pixels
    xscale = (16.0 /
              9.) / ximgSize  # arcsec per pixel (16/9" from left to right)
    yscale = 1.0 / yimgSize  # arcsec per pixel (1" from top to bottom)
    xaxis = (na.arange(ximgSize, type=na.Float) - (ximgSize / 2.0))  # xextent
    xaxis *= -xscale
    yaxis = (na.arange(yimgSize, type=na.Float) - (yimgSize / 2.0))  # yextent
    yaxis *= yscale

    # Make grids of X/Y value at each pixel
    xx, yy = pylab.meshgrid(xaxis, yaxis)

    ##########
    #
    # Create image with gaussian PSF for each star
    #
    ##########
    fwhm = 0.020  # Make 20 mas instead of 55 mas

    for tt in range(1):
        #for tt in range(len(t)):
        time = t[tt]
        img = na.zeros((yimgSize, ximgSize), type=na.Float)  # (rows, cols) to match meshgrid
        xorb = []
        yorb = []

        for ss in range(1):
            #for ss in range(len(s.stars)):
            star = s.stars[ss]

            xpos = star.xanim[tt]
            ypos = star.yanim[tt]

            # Make a 2D gaussian for this star
            psf = na.exp(-((xx - xpos)**2 + (yy - ypos)**2) / fwhm**2)

            pdb.set_trace()
            img += flux[ss] * psf

        pylab.close(2)
        #pylab.figure(2, figsize=(5,5))
        pylab.figure(2, figsize=(16, 9))
        pylab.clf()
        pylab.axes([0.0, 0.0, 1.0, 1.0])
        pylab.axis('off')
        cmap = gccolors.idl_rainbow()
        pylab.imshow(sqrt(img),
                     origin='lowerleft',
                     cmap=cmap,
                     extent=[xaxis[0], xaxis[-1], yaxis[0], yaxis[-1]],
                     vmin=sqrt(0.01),
                     vmax=sqrt(1.0))

        # Plot the trails for each star
        for ss in range(len(s.stars)):
            star = s.stars[ss]

            before = where((t < time) & (t < discovered[ss]))[0]
            during = where((t < time) & (t >= discovered[ss])
                           & (t <= today))[0]
            future = where((t < time) & (t > today))[0]

            # Dashed before discovery and in the future
            if (len(before) > 0):
                pylab.plot(star.xanim[before],
                           star.yanim[before],
                           '--',
                           color=colors[ss],
                           linewidth=2)
            if (len(during) > 0):
                pylab.plot(star.xanim[during],
                           star.yanim[during],
                           '-',
                           color=colors[ss],
                           linewidth=2)
            if (len(future) > 0):
                pylab.plot(star.xanim[future],
                           star.yanim[future],
                           '--',
                           color=colors[ss],
                           linewidth=2)
            # Label the stars in the first and last image
            if (tt == 0):
                pylab.text(star.xanim[tt] + xshift1[ss],
                           star.yanim[tt] + yshift1[ss],
                           name[ss],
                           color='y',
                           fontsize=10)
            if (tt == (len(t) - 1)):
                pylab.text(star.xanim[tt] + xshift2[ss],
                           star.yanim[tt] + yshift2[ss],
                           name[ss],
                           color='y',
                           fontsize=10)
            # Label the first LGSAO image
            #diff = (abs(2005.5 - t).argsort())[0]
            #if (tt == diff):
            #    pylab.text(star.xanim[tt]+0.05,star.yanim[tt]+0.05,name[ss],color='y')

        ## Draw an outline box
        #bx = 0.49
        #pylab.plot([bx, -bx, -bx, bx, bx], [-bx, -bx, bx, bx, -bx],
        #           color='white', linewidth=2)

        #pylab.text(0.45, 0.4, t[tt], color='white',
        #           fontsize=16, fontweight='bold',
        #           horizontalalignment='left', verticalalignment='bottom')
        #pylab.text(-0.3, -0.4, 'Keck/UCLA Galactic',
        #           color='white', fontsize=10, fontweight='bold',
        #           horizontalalignment='center', verticalalignment='top')
        #pylab.text(-0.3, -0.44, 'Center Group',
        #           color='white', fontsize=10, fontweight='bold',
        #           horizontalalignment='center', verticalalignment='top')

        # Plot a scale (make it slightly larger than 0.1", otherwise
        # overlapping arrows look funny)
        pylab.quiver2([0.45], [-0.1], [0], [0.105],
                      color='w',
                      width=0.005,
                      scale=1)
        pylab.quiver2([0.45], [0.0], [0], [-0.105],
                      color='w',
                      width=0.005,
                      scale=1)
        pylab.text(0.4,
                   -0.045,
                   '0.1\"',
                   color='white',
                   fontsize=14,
                   fontweight='bold',
                   horizontalalignment='center',
                   verticalalignment='top')

        # Draw a star at the position of Sgr A* (large at first, then smaller)
        sgraColor = 'white'
        starSizes = {0: 0.08, 1: 0.07, 2: 0.06, 3: 0.05, 4: 0.04, 5: 0.04}
        star = gccolors.Star(0, 0, starSizes.get(tt, 0.03))
        if (tt <= 5):
            pylab.fill(star[0],
                       star[1],
                       fill=True,
                       edgecolor=sgraColor,
                       linewidth=1.5,
                       facecolor=sgraColor)
        else:
            pylab.fill(star[0],
                       star[1],
                       fill=False,
                       edgecolor=sgraColor,
                       linewidth=1.5)
        pylab.axis([0.5, -0.5, -0.5, 0.5])
        # Save as png for the best animation image quality and smallest animation!!!!
        pylab.savefig(
            '/u/ghezgroup/public_html/gc/images/media/orbits_anim_HD/img_%s.png'
            % str(t[tt]),
            dpi=100)
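
The per-star image above is an isotropic Gaussian PSF evaluated on a meshgrid; a standalone modern-NumPy sketch of that rendering step (sizes and star positions are illustrative). Note the image array must have shape (ny, nx) to match what meshgrid returns.

import numpy as np

nx, ny = 192, 108
xaxis = np.linspace(8.0 / 9.0, -8.0 / 9.0, nx)   # arcsec, flipped left-right
yaxis = np.linspace(-0.5, 0.5, ny)
xx, yy = np.meshgrid(xaxis, yaxis)               # both have shape (ny, nx)

fwhm = 0.020
img = np.zeros((ny, nx))
for xpos, ypos, flux in [(0.1, 0.05, 1.0), (-0.2, 0.1, 0.4)]:
    img += flux * np.exp(-((xx - xpos) ** 2 + (yy - ypos) ** 2) / fwhm ** 2)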
Example #25
    def train(self, train_toks, **kwargs):
        """
        Train a new C{ConditionalExponentialClassifier}, using the
        given training samples.  This
        C{ConditionalExponentialClassifier} should encode the model
        that maximizes entropy from all the models that are
        empirically consistent with C{train_toks}.
        
        @param kwargs: Keyword arguments.
          - C{iterations}: The maximum number of times IIS should
            iterate.  If IIS converges before this number of
            iterations, it may terminate.  Default=C{20}.
            (type=C{int})
            
          - C{debug}: The debugging level.  Higher values will cause
            more verbose output.  Default=C{0}.  (type=C{int})
            
          - C{classes}: The set of possible classes.  If none is given,
            then the set of all classes attested in the training data
            will be used instead.  (type=C{list} of (immutable)).
            
          - C{accuracy_cutoff}: The accuracy value that indicates
            convergence.  If the accuracy becomes closer to one
            than the specified value, then IIS will terminate.  The
            default value is None, which indicates that no accuracy
            cutoff should be used. (type=C{float})

          - C{delta_accuracy_cutoff}: The change in accuracy that should
            be taken to indicate convergence.  If the accuracy changes by
            less than this value in a single iteration, then IIS will
            terminate.  The default value is C{None}, which indicates
            that no accuracy-change cutoff should be
            used. (type=C{float})

          - C{log_likelihood_cutoff}: specifies what log-likelihood
            value should be taken to indicate convergence.  If the
            log-likelihood becomes closer to zero than the specified
            value, then IIS will terminate.  The default value is
            C{None}, which indicates that no log-likelihood cutoff
            should be used. (type=C{float})

          - C{delta_log_likelihood_cutoff}: specifies what change in
            log-likelihood should be taken to indicate convergence.
            If the log-likelihood changes by less than this value in a
            single iteration, then IIS will terminate.  The default
            value is C{None}, which indicates that no
            log-likelihood-change cutoff should be used.  (type=C{float})
        """
        assert _chktype(1, train_toks, [Token], (Token, ))
        # Process the keyword arguments.
        iter = 20
        debug = 0
        classes = None
        ll_cutoff = lldelta_cutoff = None
        acc_cutoff = accdelta_cutoff = None
        for (key, val) in kwargs.items():
            if key in ('iterations', 'iter'): iter = val
            elif key == 'debug': debug = val
            elif key == 'classes': classes = val
            elif key == 'log_likelihood_cutoff':
                ll_cutoff = abs(val)
            elif key == 'delta_log_likelihood_cutoff':
                lldelta_cutoff = abs(val)
            elif key == 'accuracy_cutoff':
                acc_cutoff = abs(val)
            elif key == 'delta_accuracy_cutoff':
                accdelta_cutoff = abs(val)
            else:
                raise TypeError('Unknown keyword arg %s' % key)
        if classes is None:
            classes = attested_classes(train_toks)
        self._classes = classes

        # Find the length of the first token's feature vector.
        if len(train_toks) == 0:
            raise ValueError('Expected at least one training token')
        vector0 = train_toks[0]['FEATURE_VECTOR']
        self._feature_vector_len = len(vector0)
        self._weight_vector_len = self._feature_vector_len * len(self._classes)

        # Build the offsets dictionary.  This maps from a class to the
        # index in the weight vector where that class's weights begin.
        self._offsets = dict([(cls, i * self._feature_vector_len)
                              for i, cls in enumerate(classes)])

        # Find the frequency with which each feature occurs in the
        # training data.
        ffreq_emperical = self._ffreq_emperical(train_toks)

        # Find the nf map, and related variables nfarray and nfident.
        # nf is the sum of the features for a given labeled text.
        # nfmap compresses this sparse set of values to a dense list.
        # nfarray performs the reverse operation.  nfident is
        # nfarray multiplied by an identity matrix.
        nfmap = self._nfmap(train_toks)
        nfs = nfmap.items()
        nfs.sort(lambda x, y: cmp(x[1], y[1]))
        nfarray = numarray.array([nf for (nf, i) in nfs], 'd')
        nftranspose = numarray.reshape(nfarray, (len(nfarray), 1))

        # An array that is 1 whenever ffreq_emperical is zero.  In
        # other words, it is one for any feature that's not attested
        # in the data.  This is used to avoid division by zero.
        unattested = numarray.zeros(self._weight_vector_len, 'd')
        for i in range(len(unattested)):
            if ffreq_emperical[i] == 0: unattested[i] = 1

        # Build the classifier.  Start with weight=1 for each feature,
        # except for the unattested features.  Start those out at
        # zero, since we know that's the correct value.
        weights = numarray.ones(self._weight_vector_len, 'd')
        weights -= unattested
        classifier = ConditionalExponentialClassifier(classes, weights)

        if debug > 0: print '  ==> Training (%d iterations)' % iter
        if debug > 2:
            print
            print '      Iteration    Log Likelihood    Accuracy'
            print '      ---------------------------------------'

        # Train for a fixed number of iterations.
        for iternum in range(iter):
            if debug > 2:
                print('     %9d    %14.5f    %9.3f' %
                      (iternum,
                       classifier_log_likelihood(classifier, train_toks),
                       classifier_accuracy(classifier, train_toks)))

            # Calculate the deltas for this iteration, using Newton's method.
            deltas = self._deltas(train_toks, classifier, unattested,
                                  ffreq_emperical, nfmap, nfarray, nftranspose)

            # Use the deltas to update our weights.
            weights = classifier.weights()
            weights *= numarray.exp(deltas)
            classifier.set_weights(weights)

            # Check log-likelihood cutoffs.
            if ll_cutoff is not None or lldelta_cutoff is not None:
                ll = classifier_log_likelihood(classifier, train_toks)
                if ll_cutoff is not None and ll > -ll_cutoff: break
                if lldelta_cutoff is not None:
                    if iternum > 0 and (ll - ll_old) < lldelta_cutoff: break
                    ll_old = ll

            # Check accuracy cutoffs.
            if acc_cutoff is not None or accdelta_cutoff is not None:
                acc = classifier_accuracy(classifier, train_toks)
                if acc_cutoff is not None and acc < acc_cutoff: break
                if accdelta_cutoff is not None:
                    if iternum > 0 and (acc_old - acc) < accdelta_cutoff: break
                    acc_old = acc

        if debug > 2:
            print('     %9d    %14.5f    %9.3f' %
                  (iternum + 1,
                   classifier_log_likelihood(classifier, train_toks),
                   classifier_accuracy(classifier, train_toks)))
            print

        # Return the classifier.
        return classifier
Example #26
def estimate_mixture(models, seqs, max_iter, eps, alpha=None):
    """ Given a Python-list of models and a SequenceSet seqs
    perform an nested EM to estimate maximum-likelihood
    parameters for the models and the mixture coefficients.
    The iteration stops after max_iter steps or if the
    improvement in log-likelihood is less than eps.

    alpha is a numarray of dimension len(models) containing
    the mixture coefficients. If alpha is not given, uniform
    values will be chosen.
        
    Result: The models are changed in place. Return value
    is (l, alpha, P) where l is the final log likelihood of
    seqs under the mixture, alpha is a numarray of
    dimension len(models) containing the mixture coefficients
    and P is a (|sequences| x |models|)-matrix containing
    P[model j| sequence i]
        
    """
    done = 0
    iter = 1
    last_mixture_likelihood = -99999999.99
    # The (nr of seqs x nr of models)-matrix holding the likelihoods
    l = numarray.zeros((len(seqs), len(models)), numarray.Float)
    if alpha is None: # Uniform alpha
        logalpha = numarray.ones(len(models), numarray.Float) * \
                   math.log(1.0/len(models))
    else:
        logalpha = numarray.log(alpha)
    print logalpha, numarray.exp(logalpha)
    log_nrseqs = math.log(len(seqs))

    while 1:
        # Score all sequences with all models
        for i, m in enumerate(models):
            loglikelihood = m.loglikelihoods(seqs)
            # numarray slices: l[:,i] is the i-th column of l
            l[:,i] = numarray.array(loglikelihood)

        #print l
        for i in xrange(len(seqs)):
            l[i] += logalpha # l[i] = ( log( a_k * P[seq i| model k]) )
        #print l
        mixture_likelihood = numarray.sum(numarray.sum(l))
        print "# iter %s joint likelihood = %f" % (iter, mixture_likelihood) 

        improvement = mixture_likelihood - last_mixture_likelihood
        if iter > max_iter or improvement < eps:
            break

        # Compute P[model j| seq i]
        for i in xrange(len(seqs)):
            seq_logprob = sumlogs(l[i]) # \sum_{k} a_k P[seq i| model k]
            l[i] -= seq_logprob # l[i] = ( log P[model j | seq i] )

        #print l
        l_exp = numarray.exp(l) # XXX Use approx with table lookup
        #print "exp(l)", l_exp
        #print numarray.sum(numarray.transpose(l_exp)) # Print row sums

        # Compute priors alpha
        for i in xrange(len(models)):
            logalpha[i] = sumlogs(l[:,i]) - log_nrseqs

        #print "logalpha", logalpha, numarray.exp(logalpha)

        for j, m in enumerate(models):
            # Set the sequence weight for sequence i under model m to P[m| i]
            for i in xrange(len(seqs)):
                seqs.setWeight(i,l_exp[i,j])
            m.baumWelch(seqs, 10, 0.0001)

        iter += 1
        last_mixture_likelihood = mixture_likelihood

    return (mixture_likelihood, numarray.exp(logalpha), l_exp)
Example #27
    def _deltas(
            self,
            train_toks,  #fd_list, labeled_tokens, labels,
            classifier,
            unattested,
            ffreq_emperical,
            nfmap,
            nfarray,
            nftranspose):
        """
        Calculate the update values for the classifier weights for
        this iteration of IIS.  These update weights are the value of
        C{delta} that solves the equation::
        
          ffreq_emperical[i]
                 =
          SUM[t,l] (classifier.prob(LabeledText(t,l)) *
                    fd_list.detect(LabeledText(t,l))[i] *
                    exp(delta[i] * nf(LabeledText(t,l))))

        Where:
            - M{t} is a text in C{labeled_tokens}
            - M{l} is an element of C{labels}
            - M{nf(ltext)} = SUM[M{j}] C{fd_list.detect}(M{ltext})[M{j}] 

        This method uses Newton's method to solve this equation for
        M{delta[i]}.  In particular, it starts with a guess of
        C{delta[i]}=1; and iteratively updates C{delta} with::

            delta[i] -= (ffreq_emperical[i] - sum1[i])/(-sum2[i])

        until convergence, where M{sum1} and M{sum2} are defined as::
        
          sum1 = SUM[t,l] (classifier.prob(LabeledText(t,l)) *
                           fd_list.detect(LabeledText(t,l))[i] *
                           exp(delta[i] * nf(LabeledText(t,l))))
          sum2 = SUM[t,l] (classifier.prob(LabeledText(t,l)) *
                           fd_list.detect(LabeledText(t,l))[i] *
                           nf(LabeledText(t,l)) *
                           exp(delta[i] * nf(LabeledText(t,l))))

        Note that M{sum1} and M{sum2} depend on C{delta}; so they need
        to be re-computed each iteration.
        
        The variables C{nfmap}, C{nfarray}, and C{nftranspose} are
        used to generate a dense encoding for M{nf(ltext)}.  This
        allows C{_deltas} to calculate M{sum1} and M{sum2} using
        matrices, which yields a significant performance improvement. 

        @param train_toks: The set of training tokens, each carrying a
            C{FEATURE_VECTOR} property.  (This single argument replaces
            the older C{fd_list}, C{labeled_tokens}, and C{labels}
            parameters still named in the equations above.)
        @type train_toks: C{list} of C{Token}
        @param classifier: The current classifier.
        @type classifier: C{ClassifierI}
        @param ffreq_emperical: An array containing the empirical
            frequency for each feature.  The M{i}th element of this
            array is the empirical frequency for feature M{i}.
        @type ffreq_emperical: C{sequence} of C{float}
        @param unattested: An array that is 1 for features that are
            not attested in the training data; and 0 for features that
            are attested.  In other words, C{unattested[i]==1} iff
            C{ffreq_emperical[i]==0}. 
        @type unattested: C{sequence} of C{int}
        @param nfmap: A map that can be used to compress C{nf} to a dense
            vector.
        @type nfmap: C{dictionary} from C{int} to C{int}
        @param nfarray: An array that can be used to uncompress C{nf}
            from a dense vector.
        @type nfarray: C{array} of C{float}
        @param nftranspose: The transpose of C{nfarray}.
        @type nftranspose: C{array} of C{float}
        """
        # These parameters control when we decide that we've
        # converged.  It probably should be possible to set these
        # manually, via keyword arguments to train.
        NEWTON_CONVERGE = 1e-12
        MAX_NEWTON = 30

        deltas = numarray.ones(self._weight_vector_len, 'd')

        # Precompute the A matrix:
        # A[nf][id] = sum ( p(text) * p(label|text) * f(text,label) )
        # over all label,text s.t. num_features[label,text]=nf
        A = numarray.zeros((len(nfmap), self._weight_vector_len), 'd')

        for i, tok in enumerate(train_toks):
            dist = classifier.get_class_probs(tok)

            # Find the number of active features.
            feature_vector = tok['FEATURE_VECTOR']
            assignments = feature_vector.assignments()
            nf = sum([val for (id, val) in assignments])

            # Update the A matrix
            for cls, offset in self._offsets.items():
                for (id, val) in assignments:
                    A[nfmap[nf], id + offset] += dist.prob(cls) * val
        A /= len(train_toks)

        # Iteratively solve for delta.  Use the following variables:
        #   - nf_delta[x][y] = nf[x] * delta[y]
        #   - exp_nf_delta[x][y] = exp(nf[x] * delta[y])
        #   - nf_exp_nf_delta[x][y] = nf[x] * exp(nf[x] * delta[y])
        #   - sum1[i][nf] = sum p(text)p(label|text)f[i](label,text)
        #                       exp(delta[i]nf)
        #   - sum2[i][nf] = sum p(text)p(label|text)f[i](label,text)
        #                       nf exp(delta[i]nf)
        for rangenum in range(MAX_NEWTON):
            nf_delta = numarray.outerproduct(nfarray, deltas)
            exp_nf_delta = numarray.exp(nf_delta)
            nf_exp_nf_delta = nftranspose * exp_nf_delta
            sum1 = numarray.sum(exp_nf_delta * A)
            sum2 = numarray.sum(nf_exp_nf_delta * A)

            # Avoid division by zero.
            sum2 += unattested

            # Update the deltas.
            deltas -= (ffreq_emperical - sum1) / -sum2

            # We can stop once we converge.
            n_error = (numarray.sum(abs(ffreq_emperical - sum1)) /
                       numarray.sum(abs(deltas)))
            if n_error < NEWTON_CONVERGE:
                return deltas

        return deltas
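
For a single feature, the Newton update above reduces to a one-dimensional root find: choose delta so that the model's expected feature count matches ffreq_emperical. A minimal standalone numeric sketch under the same update rule (the nf distribution and the numbers below are illustrative, not taken from any training set):

import math

def solve_delta(ffreq, nf_mass, max_newton=30, converge=1e-12):
    # nf_mass maps nf -> expected feature mass for that nf (the role
    # played by a column of the A matrix above).
    delta = 1.0                                # same initial guess as _deltas
    for _ in range(max_newton):
        sum1 = sum(a * math.exp(delta * nf) for nf, a in nf_mass.items())
        sum2 = sum(a * nf * math.exp(delta * nf) for nf, a in nf_mass.items())
        delta -= (ffreq - sum1) / -sum2        # Newton step from the docstring
        if abs(ffreq - sum1) < converge:
            break
    return delta

print solve_delta(0.4, {1: 0.25, 2: 0.05})     # converges to about 0.243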
Example #28
def orbitsAnimate(years=None, root="./", align="align/align_d_rms_1000_abs_t", poly="polyfit_d/fit"):

    ##########
    #
    # START - Modify stuff in here only
    #
    ##########
    # Today's date
    today = 2008.5

    # Load up a starset of just those stars in orbits_movie.dat
    s = getOrbitStars(orbitFile="orbits_movie.dat", root=root, align=align, poly=poly)
    tab = asciidata.open("/u/ghezgroup/data/gc/source_list/orbits_movie.dat")

    ##########
    #
    # STOP - Modify stuff in here only
    #
    ##########

    name = s.getArray("name")
    mag = s.getArray("mag")

    # Get plotting properties from the orbits.dat file
    discovered = tab[9].tonumpy()  # Discovery date
    xshift1 = tab[10].tonumpy()  # Shifts for labels (in first frame)
    yshift1 = tab[11].tonumpy()
    xshift2 = tab[12].tonumpy()  # Shifts for labels (in last frame)
    yshift2 = tab[13].tonumpy()
    colors = [tab[14][ss].strip() for ss in range(tab.nrows)]

    # Determine the mass assuming a distance of 8.0 kpc
    star0orb = s.stars[0].orbit
    dist = 8000.0  # in parsec
    axis = (star0orb.a / 1000.0) * dist  # in au
    mass = (axis) ** 3 / star0orb.p ** 2  # Kepler's third law: M [Msun] = a[AU]**3 / P[yr]**2

    # Set the duration of the animation from the years keyword
    if years is None:
        idx = name.index("S0-2")

        # Use S0-2's orbital period, rounded up to the nearest year
        years = math.ceil(s.stars[idx].orbit.p)

    # Array of time steps (0.2 yr steps)
    t = na.arange(1995.5, 1995.5 + years, 0.2, type=na.Float)

    # Do a flux scaling so that all the stars look good in our image.
    flux = 10.0 ** (mag / -3.0)
    flux /= flux.max()

    # Loop through all the stars and make an array of the X and Y positions
    # as a function of time. Store this on the star object as
    #   star.xanim -- array of X positions at each time step in t
    #   star.yanim -- array of Y positions at each time step in t
    for star in s.stars:
        (r, v, a) = star.orbit.kep2xyz(t, mass=mass, dist=dist)

        star.xanim = r[:, 0].copy()
        star.yanim = r[:, 1].copy()

    ## Make an image 500x500 pixels (1" x 1")
    # imgSize = 500 # pixels
    # scale = 1.0 / imgSize
    # xaxis = (na.arange(imgSize, type=na.Float) - (imgSize/2.0)) # xextent
    # xaxis *= -scale
    # yaxis = (na.arange(imgSize, type=na.Float) - (imgSize/2.0)) # yextent
    # yaxis *= scale

    # Make an image 1920x1080 pixels (1.7" x 1")
    ximgSize = 1920  # pixels
    yimgSize = 1080  # pixels
    xscale = (16.0 / 9.0) / ximgSize  # arcsec per pixel (16/9" from left to right)
    yscale = 1.0 / yimgSize  # arcsec per pixel (1" from top to bottom)
    xaxis = na.arange(ximgSize, type=na.Float) - (ximgSize / 2.0)  # xextent
    xaxis *= -xscale
    yaxis = na.arange(yimgSize, type=na.Float) - (yimgSize / 2.0)  # yextent
    yaxis *= yscale

    # Make grids of X/Y value at each pixel
    xx, yy = pylab.meshgrid(xaxis, yaxis)

    ##########
    #
    # Create image with gaussian PSF for each star
    #
    ##########
    fwhm = 0.020  # Make 20 mas instead of 55 mas

    # for tt in range(1):  # debug: render only the first frame
    for tt in range(len(t)):
        time = t[tt]
        img = na.zeros((yimgSize, ximgSize), type=na.Float)  # (rows, cols) to match the meshgrid shapes
        xorb = []
        yorb = []

        # for ss in range(1):  # debug: render only the first star
        for ss in range(len(s.stars)):
            star = s.stars[ss]

            xpos = star.xanim[tt]
            ypos = star.yanim[tt]

            # Make a 2D gaussian for this star
            psf = na.exp(-((xx - xpos) ** 2 + (yy - ypos) ** 2) / fwhm ** 2)

            # pdb.set_trace()  # debugging breakpoint; keep disabled for full runs
            img += flux[ss] * psf

        pylab.close(2)
        # pylab.figure(2, figsize=(5,5))
        pylab.figure(2, figsize=(16, 9))
        pylab.clf()
        pylab.axes([0.0, 0.0, 1.0, 1.0])
        pylab.axis("off")
        cmap = gccolors.idl_rainbow()
        pylab.imshow(
            sqrt(img),
            origin="lowerleft",
            cmap=cmap,
            extent=[xaxis[0], xaxis[-1], yaxis[0], yaxis[-1]],
            vmin=sqrt(0.01),
            vmax=sqrt(1.0),
        )

        # Plot the trails for each star
        for ss in range(len(s.stars)):
            star = s.stars[ss]

            before = where((t < time) & (t < discovered[ss]))[0]
            during = where((t < time) & (t >= discovered[ss]) & (t <= today))[0]
            future = where((t < time) & (t > today))[0]

            # Dashed before discovery and in the future
            if len(before) > 0:
                pylab.plot(star.xanim[before], star.yanim[before], "--", color=colors[ss], linewidth=2)
            if len(during) > 0:
                pylab.plot(star.xanim[during], star.yanim[during], "-", color=colors[ss], linewidth=2)
            if len(future) > 0:
                pylab.plot(star.xanim[future], star.yanim[future], "--", color=colors[ss], linewidth=2)
            # Label the stars in the first and last image
            if tt == 0:
                pylab.text(star.xanim[tt] + xshift1[ss], star.yanim[tt] + yshift1[ss], name[ss], color="y", fontsize=10)
            if tt == (len(t) - 1):
                pylab.text(star.xanim[tt] + xshift2[ss], star.yanim[tt] + yshift2[ss], name[ss], color="y", fontsize=10)
            # Label the first LGSAO image
            # diff = (abs(2005.5 - t).argsort())[0]
            # if (tt == diff):
            #    pylab.text(star.xanim[tt]+0.05,star.yanim[tt]+0.05,name[ss],color='y')

        ## Draw an outline box
        # bx = 0.49
        # pylab.plot([bx, -bx, -bx, bx, bx], [-bx, -bx, bx, bx, -bx],
        #           color='white', linewidth=2)

        # pylab.text(0.45, 0.4, t[tt], color='white',
        #           fontsize=16, fontweight='bold',
        #           horizontalalignment='left', verticalalignment='bottom')
        # pylab.text(-0.3, -0.4, 'Keck/UCLA Galactic',
        #           color='white', fontsize=10, fontweight='bold',
        #           horizontalalignment='center', verticalalignment='top')
        # pylab.text(-0.3, -0.44, 'Center Group',
        #           color='white', fontsize=10, fontweight='bold',
        #           horizontalalignment='center', verticalalignment='top')

        # Plot a scale bar (made slightly longer than 0.1"; otherwise the
        # overlapping arrows look funny)
        pylab.quiver2([0.45], [-0.1], [0], [0.105], color="w", width=0.005, scale=1)
        pylab.quiver2([0.45], [0.0], [0], [-0.105], color="w", width=0.005, scale=1)
        pylab.text(
            0.4,
            -0.045,
            '0.1"',
            color="white",
            fontsize=14,
            fontweight="bold",
            horizontalalignment="center",
            verticalalignment="top",
        )

        # Draw a star at the position of Sgr A* (large at first, then smaller)
        sgraColor = "white"
        if tt == 0:
            star = gccolors.Star(0, 0, 0.08)
            pylab.fill(star[0], star[1], fill=True, edgecolor=sgraColor, linewidth=1.5, facecolor=sgraColor)
        if tt == 1:
            star = gccolors.Star(0, 0, 0.07)
            pylab.fill(star[0], star[1], fill=True, edgecolor=sgraColor, linewidth=1.5, facecolor=sgraColor)
        if tt == 2:
            star = gccolors.Star(0, 0, 0.06)
            pylab.fill(star[0], star[1], fill=True, edgecolor=sgraColor, linewidth=1.5, facecolor=sgraColor)
        if tt == 3:
            star = gccolors.Star(0, 0, 0.05)
            pylab.fill(star[0], star[1], fill=True, edgecolor=sgraColor, linewidth=1.5, facecolor=sgraColor)
        if tt == 4:
            star = gccolors.Star(0, 0, 0.04)
            pylab.fill(star[0], star[1], fill=True, edgecolor=sgraColor, linewidth=1.5, facecolor=sgraColor)
        if tt == 5:
            star = gccolors.Star(0, 0, 0.04)
            pylab.fill(star[0], star[1], fill=True, edgecolor=sgraColor, linewidth=1.5, facecolor=sgraColor)
        if tt > 5:
            star = gccolors.Star(0, 0, 0.03)
            pylab.fill(star[0], star[1], fill=False, edgecolor=sgraColor, linewidth=1.5)
        pylab.axis([0.5, -0.5, -0.5, 0.5])
        # Save as png for the best animation image quality and smallest animation!!!!
        pylab.savefig("/u/ghezgroup/public_html/gc/images/media/orbits_anim_HD/img_%s.png" % str(t[tt]), dpi=100)
Example #29
def orbitsAnimate(years=None, rad=0.5,
                      root='./',
                      align='align/align_d_rms_1000_abs_t',
                      poly='polyfit_d/fit',
                      orbitFile='orbits_movie.dat'):
    """
    Set rad to indicate the radial extent of orbits animation.
    Default is rad=0.5, which will show the central arcsecond orbits.
    """

    ##########
    #
    # START - Modify stuff in here only
    #
    ##########
    # Today's date
    today = 2011.7
    
    # Load up a starset of just those stars in orbits_movie.dat
    s = getOrbitStars(orbitFile=orbitFile,
                      root=root, align=align, poly=poly)
    tab = asciidata.open('/u/ghezgroup/data/gc/source_list/orbits_movie.dat')
    #tab = asciidata.open('/u/ghezgroup/data/gc/source_list/new_orbits_movie_keck_foundation_v2.dat')
    #tab = asciidata.open('/u/syelda/research/gc/aligndir/09_09_20/efit/new_orbits_movie.dat')

    ##########
    #
    # STOP - Modify stuff in here only
    #
    ##########

    name = s.getArray('name')
    mag = s.getArray('mag')

    # Get plotting properties from the orbits.dat file
    discovered = tab[9].tonumpy()  # Discovery date
    xshift1 = tab[10].tonumpy()    # Shifts for labels (in first frame)
    yshift1 = tab[11].tonumpy()
    xshift2 = tab[12].tonumpy()    # Shifts for labels (in last frame)
    yshift2 = tab[13].tonumpy()
    colors = [tab[14][ss].strip() for ss in range(tab.nrows)]

    # Determine the mass assuming a distance of 8.0 kpc
    star0orb = s.stars[0].orbit
    dist = 8000.0 # in parsec
    axis = (star0orb.a / 1000.0) * dist # in au
    mass = (axis)**3 / star0orb.p**2  # Kepler's third law: M [Msun] = a[AU]**3 / P[yr]**2

    # Set the duration of the animation from the years keyword
    if years is None:
        idx = name.index('S0-2')

        # Use S0-2's orbital period, rounded up to the nearest year
        years = math.ceil(s.stars[idx].orbit.p)

    # Array of time steps (0.2 yr steps)
    t = na.arange(1995.5, 1995.5+years, 0.2, type=na.Float)

    # For keck foundation presentation, make two versions:
    # Keck foundation version 1 -- extend to 2010.7
    #t = na.arange(1995.5, 2010.8, 0.2, type=na.Float)
    # Keck foundation version 2 -- extend to 2019.7
    #t = na.arange(1995.5, 2019.8, 0.2, type=na.Float)

    # Do a flux scaling so that all the stars look good in our image.
    #flux = 10.0**(mag/-3.0)
    #flux /= flux.max()
    flux = 10.0**(mag/-4.0) # Had to change to get 19th mag star to show up!
    flux /= flux.max()

    # Loop through all the stars and make an array of the X and Y positions
    # as a function of time. Store this on the star object as
    #   star.xanim -- array of X positions at each time step in t
    #   star.yanim -- array of Y positions at each time step in t
    for star in s.stars:
        (r, v, a) = star.orbit.kep2xyz(t, mass=mass, dist=dist)

        star.xanim = r[:,0].copy()
        star.yanim = r[:,1].copy()

    # Make an image 500x500 pixels (1" x 1")
    imgSize = 500 # pixels
    scale = (2.0*rad) / imgSize
    xaxis = (na.arange(imgSize, type=na.Float) - (imgSize/2.0)) # xextent
    xaxis *= -scale
    yaxis = (na.arange(imgSize, type=na.Float) - (imgSize/2.0)) # yextent
    yaxis *= scale

    # Make grids of X/Y value at each pixel
    xx, yy = pylab.meshgrid(xaxis, yaxis)

    ##########
    #
    # Create image with gaussian PSF for each star
    #
    ##########
    fwhm = 0.020   # Make 20 mas instead of 55 mas

    #for tt in range(1):
    #for tt in [len(t)-1]:
    for tt in range(len(t)):
        time = t[tt]
        img = na.zeros((imgSize, imgSize), type=na.Float)
        xorb = []
        yorb = []
        
        #for ss in range(1):
        for ss in range(len(s.stars)):
            star = s.stars[ss]

            xpos = star.xanim[tt]
            ypos = star.yanim[tt]

            # Make a 2D gaussian for this star
            psf = na.exp(-((xx - xpos)**2 + (yy - ypos)**2) / fwhm**2)

            img += flux[ss] * psf

        pylab.close(2)
        # For higher resolution, just increase figsize slightly
        pylab.figure(2, figsize=(5,5))
        pylab.clf()
        pylab.axes([0.0, 0.0, 1.0, 1.0])
        pylab.axis('off')
        cmap = gccolors.idl_rainbow()
        pylab.imshow(sqrt(img), origin='lower', cmap=cmap,
                     extent=[xaxis[0], xaxis[-1], yaxis[0], yaxis[-1]],
                     vmin=sqrt(0.01), vmax=sqrt(1.0))

        # Plot the trails for each star 
        for ss in range(len(s.stars)):
            star = s.stars[ss]

            before = where((t < time) & (t < discovered[ss]))[0]
            during = where((t < time) & (t >= discovered[ss]) & (t <= today))[0]
            future = where((t < time) & (t > today))[0]

            # Dashed before discovery and in the future
            if (len(before) > 0):    
                pylab.plot(star.xanim[before], star.yanim[before], '--',
                           color=colors[ss], linewidth=2)
            if (len(during) > 0):    
                pylab.plot(star.xanim[during], star.yanim[during], '-',
                           color=colors[ss], linewidth=2)
            if (len(future) > 0):    
                pylab.plot(star.xanim[future], star.yanim[future], '--',
                           color=colors[ss], linewidth=2)
            # Label the stars in the first and last image
            if (tt == 0):
                pylab.text(star.xanim[tt]+xshift1[ss],
                           star.yanim[tt]+yshift1[ss],
                           name[ss],color='y', fontsize=10)
            if (tt == (len(t)-1)):
                pylab.text(star.xanim[tt]+xshift2[ss],
                           star.yanim[tt]+yshift2[ss],
                           name[ss],color='y', fontsize=10)
            # Label the first LGSAO image
            #diff = (abs(2005.5 - t).argsort())[0]
            #if (tt == diff):
            #    pylab.text(star.xanim[tt]+0.05,star.yanim[tt]+0.05,name[ss],color='y')

        # Draw an outline box
        #bx = 0.49
        bx = rad-(0.02*rad)
        pylab.plot([bx, -bx, -bx, bx, bx], [-bx, -bx, bx, bx, -bx],
                   color='white', linewidth=2)

        pylab.text(rad-(0.1*rad), rad-(0.2*rad), t[tt], color='white',
                   fontsize=16, fontweight='bold',
                   horizontalalignment='left', verticalalignment='bottom')
        pylab.text(rad-(0.4*rad), -rad+(0.2*rad), 'Keck/UCLA Galactic',
                   color='white', fontsize=10, fontweight='bold',
                   horizontalalignment='center', verticalalignment='top')
        pylab.text(rad-(0.4*rad), -rad+(0.12*rad), 'Center Group',
                   color='white', fontsize=10, fontweight='bold',
                   horizontalalignment='center', verticalalignment='top')

        # Plot a scale bar (made slightly longer than arrowSize; otherwise the
        # overlapping arrows look funny)
        arrowSize = rad/5.
        pylab.quiver([-rad+(0.1*rad)],[-rad+(0.3*rad)],[0],[arrowSize+0.005],
                       color='w',width=0.005,scale=1,units='x')
        pylab.quiver([-rad+(0.1*rad)],[-rad+(0.3*rad)-arrowSize+0.003],[0],
                       [-arrowSize-0.005], color='w',width=0.005,scale=1,units='x')
        pylab.text(-rad+(0.22*rad), -rad+(0.25*rad), str(arrowSize)+'\"',
                   color='white', fontsize=14, fontweight='bold',
                   horizontalalignment='center', verticalalignment='top')

        # Draw a star at the position of Sgr A* (large at first, then smaller)
        sgraColor = 'white'
        if (tt == 0):
            star = gccolors.Star(0,0,0.08)
            pylab.fill(star[0], star[1], fill=True,edgecolor=sgraColor,
                       linewidth=1.5,facecolor=sgraColor)
        if (tt == 1):
            star = gccolors.Star(0,0,0.07)
            pylab.fill(star[0], star[1], fill=True,edgecolor=sgraColor,
                       linewidth=1.5,facecolor=sgraColor)
        if (tt == 2):
            star = gccolors.Star(0,0,0.06)
            pylab.fill(star[0], star[1], fill=True,edgecolor=sgraColor,
                       linewidth=1.5,facecolor=sgraColor)
        if (tt == 3):
            star = gccolors.Star(0,0,0.05)
            pylab.fill(star[0], star[1], fill=True,edgecolor=sgraColor,
                       linewidth=1.5,facecolor=sgraColor)
        if (tt == 4):
            star = gccolors.Star(0,0,0.04)
            pylab.fill(star[0], star[1], fill=True,edgecolor=sgraColor,
                       linewidth=1.5,facecolor=sgraColor)
        if (tt == 5):
            star = gccolors.Star(0,0,0.04)
            pylab.fill(star[0], star[1], fill=True,edgecolor=sgraColor,
                       linewidth=1.5,facecolor=sgraColor)
        if (tt > 5):
            star = gccolors.Star(0,0,0.03)
            pylab.fill(star[0], star[1], fill=False,edgecolor=sgraColor,
                       linewidth=1.5)
        #pylab.axis([0.5, -0.5, -0.5, 0.5])
        pylab.axis([rad, -1.*rad, -1*rad, rad])
        # Save as png for the best animation image quality and smallest animation!!!!
        #pylab.savefig('/u/ghezgroup/public_html/gc/images/media/orbits_anim_2008/img_%s.png'
        pylab.savefig('/u/syelda/research/gc/anim/orbits/2011/img_%s.png'
                      % str(t[tt]), dpi=100)
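
Both versions of orbitsAnimate derive display fluxes from stellar magnitudes before rendering. The standard relation is flux proportional to 10**(-0.4*mag); the exponents of -1/3 and -1/4 used above deliberately compress the dynamic range so faint stars stay visible next to bright ones. A minimal sketch of the normalization (magnitudes below are illustrative):

import numpy as np

mag = np.array([14.0, 15.5, 19.0])   # illustrative magnitudes
flux = 10.0**(mag / -4.0)            # compressed scaling, as in the code above
flux /= flux.max()                   # normalize so the brightest star is 1.0
print flux                           # 5 mag span compressed to 1.25 dex instead of 2 dex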