Example #1
    def inverse(self, value):

        if not self.scaled():
            raise ValueError("Not invertible until scaled")

        vmin, vmax = self.vmin, self.vmax

        if cbook.iterable(value):
            val = ma.asarray(value)
        else:
            val = value

        if self.stretch == 'linear':
            pass
        elif self.stretch == 'log':
            val = (ma.power(10., val * ma.log10(self.midpoint)) -
                   1.) / (self.midpoint - 1.)
        elif self.stretch == 'sqrt':
            val = val * val
        elif self.stretch == 'arcsinh':
            val = self.midpoint * \
                  ma.sinh(val * ma.arcsinh(1. / self.midpoint))
        elif self.stretch == 'power':
            val = ma.power(val, (1. / self.exponent))
        else:
            raise Exception("Unknown stretch in APLpyNormalize: %s" %
                            self.stretch)
        return vmin + val * (vmax - vmin)
Example #2
def pvbBound(n):
    a = multiply(2, n)
    b = power(a, 50)
    c = multiply(6, b)
    d = divide(c, 0.05)
    e = log(d)
    f = divide(1.0, n)
    return f + sqrt(divide(1.0, power(n, 2)) + multiply(f, e))
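A minimal usage sketch (not from the source): evaluating the bound above for a sample size of 10000. Passing a float keeps power(2*n, 50) in floating point instead of overflowing a fixed-width integer dtype.

from numpy import multiply, power, divide, log, sqrt

# use a float argument to avoid silent integer overflow in power()
print(pvbBound(10000.0))  # roughly 0.22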
Example #3
def average_in_flux(mag, dmag, axis=None):
    flux = 10**(mag / -2.5)
    dflux = np.log(10) / 2.5 * flux * dmag
    avg_dflux = np.power(np.sum(np.power(dflux, -2), axis), -0.5)
    avg_flux = np.sum(flux * np.power(dflux, -2), axis) * avg_dflux**2
    avg_mag = -2.5 * np.log10(avg_flux)
    avg_dmag = 2.5 / np.log(10) * np.divide(avg_dflux, avg_flux)
    return avg_mag, avg_dmag
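A minimal usage sketch (illustrative values): combining three magnitude measurements of the same source with an inverse-variance weighted mean in flux space.

import numpy as np

mag = np.array([18.2, 18.4, 18.3])
dmag = np.array([0.05, 0.10, 0.07])
avg_mag, avg_dmag = average_in_flux(mag, dmag)
# avg_mag is pulled toward the better-measured points, and avg_dmag comes out
# smaller than any individual dmag, as expected for a weighted mean.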
Example #5
    def inverse(self, value):

        # ORIGINAL MATPLOTLIB CODE

        if not self.scaled():
            raise ValueError("Not invertible until scaled")

        vmin, vmax = self.vmin, self.vmax

        # CUSTOM APLPY CODE

        if cbook.iterable(value):
            val = ma.asarray(value)
        else:
            val = value

        if self.stretch == 'Linear':

            pass

        elif self.stretch == 'Log':

            val = (ma.power(10., val * ma.log10(self.midpoint)) -
                   1.) / (self.midpoint - 1.)

        elif self.stretch == 'Sqrt':

            val = val * val

        elif self.stretch == 'Arcsinh':

            val = self.midpoint * \
                  ma.sinh(val * ma.arcsinh(1. / self.midpoint))

        elif self.stretch == 'Arccosh':

            val = self.midpoint * \
                  ma.cosh(val * ma.arccosh(1. / self.midpoint))

        elif self.stretch == 'Power':

            val = ma.power(val, (1. / self.exponent))

        elif self.stretch == 'Exp':

            val = 1. / np.exp(val)

        else:

            raise Exception("Unknown stretch in APLpyNormalize: %s" %
                            self.stretch)

        return vmin + val * (vmax - vmin)
Example #6
    def inverse(self, value):
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        vin, cin = self.vin, self.cin

        if cbook.iterable(value):
            val = ma.asarray(value)
            ipos = (val > (0.5 + cin))
            ineg = (val < (0.5 - cin))
            izero = ~(ipos | ineg)

            result = ma.empty_like(val)
            result[izero] = (val[izero] - 0.5) * vin / cin
            result[ipos] = vin * pow((vmax / vin),
                                     (val[ipos] - (0.5 + cin)) / (0.5 - cin))
            result[ineg] = -vin * pow((-vmin / vin),
                                      ((0.5 - cin) - val[ineg]) / (0.5 - cin))

            # return the piecewise result (mirrors the scalar branch below)
            r = result
        else:
            if value > 0.5 + cin:
                r = vin * pow((vmax / vin),
                              (value - (0.5 + cin)) / (0.5 - cin))
            elif value < 0.5 - cin:
                r = -vin * pow((-vmin / vin),
                               ((0.5 - cin) - value) / (0.5 - cin))
            else:
                r = (value - 0.5) * vin / cin
        return r
Example #7
    def calculateCentroidMeasurements(self):
        self.X[self.badFrames, :] = ma.masked
        if not self.useSmoothingFilterDerivatives:
            self.v[1:-1] = (self.X[2:, :] - self.X[0:-2])/(2.0/self.frameRate)
        else:
            # use a cubic polynomial filter to estimate the velocity
            self.v = ma.zeros(self.X.shape)
            halfWindow = int(np.round(self.filterWindow/2.*self.frameRate))
            for i in xrange(halfWindow, self.v.shape[0]-halfWindow):
                start = i-halfWindow
                mid = i
                finish = i+halfWindow+1
                if not np.any(self.X.mask[start:finish,:]):
                    px = np.polyder(np.polyfit(self.t[start:finish]-self.t[mid],
                                               self.X[start:finish, 0], 3))
                    py = np.polyder(np.polyfit(self.t[start:finish]-self.t[mid],
                                               self.X[start:finish, 1], 3))
                    self.v[i,:] = [np.polyval(px, 0), np.polyval(py, 0)]
                else:
                    self.v[i,:] = ma.masked

        self.s = ma.sqrt(ma.sum(ma.power(self.v, 2), axis=1))
        self.phi = ma.arctan2(self.v[:, 1], self.v[:, 0])
        self.t[self.badFrames] = ma.masked
        self.X[self.badFrames, :] = ma.masked
        self.v[self.badFrames, :] = ma.masked
        self.s[self.badFrames] = ma.masked
        self.phi[self.badFrames] = ma.masked
Example #8
    def inverse(self, value):

        # ORIGINAL MATPLOTLIB CODE

        if not self.scaled():
            raise ValueError("Not invertible until scaled")

        vmin, vmax = self.vmin, self.vmax

        # CUSTOM APLPY CODE

        if cbook.iterable(value):
            val = ma.asarray(value)
        else:
            val = value

        if self.stretch == 'linear':

            pass

        elif self.stretch == 'log':

            val = (ma.power(10., val * ma.log10(self.midpoint)) - 1.) / (self.midpoint - 1.)

        elif self.stretch == 'sqrt':

            val = val * val

        elif self.stretch == 'arcsinh':

            val = self.midpoint * \
                  ma.sinh(val * ma.arcsinh(1. / self.midpoint))

        elif self.stretch == 'square':

            val = ma.power(val, (1. / 2))

        elif self.stretch == 'power':

            val = ma.power(val, (1. / self.exponent))

        else:

            raise Exception("Unknown stretch in APLpyNormalize: %s" %
                            self.stretch)

        return vmin + val * (vmax - vmin)
Example #9
 def transform_non_affine(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.invlinthresh, self.invlinthresh, copy=False)
     exp = sign * self.linthresh * (ma.power(self.base, (sign * (masked / self.linthresh)) - self._linscale_adj))
     if masked.mask.any():
         return ma.where(masked.mask, a / self._linscale_adj, exp)
     else:
         return exp
Example #10
def db2lin(array: Union[float, np.ndarray], scale: int = 10) -> np.ndarray:
    """dB to linear conversion."""
    data = array / scale
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        if ma.isMaskedArray(data):
            return ma.power(10, data)
        return np.power(10, data)
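A minimal usage sketch (with the imports the function relies on): dB-to-linear conversion that preserves the mask on masked inputs and falls back to a plain ndarray otherwise.

import warnings
from typing import Union

import numpy as np
import numpy.ma as ma

db = ma.masked_invalid(np.array([0.0, 10.0, np.nan, 30.0]))
print(db2lin(db))                         # [1.0 10.0 -- 1000.0], mask kept
print(db2lin(np.array([3.0, 6.0]), 10))   # plain ndarray path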
Example #11
def single_polynomial_regression(x, order):
    m, n = x.shape
    k = n + order
    # Construct the Z matrix from the input data
    z = ones(shape=(m, k))
    for i in range(1, order+1):
        z[:, n+i-1] = power(x[:, 0], i)
    return z
Example #12
def colorAnalysis(hsv, mask):
    h = np.array(hsv[:,:,0])
    s = np.array(hsv[:,:,1])
    maskedH = ma.array(h, mask=mask)
    maskedS = ma.array(s, mask=mask)
    objH = maskedH.compressed()
    objS = maskedS.compressed()
    
    # histogram
    histH, bin_edges_h = np.histogram(objH, bins=10, range=None, normed=True)
    histS, bin_edges_s = np.histogram(objS, bins=10, range=None, normed=True)
    print "Hue histogram", histH
    
    # color moments
    moments = cv.Moments(h, binary = 0)
    huMoments = cv.GetHuMoments(moments)
    print "Color hu moments", huMoments
    
    # 1D moments
    # mean
    moment_H_1 = objH.mean()
    moment_S_1 = objS.mean()
    # standard deviation
    moment_H_2 = objH.std()
    moment_S_2 = objS.std()
    # skewness
    objHtemp = objH - moment_H_1
    objStemp = objS - moment_S_1
    objHtemp = ma.power(objHtemp, 3)
    objStemp = ma.power(objStemp, 3)
    moment_H_3 = objHtemp.mean() ** (1./3)
    moment_S_3 = objStemp.mean() ** (1./3)
    
    #normalize
    moment_H_1 = moment_H_1/255
    moment_H_2 = moment_H_2/255
    moment_H_3 = moment_H_3/255
    
    
    features = []
    features += list(histH)
    features += list(huMoments)
    features += moment_H_1, moment_H_2, moment_H_3
    
    return features
Example #13
 def inverse(self, value):
     if not self.scaled():
         raise ValueError("Not invertible until scaled")
     vmin, vmax = self.vmin, self.vmax
     if cbook.iterable(value):
         val = ma.asarray(value)
         return vmin * ma.power((vmax / vmin), val)
     else:
         return vmin * pow((vmax / vmin), value)
Example #14
def colorAnalysis(hsv, mask):
    h = np.array(hsv[:, :, 0])
    s = np.array(hsv[:, :, 1])
    maskedH = ma.array(h, mask=mask)
    maskedS = ma.array(s, mask=mask)
    objH = maskedH.compressed()
    objS = maskedS.compressed()

    # histogram
    histH, bin_edges_h = np.histogram(objH, bins=10, range=None, normed=True)
    histS, bin_edges_s = np.histogram(objS, bins=10, range=None, normed=True)
    print "Hue histogram", histH

    # color moments
    moments = cv.Moments(h, binary=0)
    huMoments = cv.GetHuMoments(moments)
    print "Color hu moments", huMoments

    # 1D moments
    # mean
    moment_H_1 = objH.mean()
    moment_S_1 = objS.mean()
    # standard deviation
    moment_H_2 = objH.std()
    moment_S_2 = objS.std()
    # skewness
    objHtemp = objH - moment_H_1
    objStemp = objS - moment_S_1
    objHtemp = ma.power(objHtemp, 3)
    objStemp = ma.power(objStemp, 3)
    moment_H_3 = objHtemp.mean()**(1. / 3)
    moment_S_3 = objStemp.mean()**(1. / 3)

    #normalize
    moment_H_1 = moment_H_1 / 255
    moment_H_2 = moment_H_2 / 255
    moment_H_3 = moment_H_3 / 255

    features = []
    features += list(histH)
    features += list(huMoments)
    features += moment_H_1, moment_H_2, moment_H_3

    return features
Example #15
def variance(signal, special_parameters):
    # TODO: this 'special_parameters' maybe be useful
    signal = signal - mean(signal)
    squared_signal = power(signal, 2)
    summation = sum(squared_signal)
    statistic = (1 / size(signal)) * summation

    added_label = "var"

    return statistic, added_label
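A minimal usage sketch (assuming numpy-style imports for mean, power, and size): the returned statistic is the biased (population) variance of the signal.

import numpy as np
from numpy import mean, power, size

x = np.array([1.0, 2.0, 3.0, 4.0])
stat, label = variance(x, None)
print(stat, label)   # 1.25 var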
Example #16
 def fit_model(self, ratings=None, max_iter=50, threshold=1e-5):
     X = self.ratings if ratings is None else ratings
     self.ratings = X
     self.U, self.V = als.als(X, self.rank, self.lambda_, max_iter,
                              threshold)
     self.pred = pd.DataFrame(self.U.dot(self.V),
                              index=X.index,
                              columns=X.columns)
     self.error = ma.power(ma.masked_invalid(X - self.pred), 2).sum()
     return self.pred, self.error
Example #17
    def inverse(self, value):
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax

        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin * ma.power((vmax / vmin), val)
        else:
            return vmin * pow((vmax / vmin), value)
Example #18
 def transform(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.invlinthresh, self.invlinthresh, copy=False)
     exp = sign * self.linthresh * (
         ma.power(self.base, (sign * (masked / self.linthresh))
         - self._linscale_adj))
     if masked.mask.any():
         return ma.where(masked.mask, a / self._linscale_adj, exp)
     else:
         return exp
Example #19
    def objFunTheta(self, theta, dataWeightedS, dpsCross, _):

        sqErrorsS = ma.power((dataWeightedS - self.trajFunc(dpsCross, theta)),
                             2)
        meanSSD = ma.sum(sqErrorsS)

        assert not isinstance(meanSSD, ma.MaskedArray)

        logPriorTheta = self.logPriorThetaFunc(theta, self.paramsPriorTheta)

        return meanSSD - logPriorTheta, meanSSD
Example #20
    def inverse(self, value):
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        gamma = self.gamma
        vmin, vmax = self.vmin, self.vmax

        if cbook.iterable(value):
            val = ma.asarray(value)
            return ma.power(value, 1. / gamma) * (vmax - vmin) + vmin
        else:
            return pow(value, 1. / gamma) * (vmax - vmin) + vmin
Example #21
    def __call__(self, value, clip=None):

        method = self.stretch
        exponent = self.exponent
        midpoint = self.midpoint

        if clip is None:
            clip = self.clip

        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(np.float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(np.float)

        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            result = (val - vmin) * (1.0 / (vmax - vmin))

            negative = result < 0.
            if self.stretch == 'linear':
                pass
            elif self.stretch == 'log':
                result = ma.log10(result * (self.midpoint - 1.) + 1.) \
                       / ma.log10(self.midpoint)
            elif self.stretch == 'sqrt':
                result = ma.sqrt(result)
            elif self.stretch == 'arcsinh':
                result = ma.arcsinh(result / self.midpoint) \
                       / ma.arcsinh(1. / self.midpoint)
            elif self.stretch == 'power':
                result = ma.power(result, exponent)
            else:
                raise Exception("Unknown stretch in APLpyNormalize: %s" %
                                self.stretch)
            result[negative] = -np.inf

        if vtype == 'scalar':
            result = result[0]
        return result
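A minimal round-trip sketch (assuming an APLpyNormalize instance constructed with these keyword arguments): __call__ maps data into [0, 1] with the chosen stretch, and the inverse() shown in earlier examples maps normalized values back to data space.

import numpy as np

norm = APLpyNormalize(stretch='sqrt', vmin=0., vmax=100.)
scaled = norm(np.array([25., 64.]))   # approx. [0.5, 0.8]
data = norm.inverse(scaled)           # approx. [25., 64.]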
Example #22
 def transform_non_affine(self, a):
     lower = a[np.where(a<=factor*np.log10(change))]
     greater = a[np.where(a> factor*np.log10(change))]
     if lower.size:
         if isinstance(lower, ma.MaskedArray):
             lower = ma.power(10.0, lower/float(factor))
         else:
             lower = np.power(10.0, lower/float(factor))
     if greater.size:
         greater = (greater + change - factor*np.log10(change))
     # Only low
     if not(greater.size):
         return lower
     # Only high
     if not(lower.size):
         return greater
     return np.concatenate((lower, greater))
Example #24
 def fit_model(self, ratings=None, init=None):
     X = self.ratings if ratings is None else ratings
     self.ratings = X
     m, n = X.shape
     known_elements = np.where(~np.isnan(X.values))
     list_of_known_elements = zip(*known_elements)
     data = [X.values[coordinate] for coordinate in list_of_known_elements]
     self.U, self.V, opts = lmafit.lmafit_mc_adp(m,
                                                 n,
                                                 self.rank,
                                                 known_elements,
                                                 data,
                                                 opts=init)
     self.pred = pd.DataFrame(self.U.dot(self.V),
                              index=X.index,
                              columns=X.columns)
     self.error = ma.power(ma.masked_invalid(X - self.pred), 2).sum()
     return self.pred, self.error
Example #25
    def objFunShift(self, shift, dataOneSubjWeightedCT, thetas, variances,
                    ageOneSubj1array, clustProbBC):

        # print('dataOneSubjWeightedCT', dataOneSubjWeightedCT.dtype)
        # print('ageOneSubj1array', ageOneSubj1array.dtype)
        # print('clustProbBC', clustProbBC.dtype)
        # print(adsas)

        dps = np.sum(np.multiply(shift, ageOneSubj1array), 1)
        nrClust = thetas.shape[0]
        # for tp in range(dataOneSubj.shape[0]):
        sumSSD = 0
        gammaInvK = np.sum(clustProbBC, 0)
        # print('dps', dps)
        sqErrorsK = ma.zeros(nrClust)
        for k in range(nrClust):
            sqErrorsK[k] = ma.sum(
                ma.power(
                    dataOneSubjWeightedCT[k, :] -
                    self.trajFunc(dps, thetas[k, :]), 2))

        sumSSD = ma.sum((sqErrorsK * gammaInvK) / (2 * variances))

        assert not isinstance(sumSSD, ma.MaskedArray)

        # print('SqError', sqErrorsK)
        # print('gammaInvK', gammaInvK)
        # print('variances', variances)
        # print('sumSSD', sumSSD)

        logPriorShift = self.logPriorShiftFunc(shift, self.paramsPriorShift)

        # print('logPriorShift', logPriorShift, 'sumSSD', sumSSD)
        # print(sumSSD)
        # print(adsdsa)
        # if shift[0] < -400: # and -67
        #   import pdb
        #   pdb.set_trace()

        return sumSSD - logPriorShift
Example #26
  def inverse(self, value):
    if not self.scaled():
      raise ValueError("Not invertible until scaled")
    vmin, vmax = self.vmin, self.vmax
    vin, cin = self.vin, self.cin

    if cbook.iterable(value):
      val = ma.asarray(value)
      ipos = (val > (0.5 + cin))
      ineg = (val < (0.5 - cin))
      izero = ~(ipos | ineg)

      result = ma.empty_like(val)
      result[izero] = (val[izero] - 0.5) * vin/cin
      result[ipos] = vin * pow((vmax/vin), (val[ipos] - (0.5 + cin))/(0.5 - cin)) 
      result[ineg] = -vin * pow((-vmin/vin), ((0.5 - cin) - val[ineg])/(0.5 - cin))

      # return the piecewise result (mirrors the scalar branch below)
      r = result
    else:
      if value > 0.5 + cin: r = vin * pow((vmax/vin), (value - (0.5 + cin))/(0.5 - cin))
      elif value < 0.5 - cin: r = -vin * pow((-vmin/vin), ((0.5 - cin) - value)/(0.5 - cin))
      else: r = (value - 0.5) * vin / cin
    return r
Example #27
    def __check_gradient__(self, j, g_weight, g_m_bias, g_ngb_n_bias):

        it = np.nditer(g_weight, flags=["multi_index"], op_flags=["readwrite"])
        while not it.finished:
            idx = it.multi_index
            if g_weight[idx] is ma.masked:
                it.iternext()
                continue
            self._weight[idx] += 1e-6
            hat_rating1, _ = self.__forward__(j)
            self._weight[idx] -= 2e-6
            hat_rating2, _ = self.__forward__(j)
            _g_ngb_weight = (
                0.5 * ma.sum(ma.power(self._rating[:, j] - hat_rating1, 2)) -
                0.5 *
                ma.sum(ma.power(self._rating[:, j] - hat_rating2, 2))) / 2e-6
            self._weight[idx] += 1e-6
            assert np.all(np.isclose(_g_ngb_weight, g_weight[idx], rtol=1e-2))
            it.iternext()

        for i in range(self._m_bias.size):
            if g_m_bias[i] is ma.masked:
                continue
            self._m_bias[i] += 1e-6
            hat_rating1, _ = self.__forward__(j)
            self._m_bias[i] -= 2e-6
            hat_rating2, _ = self.__forward__(j)
            _g_m_bias = (
                0.5 * ma.sum(ma.power(self._rating[:, j] - hat_rating1, 2)) -
                0.5 *
                ma.sum(ma.power(self._rating[:, j] - hat_rating2, 2))) / 2e-6
            self._m_bias[i] += 1e-6
            assert np.all(np.isclose(_g_m_bias, g_m_bias[i], rtol=1e-2))

        self._n_bias[j] += 1e-6
        hat_rating1, _ = self.__forward__(j)
        self._n_bias[j] -= 2e-6
        hat_rating2, _ = self.__forward__(j)
        _g_ngb_n_bias = (
            0.5 * ma.sum(ma.power(self._rating[:, j] - hat_rating1, 2)) -
            0.5 * ma.sum(ma.power(self._rating[:, j] - hat_rating2, 2))) / 2e-6
        self._n_bias[j] += 1e-6
        assert np.all(np.isclose(_g_ngb_n_bias, g_ngb_n_bias, rtol=1e-2))
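A standalone sketch of the same central-difference idea used above (a toy least-squares objective, not the class's actual model): perturb a parameter by plus and minus eps and compare the numerical slope against the analytic gradient.

import numpy as np

r = np.array([1.0, 2.0, 3.0])   # observed values
x = np.array([0.5, 1.0, 1.5])   # inputs
w = 0.4                         # parameter being checked

def loss(w):
    return 0.5 * np.sum((r - w * x) ** 2)

analytic = -np.sum((r - w * x) * x)          # d(loss)/dw in closed form
eps = 1e-6
numeric = (loss(w + eps) - loss(w - eps)) / (2 * eps)
assert np.isclose(analytic, numeric, rtol=1e-4)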
Example #28
def convert_to_slit(m,x,y,nx,ny,gamma=1.0,expand=1.0):
    """compute best slit for PV Slice from set of points or masked array
    using moments of inertia
    m=mass (intensity)  x,y = positions
    """
    # sanity
    if len(m) == 0: return []
    if type(m) == ma.core.MaskedArray:
      if m.count() == 0:  return []
    # apply gamma factor
    logging.debug("Gamma = %f" % gamma)
    mw = ma.power(m,gamma)
    # first find a rough center
    smx = ma.sum(mw*x)
    smy = ma.sum(mw*y)
    sm  = ma.sum(mw)
    xm = smx/sm
    ym = smy/sm
    logging.debug('MOI::center: %f %f' % (xm,ym))
    (xpeak,ypeak) = np.unravel_index(mw.argmax(),mw.shape)
    logging.debug('PEAK: %f %f' % (xpeak,ypeak))
    if True:
      # center on peak
      # @todo but if (xm,ym) and (xpeak,ypeak) differ too much, e.g.
      #       outside of the MOI body, something else is wrong
      xm = xpeak
      ym = ypeak
    # take 2nd moments w.r.t. this center
    x = x-xm
    y = y-ym
    mxx=m*x*x
    mxy=m*x*y
    myy=m*y*y
    #
    smxx=ma.sum(mxx)/sm
    smxy=ma.sum(mxy)/sm
    smyy=ma.sum(myy)/sm
    #  MOI2
    moi = np.array([smxx,smxy,smxy,smyy]).reshape(2,2)
    w,v = la.eig(moi)
    a   = math.sqrt(w[0])
    b   = math.sqrt(w[1])
    phi = -math.atan2(v[0][1],v[0][0])
    if a < b:  
        phi = phi + 0.5*np.pi
    logging.debug('MOI::a,b,phi(deg): %g %g %g' % (a,b,phi*180.0/np.pi))
    #  ds9.reg format (image coords)
    sinp = np.sin(phi)
    cosp = np.cos(phi)
    # compute the line take both a and b into account,
    # since we don't even know or care which is the bigger one
    r  = np.sqrt(a*a+b*b)
    x0 = xm - expand*r*cosp 
    y0 = ym - expand*r*sinp 
    x1 = xm + expand*r*cosp 
    y1 = ym + expand*r*sinp 
    # add 1 for ds9, which used 1 based pixels
    logging.debug("ds9 short line(%g,%g,%g,%g)" % (x0+1,y0+1,x1+1,y1+1))
    if nx > 0:
      s = expand_line(x0,y0,x1,y1,nx,ny)
      logging.debug("ds9 full line(%g,%g,%g,%g)" % (s[0],s[1],s[2],s[3]))
      return [float(s[0]),float(s[1]),float(s[2]),float(s[3])]
    else:
      return [float(x0),float(y0),float(x1),float(y1)]
Example #29
def gauss_yy(x, y, sigma):
    return 1/(2*math.pi*sigma**4) * (y**2/sigma**2 - 1) * power(math.exp(1), -(x**2 + y**2)/(2*sigma**2))
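A minimal usage sketch (assuming math and a numpy-style power import, as in the snippet): evaluating the second-derivative-of-Gaussian kernel on a small grid, e.g. to build a filter.

import math
import numpy as np
from numpy.ma import power

xs, ys = np.meshgrid(np.arange(-3, 4), np.arange(-3, 4))
kernel = gauss_yy(xs, ys, 1.0)   # 7x7 d^2G/dy^2 kernel for sigma = 1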
Example #30
    # logging.info("%d different tags" % len(unique(list(chain(*tags)))))
    logging.info("%d different tags" % len(unique(tags)))

    train_idx, test_idx = train_test_split(range(len(content)))
    content_train = FilteredSequence(content, train_idx)
    content_test = FilteredSequence(content, test_idx)

    tags_train = FilteredSequence(tags, train_idx)
    tags_test = FilteredSequence(tags, test_idx)

    pipeline = Pipeline([('vect', TfidfVectorizer(strip_accents='unicode', lowercase=True, max_df=0.5, min_df=2,
                                                  smooth_idf=True, sublinear_tf=True)),
                         ('svm', SVC(kernel='linear'))])

    logging.info("Running meta parameter grid search")
    grid = GridSearchCV(pipeline, {'svm__C': power(10, linspace(-5, 4, num=10))}, verbose=1, n_jobs=p, cv=5)

    grid.fit(content_train, tags_train)

    c = grid.best_params_['svm__C']

    logging.info("Best score %.4f with C = %f" % (grid.best_score_, c))

    pipeline = Pipeline([('vect', TfidfVectorizer(strip_accents='unicode', lowercase=True, max_df=0.5, min_df=2,
                                                  smooth_idf=True, sublinear_tf=True)),
                         ('svm', SVC(kernel='linear', C=c))])
    pipeline.fit(content_train, tags_train)
    pred = pipeline.predict(content_test)

    logging.info("Held out performence F1=%.4f, p=%.4f, r=%.4f, jaccard=%.4f" %
                 (f1_score(tags_test, pred), precision_score(tags_test, pred),
Example #31
import numpy as np
from numpy.ma import power

start = 0
stop = 1
num = 10
endpoint = False
base = 1

y = np.linspace(start, stop, num=num, endpoint=endpoint)

power(base, y).astype(int)

np.logspace(2.0, 3.0, num=4)
np.logspace(2.0, 3.0, num=4, endpoint=False)
np.logspace(2.0, 3.0, num=4, base=2.0)
import matplotlib.pyplot as plt

N = 10
x1 = np.logspace(0.1, 1, N, endpoint=True)
x2 = np.logspace(0.1, 1, N, endpoint=False)
y = np.zeros(N)
plt.plot(x1, y, 'o')
plt.plot(x2, y + 0.5, 'o')
plt.ylim([-0.5, 1])
plt.show()
Example #32
 # generate average colors for each night at each site
 targets['site'] = [row['shortname'].split()[0].lower() if row['shortname'] is not None else None for row in targets]
 extinction = [lsc.sites.extinction[row['site']][row['filter']] for row in targets]
 targets['instmag_amcorr'] = (targets['instmag'].T - extinction * targets['airmass']).T
 targets = targets.group_by(['dayobs', 'shortname', 'instrument'])
 for filters in colors_to_calculate:
     colors, dcolors = [], []
     for group in targets.groups:
         f0 = group['filter'] == filters[0]
         f1 = group['filter'] == filters[1]
         m0, dm0 = average_in_flux(group['instmag_amcorr'][f0], group['dinstmag'][f0], axis=0)
         m1, dm1 = average_in_flux(group['instmag_amcorr'][f1], group['dinstmag'][f1], axis=0)
         z0, dz0 = average_in_flux(group['z1'][f0], group['dz1'][f0])
         z1, dz1 = average_in_flux(group['z2'][f1], group['dz2'][f1])
         if np.all(group['dc1'][f0]):
             dc0 = np.sum(np.power(group['dc1'][f0], -2))**-0.5
             c0 = np.sum(group['c1'][f0] * np.power(group['dc1'][f0], -2)) * dc0**2
         else:
             dc0 = 0.
             c0 = np.mean(group['c1'][f0])
         if np.all(group['dc2'][f1]):
             dc1 = np.sum(np.power(group['dc2'][f1], -2))**-0.5
             c1 = np.sum(group['c2'][f1] * np.power(group['dc2'][f1], -2)) * dc1**2
         else:
             dc1 = 0.
             c1 = np.mean(group['c2'][f1])
         color = np.divide(m0 - m1 + z0 - z1, 1 - c0 + c1)
         dcolor = np.abs(color) * np.sqrt(
                     np.divide(dm0**2 + dm1**2 + dz0**2 + dz1**2, (m0 - m1 + z0 - z1)**2)
                     + np.divide(dc0**2 + dc1**2, (1 - c0 + c1)**2)
                                         )
Example #33
 def transform(self, a):
     return ma.power(self.base, a) / self.base
Example #34
 def transform(self, a):
     return ma.power(2.0, a) / 2.0
Example #35
 def transform_non_affine(self, a):
     return ma.power(np.e, a) / np.e
Example #36
 def transform_non_affine(self, a):
     return ma.power(2.0, a) / 2.0
Example #37
def gauss_xy(x, y, sigma):
    return 1/(2*math.pi*sigma**6) * (x * y)           * power(math.exp(1), -(x**2 + y**2)/(2*sigma**2))
Example #38
 def transform_non_affine(self, a):
     return ma.power(np.e, a) / np.e
Example #39
 def transform(self, a):
     return ma.power(self.base, a) / self.base
Example #40
 def transform(self, a):
     return ma.power(np.e, a) / np.e
Example #41
 def transform(self, a):
     return ma.power(2.0, a) / 2.0
Example #42
def main():
    # parse command-line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--verbose", action="store_true",
        help="print verbose output")
    ## targets to fit
    parser.add_argument("--name", type=str, default=None,
        help="target list")
    parser.add_argument("--gamma", type=float, default=3.8,
        help="LSS growth and redshift evolution of mean absorption gamma")
    parser.add_argument("--index", type=int, default=1000,
        help="target index")
    parser.add_argument("--pmf", type=str, default=None,
        help="target plate-mjd-fiber string")
    args = parser.parse_args()

    print 'Loading forest data...'

    # import data
    skim = h5py.File(args.name+'.hdf5', 'r')

    if args.pmf:
        plate, mjd, fiber = [int(val) for val in args.pmf.split('-')]
        index = np.where((skim['meta']['plate'] == plate) & (skim['meta']['mjd'] == mjd) & (skim['meta']['fiber'] == fiber))[0][0]
    else:
        index = args.index

    flux = np.ma.MaskedArray(skim['flux'][index], mask=skim['mask'][index])
    ivar = np.ma.MaskedArray(skim['ivar'][index], mask=skim['mask'][index])
    loglam = skim['loglam'][:]
    wave = np.power(10.0, loglam)

    z = skim['z'][index]
    norm = skim['norm'][index]
    meta = skim['meta'][index]

    linear_continuum = h5py.File(args.name+'-linear-continuum.hdf5', 'r')
    a = linear_continuum['params_a'][index]
    b = linear_continuum['params_b'][index]
    continuum = linear_continuum['continuum']
    continuum_wave = linear_continuum['continuum_wave']
    continuum_interp = scipy.interpolate.UnivariateSpline(continuum_wave, continuum, ext=1, s=0)
    abs_alpha = linear_continuum.attrs['abs_alpha']
    abs_beta = linear_continuum.attrs['abs_beta']

    forest_wave_ref = (1+z)*linear_continuum.attrs['forest_wave_ref']
    wave_lya = linear_continuum.attrs['wave_lya']

    forest_pixel_redshifts = wave/wave_lya - 1
    abs_coefs = abs_alpha*np.power(1+forest_pixel_redshifts, abs_beta)

    print 'flux 1280 Ang: %.2f' % norm
    print 'fit param a: %.2f' % a
    print 'fit param b: %.2f' % b

    def model_flux(a, b):
        return a*np.power(wave/forest_wave_ref, b)*continuum_interp(wave/(1+z))*np.exp(-abs_coefs)

    def chisq(p):
        mflux = model_flux(p[0], p[1])
        res = flux - mflux
        return ma.sum(res*res*ivar)/ma.sum(ivar)

    from scipy.optimize import minimize

    result = minimize(chisq, (a, b))
    a,b = result.x

    print 'fit param a: %.2f' % a
    print 'fit param b: %.2f' % b

    # rest and obs refer to pixel grid
    print 'Estimating deltas in forest frame...'

    mflux = model_flux(a,b)
    delta_flux = flux/mflux - 1.0
    delta_ivar = ivar*mflux*mflux

    forest_min_z = linear_continuum.attrs['forest_min_z']
    forest_max_z = linear_continuum.attrs['forest_max_z']
    forest_dz = 0.1
    forest_z_bins = np.arange(forest_min_z, forest_max_z + forest_dz, forest_dz)

    print 'Adjusting weights for pipeline variance and LSS variance...'

    var_lss = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.05 + 0.06*(forest_z_bins - 2.0)**2, s=0)
    var_pipe_scale = scipy.interpolate.UnivariateSpline(forest_z_bins, 0.7 + 0.2*(forest_z_bins - 2.0)**2, s=0)

    delta_weight = delta_ivar*var_pipe_scale(forest_pixel_redshifts)
    delta_weight = delta_weight/(1 + delta_weight*var_lss(forest_pixel_redshifts))

    thing_id = meta['thing_id']
    pmf = '%s-%s-%s' % (meta['plate'],meta['mjd'],meta['fiber'])

    los = DeltaLOS(thing_id)

    my_msha = norm*a*np.power(wave/forest_wave_ref, b)
    my_wave = wave
    my_flux = norm*flux
    my_cf = my_msha*continuum_interp(wave/(1+z))*np.exp(-abs_coefs)
    my_ivar = ivar/(norm*norm)
    my_delta = delta_flux
    my_weight = delta_weight

    # mean_ratio = np.average(my_msha*continuum)/ma.average(los.msha*los.cont)
    # print mean_ratio

    plt.figure(figsize=(12,4))
    plt.plot(my_wave, my_flux, color='gray')

    my_dflux = ma.power(my_ivar, -0.5)
    plt.fill_between(my_wave, my_flux - my_dflux, my_flux + my_dflux, color='gray', alpha=0.5)

    plt.plot(my_wave, my_msha*continuum_interp(wave/(1+z)), label='My continuum', color='blue')
    plt.plot(los.wave, los.cont, label='Busca continuum', color='red')
    plt.plot(my_wave, my_cf, label='My cf', color='green')
    plt.plot(los.wave, los.cf, label='Busca cf', color='orange')
    plt.legend()
    plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))
    plt.xlabel(r'Observed Wavelength ($\AA$)')
    plt.ylabel(r'Observed Flux')
    plt.xlim(los.wave[[0,-1]])
    plt.savefig(args.name+'-example-flux.png', dpi=100, bbox_inches='tight')
    plt.close()

    plt.figure(figsize=(12,4))
    my_delta_sigma = ma.power(delta_weight, -0.5)
    # plt.fill_between(my_wave, my_delta - my_delta_sigma, my_delta + my_delta_sigma, color='blue', alpha=0.1, label='My Delta')
    plt.scatter(my_wave, my_delta, color='blue', marker='+', label='My Delta')
    plt.plot(my_wave, +my_delta_sigma, color='blue', ls=':')
    plt.plot(my_wave, -my_delta_sigma, color='blue', ls=':')

    los_delta_sigma = ma.power(los.weight, -0.5)
    # plt.fill_between(los.wave, los.delta - los_delta_sigma, los.delta + los_delta_sigma, color='red', alpha=01, label='Busca Delta')
    plt.scatter(los.wave, los.delta, color='red', marker='+', label='Busca Delta')

    plt.plot(los.wave, +los_delta_sigma, color='red', ls=':')
    plt.plot(los.wave, -los_delta_sigma, color='red', ls=':')

    my_lss_sigma = np.sqrt(var_lss(forest_pixel_redshifts))
    plt.plot(my_wave, +my_lss_sigma, color='black', ls='--')
    plt.plot(my_wave, -my_lss_sigma, color='black', ls='--')

    # my_sn_sigma = np.sqrt(np.power(1 + forest_pixel_redshifts, 0.5*abs_beta))/10
    # plt.plot(my_wave, +my_sn_sigma, color='orange', ls='--')
    # plt.plot(my_wave, -my_sn_sigma, color='orange', ls='--')
    # import matplotlib.patches as mpatches
    #
    # blue_patch = mpatches.Patch(color='blue', alpha=0.3, label='My Delta')
    # red_patch = mpatches.Patch(color='red', alpha=0.3, label='Busca Delta')
    # plt.legend(handles=[blue_patch,red_patch])

    plt.title(r'%s (%s), $z$ = %.2f' % (pmf, thing_id, z))
    plt.ylim(-2,2)
    plt.xlim(los.wave[[0,-1]])

    plt.xlabel(r'Observed Wavelength ($\AA$)')
    plt.ylabel(r'Delta')
    plt.legend()
    plt.savefig(args.name+'-example-delta.png', dpi=100, bbox_inches='tight')
    plt.close()
Example #43
 def transform(self, a):
     return ma.power(10.0, a) / 10.0
Example #44
 def __loss__(self, rating, hat_rating):
     return 0.5 * ma.mean(ma.power(rating - hat_rating, 2))
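A standalone sketch of the same masked loss (illustrative values): the mean squared error is taken only over observed, unmasked ratings.

import numpy as np
import numpy.ma as ma

rating = ma.masked_invalid(np.array([5.0, np.nan, 3.0]))
hat_rating = np.array([4.0, 2.0, 3.0])
loss = 0.5 * ma.mean(ma.power(rating - hat_rating, 2))   # 0.5 * mean([1.0, 0.0]) = 0.25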
Example #45
 def transform(self, a):
     return ma.power(np.e, a) / np.e
Example #46
def distEclud(vecA, vecB):
    return sqrt(sum(power(vecA - vecB, 2)))  # la.norm(vecA-vecB)
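A minimal usage sketch (assuming the numpy names the snippet expects are imported): Euclidean distance between two points, equivalent to la.norm(vecA - vecB).

import numpy as np
from numpy import sqrt, power

a = np.array([0.0, 3.0])
b = np.array([4.0, 0.0])
print(distEclud(a, b))   # 5.0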
Example #47
 def transform_non_affine(self, a):
     return ma.power(2.0, a) / 2.0
Example #48
 def transform_non_affine(self, a):
     return ma.power(self.base, a)
Example #49
 def transform_non_affine(self, a):
     return ma.power(self.base, a) / self.base
Example #50
def gauss_x(x, y, sigma):
    return -(x/(2*math.pi*sigma**4)) * power(math.exp(1),-(x**2+y**2)/(2*sigma**2))
Example #51
    def __call__(self, value, clip=None):

        #read in parameters
        method = self.stretch
        exponent = self.exponent
        midpoint = self.midpoint

        # ORIGINAL MATPLOTLIB CODE

        if clip is None:
            clip = self.clip

        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(np.float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(np.float)

        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin==vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                                mask=mask)
            result = (val-vmin) * (1.0/(vmax-vmin))

            # CUSTOM APLPY CODE

            # Keep track of negative values
            negative = result < 0.

            if self.stretch == 'linear':

                pass

            elif self.stretch == 'log':

                result = ma.log10(result * (self.midpoint - 1.) + 1.) \
                       / ma.log10(self.midpoint)

            elif self.stretch == 'sqrt':

                result = ma.sqrt(result)

            elif self.stretch == 'arcsinh':

                result = ma.arcsinh(result/self.midpoint) \
                       / ma.arcsinh(1./self.midpoint)

            elif self.stretch == 'power':

                result = ma.power(result, exponent)

            else:

                raise Exception("Unknown stretch in APLpyNormalize: %s" %
                                self.stretch)

            # Now set previously negative values to 0, as these are
            # different from true NaN values in the FITS image
            result[negative] = -np.inf

        if vtype == 'scalar':
            result = result[0]

        return result
Example #52
 def transform_non_affine(self, a: np.ndarray):
     masked = ma.masked_where(a <= 0, a)
     if masked.mask.any():
         return ma.power(a, self.exponent)
     else:
         return np.power(a, self.exponent)
Example #53
    def __call__(self, value, clip=None):

        #read in parameters
        method = self.stretch
        exponent = self.exponent
        midpoint = self.midpoint

        # ORIGINAL MATPLOTLIB CODE

        if clip is None:
            clip = self.clip

        if cbook.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(np.float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(np.float)

        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            result = (val - vmin) * (1.0 / (vmax - vmin))

            # CUSTOM APLPY CODE

            # Keep track of negative values
            negative = result < 0.

            if self.stretch == 'linear':

                pass

            elif self.stretch == 'log':

                result = ma.log10(result * (self.midpoint - 1.) + 1.) \
                       / ma.log10(self.midpoint)

            elif self.stretch == 'sqrt':

                result = ma.sqrt(result)

            elif self.stretch == 'arcsinh':

                result = ma.arcsinh(result / self.midpoint) \
                       / ma.arcsinh(1. / self.midpoint)

            elif self.stretch == 'power':

                result = ma.power(result, exponent)

            else:

                raise Exception("Unknown stretch in APLpyNormalize: %s" %
                                self.stretch)

            # Now set previously negative values to 0, as these are
            # different from true NaN values in the FITS image
            result[negative] = -np.inf

        if vtype == 'scalar':
            result = result[0]

        return result
Example #54
 def transform(self, a):
     return ma.power(10.0, a) / 10.0