Example #1
def test1():
    vector_full = NP.array([1.0, 2.5, 2.8, 4.1, 5.1, 5.9, 6.9, 8.1])
    vector = vector_full[:-2]
    t =  NP.arange(vector.shape[0])
    showArray('t', t) 
    showArray('vector', vector)    
    
    mask = [True] * vector.shape[0]
    mask[2] = False
    print('mask', len(mask), mask)
   
    masked_vector = applyMask1D(vector, mask)
    masked_t = applyMask1D(vector, mask)
    trend = getTrend(t, vector)
    print trend
    for i in range(masked_t.shape[0]):
        v_pred = trend[0] + masked_t[i] * trend[1]
        print i, masked_vector[i], v_pred, v_pred - masked_vector[i]
    
    predicted = NP.array([trend[0] + masked_t[i] * trend[1] for i in range(masked_vector.shape[0])])
    corrected = NP.array([masked_vector[i] - predicted[i] for i in range(masked_vector.shape[0])])
    masked_s = NP.transpose(NP.vstack([masked_vector, predicted, corrected]))
    
    showArray('masked_t', masked_t) 
    showArray('masked_s', masked_s)    
    # the main axes is subplot(111) by default
    PL.plot(masked_t, masked_s)
    s_range = PL.amax(masked_s) - PL.amin(masked_s)
    PL.axis([PL.amin(masked_t), PL.amax(masked_t), PL.amin(masked_s) - s_range*0.1, PL.amax(masked_s) + s_range*0.1 ])
    PL.xlabel('time (days)')
    PL.ylabel('downloads')
    PL.title('Downloads over time')
    PL.show()
Example #2
    def addDataVectorAccessor(self, data_vector_accessor):
        self.__data_vectors_accessors__.append(data_vector_accessor)

        _sum = pl.sum(data_vector_accessor.signal)
        _min = pl.amin(data_vector_accessor.signal)
        _max = pl.amax(data_vector_accessor.signal)

        if self.__minimal_signal__ is None:
            self.__minimal_signal__ = _sum
            self.__minimal_data_vector_accessor__ = data_vector_accessor

            self.__min_signal__ = _min
            self.__max_signal__ = _max

        if _sum < self.__minimal_signal__:
            self.__minimal_data_vector_accessor__ = data_vector_accessor
            self.__minimal_signal__ = _sum

        if _min < self.__min_signal__:
            self.__min_signal__ = _min

        if _max > self.__max_signal__:
            self.__max_signal__ = _max

        #collects unique annotations (>0) as a set
        if data_vector_accessor.annotation is not None:
            unique_annotations = pl.unique(data_vector_accessor.annotation[
                                pl.where(data_vector_accessor.annotation > 0)])
            if len(unique_annotations) > 0:
                #union of sets
                self.__unique_annotations__ |= set(unique_annotations)
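The last block above folds the positive annotation codes of each new vector into a running set. A minimal standalone sketch of that step (toy data, not from the original source):

import pylab as pl

annotation = pl.array([0, 0, 2, 2, 0, 5, 1, 0])
unique_annotations = pl.unique(annotation[pl.where(annotation > 0)])
collected = set()           # plays the role of self.__unique_annotations__
collected |= set(unique_annotations)
print(collected)            # {1, 2, 5}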
Example #3
def csnormalize(image,f=0.75):
    """Center and size-normalize an image."""
    bimage = 1*(image>mean([amax(image),amin(image)]))
    w,h = bimage.shape
    [xs,ys] = mgrid[0:w,0:h]
    s = sum(bimage)
    if s<1e-4: return image
    s = 1.0/s
    cx = sum(xs*bimage)*s
    cy = sum(ys*bimage)*s
    sxx = sum((xs-cx)**2*bimage)*s
    sxy = sum((xs-cx)*(ys-cy)*bimage)*s
    syy = sum((ys-cy)**2*bimage)*s
    w,v = eigh(array([[sxx,sxy],[sxy,syy]]))
    l = sqrt(amax(w))
    if l>0.01:
        scale = f*max(image.shape)/(4.0*l)
    else:
        scale = 1.0
    m = array([[1.0/scale,0],[0.0,1.0/scale]])
    w,h = image.shape
    c = array([cx,cy])
    d = c-dot(m,array([w/2,h/2]))
    image = interpolation.affine_transform(image,m,offset=d,order=1)
    return image
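A hypothetical usage sketch (not part of the original source); it assumes the surrounding module does `from pylab import *` and imports `interpolation` from scipy.ndimage, which the bare amax/mgrid/eigh/affine_transform calls above imply:

from pylab import zeros

img = zeros((32, 32), 'f')
img[10:18, 12:22] = 1.0           # off-centre rectangular blob
out = csnormalize(img, f=0.75)    # blob is re-centred and rescaled to ~3/4 of the frame
print(out.shape)                  # shape is preserved: (32, 32)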
Example #4
def findFWHM(vector, maxPos=None, amplitude=None):
    """ 
    Find FWHM of vector peak (width at value at maxPos - amplitude /2).
    If maxPos is None, will find maximum in vector.
    If amplitude is None, will calculate amplitude from maximum to minimum of vector.
    """
    if maxPos is None:
        maxPos = vector.argmax()
    if amplitude is None:
        maxVal = pl.amax(vector)
        minVal = pl.amin(vector)
        amplitude = float(maxVal - minVal)

    maxSign = pl.sign(vector[maxPos])
    for pos, val in enumerate(vector[maxPos:]):
        if pl.sign(val) != maxSign:
            # we passed 0
            break
    halfAbove = pos - abs(vector[maxPos + pos]) / abs(vector[maxPos + pos] -
                                                      vector[maxPos + pos - 1])

    for pos, val in enumerate(vector[maxPos:0:-1]):
        if pl.sign(val) != maxSign:
            # we passed 0
            break
    halfBelow = pos - abs(vector[maxPos - pos]) / abs(vector[maxPos - pos] -
                                                      vector[maxPos - pos + 1])

    FWHM = halfBelow + halfAbove

    return FWHM, maxPos, amplitude
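A hypothetical usage sketch (synthetic data, not from the original source). As in the displayStats() example further down, the profile is shifted down by half its amplitude before the call, so the half-maximum level sits at the zero crossings the loops above look for:

import pylab as pl

x = pl.linspace(-10.0, 10.0, 2001)
sigma = 2.0
profile = pl.exp(-x**2 / (2 * sigma**2))             # unit-amplitude Gaussian peak
amplitude = float(pl.amax(profile) - pl.amin(profile))
shifted = profile - amplitude / 2.0                  # zero crossings at half maximum

fwhm_samples, max_pos, amp = findFWHM(shifted, amplitude=amplitude)
dx = x[1] - x[0]
print(fwhm_samples * dx)                             # ~2.355 * sigma = 4.71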
Example #5
def csnormalize(image, f=0.75):
    """Center and size-normalize an image."""
    bimage = 1 * (image > mean([amax(image), amin(image)]))
    w, h = bimage.shape
    [xs, ys] = mgrid[0:w, 0:h]
    s = sum(bimage)
    if s < 1e-4: return image
    s = 1.0 / s
    cx = sum(xs * bimage) * s
    cy = sum(ys * bimage) * s
    sxx = sum((xs - cx)**2 * bimage) * s
    sxy = sum((xs - cx) * (ys - cy) * bimage) * s
    syy = sum((ys - cy)**2 * bimage) * s
    w, v = eigh(array([[sxx, sxy], [sxy, syy]]))
    l = sqrt(amax(w))
    if l > 0.01:
        scale = f * max(image.shape) / (4.0 * l)
    else:
        scale = 1.0
    m = array([[1.0 / scale, 0], [0.0, 1.0 / scale]])
    w, h = image.shape
    c = array([cx, cy])
    d = c - dot(m, array([w / 2, h / 2]))
    image = interpolation.affine_transform(image, m, offset=d, order=1)
    return image
Example #6
    def __init__(self, data, sample, rep, well, growth_version=0):
        # data format
        #   Pandas GroupBy group object
        #   Indices: sample rep well time
        #   Value: OD reading

        self.rawcurve = data.values  # OD values
        self.time = py.array(data.index.get_level_values("time"))
        # Lowest y0 is chosen
        # Instances of condensation issue at beginning of experiment can cause
        # high OD values
        self.y0 = py.amin(self.rawcurve[0:3])
        self.asymptote = self.__calcAsymptote()
        self.maxGrowthRate, self.mgrTime = self.__calcMGR()
        self.y0, self.asymptote, self.maxGrowthRate, self.lag = (
            self.__calcParameters(
                (self.y0, self.asymptote, self.maxGrowthRate, 0.01),
                self.time, self.rawcurve, sample, rep, well)
        )

        self.dataLogistic = logistic(self.time,
                                     self.y0,
                                     self.asymptote,
                                     self.maxGrowthRate,
                                     self.lag)
        if growth_version == 0:
            self.growthLevel = default_growth(self.dataLogistic,
                                              self.asymptote,
                                              self.y0)
        elif growth_version == 1:
            self.growthLevel = calcNewGrowth(self.dataLogistic,
                                             self.asymptote,
                                             self.y0)
        elif growth_version == 2:
            self.growthLevel = calcGrowth(self.dataLogistic, self.asymptote)
        elif growth_version == 3:
            self.growthLevel = calcGrowthScore(self.asymptote,
                                               self.maxGrowthRate)
        else:
            util.printStatus("Unexpected growth version:"
                             + str(growth_version))
            util.printStatus("Using default growth calculation instead")
            self.growthLevel = default_growth(self.dataLogistic,
                                              self.asymptote,
                                              self.y0)
        self.glScaled = calcGrowth2(self.dataLogistic, self.asymptote)
        self.expGrowth = calcExpGrowth(self.maxGrowthRate, self.asymptote)

        self.auc_raw = calcAUCData(self.rawcurve, self.time)
        self.auc_rshift = calcShiftAUC(self.auc_raw, self.y0, self.time[-1])
        self.auc_log = calcAUC(self.rawcurve, self.y0, self.lag,
                               self.maxGrowthRate, self.asymptote, self.time)
        self.auc_lshift = calcShiftAUC(self.auc_log, self.y0, self.time[-1])

        self.growthClass = growthClass(self.growthLevel)
        self.sse = sum((self.dataLogistic - self.rawcurve) ** 2)
        self.mse = self.sse / len(self.time)
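A hypothetical sketch (made-up names and values) of the data layout the leading comment describes: a pandas object indexed by sample/rep/well/time whose values are OD readings, grouped per well:

import pandas as pd

idx = pd.MultiIndex.from_product(
    [["S1"], ["r1"], ["A1"], [0.0, 0.5, 1.0, 1.5]],
    names=["sample", "rep", "well", "time"])
od = pd.Series([0.05, 0.06, 0.11, 0.35], index=idx)

# one GroupBy group, as the constructor above expects
_, group = next(iter(od.groupby(level=["sample", "rep", "well"])))
print(group.values)                            # -> self.rawcurve
print(group.index.get_level_values("time"))    # -> self.time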
Example #7
def QuickHull(points):
    """Randomized divide and conquer convex hull.
    
    Args:
        points: NxD matrix of points in dimension D.
    """
    N, D = points.shape
    dim = random.randint(0, D-1)
    min_dim = p.amin(points.T, dim)
    max_dim = p.amax(points.T, dim)
Example #8
def QuickHull(points):
    """Randomized divide and conquer convex hull.
    
    Args:
        points: NxD matrix of points in dimension D.
    """
    N, D = points.shape
    dim = random.randint(0, D - 1)
    min_dim = p.amin(points.T, dim)
    max_dim = p.amax(points.T, dim)
Example #9
def plot_risetimes(a, b, **kwargs):

    # plt.ion()
    # if kwargs is not None:
    #     for key, value in kwargs.iteritems():
    #         if key == 'file_list':
    #             file_list = value
    #         if key == 'scan_line':
    #             scan_line = value
    # varray = plt.array(get_value_from_cfg(file_list, scan_line))

    n_files = a.shape[-1]
    cmap = plt.get_cmap('jet')
    c = [cmap(i) for i in plt.linspace(0, 1, n_files)]

    fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    [ax.set_color_cycle(c) for ax in (ax1, ax2)]

    r = []
    for i in range(n_files):
        x, y = a[:,i], b[:,i]
        # xo, yo = x, y #, get_envelope(x, y)
        xo, yo = get_envelope(x, y)
        p = plt.polyfit(xo, np.log(yo), 1)

        # Right way to fit... a la Nicolas - the fit expert!
        l = ax1.plot(x, plt.log(plt.absolute(y)))
        lcolor = l[-1].get_color()
        ax1.plot(xo, plt.log(yo), color=lcolor, marker='o', mec=None)
        ax1.plot(x, p[1] + x * p[0], color=lcolor, ls='--', lw=3)

        l = ax2.plot(x, y)
        lcolor = l[-1].get_color()
        ax2.plot(xo, yo, 'o', color=lcolor)
        xi = plt.linspace(plt.amin(x), plt.amax(x))
        yi = plt.exp(p[1] + p[0] * xi)
        ax2.plot(xi, yi, color=lcolor, ls='--', lw=3)

        print(p[1], p[0], 1 / p[0])
        # plt.draw()
        # ax1.cla()
        # ax2.cla()

        r.append(1/p[0])

    ax2.set_ylim(0, 1000)
    plt.figure(2)
    plt.plot(r, lw=3, c='purple')
    # plt.gca().set_ylim(0, 10000)

    # ax3 = plt.subplot(111)
    # ax3.semilogy(x, y)
    # ax3.semilogy(xo, yo)

    return r
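The core of the loop above is a log-linear fit: log(y) is fitted against x with a first-order polyfit and the inverse slope is collected in r as the time constant. A minimal standalone sketch of that step with synthetic data (not from the original source):

import pylab as plt

x = plt.linspace(0.0, 5.0, 200)
tau = 0.8
y = plt.exp(x / tau)                 # clean rising exponential
p = plt.polyfit(x, plt.log(y), 1)    # p[0] ~ 1/tau, p[1] ~ 0
print(1 / p[0])                      # ~0.8, the value appended to r above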
Example #10
def test2():
    number_samples = 300
    days_to_keep = [2,3,4,5,6]
    vector_full = NP.array([2.0 + i * 10.0/number_samples  + random.uniform(-.5, .5) for i in range(number_samples)])
    mask_full = getDaysOfWeekMask(days_to_keep, vector_full.shape[0])

    vector = vector_full[:int(vector_full.shape[0]*0.8)]
    t = NP.arange(vector.shape[0])
    showArray('t', t) 
    showArray('vector', vector)    
    
    mask = getDaysOfWeekMask(days_to_keep, vector.shape[0])
    print('mask', len(mask), mask)
   
    masked_t = applyMask1D(t, mask)
    masked_vector = applyMask1D(vector, mask)
    showArray('masked_t', masked_t) 
    showArray('masked_vector', masked_vector) 
    
    trend = getTrend(t, vector)
    print(trend)
    for i in range(masked_t.shape[0]):
        v_pred = trend[0] + masked_t[i] * trend[1]
        print(masked_t[i], masked_vector[i], v_pred, v_pred - masked_vector[i])
    
    predicted = NP.array([trend[0] + masked_t[i] * trend[1] for i in range(masked_vector.shape[0])])
    corrected = NP.array([masked_vector[i] - predicted[i] for i in range(masked_vector.shape[0])])
    masked_s = NP.transpose(NP.vstack([masked_vector, predicted, corrected]))
    
    showArray('masked_t', masked_t) 
    showArray('masked_s', masked_s)    
    # the main axes is subplot(111) by default
    PL.plot(masked_t, masked_s)
    s_range = PL.amax(masked_s) - PL.amin(masked_s)
    PL.axis([PL.amin(masked_t), PL.amax(masked_t), PL.amin(masked_s) - s_range*0.1, PL.amax(masked_s) + s_range*0.1 ])
    PL.xlabel('Time (days)')
    PL.ylabel('Downloads')
    PL.title('Downloads over time')
    PL.show()   
Example #11
    def isCorrectSignalRange(self, _signal):
        _min = pl.amin(_signal)
        if _min >= self.__filter__.min_value and \
                _min <= self.__filter__.max_value:
            return True
        _max = pl.amax(_signal)
        if _max >= self.__filter__.min_value and \
                _max <= self.__filter__.max_value:
            return True
        if _min <= self.__filter__.min_value and \
                _max >= self.__filter__.max_value:
            return True
        InformationWindow(message="Signal data out of range !")
        return False
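An illustrative sketch (hypothetical names, not from the source) of the acceptance rule above: a signal passes when either extreme lies inside [min_value, max_value], or when the signal straddles the whole filter range:

import pylab as pl

class _ToyFilter(object):
    min_value = 300
    max_value = 2000

_signal = pl.array([250, 800, 1500])    # min below the range, max inside -> accepted
_min, _max = pl.amin(_signal), pl.amax(_signal)
f = _ToyFilter()
accepted = (f.min_value <= _min <= f.max_value or
            f.min_value <= _max <= f.max_value or
            (_min <= f.min_value and _max >= f.max_value))
print(accepted)    # True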
Example #12
    def isCorrectSignalRange(self, _signal):
        _min = pl.amin(_signal)
        if _min >= self.__filter__.min_value and \
                _min <= self.__filter__.max_value:
            return True
        _max = pl.amax(_signal)
        if _max >= self.__filter__.min_value and \
                _max <= self.__filter__.max_value:
            return True
        if _min <= self.__filter__.min_value and \
                _max >= self.__filter__.max_value:
            return True
        InformationWindow(message="Signal data out of range !")
        return False
Example #13
def readDatDirectory(key, directory):
    global stats
    #Don't read data in if it's already read
    if not key in DATA["mean"]:
        data = defaultdict(array)

        #Process the dat files
        for datfile in glob.glob(directory + "/*.dat"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Process the div files
        for datfile in glob.glob(directory + "/*.div"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Iterate through the stats and calculate mean/standard deviation
        for aKey in stats:
            if aKey in data:
                DATA["mean"][key][aKey] = mean(data[aKey], axis=0)
                DATA["median"][key][aKey] = median(data[aKey], axis=0)
                DATA["std"][key][aKey] = std(data[aKey], axis=0)
                DATA["ste"][key][aKey] = std(data[aKey], axis=0) / sqrt(
                    len(data[aKey]))
                DATA["min"][key][aKey] = mean(data[aKey], axis=0) - amin(
                    data[aKey], axis=0)
                DATA["max"][key][aKey] = amax(data[aKey], axis=0) - mean(
                    data[aKey], axis=0)
                DATA["actual"][key][aKey] = data[aKey]
Example #14
def plot(df, *args, **kw):
    N = df["count"].sum()
    deltas_ = df["count"].values[:-1] - df["count"].values[1:]
    deltas = pl.array([deltas_[0]] + list(pl.amin([deltas_[1:], deltas_[:-1]], axis=0)) + [deltas_[-1]])
    dfrac = (deltas + 1e-20) / N

    def frac_leaked(q):
        attacked = dfrac * q > (2 * q * df["count"] / N) ** 0.5
        N_a = df["count"][attacked].sum()
        return float(N_a) / N

    Ns = pl.logspace(0, 15, 100)
    fracs = pl.array(list(map(frac_leaked, Ns)))

    pl.semilogx(Ns, fracs * 100, *args, **kw)
Example #15
def plot(df, *args, **kw):
    N = df["count"].sum()
    deltas_ = df['count'].values[:-1] - df['count'].values[1:]
    deltas = pl.array([deltas_[0]] + list(pl.amin([deltas_[1:], deltas_[:-1]], axis=0)) + [deltas_[-1]])
    dfrac = (deltas + 1e-20) / N

    def frac_leaked(q):
        attacked = dfrac * q > (2 * q * df['count'] / N) ** 0.5
        N_a = df['count'][attacked].sum()
        return float(N_a) / N

    Ns = pl.logspace(0, 15, 100)
    fracs = pl.array(list(map(frac_leaked, Ns)))

    pl.semilogx(Ns, fracs * 100, *args, **kw)
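A hypothetical usage sketch: a toy, descending 'count' column standing in for the surnames.csv data that the standalone script further below loads:

import pandas as pd
import pylab as pl

toy = pd.DataFrame({"count": [50000, 12000, 800, 800, 15, 3, 1]})
plot(toy, "b-", lw=2)    # % of names revealed vs. number of queries
pl.xlabel("Number of queries")
pl.ylabel("% of surnames revealed")
pl.show()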
Example #16
def fwhm_2gauss(x, y, dx=0.001):
	'''
	Finds the FWHM for the profile y(x), with accuracy dx=0.001
	Uses a 2-Gauss 1D fit.
	'''
	popt, pcov = curve_fit(gauss2, x, y);
	xx = pl.arange(pl.amin(x), pl.amax(x)+dx, dx);
	ym = gauss2(xx, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5])
	hm = pl.amax(ym/2.0);
	y_diff = pl.absolute(ym-hm);
	y_diff_sorted = pl.sort(y_diff);
	i1 = pl.where(y_diff==y_diff_sorted[0]);
	i2 = pl.where(y_diff==y_diff_sorted[1]);
	fwhm = pl.absolute(xx[i1]-xx[i2]);
	return hm, fwhm, xx, ym
Example #17
def leftover_phc_single(ds, attr="p_filt_value_phc", feature="CuKAlpha", ax=None):
    cal = ds.calibration[attr]
    pulse_timing.choose_laser_dataset(ds, "not_laser")
    if ax is None:
        plt.figure()
        ax = plt.gca()
    ax.plot(ds.p_promptness[ds.cuts.good()], getattr(ds, attr)[ds.cuts.good()],'.')
    # ax.set_xlabel("promptness")
    ax.set_ylabel(attr)
    ax.set_title("chan %d %s"%(ds.channum, feature))
    ax.set_ylim(np.array([.995, 1.005])*cal.name2ph(feature))
    index = np.logical_and(getattr(ds, attr)[ds.cuts.good()]>ax.get_ylim()[0], getattr(ds, attr)[ds.cuts.good()]<ax.get_ylim()[1])
    xmin = plt.amin(ds.p_promptness[ds.cuts.good()][index])
    xmax = plt.amax(ds.p_promptness[ds.cuts.good()][index])
    ax.set_xlim(xmin, xmax)
Example #18
    def __initiate_movie__(self):
        FFMpegWriter = manimation.writers["ffmpeg"]
        metadata = dict(title="Poincare plot movie", artist="HRV", comment="Movie support!")
        self.writer = FFMpegWriter(fps=self.movie_parameters.fps, metadata=metadata)

        self.fig = plt.figure()
        # l, = plt.plot([], [], 'k-o')
        self.movie_plot, = plt.plot([], [], "bo")

        margin = 50
        signal = self.data_vector_accessor_list[0].signal
        _max = pl.amax(signal)
        _min = pl.amin(signal)
        plt.xlim(_min - margin, _max + margin)
        plt.ylim(_min - margin, _max + margin)
        movie_filename = "/tmp/movie.mp4"
        return self.writer.saving(self.fig, movie_filename, 150)
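The method returns the writer.saving() context manager rather than entering it, so a caller wraps its frame loop in it. A standalone sketch of that pattern (hypothetical filenames and data; requires ffmpeg on the PATH):

import matplotlib
matplotlib.use("Agg")                            # headless backend for writing frames
import matplotlib.pyplot as plt
import matplotlib.animation as manimation

FFMpegWriter = manimation.writers["ffmpeg"]
writer = FFMpegWriter(fps=15)
fig = plt.figure()
movie_plot, = plt.plot([], [], "bo")
plt.xlim(0, 10)
plt.ylim(0, 10)
with writer.saving(fig, "/tmp/movie.mp4", 150):  # same call the method above returns
    for i in range(10):
        movie_plot.set_data(list(range(i)), list(range(i)))
        writer.grab_frame()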
Example #19
def readDatDirectory(key, directory):
    global stats
    #Don't read data in if it's already read
    if not key in DATA["mean"]:
        data = defaultdict(array)

        #Process the dat files
        for datfile in glob.glob(directory + "/*.dat"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Process the div files
        for datfile in glob.glob(directory + "/*.div"):
            fileHandle = open(datfile, 'rb')
            keys, dataDict = csvExtractAllCols(fileHandle)
            stats = union(stats, keys)
            for aKey in keys:
                if not aKey in data:
                    data[aKey] = reshape(array(dataDict[aKey]),
                                         (1, len(dataDict[aKey])))
                else:
                    data[aKey] = append(data[aKey],
                                        reshape(array(dataDict[aKey]),
                                                (1, len(dataDict[aKey]))),
                                        axis=0)

        #Iterate through the stats and calculate mean/standard deviation
        for aKey in stats:
            if aKey in data:
                DATA["mean"][key][aKey] = mean(data[aKey], axis=0)
                DATA["median"][key][aKey] = median(data[aKey], axis=0)
                DATA["std"][key][aKey] = std(data[aKey], axis=0)
                DATA["ste"][key][aKey] = std(data[aKey], axis=0)/ sqrt(len(data[aKey]))
                DATA["min"][key][aKey] = mean(data[aKey], axis=0)-amin(data[aKey], axis=0)
                DATA["max"][key][aKey] = amax(data[aKey], axis=0)-mean(data[aKey], axis=0)
                DATA["actual"][key][aKey] = data[aKey]
Example #20
def spectrum(wav_file,mi,mx,har,start,end,posX,posY,layer,origin,gap=0,arrange="",radius=30,sinheight=6.1):
    spect = []
    frame_rate, snd = wavfile.read(wav_file)
    sound_info = snd[:,0]
    spectrum, freqs, t, im = plt.specgram(sound_info,NFFT=1024,Fs=frame_rate,noverlap=5,mode='magnitude')
    n = 0
    rotation = 6.2831
    sinpos = {}
    cirpos = {}
    if arrange is "sinus":
        sinpos = sinus(har,radius,sinheight)
        for i in range(har):
            cirpos[i] = 0
    elif arrange is "circle":
        gap = 0
        sinpos, cirpos = circle(har,radius)
        rotation /= har
    else:
        for i in range(har):
            sinpos[i] = 0
        for i in range(har):
            cirpos[i] = 0
    maximum = plt.amax(spectrum)
    minimum = plt.amin(spectrum)
    position = 0
    while n < har:
        lastval = ((spectrum[n][0]-minimum)/(maximum - minimum))*(mx-mi)+mi
        lastval = math.ceil(lastval*1000)/1000
        lasttime = int(round(t[0]*1000))
        spect.append(osbject("bar.png",layer,origin,posX+position*gap+int(round(float(cirpos[n]))),posY+int(round(float(sinpos[n])))))
        position += 1
        if arrange is "circle":
            spect[n].rotate(0,start,start,math.ceil((1.5707+n*rotation)*1000)/1000,math.ceil((1.5707+n*rotation)*1000)/1000)
        for index,power in enumerate(spectrum[n]):
            power = ((power-minimum)/(maximum - minimum))*(mx-mi)+mi
            power = math.ceil(power*1000)/1000
            if power == lastval or int(round(t[index]*1000)) < start or int(round(t[index]*1000)) > end or index % 2 != 0:
                lasttime = int(round(t[index]*1000))
                continue
            else:
                spect[n].vecscale(0,lasttime,int(round(t[index]*1000)),1,lastval,1,power)
                lastval = power
                lasttime = int(round(t[index]*1000))
        n += 1
    return spect
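The expression applied twice above is a plain min-max normalisation of a spectrogram value into the [mi, mx] bar-height range; a standalone illustration with made-up numbers:

spectrum_value, minimum, maximum = 0.3, 0.0, 1.2
mi, mx = 1.0, 12.0
scaled = ((spectrum_value - minimum) / (maximum - minimum)) * (mx - mi) + mi
print(scaled)    # 3.75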
Example #21
def spectrum(wav_file,mi,mx,har,start,end,posX,posY,layer,origin,gap=0,arrange="",radius=30,sinheight=6.1):
    spect = []
    frame_rate, snd = wavfile.read(wav_file)
    sound_info = snd[:,0]
    spectrum, freqs, t, im = plt.specgram(sound_info,NFFT=1024,Fs=frame_rate,noverlap=5,mode='magnitude')
    n = 0
    rotation = 6.2831
    sinpos = {}
    cirpos = {}
    if arrange is "sinus":
        sinpos = sinus(har,radius,sinheight)
        for i in range(har):
            cirpos[i] = 0
    elif arrange is "circle":
        gap = 0
        sinpos, cirpos = circle(har,radius)
        rotation /= har
    else:
        for i in range(har):
            sinpos[i] = 0
        for i in range(har):
            cirpos[i] = 0
    maximum = plt.amax(spectrum)
    minimum = plt.amin(spectrum)
    position = 0
    while n < har:
        lastval = ((spectrum[n][0]-minimum)/(maximum - minimum))*(mx-mi)+mi
        lastval = math.ceil(lastval*1000)/1000
        lasttime = int(round(t[0]*1000))
        spect.append(osbject("bar.png",layer,origin,posX+position*gap+int(round(float(cirpos[n]))),posY+int(round(float(sinpos[n])))))
        position += 1
        if arrange is "circle":
            spect[n].rotate(0,start,start,math.ceil((1.5707+n*rotation)*1000)/1000,math.ceil((1.5707+n*rotation)*1000)/1000)
        for index,power in enumerate(spectrum[n]):
            power = ((power-minimum)/(maximum - minimum))*(mx-mi)+mi
            power = math.ceil(power*1000)/1000
            if power == lastval or int(round(t[index]*1000)) < start or int(round(t[index]*1000)) > end or index % 2 != 0:
                lasttime = int(round(t[index]*1000))
                continue
            else:
                spect[n].vecscale(0,lasttime,int(round(t[index]*1000)),1,lastval,1,power)
                lastval = power
                lasttime = int(round(t[index]*1000))
        n += 1
    return spect
Example #22
def center_maxsize(image,r):
    """Center the image and fit it into an r x r output image.
    If the input is larger in any dimension than r, it is
    scaled down."""
    from pylab import amin,amax,array,zeros
    assert amin(image)>=0 and amax(image)<=1
    image = array(image,'f')
    w,h = image.shape
    s = max(w,h)
    # zoom down, but don't zoom up
    if s>r:
        image = interpolation.zoom(image,(r+0.5)/float(s))
        image[image<0] = 0
        image[image>1] = 1
        w,h = image.shape
    output = zeros((r,r),image.dtype)
    dx = (r-w)//2
    dy = (r-h)//2
    output[dx:dx+w,dy:dy+h] = image
    return output
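A hypothetical usage sketch; it assumes `interpolation` from scipy.ndimage is available at module level, as the zoom call above implies:

from pylab import zeros

img = zeros((40, 20), 'f')
img[5:35, 2:18] = 1.0
out = center_maxsize(img, 32)    # 40 > 32, so the image is zoomed down, then centred
print(out.shape)                 # (32, 32)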
Example #23
    def __initiate_movie__(self):
        FFMpegWriter = manimation.writers['ffmpeg']
        metadata = dict(title='Poincare plot movie',
                        artist='HRV',
                        comment='Movie support!')
        self.writer = FFMpegWriter(fps=self.movie_parameters.fps,
                                   metadata=metadata)

        self.fig = plt.figure()
        #l, = plt.plot([], [], 'k-o')
        self.movie_plot, = plt.plot([], [], 'bo')

        margin = 50
        signal = self.data_vector_accessor_list[0].signal
        _max = pl.amax(signal)
        _min = pl.amin(signal)
        plt.xlim(_min - margin, _max + margin)
        plt.ylim(_min - margin, _max + margin)
        movie_filename = '/tmp/movie.mp4'
        return self.writer.saving(self.fig, movie_filename, 150)
Example #24
def center_maxsize(image, r):
    """Center the image and fit it into an r x r output image.
    If the input is larger in any dimension than r, it is
    scaled down."""
    from pylab import amin, amax, array, zeros
    assert amin(image) >= 0 and amax(image) <= 1
    image = array(image, 'f')
    w, h = image.shape
    s = max(w, h)
    # zoom down, but don't zoom up
    if s > r:
        image = interpolation.zoom(image, (r + 0.5) / float(s))
        image[image < 0] = 0
        image[image > 1] = 1
        w, h = image.shape
    output = zeros((r, r), image.dtype)
    dx = (r - w) // 2
    dy = (r - h) // 2
    output[dx:dx + w, dy:dy + h] = image
    return output
Example #25
    def displayStats(self):
        if self.meas is not None:
            maxPos = pl.unravel_index(self.meas.argmax(), self.meas.shape)
            maxVal = self.meas[maxPos]
            minVal = pl.amin(self.meas)
            amplitude = float(maxVal - minVal)
            self.ui.lcdAmplitude.display('{:.0f}'.format(amplitude))

            xVector = self.meas[maxPos[0], :].astype(float) - amplitude / 2
            yVector = self.meas[:, maxPos[1]].astype(float) - amplitude / 2

            # with interlaced camera in binning mode, pixels are size 2 in height
            if self.ccd.ccdParams['isInterlaced'] and not self.ilAcq:
                yVector = pl.array([yVector, yVector]).flatten('F')
                maxPos = (2 * maxPos[0], maxPos[1])

            xFWHM, _, _ = findFWHM(xVector,
                                   maxPos=maxPos[1],
                                   amplitude=amplitude)
            yFWHM, _, _ = findFWHM(yVector,
                                   maxPos=maxPos[0],
                                   amplitude=amplitude)
            FWHM = pl.mean([xFWHM, yFWHM])

            self.ui.lcdFwhm.display('{:.2f}'.format(FWHM))

            if maxVal == 2**16 - 1:
                self.toggleSatIndicator(True)
            else:
                self.toggleSatIndicator(False)

            thresholdArray = copy.deepcopy(self.meas)
            low_values_indices = thresholdArray < 2 * minVal
            thresholdArray[low_values_indices] = 0
            comPos = center_of_mass(thresholdArray)  #[::-1]
            if self.ccd.ccdParams['isInterlaced'] and not self.ilAcq:
                comPos = (comPos[0], 2 * comPos[1])
            self.verticalLineMax.setValue(comPos)
            self.horizontalLineMax.setValue(comPos)
Example #26
#!/usr/bin/env python

import pandas as pd
import pylab as pl

df = pd.read_csv("surnames.csv", header=1)
N = df["count"].sum()
deltas_ = df["count"].values[:-1] - df["count"].values[1:]
deltas = pl.array([deltas_[0]] + list(pl.amin([deltas_[1:], deltas_[:-1]], axis=0)) + [deltas_[-1]])
dfrac = (deltas + 1e-20) / N


def frac_leaked(q):
    attacked = dfrac * q > (2 * q * df["count"] / N) ** 0.5
    N_a = df["count"][attacked].sum()
    return float(N_a) / N


Ns = pl.logspace(0, 15, 100)
fracs = pl.array(list(map(frac_leaked, Ns)))

pl.semilogx(Ns, fracs * 100)
pl.xlabel("Number of queries")
pl.ylabel("% of surnames revealed")
pl.yticks(range(0, 110, 10))
pl.grid(True)
pl.show()
Example #27
'''connect neurons with multimeter'''
nest.Connect(mult,neuron,{'rule':'all_to_all'})

''' simulation '''
nest.Simulate(simtime)

endsimulate = time.time()

sim_time = endsimulate - startbuild
print('simulation time: %f' % sim_time)
pl.clf()
pl.figure(1)
#nest.raster_plot.from_device(sp, hist=True)
y = nest.GetStatus(sp,'events')[0]
z = nest.GetStatus(neuron,'local_id')[0]
pl.scatter(y['times'],y['senders']-pl.amin(z))
pl.xlim([0.0,1000.])
pl.ylim([0,pop])
pl.xlabel('ms')
pl.ylabel('neuron id ')
pl.show()
#%%
''' 3D surface plot of membrane potential'''

m_data = nest.GetStatus(mult,'events')[0]  # data of multimeter
z = nest.GetStatus(neuron,'local_id')[0]

#X, Y = np.meshgrid(y['times'],y['senders']-pl.amin(z))

#ax.plot_surface(X,Y,y['V_m'])
#uniq= pl.diff(pl.find((m_data['senders'])==m_data['senders'][0]))# find the distance of the same sender 
Example #28
    def reset(self):
        self.setSquareFilterParams(SquareFilterParams(
            int(pl.amin(self.data_accessor.signal)),
            int(pl.amax(self.data_accessor.signal)),
            True))
Example #29
def plot_slices(data):

    [n_cols, n_slices, n_turns] = data.shape
    A = data[2,:,:] * data[13,:,:]

    # Color cycle
    n_colors = n_turns
    colmap = plt.get_cmap('jet')
    c = [colmap(i) for i in plt.linspace(0., 1., n_colors)]

    fig1 = plt.figure(figsize=(12, 8))
    # ax1 = plt.gca()
    # [ax1.plot(A[:,i], c=c[i]) for i in range(1, n_turns, 1)]
    # plt.show()

    # Smoothing
    X = plt.arange(0, n_slices, 1)
    Y = plt.arange(0, n_turns, 1)
    A = A[X,:][:,Y]
    
    Xi = plt.linspace(X[0], X[-1], 1000)
    Yi = plt.linspace(Y[0], Y[-1], 1000)
    sp = interpolate.RectBivariateSpline(X, Y, A)
    Ai = sp(Xi, Yi)

    X, Y = plt.meshgrid(X, Y)
    X, Y = X.T, Y.T
    Xi, Yi = plt.meshgrid(Xi, Yi)
    Xi, Yi = Xi.T, Yi.T

    #fig = figure(1)
    #ax3d = fig.gca(projection='3d')
    #pl = ax3d.plot_wireframe(Xi, Yi, Ai, \
        #rstride=ns, cstride=ns, cmap=cm.jet, linewidth=0.1, alpha=0.3)
    #cset = ax3d.contourf(Xi, Yi, Ai, zdir='z')#, offset=-100)
    #cset = ax3d.contourf(Xi, Yi, Ai, zdir='x')#, offset=-40)
    #cset = ax3d.contourf(Xi, Yi, Ai, zdir='y')#, offset=40)
    #ax3d.zaxis.set_major_locator(LinearLocator(10))
    #ax3d.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    #ax3d.view_init(elev=25, azim=-150)
    #ax3d.set_xlabel('Slices')
    #ax3d.set_ylabel('Turns')

    #fig = figure(2)
    #ax3d = fig.gca(projection='3d')
    #pl = ax3d.plot_wireframe(X, Y, A, \
        #rstride=ns, cstride=ns, cmap=cm.jet, linewidth=0.1, alpha=0.3)
    #cset = ax3d.contourf(X, Y, A, zdir='z')#, offset=-100)
    #cset = ax3d.contourf(X, Y, A, zdir='x')#, offset=-40)
    #cset = ax3d.contourf(X, Y, A, zdir='y')#, offset=40)
    #ax3d.zaxis.set_major_locator(LinearLocator(10))
    #ax3d.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    #ax3d.view_init(elev=25, azim=-150)

    #show()

    from mayavi.modules.grid_plane import GridPlane
    from mayavi.modules.outline import Outline
    from mayavi.modules.volume import Volume
    from mayavi.scripts import mayavi2
    from mayavi import mlab
    mlab.options.backend = 'envisage'
    # graphics card driver problem
    # workaround by casting int in line 246 of enthought/mayavi/tools/figure.py
    #mlab.options.offscreen = True
    #enthought.mayavi.engine.current_scene.scene.off_screen_rendering = True

    mlab.figure(bgcolor=(1,1,1), fgcolor=(0.2,0.2,0.2))
    aspect = (0, 10, 0, 20, -6, 6)
    ranges = (plt.amin(X), plt.amax(X), plt.amin(Y), plt.amax(Y), plt.amin(A), plt.amax(A))
    # s = mlab.surf(Xi, Yi, Ai, colormap='jet', representation='surface',
    #               warp_scale=1e-3)
    s = mlab.surf(Xi, Yi, Ai, colormap='jet', representation='surface', extent=aspect,
                  warp_scale='auto')
    mlab.outline(line_width=1)
    mlab.axes(x_axis_visibility=True,
              xlabel='Slice No.', ylabel='Turn No.', zlabel='BPM signal', ranges=ranges)

    #mlab.title(('Electron cloud dynamics - slice passage: %03d/%03d' % (i, n_blocks)), size=0.25)
    mlab.view(azimuth=230, elevation=60)   
    mlab.show()
Example #30
#!/usr/bin/env python

import pandas as pd
import pylab as pl

df = pd.read_csv("surnames.csv", header=1)
N = df["count"].sum()
deltas_ = df['count'].values[:-1] - df['count'].values[1:]
deltas = pl.array([deltas_[0]] +
                  list(pl.amin([deltas_[1:], deltas_[:-1]], axis=0)) +
                  [deltas_[-1]])
dfrac = (deltas + 1e-20) / N


def frac_leaked(q):
    attacked = dfrac * q > (2 * q * df['count'] / N)**0.5
    N_a = df['count'][attacked].sum()
    return float(N_a) / N


Ns = pl.logspace(0, 15, 100)
fracs = pl.array(list(map(frac_leaked, Ns)))

pl.semilogx(Ns, fracs * 100)
pl.xlabel("Number of queries")
pl.ylabel("% of surnames revealed")
pl.yticks(range(0, 110, 10))
pl.grid(True)
pl.show()
Example #31
		sys.exit(1)
	F = [];
	F.append(watch_procs(proc, totaltime=options.totaltime));
	
	for child in get_children(proc):
		F.append(watch_procs(child, totaltime=options.totaltime));
	
	for f in F:
		f.start()
	
	for f in F:
		f.join()
		time.sleep(1)
	
	if options.saveas.upper()=='TXT':
		header = "time cputimes cpu ram"
		print(f.name)
		for f in F:
			x, cpu, ram, z = f.get_result()
			pl.savetxt(proc.name+'.'+f.proc.name+'.'+str(f.proc.pid)+'.txt', zip(x, z, cpu, ram), header=header, fmt='%10.10f');
	else:
		for f in F:
			x, cpu, ram, z = f.get_result()
			pl.step(x-pl.amin(x), cpu, label=f.proc.name)
			pl.step(x-pl.amin(x), ram);
		
		pl.legend(loc=2)
		pl.xlabel('Wall Clock (s)');
		pl.ylabel('% Usage')
		pl.savefig(options.proc+'.eps');
Example #32
    def __init__(self, data_vector, movie_parameters, **params):
        self.p = movie_parameters
        if self.p.movie_name is not None:
            self.params = Params(**params)
            mpl.rcParams['patch.edgecolor'] = 'none'
            mpl.rcParams['savefig.edgecolor'] = 'none'
            self.source = get_filename(self.params.reference_filename)
            self.__prefixed_movie_name__ = self.__get_prefixed_movie_name__()
            self.__prefixed_movie_dir__ = self.__get_prefixed_movie_dir__()

            if self.params.filter_manager is not None:
                data_vector = self.params.filter_manager.run_filters(
                    data_vector)
            _max = pl.amax(data_vector.signal)
            _min = pl.amin(data_vector.signal)
            self.time = data_vector.time

            margin = 50
            self.signal_size = len(data_vector.signal)
            self.range = [
                _min - margin, _max + margin, _min - margin, _max + margin
            ]
            self.x_data = pl.zeros(self.signal_size)
            self.y_data = pl.zeros(self.signal_size)

            self.old_signal_plus = None
            self.idx = 0
            self.active_color = get_extended_color_array(
                movie_parameters.movie_active_color)
            self.inactive_color = get_extended_color_array(
                movie_parameters.movie_inactive_color)
            self.centroid_color = get_extended_color_array(
                movie_parameters.movie_centroid_color)
            self.message = None
            self.core_nums = multiprocessing.cpu_count(
            ) * self.p.movie_multiprocessing_factor  # @IgnorePep8

            self.pp_spec_manager = MiniPoincarePlotSpecManager()
            self.pp_spec_manager.movie_dir = self.__prefixed_movie_dir__
            self.pp_spec_manager.movie_name = self.__prefixed_movie_name__
            self.pp_spec_manager.movie_dpi = self.p.movie_dpi
            self.pp_spec_manager.movie_fps = self.p.movie_fps
            self.pp_spec_manager.movie_height = self.p.movie_height
            self.pp_spec_manager.movie_width = self.p.movie_width
            self.pp_spec_manager.active_color = self.active_color
            self.pp_spec_manager.inactive_color = self.inactive_color
            self.pp_spec_manager.centroid_color = self.centroid_color
            self.pp_spec_manager.active_point_size = self.p.movie_active_size
            self.pp_spec_manager.inactive_point_size = \
                                            self.p.movie_inactive_size
            self.pp_spec_manager.centroid_point_size = \
                                            self.p.movie_centroid_size
            self.pp_spec_manager.show_plot_legends = \
                                        self.p.movie_show_plot_legends
            self.pp_spec_manager.x_label = self.p.x_label
            self.pp_spec_manager.y_label = self.p.y_label
            self.pp_spec_manager.clean_frames = self.p.movie_clean_frames
            self.pp_spec_manager.movie_title = self.p.movie_title
            self.pp_spec_manager.movie_frame_step = self.p.movie_frame_step
            self.pp_spec_manager.movie_identity_line = self.p.movie_identity_line
            self.pp_spec_manager.movie_hour_label = self.p.movie_hour_label
            self.pp_spec_manager.movie_minute_label = self.p.movie_minute_label
            self.pp_spec_manager.movie_second_label = self.p.movie_second_label
            self.pp_spec_manager.movie_time_label_in_line = self.p.movie_time_label_in_line
            self.pp_spec_manager.movie_time_label_font_size = self.p.movie_time_label_font_size
            self.pp_spec_manager.movie_time_label_prefix = self.p.movie_time_label_prefix
            self.pp_spec_manager.movie_title_font_size = self.p.movie_title_font_size
            self.pp_spec_manager.movie_axis_font_size = self.p.movie_axis_font_size
            self.pp_spec_manager.movie_axis_font = self.p.movie_axis_font
            self.pp_spec_manager.movie_title_font = self.p.movie_title_font
            self.pp_spec_manager.movie_tick_font = self.p.movie_tick_font
            self.pp_spec_manager.movie_frame_pad = self.p.movie_frame_pad
            self.pp_spec_manager.movie_create_time_label = self.p.movie_create_time_label
            self.pp_spec_manager.movie_frame_filename_with_time = self.p.movie_frame_filename_with_time

            self.pp_specs_managers = []
            self.pp_specs_managers.append(self.pp_spec_manager)
            self.sub_dir_counter = 0
            self.scatter = None
            self.legend_text = None
            self.pp_specs = []
            self.cum_inactive = 0
            self._pp_spec_old = None
            self.s_size = 0  # current calculated signal size
Example #33
def main():

    args = parseCMD()

    tLoc = args.textLocation
    Temp = args.Temperature
    ChemPot = args.ChemPot
    excVol = args.excVol

    if tLoc == 'R':
        boxSubtract = 1
    else:
        boxSubtract = 4

    alphas = glob.glob('*alpha*')
    if alphas == []:
        sys.exit('Must be in CWD containing alphaN direcs')

    minMus, maxMus = 5000, -5000

    colors = [
        'Salmon', 'Blue', 'DarkViolet', 'MediumSpringGreen', 'Fuchsia',
        'Yellow', 'Maroon'
    ]

    if excVol:
        fig, ax = pl.subplots(1, figsize=(10, 8))
        pl.tick_params(axis='both', which='major', labelsize=16)
        pl.tick_params(axis='both', which='minor', labelsize=16)
        yticks = ax.yaxis.get_major_ticks()
        yticks[0].set_visible(False)

        n = 0
        for alpha in sorted(alphas):
            os.chdir(alpha)
            a = alpha[-1]
            mus, fDens, fErr, bDens, bErr = pl.loadtxt(
                'JackKnifeData_bipart.dat', unpack=True)

            pl.errorbar(mus,
                        fDens,
                        fErr,
                        fmt='^',
                        label=('Film: ' + r'$S = $' + '%s' % a),
                        color=colors[n],
                        markeredgecolor='DarkSlateGray',
                        markersize=8)
            pl.errorbar(mus,
                        bDens,
                        bErr,
                        fmt='o',
                        label=('Bulk: ' + r'$S = $' + '%s' % a),
                        color=colors[n],
                        markeredgecolor='DarkSlateGray',
                        markersize=8)

            # determine max and min values of mu
            if pl.amax(mus) > maxMus:
                maxMus = pl.amax(mus)
            if pl.amin(mus) < minMus:
                minMus = pl.amin(mus)

            os.chdir('..')
            n += 1

        # set up bulk SVP densities for plot
        if Temp != 'T':
            bulkVert = -30
        else:
            bulkVert = 30
        pl.plot([minMus, maxMus], [0.02198, 0.02198], 'k-', lw=3)
        pl.annotate(
            '3D SVP',
            xy=(maxMus - boxSubtract, 0.02195),  #xycoords='data',
            xytext=(-50, bulkVert),
            textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                            connectionstyle="angle,angleA=0,angleB=90,rad=10"),
        )

        pl.plot([minMus, maxMus], [0.0432, 0.0432], 'k-', lw=3)
        pl.annotate(
            '2D SVP',
            xy=(maxMus - boxSubtract, 0.0432),  #xycoords='data',
            xytext=(-50, 30),
            textcoords='offset points',
            bbox=dict(boxstyle="round", fc="0.8"),
            arrowprops=dict(arrowstyle="->",
                            connectionstyle="angle,angleA=0,angleB=90,rad=10"),
        )

        if Temp == 'T':
            pl.xlabel('Temperature [K]', fontsize=16)
        else:
            pl.xlabel('Chemical Potential [K]', fontsize=16)
        pl.ylabel('Spatial Density ' + r'$[\AA^{-d}]$', fontsize=16)
        if Temp != 'T':
            pl.title('T = %s K' % Temp)
        else:
            pl.title(r'$\mu\ =\ $' + ChemPot + ' K')
        pl.legend(loc=2)

        pl.savefig('density_vs_mu_allAlphas.pdf',
                   format='pdf',
                   bbox_inches='tight')
        pl.savefig('density_vs_mu_allAlphas_trans.pdf',
                   format='pdf',
                   bbox_inches='tight',
                   transparent=True)

    # SUPERFLUID STIFFNESS
    fig2, ax2 = pl.subplots(1, figsize=(8, 6.5))

    n = 0
    for alpha in sorted(alphas):
        os.chdir(alpha)
        a = alpha[-1]
        if alpha[-1] == '0':
            lab = 'Bulk'
        else:
            lab = r'$\alpha = $' + '%s: ' % a

        mus, stiff, stiffErr = pl.loadtxt('JackKnifeData_super.dat',
                                          unpack=True,
                                          usecols=(0, 1, 2))

        pl.errorbar(mus,
                    stiff,
                    stiffErr,
                    fmt='o',
                    label=lab,
                    color=colors[n],
                    markeredgecolor='DarkSlateGray',
                    markersize=8)

        # determine max and min values of mu
        if pl.amax(mus) > maxMus:
            maxMus = pl.amax(mus)
        if pl.amin(mus) < minMus:
            minMus = pl.amin(mus)

        os.chdir('..')
        n += 1

    if Temp == 'T':
        pl.xlabel('Temperature [K]', fontsize=16)
    else:
        pl.xlabel('Chemical Potential [K]', fontsize=16)
    pl.ylabel('Superfluid Fraction', fontsize=16)
    if Temp != 'T':
        pl.title('T = %s K' % Temp)
    else:
        pl.title(r'$\mu\ =\ $' + ChemPot + ' K')
    pl.legend()

    pl.savefig('superFrac_vs_mu_allAlphas.pdf',
               format='pdf',
               bbox_inches='tight')

    pl.savefig('superFrac_vs_mu_allAlphas_trans.pdf',
               format='pdf',
               bbox_inches='tight',
               transparent=True)
    '''
    # WINDING NUMBER COMPONENTS
    fig3,ax3 = pl.subplots(1, figsize=(8,6.5))

    n = 0
    for alpha in sorted(alphas):
        os.chdir(alpha)
        a = alpha[-1]
        if alpha[-1] == '0':
            lab = 'Bulk'
        else:
            lab = r'$\alpha = $'+'%s: ' % a
        
        mus, Wx2, Wx2Err, Wy2, Wy2Err, Wz2, Wz2Err = pl.loadtxt(
                'JackKnifeData_super.dat', unpack=True,
                usecols=(0,3,4,5,6,7,8))

        pl.errorbar(mus, Wx2, Wx2Err, fmt='o', 
                label=(lab+': '+r'$\langle W_x^2 \rangle$'),
                color = colors[n], markeredgecolor='DarkSlateGray',
                markersize=8)

        pl.errorbar(mus, Wy2, Wy2Err, fmt='v', 
                label=(lab+': ' +r'$\langle W_y^2 \rangle$'),
                color = colors[n], markeredgecolor='DarkSlateGray',
                markersize=8)

        pl.errorbar(mus, Wz2, Wz2Err, fmt='s', 
                label=(lab+': '+r'$\langle W_z^2 \rangle$' ),
                color = colors[n], markeredgecolor='DarkSlateGray',
                markersize=8)

        # determine max and min values of mu
        if pl.amax(mus) > maxMus:
            maxMus = pl.amax(mus)
        if pl.amin(mus) < minMus:
            minMus = pl.amin(mus)

        os.chdir('..')
        n += 1

    if Temp == 'T':
        pl.xlabel('Temperature [K]', fontsize=16)
    else:
        pl.xlabel('Chemical Potential [K]', fontsize=16)
    pl.ylabel(r'$\langle W_i^2 \rangle$', fontsize=16)
    if Temp != 'T':
        pl.title('T = %s K' % Temp)
    else:
        pl.title(r'$\mu\ =\ $'+ChemPot+' K')
    pl.legend()
    
    pl.savefig('windingNumbers_vs_mu_allAlphas.pdf', format='pdf',
            bbox_inches='tight')
     
    pl.savefig('windingNumbers_vs_mu_allAlphas_trans.pdf', format='pdf',
            bbox_inches='tight', transparent=True)
    
    # W_z^2
    fig4,ax4 = pl.subplots(1, figsize=(8,6.5))

    n = 0
    for alpha in sorted(alphas):
        os.chdir(alpha)
        a = alpha[-1]
        if alpha[-1] == '0':
            lab = 'Bulk'
        else:
            lab = r'$\alpha = $'+'%s: ' % a
        
        mus, Wz2, Wz2Err = pl.loadtxt(
                'JackKnifeData_super.dat', unpack=True,
                usecols=(0,7,8))

        pl.errorbar(mus, Wz2, Wz2Err, fmt='s', 
                #label=(r'$\alpha = $'+'%s: ' % a),
                label=lab,
                color = colors[n], markeredgecolor='DarkSlateGray',
                markersize=8)

        # determine max and min values of mu
        if pl.amax(mus) > maxMus:
            maxMus = pl.amax(mus)
        if pl.amin(mus) < minMus:
            minMus = pl.amin(mus)

        os.chdir('..')
        n += 1

    if Temp == 'T':
        pl.xlabel('Temperature [K]', fontsize=16)
    else:
        pl.xlabel('Chemical Potential [K]', fontsize=16)
    pl.ylabel(r'$\langle W_z^2 \rangle$', fontsize=16)
    if Temp != 'T':
        pl.title('T = %s K' % Temp)
    else:
        pl.title(r'$\mu\ =\ $'+ChemPot+' K')
    pl.legend()
    
    pl.savefig('windingZ_vs_mu_allAlphas.pdf', format='pdf',
            bbox_inches='tight')
 
    pl.savefig('windingZ_vs_mu_allAlphas_trans.pdf', format='pdf',
            bbox_inches='tight', transparent=True)
    '''

    pl.show()
Example #34
            if lattice[a, b, c] > 0:
                neighbors = get_neighbors(a, b, c)
                border = False
                for neigh in neighbors:
                    if lattice[neigh] == 0:
                        borderPositions1.append([a, b, c])
                        lattice[a, b, c] = -1
                        break
borderPositions1 = p.array(borderPositions1)
# ii) find closest distance to border site for each 1 and 2 (type one and spanning cluster or not)
distances = []
for a in range(N):
    for b in range(N):
        for c in range(N):
            if lattice[a, b, c] > 0:
                diffs = borderPositions1 - p.array([a, b, c])
                minBorderDist = p.amin(p.sum((diffs * diffs).T, axis=0))
                distances.append(p.sqrt(minBorderDist))

# p.figure()
# p.imshow(init_lattice, interpolation='none')
# p.suptitle("Initial configuration")
p.figure()
p.imshow(p.sum(lattice[N // 2:N // 2 + 1], axis=0),
         interpolation='none',
         cmap=p.get_cmap('afmhot'))
p.suptitle("Final configuration")
p.figure()
p.hist(distances)
p.suptitle("Distance distribution")
p.show()
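A small standalone illustration (toy coordinates) of the nearest-border-distance step above: squared coordinate differences to every border site are summed and the minimum taken before the square root:

import pylab as p

borderPositions = p.array([[0, 0, 0], [0, 5, 0], [9, 9, 9]])
site = p.array([1, 1, 1])
diffs = borderPositions - site
minBorderDist = p.amin(p.sum((diffs * diffs).T, axis=0))
print(p.sqrt(minBorderDist))    # distance to (0, 0, 0): sqrt(3) ~ 1.73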
Example #35
    def run(self, fpath, job):
        param = self.param
        base, _ = ocrolib.allsplitext(fpath)
        basefile = ocrolib.allsplitext(os.path.basename(fpath))[0]

        if param['parallel'] < 2:
            print_info("=== %s %-3d" % (fpath, job))
        raw = ocrolib.read_image_gray(fpath)

        flat = raw
        # estimate skew angle and rotate
        if param['maxskew'] > 0:
            if param['parallel'] < 2:
                print_info("estimating skew angle")
            d0, d1 = flat.shape
            o0, o1 = int(param['bignore']*d0), int(param['bignore']*d1)
            flat = amax(flat)-flat
            flat -= amin(flat)
            est = flat[o0:d0-o0, o1:d1-o1]
            ma = param['maxskew']
            ms = int(2*param['maxskew']*param['skewsteps'])
            angle = self.estimate_skew_angle(est, linspace(-ma, ma, ms+1))
            flat = interpolation.rotate(flat, angle, mode='constant', reshape=0)
            flat = amax(flat)-flat
        else:
            angle = 0

        # estimate low and high thresholds
        if param['parallel'] < 2:
            print_info("estimating thresholds")
        d0, d1 = flat.shape
        o0, o1 = int(param['bignore']*d0), int(param['bignore']*d1)
        est = flat[o0:d0-o0, o1:d1-o1]
        if param['escale'] > 0:
            # by default, we use only regions that contain
            # significant variance; this makes the percentile
            # based low and high estimates more reliable
            e = param['escale']
            v = est-filters.gaussian_filter(est, e*20.0)
            v = filters.gaussian_filter(v**2, e*20.0)**0.5
            v = (v > 0.3*amax(v))
            v = morphology.binary_dilation(v, structure=ones((int(e*50), 1)))
            v = morphology.binary_dilation(v, structure=ones((1, int(e*50))))
            if param['debug'] > 0:
                imshow(v)
                ginput(1, param['debug'])
            est = est[v]
        lo = stats.scoreatpercentile(est.ravel(), param['lo'])
        hi = stats.scoreatpercentile(est.ravel(), param['hi'])
        # rescale the image to get the gray scale image
        if param['parallel'] < 2:
            print_info("rescaling")
        flat -= lo
        flat /= (hi-lo)
        flat = clip(flat, 0, 1)
        if param['debug'] > 0:
            imshow(flat, vmin=0, vmax=1)
            ginput(1, param['debug'])
        deskewed = 1*(flat > param['threshold'])

        # output the normalized grayscale and the thresholded images
        print_info("%s lo-hi (%.2f %.2f) angle %4.1f" % (basefile, lo, hi, angle))
        if param['parallel'] < 2:
            print_info("writing")
        ocrolib.write_image_binary(base+".ds.png", deskewed)
        return base+".ds.png"
Example #36
def add_stations(station_selection,
                 phases0,
                 phases1,
                 flags,
                 mask,
                 station_names,
                 station_positions,
                 source_names,
                 source_selection,
                 times,
                 freqs,
                 r,
                 nband_min=2,
                 soln_type='phase',
                 nstations_max=None,
                 excluded_stations=None,
                 t_step=5,
                 tec_step1=5,
                 tec_step2=21,
                 search_full_tec_range=False):
    """
    Adds stations to TEC fitting using an iterative initial-guess search to
    ensure the global min is found

    Keyword arguments:
    station_selection -- indices of stations to use in fitting
    phases0 -- XX phase solutions
    phases1 -- YY phase solutions
    flags -- phase solution flags (0 = use, 1 = flagged)
    mask -- mask for sources and frequencies (0 = ignore, 1 = use)
    station_names -- array of station names
    source_names -- array of source names
    source_selection -- indices of sources to use in fitting
    times -- array of times
    freqs -- array of frequencies
    r -- array of TEC solutions returned by fit_tec_per_source_pair()
    nband_min -- min number of bands for a source to be used
    soln_type -- type of phase solution: 'phase' or 'scalarphase'
    nstations_max -- max number of stations to use
    excluded_stations -- stations to exclude
    t_step -- try full TEC range every t_step number of solution times
    tec_step1 -- number of steps in TEC subrange (+/- last TEC fit value)
    tec_step2 -- number of steps in full TEC range (-0.1 -- 0.1)
    search_full_tec_range -- always search the full TEC range (-0.1 -- 0.1)
    """
    from pylab import pinv, newaxis, find, amin
    import numpy as np
    from lofar.expion import baselinefitting
    import progressbar

    N_sources_selected = len(source_selection)
    N_stations_selected = len(station_selection)
    N_piercepoints = N_sources_selected * N_stations_selected
    N_times = len(times)
    N_stations = len(station_names)
    N_sources = len(source_names)
    N_pairs = 0
    for ii, i in enumerate(source_selection):
        for jj, j in enumerate(source_selection):
            if j == i:
                break
            subband_selection = find(mask[i, :] * mask[j, :])
            if len(subband_selection) < nband_min:
                continue
            N_pairs += 1

    D = np.resize(station_positions, (N_stations, N_stations, 3))
    D = np.transpose(D, (1, 0, 2)) - D
    D = np.sqrt(np.sum(D**2, axis=2))

    station_selection1 = station_selection
    stations_to_add = np.array([
        i for i in range(len(station_names)) if i not in station_selection1
        and station_names[i] not in excluded_stations
    ])
    if len(stations_to_add) == 0:
        return station_selection1, r

    # Check if desired number of stations is already reached
    if nstations_max is not None:
        if len(station_selection1) >= nstations_max:
            return station_selection1, r

    logging.info("Using fitting with iterative search for remaining stations "
                 "(up to {0} stations in total)".format(nstations_max))
    q = r
    while len(stations_to_add) > 0:
        D1 = D[stations_to_add[:, newaxis], station_selection1[newaxis, :]]

        minimum_distance = amin(D1, axis=1)
        station_to_add = stations_to_add[np.argmin(minimum_distance)]
        station_selection1 = np.append(station_selection1, station_to_add)
        N_stations_selected1 = len(station_selection1)

        # Remove station from list
        stations_to_add = stations_to_add[stations_to_add != station_to_add]

        sols_list = []
        eq_list = []
        min_e_list = []

        ipbar = 0
        logging.info('Fitting TEC values with {0} included...'.format(
            station_names[station_to_add]))
        pbar = progressbar.ProgressBar(maxval=N_pairs * N_times).start()
        for ii, i in enumerate(source_selection):
            for jj, j in enumerate(source_selection):
                if j == i:
                    break
                subband_selection = find(mask[i, :] * mask[j, :])
                if len(subband_selection) < nband_min:
                    continue
                logging.debug('Adding {0} for source pair: {1}-{2}'.format(
                    station_names[station_to_add], i, j))
                p0 = phases0[i, station_selection1[:, newaxis],
                             subband_selection[newaxis, :], :] - phases0[
                                 j, station_selection1[:, newaxis],
                                 subband_selection[newaxis, :], :]
                p0 = p0 - np.mean(p0, axis=0)[newaxis, :, :]
                if soln_type != 'scalarphase':
                    p1 = phases1[i, station_selection1[:, newaxis],
                                 subband_selection[newaxis, :], :] - phases1[
                                     j, station_selection1[:, newaxis],
                                     subband_selection[newaxis, :], :]
                    p1 = p1 - np.mean(p1, axis=0)[newaxis, :, :]
                A = np.zeros((len(subband_selection), 1))
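                # Note (assumption, not stated in the snippet): 8.44797245e9 is the
                # usual ionospheric dispersion factor, i.e. phase ~ 8.44797245e9 *
                # dTEC / freq with dTEC in TECU and freq in Hz, as commonly used in
                # LOFAR TEC fitting.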
                A[:, 0] = 8.44797245e9 / freqs[subband_selection]

                flags_source_pair = flags[
                    i, station_selection1[:, newaxis],
                    subband_selection[newaxis, :], :] * flags[
                        j, station_selection1[:, newaxis],
                        subband_selection[newaxis, :], :]
                constant_parms = np.zeros((1, N_stations_selected1),
                                          dtype=np.bool)
                sols = np.zeros((N_times, N_stations_selected1),
                                dtype=np.float)
                p_0_best = None
                for t_idx in range(N_times):
                    if np.mod(t_idx, t_step) == 0:
                        min_e = np.Inf
                        if p_0_best is not None and not search_full_tec_range:
                            min_tec = p_0_best[0, -1] - 0.02
                            max_tec = p_0_best[0, -1] + 0.02
                            nsteps = tec_step1
                        else:
                            min_tec = -0.1
                            max_tec = 0.1
                            nsteps = tec_step2
                        logging.debug(
                            '  Trying initial guesses between {0} and '
                            '{1} TECU'.format(min_tec, max_tec))
                        for offset in np.linspace(min_tec, max_tec, nsteps):
                            p_0 = np.zeros((1, N_stations_selected1),
                                           np.double)
                            p_0[0, :N_stations_selected1 -
                                1] = (q[ii, t_idx, :] -
                                      q[jj, t_idx, :])[newaxis, :]
                            p_0[0, -1] = offset

                            x = p0[:, :, t_idx].copy()
                            f = flags_source_pair[:, :, t_idx].copy()
                            sol0 = baselinefitting.fit(x.T, A, p_0, f,
                                                       constant_parms)
                            sol0 -= np.mean(sol0)
                            residual = np.mod(
                                np.dot(A, sol0) - x.T + np.pi,
                                2 * np.pi) - np.pi
                            residual = residual[f.T == 0]
                            e = np.var(residual)

                            if soln_type != 'scalarphase':
                                x = p1[:, :, t_idx].copy()
                                f = flags_source_pair[:, :, t_idx].copy()
                                sol1 = baselinefitting.fit(
                                    x.T, A, p_0, f, constant_parms)
                                sol1 -= np.mean(sol1)
                                residual = np.mod(
                                    np.dot(A, sol1) - x.T + np.pi,
                                    2 * np.pi) - np.pi
                                residual = residual[f.T == 0]
                                e += np.var(residual)
                            else:
                                sol1 = sol0

                            if e < min_e:
                                logging.debug(
                                    '  Found new min variance of {0} '
                                    'with initial guess of {1} TECU'.format(
                                        e, p_0[0, -1]))
                                min_e = e
                                p_0_best = p_0
                                sols[t_idx, :] = (sol0[0, :] + sol1[0, :]) / 2
                    else:
                        # Use previous init
                        x = p0[:, :, t_idx].copy()
                        f = flags_source_pair[:, :, t_idx].copy()
                        sol0 = baselinefitting.fit(x.T, A, p_0_best, f,
                                                   constant_parms)
                        sol0 -= np.mean(sol0)

                        if soln_type != 'scalarphase':
                            x = p1[:, :, t_idx].copy()
                            f = flags_source_pair[:, :, t_idx].copy()
                            sol1 = baselinefitting.fit(x.T, A, p_0_best, f,
                                                       constant_parms)
                            sol1 -= np.mean(sol1)
                        else:
                            sol1 = sol0
                        sols[t_idx, :] = (sol0[0, :] + sol1[0, :]) / 2

                    ipbar += 1
                    pbar.update(ipbar)

                ### Remove outliers
                logging.debug('  Searching for outliers...')
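                # Descriptive note: for each time sample, count how many samples in
                # a +/-10 window lie within 0.02 TECU of it; samples with too few
                # close neighbours are flagged as outliers, replaced by linear
                # interpolation between the nearest accepted samples, and the fit is
                # repeated with that value as the initial guess (up to 10 passes).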
                for kk in range(10):
                    s = sols[:, -1].copy()
                    selection = np.zeros(len(s), np.bool)
                    for t_idx in range(len(s)):
                        start_idx = np.max([t_idx - 10, 0])
                        end_idx = np.min([t_idx + 10, len(s)])
                        selection[t_idx] = np.sum(
                            abs(s[start_idx:end_idx] - s[t_idx]) < 0.02) > (
                                end_idx - start_idx - 8)
                    outliers = find(np.logical_not(selection))
                    if len(outliers) == 0:
                        break
                    for t_idx in outliers:
                        try:
                            idx0 = find(selection[:t_idx])[-1]
                        except IndexError:
                            idx0 = -1
                        try:
                            idx1 = find(selection[t_idx + 1:])[0] + t_idx + 1
                        except IndexError:
                            idx1 = -1
                        if idx0 == -1:
                            s[t_idx] = s[idx1]
                        elif idx1 == -1:
                            s[t_idx] = s[idx0]
                        else:
                            s[t_idx] = (s[idx0] * (idx1 - t_idx) + s[idx1] *
                                        (t_idx - idx0)) / (idx1 - idx0)

                        p_0 = np.zeros((1, N_stations_selected1), np.double)
                        p_0[0, :] = sols[t_idx, :]
                        p_0[0, -1] = s[t_idx]

                        x = p0[:, :, t_idx].copy()
                        f = flags_source_pair[:, :, t_idx].copy()
                        sol0 = baselinefitting.fit(x.T, A, p_0, f,
                                                   constant_parms)
                        sol0 -= np.mean(sol0)

                        if soln_type != 'scalarphase':
                            x = p1[:, :, t_idx].copy()
                            sol1 = baselinefitting.fit(x.T, A, p_0, f,
                                                       constant_parms)
                            sol1 -= np.mean(sol1)
                        else:
                            sol1 = sol0
                        sols[t_idx, :] = (sol0[0, :] + sol1[0, :]) / 2

                weight = 1.0
                sols_list.append(weight * sols)
                min_e_list.append(min_e)
                eq = np.zeros(N_sources)
                eq[ii] = weight
                eq[jj] = -weight
                eq_list.append(eq)

        sols = np.array(sols_list)
        B = np.array(eq_list)
        pinvB = pinv(B)

        q = np.dot(pinvB, sols.transpose([1, 0, 2]))
        pbar.finish()

        if nstations_max is not None:
            if N_stations_selected1 == nstations_max:
                break

    return station_selection1, q
    def __init__(self, data_vector, movie_parameters, **params):
        self.p = movie_parameters
        if self.p.movie_name is not None:
            self.params = Params(**params)
            mpl.rcParams['patch.edgecolor'] = 'none'
            mpl.rcParams['savefig.edgecolor'] = 'none'
            self.source = get_filename(self.params.reference_filename)
            self.__prefixed_movie_name__ = self.__get_prefixed_movie_name__()
            self.__prefixed_movie_dir__ = self.__get_prefixed_movie_dir__()

            if self.params.filter_manager is not None:
                data_vector = self.params.filter_manager.run_filters(
                                                                data_vector)
            _max = pl.amax(data_vector.signal)
            _min = pl.amin(data_vector.signal)
            self.time = data_vector.time

            margin = 50
            self.signal_size = len(data_vector.signal)
            self.range = [_min - margin, _max + margin,
                          _min - margin, _max + margin]
            self.x_data = pl.zeros(self.signal_size)
            self.y_data = pl.zeros(self.signal_size)

            self.old_signal_plus = None
            self.idx = 0
            self.active_color = get_extended_color_array(
                                        movie_parameters.movie_active_color)
            self.inactive_color = get_extended_color_array(
                                        movie_parameters.movie_inactive_color)
            self.centroid_color = get_extended_color_array(
                                        movie_parameters.movie_centroid_color)
            self.message = None
            self.core_nums = multiprocessing.cpu_count() * self.p.movie_multiprocessing_factor # @IgnorePep8

            self.pp_spec_manager = MiniPoincarePlotSpecManager()
            self.pp_spec_manager.movie_dir = self.__prefixed_movie_dir__
            self.pp_spec_manager.movie_name = self.__prefixed_movie_name__
            self.pp_spec_manager.movie_dpi = self.p.movie_dpi
            self.pp_spec_manager.movie_fps = self.p.movie_fps
            self.pp_spec_manager.movie_height = self.p.movie_height
            self.pp_spec_manager.movie_width = self.p.movie_width
            self.pp_spec_manager.active_color = self.active_color
            self.pp_spec_manager.inactive_color = self.inactive_color
            self.pp_spec_manager.centroid_color = self.centroid_color
            self.pp_spec_manager.active_point_size = self.p.movie_active_size
            self.pp_spec_manager.inactive_point_size = \
                                            self.p.movie_inactive_size
            self.pp_spec_manager.centroid_point_size = \
                                            self.p.movie_centroid_size
            self.pp_spec_manager.show_plot_legends = \
                                        self.p.movie_show_plot_legends
            self.pp_spec_manager.x_label = self.p.x_label
            self.pp_spec_manager.y_label = self.p.y_label
            self.pp_spec_manager.clean_frames = self.p.movie_clean_frames
            self.pp_spec_manager.movie_title = self.p.movie_title
            self.pp_spec_manager.movie_frame_step = self.p.movie_frame_step
            self.pp_spec_manager.movie_identity_line = self.p.movie_identity_line
            self.pp_spec_manager.movie_hour_label = self.p.movie_hour_label
            self.pp_spec_manager.movie_minute_label = self.p.movie_minute_label
            self.pp_spec_manager.movie_second_label = self.p.movie_second_label
            self.pp_spec_manager.movie_time_label_in_line = self.p.movie_time_label_in_line
            self.pp_spec_manager.movie_time_label_font_size = self.p.movie_time_label_font_size
            self.pp_spec_manager.movie_time_label_prefix = self.p.movie_time_label_prefix
            self.pp_spec_manager.movie_title_font_size = self.p.movie_title_font_size
            self.pp_spec_manager.movie_axis_font_size = self.p.movie_axis_font_size
            self.pp_spec_manager.movie_axis_font = self.p.movie_axis_font
            self.pp_spec_manager.movie_title_font = self.p.movie_title_font
            self.pp_spec_manager.movie_tick_font = self.p.movie_tick_font
            self.pp_spec_manager.movie_frame_pad = self.p.movie_frame_pad
            self.pp_spec_manager.movie_create_time_label = self.p.movie_create_time_label
            self.pp_spec_manager.movie_frame_filename_with_time = self.p.movie_frame_filename_with_time

            self.pp_specs_managers = []
            self.pp_specs_managers.append(self.pp_spec_manager)
            self.sub_dir_counter = 0
            self.scatter = None
            self.legend_text = None
            self.pp_specs = []
            self.cum_inactive = 0
            self._pp_spec_old = None
            self.s_size = 0  # current calculated signal size
Example #38
	def run(self):
		'''
		watch_process(params): module based on psutil to watch a specific
		program/process. 
		Parameters required are:
		params.cmd = command in a Popen supported list, i.e. no spaces,
			each argument in its own cell.  
		params.outtag = tag handle for the text file and the plot file.
			.txt and .png will be appended to this. 
		params.dt = incremental time-step to watch the process. Default is
			10 milliseconds. 
		params.total = total time to watch the process. Default is 5
			minutes. 
		'''
		fout = open(params.outtag+'.txt', 'w');
		d0 = psutil.disk_usage('/');
		running=True;
		cpu_usage = [];
		mem_usage = [];
		duration = [];
		d = 0.0;
		p = psutil.Popen(params.cmd, stdout=fout);
		
		try:
			while d<params.total:
				d = d+params.dt;
				if p.status=='running':
					# If the program is running, this captures
					# the vital-statistics.
					cpu_usage.append(p.get_cpu_percent());
					mem_usage.append(p.get_memory_percent());
					#duration.append(d);
					duration.append(p.get_cpu_times().user)
				else:
					# This watches and ensures that the
					# process is indeed dead. This is the first
					# level of exception handling and
					# loop-breakage. 
					procs = psutil.get_process_list();
					gone, alive = psutil.wait_procs(procs, 3, callback=on_terminate)
					break
				time.sleep(params.dt)
		except psutil._error.AccessDenied:
			# This exception watches for the natural death of the
			# process. This is the second level of redundancy handling
			# the death of the task, the first one is in the else
			# statement above. 
			p.kill()
			print "It has died and has become a... "+p.status;
			procs = psutil.get_process_list();
			gone, alive = psutil.wait_procs(procs, 3, callback=on_terminate)
		except KeyboardInterrupt:
			# This exception allows the user to Ctrl-C if you think
			# that the program is hanging.
			p.kill()
			print "I have killed it, and it has become a..."+p.status
			procs = psutil.get_process_list();
			gone, alive = psutil.wait_procs(procs, 3, callback=on_terminate)
		cpu_usage = pl.array(cpu_usage);
		duration = pl.array(duration);
		mem_usage = pl.array(mem_usage);
		pl.step(duration, cpu_usage, label='%CPU');
		pl.step(duration, mem_usage, label='%MEM');
		pl.xlabel('User Time (seconds)');
		pl.ylabel('%');
		d1 = psutil.disk_usage('/')
		D = d1.used-d0.used; 
		MB = 1024.*1024. # 1MB in Bytes
		data_written = D/MB; 
		total_time = pl.amax(duration)-pl.amin(duration);
		dw = str(round(data_written, 2))+"MB"
		tt = str(round(total_time, 5))+"s";
		string_ann = "Writes: "+dw+' in '+tt;
		title='Process: '+str(params.cmd)+'\n / '+string_ann;
		pl.title('\n'.join(wrap(title,50)));
		pl.legend();
		pl.tight_layout();
		pl.savefig(params.outtag+'.png', dpi=300);
		pl.close();
		fout.write(string_ann+'\n')
		fout.close()
		print "Completed."
    def initiate(self):
        if len(self.pp_specs) == 0:
            return False

        # only positive values are accepted
        x = self.p0.x_data[pl.where(self.p0.x_data > 0)]
        y = self.p0.y_data[pl.where(self.p0.y_data > 0)]

        x_min = pl.amin(x)
        x_max = pl.amax(x)
        y_min = pl.amin(y)
        y_max = pl.amax(y)
        value_min = x_min if x_min < y_min else y_min
        self.value_max = x_max if x_max > y_max else y_max

        self.pd = ArrayPlotData()
        self.pd.set_data("index", x)
        self.pd.set_data("value", y)

        index_ds = ArrayDataSource(x)
        value_ds = ArrayDataSource(y)

        # Create the plot
        self._plot = Plot(self.pd)

        axis_defaults = {
                         #'axis_line_weight': 2,
                         #'tick_weight': 2,
                         #'tick_label_color': 'green',
                         'title_font': self.axis_font,
                         }
        if self.tick_font:
            axis_defaults['tick_label_font'] = self.tick_font

        # a very important and weird trick, used to remove the default tick labels
        self._plot.x_axis = None
        self._plot.y_axis = None
        #end trick

        # add a new x label and x tick labels
        x_axis = PlotAxis(orientation='bottom',
                  title=nvl(self.manager.x_label, 'RR(n) [ms]'),
                  mapper=self._plot.x_mapper,
                  **axis_defaults)
        self._plot.overlays.append(x_axis)

        # add a new y label and y tick labels
        y_axis = PlotAxis(orientation='left',
                   title=nvl(self.manager.y_label, 'RR(n+1) [ms]'),
                   mapper=self._plot.y_mapper,
                   **axis_defaults)
        self._plot.overlays.append(y_axis)

        self._plot.index_range.add(index_ds)
        self._plot.value_range.add(value_ds)

        self._plot.index_mapper.stretch_data = False
        self._plot.value_mapper.stretch_data = False
        self._plot.value_range.set_bounds(value_min, self.value_max)
        self._plot.index_range.set_bounds(value_min, self.value_max)

        # Create the index and value mappers using the plot data ranges
        imapper = LinearMapper(range=self._plot.index_range)
        vmapper = LinearMapper(range=self._plot.value_range)

        color = "white"

        self.scatter = __PoincarePlotScatterPlot__(
                        self.p0,
                        self.manager,
                        index=index_ds,
                        value=value_ds,
                        #color_data=color_ds,
                        #color_mapper=color_mapper,
                        #fill_alpha=0.4,
                        color=color,
                        index_mapper=imapper,
                        value_mapper=vmapper,
                        marker='circle',
                        marker_size=self.manager.active_point_size,
                        line_width=0
                        #outline_color='white'
                        )
        self._plot.add(self.scatter)

        #self._plot.plots['var_size_scatter'] = [self.scatter]

        # Tweak some of the plot properties
        _title = nvl(self.manager.movie_title, "Poincare plot")
        if len(_title) > 0:
            self._plot.title = _title
            self._plot.title_font = self.title_font

        self._plot.line_width = 0.5
        self._plot.padding = self.frame_pad

        self._plot.do_layout(force=True)
        self._plot.outer_bounds = [self.manager.movie_width,
                                   self.manager.movie_height]

        self.gc = PlotGraphicsContext(self._plot.outer_bounds,
                                      dpi=self.manager.movie_dpi)
        self.gc.render_component(self._plot)
        self.gc.set_line_width(0)

        self.gc.save(self._get_filename(self.p0))

        self.x_mean_old = None
        self.y_mean_old = None
        self._time_label_font = None

        return True
    def process(self):
        for (n, input_file) in enumerate(self.input_files):
            pcgts = page_from_file(self.workspace.download_file(input_file))
            page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID
            page = pcgts.get_Page()

            # why does it save the image ??
            page_image, page_xywh, _ = self.workspace.image_from_page(
                page, page_id)

            if self.parameter['parallel'] < 2:
                LOG.info("INPUT FILE %s ", input_file.pageId or input_file.ID)
            raw = ocrolib.read_image_gray(page_image.filename)

            flat = raw
            #flat = np.array(binImg)
            # estimate skew angle and rotate
            if self.parameter['maxskew'] > 0:
                if self.parameter['parallel'] < 2:
                    LOG.info("Estimating Skew Angle")
                d0, d1 = flat.shape
                o0, o1 = int(self.parameter['bignore'] * d0), int(
                    self.parameter['bignore'] * d1)
                flat = amax(flat) - flat
                flat -= amin(flat)
                est = flat[o0:d0 - o0, o1:d1 - o1]
                ma = self.parameter['maxskew']
                ms = int(2 * self.parameter['maxskew'] *
                         self.parameter['skewsteps'])
                angle = self.estimate_skew_angle(est,
                                                 linspace(-ma, ma, ms + 1))
                flat = interpolation.rotate(flat,
                                            angle,
                                            mode='constant',
                                            reshape=0)
                flat = amax(flat) - flat
            else:
                angle = 0

            # self.write_angles_to_pageXML(base,angle)
            # estimate low and high thresholds
            if self.parameter['parallel'] < 2:
                LOG.info("Estimating Thresholds")
            d0, d1 = flat.shape
            o0, o1 = int(self.parameter['bignore'] * d0), int(
                self.parameter['bignore'] * d1)
            est = flat[o0:d0 - o0, o1:d1 - o1]
            if self.parameter['escale'] > 0:
                # by default, we use only regions that contain
                # significant variance; this makes the percentile
                # based low and high estimates more reliable
                e = self.parameter['escale']
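                # Descriptive note: v approximates the local standard deviation of
                # the high-pass residual (Gaussian-smoothed squared difference from
                # a Gaussian-filtered image); pixels above 30% of its maximum are
                # kept and dilated, so lo/hi are estimated only from "busy" regions.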
                v = est - filters.gaussian_filter(est, e * 20.0)
                v = filters.gaussian_filter(v**2, e * 20.0)**0.5
                v = (v > 0.3 * amax(v))
                v = morphology.binary_dilation(v,
                                               structure=ones(
                                                   (int(e * 50), 1)))
                v = morphology.binary_dilation(v,
                                               structure=ones(
                                                   (1, int(e * 50))))
                if self.parameter['debug'] > 0:
                    imshow(v)
                    ginput(1, self.parameter['debug'])
                est = est[v]
            lo = stats.scoreatpercentile(est.ravel(), self.parameter['lo'])
            hi = stats.scoreatpercentile(est.ravel(), self.parameter['hi'])
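            # lo and hi act as estimated black and white levels: the image is
            # shifted and scaled so lo maps to 0 and hi to 1, then clipped, before
            # the final threshold produces the deskewed binary image.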
            # rescale the image to get the gray scale image
            if self.parameter['parallel'] < 2:
                LOG.info("Rescaling")
            flat -= lo
            flat /= (hi - lo)
            flat = clip(flat, 0, 1)
            if self.parameter['debug'] > 0:
                imshow(flat, vmin=0, vmax=1)
                ginput(1, self.parameter['debug'])
            deskewed = 1 * (flat > self.parameter['threshold'])

            # output the normalized grayscale and the thresholded images
            LOG.info("%s lo-hi (%.2f %.2f) angle %4.1f" %
                     (pcgts.get_Page().imageFilename, lo, hi, angle))
            if self.parameter['parallel'] < 2:
                LOG.info("Writing")
            #ocrolib.write_image_binary(base+".ds.png", deskewed)

            #TODO: Need some clarification as the results affect the following pre-processing steps.
            #orientation = -angle
            #orientation = 180 - ((180 - orientation) % 360)
            pcgts.get_Page().set_orientation(angle)
            #print(orientation, angle)

            file_id = input_file.ID.replace(self.input_file_grp,
                                            self.output_file_grp)
            if file_id == input_file.ID:
                file_id = concat_padded(self.output_file_grp, n)

            self.workspace.add_file(ID=file_id,
                                    file_grp=self.output_file_grp,
                                    pageId=input_file.pageId,
                                    mimetype=MIMETYPE_PAGE,
                                    local_filename=os.path.join(
                                        self.output_file_grp,
                                        file_id + '.xml'),
                                    content=to_xml(pcgts).encode('utf-8'))
Example #41
def binarize_image(job):
    image_object, i = job
    raw = read_image_gray(image_object)
    image = raw - amin(raw)
    if amax(image) == amin(image):
        return  # Image is empty
    image /= amax(image)
    check = check_page(amax(image) - image)
    if check is not None:
        return
    if args.gray:
        extreme = 0
    else:
        extreme = (sum(image < 0.05) + sum(image > 0.95)) * 1.0 / prod(
            image.shape)

    if extreme > 0.95:
        comment = "no-normalization"
        flat = image
    else:
        comment = ""
        m = interpolation.zoom(image, args.zoom)
        m = filters.percentile_filter(m, args.perc, size=(args.range, 2))
        m = filters.percentile_filter(m, args.perc, size=(2, args.range))
        m = interpolation.zoom(m, 1.0 / args.zoom)
        w, h = minimum(array(image.shape), array(m.shape))
        flat = clip(image[:w, :h] - m[:w, :h] + 1, 0, 1)

    if args.maxskew > 0:
        d0, d1 = flat.shape
        o0, o1 = int(args.bignore * d0), int(args.bignore * d1)
        flat = amax(flat) - flat
        flat -= amin(flat)
        est = flat[o0:d0 - o0, o1:d1 - o1]
        ma = args.maxskew
        ms = int(2 * args.maxskew * args.skewsteps)
        angle = estimate_skew_angle(est, linspace(-ma, ma, ms + 1))
        flat = interpolation.rotate(flat, angle, mode='constant', reshape=0)
        flat = amax(flat) - flat
    else:
        angle = 0

    d0, d1 = flat.shape
    o0, o1 = int(args.bignore * d0), int(args.bignore * d1)
    est = flat[o0:d0 - o0, o1:d1 - o1]

    if args.escale > 0:
        e = args.escale
        v = est - filters.gaussian_filter(est, e * 20.0)
        v = filters.gaussian_filter(v**2, e * 20.0)**0.5
        v = (v > 0.3 * amax(v))
        v = morphology.binary_dilation(v, structure=ones((int(e * 50), 1)))
        v = morphology.binary_dilation(v, structure=ones((1, int(e * 50))))
        est = est[v]
    lo = stats.scoreatpercentile(est.ravel(), args.lo)
    hi = stats.scoreatpercentile(est.ravel(), args.hi)
    flat -= lo
    flat /= (hi - lo)
    flat = clip(flat, 0, 1)
    binary = 1 * (flat > args.threshold)
    return (binary, flat)
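# Hedged usage sketch (not part of the original snippet): binarize_image takes a
# (image, index) tuple and reads a module-level `args` namespace, so a driver
# might look roughly like this, with hypothetical file names:
#
#     from multiprocessing import Pool
#     jobs = [(fname, i) for i, fname in enumerate(['page-0001.png', 'page-0002.png'])]
#     with Pool() as pool:
#         results = pool.map(binarize_image, jobs)   # each item is (binary, flat) or None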
Example #42
    def process(self):
        for (n, input_file) in enumerate(self.input_files):
            pcgts = page_from_file(self.workspace.download_file(input_file))
            fname = pcgts.get_Page().imageFilename
            img = self.workspace.resolve_image_as_pil(fname)
            param = self.parameter
            base, _ = ocrolib.allsplitext(fname)
            #basefile = ocrolib.allsplitext(os.path.basename(fpath))[0]

            if param['parallel'] < 2:
                print_info("=== %s " % (fname))
            raw = ocrolib.read_image_gray(img.filename)

            flat = raw
            #flat = np.array(binImg)
            # estimate skew angle and rotate
            if param['maxskew'] > 0:
                if param['parallel'] < 2:
                    print_info("estimating skew angle")
                d0, d1 = flat.shape
                o0, o1 = int(param['bignore'] * d0), int(param['bignore'] * d1)
                flat = amax(flat) - flat
                flat -= amin(flat)
                est = flat[o0:d0 - o0, o1:d1 - o1]
                ma = param['maxskew']
                ms = int(2 * param['maxskew'] * param['skewsteps'])
                angle = self.estimate_skew_angle(est,
                                                 linspace(-ma, ma, ms + 1))
                flat = interpolation.rotate(flat,
                                            angle,
                                            mode='constant',
                                            reshape=0)
                flat = amax(flat) - flat
            else:
                angle = 0

            # self.write_angles_to_pageXML(base,angle)
            # estimate low and high thresholds
            if param['parallel'] < 2:
                print_info("estimating thresholds")
            d0, d1 = flat.shape
            o0, o1 = int(param['bignore'] * d0), int(param['bignore'] * d1)
            est = flat[o0:d0 - o0, o1:d1 - o1]
            if param['escale'] > 0:
                # by default, we use only regions that contain
                # significant variance; this makes the percentile
                # based low and high estimates more reliable
                e = param['escale']
                v = est - filters.gaussian_filter(est, e * 20.0)
                v = filters.gaussian_filter(v**2, e * 20.0)**0.5
                v = (v > 0.3 * amax(v))
                v = morphology.binary_dilation(v,
                                               structure=ones(
                                                   (int(e * 50), 1)))
                v = morphology.binary_dilation(v,
                                               structure=ones(
                                                   (1, int(e * 50))))
                if param['debug'] > 0:
                    imshow(v)
                    ginput(1, param['debug'])
                est = est[v]
            lo = stats.scoreatpercentile(est.ravel(), param['lo'])
            hi = stats.scoreatpercentile(est.ravel(), param['hi'])
            # rescale the image to get the gray scale image
            if param['parallel'] < 2:
                print_info("rescaling")
            flat -= lo
            flat /= (hi - lo)
            flat = clip(flat, 0, 1)
            if param['debug'] > 0:
                imshow(flat, vmin=0, vmax=1)
                ginput(1, param['debug'])
            deskewed = 1 * (flat > param['threshold'])

            # output the normalized grayscale and the thresholded images
            print_info("%s lo-hi (%.2f %.2f) angle %4.1f" %
                       (pcgts.get_Page().imageFilename, lo, hi, angle))
            if param['parallel'] < 2:
                print_info("writing")
            ocrolib.write_image_binary(base + ".ds.png", deskewed)

            orientation = -angle
            orientation = 180 - (180 - orientation) % 360
            pcgts.get_Page().set_orientation(orientation)

            ID = concat_padded(self.output_file_grp, n)
            self.workspace.add_file(ID=ID,
                                    file_grp=self.output_file_grp,
                                    pageId=input_file.pageId,
                                    mimetype="image/png",
                                    url=base + ".ds.png",
                                    local_filename='%s/%s' %
                                    (self.output_file_grp, ID),
                                    content=to_xml(pcgts).encode('utf-8'))
Example #43
print "Proccessing logfile ",inputlogfile
lines=open(inputlogfile,"r").readlines()

metadata=[x for x in lines if x[0]=="#"] #select meta data
fields=[x.split()[2:] for x in  metadata if x.split()[1]=="Fields:"][0]
print fields
R_index=fields.index("R")
Clock_index=fields.index("CLOCKTIME")

q=[x for x in lines if x[0]!="#"] #remove meta data
q=q[:-1] #last line might be malformed
print "first line: ",q[0]
R=P.array([float(x.split()[R_index]) for x in q])
Realtime=P.array([float(x.split()[Clock_index]) for x in q])
N=len(R)
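# avgR is the cumulative mean reward after each timestep, while l50 is a
# trailing 50-step moving average; both are plotted below against timestep.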
ns=range(1,N+1)
avgR=P.cumsum(R)/ns
n50=range(50,len(R))
l50=[P.mean(R[i-50:i]) for i in n50]
P.plot(ns,avgR,"r")
P.plot(n50,l50,"b")
P.text(N/2, avgR[N/4]-1, "Cumulative Average reward",color="r")
P.text(N/2, P.amax(l50) +1, "Average of last 50 rewards",color="b")
ytime=P.amin(avgR[:20]) +1
for i in P.arange(60,P.amax(Realtime),60):
    ind=bisect.bisect(Realtime,i)
    P.text(ind,ytime,".\n%d minute"%(i/60))
P.xlabel("Timestep")
P.savefig(inputlogfile+".pdf")
Example #44
    def _process_segment(self, page_image, page, page_xywh, page_id,
                         input_file, n):

        raw = ocrolib.pil2array(page_image)
        flat = raw.astype("float64")

        # estimate skew angle and rotate
        if self.parameter['maxskew'] > 0:
            if self.parameter['parallel'] < 2:
                LOG.info("Estimating Skew Angle")
            d0, d1 = flat.shape
            o0, o1 = int(self.parameter['bignore'] * d0), int(
                self.parameter['bignore'] * d1)
            flat = amax(flat) - flat
            flat -= amin(flat)
            est = flat[o0:d0 - o0, o1:d1 - o1]
            ma = self.parameter['maxskew']
            ms = int(2 * self.parameter['maxskew'] *
                     self.parameter['skewsteps'])
            angle = self.estimate_skew_angle(est, linspace(-ma, ma, ms + 1))
            flat = interpolation.rotate(flat,
                                        angle,
                                        mode='constant',
                                        reshape=0)
            flat = amax(flat) - flat
        else:
            angle = 0

        # self.write_angles_to_pageXML(base,angle)
        # estimate low and high thresholds
        if self.parameter['parallel'] < 2:
            LOG.info("Estimating Thresholds")
        d0, d1 = flat.shape
        o0, o1 = int(self.parameter['bignore'] * d0), int(
            self.parameter['bignore'] * d1)
        est = flat[o0:d0 - o0, o1:d1 - o1]
        if self.parameter['escale'] > 0:
            # by default, we use only regions that contain
            # significant variance; this makes the percentile
            # based low and high estimates more reliable
            e = self.parameter['escale']
            v = est - filters.gaussian_filter(est, e * 20.0)
            v = filters.gaussian_filter(v**2, e * 20.0)**0.5
            v = (v > 0.3 * amax(v))
            v = morphology.binary_dilation(v, structure=ones((int(e * 50), 1)))
            v = morphology.binary_dilation(v, structure=ones((1, int(e * 50))))
            if self.parameter['debug'] > 0:
                imshow(v)
                ginput(1, self.parameter['debug'])
            est = est[v]
        lo = stats.scoreatpercentile(est.ravel(), self.parameter['lo'])
        hi = stats.scoreatpercentile(est.ravel(), self.parameter['hi'])

        # rescale the image to get the gray scale image
        if self.parameter['parallel'] < 2:
            LOG.info("Rescaling")
        flat -= lo
        flat /= (hi - lo)
        flat = clip(flat, 0, 1)
        if self.parameter['debug'] > 0:
            imshow(flat, vmin=0, vmax=1)
            ginput(1, self.parameter['debug'])
        deskewed = 1 * (flat > self.parameter['threshold'])

        # output the normalized grayscale and the thresholded images
        #LOG.info("%s lo-hi (%.2f %.2f) angle %4.1f" %(lo, hi, angle))

        #TODO: Need some clarification as the results effect the following pre-processing steps.
        #orientation = -angle
        #orientation = 180 - ((180 - orientation) % 360)

        if angle is None:  # FIXME: quick fix to prevent angle of "none"
            angle = 0

        page.set_orientation(angle)

        page_xywh['features'] += ',deskewed'
        bin_array = array(255 * (deskewed > ocrolib.midrange(deskewed)), 'B')
        page_image = ocrolib.array2pil(bin_array)

        file_id = input_file.ID.replace(self.input_file_grp, self.image_grp)
        if file_id == input_file.ID:
            file_id = concat_padded(self.image_grp, n)
        file_path = self.workspace.save_image_file(page_image,
                                                   file_id,
                                                   page_id=page_id,
                                                   file_grp=self.image_grp)
        page.add_AlternativeImage(
            AlternativeImageType(filename=file_path,
                                 comments=page_xywh['features']))
Example #45
    def _process_segment(self, page, filename, page_id, file_id):
        raw = ocrolib.read_image_gray(filename)
        self.dshow(raw, "input")

        # perform image normalization
        image = raw - amin(raw)
        if amax(image) == amin(image):
            LOG.info("# image is empty: %s" % (page_id))
            return
        image /= amax(image)

        if not self.parameter['nocheck']:
            check = self.check_page(amax(image) - image)
            if check is not None:
                LOG.error(input_file.pageId or input_file.ID + " SKIPPED. " +
                          check + " (use -n to disable this check)")
                return

        # check whether the image is already effectively binarized
        if self.parameter['gray']:
            extreme = 0
        else:
            extreme = (np.sum(image < 0.05) +
                       np.sum(image > 0.95)) * 1.0 / np.prod(image.shape)
        if extreme > 0.95:
            comment = "no-normalization"
            flat = image
        else:
            comment = ""
            # if not, we need to flatten it by estimating the local whitelevel
            LOG.info("Flattening")
            m = interpolation.zoom(image, self.parameter['zoom'])
            m = filters.percentile_filter(m,
                                          self.parameter['perc'],
                                          size=(self.parameter['range'], 2))
            m = filters.percentile_filter(m,
                                          self.parameter['perc'],
                                          size=(2, self.parameter['range']))
            m = interpolation.zoom(m, 1.0 / self.parameter['zoom'])
            if self.parameter['debug'] > 0:
                clf()
                imshow(m, vmin=0, vmax=1)
                ginput(1, self.parameter['debug'])
            w, h = minimum(array(image.shape), array(m.shape))
            flat = clip(image[:w, :h] - m[:w, :h] + 1, 0, 1)
            if self.parameter['debug'] > 0:
                clf()
                imshow(flat, vmin=0, vmax=1)
                ginput(1, self.parameter['debug'])

        # estimate low and high thresholds
        LOG.info("Estimating Thresholds")
        d0, d1 = flat.shape
        o0, o1 = int(self.parameter['bignore'] * d0), int(
            self.parameter['bignore'] * d1)
        est = flat[o0:d0 - o0, o1:d1 - o1]
        if self.parameter['escale'] > 0:
            # by default, we use only regions that contain
            # significant variance; this makes the percentile
            # based low and high estimates more reliable
            e = self.parameter['escale']
            v = est - filters.gaussian_filter(est, e * 20.0)
            v = filters.gaussian_filter(v**2, e * 20.0)**0.5
            v = (v > 0.3 * amax(v))
            v = morphology.binary_dilation(v, structure=ones((int(e * 50), 1)))
            v = morphology.binary_dilation(v, structure=ones((1, int(e * 50))))
            if self.parameter['debug'] > 0:
                imshow(v)
                ginput(1, self.parameter['debug'])
            est = est[v]
        lo = stats.scoreatpercentile(est.ravel(), self.parameter['lo'])
        hi = stats.scoreatpercentile(est.ravel(), self.parameter['hi'])
        # rescale the image to get the gray scale image
        LOG.info("Rescaling")
        flat -= lo
        flat /= (hi - lo)
        flat = clip(flat, 0, 1)
        if self.parameter['debug'] > 0:
            imshow(flat, vmin=0, vmax=1)
            ginput(1, self.parameter['debug'])
        binarized = 1 * (flat > self.parameter['threshold'])

        # output the normalized grayscale and the thresholded images
        # print_info("%s lo-hi (%.2f %.2f) angle %4.1f %s" % (fname, lo, hi, angle, comment))
        LOG.info("%s lo-hi (%.2f %.2f) %s" % (page_id, lo, hi, comment))
        LOG.info("writing")
        if self.parameter['debug'] > 0 or self.parameter['show']:
            clf()
            gray()
            imshow(binarized)
            ginput(1, max(0.1, self.parameter['debug']))
        #base, _ = ocrolib.allsplitext(filename)
        #ocrolib.write_image_binary(base + ".bin.png", binarized)
        # ocrolib.write_image_gray(base +".nrm.png", flat)
        # print("########### File path : ", base+".nrm.png")
        # write_to_xml(base+".bin.png")
        # return base+".bin.png"

        bin_array = array(255 * (binarized > ocrolib.midrange(binarized)), 'B')
        bin_image = ocrolib.array2pil(bin_array)

        file_path = self.workspace.save_image_file(bin_image,
                                                   file_id,
                                                   page_id=page_id,
                                                   file_grp=self.image_grp)
        page.add_AlternativeImage(
            AlternativeImageType(filename=file_path, comment="binarized"))
Example #46
def add_stations(station_selection, phases0, phases1, flags, mask,
    station_names, station_positions, source_names, source_selection,
    times, freqs, r, nband_min=2, soln_type='phase', nstations_max=None,
    excluded_stations=None, t_step=5, tec_step1=5, tec_step2=21,
    search_full_tec_range=True):
    """
    Adds stations to TEC fitting using an iterative initial-guess search to
    ensure the global min is found

    Keyword arguments:
    station_selection -- indices of stations to use in fitting
    phases0 -- XX phase solutions
    phases1 -- YY phase solutions
    flags -- phase solution flags (0 = use, 1 = flagged)
    mask -- mask for sources and frequencies (0 = ignore, 1 = use)
    station_names -- array of station names
    source_names -- array of source names
    source_selection -- indices of sources to use in fitting
    times -- array of times
    freqs -- array of frequencies
    r -- array of TEC solutions returned by fit_tec_per_source_pair()
    nband_min -- min number of bands for a source to be used
    soln_type -- type of phase solution: 'phase' or 'scalarphase'
    nstations_max -- max number of stations to use
    excluded_stations -- stations to exclude
    t_step -- try full TEC range every t_step number of solution times
    tec_step1 -- number of steps in TEC subrange (+/- last TEC fit value)
    tec_step2 -- number of steps in full TEC range (-0.1 -- 0.1)
    search_full_tec_range -- always search the full TEC range (-0.1 -- 0.1)
    """
    from pylab import pinv, newaxis, find, amin
    import numpy as np
    from lofar.expion import baselinefitting
    try:
        import progressbar
    except ImportError:
        import losoto.progressbar as progressbar

    N_sources_selected = len(source_selection)
    N_stations_selected = len(station_selection)
    N_piercepoints = N_sources_selected * N_stations_selected
    N_times = len(times)
    N_stations = len(station_names)
    N_sources = len(source_names)
    N_pairs = 0
    for ii, i in enumerate(source_selection):
        for jj, j in enumerate(source_selection):
            if j == i:
                break
            subband_selection = find(mask[i,:] * mask[j,:])
            if len(subband_selection) < nband_min:
                continue
            N_pairs += 1

    D = np.resize(station_positions, (N_stations, N_stations, 3))
    D = np.transpose(D, (1, 0, 2)) - D
    D = np.sqrt(np.sum(D**2, axis=2))

    station_selection1 = station_selection
    stations_to_add = np.array([i for i in xrange(len(station_names))
        if i not in station_selection1 and station_names[i] not in
        excluded_stations])
    if len(stations_to_add) == 0:
        return station_selection1, r

    # Check if desired number of stations is already reached
    if nstations_max is not None:
        if len(station_selection1) >= nstations_max:
            return station_selection1, r

    logging.info("Using fitting with iterative search for remaining stations "
        "(up to {0} stations in total)".format(nstations_max))
    q = r
    while len(stations_to_add)>0:
        D1 = D[stations_to_add[:,newaxis], station_selection1[newaxis,:]]

        minimum_distance = amin(D1, axis=1)
        station_to_add = stations_to_add[np.argmin(minimum_distance)]
        station_selection1 = np.append(station_selection1, station_to_add)
        N_stations_selected1 = len(station_selection1)

        # Remove station from list
        stations_to_add = stations_to_add[stations_to_add != station_to_add]

        sols_list = []
        eq_list = []
        min_e_list = []

        ipbar = 0
        logging.info('Fitting TEC values with {0} included...'.format(
            station_names[station_to_add]))
        pbar = progressbar.ProgressBar(maxval=N_pairs*N_times).start()
        for ii, i in enumerate(source_selection):
            for jj, j in enumerate(source_selection):
                if j == i:
                    break
                subband_selection = find(mask[i,:] * mask[j,:])
                if len(subband_selection) < nband_min:
                    continue
                logging.debug('Adding {0} for source pair: {1}-{2}'.format(
                    station_names[station_to_add], i, j))
                p0 = phases0[i, station_selection1[:,newaxis],
                    subband_selection[newaxis,:], :] - phases0[j,
                    station_selection1[:,newaxis], subband_selection[newaxis,:], :]
                p0 = p0 - np.mean(p0, axis=0)[newaxis,:,:]
                if soln_type != 'scalarphase':
                    p1 = phases1[i, station_selection1[:,newaxis],
                        subband_selection[newaxis,:], :] - phases1[j,
                        station_selection1[:,newaxis], subband_selection[newaxis,:], :]
                    p1 = p1 - np.mean(p1, axis=0)[newaxis, :, :]
                A = np.zeros((len(subband_selection), 1))
                A[:, 0] = 8.44797245e9 / freqs[subband_selection]
                sd = (0.1 * 30e6) / freqs[subband_selection] # standard deviation of phase solutions as function of frequency (rad)

                flags_source_pair = flags[i, station_selection1[:,newaxis],
                    subband_selection[newaxis,:], :] * flags[j,
                    station_selection1[:,newaxis], subband_selection[newaxis,:], :]
                constant_parms = np.zeros((1, N_stations_selected1), dtype = np.bool)
                sols = np.zeros((N_times, N_stations_selected1), dtype = np.float)
                p_0_best = None
                for t_idx in xrange(N_times):
                    if np.mod(t_idx, t_step) == 0:
                        min_e = np.Inf
                        if p_0_best is not None and not search_full_tec_range:
                            min_tec = p_0_best[0, -1] - 0.02
                            max_tec = p_0_best[0, -1] + 0.02
                            nsteps = tec_step1
                        else:
                            min_tec = -0.1
                            max_tec = 0.1
                            nsteps = tec_step2
                        logging.debug('  Trying initial guesses between {0} and '
                            '{1} TECU'.format(min_tec, max_tec))
                        for offset in np.linspace(min_tec, max_tec, nsteps):
                            p_0 = np.zeros((1, N_stations_selected1), np.double)
                            p_0[0, :N_stations_selected1-1] = (q[ii, t_idx, :] -
                                q[jj, t_idx, :])[newaxis,:]
                            p_0[0, -1] = offset

                            x = p0[:,:,t_idx].copy()
                            f = flags_source_pair[:, :, t_idx].copy()
                            sol0 = baselinefitting.fit(x.T, A, p_0, f, constant_parms)
                            sol0 -= np.mean(sol0)
                            residual = np.mod(np.dot(A, sol0) - x.T + np.pi,
                                2 * np.pi) - np.pi
                            e = np.var(residual[f.T==0])

                            if soln_type != 'scalarphase':
                                x = p1[:,:,t_idx].copy()
                                f = flags_source_pair[:, :, t_idx].copy()
                                sol1 = baselinefitting.fit(x.T, A, p_0, f,
                                    constant_parms)
                                sol1 -= np.mean(sol1)
                                residual = np.mod(np.dot(A, sol1) - x.T + np.pi,
                                    2 * np.pi) - np.pi
                                residual = residual[f.T==0]
                                e += np.var(residual)
                            else:
                                sol1 = sol0

                            if e < min_e:
                                logging.debug('  Found new min variance of {0} '
                                    'with initial guess of {1} TECU'.format(e,
                                    p_0[0, -1]))
                                min_e = e
                                p_0_best = p_0
                                sols[t_idx, :] = (sol0[0, :] + sol1[0, :])/2
                    else:
                        # Use previous init
                        x = p0[:, :, t_idx].copy()
                        f = flags_source_pair[:, :, t_idx].copy()
                        sol0 = baselinefitting.fit(x.T, A, p_0_best, f,
                            constant_parms)
                        sol0 -= np.mean(sol0)

                        if soln_type != 'scalarphase':
                            x = p1[:, :, t_idx].copy()
                            f = flags_source_pair[:, :, t_idx].copy()
                            sol1 = baselinefitting.fit(x.T, A, p_0_best, f,
                                constant_parms)
                            sol1 -= np.mean(sol1)
                        else:
                            sol1 = sol0
                        sols[t_idx, :] = (sol0[0, :] + sol1[0, :])/2

                    ipbar += 1
                    pbar.update(ipbar)

                ### Remove outliers
                logging.debug('  Searching for outliers...')
                for kk in xrange(10):
                    s = sols[:, -1].copy()
                    selection = np.zeros(len(s), np.bool)
                    for t_idx in xrange(len(s)):
                        start_idx = np.max([t_idx-10, 0])
                        end_idx = np.min([t_idx+10, len(s)])
                        selection[t_idx] = np.sum(abs(s[start_idx:end_idx] -
                            s[t_idx]) < 0.02) > (end_idx - start_idx - 8)
                    outliers = find(np.logical_not(selection))
                    if len(outliers) == 0:
                        break
                    for t_idx in outliers:
                        try:
                            idx0 = find(selection[:t_idx])[-1]
                        except IndexError:
                            idx0 = -1
                        try:
                            idx1 = find(selection[t_idx+1:])[0] + t_idx + 1
                        except IndexError:
                            idx1 = -1
                        if idx0 == -1:
                            s[t_idx] = s[idx1]
                        elif idx1 == -1:
                            s[t_idx] = s[idx0]
                        else:
                            s[t_idx] = (s[idx0] * (idx1-t_idx) + s[idx1] *
                                (t_idx-idx0)) / (idx1-idx0)

                        p_0 = np.zeros((1, N_stations_selected1), np.double)
                        p_0[0,:] = sols[t_idx,:]
                        p_0[0,-1] = s[t_idx]

                        x = p0[:,:,t_idx].copy()
                        f = flags_source_pair[:,:,t_idx].copy()
                        sol0 = baselinefitting.fit(x.T, A, p_0, f, constant_parms)
                        sol0 -= np.mean(sol0)

                        if soln_type != 'scalarphase':
                            x = p1[:,:,t_idx].copy()
                            sol1 = baselinefitting.fit(x.T, A, p_0, f,
                                constant_parms)
                            sol1 -= np.mean(sol1)
                        else:
                            sol1 = sol0
                        sols[t_idx, :] = (sol0[0,:] + sol1[0,:])/2

                weight = 1.0
                sols_list.append(weight*sols)
                min_e_list.append(min_e)
                eq = np.zeros(N_sources)
                eq[ii] = weight
                eq[jj] = -weight
                eq_list.append(eq)

        sols = np.array(sols_list)
        B = np.array(eq_list)
        pinvB = pinv(B)

        q = np.dot(pinvB, sols.transpose([1,0,2]))
        pbar.finish()

        if nstations_max is not None:
            if N_stations_selected1 == nstations_max:
                break

    return station_selection1, q
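# Hedged usage sketch (illustrative only, with hypothetical values): given the
# phase solutions and the initial per-source-pair TEC fit, a call might look like:
#
#     station_selection, r = fit_tec_per_source_pair(...)  # as noted in the docstring
#     station_selection, tec = add_stations(
#         station_selection, phases0, phases1, flags, mask, station_names,
#         station_positions, source_names, source_selection, times, freqs, r,
#         nband_min=2, soln_type='phase', nstations_max=30,
#         excluded_stations=['CS001HBA0'], search_full_tec_range=False)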
Example #47
pl.figure(figsize=(10,5));

# Minor axis
pl.subplot(121);
pl.title('Minor Axis');
x_minor, y_minor = blib.imslice(myimage=myimage, v='minor', theta=theta, x0=x0, y0=y0, D=D);
pl.plot(x_minor, y_minor, color='black', ls='-', lw=1, label='Minor-Axis');
if options.fit.upper()=='TRUE':
	hm_minor, f_minor, xf_minor, yf_minor = blib.fwhm_2gauss(x_minor, y_minor);
	pl.plot(xf_minor, yf_minor, 'k--', label='Fit')
	pl.hlines(hm_minor, -f_minor/2, f_minor/2, color='gray', label='FWHM-Minor');
else:
	hm_minor, f_minor = blib.fwhm(x_minor, y_minor);
	pl.hlines(hm_minor, -f_minor/2, f_minor/2, color='gray', label='FWHM-Minor');

pl.xlim(pl.amin(x_minor), pl.amax(x_minor));
pl.ylim(pl.amin(y_minor)-0.1, pl.amax(y_minor)+0.1);

# Major Axis
pl.subplot(122);
pl.title('Major Axis')
x_major, y_major =  blib.imslice(myimage=myimage, v='major', theta=theta, x0=x0, y0=y0, D=D);
pl.plot(x_major, y_major, color='black', ls='-', lw=1, label='Major-Axis');
hm_major, f_major, xf_major, yf_major = blib.fwhm_2gauss(x_major, y_major);
if options.fit.upper()=='TRUE':
	pl.plot(xf_major, yf_major, 'k--', label='Fit')
	pl.hlines(hm_major, -f_major/2, f_major/2, color='gray', label='FWHM-Major');
	pl.xlim(pl.amin(x_major), pl.amax(x_major));
else:
	hm_major, f_major = blib.fwhm(x_major, y_major);
	pl.hlines(hm_major, -f_major/2, f_major/2, color='gray', label='FWHM-Major');
Example #48
            H = listener.H.copy()
            A = listener.A.copy()
            B = listener.B.copy()

            # get vertices
            vert = []
            for i in xrange(7):
                for j in xrange(i + 1, 8):  #xrange(8):
                    #if i != j:
                    for u in [A[i], B[i]]:
                        for v in [A[j], B[j]]:
                            # intersection of Ai = Hi.x and Bj = Hj.x
                            x = pl.dot(pl.inv(H[[i, j], :]), pl.array([u, v]))
                            # check constraints: A <= H.x <= B
                            if pl.amin(pl.dot(H, x) - A) >= -1e-6 and pl.amax(
                                    pl.dot(H, x) - B) <= 1e-6:
                                vert.append(x.reshape(2).copy())
            print('the number of vertices', len(vert))

            # continue only if enough vertices
            if len(vert) > 2:
                ax.clear()
                inter = inter + 1
                vert_uns = pl.array(vert + [vert[0]])

                xm, xM = pl.amin(vert_uns[:, 0]), pl.amax(vert_uns[:, 0])
                ym, yM = pl.amin(vert_uns[:, 1]), pl.amax(vert_uns[:, 1])
                ax.set_xlim(xm - 0.05 * (xM - xm), xM + 0.05 * (xM - xm))
Example #49
print verifdates
levs = levels[:]
lats = latitudes[:]
lons = longitudes[:]
lons, lats = meshgrid(lons,lats)

# unpack 2-meter temp forecast data.

t2mvar = data['tmp2m']
missval = t2mvar.missing_value
t2m = t2mvar[:,:,:]
if missval < 0:
    t2m = ma.masked_values(where(t2m>-1.e20,t2m,1.e20), 1.e20)
else:
    t2m = ma.masked_values(where(t2m<1.e20,t2m,1.e20), 1.e20)
t2min = amin(t2m.compressed()); t2max = amax(t2m.compressed())
print t2min,t2max
clevs = frange(around(t2min/10.)*10.-5.,around(t2max/10.)*10.+5.,4)
print clevs[0],clevs[-1]
llcrnrlat = 22.0
urcrnrlat = 48.0
latminout = 22.0
llcrnrlon = -125.0
urcrnrlon = -60.0
standardpar = 50.0
centerlon=-105.
# create Basemap instance for Lambert Conformal Conic projection.
m = Basemap(llcrnrlon=llcrnrlon,llcrnrlat=llcrnrlat,
            urcrnrlon=urcrnrlon,urcrnrlat=urcrnrlat,
            rsphere=6371200.,
            resolution='l',area_thresh=5000.,projection='lcc',