Example No. 1
    def getCompArray(self, datasetA, datasetB, plot):
        warnings.filterwarnings("error")
        aggregateDataA, offScreen = datasetA.getAggregateData()
        aggregateDataB, offScreen = datasetB.getAggregateData()
        results = []
        # get x, y magnitude of difference between sets, and significance
        for i in range(self.params['gridWidth']):
            for j in range(self.params['gridHeight']):
                # get two arrays for given plot
                setA = aggregateDataA[i][j].getResult(plot)
                setB = aggregateDataB[i][j].getResult(plot)
                # only compare if mean counts of both are greater than one 
                if st.nanmean(aggregateDataA[i][j].getResult(0)) > 1 or st.nanmean(aggregateDataB[i][j].getResult(0)) > 1:
                    # print str(i) + ", " + str(j) + ":  " + str(st.nanmean(setA))
                    try:
                        mww_z, p = stats.ranksums(setA, setB)
                    except UserWarning:
                        p = numpy.nan

                    results.append((st.nanmean(setA), st.nanmean(setB), p))
                else:
                    # print str(i) + ", " + str(j) + ":  " + str(0)
                    results.append((numpy.nan, numpy.nan, numpy.nan))
                    
        return results
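Note: st.nanmean in these snippets is scipy.stats.nanmean, which was deprecated in SciPy 0.15 and removed in 0.17; numpy.nanmean is the direct replacement. Below is a minimal, self-contained sketch of the per-cell comparison above using the current functions. The helper name, the threshold on the sample means (the original tests a separate count result), and the synthetic arrays are made up for illustration.

import numpy as np
from scipy import stats

def compare_cell(setA, setB, min_mean=1.0):
    # Roughly what getCompArray stores for one grid cell: NaN-aware means
    # of both samples plus a Wilcoxon rank-sum p-value.
    if not (np.nanmean(setA) > min_mean or np.nanmean(setB) > min_mean):
        return (np.nan, np.nan, np.nan)
    _, p = stats.ranksums(setA[~np.isnan(setA)], setB[~np.isnan(setB)])
    return (np.nanmean(setA), np.nanmean(setB), p)

setA = np.array([2.0, 3.0, np.nan, 4.0])
setB = np.array([5.0, 6.0, 7.0, np.nan])
print(compare_cell(setA, setB))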
Example No. 2
    def getCompArray(self, datasetA, datasetB, plot):
        warnings.filterwarnings("error")
        aggregateDataA, offScreen = datasetA.getAggregateData()
        aggregateDataB, offScreen = datasetB.getAggregateData()
        results = []
        # get x, y magnitude of difference between sets, and significance
        for i in range(self.params['gridWidth']):
            for j in range(self.params['gridHeight']):
                # get two arrays for given plot
                setA = aggregateDataA[i][j].getResult(plot)
                setB = aggregateDataB[i][j].getResult(plot)
                # only compare if mean counts of both are greater than one
                if st.nanmean(
                        aggregateDataA[i][j].getResult(0)) > 1 or st.nanmean(
                            aggregateDataB[i][j].getResult(0)) > 1:
                    # print str(i) + ", " + str(j) + ":  " + str(st.nanmean(setA))
                    try:
                        mww_z, p = stats.ranksums(setA, setB)
                    except UserWarning:
                        p = numpy.nan

                    results.append((st.nanmean(setA), st.nanmean(setB), p))
                else:
                    # print str(i) + ", " + str(j) + ":  " + str(0)
                    results.append((numpy.nan, numpy.nan, numpy.nan))

        return results
Example No. 3
    def generateMplotStats(self,
                           dataSetA,
                           dataSetB,
                           plot,
                           pairwise=False,
                           sig=0.05):
        # catch warnings - eg we may not have enough samples for wilcoxon test
        warnings.filterwarnings("error")
        aggregateDataA, offScreen = dataSetA.getAggregateData()
        aggregateDataB, offScreen = dataSetB.getAggregateData()
        differenceFound = False
        xvalues = []
        yvalues = []
        sizes = []
        pvals = []
        differenceNum = 0
        # get x, y magnitude of difference between sets, and significance
        for j in range(self.params['gridHeight']):
            for i in range(self.params['gridWidth']):
                # get two arrays for given plot
                setA = aggregateDataA[i][j].getResult(plot)
                setB = aggregateDataB[i][j].getResult(plot)
                # only compare if mean counts of both are greater than one
                if st.nanmean(
                        aggregateDataA[i][j].getResult(0)) > 0.5 or st.nanmean(
                            aggregateDataB[i][j].getResult(0)) > 0.5:
                    try:
                        if pairwise:
                            wilcoxon_t, p = stats.wilcoxon(setA, setB)
                        else:
                            mww_z, p = stats.ranksums(setA, setB)
                    except UserWarning:
                        p = 1
                        print("can't do stats on " + str(i) + " " + str(j))

                    xvalues.append(i)
                    yvalues.append(j)
                    # now work out difference to illustrate scale of difference
                    # given as proportion of the bigger number
                    if st.nanmean(setA) > st.nanmean(setB):
                        size = (st.nanmean(setA) -
                                st.nanmean(setB)) / st.nanmean(setA)
                        sizes.append(500 * size * size)
                        pvals.append(1 - p)
                    else:
                        size = (st.nanmean(setB) -
                                st.nanmean(setA)) / st.nanmean(setB)
                        sizes.append(500 * size * size)
                        pvals.append(p - 1)
                    # print str(i) + " " + str(j) + " " + str(p)
                    if p < sig:
                        differenceFound = True
                        differenceNum += 1

        return differenceFound, {
            'x': xvalues,
            'y': yvalues,
            's': sizes,
            'p': pvals
        }, differenceNum
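The marker-size and signed p-value encoding in generateMplotStats can be shown in isolation. A small sketch with made-up means and p-value; the helper name is hypothetical:

def bubble(meanA, meanB, p):
    # Marker area grows with the squared relative difference (as a fraction
    # of the larger mean); the sign of the stored value says which set was larger.
    bigger = max(meanA, meanB)
    size = abs(meanA - meanB) / bigger
    area = 500 * size * size
    signed_p = (1 - p) if meanA > meanB else (p - 1)
    return area, signed_p

print(bubble(4.0, 2.0, 0.03))   # (125.0, 0.97)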
Example No. 4
    def printBoxData(self, datasets, boxCoord, plot):
        print "Box " + str(boxCoord)
        means = []
        print "Mean, StdDev, n"
        for ds in datasets:
            alldata = ds.getAggregateDataAsArray(plot)
            boxdata = alldata[boxCoord[0]][boxCoord[1]]
            means.append(st.nanmean(boxdata))
            print str(st.nanmean(boxdata)) + ", " + str(numpy.std(boxdata)) + ", " + str(len(boxdata))
        print "-----"
        print str(st.nanmean(means)) + ", " + str(numpy.std(means)) + ", " + str(len(means))

        for i in range(len(datasets)):
            dsA = datasets[i]
            alldata = dsA.getAggregateDataAsArray(plot)
            boxdata = alldata[boxCoord[0]][boxCoord[1]]
            for j in range(len(datasets))[i+1:]:
                dsB = datasets[j]
                alldataB = dsB.getAggregateDataAsArray(plot)
                boxdataB = alldataB[boxCoord[0]][boxCoord[1]]
                try:
                    mww_z, p = stats.ranksums(boxdata, boxdataB)
                except UserWarning:
                    p = 1

                if p <= 0.05:
                    print "Difference between " + dsA.label + " and " + dsB.label + ".  p = " + str(p)
                else:
                    print "Nothing between " + dsA.label + " and " + dsB.label + "(p=" + str(p) + ")"
Example No. 5
    def printBoxData(self, datasets, boxCoord, plot):
        print "Box " + str(boxCoord)
        means = []
        print "Mean, StdDev, n"
        for ds in datasets:
            alldata = ds.getAggregateDataAsArray(plot)
            boxdata = alldata[boxCoord[0]][boxCoord[1]]
            means.append(st.nanmean(boxdata))
            print str(st.nanmean(boxdata)) + ", " + str(
                numpy.std(boxdata)) + ", " + str(len(boxdata))
        print "-----"
        print str(st.nanmean(means)) + ", " + str(
            numpy.std(means)) + ", " + str(len(means))

        for i in range(len(datasets)):
            dsA = datasets[i]
            alldata = dsA.getAggregateDataAsArray(plot)
            boxdata = alldata[boxCoord[0]][boxCoord[1]]
            for j in range(len(datasets))[i + 1:]:
                dsB = datasets[j]
                alldataB = dsB.getAggregateDataAsArray(plot)
                boxdataB = alldataB[boxCoord[0]][boxCoord[1]]
                try:
                    mww_z, p = stats.ranksums(boxdata, boxdataB)
                except UserWarning:
                    p = 1

                if p <= 0.05:
                    print "Difference between " + dsA.label + " and " + dsB.label + ".  p = " + str(
                        p)
                else:
                    print "Nothing between " + dsA.label + " and " + dsB.label + "(p=" + str(
                        p) + ")"
Example No. 6
def reportCreate(data, paramDict):
    report = copy.deepcopy(paramDict)
    setKeys = data["DataSets"].keys()
    # Order all Mod first, then all Org
    setKeys.sort()
    bestRes = ""
    start = 0
    end = len(setKeys)
    middle = end / 2
    i = start
    while i < end / 2:
        # Calculate Score
        modBs = np.array(data["DataSets"][setKeys[i]])
        obsBs = np.array(data["DataSets"][setKeys[middle]])
        modBsmean = nanmean(modBs)
        obsBsmean = nanmean(obsBs)
        obsBsMinModBs = obsBs - modBs
        obsBsMinMean = obsBs - obsBsmean
        SSres = np.nansum(obsBsMinModBs ** 2)
        SStot = np.nansum(obsBsMinMean ** 2)
        ResNorm = SSres ** 0.5
        if i == 0:
            bestRes = copy.copy(ResNorm)
        report[(setKeys[i] + "_RN")] = ResNorm  # Norm of residuals
        i = i + 1
        middle = middle + 1
    return report, bestRes
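The score computed in the loop is the norm of the residuals between an observed and a modelled series, with NaNs ignored. A short worked sketch with made-up data:

import numpy as np

mod = np.array([1.0, 2.0, 3.0, 4.0])        # modelled values
obs = np.array([1.5, np.nan, 2.5, 4.5])     # observed values, one missing

SSres = np.nansum((obs - mod) ** 2)         # residual sum of squares, NaNs ignored
ResNorm = SSres ** 0.5                      # norm of residuals stored in the report
print(ResNorm)                              # ~0.866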
Example No. 7
def team_rank(prev_rank_o,prev_rank_d,raw_o,raw_d,A,gp):
    alpha = 0.2

    raw_avg_all_o = st.nanmean(raw_o)
    raw_avg_all_d = st.nanmean(raw_d)

    raw_o = xnan(raw_o)
    raw_d = xnan(raw_d)

    #initialize to raw efficiency
    adj_avg_o = raw_o
    adj_avg_d = raw_d
    #print adj_avg_o

    #print raw_avg_all_o
    #return None
    #print dot(A,gp).T
    cnt = 0
    r_off = 1
    r_def = 1
    while cnt < 100 and not (r_off < 0.001 and r_def < 0.001):
        adj_avg_o_prev = adj_avg_o
        adj_avg_d_prev = adj_avg_d

        adj_avg_o_opp = np.divide(np.dot(A,adj_avg_o_prev),gp)
        adj_avg_d_opp = np.divide(np.dot(A,adj_avg_d_prev),gp)

        '''d_off = np.divide(adj_avg_o_prev,gp)
        d_def = np.divide(adj_avg_d_prev,gp)

        adj_avg_o_opp = np.dot(A,xnan(d_off))
        adj_avg_d_opp = np.dot(A,xnan(d_def))'''
        #print 'adj opp',adj_avg_o_opp, adj_avg_d_opp

        adj_o = raw_avg_all_o*(np.divide(raw_o,adj_avg_d_opp))
        adj_d = raw_avg_all_d*(np.divide(raw_d,adj_avg_o_opp))
        #print 'adj', adj_o, adj_d

        adj_avg_o = prev_rank_o*(1-alpha)+adj_o*alpha
        adj_avg_d = prev_rank_d*(1-alpha)+adj_d*alpha

        r_off = np.linalg.norm(xnan(adj_avg_o_prev - adj_avg_o))
        r_def = np.linalg.norm(xnan(adj_avg_d_prev - adj_avg_d))

        #print r_off, r_def
        #print 'count = %i' % cnt,
        #print adj_avg_o, adj_avg_d

        cnt += 1
    #print adj_avg_o, adj_avg_d
    for k in range(len(adj_avg_o)):
        if np.isnan(adj_avg_o[k]):
            adj_avg_o[k] = prev_rank_o[k]
        if np.isnan(adj_avg_d[k]):
            adj_avg_d[k] = prev_rank_d[k]
    return adj_avg_o, adj_avg_d
Example No. 8
def average_ams(model, X_train, X_test, Y_train, Y_test, W_train, W_test, cuts,
                prob, bm):

    ams_train_list = []
    ams_test_list = []
    for i in cuts:
        ams_train, ams_test = ams_score(model, X_train, X_test, Y_train,
                                        Y_test, W_train, W_test, i, prob, bm)
        ams_train_list.append(ams_train)
        ams_test_list.append(ams_test)
    return nanmean(ams_train_list), nanmean(ams_test_list)
Example No. 9
def all_means(cp):
	"""Function accepts a cp object and returns a mean and standard deviation of all
	parameters for all fingerprints in this form:
	list of (feature, mu, std) for each feature
	"""
	data = []
	for parameter_pair in zip(*[finger.items() for finger in cp.fingerprints()]):
		parameter, values = zip(*parameter_pair)
		print parameter, values
		print st.nanmean(values), st.nanstd(values)
		data.append([parameter[0], st.nanmean(values), st.nanstd(values)])
	return data
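The zip(*...) call transposes the per-fingerprint (parameter, value) lists into one tuple per parameter, so the mean and standard deviation are taken across fingerprints. A minimal sketch with two hypothetical fingerprints (plain lists stand in for the finger.items() calls); numpy.nanmean/nanstd replace the removed scipy.stats versions:

import numpy as np

fp1 = [('area', 10.0), ('perimeter', 4.0)]
fp2 = [('area', 12.0), ('perimeter', np.nan)]

for parameter_pair in zip(fp1, fp2):            # same parameter from every fingerprint
    parameter, values = zip(*parameter_pair)    # e.g. ('area', 'area'), (10.0, 12.0)
    print(parameter[0], np.nanmean(values), np.nanstd(values))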
Example No. 10
def team_rank2(alpha_raw,beta_raw, ind_mat, alpha_pre, beta_pre ,W):
    #alpha = 0.2

    #initialize to raw efficiency
    alpha_adj = np.array([np.average(alpha_raw[:,k]) for k in range(alpha_raw.shape[1])])
    beta_adj = np.array([np.average(beta_raw[:,k]) for k in range(beta_raw.shape[1])])

    #alpha_adj_all = st.nanmean(alpha_adj)
    #beta_adj_all = st.nanmean(beta_adj)

    alpha_raw_all = st.nanmean(alpha_adj)
    beta_raw_all = st.nanmean(beta_adj)
    print alpha_raw_all, beta_raw_all

    #print raw_avg_all_o
    #return None
    #print dot(A,gp).T
    cnt = 0
    r_off = 1
    r_def = 1
    while cnt < 10 and not (r_off < 0.001 and r_def < 0.001):


        alpha_adj_prev = alpha_adj
        beta_adj_prev = beta_adj

        #test1 = [sum(np.append(alpha_pre[k],np.multiply(np.true_divide(alpha_raw[:,k],beta_adj_prev[np.array([0,1])]),1))*W[:,k]) for k in range(alpha_raw.shape[1])]
        #print test1
        alpha_adj = [sum(np.append(alpha_pre[k],np.multiply(np.true_divide(alpha_raw[:,k],beta_adj_prev[ind_mat[:,k]]),alpha_raw_all))*W[:,k]) for k in range(alpha_raw.shape[1])]
        #alpha_adj = [sum(np.multiply(np.true_divide(alpha_raw[:,k],beta_adj_prev[ind_mat[:,k]]),alpha_adj_all)*W[:,k]) for k in range(alpha_raw.shape[1])]

        alpha_adj = np.array(alpha_adj)
        #print alpha_adj

        beta_adj = [sum(np.append(beta_pre[k],np.multiply(np.true_divide(beta_raw[:,k],alpha_adj_prev[ind_mat[:,k]]),beta_raw_all))*W[:,k]) for k in range(alpha_raw.shape[1])]
        #beta_adj = [sum(np.multiply(np.true_divide(beta_raw[:,k],alpha_adj_prev[ind_mat[:,k]]),beta_adj_all)*W[:,k]) for k in range(alpha_raw.shape[1])]

        beta_adj = np.array(beta_adj)

        r_off = np.linalg.norm(xnan(alpha_adj_prev - alpha_adj))
        r_def = np.linalg.norm(xnan(beta_adj_prev - beta_adj))
        print alpha_adj, beta_adj
        cnt += 1
    #print adj_avg_o, adj_avg_d
    '''for k in range(len(adj_avg_o)):
        if np.isnan(adj_avg_o[k]):
            adj_avg_o[k] = prev_rank_o[k]
        if np.isnan(adj_avg_d[k]):
            adj_avg_d[k] = prev_rank_d[k]'''
    return alpha_adj, beta_adj
Example No. 11
def plot_ensemble_mean_vectors(adcp,
                               fig=None,
                               title=None,
                               n_vectors=50,
                               return_panel=False):
    """
    Generates a QPanel, plotting mean uv velocity vectors in the x-y plane.
    Inputs:
        adcp = ADCPData object
        fig = input figure number [integer or None]
        title = figure title text [string or None]
        n_vectors = desired number of vectors [integer]
        return_panel = optionally return the QPanel instead of the figure
    Returns:
        fig = matplotlib figure object, or
        vectors = QPanel object
    """
    dude = np.zeros((adcp.n_ensembles, 2), np.float64)
    velocity = adcp.get_unrotated_velocity()
    # this doesn't factor in depth; it may integrate bad values if they have not been filtered into NaNs somehow
    dude[:, 0] = sp.nanmean(velocity[:, :, 0], axis=1)
    dude[:, 1] = sp.nanmean(velocity[:, :, 1], axis=1)
    vectors = QPanel(velocity=dude,
                     u_vecs=n_vectors,
                     arrow_color='k',
                     title=title,
                     units='m/s')
    if adcp.xy is not None:
        vectors.x = adcp.xy[:, 0]
        vectors.y = adcp.xy[:, 1]
        vectors.xlabel = 'm'
        vectors.ylabel = 'm'
        vectors.equal_axes = True
    elif adcp.lonlat is not None:
        vectors.x = adcp.lonlat[:, 0]
        vectors.y = adcp.lonlat[:, 1]
        vectors.xy_is_lonlat = True
    else:
        vectors.x = adcp.mtime
        vectors.y = np.zeros(np.size(vectors.x))
        vectors.x_is_mtime = True
    if return_panel:
        return vectors
    else:
        fig = get_fig(fig)
        vectors.plot()
        plt.tight_layout()
        return fig
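The two nanmean calls above produce a depth-averaged u and v per ensemble by averaging over the bin axis while skipping flagged (NaN) bins. A tiny sketch with a synthetic velocity array (dimensions and values are made up):

import numpy as np

velocity = np.ones((3, 4, 2))        # (n_ensembles, n_bins, u/v)
velocity[0, 2, :] = np.nan           # one bad bin flagged as NaN

dude = np.zeros((3, 2))
dude[:, 0] = np.nanmean(velocity[:, :, 0], axis=1)   # depth-averaged u per ensemble
dude[:, 1] = np.nanmean(velocity[:, :, 1], axis=1)   # depth-averaged v per ensemble
print(dude)                                          # all ones; the NaN bin is ignored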
Example No. 12
 def getBoxData(self, dataset, plot, coords):
     alldata = dataset.getAggregateDataAsArray(plot)
     box = alldata[coords[0]][coords[1]]
     mean = st.nanmean(box)
     median = st.nanmedian(box)
     sigma = numpy.std(box)
     return box, (mean, median, sigma)
Example No. 13
    def writeBoxesToCsvFile(self, dataset, filename, plot):
        boxArray = dataset.getAggregateDataAsArray(plot)
        outFile = open(filename, 'w')
        outFile.write("Means:\n")
        #print("Means:\n")
        for j in reversed(range(self.params['gridHeight'])):
            dataline = ""
            for i in range(self.params['gridWidth']):
                dataline += str(st.nanmean(boxArray[i][j]))
                dataline += ","
            #print dataline
            dataline += "\n"
            outFile.write(dataline)
        
        outFile.write("\nStandardDeviations:\n")
        #print("\nStandardDeviations:\n")
        for j in reversed(range(self.params['gridHeight'])):
            dataline = ""
            for i in range(self.params['gridWidth']):
                dataline += str(numpy.std(boxArray[i][j]))
                dataline += ","
            #print dataline
            dataline += "\n"
            outFile.write(dataline)
 
        outFile.close()
Example No. 14
def cinfo(CL,param):
    """ This property returns information on the parameter in the cloud (all given in the units of the parameter). Note that the parameter is averaged over the entire cloud time at the altitude required (bottom, top or in-cloud) - not the case using vpinfo(CL,param).
        CloudObj.cinfo["bottom"]: param at the cloud base
        CloudObj.cinfo["top"]: param at the cloud top
        CloudObj.cinfo["mean"]: mean param through the cloud (in cloud)
        CloudObj.cinfo["median"]: median param through the cloud (in cloud)
        CloudObj.cinfo["stdev"]: standard deviation of the param through the cloud (in cloud)
        CloudObj.cinfo["delta"]: difference of param between the bottom and the top
        CloudObj.cinfo["slope"]: delta divided by the mean thickness
        The property can be accessed as e.g. CloudObj.cinfo["bottom"] or CloudObj.cinfo (dictionary) """
    H=dict()
    H["bottom"]=list(); H["top"]=list(); H["mean"]=list(); H["median"]=list(); H["stdev"]=list(); H["delta"]=list(); H["slope"]=list(); H["units"]=list(); 
    alt=[i for i,x in enumerate(CL.dttl) if x == 'altitude'][0]
    T=[i for i,x in enumerate(CL.dttl) if x == param][0]
    try:
        for i in range(len(CL.props["height"])):
            ix=nonzero((CL.data[alt]>=CL.props["height"][i][1])*(CL.data[alt]<=CL.props["height"][i][2]))
            H["bottom"].append(float(st.nanmedian(CL.data[T][nonzero((CL.data[alt]>=CL.props["height"][i][0])*(CL.data[alt]<=CL.props["height"][i][1]))])))
            H["top"].append(float(st.nanmedian(CL.data[T][nonzero((CL.data[alt]>=CL.props["height"][i][2])*(CL.data[alt]<=CL.props["height"][i][3]))])))
            H["mean"].append(float(st.nanmean(CL.data[T][ix])))
            H["median"].append(float(st.nanmedian(CL.data[T][ix])))
            H["stdev"].append(float(st.nanstd(CL.data[T][ix])))
            H["delta"].append(H["bottom"][i]-H["top"][i])
            H["slope"].append(H["delta"][i]/(np.mean([CL.props["height"][i][2], CL.props["height"][i][3]])-np.mean([CL.props["height"][i][0], CL.props["height"][i][1]])))     # units/meter
            H["units"].append(CL.dunit[T])
            del ix
    except: print("[cinfo] Height properties must be defined first using the defheight method.")
    return H
Example No. 15
 def getBoxData(self, dataset, plot, coords):
     alldata = dataset.getAggregateDataAsArray(plot)
     box = alldata[coords[0]][coords[1]]
     mean = st.nanmean(box)
     median = st.nanmedian(box)
     sigma = numpy.std(box)
     return box, (mean, median, sigma)
Example No. 16
def load_roi(nii, thresh=6000, standardize=True, outlen=None, padding=np.mean, meantc=False):
    """Load a nifti file and apply several functions to it.

    nii -- name of nifti to load or ndarray
    standardize -- return all tcs with mean = 0 and stdev = 1
    outlen -- specify how long resultant tcs should be, pad if necessary
    padding -- function to apply to ndarray, must take axis=-1 argument
    meantc -- return the meantc over all voxels (before evaluating standardize)

    TODO: fix default outlen arg

    """
    if not hasattr(nii, 'shape'): nii = nib.load(nii).get_data()
    nii[nii.mean(axis=-1) < thresh] = np.nan

    if meantc: out = nanmean(nii.reshape([-1, nii.shape[-1]]), axis=0)  #USE apply_over_axes instead?
    else: out = nii

    if standardize: out = (out - out.mean(axis=-1)[..., np.newaxis]) / out.std(axis=-1, ddof=1)[..., np.newaxis] #this is unruly, use the current standardize func
    outdiff = outlen - out.shape[-1]
    if outdiff >= 0:
        if hasattr(padding, 'func_name'): padding = padding(out, axis=-1)[..., np.newaxis]    #call padding if it is a function
        out = np.concatenate([out] + [padding]*outdiff, axis=-1)
    else: 
        out = out[..., :outlen] 
    return out
Example No. 17
    def writeBoxesToCsvFile(self, dataset, filename, plot):
        boxArray = dataset.getAggregateDataAsArray(plot)
        outFile = open(filename, 'w')
        outFile.write("Means:\n")
        #print("Means:\n")
        for j in reversed(range(self.params['gridHeight'])):
            dataline = ""
            for i in range(self.params['gridWidth']):
                dataline += str(st.nanmean(boxArray[i][j]))
                dataline += ","
            #print dataline
            dataline += "\n"
            outFile.write(dataline)

        outFile.write("\nStandardDeviations:\n")
        #print("\nStandardDeviations:\n")
        for j in reversed(range(self.params['gridHeight'])):
            dataline = ""
            for i in range(self.params['gridWidth']):
                dataline += str(numpy.std(boxArray[i][j]))
                dataline += ","
            #print dataline
            dataline += "\n"
            outFile.write(dataline)

        outFile.close()
Example No. 18
    def generateMplotStats(self, dataSetA, dataSetB, plot, pairwise=False, sig=0.05):
        # catch warnings - eg we may not have enough samples for wilcoxon test
        warnings.filterwarnings("error")
        aggregateDataA, offScreen = dataSetA.getAggregateData()
        aggregateDataB, offScreen = dataSetB.getAggregateData()
        differenceFound = False
        xvalues = []
        yvalues = []
        sizes = []
        pvals = []
        differenceNum=0
        # get x, y magnitude of difference between sets, and significance
        for j in range(self.params['gridHeight']):
            for i in range(self.params['gridWidth']):
                # get two arrays for given plot
                setA = aggregateDataA[i][j].getResult(plot)
                setB = aggregateDataB[i][j].getResult(plot)
                # only compare if mean counts of both are greater than one 
                if st.nanmean(aggregateDataA[i][j].getResult(0)) > 0.5 or st.nanmean(aggregateDataB[i][j].getResult(0)) > 0.5:
                    try:
                        if pairwise:
                            wilcoxon_t, p = stats.wilcoxon(setA, setB)
                        else:
                            mww_z, p = stats.ranksums(setA, setB)
                    except UserWarning:
                        p = 1
                        print("can't do stats on " + str(i) + " " + str(j))

                    xvalues.append(i)
                    yvalues.append(j)
                    # now work out difference to illustrate scale of difference
                    # given as proportion of the bigger number
                    if st.nanmean(setA) > st.nanmean(setB):
                        size = (st.nanmean(setA) - st.nanmean(setB))/st.nanmean(setA)
                        sizes.append(500*size*size)
                        pvals.append(1-p)
                    else:
                        size = (st.nanmean(setB) - st.nanmean(setA))/st.nanmean(setB)
                        sizes.append(500*size*size)
                        pvals.append(p-1)
                    # print str(i) + " " + str(j) + " " + str(p)
                    if p < sig:
                        differenceFound = True
                        differenceNum+=1
           
        return differenceFound, {'x': xvalues, 'y':yvalues, 's':sizes, 'p':pvals} , differenceNum
Example No. 19
def plot_ensemble_mean_vectors(adcp,fig=None,title=None,n_vectors=50,return_panel=False):
    """
    Generates a QPanel, plotting mean uv velocity vectors in the x-y plane.
    Inputs:
        adcp = ADCPData object
        fig = input figure number [integer or None]
        title = figure title text [string or None]
        n_vectors = desired number of vectors [integer]
        return_panel = optionally return the QPanel instead of the figure
    Returns:
        fig = matplotlib figure object, or
        vectors = QPanel object
    """
    dude = np.zeros((adcp.n_ensembles,2),np.float64)
    velocity = adcp.get_unrotated_velocity()
    # this doesn't factor in depth; it may integrate bad values if they have not been filtered into NaNs somehow
    dude[:,0] = sp.nanmean(velocity[:,:,0],axis=1)
    dude[:,1] = sp.nanmean(velocity[:,:,1],axis=1)
    vectors = QPanel(velocity = dude,
                      u_vecs = n_vectors,
                      arrow_color = 'k',
                      title = title,
                      units = 'm/s')
    if adcp.xy is not None:        
        vectors.x = adcp.xy[:,0]
        vectors.y = adcp.xy[:,1]
        vectors.xlabel = 'm'
        vectors.ylabel = 'm'
        vectors.equal_axes = True
    elif adcp.lonlat is not None:
        vectors.x = adcp.lonlat[:,0]
        vectors.y = adcp.lonlat[:,1]
        vectors.xy_is_lonlat = True
    else:
        vectors.x = adcp.mtime
        vectors.y = np.zeros(np.size(vectors.x))
        vectors.x_is_mtime = True
    if return_panel:
        return vectors
    else:                  
        fig = get_fig(fig)
        vectors.plot()
        plt.tight_layout()
        return fig
Example No. 20
def fit_spec_poly5(xData, yData, dyData, order=5):
    """
    Fit a 5th order polynomial to a spectrum. To avoid overflow errors the
    X-axis data should not be large numbers (e.g.: x10^9 Hz; use GHz instead).
    """

    # Lower order limit is a line with slope
    if order < 1:
        order = 1
    if order > 5:
        order = 5

    # Estimate starting coefficients
    C1 = nanmean(np.diff(yData)) / nanmedian(np.diff(xData))
    ind = int(np.median(np.where(~np.isnan(yData))))
    C0 = yData[ind] - (C1 * xData[ind])
    C5 = 0.0
    C4 = 0.0
    C3 = 0.0
    C2 = 0.0
    inParms = [{
        'value': C5,
        'parname': 'C5'
    }, {
        'value': C4,
        'parname': 'C4'
    }, {
        'value': C3,
        'parname': 'C3'
    }, {
        'value': C2,
        'parname': 'C2'
    }, {
        'value': C1,
        'parname': 'C1'
    }, {
        'value': C0,
        'parname': 'C0'
    }]

    # Set the polynomial order
    for i in range(len(inParms)):
        if len(inParms) - i - 1 > order:
            inParms[i]['fixed'] = True
        else:
            inParms[i]['fixed'] = False

    # Function to evaluate the difference between the model and data.
    # This is minimised in the least-squared sense by the fitter
    def errFn(p, fjac=None):
        status = 0
        return status, (poly5(p)(xData) - yData) / dyData

    # Use mpfit to perform the fitting
    mp = mpfit(errFn, parinfo=inParms, quiet=True)
    return mp
Example No. 21
def produce_stats(data):
    m = {}
    for (server, size, conns, it, time, rate) in data:
        map_add(m, (server, size, conns), rate)
    data = []
    for k, v in m.items():
        mean = stats.nanmean(v)
        stddev = stats.nanstd(v)
        data += [k + (mean, stddev)]
    return data
Example No. 22
def compute_average(nh=10, lr_num=10, lr_denum=1000, prefix='rbm', smoothing=True):
    cmd = "grep -rl --include='orig.conf' 'lr_num = %i$' . |" % lr_num +\
          "xargs grep 'lr_denum = %i$' " % lr_denum
    print cmd

    p = os.popen(cmd)
    numseeds = len([pi for pi in enumerate(p)])
    
    p = os.popen(cmd)
    x = numpy.ones((numseeds, 20)) * numpy.nan
    y = numpy.ones((numseeds, 20)) * numpy.nan

    for i, match in enumerate(p):

        jid = match.split('/')[1]
        rfname = '%s/%s_train_callback.hdf5' % (jid, prefix)
        if not os.path.exists(rfname):
            continue

        fp = tables.openFile(rfname)
        _x = fp.root.train_ll.col('n')
        _y = fp.root.train_ll.col('train_ll')
        _vlogz = fp.root.var_logz.col('var_logz')
        fp.close()

        if smoothing:
            idx = numpy.where(_vlogz < 50.)[0]
            x[i, idx] = _x[idx]
            y[i, idx] = _y[idx]
        else:
            x[i, :len(_x)] = _x
            y[i, :len(_y)] = _y
    
    print '**** prefix=%s nh=%i lr_num=%s lr_denum=%s ******' % (prefix, nh, lr_num, lr_denum)
    print nanmean(y, axis=0)

    xmean = nanmean(x, axis=0)
    ymean = nanmean(y, axis=0)
    ystd  = nanstd(y, axis=0)
    ystd[numpy.isnan(ystd)] = 0.
    idx =  ~numpy.isnan(xmean)
    return [xmean[idx], ymean[idx], ystd[idx]]
Example No. 23
 def aggregate_ftr_matrix(self, ftr_matrix):
     sig = []
     for ftr in ftr_matrix:
         median = stats.nanmedian(ftr)
         mean = stats.nanmean(ftr)
         std = stats.nanstd(ftr)
         # Invalid double scalars warning appears here
         skew = stats.skew(ftr) if any(ftr) else 0.0
         kurtosis = stats.kurtosis(ftr)
         sig.extend([median, mean, std, skew, kurtosis])
     return sig
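The same five-number signature can be computed per feature with NumPy's NaN-aware functions (scipy.stats.nanmedian/nanmean/nanstd were removed in SciPy 0.17). A standalone sketch; the function name and the sample data are made up:

import numpy as np
from scipy import stats

def feature_signature(ftr):
    ftr = np.asarray(ftr, dtype=float)
    median = np.nanmedian(ftr)
    mean = np.nanmean(ftr)
    std = np.nanstd(ftr)
    skew = stats.skew(ftr) if np.any(ftr) else 0.0   # same guard as above
    kurtosis = stats.kurtosis(ftr)
    return [median, mean, std, skew, kurtosis]

print(feature_signature([1.0, 2.0, 3.0, 4.0]))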
Example No. 24
 def aggregate_ftr_matrix(self, ftr_matrix):
     sig = []
     for ftr in ftr_matrix:
         median = stats.nanmedian(ftr)
         mean = stats.nanmean(ftr)
         std = stats.nanstd(ftr)
         # Invalid double scalars warning appears here
         skew = stats.skew(ftr) if any(ftr) else 0.0
         kurtosis = stats.kurtosis(ftr)
         sig.extend([median, mean, std, skew, kurtosis])
     return sig
Example No. 25
def dmso_means(cp):
	"""Function accepts a cp object and returns a mean and standard deviation of all 
	parameters for the dmso fingerprints in this form:
	
	list of (feature, mu, std) for each feature
	"""
	dmsos = [dmso.items() for dmso in get_dmsos(cp)]
	data = []
	for parameter_pair in zip(*dmsos):
		parameter, values = zip(*parameter_pair)
		data.append((parameter[0], st.nanmean(values), st.nanstd(values)))
	return data
Example No. 26
 def getBoxArray(self, dataset, plot):
     alldata = dataset.getAggregateDataAsArray(plot)
     boxes = []
     for row in alldata:
         for resultArray in row:
             # print box
             # resultArray = box.getResults(plot)
             mean = st.nanmean(resultArray)
             sd = numpy.std(resultArray)
             count = len(resultArray) - numpy.isnan(resultArray).sum()
             boxes.append((mean, sd, count))
     return boxes
Example No. 27
 def getBoxArray(self, dataset, plot):
     alldata = dataset.getAggregateDataAsArray(plot)
     boxes = []
     for row in alldata:
         for resultArray in row:
             # print box
             # resultArray = box.getResults(plot)
             mean = st.nanmean(resultArray)
             sd = numpy.std(resultArray)
             count = len(resultArray) - numpy.isnan(resultArray).sum()
             boxes.append((mean, sd, count))
     return boxes
Example No. 28
	def addDescriptives(self):

		"""Adds averages and errors to the PivotMatrix"""

		# Determine the row averages and std
		self.rowMeans = []
		self.rowStds = []
		for rowIndex in range(self.nRows):
			row = self.m[self.rowHeaders+rowIndex][self.colHeaders:-2]
			self.rowMeans.append(nanmean(row, axis=None))
			self.rowStds.append(nanstd(row, axis=None))
			self.m[self.rowHeaders+rowIndex][-2] = nanmean(row, axis=None)
			self.m[self.rowHeaders+rowIndex][-1] = nanstd(row, axis=None)

		# Determine the column averages and std
		_m = self.m.swapaxes(0,1)
		self.colMeans = []
		self.colErrs = []
		for colIndex in range(self.nCols):
			col = _m[self.colHeaders+colIndex][self.rowHeaders:-2]
			_m[self.colHeaders+colIndex][-2] = nanmean(col, axis=None)
			if self.err == '95ci':
				e = nanstd(col, axis=None)/np.sqrt(col.size)*1.96
			elif self.err == 'se':
				e = nanstd(col, axis=None)/np.sqrt(col.size)
			elif self.err == 'std':
				e = nanstd(col, axis=None)
			else:
				raise Exception('Err keyword must be "95ci", "se", or "std"')
			_m[self.colHeaders+colIndex][-1] = e
			self.colMeans.append(nanmean(col, axis=None))
			self.colErrs.append(e)

		# Determine the grand average and std
		self.m[-2,-2] = nanmean(self.m[self.rowHeaders:-2, self.colHeaders:-2], \
			axis=None)
		self.m[-1,-1] = nanstd(self.m[self.rowHeaders:-2, self.colHeaders:-2], \
			axis=None)
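The error term attached to each column depends on the err keyword: the standard deviation, the standard error, or a 95% confidence half-width (1.96 standard errors). A small sketch of just that calculation with NumPy's NaN-aware functions; the helper name and data are made up:

import numpy as np

def column_error(col, err='95ci'):
    se = np.nanstd(col) / np.sqrt(col.size)
    if err == '95ci':
        return 1.96 * se
    if err == 'se':
        return se
    if err == 'std':
        return np.nanstd(col)
    raise Exception('Err keyword must be "95ci", "se", or "std"')

col = np.array([1.0, 2.0, np.nan, 4.0])
print(column_error(col, err='se'))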
Example No. 29
def avsizedist(CL,prof='belowcloud',scan=0,inst='PCASP',filler=0):
    """ Returning method. This method returns the average size distribution for a given instrument and scan type and number. The returned average size distribution is an array with the mean concentration of particles in the first row and the size of the particles in the second. 
    Example: R=CloudObj.avsizedist(prof='abovecloud',scan=1,inst='PCASP') would return in R the average size distribution for the above cloud #1 for the PCASP.
    The defaults are prof='belowcloud',scan=0,inst='PCASP', filler=0
    filler: filling with a given time step (useful for 2d type data) =1 ON, =0 OFF. Time step can be adjusted in the call of the method filler
    Special profile "belowvert" can also be used; this will return the average size distribution below cloud, where a vertical profile was measured."""

    cancel=0
    instn=[i for i,x in enumerate(CL.sd) if x["Distname"].upper()==inst.upper()]
    if len(instn)<=0: cancel=1; raise ValueError("This instrument name has not been found (inst=%s)." % inst)
    if filler==1: sd=samac.filler(CL,CL.times["cloud"][0],inst=inst)
    else: sd=copy.deepcopy(CL.sd[instn[0]])
    if type(sd["time"])==float: cancel=1
    elif len(sd["time"])==0: cancel=1
    if cancel==1: avsd=[]; return avsd
    if prof=='belowvert':
        if len(sd["time"])<=1:      # interpolation cannot take place if there is not at least 2 points to interpolate from
            print("[avsizedist] Only 1 point available: special profile belowvert cannot be used.")
            avsd=[]; return avsd
        ctime=[i for i,x in enumerate(CL.dttl) if x == 'time']; ctime=ctime[0]
        basictime=CL.data[ctime]
        if sum(basictime==sd["time"])==len(basictime): pass      # the instrument shares the timeline of the basic data from which belowvert is calculated
        else:       # the instrument does not share the basic timeline and must be interpolated
            if sum(sd["data"])>0: M=np.min(sd["data"][sd["data"]>0.0])    # minimum non-zero concentration 
            else: M=0
            f=interpolate.RectBivariateSpline(sd["bins"],sd["time"],sd["data"])
            sd["data"]=f(sd["bins"],basictime);  sd["time"]=basictime;
            sd["data"][sd["data"]<0.01*M]=0.0        # all interpolated data smaller than 1% of the minimum.
        R=belowprof(CL)
        try:
            if np.size(R)>0:
                Tix=R[scan]['areaindex']
                concs=sd["data"][:,nonzero(Tix)[0]]
            else: print("[avsizedist] There is no data below cloud under vertical profile #%d." % scan)
        except: print("[avsizedist] The instrument number (%d) may be wrong, or no data corresponds to the profile." % instn[0])            
    else:
        try: 
            T=CL.times[prof][scan]
        except: print("[avsizedist] The profile %s [%d] doesn't exist." % (prof,scan))
        try:
            concs=sd["data"][:,nonzero((sd["time"]>=T[0])*(sd["time"]<=T[1]))[0]]
        except: print("[avsizedist] The instrument number (%d) may be wrong, or no data corresponds to the profile." % instn[0])
    if "concs" in locals():
        if len(sd["time"])==1: avsd=[concs,sd["bins"]]
        else: avsd=np.array([st.nanmean(concs,axis=1), sd["bins"]])
        return avsd
    else: 
        print("[avsizedist] Failed to return the averaged size distribution.")
        avsd=[]; return avsd
Example No. 30
def reportCreate(data, paramDict):
	report = copy.deepcopy(paramDict)
	setKeys = data['DataSets'].keys()
	# Order all Mod first, then all Org
	setKeys.sort()
	start = 0
	end = len(setKeys)
	middle = end/2
	i = start
	while i < end/2:
		# Calculate Score
		modBs = np.array(data['DataSets'][setKeys[i]])
		obsBs = np.array(data['DataSets'][setKeys[middle]])
		modBsmean = nanmean(modBs)
		obsBsmean = nanmean(obsBs)
		obsBsMinModBs = obsBs - modBs
		obsBsMinMean = obsBs - obsBsmean
		SSres = (np.nansum(obsBsMinModBs**2))
		SStot = (np.nansum(obsBsMinMean**2))
		ResNorm = SSres**0.5
		report[(setKeys[i]+'_RN')] = ResNorm # Norm of residuals version
		i = i+1
		middle = middle+1
	return report, ResNorm
Example No. 31
	def collapse(self, keys, vName):

		"""
		desc:
			Collapse the data by a (list of) keys and get statistics on a
			dependent variable.

		arguments:
			keys:
				desc:	A key or list of keys to collapse the data on.
				type:	[list, str, unicode]
			vName:
				desc:	The dependent variable to collapse. Alternatively, you
						can specify a function, in which case the error will be 0.
				type:	[str, unicode, function]

		returns:
			desc:	A DataMatrix with the collapsed data, with the descriptives
					statistics on `vName`.
			type:	DataMatrix
		"""

		if isinstance(keys, basestring):
			keys = [keys]

		m = [keys + ['mean', 'median', 'std', 'se', '95ci', 'count']]
		for g in self.group(keys):
			l = []
			for key in keys:
				l.append(g[key][0])
			if type(vName) == types.FunctionType:
				l.append(vName(g))
				l.append(np.nan)
				l.append(np.nan)
				l.append(np.nan)
				l.append(np.nan)
				l.append(len(g))
			else:
				a = g[vName]
				l.append(nanmean(a))
				l.append(nanmedian(a))
				l.append(nanstd(a))
				l.append(nanstd(a)/np.sqrt(a.size))
				l.append(1.96*nanstd(a)/np.sqrt(a.size))
				l.append(a.size)
			m.append(l)
		return DataMatrix(m)
Example No. 32
def roimask(data, roi, filter_func = None, proc_func = None, mean_ts = False):
    """Mask data using values in roi > 0

    Parameters:
    data -- numpy array to be masked
    roi -- array to be checked for nonzero values
    filter_func -- function to further subset roi (e.g. remove time courses with mean < 6000)
    proc_func -- TODO

    """
    if type(roi) == str: roi = nib.load(roi).get_data()
    if len(roi.shape) > 3: roi = roi[:,:,:,0]
    shapes = roi.shape, data.shape
    if roi.shape[:3] != data.shape[:3]: raise BaseException('roi shape: %s \ndata shape: %s'%shapes)
    roi_indx = np.nonzero(roi)
    roi = data[roi_indx]
    if filter_func: roi[filter_func(roi)] = np.nan
    if mean_ts: roi = nanmean(roi, axis=0)
    return roi
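What roimask does with mean_ts=True can be reproduced with plain NumPy indexing; the sketch below uses synthetic arrays in place of NIfTI files (shapes and values are made up):

import numpy as np

data = np.random.rand(4, 4, 3, 10)      # (x, y, z, time)
roi = np.zeros((4, 4, 3))
roi[1, 2, 0] = 1                        # two "active" voxels
roi[3, 0, 2] = 1

voxels = data[np.nonzero(roi)]          # shape (2, 10): one time course per ROI voxel
mean_tc = np.nanmean(voxels, axis=0)    # what mean_ts=True would return
print(voxels.shape, mean_tc.shape)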
Example No. 33
def run_stats(x,n):
    """run_stats(x,n). Calculates and returns the running mean, median, standard deviation, and median absolute deviation (MAD). This function handles NaNs and masked values (masked arrays) by ignoring them.
    x (input) is the array on which the running statistics are calculated (only one dimension, 1D array).
    n is the number of points taken in the running statistics window."""
    x=copy.deepcopy(x)
    try: x.mask
    except: 
        x=np.ma.array(x,mask=False)

    if len(np.shape(x))>2: raise ValueError("The array provided has more than 2 dimensions, at most 1 or 2 dimensions can be handled.")
    try: [ro,co]=np.shape(x)
    except: ro=np.shape(x)[0]; co=1
    if ro==1 or co==1: 
        ro=max(ro,co)
        x=x.reshape(ro,)
    else: raise ValueError("The array must be a vector (one column or row)")
    # initializing matrix
    M=ones([ro,n])*NaN;
    M=ma.asanyarray(M)
    
    # building matrix
    if n%2==1:       # if n is odd
        for j in range(int(n/2),0,-1):
            posi=int(n/2)-j       # current position
            M[0:ro-j,posi]=x[j:]
        for j in range(1,2+int(n/2),1):
            posi=int(n/2)+j-1;
            M[j-1:,posi]=x[0:(ro+1)-j]
    elif n%2==0:        # if n is even
        for j in range(n/2,0,-1):
            posi=n/2-j
            M[0:ro-j,posi]=x[j:]
        for j in range(1,n/2+1):
            posi=n/2+j-1;
            M[j-1:,posi]=x[0:(ro+1)-j]
    else: print("Well, that's pretty weird. Are you sure n is an integer?")  
    
    M.data[M.mask]=nan
    ave=st.nanmean(M, axis=1);
    med=st.nanmedian(M, axis=1);
    stde=st.nanstd(M, axis=1);
    mad=medabsdev(M,axis=1)
    return [ave, med, stde, mad]
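For reference, the same running mean/standard deviation (centred window of n points, NaN-ignoring) can be written with NumPy's sliding windows. This sketch assumes NumPy >= 1.20 and, unlike run_stats above, simply truncates the edges instead of padding the window with NaNs:

import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

def running_mean_std(x, n):
    win = sliding_window_view(np.asarray(x, dtype=float), n)
    return np.nanmean(win, axis=1), np.nanstd(win, axis=1)

ave, stde = running_mean_std([1.0, 2.0, np.nan, 4.0, 5.0], 3)
print(ave)   # [1.5 3.  4.5]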
Example No. 34
def rasSubras(flist, flistfolder, Afile, outfolder):
    """Subtract one file from others in list
	To use: filnm = rasSubras(flist,flistfolder,Afile,outfolder) """
    # time_one = datetime.now()
    # print time_one.strftime('Start raster subtraction at:%j %H:%M:%S')
    # Get first raster file
    Adata, AXg, AYg, AXg1, AYg1, Arx, Ary, Ademmeta = DemImport(Afile)
    Amean = nanmean(Adata)
    # Get second for creating mask to deal with NaN
    # 	 Bfile = os.path.join(flistfolder,flist[1])
    # 	 Bdata,BXg,BYg,BXg1,BYg1,Brx,Bry,Bdemmeta = DemImport(Bfile)
    # 	 Adata[Adata==Ademmeta[6]]=np.nan
    # 	 Bdata[Bdata==Bdemmeta[6]]=np.nan
    # 	 AdataMasked = np.ma.masked_array(np.nan_to_num(Adata), mask=np.isnan(Adata) & np.isnan(Bdata))
    counter = 1
    print "\n" * 3
    print "*" * 20
    # Go through list of other raster files
    for B in flist:
        print "file ", counter + 1, " of ", len(flist)
        Bfile = os.path.join(flistfolder, B)
        Bdata, BXg, BYg, BXg1, BYg1, Brx, Bry, Bdemmeta = DemImport(Bfile)
        print B, " data type: ", type(Bdata)
        print "Rows (A,B): ", Arx, Brx, " Columns (A,B): ", Ary, Bry
        print "xres (A,B): ", Ademmeta[2][1], Bdemmeta[2][1], " yres (A,B): ", Ademmeta[2][5], Bdemmeta[2][5]
        print "No data (A): ", Ademmeta[6], " No data (B): ", Bdemmeta[6]
        # Check matching resolution
        if Arx != Brx or Ary != Bry:
            print B, " resolution mismatch with ", Afile
            continue
        elif Ademmeta[4] != Bdemmeta[4] or Ademmeta[5] != Bdemmeta[5]:
            print "Size mismatch between ", B, " and ", Afile
            continue
            # Add current file to sum
        Bdata[Bdata == Bdemmeta[6]] = np.nan
        AdataMasked = np.ma.masked_array(np.nan_to_num(Adata), mask=np.isnan(Adata) & np.isnan(Bdata))
        BdataMasked = np.ma.masked_array(np.nan_to_num(Bdata), mask=AdataMasked.mask)
        outdata = (BdataMasked - AdataMasked).filled(np.nan)
        outdata = np.ma.masked_array(np.nan_to_num(outdata), mask=np.isnan(outdata))
        counter = counter + 1
        outname = namer(B)[0] + "Sub" + namer(Afile)[0]
        filnm = datawrite(outdata, Adata, Ademmeta, outname, outfolder)
    return filnm
Example No. 35
    def generatePlotArray(self, boxArray, plot, sampleSize):
        resultArray = numpy.empty((self.gridy, self.gridx))
        #Spot the artwork : changed from nan to zeros
        resultArray[:] = numpy.nan
        means = []
        for i in range(self.gridx):
            for j in range(self.gridy):
                box = boxArray[i][j]
                count = box.count
                try:
                    count = sum(count)

                except:
                    print "count exception"
                    pass
                if count > sampleSize / 4:  # only plot if averaged at least one fixation per participant
                    # Compute the mean over the given axis ignoring nans.
                    z = st.nanmean(box.getResult(plot))
                    resultArray[j][i] = z
                    means.append(z)
        resultArrayStatistics = {}
        resultArrayStatistics.update(
            {"Minimum value": str(numpy.nanmin(resultArray))})
        resultArrayStatistics.update(
            {"Max value": str(numpy.nanmax(resultArray))})
        resultArrayStatistics.update(
            {"Mean value": str(numpy.nanmean(resultArray))})
        resultArrayStatistics.update({"Std": str(numpy.nanstd(means))})
        print "Min :" + str(numpy.nanmin(resultArray)) + " Box index : " + str(
            numpy.nanargmin(resultArray))
        print "Max :" + str(numpy.nanmax(resultArray)) + " Box index : " + str(
            numpy.nanargmax(resultArray))

        print "Mean:" + str(numpy.nanmean(resultArray))
        print "Standard deviation" + str(numpy.nanstd(means))
        print "Area of interest size : " + str(
            np.count_nonzero(~np.isnan(resultArray))) + " " + str(
                size(resultArray))

        #Spot the artwork : print array to check inconsistencies
        #print resultArray
        return resultArray, resultArrayStatistics
Example No. 36
def fit_spec_poly5(xData, yData, dyData, order=5):

    xData = np.array(xData, dtype='f8')
    yData = np.array(yData, dtype='f8')
    
    # Estimate starting coefficients
    C1 = nanmean(np.diff(yData)) / nanmedian(np.diff(xData))
    ind = int(np.median(np.where(~np.isnan(yData))))
    C0 = yData[ind] - (C1 * xData[ind])
    if order<1:
        order=1
    p0 = [0.0, 0.0, 0.0, 0.0, C1, C0]

    # Set the order
    p0 = p0[(-order-1):]

    def chisq(p, x, y):
        return np.sum( ((poly5(p)(x) - y)/ dyData)**2.0 )

    # Use minimize to perform the fit
    return op.fmin_bfgs(chisq, p0, args=(xData, yData), full_output=1)
Example No. 37
def runstats(x,n):
# Stephanie Gagne, UHel, 2010
# converted to Python, Dal, 2012
# x is an array of 1 dimension.
# n is the number of points taken in the running statistic
    """takes data, number of points for the running mean/standard deviation and returns the running mean and running standard deviation."""
    try: x.mask
    except: 
        x=ma.asanyarray(x); 
        x.mask=ones(np.shape(x))*False
    try: [ro,co]=np.shape(x)
    except: ro=np.shape(x)[0]; co=1
    if ro==1 or co==1: x=x.reshape(max(ro,co),)
    else: print("The array must be a vector (one column or row)")
    # initializing matrix
    ro=max(ro,co)
    M=ones([ro,n])*NaN;
    M=ma.asanyarray(M)
    
    # building matrix
    if n%2==1:       # if n is odd
        for j in range(int(n/2),0,-1):
            posi=int(n/2)-j       # current position
            M[0:ro-j,posi]=x[j:]
        for j in range(1,2+int(n/2),1):
            posi=int(n/2)+j-1;
            M[j-1:,posi]=x[0:(ro+1)-j]
    elif n%2==0:        # if n is even
        for j in range(n/2,0,-1):
            posi=n/2-j
            M[0:ro-j,posi]=x[j:]
        for j in range(1,n/2+1):
            posi=n/2+j-1;
            M[j-1:,posi]=x[0:(ro+1)-j]
    else: print("Well, that's pretty weird. Are you sure n is an integer?")  
    
    M.data[M.mask]=NaN
    ave=st.nanmean(M, axis=1);
    stde=st.nanstd(M, axis=1);
    return [ave, stde]      
Example No. 38
def multilooking(src, xscale, yscale, thresh=0):
    """
    Implementation of Matlab Pirate looks.m function.

    src: numpy array of phase data
    thresh: min number of non-NaNs required for a valid tile resampling
    """
    thresh = int(thresh)
    num_cells = xscale * yscale
    if thresh > num_cells or thresh < 0:
        msg = "Invalid threshold: %s (need 0 <= thr <= %s" % (thresh,
                                                              num_cells)
        raise ValueError(msg)

    rows, cols = src.shape
    rows_lowres = int(floor(rows / yscale))
    cols_lowres = int(floor(cols / xscale))
    dest = ones((rows_lowres, cols_lowres)) * nan

    size = xscale * yscale
    for row in range(rows_lowres):
        for col in range(cols_lowres):
            ys = row * yscale
            ye = ys + yscale
            xs = col * xscale
            xe = xs + xscale

            patch = src[ys:ye, xs:xe]
            num_values = num_cells - npsum(isnan(patch))

            if num_values >= thresh and num_values > 0:
                # nanmean() only works on one axis
                reshaped = patch.reshape(size)
                dest[row, col] = nanmean(reshaped)

    return dest
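A worked sketch of the tile resampling on a tiny array (the values, the 2x2 scale, and thresh=0 are made up); each low-resolution cell is the NaN-aware mean of one tile:

import numpy as np

src = np.arange(16, dtype=float).reshape(4, 4)
src[0, 1] = np.nan                                  # one masked/bad cell

dest = np.full((2, 2), np.nan)
for row in range(2):
    for col in range(2):
        tile = src[2 * row:2 * row + 2, 2 * col:2 * col + 2]
        if np.sum(~np.isnan(tile)) > 0:             # plays the role of the thresh test
            dest[row, col] = np.nanmean(tile)
print(dest)                                         # [[3.  4.5] [10.5 12.5]]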
Example No. 39
    def generatePlotArray(self, boxArray, plot, sampleSize):
        resultArray = numpy.empty((self.gridy, self.gridx))
        #Spot the artwork : changed from nan to zeros
        resultArray[:] = numpy.nan
        means=[]
        for i in range(self.gridx):
            for j in range(self.gridy):
                box = boxArray[i][j]
                count = box.count
                try:
                    count = sum(count)


                except:
                    print "count exception"
                    pass
                if count > sampleSize / 4 :  # only plot if averaged at least one fixation per participant
                    # Compute the mean over the given axis ignoring nans.
                    z = st.nanmean(box.getResult(plot))
                    resultArray[j][i] = z
                    means.append(z)
        resultArrayStatistics = {}
        resultArrayStatistics.update({"Minimum value":str(numpy.nanmin(resultArray))})
        resultArrayStatistics.update({"Max value":str(numpy.nanmax(resultArray))})
        resultArrayStatistics.update({"Mean value":str(numpy.nanmean(resultArray))})
        resultArrayStatistics.update({"Std":str(numpy.nanstd(means))})
        print "Min :" + str(numpy.nanmin(resultArray)) + " Box index : " + str(numpy.nanargmin(resultArray))
        print "Max :" + str(numpy.nanmax(resultArray)) + " Box index : " + str(numpy.nanargmax(resultArray))

        print "Mean:" + str(numpy.nanmean(resultArray))
        print "Standard deviation" + str(numpy.nanstd(means))
        print "Area of interest size : " + str(np.count_nonzero(~np.isnan(resultArray))) + " " + str(size(resultArray))

        #Spot the artwork : print array to check inconsistencies
        #print resultArray
        return resultArray, resultArrayStatistics
Example No. 40
            csv_writer = csv.writer(csvfile, delimiter=',',quotechar='|')
            csv_writer.writerow(hot[beginning_time:end_time])

        with open('volume all 394 gp data.csv', 'ab') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',',quotechar='|')
            csv_writer.writerow(gp[beginning_time:end_time])

        #Append data    
        hot_total_volume.append(hot[beginning_time:end_time])
        gp_total_volume.append(gp[beginning_time:end_time])

    date_1 += timedelta(days=1)


#Average volume at each time across all days
average_hot_volume = sci.nanmean(hot_total_volume,axis=0)
variance_hot_volume = sci.nanstd(hot_total_volume,axis=0)
average_gp_volume = sci.nanmean(gp_total_volume,axis=0)
variance_gp_volume = sci.nanstd(gp_total_volume,axis=0)

#Group into 3 minute increments (6 x 30 seconds)
k=0
resolution = 6
while k in range(0,len(average_hot_volume)):
    average_hot_volume[k:k+resolution] = sum(average_hot_volume[k:k+resolution])/resolution
    variance_hot_volume[k:k+resolution] = sum(variance_hot_volume[k:k+resolution])/resolution
    average_gp_volume[k:k+resolution] = sum(average_gp_volume[k:k+resolution])/resolution
    variance_gp_volume[k:k+resolution] = sum(variance_gp_volume[k:k+resolution])/resolution
    k+=resolution

#Write out averaged 3 minute data
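The block-averaging loop above replaces every group of six 30-second samples with the group mean; when the series length is a multiple of the block size, a reshape does the same thing. A minimal sketch with made-up numbers:

import numpy as np

volume = np.arange(12, dtype=float)                   # 12 samples of 30 s = 6 minutes
resolution = 6                                        # 6 samples per 3-minute block
block_means = volume.reshape(-1, resolution).mean(axis=1)
print(np.repeat(block_means, resolution))             # same result as the while loop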
Example No. 41
def stack(stype='day'):
    db = connect()
    components_to_compute = get_components_to_compute(db)
    output_folder = get_config(db, 'output_folder')
    export_format = get_config(db, 'export_format')

    if export_format == "BOTH":
        mseed = True
        sac = True
    elif export_format == "SAC":
        mseed = False
        sac = True
    elif export_format == "MSEED":
        mseed = True
        sac = False

    if stype == "day":
        start, end, datelist = build_daystack_datelist(db)
        format = "stack"
    elif stype == "mov":
        start, end, datelist = build_movstack_datelist(db)
        format = "matrix"
        mov_stack = get_config(db, "mov_stack")
        if mov_stack.count(',') == 0:
            mov_stacks = [
                int(mov_stack),
            ]
        else:
            mov_stacks = [int(mi) for mi in mov_stack.split(',')]
        mov_stacks.remove(1)  #remove 1 day stack, it should exist already
    elif stype == "ref":
        start, end, datelist = build_ref_datelist(db)
        format = "stack"

    for f in get_filters(db, all=False):
        filterid = int(f.ref)
        for components in components_to_compute:
            for station1, station2 in get_station_pairs(db, used=True):
                sta1 = "%s_%s" % (station1.net, station1.sta)
                sta2 = "%s_%s" % (station2.net, station2.sta)
                pair = "%s:%s" % (sta1, sta2)
                if updated_days_for_dates(db,
                                          start,
                                          end,
                                          pair.replace('_', '.'),
                                          type='CC',
                                          interval='1 DAY'):
                    logging.debug("New Data for %s-%s-%i" %
                                  (pair, components, filterid))
                    if stype in ['mov', 'ref']:
                        nstack, stack_total = get_results(db,
                                                          sta1,
                                                          sta2,
                                                          filterid,
                                                          components,
                                                          datelist,
                                                          format=format)
                        updated_days = updated_days_for_dates(db,
                                                              start,
                                                              end,
                                                              pair.replace(
                                                                  '_', '.'),
                                                              type='CC',
                                                              interval='1 DAY',
                                                              returndays=True)
                        if nstack > 0:
                            if stype == "mov":
                                for i, date in enumerate(datelist):
                                    jobadded = False
                                    for mov_stack in mov_stacks:
                                        if i < mov_stack:
                                            low = 0
                                            high = mov_stack
                                        else:
                                            low = i - mov_stack + 1
                                            high = i + 1
                                        newdata = False
                                        for uday in updated_days:
                                            if uday in datelist[low:high]:
                                                newdata = True
                                                break
                                        if newdata:
                                            corr = stack_total[low:high]

                                            if not np.all(np.isnan(corr)):
                                                day_name = "%s_%s" % (sta1,
                                                                      sta2)
                                                logging.debug(
                                                    "%s %s [%s - %s] (%i day stack)"
                                                    % (day_name, date,
                                                       datelist[low],
                                                       datelist[i], mov_stack))
                                                corr = nanmean(corr, axis=0)
                                                corr = scipy.signal.detrend(
                                                    corr)
                                                stack_path = os.path.join(
                                                    "STACKS",
                                                    "%02i" % filterid,
                                                    "%03i_DAYS" % mov_stack,
                                                    components, day_name)
                                                filename = os.path.join(
                                                    stack_path, str(date))
                                                if mseed:
                                                    export_mseed(
                                                        db, filename, pair,
                                                        components, filterid,
                                                        corr)
                                                if sac:
                                                    export_sac(
                                                        db, filename, pair,
                                                        components, filterid,
                                                        corr)
                                                day_name = "%s:%s" % (sta1,
                                                                      sta2)
                                                if not jobadded:
                                                    add_job(
                                                        db, date,
                                                        day_name.replace(
                                                            '_', '.'), 'DTT')
                                                    jobadded = True
                                            del corr

                            elif stype == "ref":
                                stack_path = os.path.join(
                                    "STACKS", "%02i" % filterid, "REF",
                                    components)
                                ref_name = "%s_%s" % (sta1, sta2)
                                filename = os.path.join(stack_path, ref_name)
                                stack_total = scipy.signal.detrend(stack_total)

                                if mseed:
                                    export_mseed(db, filename, pair,
                                                 components, filterid,
                                                 stack_total)
                                if sac:
                                    export_sac(db, filename, pair, components,
                                               filterid, stack_total)
                                ref_name = "%s:%s" % (sta1, sta2)
                                add_job(db, "REF", ref_name.replace('_', '.'),
                                        'DTT')
                                del stack_total
                    elif stype == 'day':
                        updated_days = updated_days_for_dates(db,
                                                              start,
                                                              end,
                                                              pair.replace(
                                                                  '_', '.'),
                                                              type='CC',
                                                              interval='1 DAY',
                                                              returndays=True)
                        for date in updated_days:
                            print "Stacking %s day=%s" % (pair, date)
                            daystack = os.path.join("STACKS",
                                                    "%02i" % filterid,
                                                    "001_DAYS", components,
                                                    "%s_%s" % (sta1, sta2),
                                                    str(date))
                            stack = np.zeros(get_maxlag_samples(db))
                            ncorr = 0
                            try:
                                os.makedirs(os.path.split(daystack)[0])
                            except OSError:
                                pass  # directory may already exist
                            path = os.path.join(output_folder,
                                                "%02i" % filterid, sta1, sta2,
                                                components, str(date))
                            if os.path.isdir(path):
                                for file in os.listdir(path):
                                    if len(file) == 8:
                                        st = read(os.path.join(path, file))
                                        if not np.any(np.isnan(
                                                st[0].data)) and not np.any(
                                                    np.isinf(st[0].data)):
                                            stack += st[0].data
                                            ncorr += 1
                                if ncorr > 0:
                                    if mseed:
                                        export_mseed(db, daystack, pair,
                                                     components, filterid,
                                                     stack / ncorr, ncorr)
                                    if sac:
                                        export_sac(db, daystack, pair,
                                                   components, filterid,
                                                   stack / ncorr, ncorr)
                            del stack
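
The day-stack loop above keeps only traces that are entirely finite and then divides the running sum by their count. A minimal NaN-tolerant sketch of the same averaging step (assuming a list of equal-length 1-D correlation arrays; np.nanmean plays the role that scipy's nanmean plays in the other examples on this page):

import numpy as np

def stack_day_sketch(correlations):
    """Average equal-length correlation traces sample by sample, ignoring NaNs.
    Unlike the loop above, which drops a trace containing any NaN/Inf sample,
    this keeps the finite samples of every trace."""
    arr = np.vstack(correlations).astype(float)   # shape: (n_traces, n_samples)
    arr[~np.isfinite(arr)] = np.nan               # treat Inf like missing data
    return np.nanmean(arr, axis=0)                # NaN-aware column-wise mean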
Esempio n. 42
0
def turbhrz(CL,MaxCA=2,MinDist=20000,PtNo=25,Pts=5,Pts2=50):
    """ This method calculates the turbulence average over at least 20 km or more in a horizontal enough leg (the plane cannot be above an angle of 2 degrees using a running standard deviation (of 25 points) to calculate the turbulence. It is assumed that the TAS is in the basic data, along with other aircraft related data (e.g. extradata module not handled for TAS). Extradata module handled for updraft velocity.
    Options: 
        MaxCA: maximum change in the combined angle (phi+theta) in degrees. Default is 2.
        MinDist: minimum distance (in meters) an aircraft must be level enough for to calculated the turbulence (updraft). Default is 20000 or 20 km.
        PtNo: number of points from which to calculate the turbulence=stdev(updraft vel). Default is 25
        Pts: number of points taken in running statistics of angles. Default is 5.
        Pts2: number of points taken in running statistics of the combined angle. Default is 50.
    Returns a dictionary:
    R["InCloud"]: List with one item for each stretch level enough for at least MinDist km in cloud (N, first=0). Each item contains an array with the turbulence value R["InCloud"][N][0] and the distance on which it was calculated R["InCloud"][N][1].
    R["BlwCloud"]: List with one item for each stretch level enough for at least 20 km below cloud (N, first=0). Each item contains an array with the turbulence value R["InCloud"][N][0] and the distance on which it was calculated R["InCloud"][N][1].
    R["InCloudStretches"]: List with one item for each stretch level enough during at least 20 km in cloud (N, first=0). Each item contains an array with the indexes corresponding to the stretch used to calculate the corresponding turbulence value.
    R["BlwCloudStretches"]: List with one item for each stretch level enough during at least 20 km below cloud (N, first=0). Each item contains an array with the indexes corresponding to the stretch used to calculate the corresponding turbulence value. """
    
    palt=[i for i,x in enumerate(CL.dttl) if x == 'altitude'][0]
    ptim=[i for i,x in enumerate(CL.dttl) if x == 'time'][0]
    ptas=[i for i,x in enumerate(CL.dttl) if 'TAS' in x.upper()][0]
    pudv=[i for i,x in enumerate(CL.dttl) if x == 'udvel']
    if len(pudv)==1: pudv=pudv[0]; udv=CL.data[pudv][0:-1]
    elif len(pudv)>1: print("[turbhrz] multiple updraft velocities found in the basic data."); return dict()
    else:
        posx=[] 
        for i,ttl in enumerate(CL.extrattl):     # for all extra datasets available
            posx=posx+[[i,j] for j,x in enumerate(ttl) if x.lower() == 'udvel']    # check all titles matching with updraft velocity
        if len(posx)==1: 
            udv=CL.extradata[posx[0][0]][posx[0][1]]    # loading the updraft velocity data
            j=[j for j,x in enumerate(CL.extrattl[posx[0][0]]) if x.lower() == 'time'][0]
            udvt=CL.extradata[posx[0][0]][j]     # loading associated time stamp
            # padding so the interpolation works
            if udvt[0]>CL.data[ptim][0]: udv=hstack((nan,udv)); udvt=hstack((CL.data[ptim][0],udvt));
            if udvt[-1]<CL.data[ptim][-1]: udv=hstack((udv,nan)); udvt=hstack((udvt,CL.data[ptim][-1]));
            fudv=interpolate.interp1d(udvt,udv,kind='linear')
            udv=fudv(CL.data[ptim])[0:-1]
        else: print("[turbhrz] No updraft velocity (or multiple) found in the basic or the extra data."); return dict()
    Alt=CL.data[palt][0:-1]
    A=samac.angles(CL)
    theta=samac.runstats(A[1],Pts)[0]  # change of altitude
    phi=diff(A[2])
    phi=np.ma.masked_where((phi>350)+(isnan(phi)), phi)
    phi=np.ma.masked_where((phi<-350)+(isnan(phi)), phi)
    phi=hstack([NaN,samac.runstats(phi,Pts)[0]])     # change in direction
    phi[(abs(phi)>=89)]=0   # accounts for jump from 0 to 360 degrees. After smoothing, the limit cannot be too high. 
    CAstats=samac.runstats(abs(theta)+abs(phi),Pts2)      # combined angle
    CA=CAstats[0]
#        udv=CL.data[pudv][0:-1]        # updraft
    Sp=CL.data[ptas][0:-1]
    dt=diff(CL.data[ptim]*24*60*60)
    D=cumsum(Sp*dt)
    # Initializing indexes for below cloud and in-cloud measurements.
    bc=ones(np.shape(CL.data[ptim]))*False
    ic=ones(np.shape(CL.data[ptim]))*False
    for j in range(len(CL.times["belowcloud"])):
        bctmp=(CL.data[ptim]>=CL.times["belowcloud"][j][0])*(CL.data[ptim]<=CL.times["belowcloud"][j][1])
        try: bc=bc+bctmp;
        except: bc=bctmp;
    for j in range(len(CL.times["horicloud"])):
        ictmp=(CL.data[ptim]>=CL.times["horicloud"][j][0])*(CL.data[ptim]<=CL.times["horicloud"][j][1])
        try: ic=ic+ictmp;
        except: ic=ictmp;
    # Below Cloud #
    Stret=list()
    i=0
    while i<len(CA):
        ix=list()
        if (CA[i]<=MaxCA and bc[i]==True):
            while (i<len(CA) and CA[i]<=MaxCA and bc[i]==True):
                ix.append(i)
                i=i+1
            Stret.append(ix)
        else: i=i+1
    BCStretches=list()
    for n,leg in enumerate(Stret):
        Dist=D[leg]; 
        Dist=Dist[(Dist.mask==False)]
        if (Dist[-1]-Dist[0]) >= MinDist:     # if the stretch is longer than the minimum distance.
            BCStretches.append(np.array(leg))
        del Dist
    # In-Cloud #
    Stret=list()
    i=0
    while i<len(CA):
        ix=list()
        if (CA[i]<=MaxCA and ic[i]==True):
            while (i<len(CA) and CA[i]<=MaxCA and ic[i]==True):
                ix.append(i)
                i=i+1
            Stret.append(ix)
        else: i=i+1
    ICStretches=list()
    for n,leg in enumerate(Stret):
        Dist=D[leg]; Dist=Dist[(Dist.mask==False)]
        if (Dist[-1]-Dist[0]) >= MinDist:     # if the stretch is longer than the minimum distance.
            ICStretches.append(np.array(leg))       
        del Dist
    # Calculating the average turbulence.
    ICturb=list(); BCturb=list()
    for leg in ICStretches:
        Dist=D[leg]; Dist=Dist[(Dist.mask==False)]
        if (sum(leg)>PtNo and sum(~isnan(udv[leg]))>PtNo):
            turb=st.nanmean(samac.runstats(udv[leg],PtNo)[1])
            #print("In-Cloud %d: %.4f km. w' = %0.4f m/s" %(cn,(Dist[-1]-Dist[0])/1000,turb))
            ICturb.append(np.array([turb,(Dist[-1]-Dist[0])]))
            del Dist
    for leg in BCStretches:
        Dist=D[leg]; Dist=Dist[(Dist.mask==False)]
        if (sum(leg)>PtNo and sum(~isnan(udv[leg]))>PtNo):
            turb=st.nanmean(samac.runstats(udv[leg],PtNo)[1])
            #print("Below-Cloud %d: %.4f km. w' = %0.4f m/s" %(cn,(Dist[-1]-Dist[0])/1000,turb))
            BCturb.append(np.array([turb,(Dist[-1]-Dist[0])]))
        else: BCturb.append(np.array([nan,(Dist[-1]-Dist[0])]))
        del Dist
            
    R=dict()
    R["InCloud"]=ICturb; R["BlwCloud"]=BCturb; R["InCloudStretches"]=ICStretches; R["BlwCloudStretches"]=BCStretches
    return R        
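
The turbulence value appended above is the NaN-aware mean of a running standard deviation of the updraft velocity over PtNo points. A minimal sketch of that statistic with plain NumPy (samac.runstats is assumed to return a (running mean, running std) pair and is replaced here by an explicit sliding window):

import numpy as np

def leg_turbulence_sketch(udv, ptno=25):
    """NaN-aware mean of the running standard deviation of the updraft velocity."""
    run_std = np.array([np.nanstd(udv[i:i + ptno])
                        for i in range(len(udv) - ptno + 1)])
    return np.nanmean(run_std)   # same role as st.nanmean(samac.runstats(udv, PtNo)[1])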
Esempio n. 43
0
data_array = np.array(map(parse_img, files))
data_array = data_array.astype(float)  # convert to float to use nans
times = np.array(map(parse_time, files))
data_array = data_array[np.argsort(times)]
# median 3x3 filter
data_array = median_filter(data_array, size=(1, 3, 3))

# flatfield parsing
directory = './'+flat_field+'/'
files = [directory+str(i) for i in os.listdir(directory) if 'tif' in i]

print '...start parsing flatfields'
flat_field_array = np.array(map(parse_img, files))
mean = np.mean(flat_field_array, axis=(0))
mean = mean.astype(float)
ff = mean / nanmean(mean)

# print out delta time
print 'parsed all files and flatfields.'
print 'time: {0} minutes'.format((time.time()-time_Start) / 60)

data_array = data_array / ff
# divide out background
bkg = data_array[:, 0:20, 0:20]

mean_bkg = np.ma.mean(bkg, axis=(1, 2))
data_array = data_array / mean_bkg[:, np.newaxis, np.newaxis]
data_array[data_array == 0.] = 1


# Save the data into a hdf5 file
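
For reference, the flat-field step earlier in this example divides the mean flat frame by its own nanmean, so the correction field averages to roughly one before the data frames are divided by it. A minimal sketch of that normalisation (array shapes here are hypothetical):

import numpy as np

flat_stack = np.random.rand(10, 512, 512)          # hypothetical stack of flat-field frames
mean_flat = np.mean(flat_stack, axis=0).astype(float)
ff = mean_flat / np.nanmean(mean_flat)              # correction field with mean ~ 1
frame = np.random.rand(512, 512)
corrected = frame / ff                              # apply the flat-field correction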
Esempio n. 44
0
        while d < days:
            minDTO = startDTO + dt.timedelta(days=d)
            maxDTO = startDTO + dt.timedelta(days=d + 1)

            criterion = (
                (cdata[:, 0] >= calendar.timegm(minDTO.utctimetuple())) &
                (cdata[:, 0] < calendar.timegm(maxDTO.utctimetuple())))
            tind = np.array(np.where(criterion))[0]
            ele_model = []

            # check we have some data for each day
            if np.size(tind) > 0:
                # split the data for this test
                blkm, blkmstd = esm.blockMedian(cdata[tind, 1:4])
                for j in range(0, 181):
                    ele_model.append(nanmean(blkm[:, j]))
                ele_model = np.array(ele_model)
                eleMedians[d, :] = np.array(ele_model)
            d += 1

        elevation = []
        for j in range(0, 181):
            elevation.append(90. - j * 0.5)
        #===========================================================
        fig = plt.figure(figsize=(3.62, 2.76))
        ax = fig.add_subplot(111)

        for i in range(0, np.shape(eleMedians)[0]):
            ax.plot(elevation, eleMedians[i, :], alpha=0.5)

        # now compute the over all median
Esempio n. 45
0
        resultOptim[itr, ridx] = resultOpt
        resultQuant[itr, ridx] = resultQu
        resultBAful[itr, ridx] = resultBA

# Drop data points whose outage fraction across iterations exceeds the threshold
outageThreshold = 1
resultOptim[:, (np.sum(np.isnan(resultOptim), 0) /
                float(iterations) > outageThreshold)] = np.nan
resultQuant[:, (np.sum(np.isnan(resultQuant), 0) /
                float(iterations) > outageThreshold)] = np.nan
resultBAful[:, (np.sum(np.isnan(resultBAful), 0) /
                float(iterations) > outageThreshold)] = np.nan

# Average over iterations
resultOptim = scistats.nanmean(resultOptim, axis=0)
resultQuant = scistats.nanmean(resultQuant, axis=0)
resultBAful = scistats.nanmean(resultBAful, axis=0)

# Put into single array for storage
upperlimit = (bs.p0 + bs.m * cell.pMax) * np.ones(rateSteps)
result = np.array([rate, upperlimit, resultBAful, resultOptim, resultQuant])

filename = 'resultsJSAC'
from results import resultshandler
resultshandler.saveBin(filename, result)

# timeit
print 'Code time %.0f seconds' % (time.time() - start)

# lastly, try to generate the plot right away
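
The post-processing above first blanks every column whose share of NaN entries exceeds the outage threshold and then averages the surviving iterations with nanmean. A self-contained sketch of that pattern (threshold and shapes are hypothetical; on current SciPy/NumPy, np.nanmean replaces the removed scipy.stats.nanmean):

import numpy as np

iterations, rate_steps = 100, 50
result = np.random.rand(iterations, rate_steps)
result[np.random.rand(iterations, rate_steps) < 0.05] = np.nan   # sporadic outages

outage_threshold = 0.5
bad = np.sum(np.isnan(result), axis=0) / float(iterations) > outage_threshold
result[:, bad] = np.nan                   # drop columns with too many outages
averaged = np.nanmean(result, axis=0)     # NaN-aware average over iterations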
Esempio n. 46
0
def mean(x):
    """ E\{%s\} := Average %s """
    from scipy.stats.stats import nanmean

    x = notnone(x)
    return nanmean(x)
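
An equivalent behaviour with current NumPy only (a minimal sketch; notnone is assumed to simply drop None entries before the NaN-aware average, and scipy.stats.stats.nanmean has since been removed in favour of numpy.nanmean):

import numpy as np

def mean_sketch(x):
    """Drop None entries, then average while ignoring NaN."""
    x = np.array([v for v in x if v is not None], dtype=float)
    return np.nanmean(x)

# mean_sketch([1.0, None, 2.0, float('nan'), 6.0]) -> 3.0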
Esempio n. 47
0
def DoSummaryInfo(obs, estimated):
    return({'rmse': numpy.sqrt(sss.nanmean((estimated - obs) ** 2.0, axis=1)),
            'mae': sss.nanmean(numpy.abs(estimated - obs), axis=1),
            'corr': numpy.diag(numpy.corrcoef(estimated, obs), k=estimated.shape[0]),
            'sse': numpy.sum((estimated - obs) ** 2.0, axis=1)})
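
A possible usage of DoSummaryInfo (inputs are hypothetical 2-D arrays with one series per row, and sss is assumed to be scipy.stats.stats as imported in the original module; thanks to nanmean the per-row rmse and mae stay defined even when some samples are NaN):

import numpy
obs = numpy.array([[1.0, 2.0, 3.0],
                   [2.0, numpy.nan, 4.0]])
estimated = numpy.array([[1.5, 2.5, 2.5],
                         [2.0, 3.0, 5.0]])
info = DoSummaryInfo(obs, estimated)
# info['rmse'] and info['mae'] are NaN-aware, per-row error summaries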
Esempio n. 48
0
def oneIteration(rate, CSI_Optim, SINR_Quant):
    """Perform one iteration and return the supply powers"""
    ### Step 1 ###
    # Optimization call
    import pdb
    pdb.set_trace()

    pSupplyOptim, resourceAlloc, status = optimMinPow.optimizePCDTX(
        CSI_Optim, PnoiseIf_Optim, rate, wrld.PHY.systemBandwidth, cell.pMax,
        bs.p0, bs.m, bs.pS)
    print '{0:50} {1:5.2f} W'.format('Real-valued optimization objective:',
                                     pSupplyOptim)

    ## Plot ##
    if plotting:
        channelplotter.ChannelPlotter().bar(resourceAlloc,
                                            'Resource Share Optim',
                                            'rscshare.pdf')
        channelplotter.ChannelPlotter().bar(PnoiseIf_Optim,
                                            'Interference power Optim',
                                            'ifpower.pdf')
        channelplotter.ChannelPlotter().bar(
            ((np.abs(np.mean(np.mean(CSI_Optim, 1), 1)))**2) /
            (PnoiseIf_Optim), 'SINR Optim', 'OptimSINR.pdf')
        channelplotter.ChannelPlotter().OFDMAchannel(SINR_Quant, 'SINR Quant',
                                                     'sinrquant.pdf')
        channelplotter.ChannelPlotter().OFDMAchannel(
            noiseIfPQuant, 'Noise Interference Power Quant',
            'noiseifquant.pdf')
        import pdb
        pdb.set_trace()

    ### Step 2 ###
    # Map real valued solution to OFDMA frame
    # QUANTMAP
    resourcesPerTimeslot = quantmap.quantmap(resourceAlloc, N, T)
    outmap = np.empty([N, T])
    for t in np.arange(T):
        # RCG
        outmap[:, t], _ = rcg.rcg(SINR_Quant[:, t, :], resourcesPerTimeslot[
            t, :])  # outmap.shape = (N,T) tells the user index

    # Given allocation and rate target, we inverse waterfill channels for each user separately on the basis of full CSI
    # IWF
    powerlvls = np.empty([N, T, mob.antennas])
    powerlvls[:] = np.nan
    for idx, obj in enumerate(
            wrld.consideredMobiles):  # WRONG TODO: Only all mobiles of a BS
        # grab user CSI
        CSI_usr = obj.OFDMA_SINR[:, :, outmap ==
                                 idx]  # all CSI assigned to this user
        noiseIfPower_usr = CSI_usr[0, 0, :].repeat(
            2
        ) * 0 + 1  # remove later  #(obj.baseStations[obj.BS].cells[obj.cell].OFDMA_interferencePower + obj.baseStations[obj.BS].cells[obj.cell].OFDMA_noisePower) * np.ones(CSI_user_all[0,0,:,:].shape)[outmap==idx].ravel().repeat(2) # one IF value per resource, so repeat once to match spatial channels
        # create list of eigVals
        eigVals = np.real([
            linalg.eig(CSI_usr[:, :, i])[0]
            for i in np.arange(CSI_usr.shape[2])
        ]).ravel()  # two eigvals (spatial channels) per resource
        targetLoad = rate * wrld.PHY.simulationTime
        # inverse waterfill and fill back to OFDMA position
        powlvl, waterlvl, cap = iwf.inversewaterfill(
            eigVals, targetLoad, noiseIfPower_usr,
            wrld.PHY.systemBandwidth / N, wrld.PHY.simulationTime / T)
        powerlvls[outmap == idx, :] = powlvl.reshape(CSI_usr.shape[2],
                                                     obj.antennas)

    ptx = np.array([
        np.nansum(np.nansum(powerlvls[:, t, :], axis=0), axis=0)
        for t in np.arange(T)
    ])
    if ptx.any() > cell.pMax:
        raise ValueError('Transmission power too high in IWF.')

    psupplyPerSlot = bs.p0 + bs.m * ptx

    psupplyPerSlot[np.isnan(psupplyPerSlot)] = bs.pS
    pSupplyQuant = np.mean(psupplyPerSlot)
    print '{0:50} {1:5.2f} W'.format('Integer-valued optimization objective:',
                                     pSupplyQuant)

    ### SOTA comparison ###
    pSupplyBA = np.nan
    CSI_BA = np.empty([2, 2, N, T, users], dtype=complex)
    CSI_BA[:] = np.nan
    noiseIfPowerPerResource = np.empty([N, T, users])
    noiseIfPowerPerResource[:] = np.nan
    for idx, obj in enumerate(wrld.consideredMobiles):
        CSI_BA[:, :, :, :,
               idx] = obj.baseStations[obj.BS].cells[obj.BS.cells[0]].CSI_OFDMA
        noiseIfPowerPerResource[:, :,
                                idx] = obj.noiseIfPower * np.ones([N, T]) / N
    pTxBA = OFDMA_BA(
        np.ones(users) * rate * wrld.PHY.simulationTime,
        np.ones([N, T, users]) * cell.pMax / N, wrld.PHY.systemBandwidth,
        wrld.PHY.simulationTime, noiseIfPowerPerResource, CSI_BA)

    pSupplyBA = scistats.nanmean(bs.p0 + bs.m * pTxBA)
    print '{0:50} {1:5.2f} W'.format('SOTA objective:', pSupplyBA)
    print ' '

    return pSupplyOptim, pSupplyQuant, pSupplyBA
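
The supply-power bookkeeping near the end of oneIteration substitutes the sleep power pS for slots in which nothing is transmitted (NaN transmit power) before averaging. A minimal sketch of that step with hypothetical power-model parameters:

import numpy as np

p0, m, pS = 200.0, 4.0, 50.0            # hypothetical power model (W)
ptx = np.array([2.0, np.nan, 1.0])      # per-slot transmit power; NaN marks a DTX (sleep) slot
psupply_per_slot = p0 + m * ptx
psupply_per_slot[np.isnan(psupply_per_slot)] = pS   # sleeping slots consume pS
p_supply = np.mean(psupply_per_slot)                # (208 + 50 + 204) / 3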
Esempio n. 49
0
def main(searchpath, outpath):
    """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually build a csv file for plotting the sum rate agains the power consumption.
    Input: search path, output path
        """

    data_types = 9
    rate = 1e6 * 2.0  # 2 Mbps
    sweep_values = []  # we do not know beforehand how much data we have
    depth = None  # worst case number of data points

    # enum
    axis_index = 0
    sequential_index = 1
    random_shift_each_iter_index = 2
    random_shift_once_index = 3
    random_each_iter_index = 4
    random_once_index = 5
    sinr_index = 6
    static_3_index = 7
    dtx_segregation = 8

    # initially, we only check how much data we have
    for dirname, dirnames, filenames in os.walk(searchpath):
        if depth is None:
            depth = len(dirnames)
        for subdirname in dirnames:
            for filename in glob.glob(
                    os.path.join(dirname, subdirname) + '/*settings*'):
                config = ConfigParser.RawConfigParser()
                config.read(filename)
                sweep_values.append(
                    int(config.getfloat(
                        'General',
                        'user_rate')))  # TODO: handle rate*users automatically
                #sweep_values.append(config.getfloat('General', 'numcenterusers'))

    sweep_values = sorted(set(sweep_values))
    count = np.zeros([len(sweep_values), data_types])

    result = np.empty([len(sweep_values), data_types, depth])
    result[:] = np.nan

    # now start filling the result
    dep = 0
    for dirname, dirnames, filenames in os.walk(searchpath):
        for subdirname in dirnames:
            for filename in glob.glob(
                    os.path.join(dirname, subdirname) + '/*settings*.cfg'):

                config = ConfigParser.RawConfigParser()
                config.read(filename)
                #users = (config.getint('General', 'numcenterusers'))
                users = int(config.getfloat('General', 'user_rate'))
                sleep_alignment = config.get('General', 'sleep_alignment')
                initial_power = config.get('General', 'initial_power')
                pS = config.getint('General', 'pS')
                p0 = config.getint('General', 'p0')

                # seqDTX sequential
                if ('DTX' in filename) and (sleep_alignment == 'none'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = sequential_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # random shift each iteration
                elif ('DTX' in filename) and (sleep_alignment
                                              == 'random_shift_iter'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = random_shift_each_iter_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # random shift once
                elif ('DTX' in filename) and (sleep_alignment
                                              == 'random_shift_once'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = random_shift_once_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # random each iter
                elif ('DTX' in filename) and (sleep_alignment
                                              == 'random_iter'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = random_each_iter_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # random once
                elif ('DTX' in filename) and (sleep_alignment
                                              == 'random_once'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = random_once_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # sinr ordering
                elif ('DTX' in filename) and (sleep_alignment == 'sinr'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = sinr_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # static assignment, reuse 3
                elif ('DTX' in filename) and (sleep_alignment == 'static'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = static_3_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # dtx segregation
                elif ('DTX' in filename) and (sleep_alignment
                                              == 'dtx_segregation'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = dtx_segregation
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                else:
                    print 'What is this folder?' + os.path.join(
                        dirname, subdirname) + '/' + filename

                dep += 1

    result = nanmean(result, axis=2)
    result[:,
           axis_index] = np.array(sweep_values) * 10  # provide x-axis in Mbps
    result = result.T  # expected format

    np.savetxt(outpath + '/power_consumption_over_sumrates.csv',
               result,
               delimiter=',')
    #    print 'Count PF_ba: ' + str(count_PF_ba)
    #    print 'Count PF_dtx: ' + str(count_PF_dtx)
    #    print 'Count RAPS_old: ' + str(count_RAPS_naive)
    #    print 'Count RAPS_random: ' + str(count_RAPS_random)
    #    print 'Count RAPS_PC: ' + str(count_RAPS_PC)
    #    print 'Count seqDTX_rand: ' + str(count_seqDTX_rand)
    #    print 'Count seqDTX_noshift: ' + str(count_seqDTX_noshift)
    print count.T
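
The reduction near the end of main averages every (sweep value, data type) cell over the depth axis with nanmean, so directories that never contributed a value (left as NaN) simply drop out. A minimal sketch of that reduction with hypothetical shapes:

import numpy as np

sweep_points, data_types, depth = 4, 9, 3
result = np.full([sweep_points, data_types, depth], np.nan)
result[0, 1, 0] = 10.0        # one run found for this configuration
result[0, 1, 2] = 14.0        # a second run; the middle slot stays missing
averaged = np.nanmean(result, axis=2)   # averaged[0, 1] == 12.0, untouched cells stay NaN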
Esempio n. 50
0
def ba(rate, wrld, cell, mobiles, users):
    """The considered State of the Art comparison. Allocated target bitload over resources at an equal power share until user target is fulfilled. If the load is too high, np.nan is returned.
    Input:
        bitload: array of bit load by user index
        pPerResource: (N,T) power level
        systemBandwidth: in Hz
        totalTime: in seconds
        noiseIfPowerPerResource: noise plus interference power (N,T)
        CSI: (n_tx, n_rx, N,T)
    Output:
        lastUsedResource: Flattened CSI index where allocation finished."""

    N = wrld.PHY.numFreqChunks
    T = wrld.PHY.numTimeslots
    K = users
    bitloadPerUser = rate * wrld.PHY.simulationTime * np.ones(users)
    pPerResource = np.ones([N, T, users]) * cell.pMax / N
    systemBandwidth = wrld.PHY.systemBandwidth
    totalTime = wrld.PHY.simulationTime

    pSupplyBA = np.nan
    CSI_BA = np.empty([2, 2, N, T, users], dtype=complex)
    CSI_BA[:] = np.nan
    noiseIfPowerPerResource = np.empty([N, T, users])
    noiseIfPowerPerResource[:] = np.nan
    for idx, obj in enumerate(mobiles):
        CSI_BA[:, :, :, :,
               idx] = obj.baseStations[obj.BS]['cells'][cell]['CSI_OFDMA']
        noiseIfPowerPerResource[:, :,
                                idx] = obj.noiseIfPower * np.ones([N, T]) / N

    resourceTime = totalTime / T
    resourceBandwidth = systemBandwidth / N
    baseSNR = pPerResource / (noiseIfPowerPerResource)
    user = 0
    usedRBCounter = np.zeros(T)
    flag = False

    for n in np.arange(N):
        for t in np.arange(T):
            usedRBCounter[t] = usedRBCounter[t] + 1
            H = np.dot(CSI_BA[:, :, n, t, user], CSI_BA[:, :, n, t,
                                                        user].conj().T)
            bitsInThisRB = resourceBandwidth * resourceTime * np.real(
                utils.ergMIMOCapacityCDITCSIR(H, baseSNR[n, t, user]))
            bitloadPerUser[user] = bitloadPerUser[user] - bitsInThisRB

            if bitloadPerUser[user] <= 0:
                user = user + 1
            if user >= K:
                flag = True
                break

        if flag:
            break

    if (bitloadPerUser < 0).all():
        pTxBA = pPerResource[0, 0, 0] * usedRBCounter
        pSupplyBA = scistats.nanmean(mobiles[0].BS.p0 +
                                     mobiles[0].BS.m * pTxBA)
    else:
        logger.warning('SOTA overload.')
        pSupplyBA = np.nan

    logger.info('{0:50} {1:5.2f} W'.format('SOTA objective:', pSupplyBA))

    return pSupplyBA
Esempio n. 51
0
ax = fig.add_subplot(111)
[plot_ethogram(etho,'jpak',(len(ethograms)-1)*2-i*2) for i,etho in enumerate(ethograms)]
ytks = [i*2+0.5 for i in range(len(ethograms))]
rats = [39,37,29,27,25,23,21,38,36,28,26,24,22,20]
ytklabels = ['jpak' + str(r) for r in rats]
plt.yticks(ytks,ytklabels)
hline(13.5)
xlabel('Time from first contact (seconds)')
xlim([0,3])



lesionfreezetimes = [get_total_freezetime(etho) for etho in ethograms[0:7]]
lesioncrossingtimes = [get_crossing_time(etho) for etho in ethograms[0:7]]
lesioncrossingtimes = [x for x in lesioncrossingtimes if x >= 0]
lesionmeans = [np.mean(lesionfreezetimes),nanmean(lesioncrossingtimes)]
lesionerr= [[0,0],[sem(lesionfreezetimes),sem(lesioncrossingtimes)]]

shamfreezetimes = [get_total_freezetime(etho) for i,etho in enumerate(ethograms[7:])]
shamcrossingtimes = [get_crossing_time(etho) for etho in ethograms[7:]]
shamcrossingtimes = [x for x in shamcrossingtimes if x >= 0]
shammeans = [np.mean(shamfreezetimes),nanmean(shamcrossingtimes)]
shamerr = [[0,0],[sem(shamfreezetimes),sem(shamcrossingtimes)]]

fig = plt.figure()
ax = fig.add_subplot(111)
barwidth = 0.35
ind = np.arange(2)
rects1 = plt.bar(ind,lesionmeans,barwidth,color='r',yerr=lesionerr,ecolor='k')
rects2 = plt.bar(ind+barwidth,shammeans,barwidth,color='g',yerr=shamerr,ecolor='k')
plt.xticks(ind+barwidth,('Freeze time', 'Crossing time'))
Esempio n. 52
0
def path3d(CL):
    """ This method will make a 3D plot of the flightpath of the plane during measurement.
    Colours correspond to the time tracking colours (colours may not work if using an old version of matplotlib)."""
    from mpl_toolkits.mplot3d import Axes3D
    
    plat=[i for i,x in enumerate(CL.dttl) if x == 'latitude'][0]
    lat=copy.deepcopy(CL.data[plat])
    plon=[i for i,x in enumerate(CL.dttl) if x == 'longitude'][0]
    lon=copy.deepcopy(CL.data[plon])
    palt=[i for i,x in enumerate(CL.dttl) if x == 'altitude'][0]
    # Not sure why we used to copy only the data part (and not the mask). If someone knows why, feel free to explain.
    #alt=copy.deepcopy(CL.data[palt].data)  #t=copy.deepcopy(CL.data[pt].data)
    alt=copy.deepcopy(CL.data[palt])
    pt=[i for i,x in enumerate(CL.dttl) if x == 'time'][0]
    t=copy.deepcopy(CL.data[pt])

    #FIND THE QUADRANT
    if st.nanmean(lat)>0:
        quad_NS = 'N'
    else:
        quad_NS = 'S'

    if st.nanmean(lon)>0:
        quad_EW = 'E'
    else:
        quad_EW = 'W'

    # This piece used to take out anomalously deviant altitude data. We now decided to leave it to the user to go and mask the bad data.
    #M=runstats(alt,20)
    #alt=np.ma.masked_where((alt>(M[0]+M[1]*1.)+(isnan(alt))), alt)
        
    norm = matplotlib.colors.Normalize(vmin=t[0],vmax=t[-1])

    fig = figure()
    ax = Axes3D(fig)
    majorFormatter_lon = FormatStrFormatter('%.2f '+quad_EW)
    majorFormatter_lat = FormatStrFormatter('%.2f '+quad_NS)
    try:
        if int(matplotlib.__version__[0])>0:
            ax.scatter(abs(lat),abs(lon),alt,lw=0,alpha=1,cmap='spectral',norm=norm,c=t)
            ax.view_init(28,145)
            ax.yaxis.set_major_formatter(majorFormatter_lon)
            ax.xaxis.set_major_formatter(majorFormatter_lat)
            if quad_EW == 'E':
                ax.set_ylim(ax.get_ylim()[::-1])
            if quad_NS == 'S':
                ax.set_xlim(ax.get_xlim()[::-1])
        else:       # old version of matplotlib that doesn't support the color tracker
            ax.scatter(abs(lat),abs(lon),alt,lw=0,alpha=1,cmap='spectral',norm=norm)
            ax.view_init(28,145)
            ax.yaxis.set_major_formatter(majorFormatter_lon)
            ax.xaxis.set_major_formatter(majorFormatter_lat)
            if quad_EW == 'E':
                ax.set_ylim(ax.get_ylim()[::-1])
            if quad_NS == 'S':
                ax.set_xlim(ax.get_xlim()[::-1])
    except: print("[path3d] Error evaluating your version of matplotlib.")
    ax.set_xlabel('Latitude')
    ax.set_ylabel('Longitude')
    ax.set_zlabel('Altitude')

    plt.ion()
    plt.show()
Esempio n. 53
0
def min_mean_max(a):
    """ b\{%s\} := Min, mean and max of %s """
    a = np.asarray(a, dtype="float")  # converts bool
    from scipy.stats.stats import nanmean

    return (np.nanmin(a), nanmean(a), np.nanmax(a))  # nanmax keeps the maximum NaN-aware like the other two
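
A quick usage sketch, plus an all-NumPy equivalent for environments where scipy.stats.stats.nanmean is no longer available:

import numpy as np

def min_mean_max_np(a):
    """Same statistics with current NumPy only (a minimal sketch)."""
    a = np.asarray(a, dtype=float)
    return (np.nanmin(a), np.nanmean(a), np.nanmax(a))

# min_mean_max_np([1.0, np.nan, 4.0]) -> (1.0, 2.5, 4.0)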
Esempio n. 54
0
def main(searchpath, outpath):
    """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually build a csv file for plotting the sum rate agains the power consumption.
    Input: search path, output path
        """

    data_types = 9 
    rate = 1e6 * 2.0 # 2 Mbps
    iterations = None
    sweep_values = [] # we do not know beforehand how much data we have
    depth = None

    # enum
    axis_index = 0
    sequential_index = 1
    random_each_iter_index = 4
    sinr_index = 6
    dtx_segregation = 8

    # initially, we only check how much data we have
    for dirname, dirnames, filenames in os.walk(searchpath):
        if depth is None:
            depth = len(dirnames) # worst case amount of data
        for subdirname in dirnames:
            for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*'):
                config = ConfigParser.RawConfigParser()
                config.read(filename)
                iterations = int(config.getfloat('General', 'iterations')) 
                sweep_values.append(int(config.getfloat('General', 'user_rate'))) 

    sweep_values = sorted(set(sweep_values))
    result = np.empty([iterations, len(sweep_values), data_types, depth])
    result[:] = np.nan
    count = np.zeros([len(sweep_values), data_types])

    # now start filling the result
    dep = 0
    for dirname, dirnames, filenames in os.walk(searchpath):
        for subdirname in dirnames:
            for filename in glob.glob(os.path.join(dirname, subdirname)+'/*settings*.cfg'):
                
                config = ConfigParser.RawConfigParser()
                config.read(filename)
                rate = int(config.getfloat('General', 'user_rate')) # sum rate
                sleep_alignment = config.get('General', 'sleep_alignment')

                # seqDTX sequential 
                if ('DTX' in filename) and (sleep_alignment == 'none'):
                    filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_individual.csv', delimiter=',')
                    index = sequential_index 
                    result[:, sweep_values.index(rate), index, dep] = filedata[1,1:]
                    count[sweep_values.index(rate), index] += 1

                # random each iter
                elif ('DTX' in filename) and (sleep_alignment == 'random_iter'): 
                    filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_individual.csv', delimiter=',')
                    index = random_each_iter_index
                    result[:, sweep_values.index(rate), index, dep] = filedata[1,1:]
                    count[sweep_values.index(rate), index] += 1

                # sinr ordering
                elif ('DTX' in filename) and (sleep_alignment == 'sinr'):
                    filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_individual.csv', delimiter=',')
                    index = sinr_index 
                    result[:, sweep_values.index(rate), index, dep] = filedata[1,1:]
                    count[sweep_values.index(rate), index] += 1

                # dtx segregation 
                elif ('DTX' in filename) and (sleep_alignment == 'dtx_segregation'):
                    filedata = np.genfromtxt(os.path.join(dirname, subdirname)+'/delivered_individual.csv', delimiter=',')
                    index = dtx_segregation 
                    result[:, sweep_values.index(rate), index, dep] = filedata[1,1:]
                    count[sweep_values.index(rate), index] += 1

                else:
                    print 'What is this folder?'+os.path.join(dirname, subdirname)+'/'+filename

                if (filedata[1,-1]>2*rate/10).any():
                    # find outliers
                    print filedata[1,-1], filename.split('/')[7]

                dep += 1
                
    ####
    result[np.where(result==0)]=np.nan # remove outage data
    ####

    result = nanmean(result, axis=3)

    for i, s in enumerate(sweep_values):
        target = outpath + '/delivered_rate_' + str(s) + '.csv'
        res = result[:, i, :]
        res[:,axis_index] = np.arange(1, iterations+1) # provide x-axis 
        res = res.T # expected format
        np.savetxt(target, res, delimiter=',')

    print count.T
Esempio n. 55
0
import scipy.stats.stats as st
m=st.nanmean(vec)
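
The same one-liner with current libraries (scipy.stats.nanmean was deprecated and later removed; numpy.nanmean is the drop-in replacement). vec is a hypothetical 1-D array:

import numpy as np

vec = np.array([1.0, np.nan, 3.0])
m = np.nanmean(vec)    # 2.0, whereas np.mean(vec) would return nan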
Esempio n. 56
0
        with open('volume all 394 hot data.csv', 'ab') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|')
            csv_writer.writerow(hot[beginning_time:end_time])

        with open('volume all 394 gp data.csv', 'ab') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',', quotechar='|')
            csv_writer.writerow(gp[beginning_time:end_time])

        #Append data
        hot_total_volume.append(hot[beginning_time:end_time])
        gp_total_volume.append(gp[beginning_time:end_time])

    date_1 += timedelta(days=1)

#Average volume at each time across all days
average_hot_volume = sci.nanmean(hot_total_volume, axis=0)
variance_hot_volume = sci.nanstd(hot_total_volume, axis=0)
average_gp_volume = sci.nanmean(gp_total_volume, axis=0)
variance_gp_volume = sci.nanstd(gp_total_volume, axis=0)

#Group into 3 minute increments (6 x 30 seconds)
k = 0
resolution = 6
while k in range(0, len(average_hot_volume)):
    average_hot_volume[k:k + resolution] = sum(
        average_hot_volume[k:k + resolution]) / resolution
    variance_hot_volume[k:k + resolution] = sum(
        variance_hot_volume[k:k + resolution]) / resolution
    average_gp_volume[k:k + resolution] = sum(
        average_gp_volume[k:k + resolution]) / resolution
    variance_gp_volume[k:k + resolution] = sum(
Esempio n. 57
0
def main(searchpath, outpath):
    """Search all directories in searchpath for settings files. Handle them according to the settings found in those files. Eventually build a csv file for plotting the sum rate agains the power consumption.
    Input: search path, output path
    There are the following types of data:
        - BA/SOTA
        - DTX/SOTA
        - PF-BA
        - PF-DTX
        - RAPS without sleep alignment
        - RAPS with random sleep alignment
        """

    data_types = 11
    rate = 1e6 * 2.0  # 2 Mbps
    sweep_values = []  # we do not know beforehand how much data we have
    depth = None  # worst case number of data points

    # enum
    axis_index = 0
    BA_index = 1
    seqDTX_noshift_index = 2
    seqDTX_rand_index = 3
    PF_ba_index = 4
    PF_dtx_index = 5
    RAPS_naive_index = 6
    RAPS_random_index = 7
    RAPS_PC_index = 8
    RAPS_sinr_index = 9
    RAPS_sinr_protect_index = 10

    # initially, we only check how much data we have
    for dirname, dirnames, filenames in os.walk(searchpath):
        if depth is None:
            depth = len(dirnames)
        for subdirname in dirnames:
            for filename in glob.glob(
                    os.path.join(dirname, subdirname) + '/*settings*'):
                config = ConfigParser.RawConfigParser()
                config.read(filename)
                sweep_values.append(
                    int(config.getfloat(
                        'General',
                        'user_rate')))  # TODO: handle rate*users automatically
                #sweep_values.append(config.getfloat('General', 'numcenterusers'))

    sweep_values = sorted(set(sweep_values))
    count = np.zeros([len(sweep_values), data_types])

    result = np.empty([len(sweep_values), data_types, depth])
    result[:] = np.nan

    # now start filling the result
    dep = 0
    for dirname, dirnames, filenames in os.walk(searchpath):
        for subdirname in dirnames:
            for filename in glob.glob(
                    os.path.join(dirname, subdirname) + '/*settings*.cfg'):
                config = ConfigParser.RawConfigParser()
                config.read(filename)
                #users = (config.getint('General', 'numcenterusers'))
                users = int(config.getfloat('General', 'user_rate'))
                sleep_alignment = config.get('General', 'sleep_alignment')
                initial_power = config.get('General', 'initial_power')
                pS = config.getint('General', 'pS')
                p0 = config.getint('General', 'p0')

                if initial_power == 'zero':
                    continue

                # PF_BA
                if 'PF_ba' in filename:
                    # there should be a file called resultPF.csv containing one line of
                    # comma separated entries. We only want the last one.
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultPF.csv',
                        delimiter=',')
                    index = PF_ba_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # PF_DTX
                elif 'PF_dtx' in filename:
                    # there should be a file called resultPF.csv containing one line of
                    # comma separated entries. We only want the last one.
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultPF.csv',
                        delimiter=',')
                    index = PF_dtx_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # RAPS_naive
                elif ('RAPS' in filename) and (sleep_alignment
                                               == 'none') and pS < p0:
                    # there should be a file called resultRAPS.csv containing one line of
                    # comma separated entries. We only want the last one.
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultRAPS.csv',
                        delimiter=',')
                    index = RAPS_naive_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultBA.csv',
                        delimiter=',')
                    index = BA_index
                    result[sweep_values.index(users), index, dep] = filedata

                # RAPS_random
                elif ('RAPS' in filename) and (sleep_alignment
                                               == 'random') and pS < p0:
                    # there should be a file called resultRAPS.csv containing one line of
                    # comma separated entries. We only want the last one.
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultRAPS.csv',
                        delimiter=',')
                    index = RAPS_random_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultBA.csv',
                        delimiter=',')
                    index = BA_index
                    result[sweep_values.index(users), index, dep] = filedata

                # RAPS_PC
                elif ('RAPS' in filename) and pS > p0:
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultRAPS.csv',
                        delimiter=',')
                    index = RAPS_PC_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultBA.csv',
                        delimiter=',')
                    index = BA_index
                    result[sweep_values.index(users), index, dep] = filedata

                # seqDTX no shift
                elif ('DTX' in filename) and (sleep_alignment == 'none'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = seqDTX_noshift_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # seqDTX shift
                elif ('DTX' in filename) and (sleep_alignment == 'random'):
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultDTX.csv',
                        delimiter=',')
                    index = seqDTX_rand_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                # RAPS with sinr based sleep selection
                elif ('RAPS' in filename) and (sleep_alignment
                                               == 'sinr') and pS < p0:
                    # there should be a file called resultRAPS.csv containing one line of
                    # comma separated entries. We only want the last one.
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultRAPS.csv',
                        delimiter=',')
                    index = RAPS_sinr_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultBA.csv',
                        delimiter=',')
                    index = BA_index
                    result[sweep_values.index(users), index, dep] = filedata

                # RAPS with sinr based sleep selection and protection
                elif ('RAPS' in filename) and (sleep_alignment
                                               == 'sinr_protect') and pS < p0:
                    # there should be a file called resultRAPS.csv containing one line of
                    # comma separated entries. We only want the last one.
                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultRAPS.csv',
                        delimiter=',')
                    index = RAPS_sinr_protect_index
                    result[sweep_values.index(users), index,
                           dep] = filedata[1, -1]
                    if not np.isnan(filedata[1, -1]) and not filedata[1,
                                                                      -1] == 0:
                        count[sweep_values.index(users), index] += 1

                    filedata = np.genfromtxt(
                        os.path.join(dirname, subdirname) + '/resultBA.csv',
                        delimiter=',')
                    index = BA_index
                    result[sweep_values.index(users), index, dep] = filedata

                else:
                    print 'What is this folder?' + os.path.join(
                        dirname, subdirname) + '/' + filename

                dep += 1

    result = nanmean(result, axis=2)
    result[:, axis_index] = np.array(
        sweep_values) * rate  # provide x-axis in Mbps
    result = result.T  # expected format

    np.savetxt(outpath + '/sumrate.csv', result, delimiter=',')
    #    print 'Count PF_ba: ' + str(count_PF_ba)
    #    print 'Count PF_dtx: ' + str(count_PF_dtx)
    #    print 'Count RAPS_old: ' + str(count_RAPS_naive)
    #    print 'Count RAPS_random: ' + str(count_RAPS_random)
    #    print 'Count RAPS_PC: ' + str(count_RAPS_PC)
    #    print 'Count seqDTX_rand: ' + str(count_seqDTX_rand)
    #    print 'Count seqDTX_noshift: ' + str(count_seqDTX_noshift)
    print count.T