Example #1
    def stats(self):
        res = {}
        for val in self.data.iteritems():
            stat_report = {}
            full_series = map(lambda x: x, val[1])
            full_series = full_series[(len(full_series) / 4):] # Cut off first quarter to get more reliable data
            full_series_sorted = full_series
            full_series_sorted.sort()
            steady_series = full_series[int(len(full_series) * 0.7):]
            stat_report['mean'] = stats.mean(full_series)
            try:
                stat_report['stdev'] = stats.stdev(full_series)
            except ZeroDivisionError:
                stat_report['stdev'] = 0
            stat_report['upper_0.1_percentile'] = self.percentile(full_series_sorted, 0.999)
            stat_report['lower_0.1_percentile'] = self.percentile(full_series_sorted, 0.001)
            stat_report['upper_1_percentile'] = self.percentile(full_series_sorted, 0.99)
            stat_report['lower_1_percentile'] = self.percentile(full_series_sorted, 0.01)
            stat_report['upper_5_percentile'] = self.percentile(full_series_sorted, 0.95)
            stat_report['lower_5_percentile'] = self.percentile(full_series_sorted, 0.05)
            stat_report['steady_mean'] = stats.mean(steady_series)
            try:
                stat_report['steady_stdev'] = stats.stdev(full_series)
            except ZeroDivisionError:
                stat_report['steady_stdev'] = 0
            res[val[0]] = stat_report

        return res
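The self.percentile helper called above is not shown on this page; the sketch below is a hypothetical nearest-rank lookup over an already-sorted list, which is one plausible reading of how the call sites pass full_series_sorted together with a fraction between 0 and 1.

def percentile(sorted_series, fraction):
    """Nearest-rank value at the given fraction (0.0-1.0) of a sorted list."""
    if not sorted_series:
        raise ValueError("empty series")
    index = int(fraction * len(sorted_series))
    return sorted_series[min(index, len(sorted_series) - 1)]  # clamp fraction 1.0 to the last element

print(percentile(sorted([5, 1, 9, 3, 7]), 0.95))  # -> 9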
Example #2
def getRMSEstd(res, nFolds):
    """
    Method for calculating the std of RMSE of nFolds in a crossvalidation (returned).
    res is the object containing the results from orngTest methods such as crossValidation.
    """

    # Initialize a list to contain lists of errors for each fold.
    errorList = []
    for idx in range(nFolds):
        errorList.append([])

    # ex contains info on the fold number, prediction and actual responses for each example used in the CV
    # Append ex error to correct fold list
    for ex in res.results:
        error = (ex.classes[0] - ex.actualClass)**2
        errorList[ex.iterationNumber].append(error)

    # RMSE of the different folds
    RMSElist = []
    for idx in range(nFolds):
        average = sum(errorList[idx]) / len(errorList[idx])
        RMSElist.append(math.sqrt(average))
    RMSEstd = stats.stdev(RMSElist)
    RMSEmean = statc.mean(RMSElist)
    if verbose > 0:
        print str(RMSEmean) + "\t" + str(RMSEstd) + "\t" + string.join(
            [str(x) for x in RMSElist], "\t")
    return RMSEstd, RMSElist
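A self-contained sketch of the same per-fold RMSE computation, written against the Python 3 standard library (statistics in place of statlib.stats and statc); the fold data below is made up for illustration.

import math
import statistics

def rmse_per_fold(squared_errors_by_fold):
    """One list of squared errors per fold; returns (stdev of fold RMSEs, fold RMSEs)."""
    rmse_list = [math.sqrt(sum(errs) / len(errs)) for errs in squared_errors_by_fold]
    return statistics.stdev(rmse_list), rmse_list

folds = [[0.04, 0.09, 0.16], [0.01, 0.25, 0.04], [0.09, 0.01, 0.36]]
rmse_std, rmse_list = rmse_per_fold(folds)
print(rmse_std, rmse_list)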
Example #3
def get_mean_times(data, buildtype, include_outliers):
    return_data = defaultdict(lambda: defaultdict(float))
    all_times = reduce(lambda x,y: x+y, reduce(lambda x,y: x+y, map(lambda d: d.values(), data.values())))
    overall_mean = stats.mean(all_times)
    overall_stdev = stats.stdev(all_times)
    for (date, dateval) in data.iteritems():
        typedict = {}
        for (type, times) in dateval.iteritems():
            mean = stats.mean(times)
            if not include_outliers and len(times) > 1:
                included_values = []
                for time in times:
                    if abs(time - overall_mean) < 1.5*overall_stdev:
                        included_values.append(time)
                if len(included_values) > 0:
                    mean = stats.mean(included_values)
                else:
                    mean = None
            typedict[type] = mean
        if buildtype == "maximum" and max(typedict.values()):
            return_data[date] = max(typedict.values())
        elif typedict.get(buildtype):
            return_data[date] = typedict.get(buildtype, 0)

    return return_data
Example #4
    def set_quality_mean_and_variation_per_position(self):
        self.read_all_quality_strings_by_position()
        self.means = [mean(self.quality_string2score(s)) for s in self.quality_strings]
        self.stdevs = [stdev(self.quality_string2score(s)) for s in self.quality_strings]

        # release the memory for quality_strings
        self.quality_strings = None
Example #5
def main():
    (opts, args) = parser.parse_args()
    if len(args) != 1:
        print "\nPlease, select channel: 0/1\n"
        parser.print_help()
    else:
        if opts.verbose:
            verbose = True
            print "connecting to ", opts.ip_addr
            print "port number", opts.ip_port
        message = args[0]
        message = sendReceive(opts.ip_addr, opts.ip_port, opts.timeout,
                              message, opts.verbose)
        #print "String:"
        #print message
        #print "Hex:"
        hexMessage = b2a_hex(message)
        #print hexMessage
        print "No. bytes: ", len(message)
        print "Done!"

        #Unpacking message
        msg_len = len(message)
        num = unpack(str(msg_len) + 'B', message)
        #print num
        num = map(lambda x: x - 128, num)
        #print num
        print "Maximum: ", max(num), "Minimum: ", min(
            num), "Mean: ", stats.mean(
                num), "Standard deviation: ", stats.stdev(
                    num), "Variance: ", stats.var(num)
Example #6
def main():
    global qtimes,qerr
    reps = int(sys.argv[1])
    
    for j in range(reps):
        jobs = [gevent.spawn(get_url, url) for url in urls]
        #print("Size of jobs is {n}".format(n=len(jobs)))
        gevent.joinall(jobs, timeout=30)
    
    if not qerr.empty():
        qerr.put(StopIteration)
        for err in qerr:
            print(err)
           
    print("jobs size {s}".format(s=len(jobs)))
    print("qstart size {n}".format(n=qstart.qsize()))
    print("qtimes size {n}".format(n=qtimes.qsize()))
    qtimes.put(StopIteration)
    times = []
    for item in qtimes:
        times.append(item)
        
    print("Min {t}".format(t=min(times)))
    print("Max {t}".format(t=max(times)))
    print("Mean {t}".format(t=stats.mean(times)))
    print("StdDev {t}".format(t=stats.stdev(times)))
Example #7
def getRMSEstd(res, nFolds):
    """
    Method for calculating the std of RMSE of nFolds in a crossvalidation (returned).
    res is the object containing the results from orngTest methods such as crossValidation.
    """

    # Initialize a list to contain lists of errors for each fold.
    errorList = []
    for idx in range(nFolds):
        errorList.append([])

    # ex contains info on the fold number, prediction and actual responses for each example used in the CV
    # Append ex error to correct fold list
    for ex in res.results:
         error = (ex.classes[0]- ex.actualClass)**2
         errorList[ex.iterationNumber].append(error)

    # RMSE of the different folds
    RMSElist = []
    for idx in range(nFolds):
        average =  sum(errorList[idx])/len(errorList[idx])
        RMSElist.append(math.sqrt(average))
    RMSEstd = stats.stdev(RMSElist)
    RMSEmean = statc.mean(RMSElist)
    if verbose > 0: print str(RMSEmean)+"\t"+str(RMSEstd)+"\t"+string.join( [str(x) for x in RMSElist], "\t")
    return RMSEstd, RMSElist
Example #8
def main():
    '''
    this is the main section

    {kernel: {machine : {[results]} } }
    '''
    scales = {}
    data = send(template())
    for kernel, results in data.iteritems():
        for testid, run_data in results.iteritems():
            machine = run_data.pop(0)
            print "\n%s" % (kernel,)
            for i in range(len(run_data)):
                for single_bench in run_data[i].itervalues():
                    if type(single_bench) == dict:
                        value = [float(x) for x in
                                single_bench.get("RawString").split(":")]
                        val = stats.mean(value)
                        if len(value) > 1:
                            std = stats.stdev(value)
                        else:
                            std = float(0)
                        name = single_bench.get("Name")
                        attr = single_bench.get("Attributes")
                        scale = single_bench.get("Scale")
                        print "%s: %s (%s): %.2f %s (std %.2f)" % (machine,
                                name, attr, val, scale, std)
                        scales[scale] = scales.get(scale, "")
    print scales
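A minimal Python 3 sketch of the RawString handling above: a colon-separated string of readings (invented here) is parsed to floats, then the mean and, when more than one value is present, the sample standard deviation are computed.

import statistics

raw_string = "12.1:11.8:12.4:12.0"   # hypothetical benchmark output
values = [float(x) for x in raw_string.split(":")]
val = statistics.mean(values)
std = statistics.stdev(values) if len(values) > 1 else 0.0
print("%.2f (std %.2f)" % (val, std))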
Example #9
    def histogram(self, out_fname, large = False):
        assert self.data

        # Hacky workaround in the case that a run had no data.
        for x in self.data.values():
            if len(x) == 0: x.extend([0,0])

        if not large:
            font = fm.FontProperties(family=['sans-serif'],size='small',fname=FONT_FILE)
            mpl.rcParams['xtick.major.pad'] = 4
            mpl.rcParams['ytick.major.pad'] = 4
            mpl.rcParams['lines.linewidth'] = 1
        else:
            font = fm.FontProperties(family=['sans-serif'],size=36,fname=FONT_FILE)
            mpl.rcParams['xtick.major.pad'] = 20
            mpl.rcParams['ytick.major.pad'] = 20
            mpl.rcParams['lines.linewidth'] = 5

        fig = plt.figure()
        # Set the margins for the plot to ensure a minimum of whitespace
        ax = plt.axes([0.12,0.12,0.85,0.85])

        data = map(lambda x: x[1], self.data.iteritems())
        mean = stats.mean(map(lambda x: x, reduce(lambda x, y: x + y, data)))
        stdev = stats.stdev(map(lambda x: x, reduce(lambda x, y: x + y, data)))
        labels = []
        hists = []
        for series, color in zip(self.data.iteritems(), colors):
            clipped_data = clip(series[1], 0, 3 * mean)
            if clipped_data:
                _, _, foo = ax.hist(clipped_data, bins=200, histtype='bar', facecolor = color, alpha = .5, label = series[0])
                hists.append(foo)
                labels.append(series[0])
            else:
                print "Tried to make a histogram of a series of size 0"

        for tick in ax.xaxis.get_major_ticks():
            tick.label1.set_fontproperties(font)
        for tick in ax.yaxis.get_major_ticks():
            tick.label1.set_fontproperties(font)

        ax.set_ylabel('Frequency', fontproperties = font)
        ax.set_xlabel('Latency (microseconds)', fontproperties = font) #simply should not be hardcoded but we want nice pictures now
        ax.grid(True)
        # Dirty hack to get around legend miscoloring: drop all the hists generated into the legend one by one
        if hists:
            plt.legend(map(lambda x: x[0], hists), labels, loc=1, prop = font)
        
        if not large:
            fig.set_size_inches(5,3.7)
            fig.set_dpi(90)
            plt.savefig(out_fname, bbox_inches="tight")
        else:
            ax.yaxis.LABELPAD = 40
            ax.xaxis.LABELPAD = 40
            fig.set_size_inches(20,14.8)
            fig.set_dpi(300)
            plt.savefig(out_fname, bbox_inches="tight")
Example #10
    def set_quality_mean_and_variation_per_position(self):
        self.read_all_quality_strings_by_position()
        self.means = [mean(self.quality_string2score(s))
                      for s in self.quality_strings]
        self.stdevs = [stdev(self.quality_string2score(s))
                       for s in self.quality_strings]

        # release the memory for quality_strings
        self.quality_strings = None
Example #11
def weighSample():
    listOfValues = []
    weight = float(0.0)
    count = 0
    kcount = 0
    averageWeight = 0.0
    stdevWeight = 0.0
    statustext = "Weighing sample"
    STATUS.set(statustext)
    statusWindow.update()
    a = []
    ACOUNT.set(0)
    MCOUNT.set(0)
    AVERAGEWEIGHT.set(0.0)
    STDDEVWEIGHT.set(0.0)
    STATUS.set("")
    statusWindow.update()
    weightArray = []
    stopCheck = STOPPED.get()
    averageWeight = 0.0
    stdevWeight = 0.0
    while STOPPED.get() < 1:
        statusWindow.update()
        result = []
        weight = readStandardBalance()
        #print "WEIGHT: ", weight
        ACOUNT.set(ACOUNT.get() + 1)
        statusWindow.update()
        if weight is FALSE:
            pass
        elif weight > 0.0:
            count += 1
            weightArray.append(weight)
            if (STOPPED.get() < 1 ):
                if count > 4:
                    averageWeight = stats.mean(weightArray)
                    stdevWeight = stats.stdev(weightArray)
                MCOUNT.set(count)
                if count < 5:
                    statustext = " Count: %d the average weight of sample is <need at least 5 measurements>" % (count)
                else:
                    statustext = "Count: %d the average weight of sample is: %f with stdev of: %f" % (
                        count, averageWeight, stdevWeight)
                STATUS.set(statustext)
                AVERAGEWEIGHT.set(averageWeight)
                STDDEVWEIGHT.set(stdevWeight)
                statusWindow.update()
                stopCheck = STOPPED.get()
        else:
            is_there_a_sample()

        sleep(1)
    NEXTSTEP.set(1)
    AVERAGEWEIGHT.set(averageWeight)
    STDDEVWEIGHT.set(stdevWeight)
    MCOUNT.set(count)
    return count, averageWeight, stdevWeight
Example #12
def letters_between_spaces(str):
    #count the average word size
    words = str.split()
    wordCount = len(words)
    ch = []
    for word in words:
        ch.append(len(word)) 
    letterCountAverage =  stats.mean(ch)
    letterCountStdDev = stats.stdev(ch)
    return None, {'letterAve/StdDev':letterCountAverage / letterCountStdDev}
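Example #13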
def getmeanstdev(dist, center):

    # find the mean and standard deviation of the distance of each tract in a district from its centroid

    buildlist = []
    for bginfo in attributes:

        if attributes[bginfo][13] == dist:

            buildlist.append(sqrt((center[0] - attributes[bginfo][1][0])**2 + (center[1] - attributes[bginfo][1][1])**2))

    return stats.mean(buildlist), stats.stdev(buildlist)
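A self-contained variant of the centroid-distance statistic above, with a small made-up point list replacing the global attributes table and the statistics module replacing statlib.stats.

import statistics
from math import sqrt

def mean_stdev_distance(points, center):
    # Distance of each point from the centroid, then sample mean and stdev.
    dists = [sqrt((center[0] - x) ** 2 + (center[1] - y) ** 2) for x, y in points]
    return statistics.mean(dists), statistics.stdev(dists)

points = [(0.0, 3.0), (4.0, 0.0), (0.0, -5.0), (-6.0, 0.0)]
print(mean_stdev_distance(points, (0.0, 0.0)))  # distances 3, 4, 5, 6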
Example #14
def getmeanstdev(dist, center):

    # find the mean and standard deviation of the distance of each tract in a district from its centroid

    buildlist = []
    for bginfo in attributes:

        if attributes[bginfo][13] == dist:

            buildlist.append(
                sqrt((center[0] - attributes[bginfo][1][0])**2 +
                     (center[1] - attributes[bginfo][1][1])**2))

    return stats.mean(buildlist), stats.stdev(buildlist)
Example #15
def main():
    if len(sys.argv) < 3:
        print "Usage!"
        exit(1)

    database = sys.argv[2]
    points = sys.argv[1]

    boxData = csv.reader(open(points, "rb"))

    boxes = []

    for row in boxData:
        if len(row) == 4:
            boxes.append({"time": long(row[0]), "size": float(row[1]), "position": (int(row[2]), int(row[3]))})
        else:
            finishTime = long(row[0].split(":")[1])

    conn = sqlite3.connect(database)
    cur = conn.cursor()
    cur.execute(
        'select datetime,data from clues where kind="touch" and datetime between %d and %d'
        % (boxes[0]["time"], finishTime)
    )

    touches = []
    for touch in cur:
        time = long(touch[0])
        data = touch[1].split(",")
        touches.append({"time": long(touch[0]), "position": (int(data[0]), int(data[1])), "pressure": float(data[2])})

    timesForSize = {30: [], 60: [], 99: [], 129: []}
    mmsizes = {30: 3, 60: 6, 99: 10, 129: 13}

    deltas = []
    for i, box in enumerate(boxes[:-1]):
        delta = (boxes[i + 1]["time"] - box["time"]) / 1000.0
        timesForSize[box["size"]].append(delta)
        deltas.append(delta)
    deltas.append((finishTime - boxes[-1]["time"]) / 1000.0)
    for k, v in sorted(timesForSize.iteritems()):
        print "%d: %.3f/%.3f/%.3f/%.3f (%.3f bps)" % (
            mmsizes[k],
            stats.mean(v),
            min(v),
            max(v),
            stats.stdev(v),
            1 / stats.mean(v),
        )
Example #16
    def stats(self):
        res = {}
        for val in self.data.iteritems():
            stat_report = {}
            full_series = map(lambda x: x, val[1])
            full_series = full_series[(
                len(full_series) /
                4):]  # Cut off first quarter to get more reliable data
            full_series_sorted = full_series
            full_series_sorted.sort()
            steady_series = full_series[int(len(full_series) * 0.7):]
            stat_report['mean'] = stats.mean(full_series)
            try:
                stat_report['stdev'] = stats.stdev(full_series)
            except ZeroDivisionError:
                stat_report['stdev'] = 0
            stat_report['upper_0.1_percentile'] = self.percentile(
                full_series_sorted, 0.999)
            stat_report['lower_0.1_percentile'] = self.percentile(
                full_series_sorted, 0.001)
            stat_report['upper_1_percentile'] = self.percentile(
                full_series_sorted, 0.99)
            stat_report['lower_1_percentile'] = self.percentile(
                full_series_sorted, 0.01)
            stat_report['upper_5_percentile'] = self.percentile(
                full_series_sorted, 0.95)
            stat_report['lower_5_percentile'] = self.percentile(
                full_series_sorted, 0.05)
            stat_report['steady_mean'] = stats.mean(steady_series)
            try:
                stat_report['steady_stdev'] = stats.stdev(full_series)
            except ZeroDivisionError:
                stat_report['steady_stdev'] = 0
            res[val[0]] = stat_report

        return res
Example #17
def RMSE_obsolete(res=None):
    """
    Calculates the Root Mean Squared Error of orngTest.ExperimentResults in res
    The results res must be from a regressor
    """
    # If Called without arguments, return the type of problems this method can be used for:
    # 1 - Classification problems (Discrete Class)
    # 2 - Regression problems (Continuous Class)
    # 3 - Both Regression and Classification problems (Continuous or Discrete Class)
    if res == None:
        return {"type": REGRESSION}

    if res.numberOfIterations > 1:
        MSEs = [[0.0] * res.numberOfIterations
                for i in range(res.numberOfLearners)]
        nIter = [0] * res.numberOfIterations
        for tex in res.results:
            ac = float(tex.actualClass)
            nIter[tex.iterationNumber] += 1
            for i, cls in enumerate(tex.classes):
                MSEs[i][tex.iterationNumber] += (float(cls) - ac)**2
        MSEs = [[x / ni for x, ni in zip(y, nIter)] for y in MSEs]
        MSEs = [[math.sqrt(x) for x in y] for y in MSEs]

        # Print output from each fold to temp file
        RMSEfoldList = MSEs
        RMSE = [statc.mean(x) for x in RMSEfoldList]
        RMSEstd = stats.stdev(RMSEfoldList[0])
        #print str(RMSE[0])+"\t"+str(RMSEstd)+"\t"+string.join( [str(x) for x in RMSEfoldList[0]] , "\t")

        return [round(statc.mean(x), 2) for x in MSEs]

    else:
        MSEs = [0.0] * res.numberOfLearners
        for tex in res.results:
            MSEs = map(lambda res, cls, ac=float(tex.actualClass): res +
                       (float(cls) - ac)**2,
                       MSEs,
                       tex.classes)

        MSEs = [x / (len(res.results)) for x in MSEs]
        return [round(math.sqrt(x), 2) for x in MSEs]
Example #18
def CA_obsolete(res=None, returnFoldStat=False):
    """
    Calculates the classification Accuracy of orngTest.ExperimentResults in res
    The results res must be from a classifier
    """
    # If Called without arguments, return the type of problems this method can be used for:
    # 1 - Classification problems (Discrete Class)
    # 2 - Regression problems (Continuous Class)
    # 3 - Both Regression and Classification problems (Continuous or Discrete Class)
    if res == None:
        return {"type": CLASSIFICATION}

    if res.numberOfIterations > 1:
        CAs = [[0.0] * res.numberOfIterations
               for i in range(res.numberOfLearners)]
        nIter = [0] * res.numberOfIterations
        for tex in res.results:
            ac = tex.actualClass
            nIter[tex.iterationNumber] += 1
            for i, cls in enumerate(tex.classes):
                if cls == ac:
                    CAs[i][tex.iterationNumber] += 1
        CAs = [[x / ni for x, ni in zip(y, nIter)] for y in CAs]

        CAfoldList = CAs
        CA = [statc.mean(x) for x in CAs]
        CAstd = stats.stdev(CAfoldList[0])

        if returnFoldStat:
            return [round(statc.mean(x), 3) for x in CAs], CAfoldList
        else:
            return [round(statc.mean(x), 3) for x in CAs]

    else:
        CAs = [0.0] * res.numberOfLearners
        for tex in res.results:
            CAs = map(lambda res, cls, ac=tex.actualClass: res + types.IntType(
                cls == ac),
                      CAs,
                      tex.classes)
        return [round(x / (len(res.results)), 3) for x in CAs]
Example #19
 def printStats(self):
     """
     Print the stats for each of the hammers, and do some on the fly general stats
     """
     allTimes = []
     print "----------------------------\n Per Hammer stats:\n----------------------------"
     #print out the stats for each hammer
     i = 0
     data = (datetime.timedelta(0), 0)
     for history in self.histories:
         point = self._printStat(i, history, allTimes)
         data = (data[0] + point[0], data[1] + point[1])
         print '\n'
         i += 1
         
     #print out general statistics
     print "------------------------------\n General Stats:\n------------------------------"
     print 'Total writes:\t', data[1]
     print 'Total time spent writing:\t', str(data[0])
     print 'Mean time spent writing:\t', str(data[0] / data[1])
     #We could add more information here about the stats, now that all the times are gathered
     print 'Standard deviation:\t\t', stats.stdev(allTimes)
Example #20
def RMSE_obsolete(res = None):
    """
    Calculates the Root Mean Squared Error of orngTest.ExperimentResults in res
    The results res must be from a regressor
    """
    # If Called without arguments, return the type of problems this method can be used for: 
    # 1 - Classification problems (Discrete Class)
    # 2 - Regression problems (Continuous Class)
    # 3 - Both Regression and Classification problems (Continuous or Discrete Class)
    if res == None:
        return {"type":REGRESSION}

    if res.numberOfIterations > 1:
        MSEs = [[0.0] * res.numberOfIterations for i in range(res.numberOfLearners)]
        nIter = [0]*res.numberOfIterations
        for tex in res.results:
            ac = float(tex.actualClass)
            nIter[tex.iterationNumber] += 1
            for i, cls in enumerate(tex.classes):
                MSEs[i][tex.iterationNumber] += (float(cls) - ac)**2
        MSEs = [[x/ni for x, ni in zip(y, nIter)] for y in MSEs]
        MSEs = [[math.sqrt(x) for x in y] for y in MSEs]

        # Print output from each fold to temp file
        RMSEfoldList = MSEs
        RMSE = [statc.mean(x) for x in RMSEfoldList]
        RMSEstd = stats.stdev(RMSEfoldList[0])
        #print str(RMSE[0])+"\t"+str(RMSEstd)+"\t"+string.join( [str(x) for x in RMSEfoldList[0]] , "\t")

        return [round(statc.mean(x),2) for x in MSEs]

    else:
        MSEs = [0.0]*res.numberOfLearners
        for tex in res.results:
            MSEs = map(lambda res, cls, ac = float(tex.actualClass):
                       res + (float(cls) - ac)**2, MSEs, tex.classes)

        MSEs = [x/(len(res.results)) for x in MSEs]
        return [round(math.sqrt(x),2)  for x in MSEs]
Example #21
def CA_obsolete(res = None, returnFoldStat = False):
    """
    Calculates the classification Accuracy of orngTest.ExperimentResults in res
    The results res must be from a classifier
    """
    # If Called without arguments, return the type of problems this method can be used for: 
    # 1 - Classification problems (Discrete Class)
    # 2 - Regression problems (Continuous Class)
    # 3 - Both Regression and Classification problems (Continuous or Discrete Class)
    if res == None:
        return {"type":CLASSIFICATION}

    if res.numberOfIterations > 1:
        CAs = [[0.0] * res.numberOfIterations for i in range(res.numberOfLearners)]
        nIter = [0]*res.numberOfIterations
        for tex in res.results:
            ac = tex.actualClass
            nIter[tex.iterationNumber] += 1
            for i, cls in enumerate(tex.classes):
                if cls == ac:
                    CAs[i][tex.iterationNumber] += 1
        CAs = [[x/ni for x, ni in zip(y, nIter)] for y in CAs]

        CAfoldList = CAs
        CA = [statc.mean(x) for x in CAs]
        CAstd = stats.stdev(CAfoldList[0])

        if returnFoldStat:
            return [round(statc.mean(x),3) for x in CAs], CAfoldList
        else:
            return [round(statc.mean(x),3) for x in CAs]

    else:
        CAs = [0.0]*res.numberOfLearners
        for tex in res.results:
            CAs = map(lambda res, cls, ac = tex.actualClass:
                       res + types.IntType(cls == ac), CAs, tex.classes)
        return [round(x/(len(res.results)),3) for x in CAs]
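Example #22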
def final():
    i = 0
    iteracoes = zip(*totIteracoes)
    print ' '
    print ' '
    print '******************************************************************'
    print 'Media e desvio padrao das iteracoes:'
    for prec in precs:
        print '------------------------------------------------------------------'
        print 'Precisao:    %d' % prec
        print 'Media:       %d' % stats.mean(iteracoes[i])
        print 'Maximo:      %d' % max(iteracoes[i])
        print 'Minimo:      %d' % min(iteracoes[i])
        print 'Desvio:      %d' % stats.stdev(iteracoes[i])
        i+=1
    print '------------------------------------------------------------------'

    ticks = range(1,i+1)
    pylab.boxplot(iteracoes)
    pylab.title('Distribuicao das Iteracoes')
    pylab.xlabel('Precisao da raiz em casas decimais')
    pylab.ylabel('Quantidade de Iteracoes')
    pylab.xticks(ticks,precs)
    pylab.show()
Example #23
def main():
  if (len(sys.argv) < 3):
    print "Usage!"
    exit(1)
  
  points = sys.argv[1]
  database = sys.argv[2]
  
  boxData = csv.reader(open(points, 'rb'))

  boxes = []

  for row in boxData:
    if (len(row) == 4):
      boxes.append({"time":long(row[0]), "size":float(row[1]), "position":(int(row[2]),int(row[3]))})
    else:
      finishTime = long(row[0].split(':')[1])
  
  conn = sqlite3.connect(database)
  cur = conn.cursor()
  cur.execute('select datetime,data from clues where kind="touch" and datetime between %d and %d' % (boxes[0]['time'],finishTime))

  touches = []
  for touch in cur:
    time = long(touch[0])
    data = touch[1].split(',')
    touches.append({"time":long(touch[0]), "position":(int(data[0]),int(data[1])), "pressure":float(data[2])})

  timesForSize = {30:[], 60:[], 99:[], 129:[]}

  deltas = []
  for i,box in enumerate(boxes[:-1]):
    delta = (boxes[i+1]['time'] - box['time'])/1000.0
    timesForSize[box['size']].append(delta)
    deltas.append(delta)
  deltas.append((finishTime - boxes[-1]['time'])/1000.0)
  minimum = min(deltas)
  maximum = max(deltas)
  mean = stats.mean(deltas)
  stddev = stats.stdev(deltas)
  for k,v in sorted(timesForSize.iteritems()):
    print "%d: %.3f/%.3f/%.3f/%.3f (%.3f bps)" % (k, min(v), stats.mean(v), max(v), stats.stdev(v), 1/stats.mean(v))
  print "Avg: %.3f/%.3f/%.3f/%.3f (%.3f boxes per second)" % (minimum, mean, maximum, stddev, 1/mean)

  boxesWithTouches = []
  
  for i,box in enumerate(boxes[:-1]):
    time = box['time']
    nextTime = boxes[i+1]['time']
    def f(x): return x['time'] > time and x['time'] < nextTime
    associatedTouches = filter(f, touches)
    boxesWithTouches.append({'size': box['size'], 'position': box['position'], 'touches':associatedTouches})

  mags = []
  magsPerSize = []
  sizes = [30, 60, 99, 129]
  
  for buttonSize in sizes:
    def sizeOfBox(t): return t['size'] == buttonSize
    boxes = filter(sizeOfBox, boxesWithTouches)
    for boxWithTouch in boxes:
      center = boxWithTouch['position']
      for touch in boxWithTouch['touches']:
        tapPos = touch['position']
        deltaX = center[0] - tapPos[0]
        deltaY = center[1]  - tapPos[1]
        mags.append(math.sqrt(pow(deltaX, 2) + pow(deltaY, 2)))
      magsPerSize = magsPerSize + mags
      mags = []
    magsPerSize = []
Example #24
def mean_stdev(vals, null_val=-1):
    vals2 = [i for i in vals if i != null_val]
    return int(round(mean(vals2), 0)), stdev(vals2), len(vals2)
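Hypothetical usage of the same null-filtering helper, with statistics.mean and statistics.stdev standing in for the unqualified mean/stdev the snippet imports elsewhere.

from statistics import mean, stdev

def mean_stdev(vals, null_val=-1):
    vals2 = [i for i in vals if i != null_val]
    return int(round(mean(vals2), 0)), stdev(vals2), len(vals2)

print(mean_stdev([10, 12, -1, 14, -1, 16]))  # -> (13, ~2.58, 4)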
Example #25
def adc_var ( ip_addr, ip_port, timeout, channel, packetNumber, buffer_size, verbose):
  
  s_adc = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
  s_adc.settimeout( int(timeout) )
  # try connection
  try:
    s_adc.connect( (ip_addr, int( ip_port ) ) )
  except:
    print "adc_var(): Error, no ADC connection, port: ", ip_port,
    s_adc.close()
    sys.exit()

  # open connection
  message = sendReceive ( s_adc, channel, packetNumber, 
                          buffer_size, verbose ) 
  # close connection
  s_adc.close()
                          
  # unpacking message
  msg_len = len(message)
  if msg_len > 0:
    num = unpack(str(msg_len)+'B', message)
    #  add offset, [0 255] -> [-128 128]
    num = map(lambda x: x-128, num)
    if verbose:
      print "Min=", min(num), "Max=", max(num), "Mean=", stats.mean(num), "Var=", stats.var(num),  "Std=", stats.stdev(num)
    var = stats.var(num)
  else:
    var = "N/A"
  return var
Example #26
#if qerrors.qsize():
    #for err in qerrors:
        #print(err)
        
 #if qstart.qsize():
    #print("START: {e}".format(e=qstart.qsize()))
        
print("Build times")
bstart = time.time()
times = []    
#i =0
if qtimes.qsize():
    qtimes.put(StopIteration)
    #print(qtimes.qsize())
    for item in qtimes:
        #i += 1
        #print(i, item)
        times.append(item)
        
# The len of times[] indicates the number or successful responses.
# The len should equal max_connections * iterations from the command line.
print("Time spent building time[]: {t} len={l}".format(t=time.time() - bstart, l=len(times)))
print("Round trip time:")
print("Min {t}".format(t=min(times)))
print("Max {t}".format(t=max(times)))
if sys.version[0] == "2":
    print("Mean {t}".format(t=stats.mean(times)))
    print("StdDev {t}".format(t=stats.stdev(times)))
#print("Invalid responses={i}".format(i=invalid_responses))
      
Example #27
print '\nVARIATION'
print 'obrientransform:'

l = range(1,21)
a = N.array(l)
ll = [l]*5
aa = N.array(ll)

print stats.obrientransform(l,l,l,l,l)
print stats.obrientransform(a,a,a,a,a)

print 'samplevar:',stats.samplevar(l),stats.samplevar(a)
print 'samplestdev:',stats.samplestdev(l),stats.samplestdev(a)
print 'var:',stats.var(l),stats.var(a)
print 'stdev:',stats.stdev(l),stats.stdev(a)
print 'sterr:',stats.sterr(l),stats.sterr(a)
print 'sem:',stats.sem(l),stats.sem(a)
print 'z:',stats.z(l,4),stats.z(a,4)
print 'zs:'
print stats.zs(l)
print stats.zs(a)

print '\nTRIMMING'
print 'trimboth:'
print stats.trimboth(l,.2)
print stats.trimboth(lf,.2)
print stats.trimboth(a,.2)
print stats.trimboth(af,.2)
print 'trim1:'
print stats.trim1(l,.2)
Example #28
def standard_deviation(values):
    try:
        return stats.stdev(values)
    except ZeroDivisionError:
        return None
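Example #29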
def compare_test_durations(tree1, revision1, tree2, revision2, submitter):
  revision_count = 10

  if revision1:
    control_revision = revision1
  else:
    if DEBUG:
      print 'finding the most recent changeset with all tests completed'
    control_revision = get_range_of_recent_commits(find_most_recent_completed_commit(),
                                                   count=revision_count)
    if not control_revision:
      return None

  if DEBUG:
    print 'getting durations from ES for changeset', control_revision
  if isinstance(control_revision, list):
    control = get_median_duration_for_ES_commit_list(control_revision)
  else:
    control = [get_durations_for_ES_commit(control_revision)]

  if tree2 == 'try':
    trylogs = get_list_of_try_logs('%s-%s' % (submitter, revision2))

    if DEBUG:
      print "parsing try logs"
    test = get_durations_from_trylogs(trylogs)

  elif tree2 == 'mozilla-central':
    if not revision2:
      revision2 = find_most_recent_completed_commit()
    test = get_durations_for_ES_commit(revision2)

  else:
    raise Exception("Unsupported tree %s" % tree2)

  results = defaultdict(lambda: defaultdict(list))
  totals = [0 for x in range(0, len(control))]
  test_total = 0

  for plat in test:
    test_suites = test.get(plat, {})

    for suite in test_suites:
      testtime = int(test[plat][suite])
      timelist = [int(x.get(plat, {}).get(suite)) for x in control if x.get(plat, {}).get(suite)]
      results[plat][suite] = {'mean': stats.mean(timelist),
                              'stdev': stats.stdev(timelist) if len(timelist) > 1 else 0,
                              'testtime': testtime
                             }
      totallist = [int(x.get(plat, {}).get(suite)) if x.get(plat, {}).get(suite) else testtime for x in control]
      totals = [y + totals[x] for x,y in enumerate(totallist)]
      test_total += testtime

  return { 'durations': results,
           'totals': {
             'mean': stats.mean(totals),
             'stdev': stats.stdev(totals) if len(totals) > 1 else 0,
             'testtime': test_total
           },
           'revisions': [
            { 'tree': 'mozilla-central',
              'revision': control_revision },
            { 'tree': tree2,
              'revision': revision2 }
           ]}
Example #30
    output = "Pathway analysis. Result dir: %s. Performed at %s" % (RESULT_DIR, str(datetime.now()))
    output += "\n1. Statistics\n"
    output += "----------------------------------------------------------------------------------------------------------------------------\n"
    output += "                                             Interaction_component_counts            CoulombEnergy_Contribution             \n"
    output += " Box Sum_of_interacting_atoms CloseContact Hydrophobic HydrogenBond Electrostatic   mean      stddev                        \n"
    output += "----------------------------------------------------------------------------------------------------------------------------\n"
    pattern1= " %s  %4s                      %s           %s          %s           %s              %10.3f    %10.3f                        \n"
    for i in range(len(p)):
        elec_energies = p[i].statistics['coulomb_energies']
        output += pattern1 % (i+1, p[i].statistics['sum'],
                                    p[i].statistics['closecontact'],
                                    p[i].statistics['hydrophobic'],
                                    p[i].statistics['hydrogen'],
                                    p[i].statistics['electrostatic'],
                                    (stats.mean(elec_energies),0.0)[len(elec_energies) == 0],
                                    (stats.stdev(elec_energies),0.0)[len(elec_energies) == 0]
                              )
    OFILE = open('pathway_analysis.txt','w')
    OFILE.write(output)

    output  = "\n2.Details. List of residues with high-frequency (>= 40%) of interactions in each box\n"
    output += "---------------------------------------------------------------------------------------------\n"
    output += " Box Residue                 Interaction_frequency                            Receptor_states\n"
    output += "               CloseContact Hydrophobic HydrogenBond Electrostatic All                       \n"
    output += "---------------------------------------------------------------------------------------------\n" 
    pattern2= " %s  %s        %7.3f        %7.3f       %7.3f        %7.3f         %7.3f      %s\n"
    residue_set = [] 
    for i in range(len(p)):
        residue_set.append( [] )
        for res in p[i].all_residues:
            if p[i].frequency(res) >= 0.4:
Example #31
File: fish.py Project: ardubs/fish
def main():
	
	townSize = 100
	duration = 100
	
	if debugMode == 1:
		iterations = 1
		trials = 1
	else:
		iterations = 100
		trials = 35
	
	fPercentBase = 0
	fPercentDelta = 0
	
	tPercentBase = 0
	tPercentDelta = 1
	
	deathHungerBase = 2
	deathHungerDelta = 0
	
	maxHarvestBase = 5
	maxHarvestDelta = 0
	
	previousMean = 0
	previousStDev = 0
		
	for iteration in range(iterations):
		fPercent = fPercentBase + (fPercentDelta * iteration)
		tPercent = tPercentBase + (tPercentDelta * iteration)
		deathHunger = deathHungerBase + (deathHungerDelta * iteration)
		maxHarvest = maxHarvestBase + (maxHarvestDelta * iteration)
			
		numFishers = int(fPercent * (townSize / 100))
		numTeachers = int(tPercent * (townSize / 100))
	
		survivingPops = []
		
		for trial in range(trials):
			Canvas = []
	
			# Create first two towns
			Canvas.append(Town())
	
			for i in range(numFishers):
				Canvas[0].Home.add(Man(i, 1))
			if numTeachers > 1:
				for i in range(numTeachers):
					Canvas[0].Home.add(Man(i, 2))
			for i in range(townSize - numFishers - numTeachers):
				Canvas[0].Home.add(Man(i, 0))
					
			for day in range(duration):	
		
				Canvas[day].morningCommute()
		
				Canvas[day].stockMarket()
		
				Canvas[day].sellMarket()
		
				Canvas[day].fishingDay(maxHarvest)
		
				Canvas[day].returnHome()
		
				if debugMode == 1: print "Before deathHunger on day " + str(day) + ", home population is " + str(len(Canvas[day].Home)) + " and there are " + str(len(Canvas[day].Market)) + " people hungry in the market"
		
				Canvas[day].assessHunger(deathHunger)
							
				if debugMode == 1: print "At the end of day " + str(day) + ", home population is " + str(len(Canvas[day].Home)) + " and there are " + str(len(Canvas[day].Market)) + " people hungry in the market"
		
				Canvas.append(Town())
				Canvas[day + 1].clone(Canvas[day])
		
			survivingPops.append(len(Canvas[day].Home))
	
		if displayPops == 1: print survivingPops
		
		currentMean = stats.mean(survivingPops)
		currentStDev = stats.stdev(survivingPops)
		
		if displayPops == 1:
			print str(currentMean) + ", " + str(currentStDev)
		else:
			print str(currentMean) + ", " 
		
		previousMean = currentMean
		previousStDev = currentStDev
Example #32
def main(argv):
    global sqlitefile
    global csvfilename
    csvfilename = None  # need to set this to some sane default because ensureValidTestID() won't work without it
    try:
        sqlitefile = 'resultdb.sqlite'
        opts, args = getopt.getopt(argv, 'hf:e:', ('help', 'db=', 'csv='))
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
        elif opt in ('-f', '--db='):
            sqlitefile = arg
            ensureValidTestID()
        elif opt in ('-e', '--csv='):
            csvfilename = arg
            ensureValidTestID()
            writecsv(csvfilename, sometestid)
            sys.exit(0)


# ---- INIT ----
#    dbconn = sqlite3.connect(sqlitefile)
#    dbconn.row_factory = sqlite3.Row
#    c = dbconn.cursor()
    getSQLiteConn()
    myquery = 'SELECT test_id, date, hits, up, time, data, resptime, trans, bw, concur, ok, fail, long, short FROM siege WHERE test_id LIKE \'' + str(
        sometestid) + '%\''

    #todo = ['hits', 'up', 'time', 'data', 'resptime', 'trans', 'bw', 'concur', 'ok', 'fail', 'long', 'short']
    floats = ('up', 'time', 'data', 'resptime', 'trans', 'bw', 'concur',
              'long', 'short')
    ints = ('hits', 'ok', 'fail')
    # todo is a dict with each key as the col name and each val as a list containing all the vals
    """
    Siege example return and var types
    (u'mysql-1-1', u'1303101006.08', u'6830', u'85.88', u'352.12', u'48.83', u'5.05', u'19.40', u'0.14', u'97.88', u'6840', u'1123', u'15.41', u'0.56')
    str(test_id)
    int(date)
    int(hits)
    float(up)
    float(time)
    float(data)
    float(resptime)
    float(trans)
    float(bw)
    float(concur)
    int(ok)
    int(fail)
    float(long)
    float(short)
    """

    mydict = {}

    # create empty lists
    for i in floats + ints:
        mydict[i] = []
    # we can now just map keys in our dict to results in the DB query since the names are 1:1

    data = c.execute(myquery)
    numrows = c.fetchone()
    if numrows is None:
        print "No tests found when using test ID selector:", sometestid
        print "Displaying a list of test IDs in", sqlitefile
        for row in c.execute('select test_id from siege'):
            print row[0]
        print '(For help / usage info, see', sys.argv[0], '--help)'
        sys.exit(2)
    for row in data:
        for key in floats:
            mydict[key].append(float(row[key]))
        for key in ints:
            mydict[key].append(int(row[key]))

    meandict = {}
    mediandict = {}
    stdevdict = {}
    for key, val in mydict.iteritems():
        meandict[key] = stats.mean(val)
        stdevdict[key] = stats.stdev(val)
        mediandict[key] = getMedian(val)

    print 'Test ID selector: ' + sometestid
    print "Raw dump of dataset to parse: "
    print mydict
    print '\r\nMean: '
    print meandict

    print '\r\nMedian: '
    print mediandict

    print '\r\nStandard Deviation: '
    print stdevdict

    # select test_id, datetime(date, 'unixepoch') from siege where test_id = 'mysql-1-1';
    #print mydict['trans']
    # ---- MAIN ----
    return
Example #33
print '\nVARIATION'
print 'obrientransform:'

l = range(1, 21)
a = N.array(l)
ll = [l] * 5
aa = N.array(ll)

print stats.obrientransform(l, l, l, l, l)
print stats.obrientransform(a, a, a, a, a)

print 'samplevar:', stats.samplevar(l), stats.samplevar(a)
print 'samplestdev:', stats.samplestdev(l), stats.samplestdev(a)
print 'var:', stats.var(l), stats.var(a)
print 'stdev:', stats.stdev(l), stats.stdev(a)
print 'sterr:', stats.sterr(l), stats.sterr(a)
print 'sem:', stats.sem(l), stats.sem(a)
print 'z:', stats.z(l, 4), stats.z(a, 4)
print 'zs:'
print stats.zs(l)
print stats.zs(a)

print '\nTRIMMING'
print 'trimboth:'
print stats.trimboth(l, .2)
print stats.trimboth(lf, .2)
print stats.trimboth(a, .2)
print stats.trimboth(af, .2)
print 'trim1:'
print stats.trim1(l, .2)
Example #34
    output = "Pathway analysis. Result dir: %s. Performed at %s" % (RESULT_DIR, str(datetime.now()))
    output += "\n1. Statistics\n"
    output += "----------------------------------------------------------------------------------------------------------------------------\n"
    output += "                                             Interaction_component_counts            CoulombEnergy_Contribution             \n"
    output += " Box Sum_of_interacting_atoms CloseContact Hydrophobic HydrogenBond Electrostatic   mean      stddev                        \n"
    output += "----------------------------------------------------------------------------------------------------------------------------\n"
    pattern1= " %s  %4s                      %s           %s          %s           %s              %10.3f    %10.3f                        \n"
    for i in range(len(p)):
        elec_energies = p[i].statistics['coulomb_energies']
        output += pattern1 % (i+1, p[i].statistics['sum'],
                                    p[i].statistics['closecontact'],
                                    p[i].statistics['hydrophobic'],
                                    p[i].statistics['hydrogen'],
                                    p[i].statistics['electrostatic'],
                                    (stats.mean(elec_energies),0.0)[len(elec_energies) == 0],
                                    (stats.stdev(elec_energies),0.0)[len(elec_energies) == 0]
                              )
    OFILE = open('pathway_analysis.txt','w')
    OFILE.write(output)

    output  = "\n2.Details. List of residues with high-frequency (>= 40%) of interactions in each box\n"
    output += "---------------------------------------------------------------------------------------------\n"
    output += " Box Residue                 Interaction_frequency                            Receptor_states\n"
    output += "               CloseContact Hydrophobic HydrogenBond Electrostatic All                       \n"
    output += "---------------------------------------------------------------------------------------------\n" 
    pattern2= " %s  %s        %7.3f        %7.3f       %7.3f        %7.3f         %7.3f      %s\n"
    residue_set = [] 
    for i in range(len(p)):
        residue_set.append( [] )
        for res in p[i].all_residues:
            if p[i].frequency(res) >= 0.3:
Example #35
 def test_stdev(self):
     "Testing stdev"
     
     data = [ self.L, self.A ]
     for d in data:
         self.EQ( stats.stdev( d ), 5.9160797831 )
Example #36
def cull_outliers(data, n_sigma):
    mean = stats.mean(map(lambda x: x, data))
    sigma = stats.stdev(map(lambda x: x, data))
    return filter(lambda x: abs(x - mean) < n_sigma * sigma, data)
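Hypothetical usage of the same n-sigma cull, rewritten against the standard-library statistics module instead of statlib.stats; the sample list is made up.

import statistics

def cull_outliers(data, n_sigma):
    mean = statistics.mean(data)
    sigma = statistics.stdev(data)
    return [x for x in data if abs(x - mean) < n_sigma * sigma]

samples = [10.1, 9.9, 10.0, 10.2, 9.8, 35.0]   # one obvious outlier
print(cull_outliers(samples, 1.5))             # drops 35.0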
Example #37
def main():
  if (len(sys.argv) < 3):
    print "Usage!"
    exit(1)
  
  database = sys.argv[2]
  points = sys.argv[1]
  
  boxData = list(csv.reader(open(points, 'rb')))

  boxes = [{"time":long(row[0]), "size":float(row[1]), "position":(int(row[2]),int(row[3]))} for row in boxData[:-1]]
  finishTime = long(boxData[-1][0].split(':')[1])
    
  conn = sqlite3.connect(database)
  cur = conn.cursor()
  cur.execute('select datetime,data from clues where kind="touch" and datetime between %d and %d' % (boxes[0]['time'],finishTime))

  touches = [{'time':long(t[0]), 'position':(int(t[1].split(',')[0]), int(t[1].split(',')[1])), 'pressure':float(t[1].split(',')[2])} for t in cur]
  
  boxesWithTouches = []
  for i,box in enumerate(boxes[:-1]):
    time = box['time']
    nextTime = boxes[i+1]['time']
    def f(x): return x['time'] > time and x['time'] < nextTime
    associatedTouches = filter(f, touches)
    numTaps = 1
    for j,touch in enumerate(associatedTouches[:-1]):
      currentTouchTime = touch['time']
      nextTouchTime = associatedTouches[j+1]['time']
      if (nextTouchTime - currentTouchTime) > 100:
        numTaps += 1
    boxesWithTouches.append({'size': box['size'], 'position': box['position'], 'touches':associatedTouches, 'attempts':numTaps})
    numTaps = 0
  
  # Finish up with the last box
  box = boxes[-1]
  time = boxes[-1]['time']
  nextTime = finishTime
  def f(x): return x['time'] > time and x['time'] < nextTime
  associatedTouches = filter(f, touches)
  numTaps = 1
  for j,touch in enumerate(associatedTouches[:-1]):
    currentTouchTime = touch['time']
    nextTouchTime = associatedTouches[j+1]['time']
    if (nextTouchTime - currentTouchTime) > 100:
      numTaps += 1
  boxesWithTouches.append({'size': box['size'], 'position': box['position'], 'touches':associatedTouches, 'attempts':numTaps})  
  
  sizes = [30, 60, 99, 129]
  mmsizes = [3,6,10,13]

  for buttonSize in sizes:
    def sizeOfBox(t): return t['size'] == buttonSize
    filteredBoxes = filter(sizeOfBox, boxesWithTouches)
    listOfAttempts = [box['attempts'] for box in filteredBoxes]
    print "%d: %.3f/%.3f/%.3f/%.3f" % (mmsizes[sizes.index(buttonSize)], stats.mean(listOfAttempts), min(listOfAttempts), max(listOfAttempts), stats.stdev(listOfAttempts))
Example #38
#!/usr/bin/python

import sys
import re
from statlib import stats

if __name__ == '__main__':
  a = []
  for i in xrange(501):
    a.append([])
  m = re.compile("(?P<l>\d+)\s(?P<m>\d+)")
  L = sys.stdin.readlines()
  for l in L:
    s = m.match(l)
    length = int(s.group("l"))
    length -= length%10 +5
    a[length].append(int(s.group("m"))) 
    
  b = []
  for i in xrange(200,501):
    if (a[i] != []):
      print i, stats.mean(a[i]), stats.stdev(a[i])
#      b.append(len(a[i]))
      
#  print stats.mean(b), stats.stdev(b)

Example #39
def cull_outliers(data, n_sigma):
    mean = stats.mean(map(lambda x: x, data))
    sigma  = stats.stdev(map(lambda x: x, data))
    return filter(lambda x: abs(x - mean) < n_sigma * sigma, data)
Example #40
    def test_stdev(self):
        "Testing stdev"

        data = [self.L, self.A]
        for d in data:
            self.EQ(stats.stdev(d), 5.9160797831)
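A quick stand-alone check of the expected value: for the integers 1 through 20 (which the fixtures self.L / self.A appear to hold, matching the range(1, 21) used elsewhere on this page), the sample standard deviation is sqrt(35) ≈ 5.9160797831.

import statistics

print(statistics.stdev(range(1, 21)))  # 5.916079783099616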
Example #41
def mean_stdev(vals, null_val=-1):
   vals2 = [i for i in vals if i != null_val]
   return int(round(mean(vals2), 0)), stdev(vals2), len(vals2)
Example #42
    if delta_flag:
        return delta


if __name__ == "__main__":
    start_date = datetime.strptime(raw_input("Start date: "), '%Y%m%d')
    day_range = int(raw_input("Day Range: "))
    sample_days = int(raw_input("Sample days: "))
    company_list = CompanyList(raw_input("Company List: "))
    stocks_per_day = int(raw_input("Stocks per day: "))
    percents_correct = [
        float(p) for p in raw_input("Percents correct: ").split(",")
    ]

    results = {}

    for trial in range(len(percents_correct)):
        deltas = run_experiment(company_list=company_list,
                                sample_days=sample_days,
                                start_date=start_date,
                                day_range=day_range,
                                percent_correct=percents_correct[trial],
                                stocks_per_day=stocks_per_day)
        print "Percent correct: %f, %s" % (percents_correct[trial],
                                           str(deltas))
        print "\t (%f, %f)" % (stats.mean(deltas), stats.stdev(deltas))
        results[percents_correct[trial]] = deltas

    print results
Example #43
def main(argv):
    global sqlitefile
    global csvfilename
    csvfilename = None # need to set this to some sane default because ensureValidTestID() won't work without it
    try:
        sqlitefile = 'resultdb.sqlite'
        opts, args = getopt.getopt(argv, 'hf:e:', ('help', 'db=', 'csv='))
    except getopt.GetoptError:
        usage()
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
        elif opt in ('-f', '--db='):
            sqlitefile = arg
            ensureValidTestID()
        elif opt in ('-e','--csv='):
            csvfilename = arg
            ensureValidTestID()
            writecsv(csvfilename, sometestid)
            sys.exit(0)
# ---- INIT ----
#    dbconn = sqlite3.connect(sqlitefile)
#    dbconn.row_factory = sqlite3.Row
#    c = dbconn.cursor()
    getSQLiteConn()
    myquery = 'SELECT test_id, date, hits, up, time, data, resptime, trans, bw, concur, ok, fail, long, short FROM siege WHERE test_id LIKE \'' + str(sometestid) + '%\''

    #todo = ['hits', 'up', 'time', 'data', 'resptime', 'trans', 'bw', 'concur', 'ok', 'fail', 'long', 'short']
    floats = ('up', 'time', 'data', 'resptime', 'trans', 'bw', 'concur', 'long', 'short')
    ints = ('hits', 'ok', 'fail')
    # todo is a dict with each key as the col name and each val as a list containing all the vals

    """
    Siege example return and var types
    (u'mysql-1-1', u'1303101006.08', u'6830', u'85.88', u'352.12', u'48.83', u'5.05', u'19.40', u'0.14', u'97.88', u'6840', u'1123', u'15.41', u'0.56')
    str(test_id)
    int(date)
    int(hits)
    float(up)
    float(time)
    float(data)
    float(resptime)
    float(trans)
    float(bw)
    float(concur)
    int(ok)
    int(fail)
    float(long)
    float(short)
    """

    mydict = {}

    # create empty lists
    for i in floats + ints:
        mydict[i] = []
    # we can now just map keys in our dict to results in the DB query since the names are 1:1

    data = c.execute(myquery)
    numrows = c.fetchone()
    if numrows is None:
        print "No tests found when using test ID selector:", sometestid
        print "Displaying a list of test IDs in", sqlitefile
        for row in c.execute('select test_id from siege'):
            print row[0]
        print '(For help / usage info, see', sys.argv[0], '--help)'
        sys.exit(2)
    for row in data:
        for key in floats:
            mydict[key].append(float(row[key]))
        for key in ints:
            mydict[key].append(int(row[key]))

    meandict = {}
    mediandict = {}
    stdevdict = {}
    for key, val in mydict.iteritems():
        meandict[key] = stats.mean(val)
        stdevdict[key] = stats.stdev(val)
        mediandict[key] = getMedian(val)

    print 'Test ID selector: ' + sometestid
    print "Raw dump of dataset to parse: "
    print mydict
    print '\r\nMean: '
    print meandict

    print '\r\nMedian: '
    print mediandict

    print '\r\nStandard Deviation: '
    print stdevdict

    # select test_id, datetime(date, 'unixepoch') from siege where test_id = 'mysql-1-1';
    #print mydict['trans']
    # ---- MAIN ----
    return
Example #44
    def histogram(self, out_fname, large=False):
        assert self.data

        # Hacky workaround in the case that a run had no data.
        for x in self.data.values():
            if len(x) == 0: x.extend([0, 0])

        if not large:
            font = fm.FontProperties(family=['sans-serif'],
                                     size='small',
                                     fname=FONT_FILE)
            mpl.rcParams['xtick.major.pad'] = 4
            mpl.rcParams['ytick.major.pad'] = 4
            mpl.rcParams['lines.linewidth'] = 1
        else:
            font = fm.FontProperties(family=['sans-serif'],
                                     size=36,
                                     fname=FONT_FILE)
            mpl.rcParams['xtick.major.pad'] = 20
            mpl.rcParams['ytick.major.pad'] = 20
            mpl.rcParams['lines.linewidth'] = 5

        fig = plt.figure()
        # Set the margins for the plot to ensure a minimum of whitespace
        ax = plt.axes([0.12, 0.12, 0.85, 0.85])

        data = map(lambda x: x[1], self.data.iteritems())
        mean = stats.mean(map(lambda x: x, reduce(lambda x, y: x + y, data)))
        stdev = stats.stdev(map(lambda x: x, reduce(lambda x, y: x + y, data)))
        labels = []
        hists = []
        for series, color in zip(self.data.iteritems(), colors):
            clipped_data = clip(series[1], 0, 3 * mean)
            if clipped_data:
                _, _, foo = ax.hist(clipped_data,
                                    bins=200,
                                    histtype='bar',
                                    facecolor=color,
                                    alpha=.5,
                                    label=series[0])
                hists.append(foo)
                labels.append(series[0])
            else:
                print "Tried to make a histogram of a series of size 0"

        for tick in ax.xaxis.get_major_ticks():
            tick.label1.set_fontproperties(font)
        for tick in ax.yaxis.get_major_ticks():
            tick.label1.set_fontproperties(font)

        ax.set_ylabel('Frequency', fontproperties=font)
        ax.set_xlabel(
            'Latency (microseconds)', fontproperties=font
        )  #simply should not be hardcoded but we want nice pictures now
        ax.grid(True)
        # Dirty hack to get around legend miscoloring: drop all the hists generated into the legend one by one
        if hists:
            plt.legend(map(lambda x: x[0], hists), labels, loc=1, prop=font)

        if not large:
            fig.set_size_inches(5, 3.7)
            fig.set_dpi(90)
            plt.savefig(out_fname, bbox_inches="tight")
        else:
            ax.yaxis.LABELPAD = 40
            ax.xaxis.LABELPAD = 40
            fig.set_size_inches(20, 14.8)
            fig.set_dpi(300)
            plt.savefig(out_fname, bbox_inches="tight")
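Example #45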
 def evaluate(self, *args, **params):
     return _stats.stdev(*args, **params)
Example #46
print('\nVARIATION')
print('obrientransform:')

l = list(range(1, 21))
a = N.array(l)
ll = [l] * 5
aa = N.array(ll)

print(stats.obrientransform(l, l, l, l, l))
print(stats.obrientransform(a, a, a, a, a))

print('samplevar:', stats.samplevar(l), stats.samplevar(a))
print('samplestdev:', stats.samplestdev(l), stats.samplestdev(a))
print('var:', stats.var(l), stats.var(a))
print('stdev:', stats.stdev(l), stats.stdev(a))
print('sterr:', stats.sterr(l), stats.sterr(a))
print('sem:', stats.sem(l), stats.sem(a))
print('z:', stats.z(l, 4), stats.z(a, 4))
print('zs:')
print(stats.zs(l))
print(stats.zs(a))

print('\nTRIMMING')
print('trimboth:')
print(stats.trimboth(l, .2))
print(stats.trimboth(lf, .2))
print(stats.trimboth(a, .2))
print(stats.trimboth(af, .2))
print('trim1:')
print(stats.trim1(l, .2))
Example #47
# run in wt/visual-cluster
from statlib import stats
OFILE = open("stats.txt",'w')
for i in range(1,8):
    BEs = []
    IFILE = open("visual_cluster-%s.pdb" % i, 'r')
    for l in IFILE:
        values = l.split()
        BEs.append(float(values[9]) / 0.7) # if given BEs are weighted
    OFILE.write("visual_cluster-%s: %s, stddev %s, lower %s, upper %s, min %s, max %s, median %s \n" % (i,stats.mean(BEs), 
stats.stdev(BEs), 
stats.scoreatpercentile(BEs,25), 
stats.scoreatpercentile(BEs,75),
min(BEs), max(BEs),
stats.median(BEs) ))
OFILE.close()
    
Example #48
 def stdev(self):
     return stats.stdev(self.deltas)
Example #49
    output = "Pathway analysis. Result dir: %s. Performed at %s" % (
        RESULT_DIR, str(datetime.now()))
    output += "\n1. Statistics\n"
    output += "----------------------------------------------------------------------------------------------------------------------------\n"
    output += "                                             Interaction_component_counts            CoulombEnergy_Contribution             \n"
    output += " Box Sum_of_interacting_atoms CloseContact Hydrophobic HydrogenBond Electrostatic   mean      stddev                        \n"
    output += "----------------------------------------------------------------------------------------------------------------------------\n"
    pattern1 = " %s  %4s                      %s           %s          %s           %s              %10.3f    %10.3f                        \n"
    for i in range(len(p)):
        elec_energies = p[i].statistics['coulomb_energies']
        output += pattern1 % (
            i + 1, p[i].statistics['sum'], p[i].statistics['closecontact'],
            p[i].statistics['hydrophobic'], p[i].statistics['hydrogen'],
            p[i].statistics['electrostatic'],
            (stats.mean(elec_energies), 0.0)[len(elec_energies) == 0],
            (stats.stdev(elec_energies), 0.0)[len(elec_energies) == 0])
    OFILE = open('pathway_analysis.txt', 'w')
    OFILE.write(output)

    output = "\n2.Details. List of residues with high-frequency (>= 40%) of interactions in each box\n"
    output += "---------------------------------------------------------------------------------------------\n"
    output += " Box Residue                 Interaction_frequency                            Receptor_states\n"
    output += "               CloseContact Hydrophobic HydrogenBond Electrostatic All                       \n"
    output += "---------------------------------------------------------------------------------------------\n"
    pattern2 = " %s  %s        %7.3f        %7.3f       %7.3f        %7.3f         %7.3f      %s\n"
    residue_set = []
    for i in range(len(p)):
        residue_set.append([])
        for res in p[i].all_residues:
            if p[i].frequency(res) >= 0.4:
                # get list of receptors by specified residue
Example #50
def standard_deviation(values):
    try:
        return stats.stdev(values)
    except ZeroDivisionError:
        return None
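A comparable guard written against the Python 3 statistics module; note that statistics.stdev raises StatisticsError rather than ZeroDivisionError when fewer than two values are supplied, so the except clause changes accordingly.

import statistics

def standard_deviation(values):
    try:
        return statistics.stdev(values)
    except statistics.StatisticsError:
        return None

print(standard_deviation([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]))  # ~2.14
print(standard_deviation([42]))                                      # None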
Example #51
 def stdev(self):
     return stats.stdev(self.deltas)