示例#1
0
def request_handler(request):
    """Return the standard deviation of one pooled sensor stream.

    request['values']['k'] selects the stream:
      1 -> temperature (scaled C -> F), 2 -> humidity,
      3 -> light (x100),                4 -> moisture (x100).
    Returns None for any other k; raises KeyError if 'k' is missing.
    """
    URL = "http://608dev.net/sandbox/sc/garciag/all_dataHandler.py"
    r = requests.get(url=URL)
    # SECURITY: eval() on a network response executes arbitrary code.
    # This should be ast.literal_eval / json.loads — flagged but left
    # as-is because the upstream payload format is not visible here.
    data = eval(r.text)

    # data[0:3] temperature, [3:6] humidity, [6:9] light, [9:12] moisture;
    # each slot is itself a list of readings, so flatten each group of 3.
    temp = [v for readings in data[0:3] for v in readings]
    humid = [v for readings in data[3:6] for v in readings]
    light = [v for readings in data[6:9] for v in readings]
    moist = [v for readings in data[9:12] for v in readings]

    k = int(request['values']['k'])

    # BUG FIX: the stdlib statistics module has no `std`; use the sample
    # standard deviation `stdev` instead.
    if k == 1:
        # NOTE(review): adding 32 to a *spread* looks wrong (offsets cancel
        # in a standard deviation) but preserves the original behavior —
        # confirm intent with the author.
        return round(statistics.stdev(temp) * 1.8 + 32, 2)
    elif k == 2:
        return round(statistics.stdev(humid), 2)
    elif k == 3:
        return round(statistics.stdev(light) * 100, 2)
    elif k == 4:
        return round(statistics.stdev(moist) * 100, 2)
示例#2
0
    def print_table(self):
        """Print the benchmark results accumulated in self._table.

        Each row holds (project, compiler, times) where `times` is either
        the string 'FAIL' or a list of run times in seconds.
        NOTE: Python 2 code (print statements, backtick repr).
        """
        import time, os, sys
        import statistics

        # if nothing was run, skip it
        if not len(self._table):
            return
        # NOTE(review): this bare string is a no-op statement, not a
        # docstring — it sits after executable code.
        """Print out in a nice tabular form"""
        print """
                       ========================
                       distcc benchmark results
                       ========================

"""
        print "Date: ", time.ctime()
        # Backticks are Python-2 shorthand for repr().
        print "DISTCC_HOSTS: %s" % ` os.getenv('DISTCC_HOSTS') `
        sys.stdout.flush()
        # Show machine info next to the results.
        os.system("uname -a")

        # Column headers: project, compiler, mean time, standard deviation.
        print "%-20s  %-30s   %8s  %8s" % ('project', 'compiler', 'time',
                                           's.d.')

        for row in self._table:
            # Trailing comma keeps the stats on the same output line.
            print "%-20s  %-30s " % row[:2],
            times = row[2]
            if times == 'FAIL':
                print '%9s' % 'FAIL'
            else:
                mean = statistics.mean(times)
                # NOTE(review): `statistics` is presumably distcc's own
                # helper module (its std/mean can return None); the Python 3
                # stdlib module has no `std` — confirm.
                sd = statistics.std(times)
                print "%8.4fs" % mean,
                if sd is None:
                    print "%9s" % "n/a"
                else:
                    print "%8.4fs" % sd
示例#3
0
File: Summary.py  Project: aosm/distcc
    def print_table(self):
        """Print self._table as a fixed-width results table.

        Rows are (project, compiler, times); `times` is 'FAIL' or a list
        of run times in seconds. Python 2 code (print statements).
        """
        import time, os, sys
        import statistics

        # if nothing was run, skip it
        if not len(self._table):
            return        

        # NOTE(review): bare string below is a no-op statement, not a
        # docstring (it follows executable code).
        """Print out in a nice tabular form"""
        print """
                       ========================
                       distcc benchmark results
                       ========================

"""
        print "Date: ", time.ctime()
        # Backticks are Python-2 shorthand for repr().
        print "DISTCC_HOSTS: %s" % `os.getenv('DISTCC_HOSTS')`
        sys.stdout.flush()
        # Append machine information to the report.
        os.system("uname -a")

        # Header row: project, compiler, mean time, standard deviation.
        print "%-20s  %-30s   %8s  %8s" % ('project', 'compiler', 'time', 's.d.')

        for row in self._table:
            # Trailing comma: keep the numbers on the same output line.
            print "%-20s  %-30s " % row[:2],
            times = row[2]
            if times == 'FAIL':
                print '%9s' % 'FAIL'
            else:
                mean = statistics.mean(times)
                # NOTE(review): presumably a project-local statistics
                # module whose std() may return None — the Python 3 stdlib
                # module has no `std`; confirm.
                sd = statistics.std(times)
                print "%8.4fs" % mean,
                if sd is None:
                    print "%9s" % "n/a"
                else:
                    print "%8.4fs" % sd
 def fit(self, X, y):
     """Fit a single-feature Gaussian naive-Bayes model: per-class prior,
     mean, and population standard deviation.

     NOTE(review): the final `locals()` dump stores *every* local on the
     instance — including X, y, xx, yy and even self itself — presumably
     only p, mu and sd are actually needed downstream; confirm before
     cleaning this up.
     """
     from statistics import pstdev as std
     # Distinct class labels, de-duplicated and sorted.
     classes = sorted(frozenset(y))
     yy = tuple(y)
     # Flatten X into a tuple of scalars (assumes a single feature —
     # TODO confirm callers never pass multi-column X).
     xx = tuple(X.ravel())
     # Class priors: relative frequency of each label.
     p = {c: yy.count(c) / len(yy) for c in classes}
     # Per-class mean of the feature values.
     mu = {
         c: sum(x for x, y in zip(xx, yy) if y == c) / yy.count(c)
         for c in classes
     }
     # Per-class population standard deviation.
     sd = {c: std(x for x, y in zip(xx, yy) if y == c) for c in classes}
     # Stash all locals on the instance (see NOTE in the docstring).
     [self.__dict__.update(locals())]
示例#5
0
 def print_mean_and_sd(times, unit='s', no_sd=False):
     """Print mean and (unless no_sd) standard deviation of `times`,
     formatted for a fixed-width table. Python 2 print statements; the
     trailing commas suppress newlines so output stays on one row.

     unit must be a single character appended after each number.
     NOTE(review): `sd_space` is not defined in this function —
     presumably a module-level constant; confirm against the full file.
     """
     assert len(unit) == 1, unit
     mean = statistics.mean(times)
     sd = statistics.std(times)
     # NOTE(review): mean/std returning None suggests a project-local
     # statistics module, not the Python 3 stdlib one — confirm.
     if mean is None:
         print "%s%s  " % ("n/a", sd_space),
     else:
         print "%8.1f%s " % (mean, unit),
     if not no_sd:
         if sd is None:
             print "%9s " % "n/a",
         else:
             print "%8.1f%s " % (sd, unit),
示例#6
0
 def print_mean_and_sd(times, unit='s', no_sd=False):
     """Emit "<mean><unit> <sd><unit>" columns for a results table
     (Python 2; trailing commas keep everything on the current line).

     unit: single-character suffix for each value.
     no_sd: when True, print only the mean column.
     NOTE(review): `sd_space` is undefined here — presumably defined at
     module level in the original file; confirm.
     """
     assert len(unit) == 1, unit
     mean = statistics.mean(times)
     sd = statistics.std(times)
     # mean/std may be None (project-local statistics helper — TODO confirm).
     if mean is None:
         print "%s%s  " % ("n/a", sd_space),
     else:
         print "%8.1f%s " % (mean, unit),
     if not no_sd:
         if sd is None:
             print "%9s " % "n/a",
         else:
             print "%8.1f%s " % (sd, unit),
示例#7
0
    def fit(self, X, y):
        """Fit a Gaussian naive-Bayes model.

        Stores on self:
          classes - sorted distinct labels from y
          priors  - {class: relative frequency}
          mu, sd  - {(feature_index, class): mean / population std dev}
        X is iterated column-wise via X.T (numpy-style 2-D array assumed
        — TODO confirm); y is any iterable of hashable labels.
        Returns self for chaining.
        """
        from collections import Counter
        from statistics import pstdev as std, mean

        yy = tuple(y)
        m = len(yy)
        self.classes = classes = sorted(set(yy))

        # priors: one O(m) counting pass instead of calling yy.count(c)
        # once per class (O(m * k) in the original).
        counts = Counter(yy)
        self.priors = {c: counts[c] / m for c in classes}

        # conditionals = pdf products: per-feature, per-class mean and
        # population standard deviation. (Loop variable renamed from `y`,
        # which shadowed the parameter in the original.)
        self.mu = {(j, c): mean(x for x, label in zip(xx, yy) if label == c)
                   for j, xx in enumerate(X.T) for c in classes}
        self.sd = {(j, c): std(x for x, label in zip(xx, yy) if label == c)
                   for j, xx in enumerate(X.T) for c in classes}
        return self
示例#8
0
File: describe.py  Project: ppicavez/dslr
def describe(fileName):
    """Read the CSV at `fileName`, drop the non-numeric identity columns,
    and print a pandas-describe-style table (count/mean/std/min/quartiles/max)
    computed with the project's own statistic helpers."""
    datas = readCsv(fileName)
    datas = dropColumns(datas, ["Index", "Hogwarts House", "First Name",
                           "Last Name", "Birthday", "Best Hand"])

    # (row label, per-column statistic) in display order.
    summary_rows = [
        ("Count", count(datas)),
        ("Mean", mean(datas)),
        ("Std", std(datas)),
        ("Min", minimum(datas)),
        ("25%", quantile(datas, 0.25)),
        ("50%", quantile(datas, 0.50)),
        ("75%", quantile(datas, 0.75)),
        ("Max", maximum(datas)),
    ]

    result = pd.DataFrame(
        columns=datas.columns,
        index=[label for label, _ in summary_rows],
    )
    for position, (_, values) in enumerate(summary_rows):
        result.iloc[position] = values

    print(result)
示例#9
0
def categorize(l, expansion_factor=1, min_threshold=0.01):
    """Replace each numeric value in `l` with the (low, high) interval it
    falls into; interval width is expansion_factor * std of the data
    (min_threshold wide when the std is zero).

    Returns `l` unchanged when infer_nature() says it is already
    CATEGORICAL; otherwise returns a list of interval tuples.
    """
    if infer_nature(l) == CATEGORICAL:
        return l
    _min_ = min(l)
    _max_ = max(l)
    _std_ = std(l)
    l = list(l)
    i = 0
    while True:
        if i == 0:
            low = _min_
        else:
            low = round(_min_ + i * expansion_factor * _std_, 3)
        if _std_ == 0:
            # Degenerate data: give the bucket a tiny non-zero width.
            high = low + min_threshold
        else:
            high = round(_min_ + (i + 1) * expansion_factor * _std_, 3)
        interval = low, high
        # BUG FIX: the original wrapped `l` in a lazy map() each pass whose
        # lambda captured `low`/`high` by reference; on Python 3 the chained
        # maps are only evaluated on consumption, so every pass saw the
        # *final* bounds (and a map object, not a list, was returned).
        # Materialize each pass eagerly; skip values already turned into
        # interval tuples (comparing a tuple to a float would raise on
        # Python 3, and on Python 2 tuples never matched the range anyway).
        l = [interval if not isinstance(e, tuple) and low <= e < high else e
             for e in l]
        i += 1
        if high > _max_:
            break
    return l
示例#10
0
def request_handler(request):
	"""Return the rounded standard deviation of one of twelve sensor
	series, selected by the query parameter k (1-12).

	k 1-3  -> the three temperature series, scaled C -> F;
	k 4-6  -> humidity; k 7-9 -> light; k 10-12 -> moisture.
	Returns None when k is outside 1-12; raises KeyError without 'k'.
	"""
	URL = "http://608dev.net/sandbox/sc/garciag/all_dataHandler.py"
	r = requests.get(url=URL)
	# SECURITY: eval() on a network response executes arbitrary code;
	# this should be ast.literal_eval / json.loads — flagged, left as-is
	# because the upstream payload format is not visible here.
	data = eval(r.text)

	k = int(request['values']['k'])

	# data[0:3] temperature, [3:6] humidity, [6:9] light, [9:12] moisture;
	# k maps directly onto data[k - 1], so the original 12-branch
	# if/elif chain collapses to two cases.
	# BUG FIX: the statistics module has no `std`; use `stdev`.
	if 1 <= k <= 3:
		# NOTE(review): "+ 32" offsets cancel in a spread, so adding it
		# to a std dev looks wrong, but it mirrors the original code.
		return round(statistics.stdev(data[k - 1]) * 1.8 + 32)
	elif 4 <= k <= 12:
		return round(statistics.stdev(data[k - 1]))
示例#11
0
    # (Body of a Monte-Carlo loop whose header is above this chunk;
    # `i` is the loop counter.)
    if i % 50 == 0:
        print('Current step:', i)
    # Shuffle the signal column to destroy its information content, then
    # record the PnL obtained by trading on the shuffled signals.
    random.shuffle(random_signals)
    df_trades['random_signals'] = random_signals
    permutations.append(generate_trading_history('random_signals'))

print('Permutations generated:', len(permutations))
plt.hist(permutations, bins=30)

# Load previously simulated permutation PnLs and test them for normality.
simulated_permutations = pd.read_csv("*directory*/permutations.csv", sep=',')
stats.shapiro(simulated_permutations.PnL)

#%%
# Histogram of the permutation PnL distribution with a fitted normal curve.
num_bins = 20
mu = mean(simulated_permutations.PnL)
sigma = std(simulated_permutations.PnL)

fig, ax = plt.subplots()

# the histogram of the data (density=1 normalizes to a probability density)
n, bins, patches = ax.hist(simulated_permutations.PnL, num_bins, density=1)

# add a 'best fit' line: normal pdf evaluated at the bin edges using the
# sample mu/sigma computed above
y = ((1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-0.5 * (1 / sigma *
                                                         (bins - mu))**2))
# Mark the realized model PnL against the permutation distribution.
ax.axvline(x=776.12, ymax=0.75, color='r', ls='--')
ax.plot(
    bins,
    y,
    '--',
)
# Scatter of model vs S&P500 daily returns, then an OLS regression of one
# on the other.
plt.scatter(df_percent.sp500,df_percent.model,s=7)
plt.xlabel('return S&P500 in pct')
plt.ylabel('return final model in pct')
plt.title('S&P500 and final model daily returns')

#make regression model 
model = sm.OLS(df_percent['model'], df_percent['sp500'])

#fit model and print results
results = model.fit()
print(results.summary())

#Sharpe Ratio
# Total return over the whole period, then excess return (1% risk-free
# assumed) divided by the std of daily model returns.
model_return= (daily_data['cumulative_model'].iloc[-1]-daily_data['cumulative_model'].iloc[0])/daily_data['cumulative_model'].iloc[0]
sharpe = (model_return-0.01)/std(df_percent['model'])
print('Sharpe Ratio is:', sharpe)

#%% Graphs

#Histograms
plt.hist(actual_trades_model,bins=70)
plt.xlabel('PnL closed trade')
plt.ylabel('Histogram')
plt.title('Realized PnL per hour - model')

plt.hist(trades_perfect_foresight,color='orange',bins=80) 
plt.title('Distribution PnL per trade - foresight')

##--------------------------------------------------------------------------###
示例#13
0
def params(products_dict, method_x, method_y, x_adjustment=0.85, y_adjustment=1):
    """Compute chart/threshold parameters for prices (x axis) and hit
    points (y axis) using either the 'Stads' (mean +/- std) or 'Max'
    (range-based) method per axis.

    Returns a dict with price_std_plus/mean/less, hits_std_plus/mean/less
    and the hit_emergency flag.
    NOTE(review): if method_x is neither 'Stads' nor 'Max', the price_*
    locals are never assigned and the dict construction below raises
    NameError (likewise for method_y / hits_*) — presumably callers only
    ever pass these two values; confirm. y_adjustment appears unused.
    """
    x = products_dict['prices']
    y = products_dict['hit points']

    if method_x == 'Stads':
        price_std_less = mean(x) - 1*std(x)
        # For some products the standard deviation turns out to be huge,
        # making (mean - one standard deviation) negative. To prevent
        # that case, the max-based fallback is forced.
        if price_std_less < 0:
            max_x, min_x = max_and_min(x)
            price_std_plus = max_x
            price_mean = max_x/2
            price_std_less = min_x

        # Otherwise, the remaining variables are estimated normally.
        else:
            price_std_plus =  mean(x) + 1*std(x)
            price_mean = mean(x)

    if method_y == 'Stads':
        hit_emergency = False
        hits_std_plus = mean(y) + 1*std(y)
        hits_mean = mean(y)
        # NOTE(review): 5 standard deviations here vs 1 elsewhere —
        # presumably intentional to make the lower hit bound very loose;
        # confirm.
        hits_std_less = mean(y) - 5*std(y)
        
        # An emergency in hits means there is no relevant product in the
        # search. In that case the emergency flag is activated and price
        # becomes the only axis of reference.
        if hits_std_plus == 0 and hits_mean == 0 and hits_std_less == 0:
            hit_emergency = True
        else:
            hit_emergency = False
    
    if method_x == 'Max':
        max_x, min_x = max_and_min(x)
        price_std_plus = max_x * x_adjustment
        price_mean = (max_x + min_x)/2
        price_std_less = min_x + (max_x * (1-x_adjustment))

    if method_y == 'Max':
        hit_emergency = False

        max_y, min_y = max_and_min(y)
        hits_std_plus = max_y
        hits_std_less = min_y

        hits_mean = (max_y + min_y)/2
        
    # Params are not added to products_dict in this function because
    # they are not something to store in the DB; they are only kept
    # in case of graphing.
    params = {'price_std_plus' : price_std_plus,
                'price_mean' : price_mean,
                'price_std_less' : price_std_less,
                'hits_std_plus' : hits_std_plus,
                'hits_mean' : hits_mean,
                'hits_std_less' : hits_std_less,
                'hit_emergency' : hit_emergency,
                }
    return params
 def histogramVariableBaseline(self, xmin = None, xmax = None, bins = 75, color = None):
     """Follow the measurement baseline window-by-window, normalize each
     window's conductance values by its most populated histogram bin, and
     plot/save a log-scale histogram of the pooled normalized data.

     xmin/xmax: x-axis limits for the final plot.
     bins: bin count for the final histogram.
     color: matplotlib fill color.
     """
     # Extracts all data from bin file
     self.extractBinData()
     
     # Calculates the applied voltage form bin file data (based on first 200 datapoints).
     self.appliedVoltage()
     
     # Uses dataToUse function to truncate total dataset
     self.dataToUse()
     
     # delta_g_values = self.positiveCurrent(self.working_baseline)
     # Converting datasets measured current to delta G
     delta_g_values = self.toConductance()
     
     baseline_histogram_data = []
     
     # Number of datapoints in a window for histogramming
     window_size = 20000
     
     # On off switch for excess
     excess = 0
     
     # One iteration per full-or-partial window of delta_g_values.
     for window_values in list(range(math.ceil(len(delta_g_values) / window_size))):
         try:
             window_dataset = delta_g_values[window_values * window_size : (window_values + 1) * window_size :]
             print(window_values)
         except:
             # NOTE(review): slicing never raises IndexError, so this
             # branch looks unreachable — confirm before relying on it.
             window_dataset = delta_g_values[window_values * window_size : len(delta_g_values) :]
             print(f"{window_values}, The End...")
             
         # Bin width 3.49*sigma*n^(-1/3) — presumably Scott's normal
         # reference rule; confirm.
         bin_width = (3.49 * std(window_dataset))/(len(window_dataset)**(1/3))
         number_of_bins = int((max(window_dataset) - min(window_dataset)) / bin_width)
         
         # np.histogram creates a 2d array: [{counts}, {conductance}] 
         histogram_of_window = np.histogram(window_dataset, bins = number_of_bins)
         
         # Finds the index of the maximum bin
         max_bin_value = np.argmax(histogram_of_window[0])
         
         # Finds the most populated conductance value in histogram
         # (midpoint of that bin's two edges).
         popular_conductance_value = ((histogram_of_window[1][max_bin_value] + histogram_of_window[1][max_bin_value + 1]) / 2)
         
         # Determines the value associated with the most populated bin
         normalized_values = ([(norm_values / popular_conductance_value) for norm_values in window_dataset])
         sumup = 0
         for values in normalized_values:
             sumup += values
         
         mean = sumup / len(normalized_values)
         
         # Skip windows whose normalized mean is ~1 (baseline-only window).
         if mean > 0.999:
             continue
         
         # Keep values <= 1.02 always; above that, drop 75 in a row then
         # keep one (throttles "excess" spikes).
         for values in normalized_values:
             if values > 1.02:
                 if excess < 75:
                     excess += 1
                     pass
                 else:
                     baseline_histogram_data.append(values)
                     excess = 0
             else:
                 baseline_histogram_data.append(values)
                 
         
         # baseline_histogram_data.extend(normalized_values)
     
     # Plot a histogram of the normalized data
     fig, ax = plt.subplots(figsize = (9,9))
     """
     starting_weights = np.ones_like(baseline_histogram_data)
     weights = []
     for index, weight_values in enumerate(starting_weights):
         if index == len(starting_weights) - 10000:
             weights.append(0.001)
         else:
             weights.append(weight_values)
     """      
     
     plt.hist(baseline_histogram_data, bins = bins, color = color, histtype = 'stepfilled', log = True) # , weights = weights)
     
     ax.set_xlabel("Normalized Conductance Difference", size = 30, fontname = "Arial")
     ax.set_ylabel("Counts", size = 30, fontname = "Arial")
     ax.set_xlim(xmin, xmax)
     
     # Sets parameters for axis labels
     plt.xticks(fontsize = 30, fontname = 'Arial')
     plt.yticks(fontsize = 30, fontname = 'Arial')
     
     # Sets parametesr for plot formatting
     ax.spines['top'].set_visible(False) # Removes top and right lines of plot
     ax.spines['right'].set_visible(False)
     
     ax.spines['left'].set_linewidth(5) # Makes the boarder bold
     ax.xaxis.set_tick_params(width = 5)
     ax.spines['bottom'].set_linewidth(5) # Makes the boarder bold
     ax.yaxis.set_tick_params(width = 5)
     
     # Making folder and saving plot (mkdir fails silently if it exists)
     try:
         os.mkdir(os.path.join(self.file_path, self.folder_to_add[0], self.folder_to_add[3]))
     except:
         pass
     file_Name = "{}_{}_{}_{}" .format(self.data_file, self.start_t, self.end_t, "Follow_baseline_Hist")
     
     # NOTE(review): the hard-coded "\\" separator is Windows-only —
     # consider os.path.join for the file name too.
     plt.savefig(os.path.join(self.file_path, self.folder_to_add[0], self.folder_to_add[3]) + "\\" + file_Name + ".png", dpi = 600)
     
     # showing plot and removing it from memory
     plt.show()
     plt.close() 
     
     # Clear the large intermediates before returning.
     baseline_histogram_data, histogram_of_window, window_dataset = [], [], []
示例#15
0
 def standardabweichung(self):
     """Return the standard deviation of the grades of all submissions,
     rounded to two decimal places."""
     grades = [submission.note() for submission in self.abgaben.all()]
     return round(std(grades), 2)