def fill(dataset):
    """
    Fill all nan value in the sensor data for given odtk.data.dataset.Dataset

    :parameter dataset: Dataset object that wants to fill in missing values
    :type dataset: odtk.data.dataset.Dataset
    :return: None
    """
    from ..data import Dataset
    from numpy import isnan, where, arange, maximum, nonzero

    if not isinstance(dataset, Dataset):
        raise TypeError("Dataset has to be class odtk.data.dataset.Dataset")

    values = dataset.data

    def _forward_fill(matrix):
        # Propagate the most recent non-nan value forward along each column:
        # build, per column, the running maximum of the indices of valid
        # entries, then gather with those indices.
        nan_mask = isnan(matrix.T)
        last_valid = where(~nan_mask, arange(nan_mask.shape[1]), 0)
        maximum.accumulate(last_valid, axis=1, out=last_valid)
        matrix.T[nan_mask] = matrix.T[nonzero(nan_mask)[0], last_valid[nan_mask]]

    # Pass 1 fills forward; reversing and filling again takes care of any
    # leading nans; the final reversal restores the original row order.
    _forward_fill(values)
    values = values[::-1]
    _forward_fill(values)
    values = values[::-1]

    dataset.change_values(values)
def ulcer_index(cumulative):
    '''
    Ulcer Index measures downside risk, in terms of both depth and duration
    of price declines.
    '''
    # Percentage drawdown from the running peak at every point.
    running_peak = maximum.accumulate(cumulative)
    drawdown_pct = (cumulative - running_peak) / running_peak * 100.0
    # Mean of the squared drawdowns over the whole series.
    squared = power(drawdown_pct, 2)
    return sum(squared) / len(cumulative.index)
async def max_dd_func(data):
    """Return the absolute maximum drawdown of *data*, or None on failure."""
    try:
        running_peak = maximum.accumulate(data)
        drawdown = data - running_peak
        result = abs(drawdown.min())
    except Exception as err:
        # Best-effort: log only when debugging is enabled, signal with None.
        if settings.SHOW_DEBUG:
            print("max_dd_func {}\n".format(err))
        result = None
    return result
def plot_results(self, filename=None, show_plot=True):
    """
    Plot a two-panel optimisation summary: function evaluations with their
    running maximum on the left, acquisition-function convergence on the right.

    :param filename: if given, the figure is saved to this path.
    :param show_plot: show the figure interactively when True, else close it.
    """
    fig = plt.figure(figsize=(10, 4))

    # Left panel: raw evaluations plus the best-observed-so-far envelope.
    evals_ax = fig.add_subplot(121)
    running_max = maximum.accumulate(self.y)
    margin = running_max.ptp() * 0.1
    its = arange(len(self.y)) + 1
    evals_ax.plot(its, running_max, c='red', alpha=0.6, label='max observed value')
    evals_ax.plot(its, self.y, '.', label='function evaluations', markersize=10)
    evals_ax.set_xlabel('iteration')
    evals_ax.set_ylabel('function value')
    evals_ax.set_ylim([running_max.min() - margin, running_max.max() + margin])
    evals_ax.legend(loc=4)
    evals_ax.grid()

    # Right panel: convergence metric on a log scale.
    conv_ax = fig.add_subplot(122)
    conv_ax.plot(self.iteration_history, self.convergence_metric_history,
                 c='C0', alpha=0.35)
    conv_ax.plot(self.iteration_history, self.convergence_metric_history, '.',
                 c='C0', label=self.acquisition.convergence_description,
                 markersize=10)
    conv_ax.set_yscale('log')
    conv_ax.set_xlabel('iteration')
    conv_ax.set_ylabel('acquisition function value')
    conv_ax.set_xlim([0, None])
    conv_ax.set_title('Convergence summary')
    conv_ax.legend()
    conv_ax.grid()

    fig.tight_layout()
    if filename is not None:
        plt.savefig(filename)
    if show_plot:
        plt.show()
    else:
        plt.close()
def fit_opt_params_monotonic(c, R, Rsem=None, clip_decreasing=False, clip_after=1):
    """
    Fit optimal parameters for the vanilla Naka-Rushton model up to the
    maximum value of R, ignoring subsequent values.

    :param c: contrast values, one per column of R
    :param R: response matrix (nsizes x ncontrasts); NOTE: modified in place
    :param Rsem: per-point SEM used to weight residuals; zeros are replaced
                 by the smallest positive SEM. NOTE: modified in place if given.
    :param clip_decreasing: also mask points after the response starts falling
    :param clip_after: number of points to keep past the first decrease
    :return: fitted parameter vector (a per size, b, c50 per size, n)
    """
    nsizes = R.shape[0]
    cmax = (np.argmax(R, axis=1)).astype('int')
    # Mask everything more than one step past each row's peak with -1;
    # negative entries are excluded by the (R > 0) factor in the cost.
    for isize in range(R.shape[0]):
        R[isize, cmax[isize] + 2:] = -1
    a_0 = R.max(1)
    b_0 = 0
    c50_0 = c[cmax] / 2
    n_0 = 2
    bds_a = [(0, np.inf) for a in a_0]
    bds_b = [(0, np.inf)]
    bds_c50 = [(0, cc + 1) for cc in c[cmax]]
    bds_n = [(0, 4) for ivar in range(1)]
    if clip_decreasing:
        for isize in range(nsizes):
            # A point is "decreasing" when it falls below the running maximum.
            # BUGFIX: was `npmaximum.accumulate`, a NameError at runtime;
            # `np.maximum.accumulate` is the intended call.
            decreasing = (R[isize] < np.maximum.accumulate(R[isize]))
            if np.any(decreasing):
                R[isize, np.where(decreasing)[0][0] + 1 + clip_after:] = -1
    bds = zip_pairs(bds_a + bds_b + bds_c50 + bds_n)
    params_0 = np.concatenate((a_0, (b_0, ), c50_0, (n_0, )))
    if Rsem is None:
        Rsem = np.ones_like(R)
    else:
        # Avoid division by zero in the weighted residuals.
        Rsem[Rsem == 0] = np.min(Rsem[Rsem > 0])

    def compute_this_cost(params):
        # SEM-weighted residuals; masked (negative) entries contribute zero.
        return (R > 0) * (R - naka_rushton(c, params, nsizes)) / Rsem

    params_opt = sop.least_squares(
        lambda params: compute_this_cost(params).flatten(),
        params_0,
        bounds=bds)
    return params_opt['x']
def max_drawdown(timeseries):
    """
    Return the maximum drawdown of *timeseries* as a non-positive fraction.

    :param timeseries: 1-D array-like of cumulative values
    :return: trough/peak - 1 (e.g. -0.25 for a 25% drawdown); 0.0 when the
             series never declines.
    """
    # Index of the deepest point below the running maximum (the trough).
    i = argmax(maximum.accumulate(timeseries) - timeseries)
    if i == 0:
        # Monotonically non-decreasing series: there is no drawdown.
        # BUGFIX: the original called argmax on an empty slice here,
        # raising ValueError.
        return 0.
    # Index of the peak preceding the trough.
    j = argmax(timeseries[:i])
    return (float(timeseries[i]) / timeseries[j]) - 1.
def max_dd_duration(cumulative):
    """Business-day count from the pre-drawdown peak to the deepest trough."""
    # Date of the deepest point below the running maximum.
    trough = (maximum.accumulate(cumulative) - cumulative).idxmax()
    # Date of the highest value up to (and including) the trough.
    peak = cumulative[:trough].idxmax()
    start = to_datetime(peak).strftime('%Y-%m-%d')
    end = to_datetime(trough).strftime('%Y-%m-%d')
    return busday_count(start, end)
def drawdowns(cumulative):
    """Per-point drawdown: each value minus the running peak (NaNs stay NaN)."""
    running_peak = maximum.accumulate(cumulative.dropna())
    return cumulative - running_peak
date_parser=None, dayfirst=False, iterator=False, chunksize=None, \ compression='infer', thousands=None, decimal='.', lineterminator=None, \ quotechar='"', quoting=0, escapechar=None, comment=None, \ encoding=None, dialect=None, tupleize_cols=False, \ error_bad_lines=True, warn_bad_lines=True, skipfooter=0, \ skip_footer=0, doublequote=True, delim_whitespace=False, \ as_recarray=False, compact_ints=False, use_unsigned=False, \ low_memory=False, buffer_lines=None, memory_map=False, \ float_precision=None) #make index df.sort_index(axis=0, ascending=True, inplace=True) df.index = to_datetime(df.index).to_pydatetime() df.index.name = "DATE_TIME" max_y = maximum.accumulate(df.CLOSE) dd = df.CLOSE - max_y dd = sort(dd.as_matrix(), axis=0) probs, values, patches = plt.hist(dd, bins=50, weights=zeros_like(dd) + 1. / dd.size) p_list = [p for p in probs] v_list = [v for v in values] p = DataFrame(p_list) v = DataFrame(v_list) final = concat([p, v], axis=1) final_filename = join(settings.METATRADER_HISTORY_PATHS[0][1], "hist-{}.csv".format(symbol))
def arrround(a, axis=0, r=5., s=log(1.5)):
    """
    Round the running maximum of *a* up onto a geometric grid.

    Values are first made non-decreasing along *axis* with a cumulative
    maximum; zeros are replaced by the smallest non-zero value along the
    axis; each value is then rounded up to the nearest r * exp(k*s) for
    integer k.

    :param a: numeric array
    :param axis: axis along which to accumulate (default 0)
    :param r: base scale of the geometric grid (default 5.0)
    :param s: log of the grid ratio (default log(1.5), i.e. steps of x1.5)
    """
    a = maximum.accumulate(a, axis=axis)
    # BUGFIX: `infty` was removed in NumPy 2.0; float('inf') is equivalent
    # and needs no import. The sentinel keeps zeros from winning the min.
    m = amin(where(a != 0, a, float('inf')), axis=axis)
    a = where(a != 0, a, m)
    return r * exp(ceil(log(a / r) / s) * s)
# Demo: forward-fill NaNs in a 1-D array via a cumulative-index trick.
from numpy import array, arange, where, maximum, isnan, nan

arr = array([4, nan, nan, 2, nan])
nan_mask = isnan(arr)
# Index of each valid entry (0 where NaN), then carry the last valid
# index forward with a running maximum and gather.
last_valid = where(~nan_mask, arange(len(nan_mask)), 0)
maximum.accumulate(last_valid, out=last_valid)
out = arr[last_valid]
print(out)  # [4. 4. 4. 2. 2.]