def testComplexDelta(self):
    '''Test for a lagged Delta2 against a double Delta'''
    lag = 5
    ts = self.getts(2, 5., 3.5, 0.2)
    d = ts.delta2(lag=lag)
    dd = ts.delta(lag=lag).delta(lag=lag)
    self.assertEqual(len(d), len(dd))
    self.assertEqual(d.count(), dd.count())
    for dv, ddv in zip(d.values(), dd.values()):
        for v, vv in zip(dv, ddv):
            self.assertAlmostEqual(v, vv)
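# A minimal numpy sketch (not part of the test suite) of the identity the test
# above relies on, assuming delta(lag) computes x[t] - x[t-lag]: applying the
# lagged first difference twice equals the lagged second difference
# x[t] - 2*x[t-lag] + x[t-2*lag].
import numpy as np

x = np.arange(10.0) ** 2
lag = 2
d1 = x[lag:] - x[:-lag]                             # first lagged difference
d2 = d1[lag:] - d1[:-lag]                           # difference of the difference
dd = x[2 * lag:] - 2 * x[lag:-lag] + x[:-2 * lag]   # direct second difference
assert np.allclose(d2, dd)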
def __call__(self, ts, container=None, desc=False, series_info=None, **kwargs):
    '''Dump timeseries as a JSON string compatible with ``flot``'''
    from dynts.web import flot
    from dynts.conf import settings
    pydate2flot = flot.pydate2flot
    result = container or flot.MultiPlot()
    df = {}
    series_info = series_info or df
    if istimeseries(ts):
        res = flot.Flot(ts.name, type='timeseries', **series_info)
        dates = asarray(ts.dates())
        missing = settings.ismissing
        for name, serie in zip(ts.names(), ts.series()):
            info = series_info.get(name, df)
            data = []
            append = data.append
            for dt, val in zip(dates, serie):
                if not missing(val):
                    append([pydate2flot(dt), val])
            serie = flot.Serie(label=name, data=data, **info)
            res.add(serie)
    else:
        res = flot.Flot(ts.name)
        if ts.extratype:
            for name, serie in zip(ts.names(), ts.series()):
                serie = flot.Serie(label=serie.name,
                                   data=serie.data,
                                   lines={'show': serie.lines},
                                   points={'show': True},
                                   scatter={'show': serie.points,
                                            'extratype': ts.extratype})
                res.add(serie)
        else:
            for name, serie in zip(ts.names(), ts.series()):
                serie = flot.Serie(label=serie.name,
                                   data=serie.data,
                                   lines={'show': serie.lines},
                                   points={'show': serie.points})
                res.add(serie)
    result.add(res)
    return result
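# A standalone sketch (not the dynts API) of how the inner loop above builds
# flot-compatible data pairs: missing observations are skipped and each date is
# paired with its value. The millisecond-epoch conversion below is an assumption
# about what pydate2flot produces; the NaN check stands in for settings.ismissing.
import time
from math import isnan


def to_flot_pairs(dates, values, missing=isnan):
    pairs = []
    for dt, val in zip(dates, values):
        if not missing(val):
            # flot time axes expect javascript timestamps (milliseconds since epoch)
            pairs.append([int(time.mktime(dt.timetuple()) * 1000), val])
    return pairs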
def get(self, code, name=None):
    if not self._names:
        return None
    dnames = self._names
    # First we check if code is a name
    if not name:
        v = dnames.get(code, None)
        if v:
            return v[self.default]
        name = self.defaultname
    # Check if name is available, otherwise return None
    nd = dnames.get(name, None)
    if not nd:
        return None
    v = nd.get(code, None)
    if v:
        return v
    v = self.data.get(code, None)
    if v:
        # Cache the values for this code under each name, then retry
        for nam, val in zip(self.names, v):
            dnames[nam][code] = val
        v = nd.get(code, None)
        if v:
            return v
    # Fall back to a calculated statistic if one is defined
    func = getattr(self, 'calculate_{0}'.format(code), None)
    if func:
        return func(name)
def _rollingTest(self, func):
    # A rolling function calculation
    ts = self.getts(cols=2)
    rollfun = 'roll%s' % func
    # Calculate the rolling function for two different windows
    mts30 = getattr(ts, rollfun)(window=30, fallback=self.fallback)
    mts60 = getattr(ts, rollfun)(window=60, fallback=self.fallback)
    # Check that dimensions are OK
    self.assertEqual(len(mts30), len(ts) - 29)
    self.assertEqual(len(mts60), len(ts) - 59)
    self.assertEqual(mts30.count(), 2)
    self.assertEqual(mts60.count(), 2)
    values = ts.values()
    v30 = mts30.values()
    date = asarray(ts.dates())
    c = 0
    # Loop over the items of the shorter-window rolling function
    for dt, v in mts30.items():
        # Clone the timeseries for this particular window
        tst = ts.clone(date[c:c + 30], values[c:c + 30])
        self.assertEqual(dt, tst.end())
        # Get the rolling function applied to the whole timeseries clone
        tv = getattr(tst, func)()
        c += 1
        for a, b in zip(v, tv):
            self.assertAlmostEqual(a, b)
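# A plain-python reference (a sketch, not the dynts implementation) of what a
# rolling function such as rollmax is checked against above: apply the plain
# function to each window, producing len(values) - window + 1 results.
def rolling_reference(values, window, func=max):
    return [func(values[i - window:i]) for i in range(window, len(values) + 1)]


print(rolling_reference([3, 1, 4, 1, 5, 9, 2, 6], 3))  # [4, 4, 5, 9, 9, 9]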
def testRollingOp(self):
    data = populate(size=500)[:, 0]
    roll = rollingOperation(data, 20, skiplist_class=self.skiplist)
    rmin = roll.min()
    rmax = roll.max()
    rmed = roll.median()
    # The rolling median must lie between the rolling min and max
    for m0, m1, m2 in zip(rmin, rmed, rmax):
        self.assertTrue(m1 >= m0)
        self.assertTrue(m2 >= m1)
def testFullPivot(self):
    ts = self.getts(cols=4)
    b = BasicStatistics(ts)
    p = pivottable(b.calculate())
    for field in p.fields:
        for name in p.names:
            val = p.get(field, name)
            dt = dict(zip(p.data['names'], p.data[field]))
            self.assertEqual(val, dt[name])
def __init__(self, data, default='latest'):
    self.default = default
    self.data = data
    if data:
        self.names = data['names']
        self.defaultname = self.names[0]
        d = self.default
        self._names = dict(((name, {d: v})
                            for name, v in zip(data['names'], data[d])))
    else:
        self._names = None
def table(self):
    data = self.data
    if data:
        iterator = zip(data['names'], data['latest'], data['min'],
                       data['mean'], data['max'])
        for name, lat, mn, mea, mx in iterator:
            rng = mx - mn
            # latest value as a percentage of its min-max range
            prange = 0 if not rng else 100 * (lat - mn) / rng
            yield name, lat, mn, mea, mx, prange
    else:
        # a plain return ends the generator (raising StopIteration here
        # is an error from Python 3.7 onwards)
        return
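# A runnable sketch of the row layout table() yields, using a hypothetical
# data dict in the shape the methods above read from: prange expresses the
# latest value as a percentage of its min-max range.
example_data = {
    'names':  ['a', 'b'],
    'latest': [3.0, 5.0],
    'min':    [1.0, 2.0],
    'mean':   [2.0, 4.0],
    'max':    [4.0, 8.0],
}


def example_rows(data):
    it = zip(data['names'], data['latest'], data['min'],
             data['mean'], data['max'])
    for name, lat, mn, mea, mx in it:
        rng = mx - mn
        prange = 0 if not rng else 100 * (lat - mn) / rng
        yield name, lat, mn, mea, mx, prange


print(list(example_rows(example_data)))
# [('a', 3.0, 1.0, 2.0, 4.0, 66.66...), ('b', 5.0, 2.0, 4.0, 8.0, 50.0)]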
def testDataProvider(self):
    result = dynts.evaluate('2*GOOG,GOOG')
    self.assertEqual(len(result.data), 1)
    self.assertEqual(result.expression, dynts.parse('2*GOOG,GOOG'))
    data = result.ts()
    self.assertTrue(dynts.istimeseries(data))
    self.assertEqual(data.count(), 2)
    ts1 = data.serie(0)
    ts2 = data.serie(1)
    for v1, v2 in zip(ts1, ts2):
        self.assertAlmostEqual(v1, 2. * v2)
def _testscalar(self, oper, rs, ts):
    for rv, cv in zip(rs.values(), ts.values()):
        for r, v in zip(rv, cv):
            self.assertAlmostEqual(r, oper(v))
def testInit(self):
    ts, dates, data = self.getts(True)
    self.assertEqual(ts.type, self.backend)
    self.assertEqual(len(ts), len(dates))
    for dt, dt1 in zip(dates, ts.dates()):
        self.assertEqual(dt, dt1)
def __le__(self, other):
    return reduce(lambda x, y: x and y[0] <= y[1],
                  zip(self.elem, asarray(other)), True)
from functools import reduce

from numpy import array, ndarray, double, dtype

from dynts.utils.py2py3 import zip

object_type = dtype(object)

__all__ = ['cross', 'asarray', 'ascolumn', 'assimple', 'scalarasiter']

# Apply ``func`` element-wise across the given iterables
crossoperator = lambda func, *args: [func(*vals) for vals in zip(*args)]


def scalarasiter(x):
    '''Return ``x`` as an iterable: ``None`` becomes an empty list and a
    scalar is wrapped in a single-element list.'''
    if x is None:
        return []
    elif hasattr(x, '__iter__'):
        return x
    else:
        return [x]


def asarray(x, dtype=None):
    '''Convert ``x`` into a ``numpy.ndarray``.'''
    iterable = scalarasiter(x)
    if isinstance(iterable, ndarray):
        return iterable
    else:
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)
        return array(iterable, dtype=dtype)
def ascolumn(x, dtype=None):
    '''Convert ``x`` into a ``column``-type ``numpy.ndarray``.'''
    x = asarray(x, dtype)
    return x if len(x.shape) >= 2 else x.reshape(len(x), 1)


def assimple(x):
    if hasattr(x, '__iter__'):
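# Quick usage check for the helpers above (assumes numpy is installed):
# a scalar becomes a length-1 array and 1-D input becomes an (n, 1) column.
print(asarray(3.5).shape)         # (1,)
print(asarray([1, 2, 3]).shape)   # (3,)
print(ascolumn([1, 2, 3]).shape)  # (3, 1)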