Example 1
 def test_simple_moving_average(self):
     points = [1, 2, 3, 4, 5, 6]
     series = TimeSeries(zip(points, points))
     ma = series.moving_average(3).round()
     self.assertListEqual(ma.points, [ (3, 2), (4, 3), (5, 4), (6, 5) ])
     ma = series.moving_average(5).round()
     self.assertListEqual(ma.points, [ (5, 3), (6, 4) ])
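A plain-numpy cross-check of the trailing-window convention implied by the expected values above (each average is stamped with the last timestamp of its window); this is an illustration, not the library's implementation:

import numpy as np

values = np.array([1, 2, 3, 4, 5, 6], dtype=float)
window = 3
ma = np.convolve(values, np.ones(window) / window, mode='valid')  # [2. 3. 4. 5.]
# pairing each average with the last timestamp of its window reproduces
# the expected points [(3, 2), (4, 3), (5, 4), (6, 5)]
print(list(zip(range(window, 7), ma.astype(int))))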
Example 2
 def test_itertimes(self):
     x = TimeSeries([1, 2, 3, 4],[1, 4, 9, 16])
     i = x.itertimes()
     nextt = next(i)
     self.assertTrue ( nextt == 1 )
     nextt = next(i)
     self.assertTrue ( nextt == 2 )
     self.assertTrue (nextt.dtype == np.int64)
Example 3
 def test024(self):
     'TimeSeries.write_to_pi_file writes dict to stream with 12 offset'
     stream = mock.Stream()
     obj = TimeSeries.as_dict(self.testdata + "read.PI.timezone.2.xml")
     TimeSeries.write_to_pi_file(stream, obj, offset=12)
     target = file(self.testdata + "targetOutput12.xml").read()
     current = ''.join(stream.content)
     self.assertEquals(target.strip(), current.strip())
Example 4
 def test_iteritems(self):
     x = TimeSeries([1, 2, 3, 4],[1, 4, 9, 16])
     i = x.iteritems()
     nextt = next(i)
     self.assertTrue ( nextt == (1, 1) )
     nextt = next(i)
     self.assertTrue ( nextt == (2, 4) )
     self.assertTrue (len(nextt) ==2 )
Example 5
    def test_111(self):
        'get_event is defined and equal to __getitem__'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        d1 = datetime(1979, 3, 15, 9, 35)
        ## setting
        obj[d1] = 1.23
        ## checking
        self.assertEquals(obj._events[d1], obj.get_event(d1))
Example 6
    def test_112(self):
        'get_value is defined and returns only the value, no flags'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        d1 = datetime(1979, 3, 15, 9, 35)
        ## setting
        obj[d1] = 1.23
        ## checking
        self.assertEquals(obj._events[d1][0], obj.get_value(d1))
Example 7
    def test_024(self):
        'getting events of an eventless time series'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        d1 = datetime(1979, 3, 15, 9, 35)
        d3 = datetime(1979, 4, 12, 9, 35)
        self.assertEqual(0, len(obj.get_events()))
        self.assertEqual(0, len(obj.get_events(d3)))
        self.assertEqual(0, len(obj.get_events(d1, d3)))
Example 8
 def test000(self):
     'TimeSeries.write_to_pi_file writes list to new file'
     obj = TimeSeries.as_list(self.testdata + "read.PI.timezone.2.xml")
     TimeSeries.write_to_pi_file(self.testdata + "current.xml",
                                 obj,
                                 offset=2)
     target = file(self.testdata + "targetOutput.xml").read()
     current = file(self.testdata + "current.xml").read()
     self.assertEquals(target.strip(), current.strip())
Example 9
 def test030(self):
     'TimeSeries.write_to_pi_file appends children to stream'
     stream = mock.Stream()
     obj = TimeSeries.as_dict(self.testdata + "read.PI.timezone.2.xml")
     TimeSeries.write_to_pi_file(stream, obj, offset=0, append=True)
     target_lines = file(self.testdata + "targetOutput00.xml").readlines()[3:-1]
     target = ''.join(i.strip() for i in target_lines)
     current = ''.join(i.strip() for i in ''.join(stream.content).split('\n'))
     self.assertEquals(target, current)
Example 10
    def test_110(self):
        'add_value is defined and equal to __setitem__'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        d1 = datetime(1979, 3, 15, 9, 35)
        ## setting
        obj.add_value(d1, 1.23)
        ## checking
        self.assertEquals(obj._events[d1], obj[d1])
Example 11
 def test_periodic_decomposition(self):
     series = TimeSeries([ (1, 100), (2, 200), (3, 100), (4, 200), (5, 100) ])
     decomposed = series.decompose(2, periodic=True).round()
     self.assertTrue(isinstance(decomposed, DataFrame))
     self.assertEquals(len(decomposed), 3)
     for series in decomposed.itervalues():
         self.assertListEqual(series.timestamps, [1, 2, 3, 4, 5])
     self.assertListEqual(decomposed['trend'].values, [150] * 5)
     self.assertListEqual(decomposed['seasonal'].values, [-50, 50, -50, 50, -50])
     self.assertListEqual(decomposed['residual'].values, [0] * 5)
Example 12
    def test_115(self):
        'can use .get with default value'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        d1 = datetime(1979, 3, 15, 9, 35)
        obj.add_value(d1, 1.23)  # executing __setitem__
        ## finds values that are there
        [self.assertEquals(obj._events[d], obj.get(d))
         for d in obj._events.keys()]
        d2 = datetime(1979, 5, 15, 9, 35)
        ## returns default value if event is not there
        self.assertEquals(None, obj.get(d2))
Example 13
    def test_200(self):
        'represent empty TimeSeries as Element'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        
        current = obj._as_element()
        self.assertTrue(isinstance(current, ElementTree.Element))
        self.assertEquals('series', current.tag)
        childElements = [i for i in current.getchildren()]
        self.assertEquals(1, len(childElements))
        self.assertEquals(['header'], [i.tag for i in childElements])
        self.assertEquals({}, current.attrib)
Example 14
    def test_022(self):
        'start and end of a time series with events'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        d1 = datetime(1979, 3, 15, 9, 35)
        d3 = datetime(1979, 4, 12, 9, 35)
        d2 = datetime(1979, 5, 15, 9, 35)
        obj[d1] = 1.23
        obj[d3] = 0.23
        obj[d2] = -3.01
        self.assertEqual(d1, obj.get_start_date())
        self.assertEqual(d2, obj.get_end_date())
Example 15
    def test_023(self):
        'getting events of a non empty time series'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        d1 = datetime(1979, 3, 15, 9, 35)
        d3 = datetime(1979, 4, 12, 9, 35)
        d2 = datetime(1979, 5, 15, 9, 35)
        obj[d1] = 1.23
        obj[d3] = 0.23
        obj[d2] = -3.01
        self.assertEqual(3, len(obj.get_events()))
        self.assertEqual(2, len(obj.get_events(d3)))
        self.assertEqual(2, len(obj.get_events(d1, d3)))
Example 16
    def test_201(self):
        'represent TimeSeries with two events as Element'

        obj = TimeSeries(location_id='loc', parameter_id='par')
        obj[datetime(1980, 11, 23, 19, 35)] = -1
        
        current = obj._as_element()
        self.assertTrue(isinstance(current, ElementTree.Element))
        self.assertEquals('series', current.tag)
        childElements = [i for i in current.getchildren()]
        self.assertEquals(2, len(childElements))
        self.assertEquals(['header', 'event'],
                          [i.tag for i in childElements])
        self.assertEquals({}, current.attrib)
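Tests 200 and 201 above only pin down the tag structure of the serialized element; a minimal sketch that reproduces that structure with the standard library (an illustration, not the project's _as_element):

from xml.etree import ElementTree

def as_element_sketch(events):
    # a 'series' root with a 'header' child, plus one 'event' child per event
    series = ElementTree.Element('series')
    ElementTree.SubElement(series, 'header')
    for _ in events:
        ElementTree.SubElement(series, 'event')
    return series

assert [child.tag for child in as_element_sketch([])] == ['header']
assert [child.tag for child in as_element_sketch([-1])] == ['header', 'event']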
Example 17
 def test010(self):
     '''result of TimeSeries.as_dict is indexed on locationId
     parameterId 2-tuples'''
     obj = TimeSeries.as_dict(self.testdata + "read.PI.timezone.2.xml")
     self.assertEquals(set([("600", "P1201"), ("600", "P2504")]),
                       set(obj.keys()))
     self.assertTrue(isinstance(obj[("600", "P1201")], TimeSeries))
     self.assertTrue(isinstance(obj[("600", "P2504")], TimeSeries))
Example 18
 def test111(self):
     'TimeSeries.as_dict reads events of series (b)'
     obj = TimeSeries.as_dict(self.testdata + "read.PI.timezone.2.xml")
     ts = obj[("600", "P2504")]
     self.assertEquals([
             (str_to_datetime("2010-04-05", "00:00:00", 2), 17),
             (str_to_datetime("2010-04-08", "00:00:00", 2), 22),
             (str_to_datetime("2010-04-10", "00:00:00", 2), 24), ],
                       ts.get_values())
Example 19
 def test113(self):
     'TimeSeries.get_values with only requested timeseries'
     obj = TimeSeries.as_dict(self.testdata + "read.PI.timezone.2.xml")
     ts = obj[("600", "P2504")]
     dates = [str_to_datetime("2010-04-05", "00:00:00", 2),
              str_to_datetime("2010-04-10", "00:00:00", 2)]
     self.assertEquals([
             (str_to_datetime("2010-04-05", "00:00:00", 2), (17, 0, '')),
             (str_to_datetime("2010-04-10", "00:00:00", 2), (24, 0, '')), ],
                       ts.get_events(dates=dates))
Example 20
    def test_042(self):
        'using deprecated events function'

        root = logging.getLogger()
        handler = mock.Handler()
        root.addHandler(handler)
        obj = TimeSeries(location_id='loc', parameter_id='par')
        d1 = datetime(1979, 3, 15, 9, 35)
        d3 = datetime(1979, 4, 12, 9, 35)
        d2 = datetime(1979, 5, 15, 9, 35)
        obj[d1] = 1.23
        obj[d3] = 0.23
        obj[d2] = -3.01
        self.assertEqual(62, len(list(obj.events())))
        self.assertEqual(34, len(list(obj.events(d3))))
        self.assertEqual(29, len(list(obj.events(d1, d3))))
        self.assertEqual(3, len(handler.content))
        self.assertEqual("timeseries.timeseries|WARNING|Call to deprecated function events.",
                         handler.content[0])
        root.removeHandler(handler)
Example 21
def write_to_pi_file(*args, **kwargs):
    """Write the given timeseries in PI XML format.

    Parameters:
      *kwargs['filename']*
        name of PI XML file to create and write to
      *kwargs['timeseries']*
        single time series, or a dict of time series, where each time series
        has a method 'events' that generates all (date, value) pairs

    """
    multiple_series_stub = kwargs['timeseries']
    if isinstance(multiple_series_stub, dict):
        multiple_series = []
        for parameter_id, series_stub in multiple_series_stub.iteritems():
            my_kwargs = deepcopy(kwargs)
            my_kwargs["parameter_id"] = parameter_id
            series = TimeSeries(*args, **my_kwargs)
            series.sorted_event_items = lambda s=series_stub: list(s.events())
            multiple_series.append(series)
        multiple_series.sort(key=lambda series: series.parameter_id)
    else:
        series = TimeSeries(*args, **kwargs)
        series.sorted_event_items = lambda: list(multiple_series_stub.events())
        multiple_series = [series]

    TimeSeries.write_to_pi_file(kwargs['filename'], multiple_series)
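A minimal sketch of the documented calling convention: any object exposing an events() method that yields (date, value) pairs satisfies the 'timeseries' contract described above. EventsStub is a hypothetical stand-in, and the call itself is left commented out because the remaining TimeSeries keyword arguments depend on the project:

from datetime import datetime

class EventsStub(object):
    # hypothetical stand-in satisfying the documented 'events' contract
    def __init__(self, pairs):
        self._pairs = pairs

    def events(self):
        return iter(self._pairs)

stub = EventsStub([(datetime(2011, 11, 11, 12, 20), 0.1),
                   (datetime(2011, 11, 11, 12, 25), 0.2)])
# write_to_pi_file(filename="out.xml", timeseries={"Q": stub})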
Example 22
def test_kernelcorr():
    t1 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
    t2 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
    standts1 = _corr.stand(t1, t1.mean(), t1.std())
    standts2 = _corr.stand(t2, t2.mean(), t2.std())
     # kernel_corr should return a correlation of 1.0 since the two time series are identical
    assert(_corr.kernel_corr(standts1, standts2, mult=1) == 1.0)
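kernel_corr itself is not shown in this listing; one common formulation consistent with the identity case asserted above is an exponentially weighted sum over all circular cross-correlation lags, normalized so that identical inputs score exactly 1.0. A sketch under that assumption (not necessarily the project's code):

import numpy as np

def kernel_corr_sketch(a, b, mult=1):
    def cross(x, y):
        # circular cross-correlation over all lags via the FFT
        return np.real(np.fft.ifft(np.fft.fft(x) * np.conj(np.fft.fft(y))))
    num = np.sum(np.exp(mult * cross(a, b)))
    den = np.sqrt(np.sum(np.exp(mult * cross(a, a))) *
                  np.sum(np.exp(mult * cross(b, b))))
    return num / den

a = np.array([40., 50., 60., 70.])
a = (a - a.mean()) / a.std()
assert abs(kernel_corr_sketch(a, a, mult=1) - 1.0) < 1e-12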
Example 23
 def test_invalid_moving_average(self):
     series = TimeSeries([])
     with self.assertRaises(ArithmeticError):
         series.moving_average(3)
     series = TimeSeries([ (1, 1), (2, 2) ])
     with self.assertRaises(ArithmeticError):
         series.moving_average(3)
Example 24
def test_procs():

    # check that standardization is successful
    _, t1 = tsmaker(0.5, 0.1, 0.01)  # ignore meta-data returned
    t2 = random_ts(2)
    standts1 = stand(t1, t1.mean(), t1.std())
    standts2 = stand(t2, t2.mean(), t2.std())
    assert np.round(standts1.mean(), 10) == 0.0
    assert np.round(standts1.std(), 10) == 1.0
    assert np.round(standts2.mean(), 10) == 0.0
    assert np.round(standts2.std(), 10) == 1.0

    # once more, with hard-coded data so we know what answers to expect
    v1 = [2.00984793, 3.94985729, 0.51427819, 4.16184495, 2.73640138,
          0.07386398, 1.32847121, 0.3811719, 4.34006452, 1.86213488]
    v2 = [6.43496991, 10.34439479, 11.71829468, 3.92319708, 7.07694841,
          6.7165553, 5.72293448, 4.79283759, 11.74512723, 11.74048488]
    t1 = TimeSeries(np.arange(0.0, 1.0, 0.1), v1)
    t2 = TimeSeries(np.arange(0.0, 1.0, 0.1), v2)
    standts1 = stand(t1, t1.mean(), t1.std())
    standts2 = stand(t2, t2.mean(), t2.std())
    assert np.round(standts1.mean(), 10) == 0.0
    assert np.round(standts1.std(), 10) == 1.0
    assert np.round(standts2.mean(), 10) == 0.0
    assert np.round(standts2.std(), 10) == 1.0

    idx, mcorr = max_corr_at_phase(standts1, standts2)
    assert idx == 2
    assert np.round(mcorr, 4) == 0.5207

    sumcorr = kernel_corr(standts1, standts2, mult=10)
    assert np.round(sumcorr, 4) == 0.0125
Example 25
    def test_interpolate(self):
        """ test TimeSeries.interpolate """
        tzstr = "US/Eastern"
        tz = pytz.timezone(tzstr)
        h = TimeSeries.hour
        n = datetime.datetime.now()
        times1 = [n, n+h, n+2*h, n+3*h]
        times2 = [n, n+3*h]
        times2 = TimeSeries.makeAware(times2, tz)

        values = [6.0, 8.0]
        ts = TimeSeries("test", times2, values, tzstr)
        res = ts.interpolate(times1)
        target = [6.0, 6.66667, 7.33333, 8.0]
        for i in range(len(res)):
            self.failUnlessAlmostEqual(res[i], target[i], places=4)

        times3 = [n-h, n+2*h, n+4*h]
        res = ts.interpolate(times3)
        target = [None, 7.3333333, None]
        for i in range(len(res)):
            self.failUnlessAlmostEqual(res[i], target[i], places=4)
Example 26
def proc_main(pk, row, arg):
    '''
    Calculates the distance between two time series, using the normalized
    kernelized cross-correlation.
    Note: used directly for augmented selects.

    Parameters
    ----------
    pk : any hashable type
        The primary key of the database entry
    row : dictionary
        The database entry
    arg : TimeSeries or sequence of TimeSeries constructor arguments
        The time series to compare against (may arrive serialized)

    Returns
    -------
    [dist] : list containing one float
        The kernelized cross-correlation distance between the row's time
        series and the argument time series
    '''

    # recast the argument as a time series (type is lost due to serialization)
    if isinstance(arg, TimeSeries):
        argts = arg  # for server-side testing
    else:
        argts = TimeSeries(*arg)  # for live client-side operations

    # standardize the time series
    stand_argts = stand(argts, argts.mean(), argts.std())

    # standardize each row of the data that has tripped the trigger
    stand_rowts = stand(row['ts'], row['ts'].mean(), row['ts'].std())

    # compute the normalized kernelized cross-correlation between the
    # time series being upserted/selected and the time series argument
    kerncorr = kernel_corr(stand_rowts, stand_argts, 5)

    # use the normalized kernelized cross-correlation to compute the distance
    # between the time series and return
    return [np.sqrt(2*(1-kerncorr))]
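The returned value maps a correlation-style similarity in [-1, 1] onto a distance: sqrt(2 * (1 - corr)) is 0 for perfectly correlated series and 2 for perfectly anti-correlated ones, so callers can treat smaller numbers as "closer". A quick check of that mapping:

import numpy as np

for corr in (1.0, 0.5, 0.0, -1.0):
    print(corr, np.sqrt(2 * (1 - corr)))  # 0.0, 1.0, ~1.414, 2.0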
Example 27
def test_maxcorr():
    t1 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
    t2 = TimeSeries([1, 2, 3, 4], [50, 60, 70, 40])
    standts1 = _corr.stand(t1, t1.mean(), t1.std())
    standts2 = _corr.stand(t2, t2.mean(), t2.std())
    idx, mcorr = _corr.max_corr_at_phase(standts1, standts2)
    #idx should be equal to one since the second ts is shifted by 1
    assert(idx == 1)
    assert(np.real(mcorr) == 4)
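max_corr_at_phase is likewise not listed here; one common FFT-based implementation, consistent with this test's expectations (best shift 1 and unnormalized correlation 4 for a four-point series shifted by one position), is sketched below under that assumption:

import numpy as np

def max_corr_at_phase_sketch(a, b):
    # circular cross-correlation at every phase shift, then keep the best one
    corr = np.real(np.fft.ifft(np.fft.fft(a) * np.conj(np.fft.fft(b))))
    idx = int(np.argmax(corr))
    return idx, corr[idx]

a = np.array([40., 50., 60., 70.])
b = np.array([50., 60., 70., 40.])  # a circularly shifted by one position
a = (a - a.mean()) / a.std()        # standardize (population std, ddof=0)
b = (b - b.mean()) / b.std()
idx, mcorr = max_corr_at_phase_sketch(a, b)
assert idx == 1 and round(mcorr) == 4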
Example 28
    def test352(self):
        'TimeSeries.as_dict filters based on timestamps from django QuerySet'

        DT = datetime
        testdata = django.QuerySet([
                {'location': '124',
                 'parameter': 'Q',
                 'events': [(DT(2011, 11, 11, 12, 20), 0.1, 8, ''),
                            (DT(2011, 11, 11, 12, 25), 0.2, 1, ''),
                            (DT(2011, 11, 11, 12, 30), 0.3, 2, '')]},
                ])
        end = DT(2011, 11, 11, 12, 25)
        obj = TimeSeries.as_dict(testdata, end=end)
        self.assertEquals(set([('124', 'Q')]), set(obj.keys()))
        self.assertEquals([((), {'timestamp__lte': end})], testdata.filtered)
Example 29
 def test100(self):
     'TimeSeries.as_dict reads events of series (a)'
     obj = TimeSeries.as_dict(self.testdata + "read.PI.timezone.2.xml")
     ts = obj[("600", "P1201")]
     self.assertEquals([
             (str_to_datetime("2010-04-03", "00:00:00", 2), (20, 0, '')),
             (str_to_datetime("2010-04-04", "00:00:00", 2), (22, 0, '')),
             (str_to_datetime("2010-04-05", "00:00:00", 2), (17, 0, '')),
             (str_to_datetime("2010-04-06", "00:00:00", 2), (20, 0, '')),
             (str_to_datetime("2010-04-07", "00:00:00", 2), (21, 0, '')),
             (str_to_datetime("2010-04-08", "00:00:00", 2), (22, 0, '')),
             (str_to_datetime("2010-04-09", "00:00:00", 2), (24, 0, '')),
             (str_to_datetime("2010-04-10", "00:00:00", 2), (24, 0, '')),
             (str_to_datetime("2010-04-11", "00:00:00", 2), (24, 0, '')),
             (str_to_datetime("2010-04-12", "00:00:00", 2), (22, 0, '')), ],
                       ts.get_events())
Example 30
 def test110(self):
     'TimeSeries.as_dict reads events of series (a)'
     obj = TimeSeries.as_dict(self.testdata + "read.PI.timezone.2.xml")
     ts = obj[("600", "P1201")]
     self.assertEquals([
             (str_to_datetime("2010-04-03", "00:00:00", 2), 20),
             (str_to_datetime("2010-04-04", "00:00:00", 2), 22),
             (str_to_datetime("2010-04-05", "00:00:00", 2), 17),
             (str_to_datetime("2010-04-06", "00:00:00", 2), 20),
             (str_to_datetime("2010-04-07", "00:00:00", 2), 21),
             (str_to_datetime("2010-04-08", "00:00:00", 2), 22),
             (str_to_datetime("2010-04-09", "00:00:00", 2), 24),
             (str_to_datetime("2010-04-10", "00:00:00", 2), 24),
             (str_to_datetime("2010-04-11", "00:00:00", 2), 24),
             (str_to_datetime("2010-04-12", "00:00:00", 2), 22), ],
                       ts.get_values())
Example 31
 def test_map_return_type(self):
     series = TimeSeries([ (1, 2), (3, 4), (5, 6) ])
     double = series.map(lambda y: y * 2)
     self.assertTrue(isinstance(double, TimeSeries))
     self.assertListEqual([ (1, 4), (3, 8), (5, 12) ], double.points)
Example 32
def tsmaker(m, s, j):
    t = np.arange(0.0, 1.0, 0.01)
    v = norm.pdf(t, m, s) + j * np.random.randn(100)
    return ts.TimeSeries(list(v), list(t))
Example 33
def random_ts(a):
    t = np.arange(0.0, 1.0, 0.01)
    v = a * np.random.random(100)
    return ts.TimeSeries(list(v), list(t))
Example 34
# Hold out the last 'horizon' values as actuals; keep the rest as raw data
actualData = rawData[rawData.shape[0] - horizon:, 4]
rawData = rawData[:rawData.shape[0] - horizon, :]

# Normalize the raw data
minMax = pp.MinMaxScaler((-1, +1))
rawData[:, 4] = minMax.fit_transform(rawData[:, 4])

# Available data - to store known and unknown
availableData = np.zeros((rawData.shape[0] + horizon, 4))
availableData[:rawData.shape[0], 0:3] = rawData[:, 0:3]
availableData[:rawData.shape[0], 3] = rawData[:, valueIndex]

#Pre-Process the data to form feature and target vectors
tsp = ts.TimeSeriesProcessor(rawData[:, valueIndex], depth, 1)
processedData = tsp.getProcessedData()

#Append bias
processedData = np.hstack((np.ones(
    (processedData.shape[0], 1)), processedData))

#Training data
trainingInputData = processedData[:, :1 + depth]
trainingOutputData = processedData[:, 1 + depth:]

#Validation data
validationInputData = trainingInputData
validationOutputData = trainingOutputData

#Tune the reservoir
Example 35
from timeseries import TimeSeries

threes = TimeSeries(range(0,1000,3))
fives = TimeSeries(range(0,1000,5))

s = 0
for i in range(0,1000):
  if i in threes or i in fives:
    s += i

print("sum",s)
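For reference, this loop is Project Euler problem 1; the expected total can be cross-checked without the TimeSeries container:

assert sum(set(range(0, 1000, 3)) | set(range(0, 1000, 5))) == 233168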
Example 36
        event_list = np.concatenate((event_list, tmp)).flatten()

# Event list
np.random.shuffle(event_list)

# Poisson mean time between events
lam = 1.0

print('Number of events %i' % len(event_list))

#
t = 0.01 * np.arange(lam * event_list.size)
tmin = np.min(t)
tmax = np.max(t)
emission = np.zeros_like(t)
start_time = 0.0
for ev in event_list:
    T = (1.0 * ev)**(-power)
    energy = T**(1.0 + gamma)
    start_time = np.random.uniform(low=tmin, high=tmax)
    emnew = event(t, start_time, energy, T)
    if np.max(emnew) >= 0.0:
        emission = emission + emnew
    else:
        print('Not big enough')

ts = TimeSeries(t[::10], emission[::10])
ts.peek_ps()
plt.loglog()
plt.show()
Example 37
 def test_invalid_trend(self):
     series = TimeSeries([])
     with self.assertRaises(ArithmeticError):
         series.trend()
Example 38
 def test_indexing(self):
     series = TimeSeries([ (1, 3), (2, 3), (3, 3) ])
     self.assertEquals(series[1], 3)
     self.assertEquals(series[2], 3)
     with self.assertRaises(KeyError):
         foo = series[4]
Example 39
        'index': 1
    },
    'vp': {
        'type': "bool",
        'index': 1
    }
}

orders = [0, 3, 1, 2]
blargs = [1, 1, 2, 2]
times = [0, 1, 2, 3, 4]  # Same time basis
values1 = [0, 2, 4, 6, 8]  # Two example time series values
values2 = [2, 4, 6, 8, 10]
vps = [True, False, False,
       True]  # Vantage points for first and last timeseries
tsrs = [TimeSeries(times, values1 if i < 2 else values2)
        for i in range(4)]  # only two value ranges


def setup_module(module):
    if os.path.exists("documents/"):
        shutil.rmtree('documents/')

    # Extend schema
    for i in range(4):
        if vps[i]:
            schema["d_vp-{}".format(i)] = {'type': "float", 'index': 1}

    # Make db
    db = DocDB('pk', schema)
Example 40
    def __init__(self, schema, pkfield, load=False, dbname="db", overwrite=False, dist=procs.corr_indb, threshold = 10, wordlength = 16, tslen = 256, cardinality = 64):
        """
        Parameters
        ----------
        schema : dict
            Key = name of field (e.g. 'ts', 'mean')
            Value = dict of that field's properties.  Recognized keys include:
                'type': Required for all fields except ts.  pkfield must have type str.
                'index': Required for all fields.  
        pkfield : str
            The name of the field which will be the primary key.  Must match a key in schema.
        load : bool
            Whether to populate the database with an existing one on file.
        dbname : str
            Database filename
        overwrite : bool
            If load=False, whether to overwrite an existing database.
        dist : function
            Calculates the distance between two TimeSeries objects, must take arguments (ts1, ts2)
        Attributes
        ----------
        indexes : dict
            Key = fieldname
            Value = binary search tree (if int or float) or dictionary of sets (otherwise) mapping values to pks
        rows : dict
            Key = primary key
            Value = dict of the fields associated with each key
        schema : dict (See above)
        pkfield : str (See above)
        dbname : str (See above)
        tslen : int
            The length of each timeseries in the database, strictly enforced
        """
        # ---- Validating input ---- #
        if not isinstance(pkfield, str):
            raise ValueError("Field name must be of type str")
        if not isinstance(threshold, int):
            raise ValueError("Threshold must be of type int")
        if not isinstance(wordlength, int):
            raise ValueError("Word length must be of type int")
        if threshold <= 0:
            raise ValueError("Threshold must be greater than zero")
        if wordlength <= 0:
            raise ValueError("Word length must be greater than zero")
        if '1' in '{0:b}'.format(wordlength)[1:]:
            raise ValueError("Word length must be a power of two")
        if not isinstance(tslen, int):
            raise ValueError("TimeSeries length must be of type int")
        if tslen < wordlength:
            raise ValueError("TimeSeries length must be greater than or equal to the word length")
        if '1' in '{0:b}'.format(tslen)[1:]:
            raise ValueError("TimeSeries length must be a power of two")
        if not isinstance(cardinality, int):
            raise ValueError("Cardinality must be of type int")
        if cardinality <= 0:
            raise ValueError("Cardinality must be greater than zero")
        if '1' in '{0:b}'.format(cardinality)[1:]:
            raise ValueError("Cardinality must be a power of two")
        if cardinality > 64:
            raise ValueError("Cardinalities greater than 64 are not supported")    
        if not isinstance(load, bool):
            raise ValueError("Load must be of type bool")
        if not isinstance(dbname, str):
            raise ValueError("Database name must be string")
        if not isinstance(overwrite, bool):
            raise ValueError("Overwrite must be of type bool")
        if isinstance(schema, dict):
            for field in schema:
                if field == 'DELETE':
                    raise ValueError("The fieldname 'DELETE' is forbidden")
                if ':' in field:
                    raise ValueError("Field names may not contain the ':' character")
                if field != 'ts':   
                    if 'type' not in schema[field]:
                        raise ValueError("Schema must specify type for each non-ts field")
                    if field == pkfield and schema[field]['type'] != str:
                        raise ValueError("Primary key must be of type str")
                    if schema[field]['type'] not in [int, float, bool, str]:
                        raise ValueError("Only types int, float, bool, and str are supported")
                if field[:5] == 'd_vp-':
                    raise ValueError("Field names beginning with 'd_vp-' are forbidden")
                if field == 'vp' and schema[field]['type'] != bool:
                    raise ValueError("Field 'vp' must be of boolean type")
        else:
            raise ValueError("Schema must be a dictionary")
        if pkfield not in schema:
            raise ValueError("Primary key field must be included in schema")

        # Assign attributes according to schema
        self.indexes = {}
        self.rows = {}
        self.rows_SAX = {}
        self.wordlength = wordlength
        self.threshold = threshold
        self.SAX_tree = Tree_Initializer(threshold = threshold, wordlength = wordlength).tree    
        self.card = cardinality
        self.schema = schema
        self.dbname = dbname
        self.pkfield = pkfield
        self.tslen = None
        self.tslen_SAX = tslen
        self.overwrite = overwrite
        self.dist = dist
        self.vps = []
        for s in schema:
            indexinfo = schema[s]['index']
            if indexinfo is not None:
                if schema[s]['type'] == int or schema[s]['type'] == float:
                    self.indexes[s] = BinarySearchTree()
                else:  # Add a bitmask option for strings?
                    self.indexes[s] = defaultdict(set)

        if load:   
            try:
                fd = open(dbname)
                for l in fd.readlines():
                    [pk, field, val] = l.strip().split(":")
                    if field in self.schema:
                        if pk not in self.rows:
                            self.rows[pk] = {pkfield:pk}
                        else:
                            if self.schema[field]['type'] == bool:
                                if val == 'False': 
                                    self.rows[pk][field] = False
                                else:
                                    self.rows[pk][field] = True
                            else:
                                self.rows[pk][field] = self.schema[field]['type'](val)
                        if pk not in self.rows_SAX:
                            self.rows_SAX[pk] = {pkfield:pk}
                        else:
                            if self.schema[field]['type'] == bool:
                                if val == 'False': 
                                    self.rows_SAX[pk][field] = False
                                else:
                                    self.rows_SAX[pk][field] = True
                            else:
                                self.rows_SAX[pk][field] = self.schema[field]['type'](val)
                        if field == 'vp' and val == 'True':
                            self.vps.append(pk)
                            self.indexes['d_vp-'+pk] = BinarySearchTree()
                    elif field == 'DELETE':
                        if 'vp' in schema and self.rows[pk]['vp'] == True:
                            self.del_vp(pk)
                        del self.rows[pk]
                        del self.rows_SAX[pk]
                    elif field[:5] == 'd_vp-':
                        self.rows[pk][field] = float(val)
                    else:
                        raise IOError("Database is incompatible with input schema")
                fd.close()
                
                # Read in timeseries of non-deleted keys
                for pk in self.rows:
                    tsarray = np.load(self.dbname+"_ts/"+pk+"_ts.npy")
                    self.rows[pk]['ts'] = TimeSeries(tsarray[0,:], tsarray[1,:])
                    self.tslen = tsarray.shape[1]
                    #tsarray2 = np.load(self.dbname+"_ts_SAX/"+pk+"_ts_SAX.npy")
                    x1 = np.linspace(min(tsarray[0,:]),max(tsarray[0,:]), self.tslen_SAX)
                    ts_SAX_data = interp1d(tsarray[0,:], tsarray[1,:])(x1)
                    ts_SAX_time = x1
                    ts_SAX = TimeSeries(ts_SAX_time,ts_SAX_data)
                    self.rows_SAX[pk]['ts'] = ts_SAX
                    rep = isax_indb(ts_SAX,self.card,self.wordlength)
                    self.SAX_tree.insert(pk, rep)
                self.index_bulk(list(self.rows.keys()))
            except:
                raise IOError("Database does not exist or has been corrupted")
        else:
            if os.path.exists(dbname) and overwrite == False:
                raise ValueError("Database of that name already exists. Delete existing db, rename, or set overwrite=True.")
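The power-of-two validations above test whether the binary representation has a single leading 1 ('1' in '{0:b}'.format(x)[1:]); an equivalent check for positive integers is the usual bit trick:

def is_power_of_two(x):
    # a power of two has exactly one set bit, so clearing the lowest set bit leaves zero
    return x > 0 and (x & (x - 1)) == 0

assert all(is_power_of_two(x) for x in (1, 2, 16, 64, 256))
assert not any(is_power_of_two(x) for x in (0, 3, 12, 100))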
Example 41
    plt.grid(True)
    plt.show()


if __name__ == "__main__":
    events = queue.Queue()  # synchronized queue

    status = dict()  # state we want to remember across ticks
    status["is_sim"] = True

    portfolio = PortfolioLocal(status)

    execution = SimulatedExecutionHandler(status)

    timeseries = TimeSeries(status)

    strategy = OLSTIME(status)    
#    strategy = SMAKNN(status)
#    strategy = MARTINRSI(status)
#    strategy = SMABOLPIPRSI(status)
#    strategy = SMABOLPIP(status)    
#    strategy = SMAPIP(status)
#    strategy = SMAPIPRSI(status)
#    strategy = SMAOLSPIP(status)
#    strategy = SMABOL(status)
#    strategy = SMARSIOLS(status)
#    strategy = WMA(status)
#    strategy = SMAOLS(status)
#    strategy = SMA(status)
#    strategy = RSI(status)
Example 42
 def _load_ts(self, pk):
     filepath = 'documents/ts/' + pk + '.json'
     with open(filepath, 'r+') as f:
         time_series = json.load(f)
     time_series['ts'] = TimeSeries(*time_series['ts'])
     return time_series
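_load_ts expects documents/ts/<pk>.json to hold, under the 'ts' key, the argument list that reconstructs the TimeSeries. A hypothetical companion writer, assuming the times()/values() accessors used elsewhere in this listing, might look like the sketch below (illustrative only, not the project's code):

import json
import os

def _save_ts_sketch(pk, time_series):
    # hypothetical inverse of _load_ts: store the TimeSeries as the
    # argument list that reconstructs it via TimeSeries(*...)
    doc = dict(time_series)
    ts_obj = doc['ts']
    doc['ts'] = [list(ts_obj.times()), list(ts_obj.values())]  # assumed accessors
    if not os.path.exists('documents/ts'):
        os.makedirs('documents/ts')
    with open('documents/ts/' + pk + '.json', 'w') as f:
        json.dump(doc, f)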
Example 43
    def get_data(self,
                 channels,
                 start_time,
                 end_time,
                 buffer_time=0.0,
                 resampled_rate=None,
                 filt_freq=None,
                 filt_type='stop',
                 filt_order=4,
                 keep_buffer=False,
                 esrc='esrc',
                 eoffset='eoffset',
                 loop_axis=None,
                 num_mp_procs=0,
                 eoffset_in_time=True):
        """
        Return the requested range of data for each event by using the
        proper data retrieval mechanism for each event.

        Parameters
        ----------
        channels: {list,int,None}
            Channels from which to load data.
        start_time: {float}
            Start of epoch to retrieve (in time-unit of the data).
        end_time: {float}
            End of epoch to retrieve (in time-unit of the data).
        buffer_time: {float},optional
            Extra buffer to add on either side of the event in order
            to avoid edge effects when filtering (in time unit of the
            data).
        resampled_rate: {float},optional
            New samplerate to resample the data to after loading.
        filt_freq: {array_like},optional
            The range of frequencies to filter (depends on the filter
            type.)
        filt_type = {scipy.signal.band_dict.keys()},optional
            Filter type.
        filt_order = {int},optional
            The order of the filter.
        keep_buffer: {boolean},optional
            Whether to keep the buffer when returning the data.
        esrc : {string},optional
            Name for the field containing the source for the time
            series data corresponding to the event.
        eoffset: {string},optional
            Name for the field containing the offset (in seconds) for
            the event within the specified source.
        eoffset_in_time: {boolean},optional        
            If True, the unit of the event offsets is taken to be
            time (unit of the data), otherwise samples.
        
        Returns
        -------
        A TimeSeries instance with dimensions (channels,events,time).
        """

        # check for necessary fields
        if not (esrc in self.dtype.names and eoffset in self.dtype.names):
            raise ValueError(esrc + ' and ' + eoffset +
                             ' must be valid fieldnames ' +
                             'specifying source and offset for the data.')

        # get ready to load data
        eventdata = []
        events = []

        # speed up by getting unique event sources first
        usources = np.unique(self[esrc])

        # loop over unique sources
        eventdata = None
        for src in usources:
            # get the eventOffsets from that source
            ind = np.atleast_1d(self[esrc] == src)

            if len(ind) == 1:
                event_offsets = self[eoffset]
                events.append(self)
            else:
                event_offsets = self[ind][eoffset]
                events.append(self[ind])

            #print "Loading %d events from %s" % (ind.sum(),src)
            # get the timeseries for those events
            newdat = src.get_event_data(channels, event_offsets, start_time,
                                        end_time, buffer_time, resampled_rate,
                                        filt_freq, filt_type, filt_order,
                                        keep_buffer, loop_axis, num_mp_procs,
                                        eoffset, eoffset_in_time)
            if eventdata is None:
                eventdata = newdat
            else:
                eventdata = eventdata.extend(newdat, axis=1)

        # concatenate (must eventually check that dims match)
        tdim = eventdata['time']
        cdim = eventdata['channels']
        srate = eventdata.samplerate
        events = np.concatenate(events).view(self.__class__)
        eventdata = TimeSeries(eventdata,
                               'time',
                               srate,
                               dims=[cdim, Dim(events, 'events'), tdim])

        return eventdata
Example 44
def score_all(functionNode):
    """
        score all thresholds again by using the stream implementation
        works only in the context of the class object
    """
    logger = functionNode.get_logger()
    logger.debug("score_all")
    progressNode = functionNode.get_child("control").get_child("progress")
    progressNode.set_value(0)
    model = functionNode.get_model()  # for the model API
    annos = functionNode.get_child("annotations").get_leaves()
    annos = [
        anno for anno in annos if anno.get_child("type").get_value() == "time"
    ]  #only the time annotations
    variableIds = functionNode.get_child(
        "variables").get_leaves_ids()  # the variableids to work on
    try:
        overWrite = functionNode.get_child("overWrite").get_value()
    except:
        overWrite = True

    obj = functionNode.get_parent().get_object()
    obj.reset(
    )  #read the new thresholds into the object!! this also affects parallel streaming processes

    # for each id (variable) that has threshold(s)
    # take the values and times of that variable
    # find out the annotations we need, create the stream data blob, send it over
    progressStep = 1 / float(len(obj.get_thresholds()))
    total = None

    for id, thresholdsInfo in obj.get_thresholds().items(
    ):  # thresholds is a dict of {id: {tag: {"min": 0, "max": 1}, tag2: {...}}, id2: {...}}
        if id not in variableIds:
            continue  # skip this one, is not selected
        progressNode.set_value(progressNode.get_value() + progressStep)
        var = model.get_node(id)
        data = var.get_time_series()
        times = data["__time"]
        #now produce the interesting states
        blob = {
            "type": "timeseries",
            "data": {
                "__time": times,
                id: data["values"],
                "__states": {}
            }
        }
        for state in thresholdsInfo.keys(
        ):  #iterate over the states where the variable has special thresholds
            myAnnos = mh.filter_annotations(annos, state)
            stateMask = mh.annotations_to_class_vector(myAnnos, data["__time"])
            stateMask = numpy.isfinite(stateMask)
            blob["data"]["__states"][state] = stateMask

        #now we have prepared a data and state blob, we will now score by feeding it into the stream scorer
        #del blob["data"]["__states"]#for test, now
        blob = obj.feed(blob)
        #now the blob contains more entries, e.g. the score variable id and the according scores, that is what we want
        for blobId, values in blob["data"].items():
            if blobId not in ["__time", id, "__states"]:
                #this is the score, overwrite the whole thing
                scoreNode = model.get_node(blobId)
                if scoreNode.get_name() == "_total_score":
                    continue  # this is the combined result of several variables going into the stream scoring, not relevant here

                scoreNode.set_time_series(
                    values=values, times=times
                )  # xxx is set ok here, or do we need "insert" to make sure there has not been changed in the meantime?
                model.notify_observers(scoreNode.get_parent().get_id(),
                                       "children")  # we trigger

                # build the total score:
                # merge in the new times, resample the total score and the local score, then merge them
                # the merge function will use the new values wherever there is one (empty fields are marked "nan")
                # for the total score we need a resampling to avoid mixing results, e.g. when
                # two sensors have different results during a given interval but at different times; if we just merge
                # we get a True, False, True, False mixture,
                # so we build the merge vector: first resample, then merge

                values[numpy.isfinite(
                    values)] = -1  # set -1 for all out of limit
                if type(total) is type(None):
                    total = TimeSeries(values=values, times=times)
                else:
                    local = TimeSeries(values=values, times=times)
                    total.merge(
                        local
                    )  # the merge resamples the incoming data to the existing time series, NaN will be replaced by new values,
    # finally, write the total
    # if the overWrite is True, we replace, otherwise we merge with the existing, previous result
    totalScoreNode = functionNode.get_parent().get_child("output").get_child(
        "_total_score")
    if overWrite:
        totalScoreNode.set_time_series(values=total.get_values(),
                                       times=total.get_times())
    else:
        totalScoreNode.merge_time_series(values=total.get_values(),
                                         times=total.get_times())

    return True
Example 45
app = Flask(__name__)

database_info = {
    "url": "localhost",
    "port": "27017",
    "database": "Sibyl"
}
label = "analyzer"
db_manager = DatabaseManager(database_info, label)

global pewma
global cl
global ts
cl = StaticControlLimits()
pewma_model = Pewma()
ts = TimeSeries()


@app.route('/static_control_limits', methods=['POST'])
def static_control_limits():
    """ Function used to check if data is above set threshold

        Args:
            data (dict): the raw data

        Returns:
            dict
    """
    try:
        content = request.get_json()
        try:
Example 46
 def test_iteration(self):
     points = [ (1, 2), (3, 4), (5, 6) ]
     series = TimeSeries(points)
     self.assertListEqual([ s for s in series ], points)
Example 47
# Create a time series object
dt = 12.0
t = dt * np.arange(0, nt)

# Fix the input datacube for any non-finite data
for i in range(0, nx):
    for j in range(0, ny):
        d = dc[j, i, :].flatten()
        # Fix the data for any non-finite entries
        d = tsutils.fix_nonfinite(d)
        # Remove the mean
        d = d - np.mean(d)
        dc[j, i, :] = d

# Time series datacube
dcts = TimeSeries(t, dc)
dcts.name = 'AIA ' + str(wave) + '-' + stype + ': ' + location

# Get the Fourier power
pwr = dcts.ppower

# Arithmetic mean of the Fourier power
iobs = np.mean(pwr, axis=(0, 1))

# Sigma for the fit to the power
sigma = np.std(pwr, axis=(0, 1))

# Result # 1 - add up all the emission and do the analysis on the full FOV
# Also, make a histogram of all the power spectra to get an idea of the
# variation present
Example 48
                                    power_noise=False)

# Noise
noise_pls = SimplePowerLawSpectrum([10.0, 2.0], nt=nt, dt=dt)
rn = TimeSeriesFromPowerSpectrum(noise_pls, V=V, W=W)
noise = 0.0 * rn.sample

# Create the simulated data
amplitude = 100
data = amplitude * tsnew.sample + noise

# Sample times
t = dt * np.arange(0, nt)

# Time series object
ts = TimeSeries(t, data)

# Scaled frequency
freqs = ts.PowerSpectrum.frequencies.positive / ts.PowerSpectrum.frequencies.positive[
    0]

# Form the input for the MCMC algorithm.
this = ([ts.pfreq, ts.ppower], )

norm_estimate = np.zeros((3, ))
norm_estimate[0] = ts.ppower[0]
norm_estimate[1] = norm_estimate[0] / 1000.0
norm_estimate[2] = norm_estimate[0] * 1000.0

background_estimate = np.zeros_like(norm_estimate)
background_estimate[0] = np.mean(ts.ppower[-10:-1])
Example 49
        wave,
        location='/Users/ireland/Data/AIA_Data/shutdownfun/' + choice,
        derotate=False)

#wave = '171'
#dc, location = aia_specific.rn4(wave, Xrange=[-201, -1])

# Get some properties of the datacube
ny = dc.shape[0]
nx = dc.shape[1]
nt = dc.shape[2]

# Create a time series object
dt = 12.0
t = dt * np.arange(0, nt)
tsdummy = TimeSeries(t, t)
iobs = np.zeros(tsdummy.PowerSpectrum.Npower.shape)
logiobs = np.zeros(tsdummy.PowerSpectrum.Npower.shape)
nposfreq = len(iobs)

# Result # 1 - add up all the emission and do the analysis on the full FOV
# Also, make a histogram of all the power spectra to get an idea of the
# variation present

# Sum over all the spatial locations
full_data = sum_over_space(dc)

#
#full_data = tsutils.fix_nonfinite(dc[10, 10, :])

# Average emission over all the data
Example 50
def test_stand():
    t1 = TimeSeries([1, 2, 3, 4], [40, 50, 60, 70])
    val = _corr.stand(np.array(t1.values()), 55.0, 10)
    assert (list(val) == [-1.5, -0.5, 0.5, 1.5])
Example 51
    def get_event_data(self,
                       channels,
                       events,
                       start_time,
                       end_time,
                       buffer_time=0.0,
                       resampled_rate=None,
                       filt_freq=None,
                       filt_type='stop',
                       filt_order=4,
                       keep_buffer=False,
                       loop_axis=None,
                       num_mp_procs=0,
                       eoffset='eoffset',
                       eoffset_in_time=True):
        """
        Return a TimeSeries containing data for the specified channel
        in the form [events,duration].

        Parameters
        ----------
        channels: {int} or {dict}
            Channels from which to load data.
        events: {array_like} or {recarray}
            Array/list of event offsets (in time or samples as
            specified by eoffset_in_time; in time by default) into
            the data, specifying each event onset time.
        start_time: {float}
            Start of epoch to retrieve (in time-unit of the data).
        end_time: {float}
            End of epoch to retrieve (in time-unit of the data).
        buffer_time: {float},optional
            Extra buffer to add on either side of the event in order
            to avoid edge effects when filtering (in time unit of the
            data).
        resampled_rate: {float},optional
            New samplerate to resample the data to after loading.
        filt_freq: {array_like},optional
            The range of frequencies to filter (depends on the filter
            type.)
        filt_type = {scipy.signal.band_dict.keys()},optional
            Filter type.
        filt_order = {int},optional
            The order of the filter.
        keep_buffer: {boolean},optional
            Whether to keep the buffer when returning the data.
        eoffset_in_time: {boolean},optional        
            If True, the unit of the event offsets is taken to be
            time (unit of the data), otherwise samples.
        """

        # translate back to dur and offset
        dur = end_time - start_time
        offset = start_time
        buf = buffer_time

        # get the event offsets
        if ((not (hasattr(events, 'dtype') or hasattr(events, 'columns')))
                or (hasattr(events, 'dtype') and events.dtype.names is None)):
            # they just passed in a list
            event_offsets = events
        elif ((hasattr(events, 'dtype') and (eoffset in events.dtype.names))
              or (hasattr(events, 'columns') and (eoffset in events.columns))):
            event_offsets = events[eoffset]
        else:
            raise ValueError(eoffset + ' must be a valid fieldname ' +
                             'specifying the offset for the data.')

        # Sanity checks:
        if (dur < 0):
            raise ValueError('Duration must not be negative! ' +
                             'Specified duration: ' + str(dur))
        if (np.min(event_offsets) < 0):
            raise ValueError('Event offsets must not be negative!')

        # make sure the events are an actual array:
        event_offsets = np.asarray(event_offsets)
        if eoffset_in_time:
            # convert to samples
            event_offsets = np.atleast_1d(
                np.int64(np.round(event_offsets * self.samplerate)))

        # set event durations from rate
        # get the samplesize
        samplesize = 1. / self.samplerate

        # get the number of buffer samples
        buf_samp = int(np.ceil(buf / samplesize))

        # calculate the offset samples that contains the desired offset
        offset_samp = int(
            np.ceil((np.abs(offset) - samplesize * .5) / samplesize) *
            np.sign(offset))

        # finally get the duration necessary to cover the desired span
        #dur_samp = int(np.ceil((dur - samplesize*.5)/samplesize))
        dur_samp = (int(np.ceil(
            (dur + offset - samplesize * .5) / samplesize)) - offset_samp + 1)

        # add in the buffer
        dur_samp += 2 * buf_samp
        offset_samp -= buf_samp

        # check that we have all the data we need before every event:
        if (np.min(event_offsets + offset_samp) < 0):
            bad_evs = ((event_offsets + offset_samp) < 0)
            raise ValueError('The specified values for offset and buffer ' +
                             'require more data than is available before ' +
                             str(np.sum(bad_evs)) + ' of all ' +
                             str(len(bad_evs)) + ' events.')

        # process the channels
        if isinstance(channels, dict):
            # turn into indices
            ch_info = self.channels
            key = channels.keys()[0]
            channels = [
                np.nonzero(ch_info[key] == c)[0][0] for c in channels[key]
            ]
        elif isinstance(channels, str):
            # find that channel by name
            channels = np.nonzero(self.channels['name'] == channels)[0][0]
        if channels is None or len(np.atleast_1d(channels)) == 0:
            channels = np.arange(self.nchannels)
        channels = np.atleast_1d(channels)
        channels.sort()

        # load the timeseries (this must be implemented by subclasses)
        eventdata = self._load_data(channels, event_offsets, dur_samp,
                                    offset_samp)

        # calc the time range
        # get the samplesize
        samp_start = offset_samp * samplesize
        samp_end = samp_start + (dur_samp - 1) * samplesize
        time_range = np.linspace(samp_start, samp_end, dur_samp)

        # when channels is an array of channel labels, i.e. strings like '002', '003', ...
        # we need to use xray arrays to do fancy indexing
        if channels.dtype.char == 'S':
            try:
                # perhaps we should vectorize it...
                selector_array = [
                    np.where(self.channels.name == channel)[0][0]
                    for channel in channels
                ]

                selected_channels = self.channels[selector_array]

                # from xray import DataArray
                # self.channels_xray = DataArray(self.channels.number,coords=[self.channels.name],dims=['name'])
                # self.channels_xray = self.channels_xray.loc[channels]
                #
                # self.channels_xray=np.rec.fromarrays([self.channels_xray.values,self.channels_xray.coords['name'].values],names='number,name')

            except ImportError:
                pass

            dims = [
                Dim(selected_channels, 'channels'),  # can index into channels
                Dim(events, 'events'),
                Dim(time_range, 'time')
            ]

            # dims = [Dim(self.channels_xray,'channels'),  # can index into channels
            #         Dim(events,'events'),
            #         Dim(time_range,'time')]

        else:

            # make it a timeseries
            # ORIGINAL CODE
            dims = [
                Dim(self.channels[channels],
                    'channels'),  # can index into channels
                Dim(events, 'events'),
                Dim(time_range, 'time')
            ]

        # # make it a timeseries
        # dims = [Dim(self.channels[channels],'channels'),  # can index into channels
        #         Dim(events,'events'),
        #         Dim(time_range,'time')]

        eventdata = TimeSeries(np.asarray(eventdata),
                               'time',
                               self.samplerate,
                               dims=dims)

        # filter if desired
        if not (filt_freq is None):
            # filter that data
            eventdata = eventdata.filtered(filt_freq,
                                           filt_type=filt_type,
                                           order=filt_order)

        # resample if desired
        if (not (resampled_rate is None)
                and not (resampled_rate == eventdata.samplerate)):
            # resample the data
            eventdata = eventdata.resampled(resampled_rate,
                                            loop_axis=loop_axis,
                                            num_mp_procs=num_mp_procs)

        # remove the buffer and set the time range
        if buf > 0 and not (keep_buffer):
            # remove the buffer
            eventdata = eventdata.remove_buffer(buf)

        # return the timeseries
        return eventdata
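The sample-index arithmetic in the middle of this method is easiest to follow with concrete numbers; a worked example assuming a 100 Hz samplerate, an epoch from -0.2 s to 0.8 s, and a 0.1 s buffer (results in the comments):

import numpy as np

samplerate = 100.0                      # assumed for illustration
start_time, end_time, buf = -0.2, 0.8, 0.1
samplesize = 1.0 / samplerate           # 0.01 s per sample
dur = end_time - start_time             # 1.0 s
offset = start_time
buf_samp = int(np.ceil(buf / samplesize))                                   # 10
offset_samp = int(np.ceil((np.abs(offset) - samplesize * .5) / samplesize) *
                  np.sign(offset))                                          # -20
dur_samp = (int(np.ceil((dur + offset - samplesize * .5) / samplesize)) -
            offset_samp + 1)                                                # 101
dur_samp += 2 * buf_samp                                                    # 121 samples per event
offset_samp -= buf_samp                                                     # start 30 samples before each event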
Example 52
def test_delete():
    client.insert_ts('test', TimeSeries(times, values1))
    client.delete_ts('test')
Example 53
red_noise = False

dt = 12.0
nt = 300
np.random.seed(seed=1)
model_param = [10.0, 1.77, -100.0]
pls1 = SimplePowerLawSpectrumWithConstantBackground(model_param,
                                                    nt=nt,
                                                    dt=dt)
data = TimeSeriesFromPowerSpectrum(pls1).sample
t = dt * np.arange(0, nt)
amplitude = 0.0
data = data + amplitude * (data.max() - data.min()) * np.sin(2 * np.pi * t / 300.0)

# Create a time series object
ts = TimeSeries(t, data)
ts.label = 'emission'
ts.units = 'arb. units'
ts.name = 'simulated data [n=%4.2f]' % (model_param[1])

# Get the normalized power and the positive frequencies
iobs = ts.PowerSpectrum.ppower
this = ([ts.PowerSpectrum.frequencies.positive, iobs],)

# _____________________________________________________________________________
# -----------------------------------------------------------------------------
# Wavelet transform using a white noise background
# -----------------------------------------------------------------------------
var = ts.data
# Range of periods to average
avg1, avg2 = (150.0, 400.0)
Example 54
dt = 12.0
nt = 1800
period = 300.0
window = 31
np.random.seed(seed=1)

t = dt * np.arange(0, nt)

noise = np.random.normal(size=nt)

amplitude = 1.0
data1 = amplitude * np.sin(2 * np.pi * t / period) + noise + 10

data2 = tsutils.movingaverage(data1, window)

ts1 = TimeSeries(t, data1)

ts2 = TimeSeries(t, data2)

ts3 = TimeSeries(t, data1 - data2)

plt.figure(1)
plt.plot(t, data1, label='original time series')
plt.plot(t, data2, label='moving average')
plt.plot(t, data1 - data2, label='original - moving average')
plt.xlabel('time')
plt.ylabel('emission (arbitrary units)')
plt.legend()

window = window / 2
w1 = 1.0 / (2 * window + 1)
Example 55
def stand(x, m, s):
    "standardize timeseries x by mean m and std deviation s"
    return ts.TimeSeries(list((x.values() - m) / s), list(x.times()))
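The standardization above, (values - m) / s, matches the hard-coded expectation in test_stand earlier in this listing; a quick check of the arithmetic alone:

import numpy as np

assert list((np.array([40., 50., 60., 70.]) - 55.0) / 10) == [-1.5, -0.5, 0.5, 1.5]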
Example 56
from performance import ErrorMetrics as rmse
from datetime import datetime
import numpy as np
import os
from datetime import date, timedelta
from timeseries import TimeSeries as ts

# Read data from the file
rawData = np.loadtxt("facebookFansHistory_bmw_raw.txt", delimiter=',')

data = rawData[:rawData.shape[0], rawData.shape[1] - 1].reshape(
    (rawData.shape[0], 1))

depth = 1
horizon = 1
tsp = ts.TimeSeriesProcessor(rawData, depth, horizon, 4)
processedData = tsp.getProcessedData()

inputData = np.hstack((np.ones((processedData.shape[0], 1)),
                       processedData[:processedData.shape[0], :depth]))
outputData = processedData[:processedData.shape[0], depth:depth + horizon]

# Train
inputWeightRandom = np.load("Outputs/inputWeight.npy")
reservoirWeightRandom = np.load("Outputs/reservoirWeight.npy")
res = reservoir.Reservoir(size=600,
                          spectralRadius=1.25,
                          inputScaling=0.50,
                          reservoirScaling=0.50,
                          leakingRate=0.20,
                          initialTransient=0,
Example 57
nt = 300
V = 100
W = 100
no_noise = False
pls = SimplePowerLawSpectrum([10.0, 2.0], nt=nt, dt=dt)

tsnew3 = TimeSeriesFromPowerSpectrum(pls, V=V, W=W)

i = 0
ncount = 10000
data3 = np.zeros(shape=(nt))
while i < ncount:
    i = i + 1
    data3 = data3 + tsnew3.sample * np.random.pareto(2.0)

ts3 = TimeSeries(dt * np.arange(0, nt), data3)

this = ([ts3.pfreq, ts3.ppower], )

norm_estimate = np.zeros((3, ))
norm_estimate[0] = ts3.ppower[0]
norm_estimate[1] = norm_estimate[0] / 1000.0
norm_estimate[2] = norm_estimate[0] * 1000.0

background_estimate = np.zeros_like(norm_estimate)
background_estimate[0] = np.mean(ts3.ppower[-10:-1])
background_estimate[1] = background_estimate[0] / 1000.0
background_estimate[2] = background_estimate[0] * 1000.0

estimate = {
    "norm_estimate": norm_estimate,
Example 58
    def insert_ts(self, pk, ts):    
        try:
            pk = str(pk)
        except:
            raise ValueError("Primary keys must be string-compatible")
        if ':' in pk:
            raise ValueError("Primary keys may not include the ':' character") 
        if not isinstance(ts, TimeSeries):
            raise ValueError('Must insert a TimeSeries object')

        if pk not in self.rows:
            self.rows[pk] = {self.pkfield:pk}
        else:
            raise ValueError('Duplicate primary key found during insert')
        if pk not in self.rows_SAX:
            self.rows_SAX[pk] = {self.pkfield:pk}
        else:
            raise ValueError('Duplicate primary key found during insert')

        # Save timeseries as a 2d numpy array
        if self.tslen is None:
            self.tslen = len(ts)
        elif len(ts) != self.tslen:
            raise ValueError('All timeseries must be of same length')
        if not os.path.exists(self.dbname+"_ts"):
            os.makedirs(self.dbname+"_ts")
        np.save(self.dbname+"_ts/"+pk+"_ts.npy", np.vstack((ts.time, ts.data)))
        
        # Resample the series onto a uniform grid of tslen_SAX points before building the SAX word
        x1 = np.linspace(min(ts.time), max(ts.time), self.tslen_SAX)
        ts_SAX_data = interp1d(ts.time, ts.data)(x1)
        ts_SAX_time = x1
        ts_SAX = TimeSeries(ts_SAX_time, ts_SAX_data)
        if not os.path.exists(self.dbname+"_ts_SAX"):
            os.makedirs(self.dbname+"_ts_SAX")
        np.save(self.dbname+"_ts_SAX/"+pk+"_ts_SAX.npy", np.vstack((ts_SAX.time, ts_SAX.data)))

        # Save a record in the database file
        if self.overwrite or not os.path.exists(self.dbname):
            fd = open(self.dbname, 'w')
            self.overwrite = False
        else:
            fd = open(self.dbname, 'a')
        fd.write(pk+':'+self.pkfield+':'+pk+'\n')
        if 'vp' in self.schema:
            fd.write(pk+':vp:False\n')
        fd.close()

        self.rows[pk]['ts'] = ts  
        if 'vp' in self.schema:
            self.rows[pk]['vp'] = False

        self.rows_SAX[pk]['ts'] = ts_SAX  
        rep = isax_indb(ts_SAX,self.card,self.wordlength)
        self.SAX_tree.insert(pk, rep)
        if 'vp' in self.schema:
            self.rows_SAX[pk]['vp'] = False

        for vp in self.vps:
            ts1 = self.rows[vp]['ts']
            self.upsert_meta(pk, {'d_vp-'+vp : self.dist(ts1,ts)})

        self.update_indices(pk)
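
# The SAX preprocessing in insert_ts boils down to resampling every series onto a
# common fixed-length time grid. A minimal self-contained sketch of that step
# (hypothetical helper; linear interpolation as in the interp1d call above):
import numpy as np
from scipy.interpolate import interp1d

def resample_to_fixed_length(time, data, n_points):
    grid = np.linspace(min(time), max(time), n_points)   # uniform grid spanning the series
    return grid, interp1d(time, data)(grid)              # values interpolated onto the grid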
Example n. 59
0
# Create a location to save the figures
savefig = os.path.join(os.path.expanduser(savefig), wave)
if not (os.path.isdir(savefig)):
    os.makedirs(savefig)
figname = data_name
savefig = os.path.join(savefig, figname)

# Get some properties of the datacube
ny = dc.shape[0]
nx = dc.shape[1]
nt = dc.shape[2]

# Create a time series object
dt = 12.0
t = dt * np.arange(0, nt)
tsdummy = TimeSeries(t, t)
iobs = np.zeros(tsdummy.PowerSpectrum.Npower.shape)
logiobs = np.zeros(tsdummy.PowerSpectrum.Npower.shape)
nposfreq = len(iobs)

# Result # 1 - add up all the emission and do the analysis on the full FOV
# Also, make a histogram of all the power spectra to get an idea of the
# variation present

# storage
pwr = np.zeros((ny, nx, nposfreq))
logpwr = np.zeros_like(pwr)
full_data = np.zeros((nt))
for i in range(0, nx):
    for j in range(0, ny):
        d = dc[j, i, :].flatten()
Example n. 60
0
    def get_event_data(self,
                       channels,
                       event_offsets,
                       start_time,
                       end_time,
                       buffer_time=0.0,
                       resampled_rate=None,
                       filt_freq=None,
                       filt_type='stop',
                       filt_order=4,
                       keep_buffer=False):
        """
        Return an TimeSeries containing data for the specified channel
        in the form [events,duration].

        Parameters
        ----------
        channels: {int}
            Channels from which to load data.
        event_offsets: {array_like}
            Array/list of event offsets (in samples) into the data,
            specifying each event onset time.
        start_time: {float}
            Start of epoch to retrieve (in time-unit of the data).
        end_time: {float}
            End of epoch to retrieve (in time-unit of the data).
        buffer_time: {float},optional
            Extra buffer to add on either side of the event in order
            to avoid edge effects when filtering (in time unit of the
            data).
        resampled_rate: {float},optional
            New samplerate to resample the data to after loading.
        filt_freq: {array_like},optional
            The range of frequencies to filter (depends on the filter
            type.)
        filt_type: {scipy.signal.band_dict.keys()},optional
            Filter type.
        filt_order: {int},optional
            The order of the filter.
        keep_buffer: {boolean},optional
            Whether to keep the buffer when returning the data.
        """

        # translate back to dur and offset
        dur = end_time - start_time
        offset = start_time
        buf = buffer_time

        # Sanity checks:
        if (dur < 0):
            raise ValueError('Duration must not be negative! ' +
                             'Specified duration: ' + str(dur))
        if (np.min(event_offsets) < 0):
            raise ValueError('Event offsets must not be negative!')

        # make sure the events are an actual array
        event_offsets = np.asarray(event_offsets)

        # set event durations from rate
        # get the samplesize
        samplesize = 1. / self.samplerate

        # get the number of buffer samples
        buf_samp = int(np.ceil(buf / samplesize))

        # calculate the offset samples that contains the desired offset
        offset_samp = int(
            np.ceil((np.abs(offset) - samplesize * .5) / samplesize) *
            np.sign(offset))

        # finally get the duration necessary to cover the desired span
        #dur_samp = int(np.ceil((dur - samplesize*.5)/samplesize))
        dur_samp = (int(np.ceil(
            (dur + offset - samplesize * .5) / samplesize)) - offset_samp + 1)

        # add in the buffer
        dur_samp += 2 * buf_samp
        offset_samp -= buf_samp

        # check that we have all the data we need before every event:
        if (np.min(event_offsets + offset_samp) < 0):
            bad_evs = ((event_offsets + offset_samp) < 0)
            raise ValueError('The specified values for offset and buffer ' +
                             'require more data than is available before ' +
                             str(np.sum(bad_evs)) + ' of all ' +
                             str(len(bad_evs)) + ' events.')

        # process the channels
        if channels is None or len(np.atleast_1d(channels)) == 0:
            channels = np.arange(self.nchannels)
        channels = np.atleast_1d(channels)

        # load the timeseries (this must be implemented by subclasses)
        eventdata = self._load_data(channels, event_offsets, dur_samp,
                                    offset_samp)

        # calc the time range
        # get the samplesize
        samp_start = offset_samp * samplesize
        samp_end = samp_start + (dur_samp - 1) * samplesize
        time_range = np.linspace(samp_start, samp_end, dur_samp)

        # make it a timeseries
        dims = [
            Dim(channels, 'channels'),
            Dim(event_offsets, 'event_offsets'),
            Dim(time_range, 'time')
        ]
        eventdata = TimeSeries(np.asarray(eventdata),
                               'time',
                               self.samplerate,
                               dims=dims)

        # filter if desired
        if filt_freq is not None:
            # filter that data
            eventdata = eventdata.filtered(filt_freq,
                                           filt_type=filt_type,
                                           order=filt_order)

        # resample if desired
        if (resampled_rate is not None
                and resampled_rate != eventdata.samplerate):
            # resample the data
            eventdata = eventdata.resampled(resampled_rate)

        # remove the buffer and set the time range
        if buf > 0 and not keep_buffer:
            # remove the buffer
            eventdata = eventdata.remove_buffer(buf)

        # return the timeseries
        return eventdata
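
# A minimal sketch of the epoch-to-samples arithmetic used in get_event_data, pulled
# out as a standalone helper (hypothetical name; mirrors the rounding and buffer
# handling above, assuming start/end/buffer times share the unit of 1/samplerate):
import numpy as np

def epoch_to_samples(start_time, end_time, buffer_time, samplerate):
    samplesize = 1.0 / samplerate
    buf_samp = int(np.ceil(buffer_time / samplesize))
    # sample index containing the requested start time
    offset_samp = int(np.ceil((np.abs(start_time) - samplesize * .5) / samplesize)
                      * np.sign(start_time))
    # number of samples needed to cover the requested span
    dur_samp = (int(np.ceil((end_time - samplesize * .5) / samplesize))
                - offset_samp + 1)
    # pad both sides with the buffer
    return offset_samp - buf_samp, dur_samp + 2 * buf_samp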