def test_explicit_structured_dtype(self):
    "Test tsfromtxt with an explicit structured dtype"
    converter = lambda y, m, d: Date('D', year=int(y), month=int(m),
                                     day=int(d))
    # First pass: float data fields.
    txt = StringIO.StringIO("2007,12,31,24,34,56,0")
    dtype_float = [('tobs', int), ('tmin', float), ('tmax', float),
                   ('rain', float)]
    result = tsfromtxt(txt, delimiter=",", dtype=dtype_float,
                       datecols=(0, 1, 2), dateconverter=converter)
    expected = time_series([(24, 34.0, 56.0, 0.0)], dtype=dtype_float,
                           start_date=Date('D', '2007-12-01'))
    assert_equal(result, expected)
    # Second pass: same input, int/complex data fields.
    txt = StringIO.StringIO("2007,12,31,24,34,56,0")
    dtype_mixed = [('tobs', int), ('tmin', int), ('tmax', int),
                   ('rain', complex)]
    result = tsfromtxt(txt, delimiter=",", dtype=dtype_mixed,
                       datecols=(0, 1, 2), dateconverter=converter)
    expected = time_series([(24, 34.0, 56.0, 0.0)], dtype=dtype_mixed,
                           start_date=Date('D', '2007-12-01'))
    assert_equal(result, expected)
def test_convert_to_annual(self):
    "Test convert_to_annual"
    base = dict(D=1, H=24, T=24 * 60, S=24 * 3600)
    # Don't test for minuTe and Second frequency, too time consuming.
    for fq in ('D', 'H'):
        dates = date_array(start_date=Date(fq, '2001-01-01 00:00:00'),
                           end_date=Date(fq, '2004-12-31 23:59:59'))
        factor = base[fq]
        series = time_series(range(365 * factor) * 3 + range(366 * factor),
                             dates=dates)
        # 2001-2003 are non-leap years, so Feb. 29th stays masked.
        expected = ma.masked_all((4, 366 * factor), dtype=series.dtype)
        expected[0, :58 * factor] = range(58 * factor)
        expected[0, 59 * factor:] = range(58 * factor, 365 * factor)
        expected[[1, 2]] = expected[0]
        expected[3] = range(366 * factor)
        assert_equal(convert_to_annual(series), expected)
    #
    series = time_series(range(59, 365) + range(366) + range(365),
                         start_date=Date('D', '2003-03-01'))
    result = convert_to_annual(series)
    assert_equal(result[:, 59:62],
                 ma.masked_values([[-1, 59, 60],
                                   [59, 60, 61],
                                   [-1, 59, 60]], -1))
def _set_refperiod(self, period=None):
    """
    Sets the period of reference.

    Parameters
    ----------
    period : {None, sequence}, optional
        A (start_date, end_date) pair, given as Date/DateArray objects or
        date strings.  If None, the reference period defaults to the first
        and last dates of the series -- but only when no reference period
        has been set yet.

    Raises
    ------
    ValueError
        If `period` is not a tuple, list or ndarray.
    DateError
        If either end of the pair is not a string nor a valid
        Date/DateArray object.
    """
    _optinfo = self._optinfo
    _refperiod = _optinfo.get('reference_period', None)
    if period is None:
        # Default to the range of the current dates, but never clobber an
        # existing reference period.
        if (_refperiod is None):
            _optinfo['reference_period'] = self._dates[[0, -1]]
        return
    if not isinstance(period, (tuple, list, ndarray)):
        msg = "The reference period should be a tuple "\
              "(start_date,end_date)."
        raise ValueError(msg)
    # Check the starting and ending dates ...
    (start_date, end_date) = (period[0], period[-1])
    if isinstance(start_date, str):
        start_date = Date(self.freq, string=start_date)
    elif not isinstance(start_date, (Date, DateArray)):
        # BUGFIX: was an old-style `raise DateError, "..."`, inconsistent
        # with the new-style raise used for the ending date below.
        raise DateError("The starting date should be a valid Date object!"
                        " ( got %s instead)" % (start_date.__class__))
    #
    if isinstance(end_date, str):
        end_date = Date(self.freq, string=end_date)
    elif not isinstance(end_date, (Date, DateArray)):
        # CONSISTENCY: accept DateArray here too, matching the
        # starting-date check above (backward compatible: strictly widens
        # the accepted inputs).
        raise DateError("The ending date should be a valid Date object!"\
                        " ( got %s instead)" % (end_date.__class__))
    _optinfo['reference_period'] = DateArray((start_date, end_date),
                                             freq=self.freq)
    return
def _idx_from_dates(d1, d2, freq):
    """
    Return the index offset between datetimes `d1` and `d2` at the given
    frequency.

    `d1` is expected to be the last date in a date series and `d2` the
    out-of-sample date.  The offset rounds down when `d2` falls before
    the next whole period at `freq`.
    """
    from scikits.timeseries import Date
    last = Date(freq, datetime=d1)
    target = Date(freq, datetime=d2)
    return target - last
def test_explicit_names_with_usecols(self):
    "Make sure the proper names are given to entries when usecols is not None"
    data = "AAA,2010,1,1,2,3\nBBB,2010,2,10,20,30"
    converter = lambda y, m: Date('M', year=int(y), month=int(m))
    options = dict(freq='M', delimiter=',', dateconverter=converter,
                   datecols=(1, 2), usecols=(1, 2, 3, 5), names="A, C")
    result = tsfromtxt(StringIO.StringIO(data), **options)
    expected = time_series([(1, 3), (10, 30)],
                           start_date=Date('M', '2010-01'),
                           dtype=[('A', int), ('C', int)])
    assert_equal(result, expected)
def test_unsorted_input(self):
    "Test tsfromtxt when the dates of the input are not sorted."
    datatxt = """dates,a,b
2007-04-02 01:00,,0.
2007-04-02 02:00,2.,20
2007-04-02 03:00,,
2007-04-02 00:00,0.,10.
2007-04-02 03:00,3.,30
2007-04-02 01:00,1.,10
2007-04-02 02:00,,
"""
    data = StringIO.StringIO(datatxt)
    dates = [Date('H', '2007-04-02 0%i:00' % hour)
             for hour in (1, 2, 3, 0, 1, 2)]
    controla = ma.masked_values([0, -1, 1, 2, -1, -1, 3], -1)
    controlb = ma.masked_values([10, 0, 10, 20, -1, -1, 30], -1)
    #
    data = StringIO.StringIO(datatxt)
    result = tsfromtxt(data, delimiter=',', names=True, freq='H')
    assert_equal(result.dtype.names, ['a', 'b'])
    # Both fields must be sorted by date, with duplicates merged/masked.
    for (field, control) in (('a', controla), ('b', controlb)):
        assert_equal(result[field], control)
        assert_equal(result[field].mask, control.mask)
def _date_from_idx(d1, idx, freq):
    """
    Return the datetime located `idx` periods beyond the end of a date
    series.

    `d1` is the datetime of the last date in the series; `idx` is the
    index distance of the wanted date from `d1` (i.e. idx=1 gives the
    next date from `d1` at frequency `freq`).
    """
    from scikits.timeseries import Date
    anchor = Date(freq, datetime=d1)
    return datetime.datetime.fromordinal((anchor + idx).toordinal())
def get_datevalue(date, freq):
    """
    Coerce `date` to its numerical value at frequency `freq`.

    Accepts a Date, a date string, a plain number (returned unchanged),
    a size-1 ndarray (returned unchanged), or None (returns None).
    Anything else raises ValueError.
    """
    # None matches none of the type checks below, so handling it first is
    # behaviorally equivalent to the trailing position it had originally.
    if date is None:
        return None
    if isinstance(date, Date):
        return date.asfreq(freq).value
    if isinstance(date, str):
        return Date(freq, string=date).value
    if isinstance(date, (int, float)):
        return date
    if isinstance(date, np.ndarray) and (date.size == 1):
        return date
    raise ValueError("Unrecognizable date '%s'" % date)
def test_missing_values_no_names(self):
    "Make sure that floating point missing values are kept if no names"
    data = "AAA,2010,1,-9\nBBB,2010,2,2"
    converter = lambda y, m: Date('M', year=int(y), month=int(m))
    options = dict(freq='M', delimiter=',', dateconverter=converter,
                   missing_values=-9, datecols=(1, 2), usecols=(1, 2, 3),
                   names="A")
    result = tsfromtxt(StringIO.StringIO(data), **options)
    # The -9 entry must come back masked; the 2 entry must not.
    assert_equal(result.mask,
                 np.array([(1, ), (0, )], dtype=[('A', bool)]))
def test_explicit_names(self):
    "Test w/ explicit dtype (and explicit float)"
    data = "200510, 380.00, 386.30\n200511, 386.85, 388.55\n"
    converter = lambda x: Date("M", "%s-%s" % (x[:4], x[4:6]))
    ndtype = [('open', "f4"), ('close', "f4")]
    expected = time_series([(380., 386.30), (386.85, 388.55)],
                           start_date="2005-10", freq="M", dtype=ndtype)
    result = tsfromtxt(StringIO.StringIO(data), dtype=ndtype,
                       delimiter=",", datecols=0, dateconverter=converter)
    assert_equal(result, expected)
def test_with_negative_datecols(self):
    "Test negative datecols"
    data = "380.00, 386.30, 200510\n386.85, 388.55, 200511\n"
    converter = lambda x: Date("M", "%s-%s" % (x[:4], x[4:6]))
    ndtype = [('open', "f4"), ('close', "f4")]
    expected = time_series([(380., 386.30), (386.85, 388.55)],
                           start_date="2005-10", freq="M", dtype=ndtype)
    # datecols=-1: the date lives in the LAST column this time.
    result = tsfromtxt(StringIO.StringIO(data), dtype=ndtype,
                       delimiter=",", datecols=-1, dateconverter=converter)
    assert_equal(result, expected)
def test_dates_on_several_columns(self):
    "Test tsfromtxt when the date spans several columns."
    datatxt = """
2001, 01, 0.0, 10.
2001, 02, 1.1, 11.
2001, 02, 2.2, 12.
"""
    data = StringIO.StringIO(datatxt)
    converter = lambda y, m: Date('M', year=int(y), month=int(m))
    result = tsfromtxt(data, delimiter=',', dtype=float,
                       datecols=(0, 1), dateconverter=converter)
    assert_equal(result, [[0., 10.], [1.1, 11.], [2.2, 12.]])
    # Duplicated dates (2001-02) must be kept as-is.
    assert_equal(result.dates,
                 date_array(['2001-01', '2001-02', '2001-02'], freq='M'))
def test_with_datecols(self):
    "Test two datecols"
    fcontent = StringIO.StringIO("""
year, month, A, B
2009, 01, 1, 1.
2009, 03, 3, 3.
""")
    converter = lambda y, m: Date("M", year=int(y), month=int(m))
    result = tsfromtxt(fcontent, delimiter=",", skip_header=1, names=True,
                       converters={'dates': converter}, datecols=(0, 1))
    expected_dates = date_array(['2009-01', '2009-03'], freq='M')
    assert_equal(result.dates.tovalue(), expected_dates)
    assert_equal(result['A'], [1, 3])
    assert_equal(result['B'], [1., 3.])
    # The date columns must not leak into the data dtype.
    assert_equal(result.dtype, np.dtype([('A', int), ('B', float)]))
def parse_nmsu_date(packed_date):
    """
    Convert an NMSU 'm/d/yy' date string into a daily Date.

    The two-digit year is interpreted as 20yy.
    """
    (month, day, year) = packed_date.split('/')
    return Date('D', year=int('20%s' % year), month=int(month),
                day=int(day))
def load_oni(mode='standard', **options):
    """
    Loads the ONI 3-m averaged monthly SST anomalies over the Niño-3.4 region
    and returns a :class:`~scikits.hydroclimpy.enso.ENSOIndicator` object.

    Two modes are accepted as arguments:

    - in the ``standard`` mode, the SSTs are retrieved from the original
      CPC website_.
      Data are available from Jan. 1950 to present.
    - in the ``backup`` mode, the SSTs are retrieved from the CPC
      `ftp site <ftpsite>`_.
      Data are available from Jan. 1900 to present.

    .. _website : http://www.cpc.noaa.gov/products/analysis_monitoring/ensostuff/ensoyears.shtml
    .. _ftpsite : ftp://eclipse.ncdc.noaa.gov/pub/ersst/pdo/el_nino_v3.dat.

    Parameters
    ----------
    mode : {'standard','backup'}, optional
        Mode describing the data to download.
    options : dictionary
        Optional parameters to parse to the ENSOIndicator for the definition
        of ENSO indices.

        thresholds : tuple of floats, optional
            Low and high temperature thresholds for the definition of El Niño
            and La Niña conditions.
            By default, the CPC uses -0.5oC and +0.5oC.
        minimum_size : int, optional
            Minimum number of consecutive months in El Niño / La Niña
            conditions required for the definition of an episode.
            By default, the CPC use 5 consecutive months.
        reference_season : string or tuple, optional
            Months that must be in an episode for it to be valid.
            By default, the CPC uses None (no restriction on the months).
        full_year : boolean, optional
            The CPC uses ``full_year=False``.

    References
    ----------
    Xue, Y., T. M. Smith, and R. W. Reynolds, 2003: Interdecadal changes of
    30-yr SST normals during 1871-2000. *J. Climate*, 16, 1601-1612.
    """
    # Initialization .......................
    ensoarchive = dict(config.items('ENSO'))['ensoarchive']
    if ensoarchive[-4:].lower() != '.zip':
        ensoarchive += '.zip'
    #
    mode = mode.lower()
    cfg = dict(config.items('ENSO.ONI'))
    cfg.update(options)
    # BeautifulSoup is only needed to scrape the CPC web page ('standard'
    # mode); without it, fall back to the flat-file 'backup' mode.
    try:
        from BeautifulSoup import BeautifulSoup, SoupStrainer
    except ImportError:
        warnings.warn("The module 'BeautifulSoup' is unavailable.\n"\
                      "Reverting to backup mode")
        mode = 'backup'
    #
    datadir = cfg['datadir']
    if mode == 'standard':
        netfile = cfg['netfile']
        archive = cfg['archive']
    else:
        netfile = cfg['netfile_backup']
        archive = cfg['archive_backup']
    # Try to open an existing ENSOIndicator
    # NOTE(review): `ensoarchive` is recomputed here exactly as at the top of
    # the function -- redundant but harmless.
    ensoarchive = dict(config.items('ENSO'))['ensoarchive']
    if ensoarchive[-4:].lower() != '.zip':
        ensoarchive += '.zip'
    #
    try:
        zipf = zipfile.ZipFile(ensoarchive, 'r')
        ensoi = cPickle.loads(zipf.read(archive))
        ensologger.info("... Loading from existing archived file")
    except IOError:
        # No archive file yet: create one.
        zipf = zipfile.ZipFile(ensoarchive, 'w')
        ensologger.info("... Creating archive")
    except KeyError:
        # Archive exists but does not hold this indicator yet: append.
        zipf = zipfile.ZipFile(ensoarchive, 'a')
        ensologger.info("... Appending to archive")
    else:
        # Cached indicator found: return it without re-downloading.
        if isinstance(ensoi, enso.ENSOIndicator):
            return ensoi
    #
    sourcedir = np.lib._datasource.DataSource(datadir)
    dfile = sourcedir.open(netfile)
    #
    #
    if mode == 'standard':
        # Load the file as a tree, but only take the SST table (border=1)
        table = BeautifulSoup(dfile.read(),
                              parseOnlyThese=SoupStrainer("table", border=1))
        # Separate it by rows, but skip the first one (the header)
        years = []
        data = []
        indices = []
        # Cell background colors encode the episode type:
        # red = El Niño (+1), white = neutral (0), blue = La Niña (-1).
        color = {'red': +1, 'white': 0, 'blue': -1}
        deft = [(None, 'color:white')]
        for row in table.findAll("tr")[1:]:
            cols = row.findAll('td')
            years.append(int(cols.pop(0).strong.string))
            # Blank cells mark missing SSTs; map them to the 99.9 sentinel
            # so they can be masked below.
            data.append([float(_.fetchText()[-1].string.replace(' ', '99.9'))
                         for _ in cols])
            indices.append([color[getattr(_.span, 'attrs', deft)[0][-1].split(':')[-1]]
                            for _ in cols])
        #
        start_date = Date('M', year=years[0], month=1)
        ensoi = enso.ENSOIndicator(ma.masked_values(data, 99.9).ravel(),
                                   start_date=start_date,)
        #        oni.set_indices(full_year=False, minsize=5, refseason=None)
        indices = time_series(np.array(indices).ravel(),
                              start_date=start_date)
    else:
        rawdata = np.loadtxt(dfile)
        dates = date_array([Date('M', year=yy, month=mm)
                            for (yy, mm) in rawdata[:, :2]],
                           freq='M')
        # 3-month centered moving average of the monthly anomalies.
        ensoi = enso.ENSOIndicator(cmov_mean(rawdata[:, -1], 3).round(2),
                                   dates,)
    #
    _set_ensoindicator_options(ensoi, **cfg)
    ensoi.set_indices()
    #
    # Store in the archive
    zipf.writestr(archive, cPickle.dumps(ensoi))
    zipf.close()
    return ensoi
def _mask_period(data, period=None, start_date=None, end_date=None,
                 inside=True, include_edges=True, inplace=False):
    """
    Masks a series over a period of dates, on top of any mask the data
    already carries.

    Parameters
    ----------
    data : Timeseries
        Data to process
    period : {None, sequence}, optional
        A sequence of (starting date, ending date).  When given, it takes
        precedence over `start_date` and `end_date`.
    start_date : {None, string/Date }, optional
        Starting date.  If None, uses the first date of the series.
    end_date : {None, string/Date }, optional
        Ending date.  If None, uses the last date of the series.
    inside : {True, False}, optional
        Whether the dates inside the range should be masked.
        If not, masks outside.
    include_edges : {True, False}, optional
        Whether the starting and ending dates should be masked.
    inplace : {True, False}, optional
        Whether the data mask should be modified in place.  If not,
        returns a new :class:`~scikits.timeseries.TimeSeries` object.

    Raises
    ------
    ValueError
        If `data` is not a valid TimeSeries.
    DateError
        If `start_date` or `end_date` is neither None, a string, nor a
        valid Date.
    """
    data = ma.array(data, subok=True, copy=not inplace)
    if not isinstance(data, TimeSeries):
        raise ValueError("Data should be a valid TimeSeries!")
    dseries = data._dates
    if dseries.ndim == 1:
        dates_lims = dseries[[0, -1]]
    else:
        dates_lims = dseries.ravel()[[0, -1]]
    # A `period` argument overrides start_date/end_date; a scalar period
    # is interpreted as (period, start_date).
    if period is not None:
        if isinstance(period, (tuple, list, ndarray)):
            (start_date, end_date) = (period[0], period[-1])
        else:
            (start_date, end_date) = (period, start_date)
    # Coerce the starting date ..............
    if start_date is None:
        start_date = dates_lims[0]
    elif isinstance(start_date, str):
        start_date = Date(data.freq, string=start_date)
    elif not isinstance(start_date, Date):
        raise DateError("The starting date should be a valid Date object!")
    # Coerce the ending date ................
    if end_date is None:
        end_date = dates_lims[-1]
    elif isinstance(end_date, str):
        end_date = Date(data.freq, string=end_date)
    elif not isinstance(end_date, Date):
        raise DateError("The ending date should be a valid Date object!")
    # Construct the selection mask ..........
    dates = data.dates
    if inside:
        if include_edges:
            selection = (dates >= start_date) & (dates <= end_date)
        else:
            selection = (dates > start_date) & (dates < end_date)
    else:
        if include_edges:
            selection = (dates <= start_date) | (dates >= end_date)
        else:
            selection = (dates < start_date) | (dates > end_date)
    data[selection] = masked
    return data
def __call__(self, x, pos=0):
    """
    Format tick value `x` for display.

    Returns '' when no format dictionary is set.  Otherwise pops the
    format string registered for `x` (so each tick is formatted at most
    once) and renders the corresponding Date with it.
    """
    if self.formatdict is None:
        return ''
    fmt = self.formatdict.pop(x, '')
    return Date(self.freq, value=int(x)).strftime(fmt)
def _daily_finder(vmin, vmax, freq):
    """
    Compute tick positions and label formats for a plot axis whose values
    are date indices at sub-annual frequency `freq` (second up to weekly,
    plus undefined), spanning values `vmin` to `vmax`.

    Returns a structured array with fields 'val' (date value), 'maj'
    (major-tick flag), 'min' (minor-tick flag) and 'fmt' (strftime label
    format, empty for unlabelled ticks).
    """
    # Translate the frequency into periods-per-day / -year / -month.
    periodsperday = -1
    if freq >= _c.FR_HR:
        if freq == _c.FR_SEC:
            periodsperday = 24 * 60 * 60
        elif freq == _c.FR_MIN:
            periodsperday = 24 * 60
        elif freq == _c.FR_HR:
            periodsperday = 24
        else:
            raise ValueError("unexpected frequency: %s" % check_freq_str(freq))
        periodsperyear = 365 * periodsperday
        periodspermonth = 28 * periodsperday
    elif freq == _c.FR_BUS:
        periodsperyear = 261
        periodspermonth = 19
    elif freq == _c.FR_DAY:
        periodsperyear = 365
        periodspermonth = 28
    elif get_freq_group(freq) == _c.FR_WK:
        periodsperyear = 52
        periodspermonth = 3
    elif freq == _c.FR_UND:
        periodsperyear = 100
        periodspermonth = 10
    else:
        raise ValueError("unexpected frequency")
    # save this for later usage
    vmin_orig = vmin
    (vmin, vmax) = (int(vmin), int(vmax))
    span = vmax - vmin + 1
    dates_ = date_array(start_date=Date(freq, vmin),
                        end_date=Date(freq, vmax))
    # Initialize the output
    info = np.zeros(span,
                    dtype=[('val', int), ('maj', bool), ('min', bool),
                           ('fmt', '|S20')])
    info['val'][:] = np.arange(vmin, vmax + 1)
    info['fmt'][:] = ''
    info['maj'][[0, -1]] = True
    # .. and set some shortcuts
    info_maj = info['maj']
    info_min = info['min']
    info_fmt = info['fmt']

    def first_label(label_flags):
        # Pick the first labellable position, skipping position 0 when the
        # original (float) vmin starts mid-period.
        if (label_flags[0] == 0) and (label_flags.size > 1) and \
            ((vmin_orig % 1) > 0.0):
            return label_flags[1]
        else:
            return label_flags[0]

    # Case 1. Less than a month
    if span <= periodspermonth:
        day_start = period_break(dates_, 'day')
        month_start = period_break(dates_, 'month')

        def _hour_finder(label_interval, force_year_start):
            _hour = dates_.hour
            _prev_hour = (dates_ - 1).hour
            hour_start = (_hour - _prev_hour) != 0
            info_maj[day_start] = True
            info_min[hour_start & (_hour % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
            info_fmt[day_start] = '%H:%M\n%d-%b'
            info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
            if force_year_start and not has_level_label(year_start, vmin_orig):
                info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'

        def _minute_finder(label_interval):
            hour_start = period_break(dates_, 'hour')
            _minute = dates_.minute
            _prev_minute = (dates_ - 1).minute
            minute_start = (_minute - _prev_minute) != 0
            info_maj[hour_start] = True
            info_min[minute_start & (_minute % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            # NOTE: local rebinding; still a view on info['fmt'].
            info_fmt = info['fmt']
            info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
            info_fmt[day_start] = '%H:%M\n%d-%b'
            info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'

        def _second_finder(label_interval):
            minute_start = period_break(dates_, 'minute')
            _second = dates_.second
            _prev_second = (dates_ - 1).second
            second_start = (_second - _prev_second) != 0
            info['maj'][minute_start] = True
            info['min'][second_start & (_second % label_interval == 0)] = True
            year_start = period_break(dates_, 'year')
            # NOTE: local rebinding; still a view on info['fmt'].
            info_fmt = info['fmt']
            info_fmt[second_start & (_second % label_interval == 0)] = '%H:%M:%S'
            info_fmt[day_start] = '%H:%M:%S\n%d-%b'
            info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'

        # Choose the label interval from the span, finest first.
        if span < periodsperday / 12000.0:
            _second_finder(1)
        elif span < periodsperday / 6000.0:
            _second_finder(2)
        elif span < periodsperday / 2400.0:
            _second_finder(5)
        elif span < periodsperday / 1200.0:
            _second_finder(10)
        elif span < periodsperday / 800.0:
            _second_finder(15)
        elif span < periodsperday / 400.0:
            _second_finder(30)
        elif span < periodsperday / 150.0:
            _minute_finder(1)
        elif span < periodsperday / 70.0:
            _minute_finder(2)
        elif span < periodsperday / 24.0:
            _minute_finder(5)
        elif span < periodsperday / 12.0:
            _minute_finder(15)
        elif span < periodsperday / 6.0:
            _minute_finder(30)
        elif span < periodsperday / 2.5:
            _hour_finder(1, False)
        elif span < periodsperday / 1.5:
            _hour_finder(2, False)
        elif span < periodsperday * 1.25:
            _hour_finder(3, False)
        elif span < periodsperday * 2.5:
            _hour_finder(6, True)
        elif span < periodsperday * 4:
            _hour_finder(12, True)
        else:
            info_maj[month_start] = True
            info_min[day_start] = True
            year_start = period_break(dates_, 'year')
            info_fmt = info['fmt']
            info_fmt[day_start] = '%d'
            info_fmt[month_start] = '%d\n%b'
            info_fmt[year_start] = '%d\n%b\n%Y'
            if not has_level_label(year_start, vmin_orig):
                if not has_level_label(month_start, vmin_orig):
                    info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
                else:
                    info_fmt[first_label(month_start)] = '%d\n%b\n%Y'
    # Case 2. Less than three months
    elif span <= periodsperyear // 4:
        month_start = period_break(dates_, 'month')
        info_maj[month_start] = True
        if freq < _c.FR_HR:
            info['min'] = True
        else:
            day_start = period_break(dates_, 'day')
            info['min'][day_start] = True
        week_start = period_break(dates_, 'week')
        year_start = period_break(dates_, 'year')
        info_fmt[week_start] = '%d'
        info_fmt[month_start] = '\n\n%b'
        info_fmt[year_start] = '\n\n%b\n%Y'
        if not has_level_label(year_start, vmin_orig):
            if not has_level_label(month_start, vmin_orig):
                info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
            else:
                info_fmt[first_label(month_start)] = '\n\n%b\n%Y'
    # Case 3. Less than 14 months ...............
    elif span <= 1.15 * periodsperyear:
        year_start = period_break(dates_, 'year')
        month_start = period_break(dates_, 'month')
        week_start = period_break(dates_, 'week')
        info_maj[month_start] = True
        info_min[week_start] = True
        info_min[year_start] = False
        info_min[month_start] = False
        info_fmt[month_start] = '%b'
        info_fmt[year_start] = '%b\n%Y'
        if not has_level_label(year_start, vmin_orig):
            info_fmt[first_label(month_start)] = '%b\n%Y'
    # Case 4. Less than 2.5 years ...............
    elif span <= 2.5 * periodsperyear:
        year_start = period_break(dates_, 'year')
        quarter_start = period_break(dates_, 'quarter')
        month_start = period_break(dates_, 'month')
        info_maj[quarter_start] = True
        info_min[month_start] = True
        info_fmt[quarter_start] = '%b'
        info_fmt[year_start] = '%b\n%Y'
    # Case 4. Less than 4 years .................
    elif span <= 4 * periodsperyear:
        year_start = period_break(dates_, 'year')
        month_start = period_break(dates_, 'month')
        info_maj[year_start] = True
        info_min[month_start] = True
        info_min[year_start] = False
        # Only label January and July between year labels.
        month_break = dates_[month_start].month
        jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
        info_fmt[jan_or_jul] = '%b'
        info_fmt[year_start] = '%b\n%Y'
    # Case 5. Less than 11 years ................
    elif span <= 11 * periodsperyear:
        year_start = period_break(dates_, 'year')
        quarter_start = period_break(dates_, 'quarter')
        info_maj[year_start] = True
        info_min[quarter_start] = True
        info_min[year_start] = False
        info_fmt[year_start] = '%Y'
    # Case 6. More than 12 years ................
    else:
        year_start = period_break(dates_, 'year')
        year_break = dates_[year_start].years
        nyears = span / periodsperyear
        (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
        major_idx = year_start[(year_break % maj_anndef == 0)]
        info_maj[major_idx] = True
        minor_idx = year_start[(year_break % min_anndef == 0)]
        info_min[minor_idx] = True
        info_fmt[major_idx] = '%Y'
    #............................................
    return info
def _set_ensoindicator_options(ensoindicator, **options): """ Sets some options of an :class:`~scikits.hydroclimpy.enso.ENSOIndicator` """ # Define some defaults.................. defaults = dict( ensotag='(undefined)', full_year=False, thresholds=(None, None), minimum_size=None, reference_season=None, reference_period=None, ) optinfo = ensoindicator.optinfo # Set the defaults...................... defaults.update([(k, v) for (k, v) in options.items() if k in defaults]) optinfo.update(defaults) # Fix some issues....................... ensoindicator.minimum_size = optinfo['minimum_size'] ensoindicator.refseason = optinfo['reference_season'] try: ensoindicator.full_year = eval(optinfo['full_year']) except NameError: optinfo['full_year'] = False except TypeError: ensoindicator.full_year = optinfo['full_year'] # refstring = (optinfo['reference_period'] or '').split(',') if refstring == ['']: ensoindicator.refperiod = None else: if len(refstring) != 2 and refstring != ['']: raise ValueError("The starting and ending dates of the reference " "period should be separated bv a comma") start_date = refstring[0] if len(start_date) == 4: start_date = Date('M', year=int(start_date), month=1) elif len(start_date) >= 7: start_date = Date('M', year=int(start_date[:4]), month=int(start_date[5:7])) else: start_date = None end_date = refstring[1] if len(end_date) == 4: end_date = Date('M', year=int(end_date), month=12) elif len(end_date) >= 7: end_date = Date('M', year=int(end_date[:4]), month=int(end_date[5:7])) else: end_date = None ensoindicator.refperiod = (start_date, end_date) # opt_thresholds = optinfo['thresholds'] if opt_thresholds != (None, None): if isinstance(opt_thresholds, tuple): ensoindicator.thresholds = opt_thresholds else: ensoindicator.thresholds = eval(opt_thresholds) return
def parse_date(packed_date):
    """Convert a 'YYYY-MM-DD' value into a daily Date."""
    (year, month, day) = str(packed_date).split('-')
    return Date('D', year=int(year), month=int(month), day=int(day))
def __init__(self, *args, **kwds):
    # Build a two-year daily fixture series (731 days from 2003-01-01).
    TestCase.__init__(self, *args, **kwds)
    self.data = time_series(np.arange(731),
                            start_date=Date(string='2003-01-01', freq='D'),
                            freq='D')
_attrs) _methods = {'predict': 'dates'} _wrap_methods = wrap.union_dicts( base.LikelihoodResultsWrapper._wrap_methods, _methods) wrap.populate_wrapper(TimeSeriesResultsWrapper, TimeSeriesModelResults) if __name__ == "__main__": import gwstatsmodels.api as sm import datetime import pandas data = sm.datasets.macrodata.load() #make a DataFrame #TODO: attach a DataFrame to some of the datasets, for quicker use dates = [str(int(x[0])) +':'+ str(int(x[1])) \ for x in data.data[['year','quarter']]] try: import scikits.timeseries as ts ts_dates = date_array(start_date=Date(year=1959, quarter=1, freq='Q'), length=len(data.data)) except: pass df = pandas.DataFrame(data.data[['realgdp', 'realinv', 'realcons']], index=dates) ex_mod = TimeSeriesModel(df) #ts_series = pandas.TimeSeries()