Code Example #1
    def extract(self, name, offset=0, interval=3600):
        """
        Return generator of (datetime, array) tuples.

        Yield arrays at times in the simulation according to the given offset and
        interval. It also yields at the last timestep in the period, because
        animation (elsewhere in this repository) is only detected if there are
        multiple frames to display.

        :param name: name of netcdf variable to extract
        :param offset: offset in seconds from start of calculation
        :param interval: seconds between generated arrays
        """
        start, stop = self.time.period
        current = start + Timedelta(seconds=offset)
        step = Timedelta(seconds=interval)

        def result_for_time(time):
            index, datetime = self.time.find(time)
            array = self._read_snapshot(name=name, index=index)
            return datetime, array

        while current <= stop:
            yield result_for_time(current)
            current += step
        yield result_for_time(stop)
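
A minimal driver loop for this generator might look as follows; sim and the variable name "waterlevel" are hypothetical stand-ins for an instance of the class above and a variable in its netCDF file:

# hedged usage sketch; sim is assumed to expose the extract() method above
for datetime, array in sim.extract("waterlevel", offset=1800, interval=7200):
    print(datetime, array.shape)  # one frame every two hours, plus the final timestep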
Code Example #2
def date_recv_binary(data: bytes, offset: int, length: int) -> date:
    # 86400 seconds per day
    seconds: float = i_unpack(data, offset)[0] * 86400

    # Julian/Gregorian calendar cutoff point
    if seconds < -12219292800:  # October 4, 1582 -> October 15, 1582
        seconds += 864000  # add 10 days' worth of seconds
        if seconds < -14825808000:  # 1500-02-28 -> 1500-03-01
            extraLeaps: float = (seconds + 14825808000) / 3155760000
            extraLeaps -= 1
            extraLeaps -= extraLeaps / 4
            seconds += extraLeaps * 86400

    microseconds: float = seconds * 1e6

    try:
        return (EPOCH + Timedelta(microseconds=microseconds)).date()
    except OverflowError:
        if Timedelta(microseconds=microseconds) < Timedelta(
                seconds=EPOCH_SECONDS):
            return date.min
        else:
            return date.max
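
The i_unpack helper and EPOCH are defined elsewhere in the module. In pg8000-style converters, PostgreSQL's binary DATE value is a signed 32-bit big-endian count of days relative to the 2000-01-01 epoch, so plausible definitions (a sketch, not the verbatim source) are:

from datetime import datetime as Datetime
from struct import Struct

i_unpack = Struct("!i").unpack_from   # big-endian int32
EPOCH = Datetime(2000, 1, 1)          # PostgreSQL's date/timestamp epoch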
Code Example #3
def timestamptz_recv_integer(data: bytes, offset: int,
                             length: int) -> typing.Union[str, Datetime, int]:
    micros: int = q_unpack(data, offset)[0]
    try:
        return EPOCH_TZ + Timedelta(microseconds=micros)
    except OverflowError:
        epoch_delta: Timedelta = Timedelta(seconds=EPOCH_SECONDS)
        d_delta: Timedelta = Timedelta(microseconds=micros)
        if d_delta < epoch_delta:
            return Datetime.min
        else:
            return Datetime.max
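
Here q_unpack and EPOCH_TZ are likewise defined elsewhere. With integer timestamps, PostgreSQL sends microseconds since 2000-01-01 00:00:00 UTC as a big-endian int64, so a plausible sketch is:

from datetime import datetime as Datetime, timezone as Timezone
from struct import Struct

q_unpack = Struct("!q").unpack_from                    # big-endian int64
EPOCH_TZ = Datetime(2000, 1, 1, tzinfo=Timezone.utc)   # PostgreSQL epoch, UTC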
Code Example #4
File: sync.py Project: nens/ftp-feeder
    def synchronize(self, keep, text, source, target):
        # determine sources
        dataset = Dataset(**source)
        items = dataset.latest(Timedelta(**keep) // dataset.timedelta)
        transfer = {}
        for item in items:
            filename = item["filename"]
            transfer[item["datetime"].strftime(target["template"])] = filename

        # list and inspect target dir
        target_dir = target['dir']
        threshold = Datetime.utcnow() - Timedelta(**keep)
        for target_name_or_path in self.target.nlst(target_dir):
            # some servers return names, others return paths
            if target_name_or_path.startswith(target_dir):
                target_path = target_name_or_path
                target_name = basename(target_path)
            else:
                target_name = target_name_or_path
                target_path = join(target_dir, target_name)

            # remove from transfer dictionary if it is already present
            if target_name in transfer:
                del transfer[target_name]

            # find old targets by name parsing and delete them
            datetime = Datetime.strptime(
                target_name[target['timestamp']], '%Y%m%d%H',
            )
            if datetime < threshold:
                logger.info('Remove %s', target_name)
                self.target.delete(target_path)

        # transfer the rest
        for target_name, source_name in transfer.items():
            # read
            data = io.BytesIO(dataset.retrieve(source_name))
            logger.info('Retrieved %s', source_name)

            # do not allow null characters in text format products
            if text and b'\x00' in data.getvalue():
                logger.info('Null characters found, skipping this one.')
                continue

            # write
            target_path = join(target_dir, target_name)
            target_path_in = target_path + '.in'
            self.target.storbinary('STOR ' + target_path_in, data)
            self.target.rename(target_path_in, target_path)
            logger.info('Stored %s', target_name)
Code Example #5
def test_interval_out(cursor):
    retval = tuple(
        cursor.execute(
            "SELECT '1 month 16 days 12 hours 32 minutes 64 seconds'"
            "::interval"))
    expected_value = pg8000.Interval(
        microseconds=(12 * 60 * 60 * 1000 * 1000) +
        (32 * 60 * 1000 * 1000) + (64 * 1000 * 1000), days=16, months=1)
    assert retval[0][0] == expected_value

    retval = tuple(cursor.execute("select interval '30 seconds'"))
    assert retval[0][0] == Timedelta(seconds=30)

    retval = tuple(cursor.execute("select interval '12 days 30 seconds'"))
    assert retval[0][0] == Timedelta(days=12, seconds=30)
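
As a sanity check on the expected value: PostgreSQL returns the sub-day part of an interval as a single microsecond count, and 12 hours 32 minutes 64 seconds works out as follows:

# the sub-day part of the interval above, in microseconds
micros = (12 * 3600 + 32 * 60 + 64) * 1000000
assert micros == 45184000000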
Code Example #6
def get_date_last_saturday():

    ### imports aliased to the conventional class-style names
    from datetime import timedelta as Timedelta
    from datetime import datetime as Datetime
    from dateutil import relativedelta

    today = Datetime.now()

    if today.weekday() == 5:
        weeks_back_in_time = 0  ### if today is Saturday, "last Saturday" is today

    else:
        weeks_back_in_time = -1

    print('weeks_back_in_time: {}'.format(weeks_back_in_time))
    start = today - Timedelta((today.weekday() + 1) % 7)

    dtm_last_saturday = start + relativedelta.relativedelta(
        weekday=relativedelta.SA(weeks_back_in_time))
    dt_last_saturday = dtm_last_saturday.date()
    str_last_saturday = dt_last_saturday.strftime("%Y-%m-%d")
    print('Last Saturday was {}'.format(str_last_saturday))

    return str_last_saturday, dt_last_saturday
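
For comparison, the same "most recent Saturday, or today if today is Saturday" date can be computed with the standard library alone, since (weekday - 5) % 7 is the number of days back to the previous Saturday; a minimal sketch:

from datetime import date as Date, timedelta as Timedelta

def last_saturday(today=None):
    # (weekday - 5) % 7 is 0 on a Saturday, otherwise days since the last one
    today = today or Date.today()
    return today - Timedelta(days=(today.weekday() - 5) % 7)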
Code Example #7
def parse_to_date(date_str):
    if len(date_str) == 0:
        return None
    else:
        dt = to_ct(Datetime.strptime(date_str, "%d/%m/%Y"))
        dt += Timedelta(hours=23, minutes=30)
        return to_utc(dt)
Code Example #8
def interval_recv_integer(data: bytes, offset: int,
                          length: int) -> typing.Union[Timedelta, Interval]:
    microseconds, days, months = typing.cast(typing.Tuple[int, ...],
                                             qhh_unpack(data, offset))
    seconds, micros = divmod(microseconds, 1000000)  # keep integer arithmetic exact
    if months != 0:
        return Interval(microseconds, days, months)
    else:
        return Timedelta(days, seconds, micros)
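
For example, a wire value of 123456789 microseconds with no month component splits into whole seconds plus a microsecond remainder before being passed positionally as (days, seconds, microseconds):

# worked example of the divmod split above
seconds, micros = divmod(123456789, 1000000)
assert (seconds, micros) == (123, 456789)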
Code Example #9
File: temporal.py Project: nens/dask-geomodeling
    def get_sources_and_requests(self, **request):
        kwargs = self._snap_kwargs
        start = request.get("start")
        stop = request.get("stop")
        mode = request["mode"]

        start, stop = self._snap_to_resampled_labels(start, stop)
        if start is None:
            return [({"empty": True, "mode": mode}, None)]

        # a time request does not involve a request to self.source
        if mode == "time":
            kwargs["mode"] = "time"
            kwargs["start"] = start
            kwargs["stop"] = stop
            return [(kwargs, None)]

        # vals or source requests do need a request to self.source
        if self.frequency is None:
            request["start"], request["stop"] = self.source.period
        else:
            if stop is None or start == stop:
                # recover the period that is closest to start
                start_period = stop_period = _label_to_period(start, **kwargs)
            else:
                # recover the period that has label >= start
                start_period = _label_to_period(start, **kwargs)
                # recover the period that has label <= stop
                stop_period = _label_to_period(stop, **kwargs)

            # snap request 'start' to the start of the first period
            request["start"] = _ts_to_dt(start_period.start_time, self.timezone)
            # snap request 'stop' to the end of the last period
            request["stop"] = _ts_to_dt(stop_period.end_time, self.timezone)
            if kwargs["closed"] != "left":
                request["stop"] += Timedelta(microseconds=1)

        # return sources and requests depending on the mode
        kwargs["mode"] = request["mode"]
        kwargs["start"] = start
        kwargs["stop"] = stop
        if mode == "vals":
            kwargs["dtype"] = np.dtype(self.dtype).str
            kwargs["statistic"] = self.statistic

        time_request = {
            "mode": "time",
            "start": request["start"],
            "stop": request["stop"],
        }

        # In case the data request dictates a temporal resolution,
        # also set this in temporal request
        if "time_resolution" in request:
            time_request["time_resolution"] = request["time_resolution"]

        return [(kwargs, None), (self.source, time_request), (self.source, request)]
Code Example #10
def test_interval_roundtrip(cursor):
    v = pg8000.Interval(microseconds=123456789, days=2, months=24)
    cursor.execute("SELECT %s as f1", (v,))
    retval = cursor.fetchall()
    assert retval[0][0] == v

    v = Timedelta(seconds=30)
    cursor.execute("SELECT %s as f1", (v,))
    retval = cursor.fetchall()
    assert retval[0][0] == v
Code Example #11
    def testIntervalRoundtrip(self):
        v = pg8000.Interval(microseconds=123456789, days=2, months=24)
        self.cursor.execute("SELECT %s as f1", (v, ))
        retval = self.cursor.fetchall()
        self.assertEqual(retval[0][0], v)

        v = Timedelta(seconds=30)
        self.cursor.execute("SELECT %s as f1", (v, ))
        retval = self.cursor.fetchall()
        self.assertEqual(retval[0][0], v)
Code Example #12
def test_interval_roundtrip(cursor):
    v = nzpy.Interval(microseconds=123456789, days=2, months=24)
    cursor.execute("SELECT '?' as f1", (v,))
    retval = cursor.fetchall()
    assert retval[0][0] == '<Interval 24 months 2 days 123456789 microseconds>'

    v = Timedelta(seconds=30)
    cursor.execute("SELECT '?' as f1", (v,))
    retval = cursor.fetchall()
    assert retval[0][0] == '0:00:30'
Code Example #13
File: sync.py Project: nens/ftp-feeder
    def __init__(self, dataset, version, step, pattern):
        """Represents a Dataplatform Dataset.

        Args:
            dataset (str): dataset name
            version (str): dataset version
            step (dict): keyword arguments for Timedelta, e.g. {"hours": 1}
            pattern (str): filename pattern
        """
        self.url = self.URL.format(dataset=dataset, version=version)
        self.timedelta = Timedelta(**step)
        self.pattern = pattern
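
A hedged construction example, assuming this __init__ belongs to the Dataset class used in Code Example #4; the argument values below are hypothetical and only show that step is expanded into Timedelta(**step):

# hypothetical values, not taken from the project
dataset = Dataset(
    dataset="radar_forecast",
    version="1.0",
    step={"minutes": 5},
    pattern="*.h5",
)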
Code Example #14
def timestamptz_recv_integer(data: bytes, offset: int,
                             length: int) -> typing.Union[str, Datetime, int]:
    micros: int = q_unpack(data, offset)[0]
    try:
        return EPOCH_TZ + Timedelta(microseconds=micros)
    except OverflowError:
        if micros == INFINITY_MICROSECONDS:
            return "infinity"
        elif micros == MINUS_INFINITY_MICROSECONDS:
            return "-infinity"
        else:
            return micros
Code Example #15
    def testIntervalOut(self):
        self.cursor.execute(
            "SELECT '1 month 16 days 12 hours 32 minutes 64 seconds'"
            "::interval")
        retval = self.cursor.fetchall()
        expected_value = pg8000.Interval(
            microseconds=(12 * 60 * 60 * 1000 * 1000) +
            (32 * 60 * 1000 * 1000) + (64 * 1000 * 1000),
            days=16,
            months=1)
        self.assertEqual(retval[0][0], expected_value)

        self.cursor.execute("select interval '30 seconds'")
        retval = self.cursor.fetchall()
        expected_value = Timedelta(seconds=30)
        self.assertEqual(retval[0][0], expected_value)

        self.cursor.execute("select interval '12 days 30 seconds'")
        retval = self.cursor.fetchall()
        expected_value = Timedelta(days=12, seconds=30)
        self.assertEqual(retval[0][0], expected_value)
Code Example #16
def getWeekdayDates(date, weekday):
    ''' date: Date object - resets day to 1
        weekday: int - 0=mon, 6=sun
        returns: list of dates in month that match weekday
    '''
    date = date.replace(day=1)
    delta = weekday - date.weekday()
    if delta >= 0:
        first_weekday = date + Timedelta(days=delta)
    else:
        first_weekday = date + Timedelta(days=delta + 7)

    result = []
    for i in range(5):
        new_date = first_weekday + Timedelta(days=i * 7)
        if new_date.month == date.month:
            result.append(new_date)
        else:
            break

    return result
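
As a quick check: January 2024 starts on a Monday, so asking for Saturdays (weekday 5) from any date in that month yields the 6th, 13th, 20th and 27th:

from datetime import date as Date

assert getWeekdayDates(Date(2024, 1, 15), 5) == [
    Date(2024, 1, 6), Date(2024, 1, 13),
    Date(2024, 1, 20), Date(2024, 1, 27),
]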
Code Example #17
File: transfer.py Project: jrbeckwith/because
    def duration(self):
        # type: () -> float
        """Compute the duration of the transfer, in seconds.

        If the transfer has not started, this returns 0. If the transfer is
        still ongoing, this returns how much time has elapsed so far.
        """
        if not self.started_at:
            timedelta = Timedelta()
        else:
            latest = self.stopped_at or Datetime.utcnow()
            timedelta = latest - self.started_at
        return timedelta.total_seconds()
Code Example #18
File: temporal.py Project: mdkrol/dask-geomodeling
    def get_sources_and_requests(self, **request):
        # a time request does not involve any resampling, so just propagate
        if request["mode"] == "time":
            return [({"mode": "time"}, None), (self.source, request)]

        kwargs = self._snap_kwargs
        start = request.get("start")
        stop = request.get("stop")
        mode = request["mode"]

        # we need to know what times will be returned in order to figure out
        # what times we need to compute the cumulative
        time_data = self.source.get_data(mode="time", start=start, stop=stop)
        if time_data is None or not time_data.get("time"):
            # return early for an empty source
            return [({"empty": True, "mode": mode}, None)]

        # get the periods from the first and last timestamp
        start = time_data["time"][0]
        stop = time_data["time"][-1]

        if self.frequency is None:
            request["start"] = self.period[0]
            request["stop"] = stop
        else:
            start_period = _get_bin_period(start, **kwargs)

            # snap request 'start' to the start of the first period
            request["start"] = _ts_to_dt(start_period.start_time,
                                         self.timezone)
            # snap request 'stop' to the last requested time
            request["stop"] = stop
            if kwargs["closed"] != "left":
                request["stop"] += Timedelta(microseconds=1)

        # return sources and requests depending on the mode
        kwargs["mode"] = request["mode"]
        kwargs["start"] = start
        kwargs["stop"] = stop
        if mode == "vals":
            kwargs["dtype"] = np.dtype(self.dtype).str
            kwargs["statistic"] = self.statistic

        time_request = {
            "mode": "time",
            "start": request["start"],
            "stop": request["stop"],
        }
        return [(kwargs, None), (self.source, time_request),
                (self.source, request)]
Code Example #19
File: converters.py Project: pavithranv/mamonsu
def timedelta_in(data):
    t = {}

    curr_val = None
    for k in data.split():
        if ':' in k:
            t['hours'], t['minutes'], t['seconds'] = map(float, k.split(':'))
        else:
            try:
                curr_val = float(k)
            except ValueError:
                t[PGInterval.UNIT_MAP[k]] = curr_val

    for n in ['weeks', 'months', 'years', 'decades', 'centuries', 'millennia']:
        if n in t:
            raise InterfaceError("Can't fit the interval " + str(t) +
                                 " into a datetime.timedelta.")

    return Timedelta(**t)
Code Example #20
def interval_in(data):
    t = {}

    curr_val = None
    for k in data.split():
        if ":" in k:
            t["hours"], t["minutes"], t["seconds"] = map(float, k.split(":"))
        else:
            try:
                curr_val = float(k)
            except ValueError:
                t[PGInterval.UNIT_MAP[k]] = curr_val

    for n in ["weeks", "months", "years", "decades", "centuries", "millennia"]:
        if n in t:
            raise InterfaceError(
                f"Can't fit the interval {t} into a datetime.timedelta.")

    return Timedelta(**t)
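
Tracing the parser on '1 day 12:30:00' (assuming PGInterval.UNIT_MAP maps unit words such as 'day' to timedelta keyword names): '1' parses as a float and is held in curr_val, 'day' fails float() and stores t['days'] = 1.0, and '12:30:00' contains a colon and fills the hours, minutes and seconds keys:

# worked example, assuming UNIT_MAP["day"] == "days"
assert interval_in("1 day 12:30:00") == Timedelta(days=1, hours=12, minutes=30)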
Code Example #21
File: system_price.py Project: cavenhe/chellow
def hh(data_source):
    ssp_rate_set = data_source.supplier_rate_sets['ssp-rate']
    sbp_rate_set = data_source.supplier_rate_sets['sbp-rate']

    for h in data_source.hh_data:
        try:
            sbp, ssp = data_source.caches['system_price'][h['start-date']]
        except KeyError:
            try:
                system_price_cache = data_source.caches['system_price']
            except KeyError:
                system_price_cache = data_source.caches['system_price'] = {}

            db_id = get_non_core_contract_id('system_price')
            h_start = h['start-date']
            rates = data_source.hh_rate(db_id, h_start)['gbp_per_nbp_mwh']

            try:
                try:
                    rdict = rates[key_format(h_start)]
                except KeyError:
                    rdict = rates[key_format(h_start - Timedelta(days=3))]
                sbp = float(rdict['sbp'] / 1000)
                ssp = float(rdict['ssp'] / 1000)
                system_price_cache[h_start] = (sbp, ssp)
            except KeyError:
                raise BadRequest("For the System Price rate script at " +
                                 hh_format(h_start) +
                                 " the rate cannot be found.")
            except TypeError:
                raise BadRequest(
                    "For the System Price rate script at " +
                    hh_format(h_start) +
                    " the rate 'rates_gbp_per_mwh' has the problem: " +
                    traceback.format_exc())

        h['sbp'] = sbp
        h['sbp-gbp'] = h['nbp-kwh'] * sbp
        sbp_rate_set.add(sbp)

        h['ssp'] = ssp
        h['ssp-gbp'] = h['nbp-kwh'] * ssp
        ssp_rate_set.add(ssp)
Code Example #22
File: system_price.py Project: WessexWater/chellow
def hh(data_source):
    for h in data_source.hh_data:
        try:
            sbp, ssp = data_source.caches["system_price"][h["start-date"]]
        except KeyError:
            try:
                system_price_cache = data_source.caches["system_price"]
            except KeyError:
                system_price_cache = data_source.caches["system_price"] = {}

            db_id = get_non_core_contract_id("system_price")
            h_start = h["start-date"]
            rates = data_source.hh_rate(db_id, h_start)["gbp_per_nbp_mwh"]

            try:
                try:
                    rdict = rates[key_format(h_start)]
                except KeyError:
                    rdict = rates[key_format(h_start - Timedelta(days=3))]
                sbp = float(rdict["sbp"] / 1000)
                ssp = float(rdict["ssp"] / 1000)
                system_price_cache[h_start] = (sbp, ssp)
            except KeyError:
                raise BadRequest(
                    "For the System Price rate script at "
                    + hh_format(h_start)
                    + " the rate cannot be found."
                )
            except TypeError:
                raise BadRequest(
                    "For the System Price rate script at "
                    + hh_format(h_start)
                    + " the rate 'rates_gbp_per_mwh' has the problem: "
                    + traceback.format_exc()
                )

        h["sbp"] = sbp
        h["sbp-gbp"] = h["nbp-kwh"] * sbp

        h["ssp"] = ssp
        h["ssp-gbp"] = h["nbp-kwh"] * ssp
Code Example #23
def test_timedelta_roundtrip(con):
    v = Timedelta(seconds=30)
    retval = con.run("SELECT cast(:v as interval)", v=v)
    assert retval[0][0] == v
Code Example #24
File: temporal.py Project: mdkrol/dask-geomodeling
    def time(self):
        return Timedelta(milliseconds=self.args[1])
Code Example #25
    elif 1860 <= now_in_seconds < 3600:
        starting_time = 3600 - now_in_seconds

    i = 1
    while True:
        print("Iteration No. {}: scheduler starts in {} seconds.".format(
            i, starting_time))
        scheduler.enter(starting_time, 1, task)
        scheduler.run()
        starting_time = __period
        if Datetime.now().second % 10 > 2:
            starting_time -= 1
        i += 1


def __to_seconds(t):
    return t.hour * 3600 + t.minute * 60 + t.second


def __convert_to_local_time(t):
    return (t - (get_time_zone_diff() * 3600)) % 86400


####################################################################
test_bot_token = "425426086:AAFtPbcx_YNjAzZgdudQyQ5yuQ48x2g6O6A"
main_bot_token = "413427401:AAEgcTahApxJLAPGHK43TfJAl40K7CdJ8pw"
journal_bot = Bot(main_bot_token)
scheduler = Scheduler()

__period = Timedelta(minutes=30).total_seconds()
Code Example #26
def add_gigasecond(date):
    assert isinstance(date, Date), "Input must be a valid date object."
    gigasecond = Timedelta(seconds=(10**9))
    return date + gigasecond
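
A gigasecond is 10**9 seconds, or 11574 whole days plus 1:46:40; date arithmetic ignores the sub-day remainder, so for example:

from datetime import date as Date

# 2011-04-25 plus 11574 days lands on 2043-01-01
assert add_gigasecond(Date(2011, 4, 25)) == Date(2043, 1, 1)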
Code Example #27
    def job_001(self, context):
        """This job's name is 001.

        Launch with: $ python jobs.py start 001
        """

        # ProxyPool, SimpleProxyFilter and SimpleProxyTestFilter are used
        # below; they are assumed to be importable from iproxy as well
        from iproxy import ProxyPool, ProxyPoolContext, ProxyLoaderContext, \
            ProxyValidatorContext, FatezeroProxySpider, IPValidator, \
            SimpleProxyFilter, SimpleProxyTestFilter
        from handler import HandlerContext, ProxyValidateHandler, MySQLStreamInserter
        from datetime import timedelta as Timedelta
        from database import MySQLOperation

        ## 0. Set up the context that provides the global environment for each component
        ctx = {
            'job_name': context.job_name,
            'job_time': context.job_time,
            'logger': context.logger,
        }

        ## 1. Create the proxy pool
        pool = ProxyPool(context=ProxyPoolContext(**ctx))

        ## 2. Load proxies
        # create the proxy loader
        loader = FatezeroProxySpider(
            timeout=60,                             # timeout (optional)
            num=5000,                               # number of proxies to load
            context=ProxyLoaderContext(**ctx),
        )
        # run the load
        pool.load(loader)

        ## 3. Prepare the validator
        v = IPValidator(
            **IPValidator.PLAN_IP138,               # validation method; a predefined plan here
            timeout=5,                              # timeout (optional)
            context=ProxyValidatorContext(**ctx),
        )

        ## 4. Prepare the filters
        # create the proxy filter (all of the parameters below are optional)
        pf = SimpleProxyFilter(
            # port_list=[80, 8080],                 # port numbers
            # protocol_list=['http', 'https'],      # protocols
            # local_list=['home'],                  # verification regions
            # collected_timedelta=Timedelta(days=1),# max age since the proxy was collected
        )
        # create the proxy test filter (all of the parameters below are optional)
        ptf = SimpleProxyTestFilter(
            proxy_filter=pf,                        # proxy filter
            response_elapsed_mean=6,                # mean response time (max, seconds)
            transfer_elapsed_mean=10,               # mean transfer time (max, seconds)
            timeout_exception_pr=0.34,              # timeout exception probability (max)
            proxy_exception_pr=0.34,                # proxy exception probability (max)
            valid_responses_pr=1,                   # valid response probability (min)
            pre_tested_timedelta=Timedelta(days=1), # per-test precondition: max age of the test
            pre_verification_ip=True,               # per-test precondition: passed IP verification
            # pre_valid_responses=True,             # per-test precondition: responded validly
        )

        ## 5. Prepare the handlers
        # create the proxy handler
        ph = MySQLStreamInserter(
            buffer_size=50,                         # buffer size
            concurrency=10,                         # maximum concurrency
            context=HandlerContext(**ctx),
        )
        # create the test-log handler
        tlh = MySQLStreamInserter(
            buffer_size=50,                         # buffer size
            concurrency=10,                         # maximum concurrency
            context=HandlerContext(**ctx),
        )
        # create the validation-result handler, which takes care of qualifying
        # proxies (e.g. inserting them into the database)
        h = ProxyValidateHandler(
            proxy_handler=ph,                       # sub-handler for Proxy objects (optional)
            test_log_handler=tlh,                   # sub-handler for TestLog objects (optional)
            proxy_test_filter=ptf,                  # filter that selects qualifying proxies (optional)
            context=HandlerContext(**ctx),
        )

        ## 6. Run the validation
        MySQLOperation.init_pool()
        pool.verify(
            validator=v,                            # validator
            handler=h,                              # handler
            repeat=3,                               # validation repeats per proxy
            concurrency=10,                         # maximum concurrency
            sleep=1,                                # pause between threads (seconds)
        )
        MySQLOperation.close_pool()
Code Example #28
def test_interval_in_30_seconds(con):
    retval = con.run("select interval '30 seconds'")
    assert retval[0][0] == Timedelta(seconds=30)
Code Example #29
def test_interval_in_12_days_30_seconds(con):
    retval = con.run("select interval '12 days 30 seconds'")
    assert retval[0][0] == Timedelta(days=12, seconds=30)
Code Example #30
def https_handler(sess, log_f, properties, contract, now=None):
    url_template_str = properties["url_template"]
    url_values = properties.get("url_values", {})
    download_days = properties["download_days"]
    if now is None:
        now = utc_datetime_now()
    window_finish = utc_datetime(now.year, now.month, now.day) - HH
    window_start = utc_datetime(now.year, now.month,
                                now.day) - Timedelta(days=download_days)
    log_f(f"Window start: {hh_format(window_start)}")
    log_f(f"Window finish: {hh_format(window_finish)}")
    env = jinja2.Environment(autoescape=True, undefined=jinja2.StrictUndefined)
    url_template = env.from_string(url_template_str)
    for era in (sess.query(Era).filter(
            Era.dc_contract == contract,
            Era.start_date <= window_finish,
            or_(Era.finish_date == null(), Era.finish_date >= window_start),
    ).distinct()):
        chunk_start = hh_max(era.start_date, window_start)
        chunk_finish = hh_min(era.finish_date, window_finish)
        for mpan_core in (era.imp_mpan_core, era.exp_mpan_core):
            if mpan_core is None:
                continue

            log_f(f"Looking at MPAN core {mpan_core}.")

            vals = {"chunk_start": chunk_start, "chunk_finish": chunk_finish}
            vals.update(url_values.get(mpan_core, {}))
            try:
                url = url_template.render(vals)
            except jinja2.exceptions.UndefinedError as e:
                raise BadRequest(
                    f"Problem rendering the URL template: {url_template_str}. "
                    f"The problem is: {e}. This can be fixed by editing the "
                    f"properties of this contract.")

            log_f(f"Retrieving data from {url}.")

            sess.rollback()  # Avoid long transactions
            res = requests.get(url, timeout=120)
            res.raise_for_status()
            result = res.json()  # reuse the response fetched above instead of requesting twice
            if isinstance(result, dict):
                result_data = result["DataPoints"]
            elif isinstance(result, list):
                result_data = result
            else:
                raise BadRequest(
                    f"Expecting a JSON object at the top level, but instead got "
                    f"{result}")
            raw_data = []
            for jdatum in result_data:
                raw_data.append(
                    dict(
                        mpan_core=mpan_core,
                        start_date=utc_datetime(1, 1, 1) +
                        Timedelta(seconds=jdatum["Time"] / 10000000),
                        channel_type="ACTIVE",
                        value=jdatum["Value"],
                        status="A",
                    ))
            HhDatum.insert(sess, raw_data, contract)
            sess.commit()
    log_f("Finished loading.")
    return False
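
The jdatum["Time"] values are treated as .NET-style ticks: 100-nanosecond units counted from 0001-01-01, hence the division by 10**7 to obtain seconds. A standalone sketch of that conversion:

from datetime import datetime as Datetime, timedelta as Timedelta

def ticks_to_naive_utc(ticks):
    # 1 tick == 100 ns, so 10**7 ticks per second, counted from year 1
    return Datetime(1, 1, 1) + Timedelta(seconds=ticks / 10**7)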